repo_name (string, 6-100 chars) | path (string, 4-294 chars) | copies (string, 1-5 chars) | size (string, 4-6 chars) | content (string, 606-896k chars) | license (string, 15 classes) |
---|---|---|---|---|---|
mirror/vbox | src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/Common/VpdInfoFile.py | 11 | 11296 | ## @file
#
# This package manages the VPD PCD information file which will be generated
# by build tool's autogen.
# The VPD PCD information file will be the input for the third-party BPDG tool which
# is pointed to by *_*_*_VPD_TOOL_GUID in conf/tools_def.txt
#
#
# Copyright (c) 2010, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
import os
import re
import Common.EdkLogger as EdkLogger
import Common.BuildToolError as BuildToolError
import subprocess
FILE_COMMENT_TEMPLATE = \
"""
## @file
#
# THIS FILE IS AUTO-GENERATED BY BUILD TOOLS. PLEASE DO NOT MODIFY IT.
#
# This file lists all VPD information for a platform collected by build.exe.
#
# Copyright (c) 2010, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
"""
## The class that manages the VpdInfoFile.
#
# This file contains an ordered (based on position in the DSC file) list of the PCDs specified in the platform description file (DSC). The Value field that will be assigned to the PCD comes from the DSC file, INF file (if not defined in the DSC file) or the DEC file (if not defined in the INF file). This file is used as an input to the BPDG tool.
# Format for this file (using EBNF notation) is:
# <File> :: = [<CommentBlock>]
# [<PcdEntry>]*
# <CommentBlock> ::= ["#" <String> <EOL>]*
# <PcdEntry> ::= <PcdName> "|" <Offset> "|" <Size> "|" <Value> <EOL>
# <PcdName> ::= <TokenSpaceCName> "." <PcdCName>
# <TokenSpaceCName> ::= C Variable Name of the Token Space GUID
# <PcdCName> ::= C Variable Name of the PCD
# <Offset> ::= {"*"} {<HexNumber>}
# <HexNumber> ::= "0x" (a-fA-F0-9){1,8}
# <Size> ::= <HexNumber>
# <Value> ::= {<HexNumber>} {<NonNegativeInt>} {<QString>} {<Array>}
# <NonNegativeInt> ::= (0-9)+
# <QString> ::= ["L"] <DblQuote> <String> <DblQuote>
# <DblQuote> ::= 0x22
# <Array> ::= {<CArray>} {<NList>}
# <CArray> ::= "{" <HexNumber> ["," <HexNumber>]* "}"
# <NList> ::= <HexNumber> ["," <HexNumber>]*
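#
# Example entries matching this grammar (the PCD names and values below are
# illustrative only, not taken from a real platform):
#   gExampleTokenSpaceGuid.PcdVpdSample|0x00000010|0x4|0x12345678
#   gExampleTokenSpaceGuid.PcdVpdString|*|0x10|L"Sample"
# A "*" offset means the position is left for the BPDG tool to fix up.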
#
class VpdInfoFile:
## The mapping dictionary from datum type to size string.
_MAX_SIZE_TYPE = {"BOOLEAN":"1", "UINT8":"1", "UINT16":"2", "UINT32":"4", "UINT64":"8"}
_rVpdPcdLine = None
## Constructor
def __init__(self):
## Dictionary for VPD in the following format
#
# Key : PcdClassObject instance.
# @see BuildClassObject.PcdClassObject
# Value : offset in different SKU such as [sku1_offset, sku2_offset]
self._VpdArray = {}
## Add a VPD PCD collected from platform's autogen when building.
#
# @param vpds The list of VPD PCD collected for a platform.
# @see BuildClassObject.PcdClassObject
#
# @param offset integer value for VPD's offset in specific SKU.
#
def Add(self, Vpd, Offset):
if (Vpd == None):
EdkLogger.error("VpdInfoFile", BuildToolError.ATTRIBUTE_UNKNOWN_ERROR, "Invalid VPD PCD entry.")
if not (Offset >= 0 or Offset == "*"):
EdkLogger.error("VpdInfoFile", BuildToolError.PARAMETER_INVALID, "Invalid offset parameter: %s." % Offset)
if Vpd.DatumType == "VOID*":
if Vpd.MaxDatumSize <= 0:
EdkLogger.error("VpdInfoFile", BuildToolError.PARAMETER_INVALID,
"Invalid max datum size for VPD PCD %s.%s" % (Vpd.TokenSpaceGuidCName, Vpd.TokenCName))
elif Vpd.DatumType in ["BOOLEAN", "UINT8", "UINT16", "UINT32", "UINT64"]:
if Vpd.MaxDatumSize == None or Vpd.MaxDatumSize == "":
Vpd.MaxDatumSize = VpdInfoFile._MAX_SIZE_TYPE[Vpd.DatumType]
else:
EdkLogger.error("VpdInfoFile", BuildToolError.PARAMETER_INVALID,
"Invalid DatumType %s for VPD PCD %s.%s" % (Vpd.DatumType, Vpd.TokenSpaceGuidCName, Vpd.TokenCName))
if Vpd not in self._VpdArray.keys():
#
# If there is no Vpd instance in the dict, this offset for the given SKU is a new one
#
self._VpdArray[Vpd] = [Offset]
else:
#
# If there is already an offset for a specific SKU in the dict, append this offset for another SKU to the array.
#
self._VpdArray[Vpd].append(Offset)
## Generate VPD PCD information into a text file
#
# If parameter FilePath is invalid, then assert.
# If
# @param FilePath The given file path which would hold VPD information
def Write(self, FilePath):
if FilePath is None or len(FilePath) == 0:
EdkLogger.error("VpdInfoFile", BuildToolError.PARAMETER_INVALID,
"Invalid parameter FilePath: %s." % FilePath)
try:
fd = open(FilePath, "w")
except:
EdkLogger.error("VpdInfoFile",
BuildToolError.FILE_OPEN_FAILURE,
"Fail to open file %s for written." % FilePath)
try:
# write file header
fd.write(FILE_COMMENT_TEMPLATE)
# write each of PCD in VPD type
Pcds = self._VpdArray.keys()
Pcds.sort()
for Pcd in Pcds:
for Offset in self._VpdArray[Pcd]:
PcdValue = str(Pcd.SkuInfoList[Pcd.SkuInfoList.keys()[0]].DefaultValue).strip()
if PcdValue == "" :
PcdValue = Pcd.DefaultValue
fd.write("%s.%s|%s|%s|%s \n" % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName, str(Offset).strip(), str(Pcd.MaxDatumSize).strip(),PcdValue))
except:
EdkLogger.error("VpdInfoFile",
BuildToolError.FILE_WRITE_FAILURE,
"Fail to write file %s" % FilePath)
fd.close()
## Read an existing VPD PCD info file.
#
# This routine will read VPD PCD information from existing file and construct
# internal PcdClassObject array.
# This routine could be used by third-party tool to parse VPD info file content.
#
# @param FilePath The full path string for existing VPD PCD info file.
def Read(self, FilePath):
try:
fd = open(FilePath, "r")
except:
EdkLogger.error("VpdInfoFile",
BuildToolError.FILE_OPEN_FAILURE,
"Fail to open file %s for written." % FilePath)
Lines = fd.readlines()
for Line in Lines:
Line = Line.strip()
if len(Line) == 0 or Line.startswith("#"):
continue
#
# the line must follow output format defined in BPDG spec.
#
try:
PcdName, Offset, Size, Value = Line.split("#")[0].split("|")
TokenSpaceName, PcdTokenName = PcdName.split(".")
except:
EdkLogger.error("BPDG", BuildToolError.PARSER_ERROR, "Fail to parse VPD information file %s" % FilePath)
Found = False
for VpdObject in self._VpdArray.keys():
if VpdObject.TokenSpaceGuidCName == TokenSpaceName and VpdObject.TokenCName == PcdTokenName.strip():
if self._VpdArray[VpdObject][0] == "*":
if Offset == "*":
EdkLogger.error("BPDG", BuildToolError.FORMAT_INVALID, "The offset of %s has not been fixed up by third-party BPDG tool." % PcdName)
self._VpdArray[VpdObject][0] = Offset
Found = True
break
if not Found:
EdkLogger.error("BPDG", BuildToolError.PARSER_ERROR, "Can not find PCD defined in VPD guid file.")
## Get count of VPD PCD collected from platform's autogen when building.
#
# @return The integer count value
def GetCount(self):
Count = 0
for OffsetList in self._VpdArray.values():
Count += len(OffsetList)
return Count
## Get an offset value for a given VPD PCD
#
# Because BPDG only supports one SKU, only the offset for the default SKU is returned.
#
# @param vpd A given VPD PCD
def GetOffset(self, vpd):
if not self._VpdArray.has_key(vpd):
return None
if len(self._VpdArray[vpd]) == 0:
return None
return self._VpdArray[vpd]
## Call external BPDG tool to process VPD file
#
# @param ToolPath The string path name for BPDG tool
# @param VpdFileName The string path name for VPD information guid.txt
#
def CallExtenalBPDGTool(ToolPath, VpdFileName):
assert ToolPath != None, "Invalid parameter ToolPath"
assert VpdFileName != None and os.path.exists(VpdFileName), "Invalid parameter VpdFileName"
OutputDir = os.path.dirname(VpdFileName)
FileName = os.path.basename(VpdFileName)
BaseName, ext = os.path.splitext(FileName)
OutputMapFileName = os.path.join(OutputDir, "%s.map" % BaseName)
OutputBinFileName = os.path.join(OutputDir, "%s.bin" % BaseName)
try:
PopenObject = subprocess.Popen([ToolPath,
'-o', OutputBinFileName,
'-m', OutputMapFileName,
'-q',
'-f',
VpdFileName],
stdout=subprocess.PIPE,
stderr= subprocess.PIPE)
except Exception, X:
EdkLogger.error("BPDG", BuildToolError.COMMAND_FAILURE, ExtraData="%s" % (str(X)))
(out, error) = PopenObject.communicate()
print out
while PopenObject.returncode == None :
PopenObject.wait()
if PopenObject.returncode != 0:
EdkLogger.debug(EdkLogger.DEBUG_1, "Fail to call BPDG tool", str(error))
EdkLogger.error("BPDG", BuildToolError.COMMAND_FAILURE, "Fail to execute BPDG tool with exit code: %d, the error message is: \n %s" % \
(PopenObject.returncode, str(error)))
return PopenObject.returncode
| gpl-2.0 |
Drooids/odoo | addons/auth_crypt/auth_crypt.py | 49 | 3999 | import logging
from passlib.context import CryptContext
import openerp
from openerp.osv import fields, osv
openerp.addons.base.res.res_users.USER_PRIVATE_FIELDS.append('password_crypt')
_logger = logging.getLogger(__name__)
default_crypt_context = CryptContext(
# kdf which can be verified by the context. The default encryption kdf is
# the first of the list
['pbkdf2_sha512', 'md5_crypt'],
# deprecated algorithms are still verified as usual, but ``needs_update``
# will indicate that the stored hash should be replaced by a more recent
# algorithm. Passlib 1.6 supports an `auto` value which deprecates any
# algorithm but the default, but Debian only provides 1.5 so...
deprecated=['md5_crypt'],
)
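# Illustrative sketch (not part of the original module) of how a CryptContext
# like the one above behaves; 'secret' and stored_hash are placeholders:
#   valid, new_hash = default_crypt_context.verify_and_update('secret', stored_hash)
#   # new_hash is None when the stored hash already uses the default kdf;
#   # otherwise it is a replacement hash that should be persisted.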
class res_users(osv.osv):
_inherit = "res.users"
def init(self, cr):
_logger.info("Hashing passwords, may be slow for databases with many users...")
cr.execute("SELECT id, password FROM res_users"
" WHERE password IS NOT NULL"
" AND password != ''")
for uid, pwd in cr.fetchall():
self._set_password(cr, openerp.SUPERUSER_ID, uid, pwd)
def set_pw(self, cr, uid, id, name, value, args, context):
if value:
self._set_password(cr, uid, id, value, context=context)
self.invalidate_cache(cr, uid, context=context)
def get_pw( self, cr, uid, ids, name, args, context ):
cr.execute('select id, password from res_users where id in %s', (tuple(map(int, ids)),))
return dict(cr.fetchall())
_columns = {
'password': fields.function(get_pw, fnct_inv=set_pw, type='char', string='Password', invisible=True, store=True),
'password_crypt': fields.char(string='Encrypted Password', invisible=True, copy=False),
}
def check_credentials(self, cr, uid, password):
# convert to base_crypt if needed
cr.execute('SELECT password, password_crypt FROM res_users WHERE id=%s AND active', (uid,))
encrypted = None
if cr.rowcount:
stored, encrypted = cr.fetchone()
if stored and not encrypted:
self._set_password(cr, uid, uid, stored)
self.invalidate_cache(cr, uid)
try:
return super(res_users, self).check_credentials(cr, uid, password)
except openerp.exceptions.AccessDenied:
if encrypted:
valid_pass, replacement = self._crypt_context(cr, uid, uid)\
.verify_and_update(password, encrypted)
if replacement is not None:
self._set_encrypted_password(cr, uid, uid, replacement)
if valid_pass:
return
raise
def _set_password(self, cr, uid, id, password, context=None):
""" Encrypts then stores the provided plaintext password for the user
``id``
"""
encrypted = self._crypt_context(cr, uid, id, context=context).encrypt(password)
self._set_encrypted_password(cr, uid, id, encrypted, context=context)
def _set_encrypted_password(self, cr, uid, id, encrypted, context=None):
""" Store the provided encrypted password to the database, and clears
any plaintext password
:param uid: id of the current user
:param id: id of the user on which the password should be set
"""
cr.execute(
"UPDATE res_users SET password='', password_crypt=%s WHERE id=%s",
(encrypted, id))
def _crypt_context(self, cr, uid, id, context=None):
""" Passlib CryptContext instance used to encrypt and verify
passwords. Can be overridden if technical, legal or political matters
require different kdfs than the provided default.
Requires a CryptContext as deprecation and upgrade notices are used
internally
"""
return default_crypt_context
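# Hypothetical override sketch (not part of this module): an add-on could
# subclass res_users and return a differently configured context, e.g.
#   def _crypt_context(self, cr, uid, id, context=None):
#       return CryptContext(['pbkdf2_sha512'])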
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
paolap/cwsl-mas | cwsl/tests/test_scheduler.py | 4 | 2051 | """
Authors: Tim Bedin, Tim Erwin
Copyright 2014 CSIRO
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Tests for the scheduler classes.
"""
import unittest
from cwsl.core.scheduler import SimpleExecManager
class TestScheduler(unittest.TestCase):
def setUp(self):
self.test_cons_dict = {'test_1': 'first_test',
'test_2': 'second_test'}
def test_schedulecommand(self):
""" Test that the scheduler can create a script for a simple command. """
in_files = ['infile_1']
out_files = ['outfile_1']
this_manager = SimpleExecManager(noexec=True)
this_manager.add_cmd(['echo'] + in_files + out_files, out_files)
this_manager.submit()
expected_string = """#!/bin/sh\nset -e\n\nmodule purge\nmkdir -p \necho infile_1 outfile_1\n"""
self.assertEqual(this_manager.job.to_str(), expected_string)
def test_command_annotation(self):
""" Test that the scheduler can correctly add annotations. """
in_files = ['infile_1.nc']
out_files = ['outfile_1.nc']
this_manager = SimpleExecManager(noexec=True)
this_manager.add_cmd(['echo'] + in_files + out_files, out_files, annotation="This is an annotation")
this_manager.submit()
expected_string = """#!/bin/sh\nset -e\n\nmodule purge\nmodule load nco\nmkdir -p \necho infile_1.nc outfile_1.nc\nncatted -O -a vistrails_history,global,a,c,"This is an annotation" outfile_1.nc\n"""
self.assertEqual(this_manager.job.to_str(), expected_string)
| apache-2.0 |
shakamunyi/neutron-vrrp | neutron/plugins/openvswitch/ovs_models_v2.py | 49 | 3896 | # Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Boolean, Column, ForeignKey, Integer, String
from sqlalchemy.schema import UniqueConstraint
from neutron.db import model_base
from neutron.db import models_v2
from sqlalchemy import orm
class VlanAllocation(model_base.BASEV2):
"""Represents allocation state of vlan_id on physical network."""
__tablename__ = 'ovs_vlan_allocations'
physical_network = Column(String(64), nullable=False, primary_key=True)
vlan_id = Column(Integer, nullable=False, primary_key=True,
autoincrement=False)
allocated = Column(Boolean, nullable=False)
def __init__(self, physical_network, vlan_id):
self.physical_network = physical_network
self.vlan_id = vlan_id
self.allocated = False
def __repr__(self):
return "<VlanAllocation(%s,%d,%s)>" % (self.physical_network,
self.vlan_id, self.allocated)
class TunnelAllocation(model_base.BASEV2):
"""Represents allocation state of tunnel_id."""
__tablename__ = 'ovs_tunnel_allocations'
tunnel_id = Column(Integer, nullable=False, primary_key=True,
autoincrement=False)
allocated = Column(Boolean, nullable=False)
def __init__(self, tunnel_id):
self.tunnel_id = tunnel_id
self.allocated = False
def __repr__(self):
return "<TunnelAllocation(%d,%s)>" % (self.tunnel_id, self.allocated)
class NetworkBinding(model_base.BASEV2):
"""Represents binding of virtual network to physical realization."""
__tablename__ = 'ovs_network_bindings'
network_id = Column(String(36),
ForeignKey('networks.id', ondelete="CASCADE"),
primary_key=True)
# 'gre', 'vlan', 'flat', 'local'
network_type = Column(String(32), nullable=False)
physical_network = Column(String(64))
segmentation_id = Column(Integer) # tunnel_id or vlan_id
network = orm.relationship(
models_v2.Network,
backref=orm.backref("binding", lazy='joined',
uselist=False, cascade='delete'))
def __init__(self, network_id, network_type, physical_network,
segmentation_id):
self.network_id = network_id
self.network_type = network_type
self.physical_network = physical_network
self.segmentation_id = segmentation_id
def __repr__(self):
return "<NetworkBinding(%s,%s,%s,%d)>" % (self.network_id,
self.network_type,
self.physical_network,
self.segmentation_id)
class TunnelEndpoint(model_base.BASEV2):
"""Represents tunnel endpoint in RPC mode."""
__tablename__ = 'ovs_tunnel_endpoints'
__table_args__ = (
UniqueConstraint('id', name='uniq_ovs_tunnel_endpoints0id'),
model_base.BASEV2.__table_args__,
)
ip_address = Column(String(64), primary_key=True)
id = Column(Integer, nullable=False)
def __init__(self, ip_address, id):
self.ip_address = ip_address
self.id = id
def __repr__(self):
return "<TunnelEndpoint(%s,%s)>" % (self.ip_address, self.id)
| apache-2.0 |
linzhaolover/myansible | capi_states/mitaka/gpu_volume.py | 2 | 3009 | #!/usr/bin/env python
"""
a module for sharing the host GPU, created on 2016/05/31
"""
import os
import re
#import shutil
from oslo_log import log
LOG = log.getLogger(__name__)
def search_file(topPath, reValue):
print "Jian>>> topPath:%s search:%s" % (topPath, reValue)
resultList = []
for rootpath, dirnames, filenames in os.walk(topPath):
for filename in filenames:
if re.findall(reValue, filename, re.IGNORECASE):
resultList.append(os.path.join(rootpath, filename))
print "Jian>>> lib files:%s" % resultList
return resultList
def search_nvidia_dir(dstPath):
dirList = []
patt = re.compile('nvidia-\d+$', re.IGNORECASE)
for dirname in os.listdir(dstPath):
if patt.findall(dirname):
dirList.append(os.path.join(dstPath, dirname))
return dirList
"""
{"message": "Build of instance 280cc136-7b4d-43cd-ae88-9f84b9f2a083 was re-scheduled: [Errno 13] Permission denied: '/etc/ld.so.conf.d/powerpc64le-linux-gnu_GL.conf'", "code": 500, "created": "2016-06-01T09:40:30Z"}
def create_local_conf():
libList = search_nvidia_dir("/usr/lib/")
content = sorted(libList)[-1]
if os.path.exists("/etc/ld.so.conf.d/powerpc64le-linux-gnu_GL.conf"):
shutil.copyfile("/etc/ld.so.conf.d/powerpc64le-linux-gnu_GL.conf", "/etc/ld.so.conf.d/powerpc64le-linux-gnu_GL.conf.bak")
os.remove("/etc/ld.so.conf.d/powerpc64le-linux-gnu_GL.conf")
with open("/etc/ld.so.conf.d/powerpc64le-linux-gnu_GL.conf", "w") as fh:
fh.writelines(content)
#os.system("cd /etc/ld.so.conf.d/ && echo '%s' > powerpc64le-linux-gnu_GL.conf" % content)
"""
def report_gpu_volume():
#create_local_conf()
ubuntuDict = {
"nvidia-smi" : "/usr/lib/",
"libnvidia-ml" : "/usr/lib/",
"libcuda" : "/usr/lib/powerpc64le-linux-gnu/",
"etc_ld" : "/etc/ld.so.conf.d/powerpc64le-linux-gnu_GL.conf", #this file need to be create manually before running, on 2016/06/01
"usr_bin" : "/usr/bin/nvidia-smi",
}
allDict = []
allDict.append(ubuntuDict)
bindList = []
for eachDict in allDict:
if "etc_ld" in eachDict.keys():
bindList.append(eachDict["etc_ld"])
if "usr_bin" in eachDict.keys():
bindList.append(eachDict["usr_bin"])
for nvidiaPath in search_nvidia_dir(eachDict["nvidia-smi"]):
print "nvidia path:", nvidiaPath
bindList.extend(search_file(os.path.join(nvidiaPath, "bin"), "nvidia-smi"))
for nvidiaPath in search_nvidia_dir(eachDict["libnvidia-ml"]):
bindList.extend(search_file(nvidiaPath, "libnvidia-ml\.so"))
bindList.extend(search_file(eachDict["libcuda"], "libcuda\.so"))
LOG.info("Jian>>> Bind List:%s" % bindList)
print "Jian>>> Bind List:%s" % bindList
return bindList
if __name__ == '__main__':
print "Begain."
report_gpu_volume()
| apache-2.0 |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/sklearn/externals/joblib/__init__.py | 23 | 5101 | """ Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://pythonhosted.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing the same thing twice**: code is rerun over and
over, for instance when prototyping computation-heavy jobs (as in
scientific development), but hand-crafted solutions to alleviate this
issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
good for resuming an application status or computational job, eg
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
progressively acquire better logging mechanism to help track what
has been ran, and capture I/O easily. In addition, Joblib will
provide a few I/O primitives, to easily define logging and
display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.10.3'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
from .parallel import register_parallel_backend
from .parallel import parallel_backend
from .parallel import effective_n_jobs
__all__ = ['Memory', 'MemorizedResult', 'PrintTime', 'Logger', 'hash', 'dump',
'load', 'Parallel', 'delayed', 'cpu_count', 'effective_n_jobs',
'register_parallel_backend', 'parallel_backend']
| mit |
redhat-openstack/rdo-infra | ci-scripts/dlrnapi_promoter/test_registries_client_unit.py | 1 | 10398 | import subprocess
import yaml
try:
# Python3 imports
from unittest import mock
from unittest.mock import patch
except ImportError:
# Python2 imports
from mock import patch
import mock
from common import PromotionError
from dlrn_hash import DlrnCommitDistroHash, DlrnHash
from test_unit_fixtures import LegacyConfigSetup, hashes_test_cases
class TestPrepareExtraVars(LegacyConfigSetup):
def setUp(self):
super(TestPrepareExtraVars, self).setUp()
self.client = self.promoter.registries_client
self.dlrn_hash_commitdistro = DlrnCommitDistroHash(commit_hash='abc',
distro_hash='def',
component="comp1",
timestamp=1)
def test_setup(self):
error_msg = "Container push logfile is misplaced"
assert self.client.logfile != "", error_msg
@patch('logging.Logger.error')
@patch('logging.Logger.info')
@patch('logging.Logger.debug')
@patch('repo_client.RepoClient.get_versions_csv')
@patch('repo_client.RepoClient.get_commit_sha')
@patch('repo_client.RepoClient.get_containers_list')
def test_prepare_extra_vars_empty_missing_reader(self,
get_containers_mock,
get_commit_mock,
get_versions_mock,
mock_log_debug,
mock_log_info,
mock_log_error):
get_versions_mock.return_value = None
with self.assertRaises(PromotionError):
self.client.prepare_extra_vars(self.dlrn_hash_commitdistro,
"current-tripleo",
"tripleo-ci-testing")
get_versions_mock.assert_has_calls([
mock.call(self.dlrn_hash_commitdistro, "tripleo-ci-testing")
])
self.assertFalse(get_commit_mock.called)
self.assertFalse(get_containers_mock.called)
self.assertFalse(mock_log_debug.called)
self.assertFalse(mock_log_info.called)
mock_log_error.assert_has_calls([
mock.call("No versions.csv found")
])
@patch('logging.Logger.error')
@patch('logging.Logger.info')
@patch('logging.Logger.debug')
@patch('repo_client.RepoClient.get_versions_csv')
@patch('repo_client.RepoClient.get_commit_sha')
@patch('repo_client.RepoClient.get_containers_list')
def test_prepare_extra_vars_empty_missing_sha(self,
get_containers_mock,
get_commit_mock,
get_versions_mock,
mock_log_debug,
mock_log_info,
mock_log_error):
get_versions_mock.return_value = "reader"
get_commit_mock.return_value = None
with self.assertRaises(PromotionError):
self.client.prepare_extra_vars(self.dlrn_hash_commitdistro,
"current-tripleo",
"tripleo-ci-testing")
get_versions_mock.assert_has_calls([
mock.call(self.dlrn_hash_commitdistro, "tripleo-ci-testing")
])
get_commit_mock.assert_has_calls([
mock.call("reader", "openstack-tripleo-common")
])
self.assertFalse(get_containers_mock.called)
self.assertFalse(mock_log_debug.called)
self.assertFalse(mock_log_info.called)
mock_log_error.assert_has_calls([
mock.call("Versions.csv does not contain tripleo-common commit")
])
@patch('logging.Logger.error')
@patch('logging.Logger.info')
@patch('logging.Logger.debug')
@patch('repo_client.RepoClient.get_versions_csv')
@patch('repo_client.RepoClient.get_commit_sha')
@patch('repo_client.RepoClient.get_containers_list')
def test_prepare_extra_vars_empty_containers_list(self,
get_containers_mock,
get_commit_mock,
get_versions_mock,
mock_log_debug,
mock_log_info,
mock_log_error):
get_versions_mock.return_value = "reader"
get_commit_mock.return_value = "abc"
get_containers_mock.return_value = []
with self.assertRaises(PromotionError):
self.client.prepare_extra_vars(self.dlrn_hash_commitdistro,
"current-tripleo",
"tripleo-ci-testing")
get_versions_mock.assert_has_calls([
mock.call(self.dlrn_hash_commitdistro, "tripleo-ci-testing")
])
get_commit_mock.assert_has_calls([
mock.call("reader", "openstack-tripleo-common")
])
get_containers_mock.assert_has_calls([
mock.call("abc")
])
self.assertFalse(mock_log_debug.called)
self.assertFalse(mock_log_info.called)
mock_log_error.assert_has_calls([
mock.call("Containers list is empty")
])
@patch('logging.Logger.error')
@patch('logging.Logger.info')
@patch('logging.Logger.debug')
@patch('repo_client.RepoClient.get_versions_csv')
@patch('repo_client.RepoClient.get_commit_sha')
@patch('repo_client.RepoClient.get_containers_list')
def test_prepare_extra_vars_success(self,
get_containers_mock,
get_commit_mock,
get_versions_mock,
mock_log_debug,
mock_log_info,
mock_log_error):
get_versions_mock.return_value = "reader"
get_commit_mock.return_value = "abc"
get_containers_mock.return_value = ['a', 'b']
extra_vars_path = \
self.client.prepare_extra_vars(self.dlrn_hash_commitdistro,
"current-tripleo",
"tripleo-ci-testing")
self.assertIsInstance(extra_vars_path, str)
self.assertIn(".yaml", extra_vars_path)
with open(extra_vars_path) as extra_vars_file:
extra_vars = yaml.safe_load(stream=extra_vars_file)
self.assertIsInstance(extra_vars, dict)
self.assertDictEqual(extra_vars, {
'release': "master",
'script_root': mock.ANY,
'distro_name': "centos",
'distro_version': '7',
'manifest_push': True,
'target_registries_push': True,
'candidate_label': "tripleo-ci-testing",
"named_label": "current-tripleo",
"source_namespace": "tripleomaster",
"target_namespace": "tripleomaster",
"commit_hash": self.dlrn_hash_commitdistro.commit_hash,
"distro_hash": self.dlrn_hash_commitdistro.distro_hash,
"full_hash": self.dlrn_hash_commitdistro.full_hash,
"containers_list": ['a', 'b']
})
get_versions_mock.assert_has_calls([
mock.call(self.dlrn_hash_commitdistro, "tripleo-ci-testing")
])
get_commit_mock.assert_has_calls([
mock.call("reader", "openstack-tripleo-common")
])
get_containers_mock.assert_has_calls([
mock.call("abc")
])
mock_log_debug.assert_has_calls([
mock.call("Crated extra vars file at %s", mock.ANY)
])
mock_log_info.assert_has_calls([
mock.call("Passing extra vars to playbook: %s", mock.ANY)
])
self.assertFalse(mock_log_error.called)
class TestPromote(LegacyConfigSetup):
def setUp(self):
super(TestPromote, self).setUp()
self.client = self.promoter.registries_client
self.dlrn_hash_commitdistro = DlrnCommitDistroHash(
commit_hash='abc',
distro_hash='def',
component="comp1",
timestamp=1)
@patch('logging.Logger.error')
@patch('logging.Logger.info')
@patch('os.unlink')
@patch('registries_client.RegistriesClient.prepare_extra_vars')
@mock.patch('subprocess.check_output')
def test_promote_success(self, check_output_mock,
extra_vars_mock,
unlink_mock,
mock_log_info,
mock_log_error
):
candidate_hash =\
DlrnHash(source=hashes_test_cases['aggregate']['dict']['valid'])
target_label = "test"
check_output_mock.return_value = "test log"
self.client.promote(candidate_hash, target_label)
self.assertTrue(check_output_mock.called)
self.assertFalse(mock_log_error.called)
@patch('logging.Logger.error')
@patch('logging.Logger.info')
@patch('os.unlink')
@patch('registries_client.RegistriesClient.prepare_extra_vars')
@mock.patch('subprocess.check_output')
def test_promote_failure(self, check_output_mock,
extra_vars_mock,
unlink_mock,
mock_log_info,
mock_log_error
):
candidate_hash = \
DlrnHash(source=hashes_test_cases['aggregate']['dict']['valid'])
target_label = "test"
exception = subprocess.CalledProcessError(1, 2)
exception.output = b"test"
check_output_mock.side_effect = exception
with self.assertRaises(PromotionError):
self.client.promote(candidate_hash, target_label)
self.assertTrue(mock_log_error.called)
| apache-2.0 |
alphatwirl/alphatwirl | alphatwirl/summary/Scan.py | 1 | 1209 | # Tai Sakuma <[email protected]>
##__________________________________________________________________||
import numpy as np
import copy
##__________________________________________________________________||
class Scan:
def __init__(self, val=None, weight=1, contents=None):
if contents is not None:
self.contents = contents
return
if val is None:
self.contents = [ ]
return
self.contents = [val]
def __add__(self, other):
contents = self.contents + other.contents
return self.__class__(contents=contents)
def __radd__(self, other):
# is called with other = 0 when e.g. sum([obj1, obj2])
if other == 0:
return self.__class__() + self
raise TypeError('unsupported: {!r} + {!r}'.format(other, self))
def __repr__(self):
return '{}(contents={})'.format(self.__class__.__name__, self.contents)
def __eq__(self, other):
return self.contents == other.contents
def __copy__(self):
contents = list(self.contents)
return self.__class__(contents=contents)
##__________________________________________________________________||
| bsd-3-clause |
paukenba/youtube-dl | youtube_dl/extractor/xhamster.py | 58 | 6318 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
unified_strdate,
str_to_int,
int_or_none,
parse_duration,
)
class XHamsterIE(InfoExtractor):
_VALID_URL = r'(?P<proto>https?)://(?:.+?\.)?xhamster\.com/movies/(?P<id>[0-9]+)/(?P<seo>.+?)\.html(?:\?.*)?'
_TESTS = [
{
'url': 'http://xhamster.com/movies/1509445/femaleagent_shy_beauty_takes_the_bait.html',
'info_dict': {
'id': '1509445',
'ext': 'mp4',
'title': 'FemaleAgent Shy beauty takes the bait',
'upload_date': '20121014',
'uploader': 'Ruseful2011',
'duration': 893,
'age_limit': 18,
}
},
{
'url': 'http://xhamster.com/movies/2221348/britney_spears_sexy_booty.html?hd',
'info_dict': {
'id': '2221348',
'ext': 'mp4',
'title': 'Britney Spears Sexy Booty',
'upload_date': '20130914',
'uploader': 'jojo747400',
'duration': 200,
'age_limit': 18,
}
},
{
'url': 'https://xhamster.com/movies/2272726/amber_slayed_by_the_knight.html',
'only_matching': True,
},
]
def _real_extract(self, url):
def extract_video_url(webpage, name):
return self._search_regex(
[r'''file\s*:\s*(?P<q>["'])(?P<mp4>.+?)(?P=q)''',
r'''<a\s+href=(?P<q>["'])(?P<mp4>.+?)(?P=q)\s+class=["']mp4Thumb''',
r'''<video[^>]+file=(?P<q>["'])(?P<mp4>.+?)(?P=q)[^>]*>'''],
webpage, name, group='mp4')
def is_hd(webpage):
return '<div class=\'icon iconHD\'' in webpage
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
seo = mobj.group('seo')
proto = mobj.group('proto')
mrss_url = '%s://xhamster.com/movies/%s/%s.html' % (proto, video_id, seo)
webpage = self._download_webpage(mrss_url, video_id)
title = self._html_search_regex(r'<title>(?P<title>.+?) - xHamster\.com</title>', webpage, 'title')
# Only a few videos have an description
mobj = re.search(r'<span>Description: </span>([^<]+)', webpage)
description = mobj.group(1) if mobj else None
upload_date = self._html_search_regex(r'hint=\'(\d{4}-\d{2}-\d{2}) \d{2}:\d{2}:\d{2} [A-Z]{3,4}\'',
webpage, 'upload date', fatal=False)
if upload_date:
upload_date = unified_strdate(upload_date)
uploader = self._html_search_regex(
r"<a href='[^']+xhamster\.com/user/[^>]+>(?P<uploader>[^<]+)",
webpage, 'uploader', default='anonymous')
thumbnail = self._search_regex(
[r'''thumb\s*:\s*(?P<q>["'])(?P<thumbnail>.+?)(?P=q)''',
r'''<video[^>]+poster=(?P<q>["'])(?P<thumbnail>.+?)(?P=q)[^>]*>'''],
webpage, 'thumbnail', fatal=False, group='thumbnail')
duration = parse_duration(self._html_search_regex(r'<span>Runtime:</span> (\d+:\d+)</div>',
webpage, 'duration', fatal=False))
view_count = self._html_search_regex(r'<span>Views:</span> ([^<]+)</div>', webpage, 'view count', fatal=False)
if view_count:
view_count = str_to_int(view_count)
mobj = re.search(r"hint='(?P<likecount>\d+) Likes / (?P<dislikecount>\d+) Dislikes'", webpage)
(like_count, dislike_count) = (mobj.group('likecount'), mobj.group('dislikecount')) if mobj else (None, None)
mobj = re.search(r'</label>Comments \((?P<commentcount>\d+)\)</div>', webpage)
comment_count = mobj.group('commentcount') if mobj else 0
age_limit = self._rta_search(webpage)
hd = is_hd(webpage)
format_id = 'hd' if hd else 'sd'
video_url = extract_video_url(webpage, format_id)
formats = [{
'url': video_url,
'format_id': 'hd' if hd else 'sd',
'preference': 1,
}]
if not hd:
mrss_url = self._search_regex(r'<link rel="canonical" href="([^"]+)', webpage, 'mrss_url')
webpage = self._download_webpage(mrss_url + '?hd', video_id, note='Downloading HD webpage')
if is_hd(webpage):
video_url = extract_video_url(webpage, 'hd')
formats.append({
'url': video_url,
'format_id': 'hd',
'preference': 2,
})
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': description,
'upload_date': upload_date,
'uploader': uploader,
'thumbnail': thumbnail,
'duration': duration,
'view_count': view_count,
'like_count': int_or_none(like_count),
'dislike_count': int_or_none(dislike_count),
'comment_count': int_or_none(comment_count),
'age_limit': age_limit,
'formats': formats,
}
class XHamsterEmbedIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?xhamster\.com/xembed\.php\?video=(?P<id>\d+)'
_TEST = {
'url': 'http://xhamster.com/xembed.php?video=3328539',
'info_dict': {
'id': '3328539',
'ext': 'mp4',
'title': 'Pen Masturbation',
'upload_date': '20140728',
'uploader_id': 'anonymous',
'duration': 5,
'age_limit': 18,
}
}
@staticmethod
def _extract_urls(webpage):
return [url for _, url in re.findall(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?xhamster\.com/xembed\.php\?video=\d+)\1',
webpage)]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_url = self._search_regex(
r'href="(https?://xhamster\.com/movies/%s/[^"]+\.html[^"]*)"' % video_id,
webpage, 'xhamster url')
return self.url_result(video_url, 'XHamster')
| unlicense |
oasis-open/cti-python-stix2 | stix2/test/v20/test_kill_chain_phases.py | 1 | 1652 | """Tests for stix2.v20.KillChainPhase"""
import pytest
import stix2
LMCO_RECON = """{
"kill_chain_name": "lockheed-martin-cyber-kill-chain",
"phase_name": "reconnaissance"
}"""
def test_lockheed_martin_cyber_kill_chain():
recon = stix2.v20.KillChainPhase(
kill_chain_name="lockheed-martin-cyber-kill-chain",
phase_name="reconnaissance",
)
assert recon.serialize(pretty=True) == LMCO_RECON
FOO_PRE_ATTACK = """{
"kill_chain_name": "foo",
"phase_name": "pre-attack"
}"""
def test_kill_chain_example():
preattack = stix2.v20.KillChainPhase(
kill_chain_name="foo",
phase_name="pre-attack",
)
assert preattack.serialize(pretty=True) == FOO_PRE_ATTACK
def test_kill_chain_required_properties():
with pytest.raises(stix2.exceptions.MissingPropertiesError) as excinfo:
stix2.v20.KillChainPhase()
assert excinfo.value.cls == stix2.v20.KillChainPhase
assert excinfo.value.properties == ["kill_chain_name", "phase_name"]
def test_kill_chain_required_property_chain_name():
with pytest.raises(stix2.exceptions.MissingPropertiesError) as excinfo:
stix2.v20.KillChainPhase(phase_name="weaponization")
assert excinfo.value.cls == stix2.v20.KillChainPhase
assert excinfo.value.properties == ["kill_chain_name"]
def test_kill_chain_required_property_phase_name():
with pytest.raises(stix2.exceptions.MissingPropertiesError) as excinfo:
stix2.v20.KillChainPhase(kill_chain_name="lockheed-martin-cyber-kill-chain")
assert excinfo.value.cls == stix2.v20.KillChainPhase
assert excinfo.value.properties == ["phase_name"]
| bsd-3-clause |
zerebubuth/mapnik | scons/scons-local-2.5.0/SCons/Tool/msvc.py | 3 | 11464 | """engine.SCons.Tool.msvc
Tool-specific initialization for Microsoft Visual C/C++.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/msvc.py rel_2.5.0:3543:937e55cd78f7 2016/04/09 11:29:54 bdbaddog"
import os.path
import re
import sys
import SCons.Action
import SCons.Builder
import SCons.Errors
import SCons.Platform.win32
import SCons.Tool
import SCons.Tool.msvs
import SCons.Util
import SCons.Warnings
import SCons.Scanner.RC
from MSCommon import msvc_exists, msvc_setup_env_once
CSuffixes = ['.c', '.C']
CXXSuffixes = ['.cc', '.cpp', '.cxx', '.c++', '.C++']
def validate_vars(env):
"""Validate the PCH and PCHSTOP construction variables."""
if 'PCH' in env and env['PCH']:
if 'PCHSTOP' not in env:
raise SCons.Errors.UserError("The PCHSTOP construction must be defined if PCH is defined.")
if not SCons.Util.is_String(env['PCHSTOP']):
raise SCons.Errors.UserError("The PCHSTOP construction variable must be a string: %r"%env['PCHSTOP'])
def pch_emitter(target, source, env):
"""Adds the object file target."""
validate_vars(env)
pch = None
obj = None
for t in target:
if SCons.Util.splitext(str(t))[1] == '.pch':
pch = t
if SCons.Util.splitext(str(t))[1] == '.obj':
obj = t
if not obj:
obj = SCons.Util.splitext(str(pch))[0]+'.obj'
target = [pch, obj] # pch must be first, and obj second for the PCHCOM to work
return (target, source)
def object_emitter(target, source, env, parent_emitter):
"""Sets up the PCH dependencies for an object file."""
validate_vars(env)
parent_emitter(target, source, env)
# Add a dependency, but only if the target (e.g. 'Source1.obj')
# doesn't correspond to the pre-compiled header ('Source1.pch').
# If the basenames match, then this was most likely caused by
# someone adding the source file to both the env.PCH() and the
# env.Program() calls, and adding the explicit dependency would
# cause a cycle on the .pch file itself.
#
# See issue #2505 for a discussion of what to do if it turns
# out this assumption causes trouble in the wild:
# http://scons.tigris.org/issues/show_bug.cgi?id=2505
if 'PCH' in env:
pch = env['PCH']
if str(target[0]) != SCons.Util.splitext(str(pch))[0] + '.obj':
env.Depends(target, pch)
return (target, source)
def static_object_emitter(target, source, env):
return object_emitter(target, source, env,
SCons.Defaults.StaticObjectEmitter)
def shared_object_emitter(target, source, env):
return object_emitter(target, source, env,
SCons.Defaults.SharedObjectEmitter)
pch_action = SCons.Action.Action('$PCHCOM', '$PCHCOMSTR')
pch_builder = SCons.Builder.Builder(action=pch_action, suffix='.pch',
emitter=pch_emitter,
source_scanner=SCons.Tool.SourceFileScanner)
# Logic to build .rc files into .res files (resource files)
res_scanner = SCons.Scanner.RC.RCScan()
res_action = SCons.Action.Action('$RCCOM', '$RCCOMSTR')
res_builder = SCons.Builder.Builder(action=res_action,
src_suffix='.rc',
suffix='.res',
src_builder=[],
source_scanner=res_scanner)
def msvc_batch_key(action, env, target, source):
"""
Returns a key to identify unique batches of sources for compilation.
If batching is enabled (via the $MSVC_BATCH setting), then all
target+source pairs that use the same action, defined by the same
environment, and have the same target and source directories, will
be batched.
Returning None specifies that the specified target+source should not
be batched with other compilations.
"""
# Fixing MSVC_BATCH mode. Previous if did not work when MSVC_BATCH
# was set to False. This new version should work better.
# Note we need to do the env.subst so $MSVC_BATCH can be a reference to
# another construction variable, which is why we test for False and 0
# as strings.
if not 'MSVC_BATCH' in env or env.subst('$MSVC_BATCH') in ('0', 'False', '', None):
# We're not using batching; return no key.
return None
t = target[0]
s = source[0]
if os.path.splitext(t.name)[0] != os.path.splitext(s.name)[0]:
# The base names are different, so this *must* be compiled
# separately; return no key.
return None
return (id(action), id(env), t.dir, s.dir)
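# Illustrative only: a project opts in from its SConstruct, for example
#   env = Environment(MSVC_BATCH=True)
# With that set, sources sharing the same action, environment and
# target/source directories are compiled in a single batched invocation.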
def msvc_output_flag(target, source, env, for_signature):
"""
Returns the correct /Fo flag for batching.
If batching is disabled or there's only one source file, then we
return an /Fo string that specifies the target explicitly. Otherwise,
we return an /Fo string that just specifies the first target's
directory (where the Visual C/C++ compiler will put the .obj files).
"""
# Fixing MSVC_BATCH mode. Previous if did not work when MSVC_BATCH
# was set to False. This new version should work better. Removed
# len(source)==1 as batch mode can compile only one file
# (and it also fixed problem with compiling only one changed file
# with batch mode enabled)
if not 'MSVC_BATCH' in env or env.subst('$MSVC_BATCH') in ('0', 'False', '', None):
return '/Fo$TARGET'
else:
# The Visual C/C++ compiler requires a \ at the end of the /Fo
# option to indicate an output directory. We use os.sep here so
# that the test(s) for this can be run on non-Windows systems
# without having a hard-coded backslash mess up command-line
# argument parsing.
return '/Fo${TARGET.dir}' + os.sep
CAction = SCons.Action.Action("$CCCOM", "$CCCOMSTR",
batch_key=msvc_batch_key,
targets='$CHANGED_TARGETS')
ShCAction = SCons.Action.Action("$SHCCCOM", "$SHCCCOMSTR",
batch_key=msvc_batch_key,
targets='$CHANGED_TARGETS')
CXXAction = SCons.Action.Action("$CXXCOM", "$CXXCOMSTR",
batch_key=msvc_batch_key,
targets='$CHANGED_TARGETS')
ShCXXAction = SCons.Action.Action("$SHCXXCOM", "$SHCXXCOMSTR",
batch_key=msvc_batch_key,
targets='$CHANGED_TARGETS')
def generate(env):
"""Add Builders and construction variables for MSVC++ to an Environment."""
static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
# TODO(batch): shouldn't reach in to cmdgen this way; necessary
# for now to bypass the checks in Builder.DictCmdGenerator.__call__()
# and allow .cc and .cpp to be compiled in the same command line.
static_obj.cmdgen.source_ext_match = False
shared_obj.cmdgen.source_ext_match = False
for suffix in CSuffixes:
static_obj.add_action(suffix, CAction)
shared_obj.add_action(suffix, ShCAction)
static_obj.add_emitter(suffix, static_object_emitter)
shared_obj.add_emitter(suffix, shared_object_emitter)
for suffix in CXXSuffixes:
static_obj.add_action(suffix, CXXAction)
shared_obj.add_action(suffix, ShCXXAction)
static_obj.add_emitter(suffix, static_object_emitter)
shared_obj.add_emitter(suffix, shared_object_emitter)
env['CCPDBFLAGS'] = SCons.Util.CLVar(['${(PDB and "/Z7") or ""}'])
env['CCPCHFLAGS'] = SCons.Util.CLVar(['${(PCH and "/Yu%s \\\"/Fp%s\\\""%(PCHSTOP or "",File(PCH))) or ""}'])
env['_MSVC_OUTPUT_FLAG'] = msvc_output_flag
env['_CCCOMCOM'] = '$CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS $CCPCHFLAGS $CCPDBFLAGS'
env['CC'] = 'cl'
env['CCFLAGS'] = SCons.Util.CLVar('/nologo')
env['CFLAGS'] = SCons.Util.CLVar('')
env['CCCOM'] = '${TEMPFILE("$CC $_MSVC_OUTPUT_FLAG /c $CHANGED_SOURCES $CFLAGS $CCFLAGS $_CCCOMCOM","$CCCOMSTR")}'
env['SHCC'] = '$CC'
env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS')
env['SHCFLAGS'] = SCons.Util.CLVar('$CFLAGS')
env['SHCCCOM'] = '${TEMPFILE("$SHCC $_MSVC_OUTPUT_FLAG /c $CHANGED_SOURCES $SHCFLAGS $SHCCFLAGS $_CCCOMCOM","$SHCCCOMSTR")}'
env['CXX'] = '$CC'
env['CXXFLAGS'] = SCons.Util.CLVar('$( /TP $)')
env['CXXCOM'] = '${TEMPFILE("$CXX $_MSVC_OUTPUT_FLAG /c $CHANGED_SOURCES $CXXFLAGS $CCFLAGS $_CCCOMCOM","$CXXCOMSTR")}'
env['SHCXX'] = '$CXX'
env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS')
env['SHCXXCOM'] = '${TEMPFILE("$SHCXX $_MSVC_OUTPUT_FLAG /c $CHANGED_SOURCES $SHCXXFLAGS $SHCCFLAGS $_CCCOMCOM","$SHCXXCOMSTR")}'
env['CPPDEFPREFIX'] = '/D'
env['CPPDEFSUFFIX'] = ''
env['INCPREFIX'] = '/I'
env['INCSUFFIX'] = ''
# env.Append(OBJEMITTER = [static_object_emitter])
# env.Append(SHOBJEMITTER = [shared_object_emitter])
env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 1
env['RC'] = 'rc'
env['RCFLAGS'] = SCons.Util.CLVar('')
env['RCSUFFIXES']=['.rc','.rc2']
env['RCCOM'] = '$RC $_CPPDEFFLAGS $_CPPINCFLAGS $RCFLAGS /fo$TARGET $SOURCES'
env['BUILDERS']['RES'] = res_builder
env['OBJPREFIX'] = ''
env['OBJSUFFIX'] = '.obj'
env['SHOBJPREFIX'] = '$OBJPREFIX'
env['SHOBJSUFFIX'] = '$OBJSUFFIX'
# Set-up ms tools paths
msvc_setup_env_once(env)
env['CFILESUFFIX'] = '.c'
env['CXXFILESUFFIX'] = '.cc'
env['PCHPDBFLAGS'] = SCons.Util.CLVar(['${(PDB and "/Yd") or ""}'])
env['PCHCOM'] = '$CXX /Fo${TARGETS[1]} $CXXFLAGS $CCFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS /c $SOURCES /Yc$PCHSTOP /Fp${TARGETS[0]} $CCPDBFLAGS $PCHPDBFLAGS'
env['BUILDERS']['PCH'] = pch_builder
if 'ENV' not in env:
env['ENV'] = {}
if 'SystemRoot' not in env['ENV']: # required for dlls in the winsxs folders
env['ENV']['SystemRoot'] = SCons.Platform.win32.get_system_root()
def exists(env):
return msvc_exists()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| lgpl-2.1 |
tvalacarta/tvalacarta | python/main-classic/lib/youtube_dl/extractor/vimple.py | 64 | 1968 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import int_or_none
class SprutoBaseIE(InfoExtractor):
def _extract_spruto(self, spruto, video_id):
playlist = spruto['playlist'][0]
title = playlist['title']
video_id = playlist.get('videoId') or video_id
thumbnail = playlist.get('posterUrl') or playlist.get('thumbnailUrl')
duration = int_or_none(playlist.get('duration'))
formats = [{
'url': f['url'],
} for f in playlist['video']]
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
}
class VimpleIE(SprutoBaseIE):
IE_DESC = 'Vimple - one-click video hosting'
_VALID_URL = r'https?://(?:player\.vimple\.(?:ru|co)/iframe|vimple\.(?:ru|co))/(?P<id>[\da-f-]{32,36})'
_TESTS = [{
'url': 'http://vimple.ru/c0f6b1687dcd4000a97ebe70068039cf',
'md5': '2e750a330ed211d3fd41821c6ad9a279',
'info_dict': {
'id': 'c0f6b168-7dcd-4000-a97e-be70068039cf',
'ext': 'mp4',
'title': 'Sunset',
'duration': 20,
'thumbnail': r're:https?://.*?\.jpg',
},
}, {
'url': 'http://player.vimple.ru/iframe/52e1beec-1314-4a83-aeac-c61562eadbf9',
'only_matching': True,
}, {
'url': 'http://vimple.co/04506a053f124483b8fb05ed73899f19',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(
'http://player.vimple.ru/iframe/%s' % video_id, video_id)
spruto = self._parse_json(
self._search_regex(
r'sprutoData\s*:\s*({.+?}),\r\n', webpage, 'spruto data'),
video_id)
return self._extract_spruto(spruto, video_id)
| gpl-3.0 |
guymakam/Kodi-Israel | plugin.video.ilten/resources/m3u8/model.py | 3 | 11066 | from collections import namedtuple
import os
import errno
import math
import urlparse
import re
from resources.m3u8 import parser
class M3U8(object):
'''
Represents a single M3U8 playlist. Should be instantiated with
the content as string.
Parameters:
`content`
the m3u8 content as string
`basepath`
all urls (key and segments url) will be updated with this basepath,
ex.:
basepath = "http://videoserver.com/hls"
/foo/bar/key.bin --> http://videoserver.com/hls/key.bin
http://vid.com/segment1.ts --> http://videoserver.com/hls/segment1.ts
can be passed as a parameter or set as an attribute on the ``M3U8`` object.
`baseuri`
uri the playlist comes from. it is propagated to SegmentList and Key
ex.: http://example.com/path/to
Attributes:
`key`
it's a `Key` object, the EXT-X-KEY from m3u8. Or None
`segments`
a `SegmentList` object, represents the list of `Segment`s from this playlist
`is_variant`
Returns true if this M3U8 is a variant playlist, with links to
other M3U8s with different bitrates.
If true, `playlists` is a list of the playlists available.
`is_endlist`
Returns true if EXT-X-ENDLIST tag present in M3U8.
http://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.8
`playlists`
If this is a variant playlist (`is_variant` is True), returns a list of
Playlist objects
`target_duration`
Returns the EXT-X-TARGETDURATION as an integer
http://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.2
`media_sequence`
Returns the EXT-X-MEDIA-SEQUENCE as an integer
http://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.3
`version`
Return the EXT-X-VERSION as is
`allow_cache`
Return the EXT-X-ALLOW-CACHE as is
`files`
Returns an iterable with all files from playlist, in order. This includes
segments and key uri, if present.
`baseuri`
It is a property (getter and setter) used by
SegmentList and Key to have absolute URIs.
'''
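# Illustrative usage sketch (the URIs and manifest text are made up):
#   m3u8_obj = M3U8(manifest_text, baseuri='http://example.com/path/to')
#   for segment in m3u8_obj.segments:
#       print segment.absolute_uri
#   print m3u8_obj.dumps()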
simple_attributes = (
# obj attribute # parser attribute
('is_variant', 'is_variant'),
('is_endlist', 'is_endlist'),
('target_duration', 'targetduration'),
('media_sequence', 'media_sequence'),
('version', 'version'),
('allow_cache', 'allow_cache'),
)
def __init__(self, content=None, basepath=None, baseuri=None):
if content is not None:
self.data = parser.parse(content)
else:
self.data = {}
self._baseuri = baseuri
self._initialize_attributes()
self.basepath = basepath
def _initialize_attributes(self):
self.key = Key(baseuri=self.baseuri, **self.data['key']) if 'key' in self.data else None
self.segments = SegmentList([ Segment(baseuri=self.baseuri, **params)
for params in self.data.get('segments', []) ])
for attr, param in self.simple_attributes:
setattr(self, attr, self.data.get(param))
self.files = []
if self.key:
self.files.append(self.key.uri)
self.files.extend(self.segments.uri)
self.playlists = PlaylistList([ Playlist(baseuri=self.baseuri, **playlist)
for playlist in self.data.get('playlists', []) ])
def __unicode__(self):
return self.dumps()
@property
def baseuri(self):
return self._baseuri
@baseuri.setter
def baseuri(self, new_baseuri):
self._baseuri = new_baseuri
self.segments.baseuri = new_baseuri
@property
def basepath(self):
return self._basepath
@basepath.setter
def basepath(self, newbasepath):
self._basepath = newbasepath
self._update_basepath()
def _update_basepath(self):
if self._basepath is None:
return
if self.key:
self.key.basepath = self.basepath
self.segments.basepath = self.basepath
self.playlists.basepath = self.basepath
def add_playlist(self, playlist):
self.is_variant = True
self.playlists.append(playlist)
def dumps(self):
'''
Returns the current m3u8 as a string.
You could also use unicode(<this obj>) or str(<this obj>)
'''
output = ['#EXTM3U']
if self.media_sequence:
output.append('#EXT-X-MEDIA-SEQUENCE:' + str(self.media_sequence))
if self.allow_cache:
output.append('#EXT-X-ALLOW-CACHE:' + self.allow_cache.upper())
if self.version:
output.append('#EXT-X-VERSION:' + self.version)
if self.key:
output.append(str(self.key))
if self.target_duration:
output.append('#EXT-X-TARGETDURATION:' + int_or_float_to_string(self.target_duration))
if self.is_variant:
output.append(str(self.playlists))
output.append(str(self.segments))
if self.is_endlist:
output.append('#EXT-X-ENDLIST')
return '\n'.join(output)
def dump(self, filename):
'''
Saves the current m3u8 to ``filename``
'''
self._create_sub_directories(filename)
with open(filename, 'w') as fileobj:
fileobj.write(self.dumps())
def _create_sub_directories(self, filename):
basename = os.path.dirname(filename)
try:
os.makedirs(basename)
except OSError as error:
if error.errno != errno.EEXIST:
raise
class BasePathMixin(object):
@property
def absolute_uri(self):
if parser.is_url(self.uri):
return self.uri
else:
if self.baseuri is None:
raise ValueError('There can not be `absolute_uri` with no `baseuri` set')
return _urijoin(self.baseuri, self.uri)
@property
def basepath(self):
return os.path.dirname(self.uri)
@basepath.setter
def basepath(self, newbasepath):
self.uri = self.uri.replace(self.basepath, newbasepath)
class GroupedBasePathMixin(object):
def _set_baseuri(self, new_baseuri):
for item in self:
item.baseuri = new_baseuri
baseuri = property(None, _set_baseuri)
def _set_basepath(self, newbasepath):
for item in self:
item.basepath = newbasepath
basepath = property(None, _set_basepath)
class Segment(BasePathMixin):
'''
A video segment from a M3U8 playlist
`uri`
a string with the segment uri
`title`
title attribute from EXTINF parameter
`duration`
      duration attribute from EXTINF parameter
`baseuri`
      uri the segment comes from in URI hierarchy. ex.: http://example.com/path/to
'''
def __init__(self, uri, baseuri, duration=None, title=None):
self.uri = uri
self.duration = duration
self.title = title
self.baseuri = baseuri
def __str__(self):
output = ['#EXTINF:%s,' % int_or_float_to_string(self.duration)]
if self.title:
output.append(quoted(self.title))
output.append('\n')
output.append(self.uri)
return ''.join(output)
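# Example of the serialized form produced by Segment.__str__ above
# (illustrative values, not part of the original module):
#   Segment(uri='entire.ts', baseuri=None, duration=5220, title='Title')
#   renders as:
#     #EXTINF:5220,"Title"
#     entire.ts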
class SegmentList(list, GroupedBasePathMixin):
def __str__(self):
output = [str(segment) for segment in self]
return '\n'.join(output)
@property
def uri(self):
return [seg.uri for seg in self]
class Key(BasePathMixin):
'''
Key used to encrypt the segments in a m3u8 playlist (EXT-X-KEY)
`method`
is a string. ex.: "AES-128"
`uri`
      is a string. ex.: "https://priv.example.com/key.php?r=52"
`baseuri`
uri the key comes from in URI hierarchy. ex.: http://example.com/path/to
`iv`
initialization vector. a string representing a hexadecimal number. ex.: 0X12A
'''
def __init__(self, method, uri, baseuri, iv=None):
self.method = method
self.uri = uri
self.iv = iv
self.baseuri = baseuri
def __str__(self):
output = [
'METHOD=%s' % self.method,
'URI="%s"' % self.uri,
]
if self.iv:
output.append('IV=%s' % self.iv)
return '#EXT-X-KEY:' + ','.join(output)
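# Example of the serialized form produced by Key.__str__ above, using the
# values from the class docstring (illustrative, not part of the original module):
#   #EXT-X-KEY:METHOD=AES-128,URI="https://priv.example.com/key.php?r=52",IV=0X12A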
class Playlist(BasePathMixin):
'''
Playlist object representing a link to a variant M3U8 with a specific bitrate.
Each `stream_info` attribute has: `program_id`, `bandwidth`, `resolution` and `codecs`
`resolution` is a tuple (h, v) of integers
More info: http://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.10
'''
def __init__(self, uri, stream_info, baseuri):
self.uri = uri
self.baseuri = baseuri
resolution = stream_info.get('resolution')
        if resolution is not None:
values = resolution.split('x')
resolution_pair = (int(values[0]), int(values[1]))
else:
resolution_pair = None
self.stream_info = StreamInfo(bandwidth=stream_info['bandwidth'],
program_id=stream_info.get('program_id'),
resolution=resolution_pair,
codecs=stream_info.get('codecs'))
def __str__(self):
stream_inf = []
if self.stream_info.program_id:
stream_inf.append('PROGRAM-ID=' + self.stream_info.program_id)
if self.stream_info.bandwidth:
stream_inf.append('BANDWIDTH=' + self.stream_info.bandwidth)
if self.stream_info.resolution:
res = str(self.stream_info.resolution[0]) + 'x' + str(self.stream_info.resolution[1])
stream_inf.append('RESOLUTION=' + res)
if self.stream_info.codecs:
stream_inf.append('CODECS=' + quoted(self.stream_info.codecs))
return '#EXT-X-STREAM-INF:' + ','.join(stream_inf) + '\n' + self.uri
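# Example of the serialized form produced by Playlist.__str__ above
# (illustrative values, not part of the original module):
#   #EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=1280000,RESOLUTION=640x360
#   http://example.com/low.m3u8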
StreamInfo = namedtuple('StreamInfo', ['bandwidth', 'program_id', 'resolution', 'codecs'])
class PlaylistList(list, GroupedBasePathMixin):
def __str__(self):
output = [str(playlist) for playlist in self]
return '\n'.join(output)
def denormalize_attribute(attribute):
return attribute.replace('_','-').upper()
def quoted(string):
return '"%s"' % string
def _urijoin(baseuri, path):
if parser.is_url(baseuri):
parsed_url = urlparse.urlparse(baseuri)
prefix = parsed_url.scheme + '://' + parsed_url.netloc
new_path = os.path.normpath(parsed_url.path + '/' + path)
return urlparse.urljoin(prefix, new_path.strip('/'))
else:
return os.path.normpath(os.path.join(baseuri, path.strip('/')))
def int_or_float_to_string(number):
return str(int(number)) if number == math.floor(number) else str(number)
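# Illustrative behaviour (not part of the original module):
#   int_or_float_to_string(10.0) -> '10'
#   int_or_float_to_string(9.5)  -> '9.5'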
| gpl-2.0 |
kybriainfotech/iSocioCRM | addons/report/tests/test_reports.py | 385 | 2251 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import openerp
import openerp.tests
_logger = logging.getLogger(__name__)
@openerp.tests.common.at_install(False)
@openerp.tests.common.post_install(True)
class TestReports(openerp.tests.TransactionCase):
def test_reports(self):
registry, cr, uid = self.registry, self.cr, self.uid
r_model = registry('ir.actions.report.xml')
domain = [('report_type', 'like', 'qweb')]
for r in r_model.browse(cr, uid, r_model.search(cr, uid, domain)):
report_model = 'report.%s' % r.report_name
try:
registry(report_model)
except KeyError:
# Only test the generic reports here
_logger.info("testing report %s", r.report_name)
report_model = registry(r.model)
report_model_ids = report_model.search(cr, uid, [], limit=10)
if not report_model_ids:
_logger.info("no record found skipping report %s", r.report_name)
if not r.multi:
report_model_ids = report_model_ids[:1]
# Test report generation
registry('report').get_html(cr, uid, report_model_ids, r.report_name)
else:
continue
| agpl-3.0 |
direvus/ansible | lib/ansible/modules/network/ios/ios_logging.py | 48 | 12512 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: ios_logging
version_added: "2.4"
author: "Trishna Guha (@trishnaguha)"
short_description: Manage logging on network devices
description:
- This module provides declarative management of logging
on Cisco Ios devices.
notes:
- Tested against IOS 15.6
options:
dest:
description:
- Destination of the logs.
choices: ['on', 'host', 'console', 'monitor', 'buffered']
name:
description:
- If value of C(dest) is I(file) it indicates file-name,
for I(user) it indicates username and for I(host) indicates
the host name to be notified.
size:
description:
- Size of buffer. The acceptable value is in range from 4096 to
4294967295 bytes.
default: 4096
facility:
description:
- Set logging facility.
level:
description:
- Set logging severity levels.
aggregate:
description: List of logging definitions.
state:
description:
- State of the logging configuration.
default: present
choices: ['present', 'absent']
extends_documentation_fragment: ios
"""
EXAMPLES = """
- name: configure host logging
ios_logging:
dest: host
name: 172.16.0.1
state: present
- name: remove host logging configuration
ios_logging:
dest: host
name: 172.16.0.1
state: absent
- name: configure console logging level and facility
ios_logging:
dest: console
facility: local7
level: debugging
state: present
- name: enable logging to all
ios_logging:
dest : on
- name: configure buffer size
ios_logging:
dest: buffered
size: 5000
- name: Configure logging using aggregate
ios_logging:
aggregate:
- { dest: console, level: notifications }
- { dest: buffered, size: 9000 }
- name: remove logging using aggregate
ios_logging:
aggregate:
- { dest: console, level: notifications }
- { dest: buffered, size: 9000 }
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- logging facility local7
- logging host 172.16.0.1
"""
import re
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import remove_default_spec, validate_ip_address
from ansible.module_utils.network.ios.ios import get_config, load_config
from ansible.module_utils.network.ios.ios import get_capabilities
from ansible.module_utils.network.ios.ios import ios_argument_spec, check_args
def validate_size(value, module):
if value:
if not int(4096) <= int(value) <= int(4294967295):
module.fail_json(msg='size must be between 4096 and 4294967295')
else:
return value
def map_obj_to_commands(updates, module, os_version):
dest_group = ('console', 'monitor', 'buffered', 'on')
commands = list()
want, have = updates
for w in want:
dest = w['dest']
name = w['name']
size = w['size']
facility = w['facility']
level = w['level']
state = w['state']
del w['state']
if facility:
w['dest'] = 'facility'
if state == 'absent' and w in have:
if dest:
if dest == 'host':
if '12.' in os_version:
commands.append('no logging {0}'.format(name))
else:
commands.append('no logging host {0}'.format(name))
elif dest in dest_group:
commands.append('no logging {0}'.format(dest))
else:
module.fail_json(msg='dest must be among console, monitor, buffered, host, on')
if facility:
commands.append('no logging facility {0}'.format(facility))
if state == 'present' and w not in have:
if facility:
present = False
for entry in have:
if entry['dest'] == 'facility' and entry['facility'] == facility:
present = True
if not present:
commands.append('logging facility {0}'.format(facility))
if dest == 'host':
if '12.' in os_version:
commands.append('logging {0}'.format(name))
else:
commands.append('logging host {0}'.format(name))
elif dest == 'on':
commands.append('logging on')
elif dest == 'buffered' and size:
present = False
for entry in have:
if entry['dest'] == 'buffered' and entry['size'] == size and entry['level'] == level:
present = True
if not present:
if level and level != 'debugging':
commands.append('logging buffered {0} {1}'.format(size, level))
else:
commands.append('logging buffered {0}'.format(size))
else:
if dest:
dest_cmd = 'logging {0}'.format(dest)
if level:
dest_cmd += ' {0}'.format(level)
commands.append(dest_cmd)
return commands
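# Illustrative output of map_obj_to_commands (not part of the original module):
# wanting {dest: 'host', name: '172.16.0.1', state: 'present'} on an IOS 15.x
# device with no matching entry in `have` yields ['logging host 172.16.0.1'].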
def parse_facility(line, dest):
facility = None
if dest == 'facility':
match = re.search(r'logging facility (\S+)', line, re.M)
if match:
facility = match.group(1)
return facility
def parse_size(line, dest):
size = None
if dest == 'buffered':
match = re.search(r'logging buffered(?: (\d+))?(?: [a-z]+)?', line, re.M)
if match:
if match.group(1) is not None:
size = match.group(1)
else:
size = "4096"
return size
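# Illustrative behaviour of parse_size (not part of the original module):
#   parse_size('logging buffered 5000 debugging', 'buffered') -> '5000'
#   parse_size('logging buffered', 'buffered')                -> '4096'  (default)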
def parse_name(line, dest):
    name = None
    if dest == 'host':
        match = re.search(r'logging host (\S+)', line, re.M)
        if match:
            name = match.group(1)
    return name
def parse_level(line, dest):
level_group = ('emergencies', 'alerts', 'critical', 'errors', 'warnings',
'notifications', 'informational', 'debugging')
if dest == 'host':
level = 'debugging'
else:
if dest == 'buffered':
match = re.search(r'logging buffered(?: \d+)?(?: ([a-z]+))?', line, re.M)
else:
match = re.search(r'logging {0} (\S+)'.format(dest), line, re.M)
if match and match.group(1) in level_group:
level = match.group(1)
else:
level = 'debugging'
return level
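# Illustrative behaviour of parse_level (not part of the original module):
#   parse_level('logging console notifications', 'console') -> 'notifications'
#   parse_level('logging monitor foo', 'monitor')            -> 'debugging'  (unknown levels fall back)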
def map_config_to_obj(module):
obj = []
dest_group = ('console', 'host', 'monitor', 'buffered', 'on', 'facility')
data = get_config(module, flags=['| include logging'])
for line in data.split('\n'):
match = re.search(r'logging (\S+)', line, re.M)
if match:
if match.group(1) in dest_group:
dest = match.group(1)
obj.append({
'dest': dest,
'name': parse_name(line, dest),
'size': parse_size(line, dest),
'facility': parse_facility(line, dest),
'level': parse_level(line, dest)
})
elif validate_ip_address(match.group(1)):
dest = 'host'
obj.append({
'dest': dest,
'name': match.group(1),
'facility': parse_facility(line, dest),
'level': parse_level(line, dest)
})
else:
ip_match = re.search(r'\d+\.\d+\.\d+\.\d+', match.group(1), re.M)
if ip_match:
dest = 'host'
obj.append({
'dest': dest,
'name': match.group(1),
'facility': parse_facility(line, dest),
'level': parse_level(line, dest)
})
return obj
def map_params_to_obj(module, required_if=None):
obj = []
aggregate = module.params.get('aggregate')
if aggregate:
for item in aggregate:
for key in item:
if item.get(key) is None:
item[key] = module.params[key]
module._check_required_if(required_if, item)
d = item.copy()
if d['dest'] != 'host':
d['name'] = None
if d['dest'] == 'buffered':
if 'size' in d:
d['size'] = str(validate_size(d['size'], module))
elif 'size' not in d:
d['size'] = str(4096)
else:
pass
if d['dest'] != 'buffered':
d['size'] = None
obj.append(d)
else:
if module.params['dest'] != 'host':
module.params['name'] = None
if module.params['dest'] == 'buffered':
if not module.params['size']:
module.params['size'] = str(4096)
else:
module.params['size'] = None
if module.params['size'] is None:
obj.append({
'dest': module.params['dest'],
'name': module.params['name'],
'size': module.params['size'],
'facility': module.params['facility'],
'level': module.params['level'],
'state': module.params['state']
})
else:
obj.append({
'dest': module.params['dest'],
'name': module.params['name'],
'size': str(validate_size(module.params['size'], module)),
'facility': module.params['facility'],
'level': module.params['level'],
'state': module.params['state']
})
return obj
def main():
""" main entry point for module execution
"""
element_spec = dict(
dest=dict(type='str', choices=['on', 'host', 'console', 'monitor', 'buffered']),
name=dict(type='str'),
size=dict(type='int'),
facility=dict(type='str'),
level=dict(type='str', default='debugging'),
state=dict(default='present', choices=['present', 'absent']),
)
aggregate_spec = deepcopy(element_spec)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec),
)
argument_spec.update(element_spec)
argument_spec.update(ios_argument_spec)
required_if = [('dest', 'host', ['name'])]
module = AnsibleModule(argument_spec=argument_spec,
required_if=required_if,
supports_check_mode=True)
device_info = get_capabilities(module)
os_version = device_info['device_info']['network_os_version']
warnings = list()
check_args(module, warnings)
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module, required_if=required_if)
have = map_config_to_obj(module)
commands = map_obj_to_commands((want, have), module, os_version)
result['commands'] = commands
if commands:
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
mancoast/CPythonPyc_test | fail/324_test_urllib2net.py | 6 | 13263 | #!/usr/bin/env python3
import unittest
from test import support
from test.test_urllib2 import sanepathname2url
import os
import socket
import urllib.error
import urllib.request
import sys
try:
import ssl
except ImportError:
ssl = None
TIMEOUT = 60 # seconds
def _retry_thrice(func, exc, *args, **kwargs):
for i in range(3):
try:
return func(*args, **kwargs)
except exc as e:
last_exc = e
continue
except:
raise
raise last_exc
def _wrap_with_retry_thrice(func, exc):
def wrapped(*args, **kwargs):
return _retry_thrice(func, exc, *args, **kwargs)
return wrapped
# Connecting to remote hosts is flaky. Make it more robust by retrying
# the connection several times.
_urlopen_with_retry = _wrap_with_retry_thrice(urllib.request.urlopen,
urllib.error.URLError)
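# Illustrative: _urlopen_with_retry("http://www.python.org/") attempts the
# connection up to three times and re-raises the last URLError if all fail.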
class AuthTests(unittest.TestCase):
"""Tests urllib2 authentication features."""
## Disabled at the moment since there is no page under python.org which
## could be used to HTTP authentication.
#
# def test_basic_auth(self):
# import http.client
#
# test_url = "http://www.python.org/test/test_urllib2/basic_auth"
# test_hostport = "www.python.org"
# test_realm = 'Test Realm'
# test_user = 'test.test_urllib2net'
# test_password = 'blah'
#
# # failure
# try:
# _urlopen_with_retry(test_url)
# except urllib2.HTTPError, exc:
# self.assertEqual(exc.code, 401)
# else:
# self.fail("urlopen() should have failed with 401")
#
# # success
# auth_handler = urllib2.HTTPBasicAuthHandler()
# auth_handler.add_password(test_realm, test_hostport,
# test_user, test_password)
# opener = urllib2.build_opener(auth_handler)
# f = opener.open('http://localhost/')
# response = _urlopen_with_retry("http://www.python.org/")
#
# # The 'userinfo' URL component is deprecated by RFC 3986 for security
# # reasons, let's not implement it! (it's already implemented for proxy
# # specification strings (that is, URLs or authorities specifying a
# # proxy), so we must keep that)
# self.assertRaises(http.client.InvalidURL,
# urllib2.urlopen, "http://evil:[email protected]")
class CloseSocketTest(unittest.TestCase):
def test_close(self):
# calling .close() on urllib2's response objects should close the
# underlying socket
response = _urlopen_with_retry("http://www.python.org/")
sock = response.fp
self.assertTrue(not sock.closed)
response.close()
self.assertTrue(sock.closed)
class OtherNetworkTests(unittest.TestCase):
def setUp(self):
if 0: # for debugging
import logging
logger = logging.getLogger("test_urllib2net")
logger.addHandler(logging.StreamHandler())
# XXX The rest of these tests aren't very good -- they don't check much.
# They do sometimes catch some major disasters, though.
def test_ftp(self):
urls = [
'ftp://ftp.kernel.org/pub/linux/kernel/README',
'ftp://ftp.kernel.org/pub/linux/kernel/non-existent-file',
#'ftp://ftp.kernel.org/pub/leenox/kernel/test',
'ftp://gatekeeper.research.compaq.com/pub/DEC/SRC'
'/research-reports/00README-Legal-Rules-Regs',
]
self._test_urls(urls, self._extra_handlers())
def test_file(self):
TESTFN = support.TESTFN
f = open(TESTFN, 'w')
try:
f.write('hi there\n')
f.close()
urls = [
'file:' + sanepathname2url(os.path.abspath(TESTFN)),
('file:///nonsensename/etc/passwd', None,
urllib.error.URLError),
]
self._test_urls(urls, self._extra_handlers(), retry=True)
finally:
os.remove(TESTFN)
self.assertRaises(ValueError, urllib.request.urlopen,'./relative_path/to/file')
# XXX Following test depends on machine configurations that are internal
# to CNRI. Need to set up a public server with the right authentication
# configuration for test purposes.
## def test_cnri(self):
## if socket.gethostname() == 'bitdiddle':
## localhost = 'bitdiddle.cnri.reston.va.us'
## elif socket.gethostname() == 'bitdiddle.concentric.net':
## localhost = 'localhost'
## else:
## localhost = None
## if localhost is not None:
## urls = [
## 'file://%s/etc/passwd' % localhost,
## 'http://%s/simple/' % localhost,
## 'http://%s/digest/' % localhost,
## 'http://%s/not/found.h' % localhost,
## ]
## bauth = HTTPBasicAuthHandler()
## bauth.add_password('basic_test_realm', localhost, 'jhylton',
## 'password')
## dauth = HTTPDigestAuthHandler()
## dauth.add_password('digest_test_realm', localhost, 'jhylton',
## 'password')
## self._test_urls(urls, self._extra_handlers()+[bauth, dauth])
def test_urlwithfrag(self):
urlwith_frag = "http://docs.python.org/2/glossary.html#glossary"
with support.transient_internet(urlwith_frag):
req = urllib.request.Request(urlwith_frag)
res = urllib.request.urlopen(req)
self.assertEqual(res.geturl(),
"http://docs.python.org/2/glossary.html#glossary")
def test_custom_headers(self):
url = "http://www.example.com"
with support.transient_internet(url):
opener = urllib.request.build_opener()
request = urllib.request.Request(url)
self.assertFalse(request.header_items())
opener.open(request)
self.assertTrue(request.header_items())
self.assertTrue(request.has_header('User-agent'))
request.add_header('User-Agent','Test-Agent')
opener.open(request)
self.assertEqual(request.get_header('User-agent'),'Test-Agent')
def test_sites_no_connection_close(self):
# Some sites do not send Connection: close header.
# Verify that those work properly. (#issue12576)
URL = 'http://www.imdb.com' # mangles Connection:close
with support.transient_internet(URL):
try:
with urllib.request.urlopen(URL) as res:
pass
except ValueError as e:
self.fail("urlopen failed for site not sending \
Connection:close")
else:
self.assertTrue(res)
req = urllib.request.urlopen(URL)
res = req.read()
self.assertTrue(res)
def _test_urls(self, urls, handlers, retry=True):
import time
import logging
debug = logging.getLogger("test_urllib2").debug
urlopen = urllib.request.build_opener(*handlers).open
if retry:
urlopen = _wrap_with_retry_thrice(urlopen, urllib.error.URLError)
for url in urls:
if isinstance(url, tuple):
url, req, expected_err = url
else:
req = expected_err = None
with support.transient_internet(url):
debug(url)
try:
f = urlopen(url, req, TIMEOUT)
except EnvironmentError as err:
debug(err)
if expected_err:
msg = ("Didn't get expected error(s) %s for %s %s, got %s: %s" %
(expected_err, url, req, type(err), err))
self.assertIsInstance(err, expected_err, msg)
except urllib.error.URLError as err:
if isinstance(err[0], socket.timeout):
print("<timeout: %s>" % url, file=sys.stderr)
continue
else:
raise
else:
try:
with support.time_out, \
support.socket_peer_reset, \
support.ioerror_peer_reset:
buf = f.read()
debug("read %d bytes" % len(buf))
except socket.timeout:
print("<timeout: %s>" % url, file=sys.stderr)
f.close()
debug("******** next url coming up...")
time.sleep(0.1)
def _extra_handlers(self):
handlers = []
cfh = urllib.request.CacheFTPHandler()
self.addCleanup(cfh.clear_cache)
cfh.setTimeout(1)
handlers.append(cfh)
return handlers
class TimeoutTest(unittest.TestCase):
def test_http_basic(self):
self.assertTrue(socket.getdefaulttimeout() is None)
url = "http://www.python.org"
with support.transient_internet(url, timeout=None):
u = _urlopen_with_retry(url)
self.addCleanup(u.close)
self.assertTrue(u.fp.raw._sock.gettimeout() is None)
def test_http_default_timeout(self):
self.assertTrue(socket.getdefaulttimeout() is None)
url = "http://www.python.org"
with support.transient_internet(url):
socket.setdefaulttimeout(60)
try:
u = _urlopen_with_retry(url)
self.addCleanup(u.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(u.fp.raw._sock.gettimeout(), 60)
def test_http_no_timeout(self):
self.assertTrue(socket.getdefaulttimeout() is None)
url = "http://www.python.org"
with support.transient_internet(url):
socket.setdefaulttimeout(60)
try:
u = _urlopen_with_retry(url, timeout=None)
self.addCleanup(u.close)
finally:
socket.setdefaulttimeout(None)
self.assertTrue(u.fp.raw._sock.gettimeout() is None)
def test_http_timeout(self):
url = "http://www.python.org"
with support.transient_internet(url):
u = _urlopen_with_retry(url, timeout=120)
self.addCleanup(u.close)
self.assertEqual(u.fp.raw._sock.gettimeout(), 120)
FTP_HOST = "ftp://ftp.mirror.nl/pub/gnu/"
def test_ftp_basic(self):
self.assertTrue(socket.getdefaulttimeout() is None)
with support.transient_internet(self.FTP_HOST, timeout=None):
u = _urlopen_with_retry(self.FTP_HOST)
self.addCleanup(u.close)
self.assertTrue(u.fp.fp.raw._sock.gettimeout() is None)
def test_ftp_default_timeout(self):
self.assertTrue(socket.getdefaulttimeout() is None)
with support.transient_internet(self.FTP_HOST):
socket.setdefaulttimeout(60)
try:
u = _urlopen_with_retry(self.FTP_HOST)
self.addCleanup(u.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(u.fp.fp.raw._sock.gettimeout(), 60)
def test_ftp_no_timeout(self):
self.assertTrue(socket.getdefaulttimeout() is None)
with support.transient_internet(self.FTP_HOST):
socket.setdefaulttimeout(60)
try:
u = _urlopen_with_retry(self.FTP_HOST, timeout=None)
self.addCleanup(u.close)
finally:
socket.setdefaulttimeout(None)
self.assertTrue(u.fp.fp.raw._sock.gettimeout() is None)
def test_ftp_timeout(self):
with support.transient_internet(self.FTP_HOST):
u = _urlopen_with_retry(self.FTP_HOST, timeout=60)
self.addCleanup(u.close)
self.assertEqual(u.fp.fp.raw._sock.gettimeout(), 60)
@unittest.skipUnless(ssl, "requires SSL support")
class HTTPSTests(unittest.TestCase):
def test_sni(self):
self.skipTest("test disabled - test server needed")
# Checks that Server Name Indication works, if supported by the
# OpenSSL linked to.
# The ssl module itself doesn't have server-side support for SNI,
# so we rely on a third-party test site.
expect_sni = ssl.HAS_SNI
with support.transient_internet("XXX"):
u = urllib.request.urlopen("XXX")
contents = u.readall()
if expect_sni:
self.assertIn(b"Great", contents)
self.assertNotIn(b"Unfortunately", contents)
else:
self.assertNotIn(b"Great", contents)
self.assertIn(b"Unfortunately", contents)
def test_main():
support.requires("network")
support.run_unittest(AuthTests,
HTTPSTests,
OtherNetworkTests,
CloseSocketTest,
TimeoutTest,
)
if __name__ == "__main__":
test_main()
| gpl-3.0 |
hbrunn/website | website_event_register_free/__init__.py | 71 | 1029 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2015 Therp BV <http://therp.nl>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import model
from . import controllers
| agpl-3.0 |
Designist/audacity | lib-src/lv2/lv2/plugins/eg-midigate.lv2/waflib/Build.py | 265 | 20971 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,sys,errno,re,shutil
try:
import cPickle
except ImportError:
import pickle as cPickle
from waflib import Runner,TaskGen,Utils,ConfigSet,Task,Logs,Options,Context,Errors
import waflib.Node
CACHE_DIR='c4che'
CACHE_SUFFIX='_cache.py'
INSTALL=1337
UNINSTALL=-1337
SAVED_ATTRS='root node_deps raw_deps task_sigs'.split()
CFG_FILES='cfg_files'
POST_AT_ONCE=0
POST_LAZY=1
POST_BOTH=2
class BuildContext(Context.Context):
'''executes the build'''
cmd='build'
variant=''
def __init__(self,**kw):
super(BuildContext,self).__init__(**kw)
self.is_install=0
self.top_dir=kw.get('top_dir',Context.top_dir)
self.run_dir=kw.get('run_dir',Context.run_dir)
self.post_mode=POST_AT_ONCE
self.out_dir=kw.get('out_dir',Context.out_dir)
self.cache_dir=kw.get('cache_dir',None)
if not self.cache_dir:
self.cache_dir=self.out_dir+os.sep+CACHE_DIR
self.all_envs={}
self.task_sigs={}
self.node_deps={}
self.raw_deps={}
self.cache_dir_contents={}
self.task_gen_cache_names={}
self.launch_dir=Context.launch_dir
self.jobs=Options.options.jobs
self.targets=Options.options.targets
self.keep=Options.options.keep
self.cache_global=Options.cache_global
self.nocache=Options.options.nocache
self.progress_bar=Options.options.progress_bar
self.deps_man=Utils.defaultdict(list)
self.current_group=0
self.groups=[]
self.group_names={}
def get_variant_dir(self):
if not self.variant:
return self.out_dir
return os.path.join(self.out_dir,self.variant)
variant_dir=property(get_variant_dir,None)
def __call__(self,*k,**kw):
kw['bld']=self
ret=TaskGen.task_gen(*k,**kw)
self.task_gen_cache_names={}
self.add_to_group(ret,group=kw.get('group',None))
return ret
def rule(self,*k,**kw):
def f(rule):
ret=self(*k,**kw)
ret.rule=rule
return ret
return f
def __copy__(self):
raise Errors.WafError('build contexts are not supposed to be copied')
def install_files(self,*k,**kw):
pass
def install_as(self,*k,**kw):
pass
def symlink_as(self,*k,**kw):
pass
def load_envs(self):
node=self.root.find_node(self.cache_dir)
if not node:
raise Errors.WafError('The project was not configured: run "waf configure" first!')
lst=node.ant_glob('**/*%s'%CACHE_SUFFIX,quiet=True)
if not lst:
raise Errors.WafError('The cache directory is empty: reconfigure the project')
for x in lst:
name=x.path_from(node).replace(CACHE_SUFFIX,'').replace('\\','/')
env=ConfigSet.ConfigSet(x.abspath())
self.all_envs[name]=env
for f in env[CFG_FILES]:
newnode=self.root.find_resource(f)
try:
h=Utils.h_file(newnode.abspath())
except(IOError,AttributeError):
Logs.error('cannot find %r'%f)
h=Utils.SIG_NIL
newnode.sig=h
def init_dirs(self):
if not(os.path.isabs(self.top_dir)and os.path.isabs(self.out_dir)):
raise Errors.WafError('The project was not configured: run "waf configure" first!')
self.path=self.srcnode=self.root.find_dir(self.top_dir)
self.bldnode=self.root.make_node(self.variant_dir)
self.bldnode.mkdir()
def execute(self):
self.restore()
if not self.all_envs:
self.load_envs()
self.execute_build()
def execute_build(self):
Logs.info("Waf: Entering directory `%s'"%self.variant_dir)
self.recurse([self.run_dir])
self.pre_build()
self.timer=Utils.Timer()
if self.progress_bar:
sys.stderr.write(Logs.colors.cursor_off)
try:
self.compile()
finally:
if self.progress_bar==1:
c=len(self.returned_tasks)or 1
self.to_log(self.progress_line(c,c,Logs.colors.BLUE,Logs.colors.NORMAL))
print('')
sys.stdout.flush()
sys.stderr.write(Logs.colors.cursor_on)
Logs.info("Waf: Leaving directory `%s'"%self.variant_dir)
self.post_build()
def restore(self):
try:
env=ConfigSet.ConfigSet(os.path.join(self.cache_dir,'build.config.py'))
except(IOError,OSError):
pass
else:
if env['version']<Context.HEXVERSION:
raise Errors.WafError('Version mismatch! reconfigure the project')
for t in env['tools']:
self.setup(**t)
dbfn=os.path.join(self.variant_dir,Context.DBFILE)
try:
data=Utils.readf(dbfn,'rb')
except(IOError,EOFError):
Logs.debug('build: Could not load the build cache %s (missing)'%dbfn)
else:
try:
waflib.Node.pickle_lock.acquire()
waflib.Node.Nod3=self.node_class
try:
data=cPickle.loads(data)
except Exception ,e:
Logs.debug('build: Could not pickle the build cache %s: %r'%(dbfn,e))
else:
for x in SAVED_ATTRS:
setattr(self,x,data[x])
finally:
waflib.Node.pickle_lock.release()
self.init_dirs()
def store(self):
data={}
for x in SAVED_ATTRS:
data[x]=getattr(self,x)
db=os.path.join(self.variant_dir,Context.DBFILE)
try:
waflib.Node.pickle_lock.acquire()
waflib.Node.Nod3=self.node_class
x=cPickle.dumps(data,-1)
finally:
waflib.Node.pickle_lock.release()
Utils.writef(db+'.tmp',x,m='wb')
try:
st=os.stat(db)
os.remove(db)
if not Utils.is_win32:
os.chown(db+'.tmp',st.st_uid,st.st_gid)
except(AttributeError,OSError):
pass
os.rename(db+'.tmp',db)
def compile(self):
Logs.debug('build: compile()')
self.producer=Runner.Parallel(self,self.jobs)
self.producer.biter=self.get_build_iterator()
self.returned_tasks=[]
try:
self.producer.start()
except KeyboardInterrupt:
self.store()
raise
else:
if self.producer.dirty:
self.store()
if self.producer.error:
raise Errors.BuildError(self.producer.error)
def setup(self,tool,tooldir=None,funs=None):
if isinstance(tool,list):
for i in tool:self.setup(i,tooldir)
return
module=Context.load_tool(tool,tooldir)
if hasattr(module,"setup"):module.setup(self)
def get_env(self):
try:
return self.all_envs[self.variant]
except KeyError:
return self.all_envs['']
def set_env(self,val):
self.all_envs[self.variant]=val
env=property(get_env,set_env)
def add_manual_dependency(self,path,value):
if path is None:
raise ValueError('Invalid input')
if isinstance(path,waflib.Node.Node):
node=path
elif os.path.isabs(path):
node=self.root.find_resource(path)
else:
node=self.path.find_resource(path)
if isinstance(value,list):
self.deps_man[id(node)].extend(value)
else:
self.deps_man[id(node)].append(value)
def launch_node(self):
try:
return self.p_ln
except AttributeError:
self.p_ln=self.root.find_dir(self.launch_dir)
return self.p_ln
def hash_env_vars(self,env,vars_lst):
if not env.table:
env=env.parent
if not env:
return Utils.SIG_NIL
idx=str(id(env))+str(vars_lst)
try:
cache=self.cache_env
except AttributeError:
cache=self.cache_env={}
else:
try:
return self.cache_env[idx]
except KeyError:
pass
lst=[env[a]for a in vars_lst]
ret=Utils.h_list(lst)
Logs.debug('envhash: %s %r',Utils.to_hex(ret),lst)
cache[idx]=ret
return ret
def get_tgen_by_name(self,name):
cache=self.task_gen_cache_names
if not cache:
for g in self.groups:
for tg in g:
try:
cache[tg.name]=tg
except AttributeError:
pass
try:
return cache[name]
except KeyError:
raise Errors.WafError('Could not find a task generator for the name %r'%name)
def progress_line(self,state,total,col1,col2):
n=len(str(total))
Utils.rot_idx+=1
ind=Utils.rot_chr[Utils.rot_idx%4]
pc=(100.*state)/total
eta=str(self.timer)
fs="[%%%dd/%%%dd][%%s%%2d%%%%%%s][%s]["%(n,n,ind)
left=fs%(state,total,col1,pc,col2)
right='][%s%s%s]'%(col1,eta,col2)
cols=Logs.get_term_cols()-len(left)-len(right)+2*len(col1)+2*len(col2)
if cols<7:cols=7
ratio=((cols*state)//total)-1
bar=('='*ratio+'>').ljust(cols)
msg=Utils.indicator%(left,bar,right)
return msg
def declare_chain(self,*k,**kw):
return TaskGen.declare_chain(*k,**kw)
def pre_build(self):
for m in getattr(self,'pre_funs',[]):
m(self)
def post_build(self):
for m in getattr(self,'post_funs',[]):
m(self)
def add_pre_fun(self,meth):
try:
self.pre_funs.append(meth)
except AttributeError:
self.pre_funs=[meth]
def add_post_fun(self,meth):
try:
self.post_funs.append(meth)
except AttributeError:
self.post_funs=[meth]
def get_group(self,x):
if not self.groups:
self.add_group()
if x is None:
return self.groups[self.current_group]
if x in self.group_names:
return self.group_names[x]
return self.groups[x]
def add_to_group(self,tgen,group=None):
assert(isinstance(tgen,TaskGen.task_gen)or isinstance(tgen,Task.TaskBase))
tgen.bld=self
self.get_group(group).append(tgen)
def get_group_name(self,g):
if not isinstance(g,list):
g=self.groups[g]
for x in self.group_names:
if id(self.group_names[x])==id(g):
return x
return''
def get_group_idx(self,tg):
se=id(tg)
for i in range(len(self.groups)):
for t in self.groups[i]:
if id(t)==se:
return i
return None
def add_group(self,name=None,move=True):
if name and name in self.group_names:
Logs.error('add_group: name %s already present'%name)
g=[]
self.group_names[name]=g
self.groups.append(g)
if move:
self.current_group=len(self.groups)-1
def set_group(self,idx):
if isinstance(idx,str):
g=self.group_names[idx]
for i in range(len(self.groups)):
if id(g)==id(self.groups[i]):
self.current_group=i
else:
self.current_group=idx
def total(self):
total=0
for group in self.groups:
for tg in group:
try:
total+=len(tg.tasks)
except AttributeError:
total+=1
return total
def get_targets(self):
to_post=[]
min_grp=0
for name in self.targets.split(','):
tg=self.get_tgen_by_name(name)
if not tg:
raise Errors.WafError('target %r does not exist'%name)
m=self.get_group_idx(tg)
if m>min_grp:
min_grp=m
to_post=[tg]
elif m==min_grp:
to_post.append(tg)
return(min_grp,to_post)
def get_all_task_gen(self):
lst=[]
for g in self.groups:
lst.extend(g)
return lst
def post_group(self):
if self.targets=='*':
for tg in self.groups[self.cur]:
try:
f=tg.post
except AttributeError:
pass
else:
f()
elif self.targets:
if self.cur<self._min_grp:
for tg in self.groups[self.cur]:
try:
f=tg.post
except AttributeError:
pass
else:
f()
else:
for tg in self._exact_tg:
tg.post()
else:
ln=self.launch_node()
if ln.is_child_of(self.bldnode):
Logs.warn('Building from the build directory, forcing --targets=*')
ln=self.srcnode
elif not ln.is_child_of(self.srcnode):
Logs.warn('CWD %s is not under %s, forcing --targets=* (run distclean?)'%(ln.abspath(),self.srcnode.abspath()))
ln=self.srcnode
for tg in self.groups[self.cur]:
try:
f=tg.post
except AttributeError:
pass
else:
if tg.path.is_child_of(ln):
f()
def get_tasks_group(self,idx):
tasks=[]
for tg in self.groups[idx]:
try:
tasks.extend(tg.tasks)
except AttributeError:
tasks.append(tg)
return tasks
def get_build_iterator(self):
self.cur=0
if self.targets and self.targets!='*':
(self._min_grp,self._exact_tg)=self.get_targets()
global lazy_post
if self.post_mode!=POST_LAZY:
while self.cur<len(self.groups):
self.post_group()
self.cur+=1
self.cur=0
while self.cur<len(self.groups):
if self.post_mode!=POST_AT_ONCE:
self.post_group()
tasks=self.get_tasks_group(self.cur)
Task.set_file_constraints(tasks)
Task.set_precedence_constraints(tasks)
self.cur_tasks=tasks
self.cur+=1
if not tasks:
continue
yield tasks
while 1:
yield[]
class inst(Task.Task):
color='CYAN'
def uid(self):
lst=[self.dest,self.path]+self.source
return Utils.h_list(repr(lst))
def post(self):
buf=[]
for x in self.source:
if isinstance(x,waflib.Node.Node):
y=x
else:
y=self.path.find_resource(x)
if not y:
if Logs.verbose:
Logs.warn('Could not find %s immediately (may cause broken builds)'%x)
idx=self.generator.bld.get_group_idx(self)
for tg in self.generator.bld.groups[idx]:
if not isinstance(tg,inst)and id(tg)!=id(self):
tg.post()
y=self.path.find_resource(x)
if y:
break
else:
raise Errors.WafError('Could not find %r in %r'%(x,self.path))
buf.append(y)
self.inputs=buf
def runnable_status(self):
ret=super(inst,self).runnable_status()
if ret==Task.SKIP_ME:
return Task.RUN_ME
return ret
def __str__(self):
return''
def run(self):
return self.generator.exec_task()
def get_install_path(self,destdir=True):
dest=Utils.subst_vars(self.dest,self.env)
dest=dest.replace('/',os.sep)
if destdir and Options.options.destdir:
dest=os.path.join(Options.options.destdir,os.path.splitdrive(dest)[1].lstrip(os.sep))
return dest
def exec_install_files(self):
destpath=self.get_install_path()
if not destpath:
raise Errors.WafError('unknown installation path %r'%self.generator)
for x,y in zip(self.source,self.inputs):
if self.relative_trick:
destfile=os.path.join(destpath,y.path_from(self.path))
else:
destfile=os.path.join(destpath,y.name)
self.generator.bld.do_install(y.abspath(),destfile,self.chmod)
def exec_install_as(self):
destfile=self.get_install_path()
self.generator.bld.do_install(self.inputs[0].abspath(),destfile,self.chmod)
def exec_symlink_as(self):
destfile=self.get_install_path()
src=self.link
if self.relative_trick:
src=os.path.relpath(src,os.path.dirname(destfile))
self.generator.bld.do_link(src,destfile)
class InstallContext(BuildContext):
'''installs the targets on the system'''
cmd='install'
def __init__(self,**kw):
super(InstallContext,self).__init__(**kw)
self.uninstall=[]
self.is_install=INSTALL
def do_install(self,src,tgt,chmod=Utils.O644):
d,_=os.path.split(tgt)
if not d:
raise Errors.WafError('Invalid installation given %r->%r'%(src,tgt))
Utils.check_dir(d)
srclbl=src.replace(self.srcnode.abspath()+os.sep,'')
if not Options.options.force:
try:
st1=os.stat(tgt)
st2=os.stat(src)
except OSError:
pass
else:
if st1.st_mtime+2>=st2.st_mtime and st1.st_size==st2.st_size:
if not self.progress_bar:
Logs.info('- install %s (from %s)'%(tgt,srclbl))
return False
if not self.progress_bar:
Logs.info('+ install %s (from %s)'%(tgt,srclbl))
try:
os.remove(tgt)
except OSError:
pass
try:
shutil.copy2(src,tgt)
os.chmod(tgt,chmod)
except IOError:
try:
os.stat(src)
except(OSError,IOError):
Logs.error('File %r does not exist'%src)
raise Errors.WafError('Could not install the file %r'%tgt)
def do_link(self,src,tgt):
d,_=os.path.split(tgt)
Utils.check_dir(d)
link=False
if not os.path.islink(tgt):
link=True
elif os.readlink(tgt)!=src:
link=True
if link:
try:os.remove(tgt)
except OSError:pass
if not self.progress_bar:
Logs.info('+ symlink %s (to %s)'%(tgt,src))
os.symlink(src,tgt)
else:
if not self.progress_bar:
Logs.info('- symlink %s (to %s)'%(tgt,src))
def run_task_now(self,tsk,postpone):
tsk.post()
if not postpone:
if tsk.runnable_status()==Task.ASK_LATER:
raise self.WafError('cannot post the task %r'%tsk)
tsk.run()
def install_files(self,dest,files,env=None,chmod=Utils.O644,relative_trick=False,cwd=None,add=True,postpone=True):
tsk=inst(env=env or self.env)
tsk.bld=self
tsk.path=cwd or self.path
tsk.chmod=chmod
if isinstance(files,waflib.Node.Node):
tsk.source=[files]
else:
tsk.source=Utils.to_list(files)
tsk.dest=dest
tsk.exec_task=tsk.exec_install_files
tsk.relative_trick=relative_trick
if add:self.add_to_group(tsk)
self.run_task_now(tsk,postpone)
return tsk
def install_as(self,dest,srcfile,env=None,chmod=Utils.O644,cwd=None,add=True,postpone=True):
tsk=inst(env=env or self.env)
tsk.bld=self
tsk.path=cwd or self.path
tsk.chmod=chmod
tsk.source=[srcfile]
tsk.dest=dest
tsk.exec_task=tsk.exec_install_as
if add:self.add_to_group(tsk)
self.run_task_now(tsk,postpone)
return tsk
def symlink_as(self,dest,src,env=None,cwd=None,add=True,postpone=True,relative_trick=False):
if Utils.is_win32:
return
tsk=inst(env=env or self.env)
tsk.bld=self
tsk.dest=dest
tsk.path=cwd or self.path
tsk.source=[]
tsk.link=src
tsk.relative_trick=relative_trick
tsk.exec_task=tsk.exec_symlink_as
if add:self.add_to_group(tsk)
self.run_task_now(tsk,postpone)
return tsk
class UninstallContext(InstallContext):
'''removes the targets installed'''
cmd='uninstall'
def __init__(self,**kw):
super(UninstallContext,self).__init__(**kw)
self.is_install=UNINSTALL
def do_install(self,src,tgt,chmod=Utils.O644):
if not self.progress_bar:
Logs.info('- remove %s'%tgt)
self.uninstall.append(tgt)
try:
os.remove(tgt)
except OSError ,e:
if e.errno!=errno.ENOENT:
if not getattr(self,'uninstall_error',None):
self.uninstall_error=True
Logs.warn('build: some files could not be uninstalled (retry with -vv to list them)')
if Logs.verbose>1:
Logs.warn('Could not remove %s (error code %r)'%(e.filename,e.errno))
while tgt:
tgt=os.path.dirname(tgt)
try:
os.rmdir(tgt)
except OSError:
break
def do_link(self,src,tgt):
try:
if not self.progress_bar:
Logs.info('- remove %s'%tgt)
os.remove(tgt)
except OSError:
pass
while tgt:
tgt=os.path.dirname(tgt)
try:
os.rmdir(tgt)
except OSError:
break
def execute(self):
try:
def runnable_status(self):
return Task.SKIP_ME
setattr(Task.Task,'runnable_status_back',Task.Task.runnable_status)
setattr(Task.Task,'runnable_status',runnable_status)
super(UninstallContext,self).execute()
finally:
setattr(Task.Task,'runnable_status',Task.Task.runnable_status_back)
class CleanContext(BuildContext):
'''cleans the project'''
cmd='clean'
def execute(self):
self.restore()
if not self.all_envs:
self.load_envs()
self.recurse([self.run_dir])
try:
self.clean()
finally:
self.store()
def clean(self):
Logs.debug('build: clean called')
if self.bldnode!=self.srcnode:
lst=[]
for e in self.all_envs.values():
lst.extend(self.root.find_or_declare(f)for f in e[CFG_FILES])
for n in self.bldnode.ant_glob('**/*',excl='.lock* *conf_check_*/** config.log c4che/*',quiet=True):
if n in lst:
continue
n.delete()
self.root.children={}
for v in'node_deps task_sigs raw_deps'.split():
setattr(self,v,{})
class ListContext(BuildContext):
'''lists the targets to execute'''
cmd='list'
def execute(self):
self.restore()
if not self.all_envs:
self.load_envs()
self.recurse([self.run_dir])
self.pre_build()
self.timer=Utils.Timer()
for g in self.groups:
for tg in g:
try:
f=tg.post
except AttributeError:
pass
else:
f()
try:
self.get_tgen_by_name('')
except Exception:
pass
lst=list(self.task_gen_cache_names.keys())
lst.sort()
for k in lst:
Logs.pprint('GREEN',k)
class StepContext(BuildContext):
'''executes tasks in a step-by-step fashion, for debugging'''
cmd='step'
def __init__(self,**kw):
super(StepContext,self).__init__(**kw)
self.files=Options.options.files
def compile(self):
if not self.files:
Logs.warn('Add a pattern for the debug build, for example "waf step --files=main.c,app"')
BuildContext.compile(self)
return
targets=None
if self.targets and self.targets!='*':
targets=self.targets.split(',')
for g in self.groups:
for tg in g:
if targets and tg.name not in targets:
continue
try:
f=tg.post
except AttributeError:
pass
else:
f()
for pat in self.files.split(','):
matcher=self.get_matcher(pat)
for tg in g:
if isinstance(tg,Task.TaskBase):
lst=[tg]
else:
lst=tg.tasks
for tsk in lst:
do_exec=False
for node in getattr(tsk,'inputs',[]):
if matcher(node,output=False):
do_exec=True
break
for node in getattr(tsk,'outputs',[]):
if matcher(node,output=True):
do_exec=True
break
if do_exec:
ret=tsk.run()
Logs.info('%s -> exit %r'%(str(tsk),ret))
def get_matcher(self,pat):
inn=True
out=True
if pat.startswith('in:'):
out=False
pat=pat.replace('in:','')
elif pat.startswith('out:'):
inn=False
pat=pat.replace('out:','')
anode=self.root.find_node(pat)
pattern=None
if not anode:
if not pat.startswith('^'):
pat='^.+?%s'%pat
if not pat.endswith('$'):
pat='%s$'%pat
pattern=re.compile(pat)
def match(node,output):
if output==True and not out:
return False
if output==False and not inn:
return False
if anode:
return anode==node
else:
return pattern.match(node.abspath())
return match
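# Illustrative --files patterns for the step command, based on get_matcher
# above (not part of the original file):
#   waf step --files=main.c,app   # match task inputs or outputs
#   waf step --files=in:main.c    # match task inputs only
#   waf step --files=out:app.o    # match task outputs only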
BuildContext.store=Utils.nogc(BuildContext.store)
BuildContext.restore=Utils.nogc(BuildContext.restore)
| gpl-2.0 |
kaiyuanl/gem5 | src/cpu/kvm/X86KvmCPU.py | 54 | 2012 | # Copyright (c) 2013 Andreas Sandberg
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from m5.params import *
from BaseKvmCPU import BaseKvmCPU
class X86KvmCPU(BaseKvmCPU):
type = 'X86KvmCPU'
cxx_header = "cpu/kvm/x86_cpu.hh"
@classmethod
def export_methods(cls, code):
code('''
void dumpFpuRegs();
void dumpIntRegs();
void dumpSpecRegs();
void dumpXCRs();
void dumpXSave();
void dumpVCpuEvents();
''')
useXSave = Param.Bool(True, "Use XSave to synchronize FPU/SIMD registers")
| bsd-3-clause |
ArcEye/MK-Qt5 | nosetests/unittest_instbindings.py | 11 | 1279 | #!/usr/bin/env python
# verify the cython inst bindings
from nose import with_setup
from machinekit.nosetests.realtime import setup_module ,teardown_module
from machinekit.nosetests.support import fnear
from unittest import TestCase
import time,os,ConfigParser
from machinekit import rtapi,hal
class TestIinst(TestCase):
def setUp(self):
self.cfg = ConfigParser.ConfigParser()
self.cfg.read(os.getenv("MACHINEKIT_INI"))
self.uuid = self.cfg.get("MACHINEKIT", "MKUUID")
rt = rtapi.RTAPIcommand(uuid=self.uuid)
rt.loadrt("icomp");
rt.newinst("icomp","foo")
assert len(instances) == 1
rt.newinst("icomp","bar")
assert len(instances) == 2
rt.delinst("foo")
assert len(instances) == 1
c = hal.Component("icomp")
for i in instances:
assert c.id == i.owner_id
assert c.name == i.owner().name
assert "foo" in instances
assert "bar" in instances
assert instances["foo"].size > 0
assert instances["bar"].size > 0
try:
x = instances["nonexistent"]
raise "should not happen"
except NameError:
pass
(lambda s=__import__('signal'):
s.signal(s.SIGTERM, s.SIG_IGN))()
| lgpl-2.1 |
js0701/chromium-crosswalk | tools/telemetry/telemetry/value/list_of_scalar_values.py | 9 | 7147 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import numbers
import math
from telemetry import value as value_module
from telemetry.value import none_values
from telemetry.value import summarizable
def Variance(sample):
""" Compute the population variance.
Args:
sample: a list of numbers.
"""
k = len(sample) - 1 # Bessel correction
if k <= 0:
return 0
m = _Mean(sample)
return sum((x - m)**2 for x in sample)/k
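# Illustrative: Variance([1, 2, 3]) == 1.0 -- the mean is 2, the squared
# deviations sum to 2, and the Bessel-corrected divisor is k = len(sample) - 1 = 2.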
def StandardDeviation(sample):
""" Compute standard deviation for a list of numbers.
Args:
sample: a list of numbers.
"""
return math.sqrt(Variance(sample))
def PooledStandardDeviation(list_of_samples, list_of_variances=None):
""" Compute standard deviation for a list of samples.
See: https://en.wikipedia.org/wiki/Pooled_variance for the formula.
Args:
list_of_samples: a list of lists, each is a list of numbers.
list_of_variances: a list of numbers, the i-th element is the variance of
the i-th sample in list_of_samples. If this is None, we use
Variance(sample) to get the variance of the i-th sample.
"""
pooled_variance = 0.0
total_degrees_of_freedom = 0
for i in xrange(len(list_of_samples)):
l = list_of_samples[i]
k = len(l) - 1 # Bessel correction
if k <= 0:
continue
variance = list_of_variances[i] if list_of_variances else Variance(l)
pooled_variance += k * variance
total_degrees_of_freedom += k
if total_degrees_of_freedom:
return (pooled_variance/total_degrees_of_freedom) ** 0.5
return 0
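# PooledStandardDeviation above implements
#   s_p = sqrt( sum_i (n_i - 1) * s_i^2 / sum_i (n_i - 1) )
# Illustrative (not part of the original module): two samples of sizes 3 and 5
# with variances 1.0 and 4.0 give (2*1.0 + 4*4.0) / 6 = 3.0, so s_p = sqrt(3.0).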
def _Mean(values):
return float(sum(values)) / len(values) if len(values) > 0 else 0.0
class ListOfScalarValues(summarizable.SummarizableValue):
""" ListOfScalarValues represents a list of numbers.
By default, std is the standard deviation of all numbers in the list. Std can
also be specified in the constructor if the numbers are not from the same
population.
"""
def __init__(self, page, name, units, values,
important=True, description=None,
tir_label=None, none_value_reason=None,
std=None, same_page_merge_policy=value_module.CONCATENATE,
improvement_direction=None):
super(ListOfScalarValues, self).__init__(page, name, units, important,
description, tir_label,
improvement_direction)
if values is not None:
assert isinstance(values, list)
assert len(values) > 0
assert all(isinstance(v, numbers.Number) for v in values)
assert std is None or isinstance(std, numbers.Number)
else:
assert std is None
none_values.ValidateNoneValueReason(values, none_value_reason)
self.values = values
self.none_value_reason = none_value_reason
self.same_page_merge_policy = same_page_merge_policy
if values is not None and std is None:
std = StandardDeviation(values)
assert std is None or std >= 0, (
'standard deviation cannot be negative: %s' % std)
self._std = std
@property
def std(self):
return self._std
@property
def variance(self):
return self._std ** 2
def __repr__(self):
if self.page:
page_name = self.page.display_name
else:
page_name = 'None'
if self.same_page_merge_policy == value_module.CONCATENATE:
merge_policy = 'CONCATENATE'
else:
merge_policy = 'PICK_FIRST'
return ('ListOfScalarValues(%s, %s, %s, %s, '
'important=%s, description=%s, tir_label=%s, std=%s, '
'same_page_merge_policy=%s, improvement_direction=%s)') % (
page_name,
self.name,
self.units,
repr(self.values),
self.important,
self.description,
self.tir_label,
self.std,
merge_policy,
self.improvement_direction)
def GetBuildbotDataType(self, output_context):
if self._IsImportantGivenOutputIntent(output_context):
return 'default'
return 'unimportant'
def GetBuildbotValue(self):
return self.values
def GetRepresentativeNumber(self):
return _Mean(self.values)
def GetRepresentativeString(self):
return repr(self.values)
def IsMergableWith(self, that):
return (super(ListOfScalarValues, self).IsMergableWith(that) and
self.same_page_merge_policy == that.same_page_merge_policy)
@staticmethod
def GetJSONTypeName():
return 'list_of_scalar_values'
def AsDict(self):
d = super(ListOfScalarValues, self).AsDict()
d['values'] = self.values
d['std'] = self.std
if self.none_value_reason is not None:
d['none_value_reason'] = self.none_value_reason
return d
@staticmethod
def FromDict(value_dict, page_dict):
kwargs = value_module.Value.GetConstructorKwArgs(value_dict, page_dict)
kwargs['values'] = value_dict['values']
kwargs['std'] = value_dict['std']
if 'improvement_direction' in value_dict:
kwargs['improvement_direction'] = value_dict['improvement_direction']
if 'none_value_reason' in value_dict:
kwargs['none_value_reason'] = value_dict['none_value_reason']
if 'tir_label' in value_dict:
kwargs['tir_label'] = value_dict['tir_label']
return ListOfScalarValues(**kwargs)
@classmethod
def MergeLikeValuesFromSamePage(cls, values):
assert len(values) > 0
v0 = values[0]
if v0.same_page_merge_policy == value_module.PICK_FIRST:
return ListOfScalarValues(
v0.page, v0.name, v0.units,
values[0].values,
important=v0.important,
same_page_merge_policy=v0.same_page_merge_policy,
none_value_reason=v0.none_value_reason,
improvement_direction=v0.improvement_direction)
assert v0.same_page_merge_policy == value_module.CONCATENATE
return cls._MergeLikeValues(values, v0.page, v0.name, v0.tir_label)
@classmethod
def MergeLikeValuesFromDifferentPages(cls, values):
assert len(values) > 0
v0 = values[0]
return cls._MergeLikeValues(values, None, v0.name, v0.tir_label)
@classmethod
def _MergeLikeValues(cls, values, page, name, tir_label):
v0 = values[0]
merged_values = []
list_of_samples = []
none_value_reason = None
pooled_std = None
for v in values:
if v.values is None:
merged_values = None
none_value_reason = none_values.MERGE_FAILURE_REASON
break
merged_values.extend(v.values)
list_of_samples.append(v.values)
if merged_values:
pooled_std = PooledStandardDeviation(
list_of_samples, list_of_variances=[v.variance for v in values])
return ListOfScalarValues(
page, name, v0.units,
merged_values,
important=v0.important,
tir_label=tir_label,
same_page_merge_policy=v0.same_page_merge_policy,
std=pooled_std,
none_value_reason=none_value_reason,
improvement_direction=v0.improvement_direction)
| bsd-3-clause |
Orav/kbengine | kbe/res/scripts/common/Lib/email/mime/text.py | 2 | 1408 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: [email protected]
"""Class representing text/* type MIME documents."""
__all__ = ['MIMEText']
from email.mime.nonmultipart import MIMENonMultipart
class MIMEText(MIMENonMultipart):
"""Class for generating text/* type MIME documents."""
def __init__(self, _text, _subtype='plain', _charset=None):
"""Create a text/* type MIME document.
_text is the string for this message object.
_subtype is the MIME sub content type, defaulting to "plain".
_charset is the character set parameter added to the Content-Type
header. This defaults to "us-ascii". Note that as a side-effect, the
Content-Transfer-Encoding header will also be set.
"""
# If no _charset was specified, check to see if there are non-ascii
# characters present. If not, use 'us-ascii', otherwise use utf-8.
# XXX: This can be removed once #7304 is fixed.
if _charset is None:
try:
_text.encode('us-ascii')
_charset = 'us-ascii'
except UnicodeEncodeError:
_charset = 'utf-8'
MIMENonMultipart.__init__(self, 'text', _subtype,
**{'charset': _charset})
self.set_payload(_text, _charset)
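# Illustrative usage (not part of the original module):
#   MIMEText('hello')             -> Content-Type: text/plain; charset="us-ascii"
#   MIMEText('h\xe9llo', 'html')  -> Content-Type: text/html; charset="utf-8"
# because the constructor falls back to utf-8 when the text is not pure ASCII.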
| lgpl-3.0 |
cc272309126/panda3d | contrib/src/sceneeditor/propertyWindow.py | 8 | 70519 | #################################################################
# propertyWindow.py
# Written by Yi-Hong Lin, [email protected], 2004
#################################################################
from direct.tkwidgets.AppShell import *
from direct.showbase.TkGlobal import *
from seColorEntry import *
from direct.tkwidgets import Floater
from direct.tkwidgets import Dial
from direct.tkwidgets import Slider
from direct.tkwidgets import VectorWidgets
from pandac.PandaModules import *
from Tkinter import *
import Pmw
class propertyWindow(AppShell,Pmw.MegaWidget):
#################################################################
# propertyWindow(AppShell,Pmw.MegaWidget)
# This class will create a window to show the object's properties and
# let the user change some of them.
#################################################################
appversion = '1.0'
appname = 'Property Window'
frameWidth = 400
frameHeight = 400
padx = 0
pady = 0
usecommandarea = 0
usestatusarea = 0
widgetsDict = {}
def __init__(self, target, type, info, parent = None, nodePath = render, **kw):
self.nodePath = target
self.name = target.getName()
self.type = type
self.info = info
# Initialise superclass
Pmw.MegaWidget.__init__(self, parent)
# Define the megawidget options.
optiondefs = (
('title', self.appname, None),
)
self.defineoptions(kw, optiondefs)
if parent == None:
self.parent = Toplevel()
AppShell.__init__(self, self.parent)
self.parent.resizable(False,False) ## Disable the ability to resize for this Window.
def appInit(self):
return
def createInterface(self):
# The interior of the toplevel panel
interior = self.interior()
mainFrame = Frame(interior)
name_label = Label(mainFrame, text= self.name,font=('MSSansSerif', 15),
relief = RIDGE, borderwidth=5)
name_label.pack()
outFrame = Frame(mainFrame, relief = RIDGE, borderwidth=3)
self.contentWidge = self.createcomponent(
'scrolledFrame',
(), None,
Pmw.ScrolledFrame, (outFrame,),
hull_width = 200, hull_height = 300,
usehullsize = 1)
self.contentFrame = self.contentWidge.component('frame')
self.contentWidge.pack(fill = 'both', expand = 1,padx = 3, pady = 5)
outFrame.pack(fill = 'both', expand = 1)
# Create a different interface depending on the object's type
if self.type == 'camera':
self.cameraInterface(self.contentFrame)
self.accept('forPorpertyWindow'+self.name, self.trackDataFromSceneCamera)
elif self.type == 'Model':
self.modelInterface(self.contentFrame)
self.accept('forPorpertyWindow'+self.name, self.trackDataFromSceneModel)
elif self.type == 'Actor':
self.modelInterface(self.contentFrame)
self.actorInterface(self.contentFrame)
self.accept('forPorpertyWindow'+self.name, self.trackDataFromSceneActor)
pass
elif self.type == 'Light':
self.lightInterface(self.contentFrame)
self.accept('forPorpertyWindow'+self.name, self.trackDataFromSceneLight)
pass
elif self.type == 'dummy':
self.dummyInterface(self.contentFrame)
self.accept('forPorpertyWindow'+self.name, self.trackDataFromSceneDummy)
pass
elif self.type == 'collisionNode':
self.collisionInterface(self.contentFrame)
self.accept('forPorpertyWindow'+self.name, self.trackDataFromSceneCollision)
pass
elif self.type == 'Special':
# If the user tries to open the property window for the node "SEditor",
# it will show the grid properties instead.
self.gridInterface(self.contentFrame)
self.accept('forPorpertyWindow'+self.name, None)
pass
self.curveFrame = None
#### If nodePath has been binded with any curves
if self.info.has_key('curveList'):
self.createCurveFrame(self.contentFrame)
## Set all stuff done
mainFrame.pack(fill = 'both', expand = 1)
def createMenuBar(self):
# we don't need menu bar here.
self.menuBar.destroy()
def onDestroy(self, event):
self.ignore('forPorpertyWindow'+self.name)
messenger.send('PW_close', [self.name])
'''
If you have opened anything, please rewrite here!
'''
pass
def createEntryField(self, parent,text, value,
command, initialState, labelWidth = 12,
side = 'left', fill = X, expand = 0,
validate = None,
defaultButton = False, buttonText = 'Default',defaultFunction = None ):
#################################################################
# createEntryField(self, parent,text, value,
# command, initialState, labelWidth = 12,
# side = 'left', fill = X, expand = 0,
# validate = None,
# defaultButton = False, buttonText = 'Default',defaultFunction = None ):
# This function will create an Entry on the frame "parent".
# Also, if the user has enabled "defaultButton," it will create a button right after the entry.
#################################################################
frame = Frame(parent)
widget = Pmw.EntryField(frame, labelpos='w', label_text = text,
value = value, entry_font=('MSSansSerif', 10),label_font=('MSSansSerif', 10),
modifiedcommand=command, validate = validate,
label_width = labelWidth)
widget.configure(entry_state = initialState)
widget.pack(side=LEFT)
self.widgetsDict[text] = widget
if defaultButton and (defaultFunction!=None):
# create a button if they need.
widget = Button(frame, text=buttonText, font=('MSSansSerif', 10), command = defaultFunction)
widget.pack(side=LEFT, padx=3)
self.widgetsDict[text+'-'+'DefaultButton']=widget
frame.pack(side = side, fill = fill, expand = expand,pady=3)
def createPosEntry(self, contentFrame):
#################################################################
# createPosEntry(self, contentFrame)
# This function will create three entries for setting the position of the object.
# The entry type is Floater.
# It will set the callback function to setNodePathPosHprScale().
#################################################################
posInterior = Frame(contentFrame)
self.posX = self.createcomponent('posX', (), None,
Floater.Floater, (posInterior,),
text = 'X', relief = FLAT,
value = self.nodePath.getX(),
label_foreground = 'Red',
entry_width = 9)
self.posX['commandData'] = ['x']
self.posX['command'] = self.setNodePathPosHprScale
self.posX.pack(side=LEFT,expand=0,fill=X, padx=1)
self.posY = self.createcomponent('posY', (), None,
Floater.Floater, (posInterior,),
text = 'Y', relief = FLAT,
value = self.nodePath.getY(),
label_foreground = '#00A000',
entry_width = 9)
self.posY['commandData'] = ['y']
self.posY['command'] = self.setNodePathPosHprScale
self.posY.pack(side=LEFT, expand=0,fill=X, padx=1)
self.posZ = self.createcomponent('posZ', (), None,
Floater.Floater, (posInterior,),
text = 'Z', relief = FLAT,
value = self.nodePath.getZ(),
label_foreground = 'Blue',
entry_width = 9)
self.posZ['commandData'] = ['z']
self.posZ['command'] = self.setNodePathPosHprScale
self.posZ.pack(side=LEFT, expand=0,fill=X, padx=1)
posInterior.pack(side=TOP, expand=0,fill=X, padx=3, pady=3)
def createHprEntry(self, contentFrame):
#################################################################
# createHprEntry(self, contentFrame)
# This function will create three entries for setting the orientation of the object.
# The entry type is Dial.
# It will set the callback function to setNodePathPosHprScale().
#################################################################
hprInterior = Frame(contentFrame)
self.hprH = self.createcomponent('hprH', (), None,
Dial.AngleDial, (hprInterior,),
style = 'mini',
text = 'H', value = self.nodePath.getH(),
relief = FLAT,
label_foreground = 'blue',
entry_width = 9)
self.hprH['commandData'] = ['h']
self.hprH['command'] = self.setNodePathPosHprScale
self.hprH.pack(side = LEFT, expand=0,fill=X)
self.hprP = self.createcomponent('hprP', (), None,
Dial.AngleDial, (hprInterior,),
style = 'mini',
text = 'P', value = self.nodePath.getP(),
relief = FLAT,
label_foreground = 'red',
entry_width = 9)
self.hprP['commandData'] = ['p']
self.hprP['command'] = self.setNodePathPosHprScale
self.hprP.pack(side = LEFT, expand=0,fill=X)
self.hprR = self.createcomponent('hprR', (), None,
Dial.AngleDial, (hprInterior,),
style = 'mini',
text = 'R', value = self.nodePath.getR(),
relief = FLAT,
label_foreground = '#00A000',
entry_width = 9)
self.hprR['commandData'] = ['r']
self.hprR['command'] = self.setNodePathPosHprScale
self.hprR.pack(side = LEFT, expand=0,fill=X)
hprInterior.pack(side=TOP, expand=0,fill=X, padx=3, pady=3)
def createScaleEntry(self, contentFrame):
#################################################################
# createScaleEntry(self, contentFrame)
# This function will create an entry for setting the scale of the object.
# The entry type is Floater.
# It will set the callback function to setNodePathPosHprScale().
#################################################################
scaleInterior = Frame(contentFrame)
self.scale = self.createcomponent('scale', (), None,
Floater.Floater, (scaleInterior,),
text = 'Scale',
relief = FLAT,
min = 0.0001, value = self.nodePath.getScale().getX(),
resetValue = 1.0,
label_foreground = 'Blue')
self.scale['commandData'] = ['s']
self.scale['command'] = self.setNodePathPosHprScale
self.scale.pack(side=LEFT,expand=0,fill=X)
scaleInterior.pack(side=TOP,expand=0,fill=X, padx=3, pady=3)
def createColorEntry(self, contentFrame):
#################################################################
# createColorEntry(self, contentFrame)
# This function will create a color entry for setting the color of the object.
# The entry type is VectorWidgets.ColorEntry.
# It will set the callback function to setNodeColorVec().
#################################################################
color = self.nodePath.getColor()
print color
self.nodeColor = VectorWidgets.ColorEntry(
contentFrame, text = 'Node Color', value=[color.getX()*255,
color.getY()*255,
color.getZ()*255,
color.getW()*255])
self.nodeColor['command'] = self.setNodeColorVec
self.nodeColor['resetValue'] = [255,255,255,255]
self.nodeColor.place(anchor=NW,y=235)
self.bind(self.nodeColor, 'Set nodePath color')
self.nodeColor.pack(side=TOP,expand=0,fill=X, padx=3, pady=3)
return
def setNodeColorVec(self, color):
#################################################################
# setNodeColorVec(self, color)
# This function will set the color of the object
#################################################################
self.nodePath.setColor(color[0]/255.0,
color[1]/255.0,
color[2]/255.0,
color[3]/255.0)
return
def setNodePathPosHprScale(self, data, axis):
#################################################################
# setNodePathPosHprScale(self, data, axis)
# This function will set the position, orientation or scale of the object,
# using the "axis" parameter to decide which property should be set.
#################################################################
if axis == 'x':
self.nodePath.setX(data)
elif axis == 'y':
self.nodePath.setY(data)
elif axis == 'z':
self.nodePath.setZ(data)
elif axis == 'h':
self.nodePath.setH(data)
elif axis == 'p':
self.nodePath.setP(data)
elif axis == 'r':
self.nodePath.setR(data)
elif axis == 's':
self.nodePath.setScale(data)
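# A minimal sketch of how the entry widgets above drive this dispatcher
# (the values are assumptions, added only for illustration):
#   self.setNodePathPosHprScale(5.0, 'x')    # same as self.nodePath.setX(5.0)
#   self.setNodePathPosHprScale(90.0, 'h')   # same as self.nodePath.setH(90.0)
#   self.setNodePathPosHprScale(2.0, 's')    # same as self.nodePath.setScale(2.0)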
#### Curve property
def createCurveFrame(self, contentFrame):
#################################################################
# createCurveFrame(self, contentFrame)
# Draw the curve property frame
# This function will draw the property frame and the content of the curves.
# The target frame is passed in as a parameter.
#################################################################
if self.curveFrame==None:
self.curveFrame = Frame(contentFrame)
group = Pmw.Group(self.curveFrame,
tag_text='Motion Path List for this Node',
tag_font=('MSSansSerif', 10))
innerFrame = group.interior()
n = 0
for curve in self.info['curveList']:
n += 1
self.createEntryField(innerFrame,'Curve %d:' %n,
value = curve.getCurve(0).getName(),
command = None,
initialState='disabled',
side = 'top',
defaultButton = True,
buttonText = 'delete',
defaultFunction = lambda a = n, b = self : b.deleteCurve(a))
group.pack(side = TOP, fill = X, expand = 0,pady=3, padx=3)
self.curveFrame.pack(side = TOP, fill = X, expand = 0,pady=3, padx=3)
return
def deleteCurve(self, number = 0):
#################################################################
# deleteCurve(self, number = 0)
# Callback function; it will be called when the user clicks the "delete" button beside the curve name.
# This function will send a message to the sceneEditor to remove the target curve
# and will set a callback function waiting for the result.
#################################################################
widget = self.widgetsDict['Curve %d:' %number]
curveName = widget.getvalue()
self.accept('curveRemovedFromNode',self.redrawCurveProperty)
messenger.send('PW_removeCurveFromNode',[self.nodePath, curveName])
return
def redrawCurveProperty(self, nodePath, curveList):
#################################################################
# redrawCurveProperty(self, nodePath, curveList)
# Callback function; it will be called once the result is received from dataHolder.
# It will check the target nodePath first, then check whether the curve list is empty.
# If it is, the whole curve frame is deleted. If not, the data is renewed and the curve frame is redrawn.
#################################################################
self.name = self.nodePath.getName()
if self.name != nodePath.getName():
messenger.send('curveRemovedFromNode',[nodePath, curveList])
return
else:
self.ignore('curveRemovedFromNode')
if curveList!= None:
del self.info['curveList']
self.info['curveList'] = curveList
self.curveFrame.destroy()
del self.curveFrame
self.curveFrame = None
self.createCurveFrame(self.contentFrame)
else:
del self.info['curveList']
self.curveFrame.destroy()
del self.curveFrame
self.curveFrame = None
return
####
#### Anything about Camera will be here!
####
def cameraInterface(self, contentFrame):
#################################################################
# cameraInterface(self, contentFrame)
# Create the interface for camera node.
#################################################################
## Type entry : unchangeable
widget = self.createEntryField(contentFrame,'Type:',
value = self.type,
command = None,
initialState='disabled',
side = 'top')
## lens Type entry
widget = self.createEntryField(contentFrame, 'Lens Type:',
value = self.info['lensType'],
command = None,
initialState='disabled',
side = 'top')
## Pos
group = Pmw.Group(contentFrame,tag_text='Position',
tag_font=('MSSansSerif', 10))
self.createPosEntry(group.interior())
group.pack(side=TOP,fill = X, expand = 0, pady=3)
## Orientation
group = Pmw.Group(contentFrame,tag_text='Orientation',
tag_font=('MSSansSerif', 10))
self.createHprEntry(group.interior())
group.pack(side=TOP,fill = X, expand = 0, pady=3)
## near entry
group = Pmw.Group(contentFrame,tag_text='Lens Property',
tag_font=('MSSansSerif', 10))
lensFrame = group.interior()
widget = self.createEntryField(lensFrame, 'Near:',value = self.info['near'],
command = self.setCameraNear,
initialState='normal',
validate = Pmw.realvalidator,
side = 'top',
defaultButton = True,
defaultFunction = self.defaultCameraNear)
## far entry
widget = self.createEntryField(lensFrame, 'Far:',
value = self.info['far'],
command = self.setCameraFar,
initialState='normal',
side = 'top',
validate = Pmw.realvalidator,
defaultButton = True,
defaultFunction = self.defaultCameraFar)
## Hfov entry
widget = self.createEntryField(lensFrame, 'H.F.O.V.:',
value = self.info['hFov'],
command = self.setCameraFov,
validate = Pmw.realvalidator,
initialState='normal',
side = 'top',
defaultButton = True,
defaultFunction = self.defaultCameraHfov)
## Vfov entry
widget = self.createEntryField(lensFrame, 'V.F.O.V.:',
value = self.info['vFov'],
command = self.setCameraFov,
validate = Pmw.realvalidator,
initialState='normal',
side = 'top',
defaultButton = True,
defaultFunction = self.defaultCameraVfov)
## Film Size entry
frame = Frame(lensFrame)
widget = Label(frame, text = "Film Size:", font=('MSSansSerif', 10),width=12)
widget.pack(side=LEFT)
frame.pack(side = TOP, fill = X, expand = 0, pady=3)
frame = Frame(lensFrame)
widget = Pmw.EntryField(frame, labelpos='w', label_text = ' ',
value = self.info['FilmSize'].getX(),
entry_font=('MSSansSerif', 10),
label_font=('MSSansSerif', 10),
modifiedcommand=self.setCameraFilmSize, validate = Pmw.realvalidator,
entry_width = 8)
self.widgetsDict['FilmSizeX']=widget
widget.pack(side=LEFT, padx=3)
widget = Pmw.EntryField(frame, labelpos='w', label_text = ': ', value = self.info['FilmSize'].getY() ,
label_font=('MSSansSerif', 10),
entry_font=('MSSansSerif', 10),
modifiedcommand=self.setCameraFilmSize, validate = Pmw.realvalidator,
entry_width = 8)
self.widgetsDict['FilmSizeY']=widget
widget.pack(side=LEFT, padx=3)
widget = Button(frame, text='Default', font=('MSSansSerif', 10), command = self.defaultCameraFilmSize)
widget.pack(side=LEFT, padx=3)
self.widgetsDict['FilmSize'+'-'+'DefaultButton']=widget
frame.pack(side = TOP, fill = X, expand = 0,pady=0)
## Focal Length entry
widget = self.createEntryField(lensFrame, 'Focal Length:',
value = self.info['focalLength'],
command = self.setCameraFocalLength,
validate = Pmw.realvalidator,
initialState='normal',
side = 'top',
defaultButton = True,
defaultFunction = self.defaultCameraFocalLength)
group.pack(side = TOP, fill = X, expand = 0,pady=2)
def defaultCameraFar(self):
#################################################################
# defaultCameraFar(self)
# set the camera "Far" value back to default.
#################################################################
widget = self.widgetsDict['Far:']
widget.setvalue(base.cam.node().getLens().getDefaultFar())
return
def setCameraFar(self):
#################################################################
# setCameraFar(self)
# set the camera "Far" value to what now user has typed in the entry
#################################################################
if self.widgetsDict['Far:'].getvalue() != '':
value = float(self.widgetsDict['Far:'].getvalue())
else:
value = 0
camera.getChild(0).node().getLens().setFar(value)
return
def defaultCameraNear(self):
#################################################################
# defaultCameraNear(self)
# set the camera "Near" value back to default.
#################################################################
widget = self.widgetsDict['Near:']
widget.setvalue(base.cam.node().getLens().getDefaultNear())
return
def setCameraNear(self):
#################################################################
# setCameraNear(self)
# set the camera "Near" value to what now user has typed in the entry
#################################################################
if self.widgetsDict['Near:'].getvalue() != '':
value = float(self.widgetsDict['Near:'].getvalue())
else:
value = 0
camera.getChild(0).node().getLens().setNear(value)
return
def defaultCameraHfov(self):
#################################################################
# defaultCameraHfov(self)
# set the camera "Hfov" value back to default.
#################################################################
widget = self.widgetsDict['H.F.O.V.:']
widget.setvalue(45.0)
return
def setCameraFov(self):
#################################################################
# setCameraFov(self)
# set the camera "Fov" value to what now user has typed in the entry
#################################################################
if self.widgetsDict['H.F.O.V.:'].getvalue() != '':
value1 = float(self.widgetsDict['H.F.O.V.:'].getvalue())
else:
value1 = 0
if self.widgetsDict['V.F.O.V.:'].getvalue() != '':
value2 = float(self.widgetsDict['V.F.O.V.:'].getvalue())
else:
value2 = 0
camera.getChild(0).node().getLens().setFov(VBase2(value1,value2))
return
def defaultCameraVfov(self):
#################################################################
# defaultCameraVfov(self)
# set the camera "Vfov" value back to default.
#################################################################
widget = self.widgetsDict['V.F.O.V.:']
widget.setvalue(34.51587677)
return
def defaultCameraFocalLength(self):
#################################################################
# defaultCameraFocalLength(self)
# set the camera "Focal Length" value back to default.
#################################################################
widget = self.widgetsDict['Focal Length:']
widget.setvalue(1.20710682869)
return
def setCameraFocalLength(self):
#################################################################
# setCameraFocalLength(self)
# set the camera "Focal Length" value to what now user has typed in the entry
#################################################################
if self.widgetsDict['Focal Length:'].getvalue() != '':
value = float(self.widgetsDict['Focal Length:'].getvalue())
else:
value = 0
camera.getChild(0).node().getLens().setFocalLength(value)
camera.getChild(0).node().getLens().setFilmSize(VBase2(float(self.widgetsDict['FilmSizeX'].getvalue()),float(self.widgetsDict['FilmSizeY'].getvalue())))
return
def defaultCameraFilmSize(self):
#################################################################
# defaultCameraFilmSize(self)
# set the camera "Film Size" value back to default.
#################################################################
widget = self.widgetsDict['FilmSizeX']
widget.setvalue(1)
widget = self.widgetsDict['FilmSizeY']
widget.setvalue(0.75)
return
def setCameraFilmSize(self):
#################################################################
# setCameraFilmSize(self)
# set the camera "Film Size" value to what now user has typed in the entry
#################################################################
if self.widgetsDict['FilmSizeX'].getvalue() != '':
value1 = float(self.widgetsDict['FilmSizeX'].getvalue())
else:
value1 = 0
if self.widgetsDict['FilmSizeY'].getvalue() != '':
value2 = float(self.widgetsDict['FilmSizeY'].getvalue())
else:
value2 = 0
camera.getChild(0).node().getLens().setFilmSize(VBase2(value1,value2))
return
####
#### Anything about Model & Actor will be here!
####
def modelInterface(self, contentFrame):
#################################################################
# modelInterface(self, contentFrame)
# Create the basic interface for ModelRoot Type Node
#################################################################
widget = self.createEntryField(contentFrame,'Type:',
value = self.type,
command = None,
initialState='disabled',
side = 'top')
widget = self.createEntryField(contentFrame,'Model File:',
value = self.info['filePath'].getFullpath(),
command = None,
initialState='disabled',
side = 'top',
defaultButton = False,
buttonText = 'Change',
defaultFunction = None)
group = Pmw.Group(contentFrame,tag_text='Position',
tag_font=('MSSansSerif', 10))
self.createPosEntry(group.interior())
group.pack(side=TOP,fill = X, expand = 0, pady=3)
group = Pmw.Group(contentFrame,tag_text='Orientation',
tag_font=('MSSansSerif', 10))
self.createHprEntry(group.interior())
group.pack(side=TOP,fill = X, expand = 0, pady=3)
self.createScaleEntry(contentFrame)
group = Pmw.Group(contentFrame,tag_text='Color',
tag_font=('MSSansSerif', 10))
frame = group.interior()
self.createColorEntry(frame)
self.varAlpha = IntVar()
self.varAlpha.set(self.nodePath.hasTransparency())
checkButton = Checkbutton(frame, text='Enable Alpha',
variable=self.varAlpha, command=self.toggleAlpha)
checkButton.pack(side=RIGHT,pady=3)
group.pack(side=TOP,fill = X, expand = 0, pady=3)
return
def toggleAlpha(self):
#################################################################
# toggleAlpha(self)
# This function will toggle the object's alpha value.
# It will also reset the "Bin" to
# "fixed" if the user enables alpha for this object.
#################################################################
if self.nodePath.hasTransparency():
self.nodePath.clearTransparency()
self.nodePath.setBin("default", 0)
else:
self.nodePath.setTransparency(True)
self.nodePath.setBin("fixed", 1)
return
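# A minimal sketch (assuming `np` is any NodePath) of the same toggle logic,
# added here only for illustration:
#   np.setTransparency(True)     # enable alpha blending
#   np.setBin("fixed", 1)        # draw after opaque geometry
#   np.clearTransparency()       # disable alpha again
#   np.setBin("default", 0)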
def actorInterface(self, contentFrame):
#################################################################
# actorInterface(self, contentFrame)
# Create the basic interface for Actor Type Node
#################################################################
self.animFrame = None
animeDict = self.info['animDict']
if len(animeDict)==0:
return
self.animFrame = Frame(contentFrame)
group = Pmw.Group(self.animFrame,tag_text='Animations',
tag_font=('MSSansSerif', 10))
innerFrame = group.interior()
for name in animeDict:
self.createEntryField(innerFrame, name,
value = animeDict[name],
command = None,
initialState='disabled',
side = 'top',
defaultButton = True,
buttonText = 'Remove',
defaultFunction = lambda a = name, b = self : b.deleteAnimation(a))
group.pack(side=TOP,fill = X, expand = 0, pady=3)
self.animFrame.pack(side=TOP,fill = X, expand = 0, pady=3)
return
def deleteAnimation(self, anim):
#################################################################
# deleteAnimation(self, anim)
# This function will delete the animation named "anim" in this actor,
# but the animation is not removed directly by this function.
# Instead, it sends out a message to notify dataHolder to remove this animation.
#################################################################
print anim
widget = self.widgetsDict[anim]
self.accept('animRemovedFromNode',self.redrawAnimProperty)
messenger.send('PW_removeAnimFromNode',[self.name, anim])
return
def redrawAnimProperty(self, nodePath, animDict):
#################################################################
# redrawAnimProperty(self, nodePath, animDict)
# Callback function; it will be called once the result is received from dataHolder.
# It will check the target nodePath first, then check whether the animation dict is empty.
# If it is, the whole animation frame is deleted. If not, the data is renewed and the animation frame is redrawn.
#################################################################
self.name = self.nodePath.getName()
if self.name != nodePath.getName():
messenger.send('animRemovedFromNode',[nodePath, animDict])
return
else:
self.ignore('animRemovedFromNode')
if len(animDict)!= 0:
del self.info['animDict']
self.info['animDict'] = animDict
self.animFrame.destroy()
del self.animFrame
self.animFrame = None
self.actorInterface(self.contentFrame)
else:
del self.info['animDict']
self.animFrame.destroy()
del self.animFrame
self.animFrame = None
return
####
#### Anything about Light will be here!
####
def lightInterface(self, contentFrame):
#################################################################
# lightInterface(self, contentFrame)
# Create the basic interface for light Type Node
#################################################################
widget = self.createEntryField(contentFrame,'Type:',
value = self.nodePath.node().getType().getName(),
command = None,
initialState='disabled',
side = 'top')
self.lightNode = self.info['lightNode']
lightingGroup = Pmw.Group(contentFrame,tag_pyclass=None)
frame = lightingGroup.interior()
self.lightColor = seColorEntry(
frame, text = 'Light Color', label_font=('MSSansSerif', 10),
value=[self.lightNode.lightcolor.getX()*255, self.lightNode.lightcolor.getY()*255,self.lightNode.lightcolor.getZ()*255,0])
self.lightColor['command'] = self.setLightingColorVec
self.lightColor['resetValue'] = [0.3*255,0.3*255,0.3*255,0]
self.lightColor.pack(side=TOP, fill=X,expand=1, padx = 2, pady =2)
self.bind(self.lightColor, 'Set light color')
self.varActive = IntVar()
self.varActive.set(self.lightNode.active)
checkButton = Checkbutton(frame, text='Enable This Light',
variable=self.varActive, command=self.toggleLight)
checkButton.pack(side=RIGHT,pady=3)
lightingGroup.pack(side=TOP, fill = X, expand =1)
# Directional light controls
if self.lightNode.type == 'directional':
lightingGroup = Pmw.Group(contentFrame,tag_pyclass=None)
directionalPage = lightingGroup.interior()
self.dSpecularColor = seColorEntry(
directionalPage, text = 'Specular Color', label_font=('MSSansSerif', 10),value = [self.lightNode.specularColor.getX()*255,self.lightNode.specularColor.getY()*255,self.lightNode.specularColor.getZ()*255,0])
self.dSpecularColor['command'] = self.setSpecularColor
self.dSpecularColor.pack(fill = X, expand = 1)
self.bind(self.dSpecularColor,
'Set directional light specular color')
self.dPosition = VectorWidgets.Vector3Entry(
directionalPage, text = 'Position', label_font=('MSSansSerif', 10),value = [self.lightNode.getPosition().getX(),self.lightNode.getPosition().getY(),self.lightNode.getPosition().getZ()])
self.dPosition['command'] = self.setPosition
self.dPosition['resetValue'] = [0,0,0,0]
self.dPosition.pack(fill = X, expand = 1)
self.bind(self.dPosition, 'Set directional light position')
self.dOrientation = VectorWidgets.Vector3Entry(
directionalPage, text = 'Orientation', label_font=('MSSansSerif', 10),
value = [self.lightNode.getOrientation().getX(),self.lightNode.getOrientation().getY(),self.lightNode.getOrientation().getZ(),0])
self.dOrientation['command'] = self.setOrientation
self.dOrientation['resetValue'] = [0,0,0,0]
self.dOrientation.pack(fill = X, expand = 1)
self.bind(self.dOrientation, 'Set directional light orientation')
lightingGroup.pack(side=TOP, fill = X, expand =1)
elif self.lightNode.type == 'point':
# Point light controls
lightingGroup = Pmw.Group(contentFrame,tag_pyclass=None)
pointPage = lightingGroup.interior()
self.pSpecularColor = seColorEntry(
pointPage, text = 'Specular Color', label_font=('MSSansSerif', 10),
value = [self.lightNode.specularColor.getX(),self.lightNode.specularColor.getY(),self.lightNode.specularColor.getZ(),0])
self.pSpecularColor['command'] = self.setSpecularColor
self.pSpecularColor.pack(fill = X, expand = 1)
self.bind(self.pSpecularColor,
'Set point light specular color')
self.pPosition = VectorWidgets.Vector3Entry(
pointPage, text = 'Position', label_font=('MSSansSerif', 10),
value = [self.lightNode.getPosition().getX(),self.lightNode.getPosition().getY(),self.lightNode.getPosition().getZ(),0])
self.pPosition['command'] = self.setPosition
self.pPosition['resetValue'] = [0,0,0,0]
self.pPosition.pack(fill = X, expand = 1)
self.bind(self.pPosition, 'Set point light position')
self.pConstantAttenuation = Slider.Slider(
pointPage,
text = 'Constant Attenuation', label_font=('MSSansSerif', 10),
max = 1.0,
value = self.lightNode.constant)
self.pConstantAttenuation['command'] = self.setConstantAttenuation
self.pConstantAttenuation.pack(fill = X, expand = 1)
self.bind(self.pConstantAttenuation,
'Set point light constant attenuation')
self.pLinearAttenuation = Slider.Slider(
pointPage,
text = 'Linear Attenuation', label_font=('MSSansSerif', 10),
max = 1.0,
value = self.lightNode.linear)
self.pLinearAttenuation['command'] = self.setLinearAttenuation
self.pLinearAttenuation.pack(fill = X, expand = 1)
self.bind(self.pLinearAttenuation,
'Set point light linear attenuation')
self.pQuadraticAttenuation = Slider.Slider(
pointPage,
text = 'Quadratic Attenuation', label_font=('MSSansSerif', 10),
max = 1.0,
value = self.lightNode.quadratic)
self.pQuadraticAttenuation['command'] = self.setQuadraticAttenuation
self.pQuadraticAttenuation.pack(fill = X, expand = 1)
self.bind(self.pQuadraticAttenuation,
'Set point light quadratic attenuation')
lightingGroup.pack(side=TOP, fill = X, expand =1)
elif self.lightNode.type == 'spot':
# Spot light controls
lightingGroup = Pmw.Group(contentFrame,tag_pyclass=None)
spotPage = lightingGroup.interior()
self.sSpecularColor = seColorEntry(
spotPage, text = 'Specular Color', label_font=('MSSansSerif', 10),
value = [self.lightNode.specularColor.getX()*255,self.lightNode.specularColor.getY()*255,self.lightNode.specularColor.getZ()*255,0])
self.sSpecularColor['command'] = self.setSpecularColor
self.sSpecularColor.pack(fill = X, expand = 1)
self.bind(self.sSpecularColor,
'Set spot light specular color')
self.sConstantAttenuation = Slider.Slider(
spotPage,
text = 'Constant Attenuation', label_font=('MSSansSerif', 10),
max = 1.0,
value = self.lightNode.constant)
self.sConstantAttenuation['command'] = self.setConstantAttenuation
self.sConstantAttenuation.pack(fill = X, expand = 1)
self.bind(self.sConstantAttenuation,
'Set spot light constant attenuation')
self.sLinearAttenuation = Slider.Slider(
spotPage,
text = 'Linear Attenuation', label_font=('MSSansSerif', 10),
max = 1.0,
value = self.lightNode.linear)
self.sLinearAttenuation['command'] = self.setLinearAttenuation
self.sLinearAttenuation.pack(fill = X, expand = 1)
self.bind(self.sLinearAttenuation,
'Set spot light linear attenuation')
self.sQuadraticAttenuation = Slider.Slider(
spotPage,
text = 'Quadratic Attenuation', label_font=('MSSansSerif', 10),
max = 1.0,
value = self.lightNode.quadratic)
self.sQuadraticAttenuation['command'] = self.setQuadraticAttenuation
self.sQuadraticAttenuation.pack(fill = X, expand = 1)
self.bind(self.sQuadraticAttenuation,
'Set spot light quadratic attenuation')
self.sExponent = Slider.Slider(
spotPage,
text = 'Exponent', label_font=('MSSansSerif', 10),
max = 1.0,
value = self.lightNode.exponent)
self.sExponent['command'] = self.setExponent
self.sExponent.pack(fill = X, expand = 1)
self.bind(self.sExponent,
'Set spot light exponent')
lightingGroup.pack(side=TOP, fill = X, expand =1)
return
def setLightingColorVec(self,color):
if self.lightNode==None:
return
self.lightNode.setColor(VBase4((color[0]/255.0),(color[1]/255.0),(color[2]/255.0),1))
return
def setSpecularColor(self,color):
if self.lightNode==None:
return
self.lightNode.setSpecColor(VBase4((color[0]/255.0),(color[1]/255.0),(color[2]/255.0),1))
return
def setPosition(self,position):
if self.lightNode==None:
return
self.lightNode.setPosition(Point3(position[0],position[1],position[2]))
return
def setOrientation(self, orient):
if self.lightNode==None:
return
self.lightNode.setOrientation(Vec3(orient[0],orient[1],orient[2]))
return
def setConstantAttenuation(self, value):
self.lightNode.setConstantAttenuation(value)
return
def setLinearAttenuation(self, value):
self.lightNode.setLinearAttenuation(value)
return
def setQuadraticAttenuation(self, value):
self.lightNode.setQuadraticAttenuation(value)
return
def setExponent(self, value):
self.lightNode.setExponent(value)
return
def toggleLight(self):
messenger.send('PW_toggleLight',[self.lightNode])
return
####
#### Anything about Dummy will be here!
####
def dummyInterface(self, contentFrame):
#################################################################
# dummyInterface(self, contentFrame)
# Create the basic interface for dummy Type Node
#################################################################
'''dummyInterface(self, contentFrame)
Create the basic interface for dummy Node
'''
widget = self.createEntryField(contentFrame,'Type:',
value = 'Dummy Nodepath',
command = None,
initialState='disabled',
side = 'top')
group = Pmw.Group(contentFrame,tag_text='Position',
tag_font=('MSSansSerif', 10))
self.createPosEntry(group.interior())
group.pack(side=TOP,fill = X, expand = 0, pady=3)
group = Pmw.Group(contentFrame,tag_text='Orientation',
tag_font=('MSSansSerif', 10))
self.createHprEntry(group.interior())
group.pack(side=TOP,fill = X, expand = 0, pady=3)
self.createScaleEntry(contentFrame)
group = Pmw.Group(contentFrame,tag_text='Color',
tag_font=('MSSansSerif', 10))
frame = group.interior()
self.createColorEntry(frame)
self.varAlpha = IntVar()
self.varAlpha.set(self.nodePath.hasTransparency())
checkButton = Checkbutton(frame, text='Enable Alpha',
variable=self.varAlpha, command=self.toggleAlpha)
checkButton.pack(side=RIGHT,pady=3)
group.pack(side=TOP,fill = X, expand = 0, pady=3)
return
#########
####### This will be called when the user tries to open the property window for the SEditor node
#########
def gridInterface(self, contentFrame):
#################################################################
# gridInterface(self, contentFrame)
# Create the basic interface for the grid (which is adapted from directGrid)
#################################################################
group = Pmw.Group(contentFrame,tag_text='Grid Property',
tag_font=('MSSansSerif', 10))
group.pack(side=TOP,fill = X, expand = 0, padx = 3, pady=3)
gridPage = group.interior()
self.xyzSnap = BooleanVar()
self.xyzSnapButton = Checkbutton(
gridPage,
text = 'XYZ Snap',
anchor = 'w', justify = LEFT,
variable = self.xyzSnap,
command = self.toggleXyzSnap)
self.xyzSnapButton.pack(fill = X, expand = 0, pady=3)
self.hprSnap = BooleanVar()
self.hprSnapButton = Checkbutton(
gridPage,
text = 'HPR Snap',
anchor = 'w', justify = LEFT,
variable = self.hprSnap,
command = self.toggleHprSnap)
self.hprSnapButton.pack(fill = X, expand = 0, pady=3)
self.xyzSnap.set(SEditor.grid.getXyzSnap())
self.hprSnap.set(SEditor.grid.getHprSnap())
self.gridSpacing = Floater.Floater(
gridPage,
text = 'Grid Spacing',
min = 0.1,
value = SEditor.grid.getGridSpacing())
self.gridSpacing['command'] = SEditor.grid.setGridSpacing
self.gridSpacing.pack(fill = X, expand = 0, pady=3)
self.gridSize = Floater.Floater(
gridPage,
text = 'Grid Size',
min = 1.0,
value = SEditor.grid.getGridSize())
self.gridSize['command'] = SEditor.grid.setGridSize
self.gridSize.pack(fill = X, expand = 0, pady=3)
self.gridSnapAngle = Dial.AngleDial(
gridPage,
text = 'Snap Angle',
style = 'mini',
value = SEditor.grid.getSnapAngle())
self.gridSnapAngle['command'] = SEditor.grid.setSnapAngle
self.gridSnapAngle.pack(fill = X, expand = 0, pady=3)
return
def toggleXyzSnap(self):
SEditor.grid.setXyzSnap(self.xyzSnap.get())
return
def toggleHprSnap(self):
SEditor.grid.setHprSnap(self.hprSnap.get())
return
###### Collision Section!!!!
def collisionInterface(self, contentFrame):
#################################################################
# collisionInterface(self, contentFrame)
# Create the basic interface for CollisionNode Type Node
#################################################################
collisionNode = self.info['collisionNode']
self.collisionObj = collisionNode.node().getSolid(0)
widget = self.createEntryField(contentFrame,'Node Type:',
value = self.type,
command = None,
initialState='disabled',
side = 'top')
cType = self.collisionObj.getType().getName()
widget = self.createEntryField(contentFrame,'Object Type:',
value = cType,
command = None,
initialState='disabled',
side = 'top')
group = Pmw.Group(contentFrame,tag_text='Position',
tag_font=('MSSansSerif', 10))
self.createPosEntry(group.interior())
group.pack(side=TOP,fill = X, expand = 0, pady=3)
group = Pmw.Group(contentFrame,tag_text='Orientation',
tag_font=('MSSansSerif', 10))
self.createHprEntry(group.interior())
group.pack(side=TOP,fill = X, expand = 0, pady=3)
self.createScaleEntry(contentFrame)
collisionGroup = Pmw.Group(contentFrame,tag_text='Collision Object Properties',
tag_font=('MSSansSerif', 10))
cObjFrame = collisionGroup.interior()
### Generate a different interface for each kind of Collision Object
### Yeah, yeah. I know this part of code looks so ugly...
if cType == 'CollisionSphere':
centerPos = self.collisionObj.getCenter()
radius = self.collisionObj.getRadius()
group = Pmw.Group(cObjFrame,tag_text='Origin',
tag_font=('MSSansSerif', 10))
posInterior = Frame(group.interior())
self.cPosX = self.createcomponent('originX', (), None,
Floater.Floater, (posInterior,),
text = 'X', relief = FLAT,
value = centerPos.getX(),
label_foreground = 'Red',
entry_width = 9)
self.cPosX['commandData'] = ['sphere-o']
self.cPosX['command'] = self.setCollisionPosHprScale
self.cPosX.pack(side=LEFT,expand=0,fill=X, padx=1)
self.cPosY = self.createcomponent('originY', (), None,
Floater.Floater, (posInterior,),
text = 'Y', relief = FLAT,
value = centerPos.getY(),
label_foreground = '#00A000',
entry_width = 9)
self.cPosY['commandData'] = ['sphere-o']
self.cPosY['command'] = self.setCollisionPosHprScale
self.cPosY.pack(side=LEFT, expand=0,fill=X, padx=1)
self.cPosZ = self.createcomponent('originZ', (), None,
Floater.Floater, (posInterior,),
text = 'Z', relief = FLAT,
value = centerPos.getZ(),
label_foreground = 'Blue',
entry_width = 9)
self.cPosZ['commandData'] = ['sphere-o']
self.cPosZ['command'] = self.setCollisionPosHprScale
self.cPosZ.pack(side=LEFT, expand=0,fill=X, padx=1)
posInterior.pack(side=TOP, expand=0,fill=X, padx=3, pady=3)
group.pack(side=TOP,fill = X, expand = 0, pady=3)
scaleInterior = Frame(cObjFrame)
self.scaleS = self.createcomponent('radius', (), None,
Floater.Floater, (scaleInterior,),
text = 'Radius',
relief = FLAT,
min = 0.0001, value = radius,
resetValue = 1.0,
label_foreground = 'Blue')
self.scaleS['commandData'] = ['sphere-radius']
self.scaleS['command'] = self.setCollisionPosHprScale
self.scaleS.pack(side=LEFT,expand=0,fill=X)
scaleInterior.pack(side=TOP,expand=0,fill=X, padx=3, pady=3)
pass
elif cType == 'CollisionPolygon':
frame = Frame(cObjFrame)
label = Label(frame, text= "Sorry!",font=('MSSansSerif', 10),
borderwidth=5)
label.pack(side=LEFT)
frame.pack(side=TOP, fill=X, expand=True)
frame = Frame(cObjFrame)
label = Label(frame, text= "There is no way to change",font=('MSSansSerif', 10),
borderwidth=5)
label.pack(side=LEFT)
frame.pack(side=TOP, fill=X, expand=True)
frame = Frame(cObjFrame)
label = Label(frame, text= "the basic properties of Collision Polygon!",font=('MSSansSerif', 10),
borderwidth=5)
label.pack(side=LEFT)
frame.pack(side=TOP, fill=X, expand=True)
frame = Frame(cObjFrame)
label = Label(frame, text= "If you really need to change, recreate one...",font=('MSSansSerif', 10),
borderwidth=5)
label.pack(side=LEFT)
frame.pack(side=TOP, fill=X, expand=True)
pass
elif cType == 'CollisionSegment':
pointA = self.collisionObj.getPointA()
pointB = self.collisionObj.getPointB()
group = Pmw.Group(cObjFrame,tag_text='Point A',
tag_font=('MSSansSerif', 10))
posInterior = Frame(group.interior())
self.cPosX = self.createcomponent('pointA-X', (), None,
Floater.Floater, (posInterior,),
text = 'X', relief = FLAT,
value = pointA.getX(),
label_foreground = 'Red',
entry_width = 9)
self.cPosX['commandData'] = ['segment-A']
self.cPosX['command'] = self.setCollisionPosHprScale
self.cPosX.pack(side=LEFT,expand=0,fill=X, padx=1)
self.cPosY = self.createcomponent('pointA-Y', (), None,
Floater.Floater, (posInterior,),
text = 'Y', relief = FLAT,
value = pointA.getY(),
label_foreground = '#00A000',
entry_width = 9)
self.cPosY['commandData'] = ['segment-A']
self.cPosY['command'] = self.setCollisionPosHprScale
self.cPosY.pack(side=LEFT, expand=0,fill=X, padx=1)
self.cPosZ = self.createcomponent('pointA-Z', (), None,
Floater.Floater, (posInterior,),
text = 'Z', relief = FLAT,
value = pointA.getZ(),
label_foreground = 'Blue',
entry_width = 9)
self.cPosZ['commandData'] = ['segment-A']
self.cPosZ['command'] = self.setCollisionPosHprScale
self.cPosZ.pack(side=LEFT, expand=0,fill=X, padx=1)
posInterior.pack(side=TOP, expand=0,fill=X, padx=3, pady=3)
group.pack(side=TOP,fill = X, expand = 0, pady=3)
group = Pmw.Group(cObjFrame,tag_text='Point B',
tag_font=('MSSansSerif', 10))
posInterior = Frame(group.interior())
self.cPosXB = self.createcomponent('pointB-X', (), None,
Floater.Floater, (posInterior,),
text = 'X', relief = FLAT,
value = pointB.getX(),
label_foreground = 'Red',
entry_width = 9)
self.cPosXB['commandData'] = ['segment-B']
self.cPosXB['command'] = self.setCollisionPosHprScale
self.cPosXB.pack(side=LEFT,expand=0,fill=X, padx=1)
self.cPosYB = self.createcomponent('pointB-Y', (), None,
Floater.Floater, (posInterior,),
text = 'Y', relief = FLAT,
value = pointB.getY(),
label_foreground = '#00A000',
entry_width = 9)
self.cPosYB['commandData'] = ['segment-B']
self.cPosYB['command'] = self.setCollisionPosHprScale
self.cPosYB.pack(side=LEFT, expand=0,fill=X, padx=1)
self.cPosZB = self.createcomponent('pointB-Z', (), None,
Floater.Floater, (posInterior,),
text = 'Z', relief = FLAT,
value = pointB.getZ(),
label_foreground = 'Blue',
entry_width = 9)
self.cPosZB['commandData'] = ['segment-B']
self.cPosZB['command'] = self.setCollisionPosHprScale
self.cPosZB.pack(side=LEFT, expand=0,fill=X, padx=1)
posInterior.pack(side=TOP, expand=0,fill=X, padx=3, pady=3)
group.pack(side=TOP,fill = X, expand = 0, pady=3)
pass
elif cType == 'CollisionRay':
origin = self.collisionObj.getOrigin()
direction = self.collisionObj.getDirection()
group = Pmw.Group(cObjFrame,tag_text='Origin Point',
tag_font=('MSSansSerif', 10))
posInterior = Frame(group.interior())
self.cPosX = self.createcomponent('origin-X', (), None,
Floater.Floater, (posInterior,),
text = 'X', relief = FLAT,
value = origin.getX(),
label_foreground = 'Red',
entry_width = 9)
self.cPosX['commandData'] = ['ray-A']
self.cPosX['command'] = self.setCollisionPosHprScale
self.cPosX.pack(side=LEFT,expand=0,fill=X, padx=1)
self.cPosY = self.createcomponent('origin-Y', (), None,
Floater.Floater, (posInterior,),
text = 'Y', relief = FLAT,
value = origin.getY(),
label_foreground = '#00A000',
entry_width = 9)
self.cPosY['commandData'] = ['ray-A']
self.cPosY['command'] = self.setCollisionPosHprScale
self.cPosY.pack(side=LEFT, expand=0,fill=X, padx=1)
self.cPosZ = self.createcomponent('origin-Z', (), None,
Floater.Floater, (posInterior,),
text = 'Z', relief = FLAT,
value = origin.getZ(),
label_foreground = 'Blue',
entry_width = 9)
self.cPosZ['commandData'] = ['ray-A']
self.cPosZ['command'] = self.setCollisionPosHprScale
self.cPosZ.pack(side=LEFT, expand=0,fill=X, padx=1)
posInterior.pack(side=TOP, expand=0,fill=X, padx=3, pady=3)
group.pack(side=TOP,fill = X, expand = 0, pady=3)
group = Pmw.Group(cObjFrame,tag_text='Direction',
tag_font=('MSSansSerif', 10))
posInterior = Frame(group.interior())
self.cPosXB = self.createcomponent('direction-X', (), None,
Floater.Floater, (posInterior,),
text = 'X', relief = FLAT,
value = direction.getX(),
label_foreground = 'Red',
entry_width = 9)
self.cPosXB['commandData'] = ['ray-B']
self.cPosXB['command'] = self.setCollisionPosHprScale
self.cPosXB.pack(side=LEFT,expand=0,fill=X, padx=1)
self.cPosYB = self.createcomponent('direction-Y', (), None,
Floater.Floater, (posInterior,),
text = 'Y', relief = FLAT,
value = direction.getY(),
label_foreground = '#00A000',
entry_width = 9)
self.cPosYB['commandData'] = ['ray-B']
self.cPosYB['command'] = self.setCollisionPosHprScale
self.cPosYB.pack(side=LEFT, expand=0,fill=X, padx=1)
self.cPosZB = self.createcomponent('direction-Z', (), None,
Floater.Floater, (posInterior,),
text = 'Z', relief = FLAT,
value = direction.getZ(),
label_foreground = 'Blue',
entry_width = 9)
self.cPosZB['commandData'] = ['ray-B']
self.cPosZB['command'] = self.setCollisionPosHprScale
self.cPosZB.pack(side=LEFT, expand=0,fill=X, padx=1)
posInterior.pack(side=TOP, expand=0,fill=X, padx=3, pady=3)
group.pack(side=TOP,fill = X, expand = 0, pady=3)
pass
collisionGroup.pack(side=TOP,fill = X, expand = 0, pady=3)
return
def setCollisionPosHprScale(self, data, dataType):
#################################################################
# setCollisionPosHprScale(self, data, dataType)
# Well, the reason we don't reuse the same function as for other nodePaths
# is that each type of collision object has its own unique properties and way to set values.
# So, they have to be handled separately from other nodePaths.
#################################################################
if dataType == 'sphere-o':
origin = Point3(float(self.cPosX._entry.get()),
float(self.cPosY._entry.get()),
float(self.cPosZ._entry.get()))
self.collisionObj.setCenter(origin)
elif dataType == 'sphere-radius':
self.collisionObj.setRadius(data)
elif dataType == 'segment-A':
pointA = Point3(float(self.cPosX._entry.get()),
float(self.cPosY._entry.get()),
float(self.cPosZ._entry.get()))
self.collisionObj.setPointA(pointA)
elif dataType == 'segment-B':
pointB = Point3(float(self.cPosXB._entry.get()),
float(self.cPosYB._entry.get()),
float(self.cPosZB._entry.get()))
self.collisionObj.setPointB(pointB)
elif dataType == 'ray-A':
pointA = Point3(float(self.cPosX._entry.get()),
float(self.cPosY._entry.get()),
float(self.cPosZ._entry.get()))
self.collisionObj.setOrigin(pointA)
elif dataType == 'ray-B':
pointB = Vec3(float(self.cPosXB._entry.get()),
float(self.cPosYB._entry.get()),
float(self.cPosZB._entry.get()))
self.collisionObj.setDirection(pointB)
return
#################################################################
#################################################################
# The functions below are all callback functions.
# They will be called when the user has manipulated the node on the screen.
# The message itself is sent by a task called monitorSelectedNode in the sceneEditor.
#################################################################
#################################################################
def trackDataFromSceneCamera(self, pos=Point3(0,0,0), hpr=Vec3(0,0,0), scale=Point3(0,0,0)):
self.posX.set(pos.getX())
self.posY.set(pos.getY())
self.posZ.set(pos.getZ())
self.hprH.set(hpr.getX())
self.hprP.set(hpr.getY())
self.hprR.set(hpr.getZ())
return
def trackDataFromSceneModel(self, pos=Point3(0,0,0), hpr=Vec3(0,0,0), scale=Point3(0,0,0)):
self.posX.set(pos.getX())
self.posY.set(pos.getY())
self.posZ.set(pos.getZ())
self.hprH.set(hpr.getX())
self.hprP.set(hpr.getY())
self.hprR.set(hpr.getZ())
self.scale.set(scale.getX())
return
def trackDataFromSceneActor(self, pos=Point3(0,0,0), hpr=Vec3(0,0,0), scale=Point3(0,0,0)):
self.posX.set(pos.getX())
self.posY.set(pos.getY())
self.posZ.set(pos.getZ())
self.hprH.set(hpr.getX())
self.hprP.set(hpr.getY())
self.hprR.set(hpr.getZ())
self.scale.set(scale.getX())
return
def trackDataFromSceneLight(self, pos=Point3(0,0,0), hpr=Vec3(0,0,0), scale=Point3(0,0,0)):
if self.lightNode.type == 'directional':
self.dPosition.set([pos.getX(),pos.getY(),pos.getZ()])
self.dOrientation.set([hpr.getX(),hpr.getY(),hpr.getZ()])
pass
elif self.lightNode.type == 'point':
self.pPosition.set([pos.getX(),pos.getY(),pos.getZ()])
pass
return
def trackDataFromSceneDummy(self, pos=Point3(0,0,0), hpr=Vec3(0,0,0), scale=Point3(0,0,0)):
self.posX.set(pos.getX())
self.posY.set(pos.getY())
self.posZ.set(pos.getZ())
self.hprH.set(hpr.getX())
self.hprP.set(hpr.getY())
self.hprR.set(hpr.getZ())
self.scale.set(scale.getX())
return
def trackDataFromSceneCollision(self, pos=Point3(0,0,0), hpr=Vec3(0,0,0), scale=Point3(0,0,0)):
self.posX.set(pos.getX())
self.posY.set(pos.getY())
self.posZ.set(pos.getZ())
self.hprH.set(hpr.getX())
self.hprP.set(hpr.getY())
self.hprR.set(hpr.getZ())
self.scale.set(scale.getX())
return
| bsd-3-clause |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/rsa/_version133.py | 82 | 11764 | # -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Deprecated version of the RSA module
.. deprecated:: 2.0
This submodule is deprecated and will be completely removed as of version 4.0.
Module for calculating large primes, and RSA encryption, decryption,
signing and verification. Includes generating public and private keys.
WARNING: this code implements the mathematics of RSA. It is not suitable for
real-world secure cryptography purposes. It has not been reviewed by a security
expert. It does not include padding of data. There are many ways in which the
output of this module, when used without any modification, can be successfully
attacked.
"""
__author__ = "Sybren Stuvel, Marloes de Boer and Ivo Tamboer"
__date__ = "2010-02-05"
__version__ = '1.3.3'
# NOTE: Python's modulo can return negative numbers. We compensate for
# this behaviour using the abs() function
try:
import cPickle as pickle
except ImportError:
import pickle
from pickle import dumps, loads
import base64
import math
import os
import random
import sys
import types
import zlib
from rsa._compat import byte
# Display a warning that this insecure version is imported.
import warnings
warnings.warn('Insecure version of the RSA module is imported as %s, be careful'
% __name__)
warnings.warn('This submodule is deprecated and will be completely removed as of version 4.0.',
DeprecationWarning)
def gcd(p, q):
"""Returns the greatest common divisor of p and q
>>> gcd(42, 6)
6
"""
if p<q: return gcd(q, p)
if q == 0: return p
return gcd(q, abs(p%q))
def bytes2int(bytes):
"""Converts a list of bytes or a string to an integer
"""
if not (type(bytes) is types.ListType or type(bytes) is types.StringType):
raise TypeError("You must pass a string or a list")
# Convert byte stream to integer
integer = 0
for byte in bytes:
integer *= 256
if type(byte) is types.StringType: byte = ord(byte)
integer += byte
return integer
def int2bytes(number):
"""Converts a number to a string of bytes
"""
if not (type(number) is types.LongType or type(number) is types.IntType):
raise TypeError("You must pass a long or an int")
string = ""
while number > 0:
string = "%s%s" % (byte(number & 0xFF), string)
number /= 256
return string
def fast_exponentiation(a, p, n):
"""Calculates r = a^p mod n
"""
result = a % n
remainders = []
while p != 1:
remainders.append(p & 1)
p = p >> 1
while remainders:
rem = remainders.pop()
result = ((a ** rem) * result ** 2) % n
return result
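# Illustrative check (not part of the original module): the function above
# matches Python's built-in three-argument pow for modular exponentiation, e.g.
#   fast_exponentiation(4, 13, 497) == pow(4, 13, 497) == 445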
def read_random_int(nbits):
"""Reads a random integer of approximately nbits bits rounded up
to whole bytes"""
nbytes = ceil(nbits/8.)
randomdata = os.urandom(nbytes)
return bytes2int(randomdata)
def ceil(x):
"""ceil(x) -> int(math.ceil(x))"""
return int(math.ceil(x))
def randint(minvalue, maxvalue):
"""Returns a random integer x with minvalue <= x <= maxvalue"""
# Safety - get a lot of random data even if the range is fairly
# small
min_nbits = 32
# The range of the random numbers we need to generate
range = maxvalue - minvalue
# Which is this number of bytes
rangebytes = ceil(math.log(range, 2) / 8.)
# Convert to bits, but make sure it's always at least min_nbits*2
rangebits = max(rangebytes * 8, min_nbits * 2)
# Take a random number of bits between min_nbits and rangebits
nbits = random.randint(min_nbits, rangebits)
return (read_random_int(nbits) % range) + minvalue
def fermat_little_theorem(p):
"""Returns 1 if p may be prime, and something else if p definitely
is not prime"""
a = randint(1, p-1)
return fast_exponentiation(a, p-1, p)
def jacobi(a, b):
"""Calculates the value of the Jacobi symbol (a/b)
"""
if a % b == 0:
return 0
result = 1
while a > 1:
if a & 1:
if ((a-1)*(b-1) >> 2) & 1:
result = -result
b, a = a, b % a
else:
if ((b ** 2 - 1) >> 3) & 1:
result = -result
a = a >> 1
return result
def jacobi_witness(x, n):
"""Returns False if n is an Euler pseudo-prime with base x, and
True otherwise.
"""
j = jacobi(x, n) % n
f = fast_exponentiation(x, (n-1)/2, n)
if j == f: return False
return True
def randomized_primality_testing(n, k):
"""Calculates whether n is composite (which is always correct) or
prime (which is incorrect with error probability 2**-k)
Returns False if the number if composite, and True if it's
probably prime.
"""
q = 0.5 # Property of the jacobi_witness function
# t = int(math.ceil(k / math.log(1/q, 2)))
t = ceil(k / math.log(1/q, 2))
for i in range(t+1):
x = randint(1, n-1)
if jacobi_witness(x, n): return False
return True
def is_prime(number):
"""Returns True if the number is prime, and False otherwise.
"""
"""
if not fermat_little_theorem(number) == 1:
# Not prime, according to Fermat's little theorem
return False
"""
if randomized_primality_testing(number, 5):
# Prime, according to Jacobi
return True
# Not prime
return False
def getprime(nbits):
"""Returns a prime number of max. 'math.ceil(nbits/8)*8' bits. In
other words: nbits is rounded up to whole bytes.
"""
nbytes = int(math.ceil(nbits/8.))
while True:
integer = read_random_int(nbits)
# Make sure it's odd
integer |= 1
# Test for primeness
if is_prime(integer): break
# Retry if not prime
return integer
def are_relatively_prime(a, b):
"""Returns True if a and b are relatively prime, and False if they
are not.
"""
d = gcd(a, b)
return (d == 1)
def find_p_q(nbits):
"""Returns a tuple of two different primes of nbits bits"""
p = getprime(nbits)
while True:
q = getprime(nbits)
if not q == p: break
return (p, q)
def extended_euclid_gcd(a, b):
"""Returns a tuple (d, i, j) such that d = gcd(a, b) = ia + jb
"""
if b == 0:
return (a, 1, 0)
q = abs(a % b)
r = long(a / b)
(d, k, l) = extended_euclid_gcd(b, q)
return (d, l, k - l*r)
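# Illustrative check (not part of the original module): the returned tuple
# satisfies d == i*a + j*b, e.g.
#   extended_euclid_gcd(120, 23) == (1, -9, 47)   # 1 == -9*120 + 47*23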
# Main function: calculate encryption and decryption keys
def calculate_keys(p, q, nbits):
"""Calculates an encryption and a decryption key for p and q, and
returns them as a tuple (e, d)"""
n = p * q
phi_n = (p-1) * (q-1)
while True:
# Make sure e has enough bits so we ensure "wrapping" through
# modulo n
e = getprime(max(8, nbits/2))
if are_relatively_prime(e, n) and are_relatively_prime(e, phi_n): break
(d, i, j) = extended_euclid_gcd(e, phi_n)
if not d == 1:
raise Exception("e (%d) and phi_n (%d) are not relatively prime" % (e, phi_n))
if not (e * i) % phi_n == 1:
raise Exception("e (%d) and i (%d) are not mult. inv. modulo phi_n (%d)" % (e, i, phi_n))
return (e, i)
def gen_keys(nbits):
"""Generate RSA keys of nbits bits. Returns (p, q, e, d).
Note: this can take a long time, depending on the key size.
"""
while True:
(p, q) = find_p_q(nbits)
(e, d) = calculate_keys(p, q, nbits)
# For some reason, d is sometimes negative. We don't know how
# to fix it (yet), so we keep trying until everything is shiny
if d > 0: break
return (p, q, e, d)
def gen_pubpriv_keys(nbits):
"""Generates public and private keys, and returns them as (pub,
priv).
The public key consists of a dict {e: ..., n: ...}. The private
key consists of a dict {d: ..., p: ..., q: ...}.
"""
(p, q, e, d) = gen_keys(nbits)
return ( {'e': e, 'n': p*q}, {'d': d, 'p': p, 'q': q} )
def encrypt_int(message, ekey, n):
"""Encrypts a message using encryption key 'ekey', working modulo
n"""
if type(message) is types.IntType:
return encrypt_int(long(message), ekey, n)
if not type(message) is types.LongType:
raise TypeError("You must pass a long or an int")
if message > 0 and \
math.floor(math.log(message, 2)) > math.floor(math.log(n, 2)):
raise OverflowError("The message is too long")
return fast_exponentiation(message, ekey, n)
def decrypt_int(cyphertext, dkey, n):
"""Decrypts a cypher text using the decryption key 'dkey', working
modulo n"""
return encrypt_int(cyphertext, dkey, n)
def sign_int(message, dkey, n):
"""Signs 'message' using key 'dkey', working modulo n"""
return decrypt_int(message, dkey, n)
def verify_int(signed, ekey, n):
"""verifies 'signed' using key 'ekey', working modulo n"""
return encrypt_int(signed, ekey, n)
def picklechops(chops):
"""Pickles and base64encodes it's argument chops"""
value = zlib.compress(dumps(chops))
encoded = base64.encodestring(value)
return encoded.strip()
def unpicklechops(string):
"""base64decodes and unpickes it's argument string into chops"""
return loads(zlib.decompress(base64.decodestring(string)))
def chopstring(message, key, n, funcref):
"""Splits 'message' into chops that are at most as long as n,
converts these into integers, and calls funcref(integer, key, n)
for each chop.
Used by 'encrypt' and 'sign'.
"""
msglen = len(message)
mbits = msglen * 8
nbits = int(math.floor(math.log(n, 2)))
nbytes = nbits / 8
blocks = msglen / nbytes
if msglen % nbytes > 0:
blocks += 1
cypher = []
for bindex in range(blocks):
offset = bindex * nbytes
block = message[offset:offset+nbytes]
value = bytes2int(block)
cypher.append(funcref(value, key, n))
return picklechops(cypher)
def gluechops(chops, key, n, funcref):
"""Glues chops back together into a string. calls
funcref(integer, key, n) for each chop.
Used by 'decrypt' and 'verify'.
"""
message = ""
chops = unpicklechops(chops)
for cpart in chops:
mpart = funcref(cpart, key, n)
message += int2bytes(mpart)
return message
def encrypt(message, key):
"""Encrypts a string 'message' with the public key 'key'"""
return chopstring(message, key['e'], key['n'], encrypt_int)
def sign(message, key):
"""Signs a string 'message' with the private key 'key'"""
return chopstring(message, key['d'], key['p']*key['q'], decrypt_int)
def decrypt(cypher, key):
"""Decrypts a cypher with the private key 'key'"""
return gluechops(cypher, key['d'], key['p']*key['q'], decrypt_int)
def verify(cypher, key):
"""Verifies a cypher with the public key 'key'"""
return gluechops(cypher, key['e'], key['n'], encrypt_int)
# Do doctest if we're not imported
if __name__ == "__main__":
import doctest
doctest.testmod()
__all__ = ["gen_pubpriv_keys", "encrypt", "decrypt", "sign", "verify"]
| gpl-3.0 |
Shrhawk/edx-platform | common/lib/xmodule/xmodule/tests/test_editing_module.py | 181 | 2640 | """ Tests for editing descriptors"""
import unittest
import os
import logging
from mock import Mock
from pkg_resources import resource_string
from opaque_keys.edx.locations import Location
from xmodule.editing_module import TabsEditingDescriptor
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from xmodule.tests import get_test_descriptor_system
log = logging.getLogger(__name__)
class TabsEditingDescriptorTestCase(unittest.TestCase):
""" Testing TabsEditingDescriptor"""
def setUp(self):
super(TabsEditingDescriptorTestCase, self).setUp()
system = get_test_descriptor_system()
system.render_template = Mock(return_value="<div>Test Template HTML</div>")
self.tabs = [
{
'name': "Test_css",
'template': "tabs/codemirror-edit.html",
'current': True,
'css': {
'scss': [
resource_string(
__name__,
'../../test_files/test_tabseditingdescriptor.scss'
)
],
'css': [
resource_string(
__name__,
'../../test_files/test_tabseditingdescriptor.css'
)
]
}
},
{
'name': "Subtitles",
'template': "video/subtitles.html",
},
{
'name': "Settings",
'template': "tabs/video-metadata-edit-tab.html"
}
]
TabsEditingDescriptor.tabs = self.tabs
self.descriptor = system.construct_xblock_from_class(
TabsEditingDescriptor,
scope_ids=ScopeIds(None, None, None, Location('org', 'course', 'run', 'category', 'name', 'revision')),
field_data=DictFieldData({}),
)
def test_get_css(self):
"""test get_css"""
css = self.descriptor.get_css()
test_files_dir = os.path.dirname(__file__).replace('xmodule/tests', 'test_files')
test_css_file = os.path.join(test_files_dir, 'test_tabseditingdescriptor.scss')
with open(test_css_file) as new_css:
added_css = new_css.read()
self.assertEqual(css['scss'].pop(), added_css)
self.assertEqual(css['css'].pop(), added_css)
def test_get_context(self):
""""test get_context"""
rendered_context = self.descriptor.get_context()
self.assertListEqual(rendered_context['tabs'], self.tabs)
| agpl-3.0 |
robertmattmueller/sdac-compiler | sympy/combinatorics/tests/test_subsets.py | 120 | 1918 | from sympy.combinatorics import Subset
def test_subset():
a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])
assert a.next_binary() == Subset(['b'], ['a', 'b', 'c', 'd'])
assert a.prev_binary() == Subset(['c'], ['a', 'b', 'c', 'd'])
assert a.next_lexicographic() == Subset(['d'], ['a', 'b', 'c', 'd'])
assert a.prev_lexicographic() == Subset(['c'], ['a', 'b', 'c', 'd'])
assert a.next_gray() == Subset(['c'], ['a', 'b', 'c', 'd'])
assert a.prev_gray() == Subset(['d'], ['a', 'b', 'c', 'd'])
assert a.rank_binary == 3
assert a.rank_lexicographic == 14
assert a.rank_gray == 2
assert a.cardinality == 16
a = Subset([2, 5, 7], [1, 2, 3, 4, 5, 6, 7])
assert a.next_binary() == Subset([2, 5, 6], [1, 2, 3, 4, 5, 6, 7])
assert a.prev_binary() == Subset([2, 5], [1, 2, 3, 4, 5, 6, 7])
assert a.next_lexicographic() == Subset([2, 6], [1, 2, 3, 4, 5, 6, 7])
assert a.prev_lexicographic() == Subset([2, 5, 6, 7], [1, 2, 3, 4, 5, 6, 7])
assert a.next_gray() == Subset([2, 5, 6, 7], [1, 2, 3, 4, 5, 6, 7])
assert a.prev_gray() == Subset([2, 5], [1, 2, 3, 4, 5, 6, 7])
assert a.rank_binary == 37
assert a.rank_lexicographic == 93
assert a.rank_gray == 57
assert a.cardinality == 128
superset = ['a', 'b', 'c', 'd']
assert Subset.unrank_binary(4, superset).rank_binary == 4
assert Subset.unrank_gray(10, superset).rank_gray == 10
superset = [1, 2, 3, 4, 5, 6, 7, 8, 9]
assert Subset.unrank_binary(33, superset).rank_binary == 33
assert Subset.unrank_gray(25, superset).rank_gray == 25
a = Subset([], ['a', 'b', 'c', 'd'])
i = 1
while a.subset != Subset(['d'], ['a', 'b', 'c', 'd']).subset:
a = a.next_lexicographic()
i = i + 1
assert i == 16
i = 1
while a.subset != Subset([], ['a', 'b', 'c', 'd']).subset:
a = a.prev_lexicographic()
i = i + 1
assert i == 16
| gpl-3.0 |
michael-lazar/rtv | rtv/packages/praw/helpers.py | 2 | 19086 | # This file is part of PRAW.
#
# PRAW is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# PRAW is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# PRAW. If not, see <http://www.gnu.org/licenses/>.
"""
Helper functions.
The functions here provide functionality that is often needed by programs using
PRAW, but which isn't part of reddit's API.
"""
from __future__ import unicode_literals
import six
import sys
import time
from collections import deque
from functools import partial
from timeit import default_timer as timer
from .errors import HTTPException, PRAWException
from operator import attrgetter
BACKOFF_START = 4 # Minimum number of seconds to sleep during errors
KEEP_ITEMS = 128 # On each iteration only remember the first # items
# for conversion between broken reddit timestamps and unix timestamps
REDDIT_TIMESTAMP_OFFSET = 28800
def comment_stream(reddit_session, subreddit, limit=None, verbosity=1):
"""Indefinitely yield new comments from the provided subreddit.
Comments are yielded from oldest to newest.
:param reddit_session: The reddit_session to make requests from. In all the
examples this is assigned to the variable ``r``.
:param subreddit: Either a subreddit object, or the name of a
subreddit. Use `all` to get the comment stream for all comments made to
reddit.
:param limit: The maximum number of comments to fetch in a single
iteration. When None, fetch all available comments (reddit limits this
        to 1000, or a multiple of 1000 for multi-subreddits). If this number is
too small, comments may be missed.
:param verbosity: A number that controls the amount of output produced to
stderr. <= 0: no output; >= 1: output the total number of comments
processed and provide the short-term number of comments processed per
second; >= 2: output when additional delays are added in order to avoid
subsequent unexpected http errors. >= 3: output debugging information
regarding the comment stream. (Default: 1)
"""
get_function = partial(reddit_session.get_comments,
six.text_type(subreddit))
return _stream_generator(get_function, limit, verbosity)
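def _example_comment_stream(reddit_session):
    """Illustrative sketch (not part of the original module).
    Iterates the comment stream of a subreddit; ``reddit_session`` is assumed
    to be a praw.Reddit instance created by the caller, e.g.
    ``praw.Reddit(user_agent='my-stream-bot')``; the subreddit name is a
    placeholder.
    """
    for comment in comment_stream(reddit_session, 'python', limit=100,
                                  verbosity=0):
        print(comment.id)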
def submission_stream(reddit_session, subreddit, limit=None, verbosity=1):
"""Indefinitely yield new submissions from the provided subreddit.
Submissions are yielded from oldest to newest.
:param reddit_session: The reddit_session to make requests from. In all the
examples this is assigned to the variable ``r``.
:param subreddit: Either a subreddit object, or the name of a
subreddit. Use `all` to get the submissions stream for all submissions
made to reddit.
:param limit: The maximum number of submissions to fetch in a single
iteration. When None, fetch all available submissions (reddit limits
        this to 1000, or a multiple of 1000 for multi-subreddits). If this number
is too small, submissions may be missed. Since there isn't a limit to
the number of submissions that can be retrieved from r/all, the limit
will be set to 1000 when limit is None.
:param verbosity: A number that controls the amount of output produced to
stderr. <= 0: no output; >= 1: output the total number of submissions
processed and provide the short-term number of submissions processed
per second; >= 2: output when additional delays are added in order to
avoid subsequent unexpected http errors. >= 3: output debugging
information regarding the submission stream. (Default: 1)
"""
if six.text_type(subreddit).lower() == "all":
if limit is None:
limit = 1000
if not hasattr(subreddit, 'reddit_session'):
subreddit = reddit_session.get_subreddit(subreddit)
return _stream_generator(subreddit.get_new, limit, verbosity)
def valid_redditors(redditors, sub):
"""Return a verified list of valid Redditor instances.
:param redditors: A list comprised of Redditor instances and/or strings
that are to be verified as actual redditor accounts.
:param sub: A Subreddit instance that the authenticated account has
flair changing permission on.
Note: Flair will be unset for all valid redditors in `redditors` on the
subreddit `sub`. A valid redditor is defined as a redditor that is
registered on reddit.
"""
simplified = list(set(six.text_type(x).lower() for x in redditors))
return [sub.reddit_session.get_redditor(simplified[i], fetch=False)
for (i, resp) in enumerate(sub.set_flair_csv(
({'user': x, 'flair_text': x} for x in simplified)))
if resp['ok']]
def submissions_between(reddit_session,
subreddit,
lowest_timestamp=None,
highest_timestamp=None,
newest_first=True,
extra_cloudsearch_fields=None,
verbosity=1):
"""Yield submissions between two timestamps.
If both ``highest_timestamp`` and ``lowest_timestamp`` are unspecified,
yields all submissions in the ``subreddit``.
Submissions are yielded from newest to oldest(like in the "new" queue).
:param reddit_session: The reddit_session to make requests from. In all the
examples this is assigned to the variable ``r``.
:param subreddit: Either a subreddit object, or the name of a
subreddit. Use `all` to get the submissions stream for all submissions
made to reddit.
    :param lowest_timestamp: The lower bound for the ``created_utc`` attribute of
submissions.
(Default: subreddit's created_utc or 0 when subreddit == "all").
:param highest_timestamp: The upper bound for ``created_utc`` attribute
of submissions. (Default: current unix time)
NOTE: both highest_timestamp and lowest_timestamp are proper
        unix timestamps (just like ``created_utc`` attributes).
:param newest_first: If set to true, yields submissions
from newest to oldest. Otherwise yields submissions
from oldest to newest
:param extra_cloudsearch_fields: Allows extra filtering of results by
parameters like author, self. Full list is available here:
https://www.reddit.com/wiki/search
:param verbosity: A number that controls the amount of output produced to
stderr. <= 0: no output; >= 1: output the total number of submissions
processed; >= 2: output debugging information regarding
the search queries. (Default: 1)
"""
def debug(msg, level):
if verbosity >= level:
sys.stderr.write(msg + '\n')
def format_query_field(k, v):
if k in ["nsfw", "self"]:
# even though documentation lists "no" and "yes"
# as possible values, in reality they don't work
if v not in [0, 1, "0", "1"]:
raise PRAWException("Invalid value for the extra"
"field {}. Only '0' and '1' are"
"valid values.".format(k))
return "{}:{}".format(k, v)
return "{}:'{}'".format(k, v)
if extra_cloudsearch_fields is None:
extra_cloudsearch_fields = {}
extra_query_part = " ".join(
[format_query_field(k, v) for (k, v)
in sorted(extra_cloudsearch_fields.items())]
)
if highest_timestamp is None:
highest_timestamp = int(time.time()) + REDDIT_TIMESTAMP_OFFSET
else:
highest_timestamp = int(highest_timestamp) + REDDIT_TIMESTAMP_OFFSET
if lowest_timestamp is not None:
lowest_timestamp = int(lowest_timestamp) + REDDIT_TIMESTAMP_OFFSET
elif not isinstance(subreddit, six.string_types):
lowest_timestamp = int(subreddit.created)
elif subreddit not in ("all", "contrib", "mod", "friend"):
lowest_timestamp = int(reddit_session.get_subreddit(subreddit).created)
else:
lowest_timestamp = 0
original_highest_timestamp = highest_timestamp
original_lowest_timestamp = lowest_timestamp
# When making timestamp:X..Y queries, reddit misses submissions
# inside X..Y range, but they can be found inside Y..Z range
# It is not clear what is the value of Z should be, but it seems
# like the difference is usually about ~1 hour or less
# To be sure, let's set the workaround offset to 2 hours
out_of_order_submissions_workaround_offset = 7200
highest_timestamp += out_of_order_submissions_workaround_offset
lowest_timestamp -= out_of_order_submissions_workaround_offset
# Those parameters work ok, but there may be a better set of parameters
window_size = 60 * 60
search_limit = 100
min_search_results_in_window = 50
window_adjustment_ratio = 1.25
backoff = BACKOFF_START
processed_submissions = 0
prev_win_increased = False
prev_win_decreased = False
while highest_timestamp >= lowest_timestamp:
try:
if newest_first:
t1 = max(highest_timestamp - window_size, lowest_timestamp)
t2 = highest_timestamp
else:
t1 = lowest_timestamp
t2 = min(lowest_timestamp + window_size, highest_timestamp)
search_query = 'timestamp:{}..{}'.format(t1, t2)
if extra_query_part:
search_query = "(and {} {})".format(search_query,
extra_query_part)
debug(search_query, 3)
search_results = list(reddit_session.search(search_query,
subreddit=subreddit,
limit=search_limit,
syntax='cloudsearch',
sort='new'))
debug("Received {0} search results for query {1}"
.format(len(search_results), search_query),
2)
backoff = BACKOFF_START
except HTTPException as exc:
debug("{0}. Sleeping for {1} seconds".format(exc, backoff), 2)
time.sleep(backoff)
backoff *= 2
continue
if len(search_results) >= search_limit:
power = 2 if prev_win_decreased else 1
window_size = int(window_size / window_adjustment_ratio**power)
prev_win_decreased = True
debug("Decreasing window size to {0} seconds".format(window_size),
2)
# Since it is possible that there are more submissions
# in the current window, we have to re-do the request
# with reduced window
continue
else:
prev_win_decreased = False
search_results = [s for s in search_results
if original_lowest_timestamp <= s.created and
s.created <= original_highest_timestamp]
for submission in sorted(search_results,
key=attrgetter('created_utc', 'id'),
reverse=newest_first):
yield submission
processed_submissions += len(search_results)
debug('Total processed submissions: {}'
.format(processed_submissions), 1)
if newest_first:
highest_timestamp -= (window_size + 1)
else:
lowest_timestamp += (window_size + 1)
if len(search_results) < min_search_results_in_window:
power = 2 if prev_win_increased else 1
window_size = int(window_size * window_adjustment_ratio**power)
prev_win_increased = True
debug("Increasing window size to {0} seconds"
.format(window_size), 2)
else:
prev_win_increased = False
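def _example_submissions_between(reddit_session):
    """Illustrative sketch (not part of the original module).
    Fetches roughly one day of submissions from a subreddit; the subreddit
    name and the time window are placeholders, and ``reddit_session`` is
    assumed to be a praw.Reddit instance created by the caller.
    """
    now = int(time.time())
    for submission in submissions_between(reddit_session, 'python',
                                          lowest_timestamp=now - 86400,
                                          highest_timestamp=now,
                                          verbosity=0):
        print(submission.created_utc)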
def _stream_generator(get_function, limit=None, verbosity=1):
def debug(msg, level):
if verbosity >= level:
sys.stderr.write(msg + '\n')
def b36_id(item):
return int(item.id, 36)
seen = BoundedSet(KEEP_ITEMS * 16)
before = None
count = 0 # Count is incremented to bypass the cache
processed = 0
backoff = BACKOFF_START
while True:
items = []
sleep = None
start = timer()
try:
i = None
params = {'uniq': count}
count = (count + 1) % 100
if before:
params['before'] = before
gen = enumerate(get_function(limit=limit, params=params))
for i, item in gen:
if b36_id(item) in seen:
if i == 0:
if before is not None:
# reddit sent us out of order data -- log it
debug('(INFO) {0} already seen with before of {1}'
.format(item.fullname, before), 3)
before = None
break
if i == 0: # Always the first item in the generator
before = item.fullname
if b36_id(item) not in seen:
items.append(item)
processed += 1
if verbosity >= 1 and processed % 100 == 0:
sys.stderr.write(' Items: {0} \r'
.format(processed))
sys.stderr.flush()
if i < KEEP_ITEMS:
seen.add(b36_id(item))
else: # Generator exhausted
if i is None: # Generator yielded no items
assert before is not None
# Try again without before as the before item may be too
# old or no longer exist.
before = None
backoff = BACKOFF_START
except HTTPException as exc:
sleep = (backoff, '{0}. Sleeping for {{0}} seconds.'.format(exc),
2)
backoff *= 2
# Provide rate limit
if verbosity >= 1:
rate = len(items) / (timer() - start)
sys.stderr.write(' Items: {0} ({1:.2f} ips) \r'
.format(processed, rate))
sys.stderr.flush()
# Yield items from oldest to newest
for item in items[::-1]:
yield item
# Sleep if necessary
if sleep:
sleep_time, msg, msg_level = sleep # pylint: disable=W0633
debug(msg.format(sleep_time), msg_level)
time.sleep(sleep_time)
def chunk_sequence(sequence, chunk_length, allow_incomplete=True):
"""Given a sequence, divide it into sequences of length `chunk_length`.
:param allow_incomplete: If True, allow final chunk to be shorter if the
given sequence is not an exact multiple of `chunk_length`.
If False, the incomplete chunk will be discarded.
"""
(complete, leftover) = divmod(len(sequence), chunk_length)
if not allow_incomplete:
leftover = 0
chunk_count = complete + min(leftover, 1)
chunks = []
for x in range(chunk_count):
left = chunk_length * x
right = left + chunk_length
chunks.append(sequence[left:right])
return chunks
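# Example (illustrative, not part of the original module):
#   chunk_sequence('abcdefg', 3)                         -> ['abc', 'def', 'g']
#   chunk_sequence('abcdefg', 3, allow_incomplete=False) -> ['abc', 'def']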
def convert_id36_to_numeric_id(id36):
"""Convert strings representing base36 numbers into an integer."""
if not isinstance(id36, six.string_types) or id36.count("_") > 0:
raise ValueError("must supply base36 string, not fullname (e.g. use "
"xxxxx, not t3_xxxxx)")
return int(id36, 36)
def convert_numeric_id_to_id36(numeric_id):
"""Convert an integer into its base36 string representation.
This method has been cleaned up slightly to improve readability. For more
info see:
https://github.com/reddit/reddit/blob/master/r2/r2/lib/utils/_utils.pyx
https://www.reddit.com/r/redditdev/comments/n624n/submission_ids_question/
https://en.wikipedia.org/wiki/Base36
"""
# base36 allows negative numbers, but reddit does not
if not isinstance(numeric_id, six.integer_types) or numeric_id < 0:
raise ValueError("must supply a positive int/long")
# Alphabet used for base 36 conversion
alphabet = '0123456789abcdefghijklmnopqrstuvwxyz'
alphabet_len = len(alphabet)
# Temp assign
current_number = numeric_id
base36 = []
    # Numbers smaller than the alphabet length map directly to a single digit;
    # only larger numbers need the while/divmod loop below.
if 0 <= current_number < alphabet_len:
return alphabet[current_number]
# Break up into chunks
while current_number != 0:
current_number, rem = divmod(current_number, alphabet_len)
base36.append(alphabet[rem])
# String is built in reverse order
return ''.join(reversed(base36))
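# Example (illustrative): the two converters above are inverses of each other:
#   convert_numeric_id_to_id36(1234567890)  -> 'kf12oi'
#   convert_id36_to_numeric_id('kf12oi')    -> 1234567890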
def flatten_tree(tree, nested_attr='replies', depth_first=False):
"""Return a flattened version of the passed in tree.
:param nested_attr: The attribute name that contains the nested items.
Defaults to ``replies`` which is suitable for comments.
:param depth_first: When true, add to the list in a depth-first manner
rather than the default breadth-first manner.
"""
stack = deque(tree)
extend = stack.extend if depth_first else stack.extendleft
retval = []
while stack:
item = stack.popleft()
nested = getattr(item, nested_attr, None)
if nested:
extend(nested)
retval.append(item)
return retval
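def _example_flatten_tree():
    """Illustrative sketch (not part of the original module).
    flatten_tree works on any objects exposing the nested attribute
    (``replies`` by default), not only praw comment objects.
    """
    class Node(object):
        def __init__(self, name, replies=None):
            self.name = name
            self.replies = replies or []
    tree = [Node('a', [Node('b'), Node('c', [Node('d')])])]
    return sorted(node.name for node in flatten_tree(tree))  # ['a', 'b', 'c', 'd']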
def normalize_url(url):
"""Return url after stripping trailing .json and trailing slashes."""
if url.endswith('.json'):
url = url[:-5]
if url.endswith('/'):
url = url[:-1]
return url
class BoundedSet(object):
"""A set with a maximum size that evicts the oldest items when necessary.
This class does not implement the complete set interface.
"""
def __init__(self, max_items):
"""Construct an instance of the BoundedSet."""
self.max_items = max_items
self._fifo = []
self._set = set()
def __contains__(self, item):
"""Test if the BoundedSet contains item."""
return item in self._set
def add(self, item):
"""Add an item to the set discarding the oldest item if necessary."""
if item in self._set:
self._fifo.remove(item)
elif len(self._set) == self.max_items:
self._set.remove(self._fifo.pop(0))
self._fifo.append(item)
self._set.add(item)
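# Example (illustrative, not part of the original module): a BoundedSet keeps
# at most `max_items` unique items and silently evicts the oldest one:
#   recent = BoundedSet(2)
#   recent.add(1); recent.add(2); recent.add(3)   # adding 3 evicts 1
#   1 in recent  -> False
#   3 in recent  -> True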
| mit |
rosmo/boto | boto/kinesis/layer1.py | 102 | 40879 | # Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import base64
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.kinesis import exceptions
from boto.compat import json
from boto.compat import six
class KinesisConnection(AWSQueryConnection):
"""
Amazon Kinesis Service API Reference
Amazon Kinesis is a managed service that scales elastically for
real time processing of streaming big data.
"""
APIVersion = "2013-12-02"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "kinesis.us-east-1.amazonaws.com"
ServiceName = "Kinesis"
TargetPrefix = "Kinesis_20131202"
ResponseError = JSONResponseError
_faults = {
"ProvisionedThroughputExceededException": exceptions.ProvisionedThroughputExceededException,
"LimitExceededException": exceptions.LimitExceededException,
"ExpiredIteratorException": exceptions.ExpiredIteratorException,
"ResourceInUseException": exceptions.ResourceInUseException,
"ResourceNotFoundException": exceptions.ResourceNotFoundException,
"InvalidArgumentException": exceptions.InvalidArgumentException,
"SubscriptionRequiredException": exceptions.SubscriptionRequiredException
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs:
kwargs['host'] = region.endpoint
super(KinesisConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def add_tags_to_stream(self, stream_name, tags):
"""
Adds or updates tags for the specified Amazon Kinesis stream.
Each stream can have up to 10 tags.
If tags have already been assigned to the stream,
`AddTagsToStream` overwrites any existing tags that correspond
to the specified tag keys.
:type stream_name: string
:param stream_name: The name of the stream.
:type tags: map
:param tags: The set of key-value pairs to use to create the tags.
"""
params = {'StreamName': stream_name, 'Tags': tags, }
return self.make_request(action='AddTagsToStream',
body=json.dumps(params))
def create_stream(self, stream_name, shard_count):
"""
        Creates an Amazon Kinesis stream. A stream captures and
        transports data records that are continuously emitted from
        different data sources or producers. Scale-out within an
Amazon Kinesis stream is explicitly supported by means of
shards, which are uniquely identified groups of data records
in an Amazon Kinesis stream.
You specify and control the number of shards that a stream is
composed of. Each open shard can support up to 5 read
transactions per second, up to a maximum total of 2 MB of data
read per second. Each shard can support up to 1000 records
written per second, up to a maximum total of 1 MB data written
per second. You can add shards to a stream if the amount of
data input increases and you can remove shards if the amount
of data input decreases.
The stream name identifies the stream. The name is scoped to
the AWS account used by the application. It is also scoped by
region. That is, two streams in two different accounts can
have the same name, and two streams in the same account, but
in two different regions, can have the same name.
`CreateStream` is an asynchronous operation. Upon receiving a
`CreateStream` request, Amazon Kinesis immediately returns and
sets the stream status to `CREATING`. After the stream is
created, Amazon Kinesis sets the stream status to `ACTIVE`.
You should perform read and write operations only on an
`ACTIVE` stream.
You receive a `LimitExceededException` when making a
`CreateStream` request if you try to do one of the following:
+ Have more than five streams in the `CREATING` state at any
point in time.
+ Create more shards than are authorized for your account.
The default limit for an AWS account is 10 shards per stream.
If you need to create a stream with more than 10 shards,
`contact AWS Support`_ to increase the limit on your account.
You can use `DescribeStream` to check the stream status, which
is returned in `StreamStatus`.
`CreateStream` has a limit of 5 transactions per second per
account.
:type stream_name: string
:param stream_name: A name to identify the stream. The stream name is
scoped to the AWS account used by the application that creates the
stream. It is also scoped by region. That is, two streams in two
different AWS accounts can have the same name, and two streams in
the same AWS account, but in two different regions, can have the
same name.
:type shard_count: integer
:param shard_count: The number of shards that the stream will use. The
throughput of the stream is a function of the number of shards;
more shards are required for greater provisioned throughput.
**Note:** The default limit for an AWS account is 10 shards per stream.
If you need to create a stream with more than 10 shards, `contact
AWS Support`_ to increase the limit on your account.
"""
params = {
'StreamName': stream_name,
'ShardCount': shard_count,
}
return self.make_request(action='CreateStream',
body=json.dumps(params))
def delete_stream(self, stream_name):
"""
Deletes a stream and all its shards and data. You must shut
down any applications that are operating on the stream before
you delete the stream. If an application attempts to operate
on a deleted stream, it will receive the exception
`ResourceNotFoundException`.
If the stream is in the `ACTIVE` state, you can delete it.
After a `DeleteStream` request, the specified stream is in the
`DELETING` state until Amazon Kinesis completes the deletion.
**Note:** Amazon Kinesis might continue to accept data read
and write operations, such as PutRecord, PutRecords, and
GetRecords, on a stream in the `DELETING` state until the
stream deletion is complete.
When you delete a stream, any shards in that stream are also
deleted, and any tags are dissociated from the stream.
You can use the DescribeStream operation to check the state of
the stream, which is returned in `StreamStatus`.
`DeleteStream` has a limit of 5 transactions per second per
account.
:type stream_name: string
:param stream_name: The name of the stream to delete.
"""
params = {'StreamName': stream_name, }
return self.make_request(action='DeleteStream',
body=json.dumps(params))
def describe_stream(self, stream_name, limit=None,
exclusive_start_shard_id=None):
"""
Describes the specified stream.
The information about the stream includes its current status,
its Amazon Resource Name (ARN), and an array of shard objects.
For each shard object, there is information about the hash key
and sequence number ranges that the shard spans, and the IDs
of any earlier shards that played in a role in creating the
shard. A sequence number is the identifier associated with
every record ingested in the Amazon Kinesis stream. The
sequence number is assigned when a record is put into the
stream.
You can limit the number of returned shards using the `Limit`
parameter. The number of shards in a stream may be too large
to return from a single call to `DescribeStream`. You can
detect this by using the `HasMoreShards` flag in the returned
output. `HasMoreShards` is set to `True` when there is more
data available.
`DescribeStream` is a paginated operation. If there are more
shards available, you can request them using the shard ID of
the last shard returned. Specify this ID in the
`ExclusiveStartShardId` parameter in a subsequent request to
`DescribeStream`.
`DescribeStream` has a limit of 10 transactions per second per
account.
:type stream_name: string
:param stream_name: The name of the stream to describe.
:type limit: integer
:param limit: The maximum number of shards to return.
:type exclusive_start_shard_id: string
:param exclusive_start_shard_id: The shard ID of the shard to start
with.
"""
params = {'StreamName': stream_name, }
if limit is not None:
params['Limit'] = limit
if exclusive_start_shard_id is not None:
params['ExclusiveStartShardId'] = exclusive_start_shard_id
return self.make_request(action='DescribeStream',
body=json.dumps(params))
def get_records(self, shard_iterator, limit=None, b64_decode=True):
"""
Gets data records from a shard.
Specify a shard iterator using the `ShardIterator` parameter.
The shard iterator specifies the position in the shard from
which you want to start reading data records sequentially. If
there are no records available in the portion of the shard
that the iterator points to, `GetRecords` returns an empty
list. Note that it might take multiple calls to get to a
portion of the shard that contains records.
You can scale by provisioning multiple shards. Your
application should have one thread per shard, each reading
continuously from its stream. To read from a stream
continually, call `GetRecords` in a loop. Use GetShardIterator
to get the shard iterator to specify in the first `GetRecords`
call. `GetRecords` returns a new shard iterator in
`NextShardIterator`. Specify the shard iterator returned in
`NextShardIterator` in subsequent calls to `GetRecords`. Note
that if the shard has been closed, the shard iterator can't
return more data and `GetRecords` returns `null` in
`NextShardIterator`. You can terminate the loop when the shard
is closed, or when the shard iterator reaches the record with
the sequence number or other attribute that marks it as the
last record to process.
Each data record can be up to 50 KB in size, and each shard
can read up to 2 MB per second. You can ensure that your calls
don't exceed the maximum supported size or throughput by using
the `Limit` parameter to specify the maximum number of records
that `GetRecords` can return. Consider your average record
size when determining this limit. For example, if your average
record size is 40 KB, you can limit the data returned to about
1 MB per call by specifying 25 as the limit.
The size of the data returned by `GetRecords` will vary
depending on the utilization of the shard. The maximum size of
data that `GetRecords` can return is 10 MB. If a call returns
10 MB of data, subsequent calls made within the next 5 seconds
throw `ProvisionedThroughputExceededException`. If there is
insufficient provisioned throughput on the shard, subsequent
calls made within the next 1 second throw
`ProvisionedThroughputExceededException`. Note that
`GetRecords` won't return any data when it throws an
exception. For this reason, we recommend that you wait one
second between calls to `GetRecords`; however, it's possible
that the application will get exceptions for longer than 1
second.
To detect whether the application is falling behind in
processing, add a timestamp to your records and note how long
it takes to process them. You can also monitor how much data
is in a stream using the CloudWatch metrics for write
operations ( `PutRecord` and `PutRecords`). For more
information, see `Monitoring Amazon Kinesis with Amazon
CloudWatch`_ in the Amazon Kinesis Developer Guide .
:type shard_iterator: string
:param shard_iterator: The position in the shard from which you want to
start sequentially reading data records. A shard iterator specifies
this position using the sequence number of a data record in the
shard.
:type limit: integer
:param limit: The maximum number of records to return. Specify a value
of up to 10,000. If you specify a value that is greater than
10,000, `GetRecords` throws `InvalidArgumentException`.
:type b64_decode: boolean
:param b64_decode: Decode the Base64-encoded ``Data`` field of records.
"""
params = {'ShardIterator': shard_iterator, }
if limit is not None:
params['Limit'] = limit
response = self.make_request(action='GetRecords',
body=json.dumps(params))
# Base64 decode the data
if b64_decode:
for record in response.get('Records', []):
record['Data'] = base64.b64decode(
record['Data'].encode('utf-8')).decode('utf-8')
return response
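    # Illustrative read loop (not part of boto), following the guidance in the
    # docstring above; ``conn`` and ``shard_iterator`` are placeholders:
    #
    #   it = shard_iterator
    #   while it is not None:
    #       out = conn.get_records(it, limit=25)
    #       for rec in out['Records']:
    #           handle(rec['Data'])        # hypothetical record handler
    #       it = out['NextShardIterator']
    #       time.sleep(1)                  # per-docstring pacing advice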
def get_shard_iterator(self, stream_name, shard_id, shard_iterator_type,
starting_sequence_number=None):
"""
Gets a shard iterator. A shard iterator expires five minutes
after it is returned to the requester.
A shard iterator specifies the position in the shard from
which to start reading data records sequentially. A shard
iterator specifies this position using the sequence number of
a data record in a shard. A sequence number is the identifier
associated with every record ingested in the Amazon Kinesis
stream. The sequence number is assigned when a record is put
into the stream.
You must specify the shard iterator type. For example, you can
set the `ShardIteratorType` parameter to read exactly from the
position denoted by a specific sequence number by using the
`AT_SEQUENCE_NUMBER` shard iterator type, or right after the
sequence number by using the `AFTER_SEQUENCE_NUMBER` shard
iterator type, using sequence numbers returned by earlier
calls to PutRecord, PutRecords, GetRecords, or DescribeStream.
You can specify the shard iterator type `TRIM_HORIZON` in the
request to cause `ShardIterator` to point to the last
untrimmed record in the shard in the system, which is the
oldest data record in the shard. Or you can point to just
after the most recent record in the shard, by using the shard
iterator type `LATEST`, so that you always read the most
recent data in the shard.
        When you repeatedly read from an Amazon Kinesis stream, use a
        GetShardIterator request to get the first shard iterator to
use in your first `GetRecords` request and then use the shard
iterator returned by the `GetRecords` request in
`NextShardIterator` for subsequent reads. A new shard iterator
is returned by every `GetRecords` request in
`NextShardIterator`, which you use in the `ShardIterator`
parameter of the next `GetRecords` request.
If a `GetShardIterator` request is made too often, you receive
a `ProvisionedThroughputExceededException`. For more
information about throughput limits, see GetRecords.
If the shard is closed, the iterator can't return more data,
and `GetShardIterator` returns `null` for its `ShardIterator`.
A shard can be closed using SplitShard or MergeShards.
`GetShardIterator` has a limit of 5 transactions per second
per account per open shard.
:type stream_name: string
:param stream_name: The name of the stream.
:type shard_id: string
:param shard_id: The shard ID of the shard to get the iterator for.
:type shard_iterator_type: string
:param shard_iterator_type:
Determines how the shard iterator is used to start reading data records
from the shard.
The following are the valid shard iterator types:
+ AT_SEQUENCE_NUMBER - Start reading exactly from the position denoted
by a specific sequence number.
+ AFTER_SEQUENCE_NUMBER - Start reading right after the position
denoted by a specific sequence number.
+ TRIM_HORIZON - Start reading at the last untrimmed record in the
shard in the system, which is the oldest data record in the shard.
+ LATEST - Start reading just after the most recent record in the
shard, so that you always read the most recent data in the shard.
:type starting_sequence_number: string
:param starting_sequence_number: The sequence number of the data record
            in the shard from which to start reading.
"""
params = {
'StreamName': stream_name,
'ShardId': shard_id,
'ShardIteratorType': shard_iterator_type,
}
if starting_sequence_number is not None:
params['StartingSequenceNumber'] = starting_sequence_number
return self.make_request(action='GetShardIterator',
body=json.dumps(params))
def list_streams(self, limit=None, exclusive_start_stream_name=None):
"""
Lists your streams.
The number of streams may be too large to return from a single
call to `ListStreams`. You can limit the number of returned
streams using the `Limit` parameter. If you do not specify a
value for the `Limit` parameter, Amazon Kinesis uses the
default limit, which is currently 10.
You can detect if there are more streams available to list by
using the `HasMoreStreams` flag from the returned output. If
there are more streams available, you can request more streams
by using the name of the last stream returned by the
`ListStreams` request in the `ExclusiveStartStreamName`
parameter in a subsequent request to `ListStreams`. The group
of stream names returned by the subsequent request is then
added to the list. You can continue this process until all the
stream names have been collected in the list.
`ListStreams` has a limit of 5 transactions per second per
account.
:type limit: integer
:param limit: The maximum number of streams to list.
:type exclusive_start_stream_name: string
:param exclusive_start_stream_name: The name of the stream to start the
list with.
"""
params = {}
if limit is not None:
params['Limit'] = limit
if exclusive_start_stream_name is not None:
params['ExclusiveStartStreamName'] = exclusive_start_stream_name
return self.make_request(action='ListStreams',
body=json.dumps(params))
def list_tags_for_stream(self, stream_name, exclusive_start_tag_key=None,
limit=None):
"""
Lists the tags for the specified Amazon Kinesis stream.
:type stream_name: string
:param stream_name: The name of the stream.
:type exclusive_start_tag_key: string
:param exclusive_start_tag_key: The key to use as the starting point
for the list of tags. If this parameter is set, `ListTagsForStream`
gets all tags that occur after `ExclusiveStartTagKey`.
:type limit: integer
:param limit: The number of tags to return. If this number is less than
the total number of tags associated with the stream, `HasMoreTags`
is set to `True`. To list additional tags, set
`ExclusiveStartTagKey` to the last key in the response.
"""
params = {'StreamName': stream_name, }
if exclusive_start_tag_key is not None:
params['ExclusiveStartTagKey'] = exclusive_start_tag_key
if limit is not None:
params['Limit'] = limit
return self.make_request(action='ListTagsForStream',
body=json.dumps(params))
def merge_shards(self, stream_name, shard_to_merge,
adjacent_shard_to_merge):
"""
Merges two adjacent shards in a stream and combines them into
a single shard to reduce the stream's capacity to ingest and
transport data. Two shards are considered adjacent if the
union of the hash key ranges for the two shards form a
contiguous set with no gaps. For example, if you have two
shards, one with a hash key range of 276...381 and the other
with a hash key range of 382...454, then you could merge these
two shards into a single shard that would have a hash key
range of 276...454. After the merge, the single child shard
receives data for all hash key values covered by the two
parent shards.
`MergeShards` is called when there is a need to reduce the
overall capacity of a stream because of excess capacity that
is not being used. You must specify the shard to be merged and
the adjacent shard for a stream. For more information about
merging shards, see `Merge Two Shards`_ in the Amazon Kinesis
Developer Guide .
If the stream is in the `ACTIVE` state, you can call
`MergeShards`. If a stream is in the `CREATING`, `UPDATING`,
or `DELETING` state, `MergeShards` returns a
`ResourceInUseException`. If the specified stream does not
exist, `MergeShards` returns a `ResourceNotFoundException`.
You can use DescribeStream to check the state of the stream,
which is returned in `StreamStatus`.
`MergeShards` is an asynchronous operation. Upon receiving a
`MergeShards` request, Amazon Kinesis immediately returns a
response and sets the `StreamStatus` to `UPDATING`. After the
operation is completed, Amazon Kinesis sets the `StreamStatus`
to `ACTIVE`. Read and write operations continue to work while
the stream is in the `UPDATING` state.
You use DescribeStream to determine the shard IDs that are
specified in the `MergeShards` request.
If you try to operate on too many streams in parallel using
CreateStream, DeleteStream, `MergeShards` or SplitShard, you
will receive a `LimitExceededException`.
`MergeShards` has limit of 5 transactions per second per
account.
:type stream_name: string
:param stream_name: The name of the stream for the merge.
:type shard_to_merge: string
:param shard_to_merge: The shard ID of the shard to combine with the
adjacent shard for the merge.
:type adjacent_shard_to_merge: string
:param adjacent_shard_to_merge: The shard ID of the adjacent shard for
the merge.
"""
params = {
'StreamName': stream_name,
'ShardToMerge': shard_to_merge,
'AdjacentShardToMerge': adjacent_shard_to_merge,
}
return self.make_request(action='MergeShards',
body=json.dumps(params))
def put_record(self, stream_name, data, partition_key,
explicit_hash_key=None,
sequence_number_for_ordering=None,
exclusive_minimum_sequence_number=None,
b64_encode=True):
"""
This operation puts a data record into an Amazon Kinesis
stream from a producer. This operation must be called to send
data from the producer into the Amazon Kinesis stream for
real-time ingestion and subsequent processing. The `PutRecord`
operation requires the name of the stream that captures,
stores, and transports the data; a partition key; and the data
blob itself. The data blob could be a segment from a log file,
geographic/location data, website clickstream data, or any
other data type.
The partition key is used to distribute data across shards.
Amazon Kinesis segregates the data records that belong to a
data stream into multiple shards, using the partition key
associated with each data record to determine which shard a
given data record belongs to.
Partition keys are Unicode strings, with a maximum length
limit of 256 bytes. An MD5 hash function is used to map
partition keys to 128-bit integer values and to map associated
data records to shards using the hash key ranges of the
shards. You can override hashing the partition key to
determine the shard by explicitly specifying a hash value
using the `ExplicitHashKey` parameter. For more information,
see the `Amazon Kinesis Developer Guide`_.
`PutRecord` returns the shard ID of where the data record was
placed and the sequence number that was assigned to the data
record.
Sequence numbers generally increase over time. To guarantee
strictly increasing ordering, use the
`SequenceNumberForOrdering` parameter. For more information,
see the `Amazon Kinesis Developer Guide`_.
If a `PutRecord` request cannot be processed because of
insufficient provisioned throughput on the shard involved in
the request, `PutRecord` throws
`ProvisionedThroughputExceededException`.
Data records are accessible for only 24 hours from the time
that they are added to an Amazon Kinesis stream.
:type stream_name: string
:param stream_name: The name of the stream to put the data record into.
:type data: blob
:param data: The data blob to put into the record, which is
Base64-encoded when the blob is serialized.
The maximum size of the data blob (the payload after
        Base64-decoding) is 50 kilobytes (KB).
Set `b64_encode` to disable automatic Base64 encoding.
:type partition_key: string
:param partition_key: Determines which shard in the stream the data
record is assigned to. Partition keys are Unicode strings with a
maximum length limit of 256 bytes. Amazon Kinesis uses the
partition key as input to a hash function that maps the partition
key and associated data to a specific shard. Specifically, an MD5
hash function is used to map partition keys to 128-bit integer
values and to map associated data records to shards. As a result of
this hashing mechanism, all data records with the same partition
key will map to the same shard within the stream.
:type explicit_hash_key: string
:param explicit_hash_key: The hash value used to explicitly determine
the shard the data record is assigned to by overriding the
partition key hash.
:type sequence_number_for_ordering: string
:param sequence_number_for_ordering: Guarantees strictly increasing
sequence numbers, for puts from the same client and to the same
partition key. Usage: set the `SequenceNumberForOrdering` of record
n to the sequence number of record n-1 (as returned in the
PutRecordResult when putting record n-1 ). If this parameter is not
set, records will be coarsely ordered based on arrival time.
:type b64_encode: boolean
:param b64_encode: Whether to Base64 encode `data`. Can be set to
``False`` if `data` is already encoded to prevent double encoding.
"""
params = {
'StreamName': stream_name,
'Data': data,
'PartitionKey': partition_key,
}
if explicit_hash_key is not None:
params['ExplicitHashKey'] = explicit_hash_key
if sequence_number_for_ordering is not None:
params['SequenceNumberForOrdering'] = sequence_number_for_ordering
if b64_encode:
if not isinstance(params['Data'], six.binary_type):
params['Data'] = params['Data'].encode('utf-8')
params['Data'] = base64.b64encode(params['Data']).decode('utf-8')
return self.make_request(action='PutRecord',
body=json.dumps(params))
def put_records(self, records, stream_name, b64_encode=True):
"""
Puts (writes) multiple data records from a producer into an
Amazon Kinesis stream in a single call (also referred to as a
`PutRecords` request). Use this operation to send data from a
data producer into the Amazon Kinesis stream for real-time
ingestion and processing. Each shard can support up to 1000
records written per second, up to a maximum total of 1 MB data
written per second.
You must specify the name of the stream that captures, stores,
and transports the data; and an array of request `Records`,
with each record in the array requiring a partition key and
data blob.
The data blob can be any type of data; for example, a segment
from a log file, geographic/location data, website clickstream
data, and so on.
The partition key is used by Amazon Kinesis as input to a hash
function that maps the partition key and associated data to a
specific shard. An MD5 hash function is used to map partition
keys to 128-bit integer values and to map associated data
records to shards. As a result of this hashing mechanism, all
data records with the same partition key map to the same shard
within the stream. For more information, see `Partition Key`_
in the Amazon Kinesis Developer Guide .
Each record in the `Records` array may include an optional
parameter, `ExplicitHashKey`, which overrides the partition
key to shard mapping. This parameter allows a data producer to
determine explicitly the shard where the record is stored. For
more information, see `Adding Multiple Records with
PutRecords`_ in the Amazon Kinesis Developer Guide .
The `PutRecords` response includes an array of response
`Records`. Each record in the response array directly
correlates with a record in the request array using natural
ordering, from the top to the bottom of the request and
response. The response `Records` array always includes the
same number of records as the request array.
The response `Records` array includes both successfully and
unsuccessfully processed records. Amazon Kinesis attempts to
process all records in each `PutRecords` request. A single
record failure does not stop the processing of subsequent
records.
A successfully-processed record includes `ShardId` and
`SequenceNumber` values. The `ShardId` parameter identifies
the shard in the stream where the record is stored. The
`SequenceNumber` parameter is an identifier assigned to the
put record, unique to all records in the stream.
An unsuccessfully-processed record includes `ErrorCode` and
`ErrorMessage` values. `ErrorCode` reflects the type of error
and can be one of the following values:
`ProvisionedThroughputExceededException` or `InternalFailure`.
`ErrorMessage` provides more detailed information about the
`ProvisionedThroughputExceededException` exception including
the account ID, stream name, and shard ID of the record that
was throttled.
Data records are accessible for only 24 hours from the time
that they are added to an Amazon Kinesis stream.
:type records: list
:param records: The records associated with the request.
:type stream_name: string
:param stream_name: The stream name associated with the request.
:type b64_encode: boolean
:param b64_encode: Whether to Base64 encode `data`. Can be set to
``False`` if `data` is already encoded to prevent double encoding.
"""
params = {'Records': records, 'StreamName': stream_name, }
if b64_encode:
for i in range(len(params['Records'])):
data = params['Records'][i]['Data']
if not isinstance(data, six.binary_type):
data = data.encode('utf-8')
params['Records'][i]['Data'] = base64.b64encode(
data).decode('utf-8')
return self.make_request(action='PutRecords',
body=json.dumps(params))
def remove_tags_from_stream(self, stream_name, tag_keys):
"""
Deletes tags from the specified Amazon Kinesis stream.
If you specify a tag that does not exist, it is ignored.
:type stream_name: string
:param stream_name: The name of the stream.
:type tag_keys: list
:param tag_keys: A list of tag keys. Each corresponding tag is removed
from the stream.
"""
params = {'StreamName': stream_name, 'TagKeys': tag_keys, }
return self.make_request(action='RemoveTagsFromStream',
body=json.dumps(params))
def split_shard(self, stream_name, shard_to_split, new_starting_hash_key):
"""
Splits a shard into two new shards in the stream, to increase
the stream's capacity to ingest and transport data.
`SplitShard` is called when there is a need to increase the
overall capacity of stream because of an expected increase in
the volume of data records being ingested.
You can also use `SplitShard` when a shard appears to be
approaching its maximum utilization, for example, when the set
of producers sending data into the specific shard are suddenly
sending more than previously anticipated. You can also call
`SplitShard` to increase stream capacity, so that more Amazon
Kinesis applications can simultaneously read data from the
stream for real-time processing.
You must specify the shard to be split and the new hash key,
which is the position in the shard where the shard gets split
in two. In many cases, the new hash key might simply be the
average of the beginning and ending hash key, but it can be
any hash key value in the range being mapped into the shard.
For more information about splitting shards, see `Split a
Shard`_ in the Amazon Kinesis Developer Guide .
You can use DescribeStream to determine the shard ID and hash
key values for the `ShardToSplit` and `NewStartingHashKey`
parameters that are specified in the `SplitShard` request.
`SplitShard` is an asynchronous operation. Upon receiving a
`SplitShard` request, Amazon Kinesis immediately returns a
response and sets the stream status to `UPDATING`. After the
operation is completed, Amazon Kinesis sets the stream status
to `ACTIVE`. Read and write operations continue to work while
the stream is in the `UPDATING` state.
You can use `DescribeStream` to check the status of the
stream, which is returned in `StreamStatus`. If the stream is
in the `ACTIVE` state, you can call `SplitShard`. If a stream
is in `CREATING` or `UPDATING` or `DELETING` states,
`DescribeStream` returns a `ResourceInUseException`.
If the specified stream does not exist, `DescribeStream`
returns a `ResourceNotFoundException`. If you try to create
more shards than are authorized for your account, you receive
a `LimitExceededException`.
The default limit for an AWS account is 10 shards per stream.
If you need to create a stream with more than 10 shards,
`contact AWS Support`_ to increase the limit on your account.
If you try to operate on too many streams in parallel using
CreateStream, DeleteStream, MergeShards or SplitShard, you
receive a `LimitExceededException`.
`SplitShard` has limit of 5 transactions per second per
account.
:type stream_name: string
:param stream_name: The name of the stream for the shard split.
:type shard_to_split: string
:param shard_to_split: The shard ID of the shard to split.
:type new_starting_hash_key: string
:param new_starting_hash_key: A hash key value for the starting hash
key of one of the child shards created by the split. The hash key
range for a given shard constitutes a set of ordered contiguous
positive integers. The value for `NewStartingHashKey` must be in
the range of hash keys being mapped into the shard. The
`NewStartingHashKey` hash key value and all higher hash key values
in hash key range are distributed to one of the child shards. All
the lower hash key values in the range are distributed to the other
child shard.
"""
params = {
'StreamName': stream_name,
'ShardToSplit': shard_to_split,
'NewStartingHashKey': new_starting_hash_key,
}
return self.make_request(action='SplitShard',
body=json.dumps(params))
def make_request(self, action, body):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
'Host': self.region.endpoint,
'Content-Type': 'application/x-amz-json-1.1',
'Content-Length': str(len(body)),
}
http_request = self.build_base_http_request(
method='POST', path='/', auth_path='/', params={},
headers=headers, data=body)
response = self._mexe(http_request, sender=None,
override_num_retries=10)
response_body = response.read().decode('utf-8')
boto.log.debug(response.getheaders())
boto.log.debug(response_body)
if response.status == 200:
if response_body:
return json.loads(response_body)
else:
json_body = json.loads(response_body)
fault_name = json_body.get('__type', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
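def _example_stream_roundtrip():
    """Illustrative sketch (not part of boto): create a stream, put one record
    and read it back.  Credentials are assumed to come from the usual boto
    configuration, the stream name is a placeholder, and DescribeStream must
    report the stream as ACTIVE before records can be put.
    """
    conn = KinesisConnection()
    conn.create_stream('example-stream', shard_count=1)
    # ... poll describe_stream() until StreamStatus == 'ACTIVE' ...
    conn.put_record('example-stream', 'hello kinesis', partition_key='pk-1')
    description = conn.describe_stream('example-stream')['StreamDescription']
    shard_id = description['Shards'][0]['ShardId']
    iterator = conn.get_shard_iterator('example-stream', shard_id,
                                       'TRIM_HORIZON')['ShardIterator']
    return conn.get_records(iterator)['Records']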
| mit |
flyfei/python-for-android | python3-alpha/python3-src/Lib/plat-unixware7/STROPTS.py | 106 | 6524 | # Generated by h2py from /usr/include/sys/stropts.h
# Included from sys/types.h
def quad_low(x): return x.val[0]
ADT_EMASKSIZE = 8
SHRT_MIN = -32768
SHRT_MAX = 32767
INT_MIN = (-2147483647-1)
INT_MAX = 2147483647
LONG_MIN = (-2147483647-1)
LONG_MAX = 2147483647
OFF32_MAX = LONG_MAX
ISTAT_ASSERTED = 0
ISTAT_ASSUMED = 1
ISTAT_NONE = 2
OFF_MAX = OFF32_MAX
CLOCK_MAX = LONG_MAX
P_MYID = (-1)
P_MYHOSTID = (-1)
# Included from sys/select.h
FD_SETSIZE = 4096
NBBY = 8
NULL = 0
# Included from sys/conf.h
D_NEW = 0x00
D_OLD = 0x01
D_DMA = 0x02
D_BLKOFF = 0x400
D_LFS = 0x8000
D_STR = 0x0800
D_MOD = 0x1000
D_PSEUDO = 0x2000
D_RANDOM = 0x4000
D_HOT = 0x10000
D_SEEKNEG = 0x04
D_TAPE = 0x08
D_NOBRKUP = 0x10
D_INITPUB = 0x20
D_NOSPECMACDATA = 0x40
D_RDWEQ = 0x80
SECMASK = (D_INITPUB|D_NOSPECMACDATA|D_RDWEQ)
DAF_REQDMA = 0x1
DAF_PHYSREQ = 0x2
DAF_PRE8 = 0x4
DAF_STATIC = 0x8
DAF_STR = 0x10
D_MP = 0x100
D_UPF = 0x200
ROOTFS_NAMESZ = 7
FMNAMESZ = 8
MCD_VERSION = 1
DI_BCBP = 0
DI_MEDIA = 1
# Included from sys/secsys.h
ES_MACOPENLID = 1
ES_MACSYSLID = 2
ES_MACROOTLID = 3
ES_PRVINFO = 4
ES_PRVSETCNT = 5
ES_PRVSETS = 6
ES_MACADTLID = 7
ES_PRVID = 8
ES_TPGETMAJOR = 9
SA_EXEC = 0o01
SA_WRITE = 0o02
SA_READ = 0o04
SA_SUBSIZE = 0o10
# Included from sys/stropts_f.h
X_STR = (ord('S')<<8)
X_I_BASE = (X_STR|0o200)
X_I_NREAD = (X_STR|0o201)
X_I_PUSH = (X_STR|0o202)
X_I_POP = (X_STR|0o203)
X_I_LOOK = (X_STR|0o204)
X_I_FLUSH = (X_STR|0o205)
X_I_SRDOPT = (X_STR|0o206)
X_I_GRDOPT = (X_STR|0o207)
X_I_STR = (X_STR|0o210)
X_I_SETSIG = (X_STR|0o211)
X_I_GETSIG = (X_STR|0o212)
X_I_FIND = (X_STR|0o213)
X_I_LINK = (X_STR|0o214)
X_I_UNLINK = (X_STR|0o215)
X_I_PEEK = (X_STR|0o217)
X_I_FDINSERT = (X_STR|0o220)
X_I_SENDFD = (X_STR|0o221)
X_I_RECVFD = (X_STR|0o222)
# Included from unistd.h
# Included from sys/unistd.h
R_OK = 0o04
W_OK = 0o02
X_OK = 0o01
F_OK = 000
EFF_ONLY_OK = 0o10
EX_OK = 0o20
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
_SC_ARG_MAX = 1
_SC_CHILD_MAX = 2
_SC_CLK_TCK = 3
_SC_NGROUPS_MAX = 4
_SC_OPEN_MAX = 5
_SC_JOB_CONTROL = 6
_SC_SAVED_IDS = 7
_SC_VERSION = 8
_SC_PASS_MAX = 9
_SC_LOGNAME_MAX = 10
_SC_PAGESIZE = 11
_SC_PAGE_SIZE = _SC_PAGESIZE
_SC_XOPEN_VERSION = 12
_SC_NACLS_MAX = 13
_SC_NPROCESSORS_CONF = 14
_SC_NPROCESSORS_ONLN = 15
_SC_NPROCESSES = 39
_SC_TOTAL_MEMORY = 40
_SC_USEABLE_MEMORY = 41
_SC_GENERAL_MEMORY = 42
_SC_DEDICATED_MEMORY = 43
_SC_NCGS_CONF = 44
_SC_NCGS_ONLN = 45
_SC_MAX_CPUS_PER_CG = 46
_SC_CG_SIMPLE_IMPL = 47
_SC_CACHE_LINE = 48
_SC_SYSTEM_ID = 49
_SC_THREADS = 51
_SC_THREAD_ATTR_STACKADDR = 52
_SC_THREAD_ATTR_STACKSIZE = 53
_SC_THREAD_DESTRUCTOR_ITERATIONS = 54
_SC_THREAD_KEYS_MAX = 55
_SC_THREAD_PRIORITY_SCHEDULING = 56
_SC_THREAD_PRIO_INHERIT = 57
_SC_THREAD_PRIO_PROTECT = 58
_SC_THREAD_STACK_MIN = 59
_SC_THREAD_PROCESS_SHARED = 60
_SC_THREAD_SAFE_FUNCTIONS = 61
_SC_THREAD_THREADS_MAX = 62
_SC_KERNEL_VM = 63
_SC_TZNAME_MAX = 320
_SC_STREAM_MAX = 321
_SC_XOPEN_CRYPT = 323
_SC_XOPEN_ENH_I18N = 324
_SC_XOPEN_SHM = 325
_SC_XOPEN_XCU_VERSION = 327
_SC_AES_OS_VERSION = 330
_SC_ATEXIT_MAX = 331
_SC_2_C_BIND = 350
_SC_2_C_DEV = 351
_SC_2_C_VERSION = 352
_SC_2_CHAR_TERM = 353
_SC_2_FORT_DEV = 354
_SC_2_FORT_RUN = 355
_SC_2_LOCALEDEF = 356
_SC_2_SW_DEV = 357
_SC_2_UPE = 358
_SC_2_VERSION = 359
_SC_BC_BASE_MAX = 370
_SC_BC_DIM_MAX = 371
_SC_BC_SCALE_MAX = 372
_SC_BC_STRING_MAX = 373
_SC_COLL_WEIGHTS_MAX = 380
_SC_EXPR_NEST_MAX = 381
_SC_LINE_MAX = 382
_SC_RE_DUP_MAX = 383
_SC_IOV_MAX = 390
_SC_NPROC_CONF = 391
_SC_NPROC_ONLN = 392
_SC_XOPEN_UNIX = 400
_SC_SEMAPHORES = 440
_CS_PATH = 1
__O_CS_HOSTNAME = 2
_CS_RELEASE = 3
_CS_VERSION = 4
__O_CS_MACHINE = 5
__O_CS_ARCHITECTURE = 6
_CS_HW_SERIAL = 7
__O_CS_HW_PROVIDER = 8
_CS_SRPC_DOMAIN = 9
_CS_INITTAB_NAME = 10
__O_CS_SYSNAME = 11
_CS_LFS_CFLAGS = 20
_CS_LFS_LDFLAGS = 21
_CS_LFS_LIBS = 22
_CS_LFS_LINTFLAGS = 23
_CS_LFS64_CFLAGS = 24
_CS_LFS64_LDFLAGS = 25
_CS_LFS64_LIBS = 26
_CS_LFS64_LINTFLAGS = 27
_CS_ARCHITECTURE = 100
_CS_BUSTYPES = 101
_CS_HOSTNAME = 102
_CS_HW_PROVIDER = 103
_CS_KERNEL_STAMP = 104
_CS_MACHINE = 105
_CS_OS_BASE = 106
_CS_OS_PROVIDER = 107
_CS_SYSNAME = 108
_CS_USER_LIMIT = 109
_PC_LINK_MAX = 1
_PC_MAX_CANON = 2
_PC_MAX_INPUT = 3
_PC_NAME_MAX = 4
_PC_PATH_MAX = 5
_PC_PIPE_BUF = 6
_PC_NO_TRUNC = 7
_PC_VDISABLE = 8
_PC_CHOWN_RESTRICTED = 9
_PC_FILESIZEBITS = 10
_POSIX_VERSION = 199009
_XOPEN_VERSION = 4
GF_PATH = "/etc/group"
PF_PATH = "/etc/passwd"
F_ULOCK = 0
F_LOCK = 1
F_TLOCK = 2
F_TEST = 3
_POSIX_JOB_CONTROL = 1
_POSIX_SAVED_IDS = 1
_POSIX_VDISABLE = 0
NULL = 0
STDIN_FILENO = 0
STDOUT_FILENO = 1
STDERR_FILENO = 2
_XOPEN_UNIX = 1
_XOPEN_ENH_I18N = 1
_XOPEN_XPG4 = 1
_POSIX2_C_VERSION = 199209
_POSIX2_VERSION = 199209
_XOPEN_XCU_VERSION = 4
_POSIX_SEMAPHORES = 1
_POSIX_THREADS = 1
_POSIX_THREAD_ATTR_STACKADDR = 1
_POSIX_THREAD_ATTR_STACKSIZE = 1
_POSIX_THREAD_PRIORITY_SCHEDULING = 1
_POSIX_THREAD_PROCESS_SHARED = 1
_POSIX_THREAD_SAFE_FUNCTIONS = 1
_POSIX2_C_BIND = 1
_POSIX2_CHAR_TERM = 1
_POSIX2_FORT_RUN = 1
_POSIX2_LOCALEDEF = 1
_POSIX2_UPE = 1
_LFS_ASYNCHRONOUS_IO = 1
_LFS_LARGEFILE = 1
_LFS64_ASYNCHRONOUS_IO = 1
_LFS64_LARGEFILE = 1
_LFS64_STDIO = 1
FMNAMESZ = 8
SNDZERO = 0x001
SNDPIPE = 0x002
RNORM = 0x000
RMSGD = 0x001
RMSGN = 0x002
RMODEMASK = 0x003
RPROTDAT = 0x004
RPROTDIS = 0x008
RPROTNORM = 0x010
RPROTMASK = 0x01c
FLUSHR = 0x01
FLUSHW = 0x02
FLUSHRW = 0x03
FLUSHBAND = 0x04
S_INPUT = 0x0001
S_HIPRI = 0x0002
S_OUTPUT = 0x0004
S_MSG = 0x0008
S_ERROR = 0x0010
S_HANGUP = 0x0020
S_RDNORM = 0x0040
S_WRNORM = S_OUTPUT
S_RDBAND = 0x0080
S_WRBAND = 0x0100
S_BANDURG = 0x0200
RS_HIPRI = 0x01
MSG_HIPRI = 0x01
MSG_ANY = 0x02
MSG_BAND = 0x04
MSG_DISCARD = 0x08
MSG_PEEKIOCTL = 0x10
MORECTL = 1
MOREDATA = 2
MUXID_ALL = (-1)
ANYMARK = 0x01
LASTMARK = 0x02
STR = (ord('S')<<8)
I_NREAD = (STR|0o1)
I_PUSH = (STR|0o2)
I_POP = (STR|0o3)
I_LOOK = (STR|0o4)
I_FLUSH = (STR|0o5)
I_SRDOPT = (STR|0o6)
I_GRDOPT = (STR|0o7)
I_STR = (STR|0o10)
I_SETSIG = (STR|0o11)
I_GETSIG = (STR|0o12)
I_FIND = (STR|0o13)
I_LINK = (STR|0o14)
I_UNLINK = (STR|0o15)
I_PEEK = (STR|0o17)
I_FDINSERT = (STR|0o20)
I_SENDFD = (STR|0o21)
I_RECVFD = (STR|0o22)
I_E_RECVFD = (STR|0o16)
I_RECVFD = (STR|0o16)
I_RECVFD = (STR|0o22)
I_SWROPT = (STR|0o23)
I_GWROPT = (STR|0o24)
I_LIST = (STR|0o25)
I_PLINK = (STR|0o26)
I_PUNLINK = (STR|0o27)
I_FLUSHBAND = (STR|0o34)
I_CKBAND = (STR|0o35)
I_GETBAND = (STR|0o36)
I_ATMARK = (STR|0o37)
I_SETCLTIME = (STR|0o40)
I_GETCLTIME = (STR|0o41)
I_CANPUT = (STR|0o42)
I_S_RECVFD = (STR|0o43)
I_STATS = (STR|0o44)
I_BIGPIPE = (STR|0o45)
I_GETTP = (STR|0o46)
INFTIM = -1
| apache-2.0 |
yceruto/django | tests/model_fields/tests.py | 6 | 26311 | from __future__ import unicode_literals
import datetime
from decimal import Decimal
import unittest
import warnings
from django import test
from django import forms
from django.core.exceptions import ValidationError
from django.db import connection, models, IntegrityError
from django.db.models.fields import (
AutoField, BigIntegerField, BinaryField, BooleanField, CharField,
CommaSeparatedIntegerField, DateField, DateTimeField, DecimalField,
EmailField, FilePathField, FloatField, IntegerField, IPAddressField,
GenericIPAddressField, NOT_PROVIDED, NullBooleanField, PositiveIntegerField,
PositiveSmallIntegerField, SlugField, SmallIntegerField, TextField,
TimeField, URLField)
from django.db.models.fields.files import FileField, ImageField
from django.utils import six
from django.utils.functional import lazy
from .models import (
Foo, Bar, Whiz, BigD, BigS, BigInt, Post, NullBooleanModel,
BooleanModel, DataModel, Document, RenamedField,
VerboseNameField, FksToBooleans)
class BasicFieldTests(test.TestCase):
def test_show_hidden_initial(self):
"""
Regression test for #12913. Make sure fields with choices respect
show_hidden_initial as a kwarg to models.Field.formfield()
"""
choices = [(0, 0), (1, 1)]
model_field = models.Field(choices=choices)
form_field = model_field.formfield(show_hidden_initial=True)
self.assertTrue(form_field.show_hidden_initial)
form_field = model_field.formfield(show_hidden_initial=False)
self.assertFalse(form_field.show_hidden_initial)
def test_nullbooleanfield_blank(self):
"""
Regression test for #13071: NullBooleanField should not throw
a validation error when given a value of None.
"""
nullboolean = NullBooleanModel(nbfield=None)
try:
nullboolean.full_clean()
except ValidationError as e:
self.fail("NullBooleanField failed validation with value of None: %s" % e.messages)
def test_field_repr(self):
"""
Regression test for #5931: __repr__ of a field also displays its name
"""
f = Foo._meta.get_field('a')
self.assertEqual(repr(f), '<django.db.models.fields.CharField: a>')
f = models.fields.CharField()
self.assertEqual(repr(f), '<django.db.models.fields.CharField>')
def test_field_name(self):
"""
Regression test for #14695: explicitly defined field name overwritten
by model's attribute name.
"""
instance = RenamedField()
self.assertTrue(hasattr(instance, 'get_fieldname_display'))
self.assertFalse(hasattr(instance, 'get_modelname_display'))
def test_field_verbose_name(self):
m = VerboseNameField
for i in range(1, 23):
self.assertEqual(m._meta.get_field('field%d' % i).verbose_name,
'verbose field%d' % i)
self.assertEqual(m._meta.get_field('id').verbose_name, 'verbose pk')
def test_choices_form_class(self):
"""Can supply a custom choices form class. Regression for #20999."""
choices = [('a', 'a')]
field = models.CharField(choices=choices)
klass = forms.TypedMultipleChoiceField
self.assertIsInstance(field.formfield(choices_form_class=klass), klass)
class DecimalFieldTests(test.TestCase):
def test_to_python(self):
f = models.DecimalField(max_digits=4, decimal_places=2)
self.assertEqual(f.to_python(3), Decimal("3"))
self.assertEqual(f.to_python("3.14"), Decimal("3.14"))
self.assertRaises(ValidationError, f.to_python, "abc")
def test_default(self):
f = models.DecimalField(default=Decimal("0.00"))
self.assertEqual(f.get_default(), Decimal("0.00"))
def test_format(self):
f = models.DecimalField(max_digits=5, decimal_places=1)
self.assertEqual(f._format(f.to_python(2)), '2.0')
self.assertEqual(f._format(f.to_python('2.6')), '2.6')
self.assertEqual(f._format(None), None)
def test_get_db_prep_lookup(self):
from django.db import connection
f = models.DecimalField(max_digits=5, decimal_places=1)
self.assertEqual(f.get_db_prep_lookup('exact', None, connection=connection), [None])
def test_filter_with_strings(self):
"""
We should be able to filter decimal fields using strings (#8023)
"""
Foo.objects.create(id=1, a='abc', d=Decimal("12.34"))
self.assertEqual(list(Foo.objects.filter(d='1.23')), [])
def test_save_without_float_conversion(self):
"""
Ensure decimals don't go through a corrupting float conversion during
save (#5079).
"""
bd = BigD(d="12.9")
bd.save()
bd = BigD.objects.get(pk=bd.pk)
self.assertEqual(bd.d, Decimal("12.9"))
def test_lookup_really_big_value(self):
"""
Ensure that really big values can be used in a filter statement, even
with older Python versions.
"""
# This should not crash. That counts as a win for our purposes.
Foo.objects.filter(d__gte=100000000000)
class ForeignKeyTests(test.TestCase):
def test_callable_default(self):
"""Test the use of a lazy callable for ForeignKey.default"""
a = Foo.objects.create(id=1, a='abc', d=Decimal("12.34"))
b = Bar.objects.create(b="bcd")
self.assertEqual(b.a, a)
class DateTimeFieldTests(unittest.TestCase):
def test_datetimefield_to_python_usecs(self):
"""DateTimeField.to_python should support usecs"""
f = models.DateTimeField()
self.assertEqual(f.to_python('2001-01-02 03:04:05.000006'),
datetime.datetime(2001, 1, 2, 3, 4, 5, 6))
self.assertEqual(f.to_python('2001-01-02 03:04:05.999999'),
datetime.datetime(2001, 1, 2, 3, 4, 5, 999999))
def test_timefield_to_python_usecs(self):
"""TimeField.to_python should support usecs"""
f = models.TimeField()
self.assertEqual(f.to_python('01:02:03.000004'),
datetime.time(1, 2, 3, 4))
self.assertEqual(f.to_python('01:02:03.999999'),
datetime.time(1, 2, 3, 999999))
class BooleanFieldTests(unittest.TestCase):
def _test_get_db_prep_lookup(self, f):
from django.db import connection
self.assertEqual(f.get_db_prep_lookup('exact', True, connection=connection), [True])
self.assertEqual(f.get_db_prep_lookup('exact', '1', connection=connection), [True])
self.assertEqual(f.get_db_prep_lookup('exact', 1, connection=connection), [True])
self.assertEqual(f.get_db_prep_lookup('exact', False, connection=connection), [False])
self.assertEqual(f.get_db_prep_lookup('exact', '0', connection=connection), [False])
self.assertEqual(f.get_db_prep_lookup('exact', 0, connection=connection), [False])
self.assertEqual(f.get_db_prep_lookup('exact', None, connection=connection), [None])
def _test_to_python(self, f):
self.assertTrue(f.to_python(1) is True)
self.assertTrue(f.to_python(0) is False)
def test_booleanfield_get_db_prep_lookup(self):
self._test_get_db_prep_lookup(models.BooleanField())
def test_nullbooleanfield_get_db_prep_lookup(self):
self._test_get_db_prep_lookup(models.NullBooleanField())
def test_booleanfield_to_python(self):
self._test_to_python(models.BooleanField())
def test_nullbooleanfield_to_python(self):
self._test_to_python(models.NullBooleanField())
def test_booleanfield_choices_blank(self):
"""
Test that BooleanField with choices and defaults doesn't generate a
formfield with the blank option (#9640, #10549).
"""
choices = [(1, 'Si'), (2, 'No')]
f = models.BooleanField(choices=choices, default=1, null=True)
self.assertEqual(f.formfield().choices, [('', '---------')] + choices)
f = models.BooleanField(choices=choices, default=1, null=False)
self.assertEqual(f.formfield().choices, choices)
def test_return_type(self):
b = BooleanModel()
b.bfield = True
b.save()
b2 = BooleanModel.objects.get(pk=b.pk)
self.assertIsInstance(b2.bfield, bool)
self.assertEqual(b2.bfield, True)
b3 = BooleanModel()
b3.bfield = False
b3.save()
b4 = BooleanModel.objects.get(pk=b3.pk)
self.assertIsInstance(b4.bfield, bool)
self.assertEqual(b4.bfield, False)
b = NullBooleanModel()
b.nbfield = True
b.save()
b2 = NullBooleanModel.objects.get(pk=b.pk)
self.assertIsInstance(b2.nbfield, bool)
self.assertEqual(b2.nbfield, True)
b3 = NullBooleanModel()
b3.nbfield = False
b3.save()
b4 = NullBooleanModel.objects.get(pk=b3.pk)
self.assertIsInstance(b4.nbfield, bool)
self.assertEqual(b4.nbfield, False)
# http://code.djangoproject.com/ticket/13293
# Verify that when an extra clause exists, the boolean
# conversions are applied with an offset
b5 = BooleanModel.objects.all().extra(
select={'string_col': 'string'})[0]
self.assertFalse(isinstance(b5.pk, bool))
def test_select_related(self):
"""
Test type of boolean fields when retrieved via select_related() (MySQL,
#15040)
"""
bmt = BooleanModel.objects.create(bfield=True)
bmf = BooleanModel.objects.create(bfield=False)
nbmt = NullBooleanModel.objects.create(nbfield=True)
nbmf = NullBooleanModel.objects.create(nbfield=False)
m1 = FksToBooleans.objects.create(bf=bmt, nbf=nbmt)
m2 = FksToBooleans.objects.create(bf=bmf, nbf=nbmf)
# Test select_related('fk_field_name')
ma = FksToBooleans.objects.select_related('bf').get(pk=m1.id)
        # verify types -- shouldn't be 0/1
self.assertIsInstance(ma.bf.bfield, bool)
self.assertIsInstance(ma.nbf.nbfield, bool)
# verify values
self.assertEqual(ma.bf.bfield, True)
self.assertEqual(ma.nbf.nbfield, True)
# Test select_related()
mb = FksToBooleans.objects.select_related().get(pk=m1.id)
mc = FksToBooleans.objects.select_related().get(pk=m2.id)
# verify types -- shouldn't be 0/1
self.assertIsInstance(mb.bf.bfield, bool)
self.assertIsInstance(mb.nbf.nbfield, bool)
self.assertIsInstance(mc.bf.bfield, bool)
self.assertIsInstance(mc.nbf.nbfield, bool)
# verify values
self.assertEqual(mb.bf.bfield, True)
self.assertEqual(mb.nbf.nbfield, True)
self.assertEqual(mc.bf.bfield, False)
self.assertEqual(mc.nbf.nbfield, False)
def test_null_default(self):
"""
Check that a BooleanField defaults to None -- which isn't
a valid value (#15124).
"""
# Patch the boolean field's default value. We give it a default
# value when defining the model to satisfy the check tests
# #20895.
boolean_field = BooleanModel._meta.get_field('bfield')
self.assertTrue(boolean_field.has_default())
old_default = boolean_field.default
try:
boolean_field.default = NOT_PROVIDED
            # check patch was successful
self.assertFalse(boolean_field.has_default())
b = BooleanModel()
self.assertIsNone(b.bfield)
with self.assertRaises(IntegrityError):
b.save()
finally:
boolean_field.default = old_default
nb = NullBooleanModel()
self.assertIsNone(nb.nbfield)
nb.save() # no error
class ChoicesTests(test.TestCase):
def test_choices_and_field_display(self):
"""
Check that get_choices and get_flatchoices interact with
get_FIELD_display to return the expected values (#7913).
"""
self.assertEqual(Whiz(c=1).get_c_display(), 'First') # A nested value
self.assertEqual(Whiz(c=0).get_c_display(), 'Other') # A top level value
self.assertEqual(Whiz(c=9).get_c_display(), 9) # Invalid value
self.assertEqual(Whiz(c=None).get_c_display(), None) # Blank value
self.assertEqual(Whiz(c='').get_c_display(), '') # Empty value
class SlugFieldTests(test.TestCase):
def test_slugfield_max_length(self):
"""
Make sure SlugField honors max_length (#9706)
"""
bs = BigS.objects.create(s='slug' * 50)
bs = BigS.objects.get(pk=bs.pk)
self.assertEqual(bs.s, 'slug' * 50)
class ValidationTest(test.TestCase):
def test_charfield_raises_error_on_empty_string(self):
f = models.CharField()
self.assertRaises(ValidationError, f.clean, "", None)
def test_charfield_cleans_empty_string_when_blank_true(self):
f = models.CharField(blank=True)
self.assertEqual('', f.clean('', None))
def test_integerfield_cleans_valid_string(self):
f = models.IntegerField()
self.assertEqual(2, f.clean('2', None))
    def test_integerfield_raises_error_on_invalid_input(self):
f = models.IntegerField()
self.assertRaises(ValidationError, f.clean, "a", None)
def test_charfield_with_choices_cleans_valid_choice(self):
f = models.CharField(max_length=1,
choices=[('a', 'A'), ('b', 'B')])
self.assertEqual('a', f.clean('a', None))
def test_charfield_with_choices_raises_error_on_invalid_choice(self):
f = models.CharField(choices=[('a', 'A'), ('b', 'B')])
self.assertRaises(ValidationError, f.clean, "not a", None)
def test_charfield_get_choices_with_blank_defined(self):
f = models.CharField(choices=[('', '<><>'), ('a', 'A')])
self.assertEqual(f.get_choices(True), [('', '<><>'), ('a', 'A')])
def test_choices_validation_supports_named_groups(self):
f = models.IntegerField(
choices=(('group', ((10, 'A'), (20, 'B'))), (30, 'C')))
self.assertEqual(10, f.clean(10, None))
def test_nullable_integerfield_raises_error_with_blank_false(self):
f = models.IntegerField(null=True, blank=False)
self.assertRaises(ValidationError, f.clean, None, None)
def test_nullable_integerfield_cleans_none_on_null_and_blank_true(self):
f = models.IntegerField(null=True, blank=True)
self.assertEqual(None, f.clean(None, None))
def test_integerfield_raises_error_on_empty_input(self):
f = models.IntegerField(null=False)
self.assertRaises(ValidationError, f.clean, None, None)
self.assertRaises(ValidationError, f.clean, '', None)
def test_integerfield_validates_zero_against_choices(self):
f = models.IntegerField(choices=((1, 1),))
self.assertRaises(ValidationError, f.clean, '0', None)
def test_charfield_raises_error_on_empty_input(self):
f = models.CharField(null=False)
self.assertRaises(ValidationError, f.clean, None, None)
def test_datefield_cleans_date(self):
f = models.DateField()
self.assertEqual(datetime.date(2008, 10, 10), f.clean('2008-10-10', None))
def test_boolean_field_doesnt_accept_empty_input(self):
f = models.BooleanField()
self.assertRaises(ValidationError, f.clean, None, None)
class BigIntegerFieldTests(test.TestCase):
def test_limits(self):
# Ensure that values that are right at the limits can be saved
# and then retrieved without corruption.
maxval = 9223372036854775807
minval = -maxval - 1
BigInt.objects.create(value=maxval)
qs = BigInt.objects.filter(value__gte=maxval)
self.assertEqual(qs.count(), 1)
self.assertEqual(qs[0].value, maxval)
BigInt.objects.create(value=minval)
qs = BigInt.objects.filter(value__lte=minval)
self.assertEqual(qs.count(), 1)
self.assertEqual(qs[0].value, minval)
def test_types(self):
b = BigInt(value=0)
self.assertIsInstance(b.value, six.integer_types)
b.save()
self.assertIsInstance(b.value, six.integer_types)
b = BigInt.objects.all()[0]
self.assertIsInstance(b.value, six.integer_types)
def test_coercing(self):
BigInt.objects.create(value='10')
b = BigInt.objects.get(value='10')
self.assertEqual(b.value, 10)
class TypeCoercionTests(test.TestCase):
"""
Test that database lookups can accept the wrong types and convert
them with no error: especially on Postgres 8.3+ which does not do
automatic casting at the DB level. See #10015.
"""
def test_lookup_integer_in_charfield(self):
self.assertEqual(Post.objects.filter(title=9).count(), 0)
def test_lookup_integer_in_textfield(self):
self.assertEqual(Post.objects.filter(body=24).count(), 0)
class FileFieldTests(unittest.TestCase):
def test_clearable(self):
"""
Test that FileField.save_form_data will clear its instance attribute
value if passed False.
"""
d = Document(myfile='something.txt')
self.assertEqual(d.myfile, 'something.txt')
field = d._meta.get_field('myfile')
field.save_form_data(d, False)
self.assertEqual(d.myfile, '')
def test_unchanged(self):
"""
Test that FileField.save_form_data considers None to mean "no change"
rather than "clear".
"""
d = Document(myfile='something.txt')
self.assertEqual(d.myfile, 'something.txt')
field = d._meta.get_field('myfile')
field.save_form_data(d, None)
self.assertEqual(d.myfile, 'something.txt')
def test_changed(self):
"""
Test that FileField.save_form_data, if passed a truthy value, updates
its instance attribute.
"""
d = Document(myfile='something.txt')
self.assertEqual(d.myfile, 'something.txt')
field = d._meta.get_field('myfile')
field.save_form_data(d, 'else.txt')
self.assertEqual(d.myfile, 'else.txt')
def test_delete_when_file_unset(self):
"""
Calling delete on an unset FileField should not call the file deletion
process, but fail silently (#20660).
"""
d = Document()
try:
d.myfile.delete()
except OSError:
self.fail("Deleting an unset FileField should not raise OSError.")
class BinaryFieldTests(test.TestCase):
binary_data = b'\x00\x46\xFE'
def test_set_and_retrieve(self):
data_set = (self.binary_data, six.memoryview(self.binary_data))
for bdata in data_set:
dm = DataModel(data=bdata)
dm.save()
dm = DataModel.objects.get(pk=dm.pk)
self.assertEqual(bytes(dm.data), bytes(bdata))
# Resave (=update)
dm.save()
dm = DataModel.objects.get(pk=dm.pk)
self.assertEqual(bytes(dm.data), bytes(bdata))
# Test default value
self.assertEqual(bytes(dm.short_data), b'\x08')
if connection.vendor == 'mysql' and six.PY3:
# Existing MySQL DB-API drivers fail on binary data.
test_set_and_retrieve = unittest.expectedFailure(test_set_and_retrieve)
def test_max_length(self):
dm = DataModel(short_data=self.binary_data * 4)
self.assertRaises(ValidationError, dm.full_clean)
class GenericIPAddressFieldTests(test.TestCase):
def test_genericipaddressfield_formfield_protocol(self):
"""
Test that GenericIPAddressField with a specified protocol does not
generate a formfield with no specified protocol. See #20740.
"""
model_field = models.GenericIPAddressField(protocol='IPv4')
form_field = model_field.formfield()
self.assertRaises(ValidationError, form_field.clean, '::1')
model_field = models.GenericIPAddressField(protocol='IPv6')
form_field = model_field.formfield()
self.assertRaises(ValidationError, form_field.clean, '127.0.0.1')
class PromiseTest(test.TestCase):
def test_AutoField(self):
lazy_func = lazy(lambda: 1, int)
self.assertIsInstance(
AutoField(primary_key=True).get_prep_value(lazy_func()),
int)
@unittest.skipIf(six.PY3, "Python 3 has no `long` type.")
def test_BigIntegerField(self):
lazy_func = lazy(lambda: long(9999999999999999999), long)
self.assertIsInstance(
BigIntegerField().get_prep_value(lazy_func()),
long)
def test_BinaryField(self):
lazy_func = lazy(lambda: b'', bytes)
self.assertIsInstance(
BinaryField().get_prep_value(lazy_func()),
bytes)
def test_BooleanField(self):
lazy_func = lazy(lambda: True, bool)
self.assertIsInstance(
BooleanField().get_prep_value(lazy_func()),
bool)
def test_CharField(self):
lazy_func = lazy(lambda: '', six.text_type)
self.assertIsInstance(
CharField().get_prep_value(lazy_func()),
six.text_type)
def test_CommaSeparatedIntegerField(self):
lazy_func = lazy(lambda: '1,2', six.text_type)
self.assertIsInstance(
CommaSeparatedIntegerField().get_prep_value(lazy_func()),
six.text_type)
def test_DateField(self):
lazy_func = lazy(lambda: datetime.date.today(), datetime.date)
self.assertIsInstance(
DateField().get_prep_value(lazy_func()),
datetime.date)
def test_DateTimeField(self):
lazy_func = lazy(lambda: datetime.datetime.now(), datetime.datetime)
self.assertIsInstance(
DateTimeField().get_prep_value(lazy_func()),
datetime.datetime)
def test_DecimalField(self):
lazy_func = lazy(lambda: Decimal('1.2'), Decimal)
self.assertIsInstance(
DecimalField().get_prep_value(lazy_func()),
Decimal)
def test_EmailField(self):
lazy_func = lazy(lambda: '[email protected]', six.text_type)
self.assertIsInstance(
EmailField().get_prep_value(lazy_func()),
six.text_type)
def test_FileField(self):
lazy_func = lazy(lambda: 'filename.ext', six.text_type)
self.assertIsInstance(
FileField().get_prep_value(lazy_func()),
six.text_type)
def test_FilePathField(self):
lazy_func = lazy(lambda: 'tests.py', six.text_type)
self.assertIsInstance(
FilePathField().get_prep_value(lazy_func()),
six.text_type)
def test_FloatField(self):
lazy_func = lazy(lambda: 1.2, float)
self.assertIsInstance(
FloatField().get_prep_value(lazy_func()),
float)
def test_ImageField(self):
lazy_func = lazy(lambda: 'filename.ext', six.text_type)
self.assertIsInstance(
ImageField().get_prep_value(lazy_func()),
six.text_type)
def test_IntegerField(self):
lazy_func = lazy(lambda: 1, int)
self.assertIsInstance(
IntegerField().get_prep_value(lazy_func()),
int)
def test_IPAddressField(self):
lazy_func = lazy(lambda: '127.0.0.1', six.text_type)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
self.assertIsInstance(
IPAddressField().get_prep_value(lazy_func()),
six.text_type)
def test_GenericIPAddressField(self):
lazy_func = lazy(lambda: '127.0.0.1', six.text_type)
self.assertIsInstance(
GenericIPAddressField().get_prep_value(lazy_func()),
six.text_type)
def test_NullBooleanField(self):
lazy_func = lazy(lambda: True, bool)
self.assertIsInstance(
NullBooleanField().get_prep_value(lazy_func()),
bool)
def test_PositiveIntegerField(self):
lazy_func = lazy(lambda: 1, int)
self.assertIsInstance(
PositiveIntegerField().get_prep_value(lazy_func()),
int)
def test_PositiveSmallIntegerField(self):
lazy_func = lazy(lambda: 1, int)
self.assertIsInstance(
PositiveSmallIntegerField().get_prep_value(lazy_func()),
int)
def test_SlugField(self):
lazy_func = lazy(lambda: 'slug', six.text_type)
self.assertIsInstance(
SlugField().get_prep_value(lazy_func()),
six.text_type)
def test_SmallIntegerField(self):
lazy_func = lazy(lambda: 1, int)
self.assertIsInstance(
SmallIntegerField().get_prep_value(lazy_func()),
int)
def test_TextField(self):
lazy_func = lazy(lambda: 'Abc', six.text_type)
self.assertIsInstance(
TextField().get_prep_value(lazy_func()),
six.text_type)
def test_TimeField(self):
lazy_func = lazy(lambda: datetime.datetime.now().time(), datetime.time)
self.assertIsInstance(
TimeField().get_prep_value(lazy_func()),
datetime.time)
def test_URLField(self):
lazy_func = lazy(lambda: 'http://domain.com', six.text_type)
self.assertIsInstance(
URLField().get_prep_value(lazy_func()),
six.text_type)
class CustomFieldTests(unittest.TestCase):
def test_14786(self):
"""
Regression test for #14786 -- Test that field values are not prepared
twice in get_db_prep_lookup().
"""
class NoopField(models.TextField):
def __init__(self, *args, **kwargs):
self.prep_value_count = 0
super(NoopField, self).__init__(*args, **kwargs)
def get_prep_value(self, value):
self.prep_value_count += 1
return super(NoopField, self).get_prep_value(value)
field = NoopField()
field.get_db_prep_lookup(
'exact', 'TEST', connection=connection, prepared=False
)
self.assertEqual(field.prep_value_count, 1)
| bsd-3-clause |
gdooper/scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_Z.py | 47 | 6803 | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from numpy import abs, sum, sign, arange
from .go_benchmark import Benchmark
class Zacharov(Benchmark):
r"""
Zacharov objective function.
This class defines the Zacharov [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Zacharov}}(x) = \sum_{i=1}^{n} x_i^2 + \left ( \frac{1}{2}
\sum_{i=1}^{n} i x_i \right )^2
+ \left ( \frac{1}{2} \sum_{i=1}^{n} i x_i
\right )^4
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-5, 10]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
:math:`i = 1, ..., n`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-5.0] * self.N, [10.0] * self.N))
self.custom_bounds = ([-1, 1], [-1, 1])
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
u = sum(x ** 2)
v = sum(arange(1, self.N + 1) * x)
return u + (0.5 * v) ** 2 + (0.5 * v) ** 4
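    # Illustrative note (not in the original file): at the global optimum
    # x = [0, ..., 0] both the quadratic term and the weighted sum vanish,
    # so e.g. Zacharov(dimensions=2).fun(numpy.zeros(2)) returns 0.0,
    # matching self.fglob.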
class ZeroSum(Benchmark):
r"""
ZeroSum objective function.
This class defines the ZeroSum [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{ZeroSum}}(x) = \begin{cases}
0 & \textrm{if} \sum_{i=1}^n x_i = 0 \\
1 + \left(10000 \left |\sum_{i=1}^n x_i\right|
\right)^{0.5} & \textrm{otherwise}
\end{cases}
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-10, 10]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` where :math:`\sum_{i=1}^n x_i = 0`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.global_optimum = [[]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
if abs(sum(x)) < 3e-16:
return 0.0
return 1.0 + (10000.0 * abs(sum(x))) ** 0.5
class Zettl(Benchmark):
r"""
Zettl objective function.
This class defines the Zettl [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Zettl}}(x) = \frac{1}{4} x_{1} + \left(x_{1}^{2} - 2 x_{1}
+ x_{2}^{2}\right)^{2}
with :math:`x_i \in [-1, 5]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = -0.0037912` for :math:`x = [-0.029896, 0.0]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-5.0] * self.N, [10.0] * self.N))
self.global_optimum = [[-0.02989597760285287, 0.0]]
self.fglob = -0.003791237220468656
def fun(self, x, *args):
self.nfev += 1
return (x[0] ** 2 + x[1] ** 2 - 2 * x[0]) ** 2 + 0.25 * x[0]
class Zimmerman(Benchmark):
r"""
Zimmerman objective function.
This class defines the Zimmerman [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Zimmerman}}(x) = \max \left[Zh1(x), Zp(Zh2(x))
\textrm{sgn}(Zh2(x)), Zp(Zh3(x))
\textrm{sgn}(Zh3(x)),
Zp(-x_1)\textrm{sgn}(x_1),
Zp(-x_2)\textrm{sgn}(x_2) \right]
Where, in this exercise:
.. math::
\begin{cases}
Zh1(x) = 9 - x_1 - x_2 \\
Zh2(x) = (x_1 - 3)^2 + (x_2 - 2)^2 \\
Zh3(x) = x_1x_2 - 14 \\
Zp(t) = 100(1 + t)
\end{cases}
Where :math:`x` is a vector and :math:`t` is a scalar.
Here, :math:`x_i \in [0, 100]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [7, 2]`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
TODO implementation from Gavana
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([0.0] * self.N, [100.0] * self.N))
self.custom_bounds = ([0.0, 8.0], [0.0, 8.0])
self.global_optimum = [[7.0, 2.0]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
Zh1 = lambda x: 9.0 - x[0] - x[1]
Zh2 = lambda x: (x[0] - 3.0) ** 2.0 + (x[1] - 2.0) ** 2.0 - 16.0
Zh3 = lambda x: x[0] * x[1] - 14.0
Zp = lambda x: 100.0 * (1.0 + x)
return max(Zh1(x),
Zp(Zh2(x)) * sign(Zh2(x)),
Zp(Zh3(x)) * sign(Zh3(x)),
Zp(-x[0]) * sign(x[0]),
Zp(-x[1]) * sign(x[1]))
class Zirilli(Benchmark):
r"""
    Zirilli objective function.
This class defines the Zirilli [1]_ global optimization problem. This is a
unimodal minimization problem defined as follows:
.. math::
f_{\text{Zirilli}}(x) = 0.25x_1^4 - 0.5x_1^2 + 0.1x_1 + 0.5x_2^2
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = -0.3523` for :math:`x = [-1.0465, 0]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.custom_bounds = ([-2.0, 2.0], [-2.0, 2.0])
self.global_optimum = [[-1.0465, 0.0]]
self.fglob = -0.35238603
def fun(self, x, *args):
self.nfev += 1
return 0.25 * x[0] ** 4 - 0.5 * x[0] ** 2 + 0.1 * x[0] + 0.5 * x[1] ** 2
| bsd-3-clause |
absperf/wagtailapproval | wagtailapproval/menu.py | 1 | 3637 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import itertools
from django.contrib.auth import get_user
from django.core.urlresolvers import reverse, reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy as _n
from wagtail.wagtailadmin import messages
from wagtail.wagtailadmin.menu import MenuItem
from .models import ApprovalStep
def get_user_approval_items(user):
'''Get an iterable of all items pending for a user's approval.
:param User user: A user object whose groups are to be checked for
appropriate steps
:rtype: Iterable[ApprovalItem]
:returns: All the items that this user can approve or reject.
'''
if user.is_superuser:
steps = ApprovalStep.objects.all()
else:
groups = user.groups.all()
steps = ApprovalStep.objects.filter(group__in=groups)
return itertools.chain.from_iterable(
step.get_items(user) for step in steps)
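# Illustrative sketch (an assumption, not part of the original module): a view
# or template tag could count a user's pending approvals the same way
# ApprovalMenuItem.is_shown does below, e.g.
#
#   pending = sum(1 for _ in get_user_approval_items(request.user))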
class ApprovalMenuItem(MenuItem):
'''The menu item that shows in the wagtail sidebar'''
def __init__(
self, label=_('Approval'), url=reverse_lazy('wagtailapproval:index'),
classnames='icon icon-tick-inverse', order=201, **kwargs):
super(ApprovalMenuItem, self).__init__(
label,
url,
classnames=classnames,
order=order,
**kwargs)
def is_shown(self, request):
'''Only show the menu if the user is in an owned approval group'''
user = get_user(request)
# If the user is superuser, show the menu if any steps exist at all
if user.is_superuser:
return ApprovalStep.objects.exists()
groups = user.groups.all()
if ApprovalStep.objects.filter(group__in=groups).exists():
# Display the approval notification only outside of the approval
# paths
if not request.path.startswith(reverse('wagtailapproval:index')):
# Get the count of waiting approvals
waiting_approvals = sum(
1 for _ in get_user_approval_items(user))
if waiting_approvals > 0:
messages.info(
request,
_n(
'{num:d} item waiting for approval',
'{num:d} items waiting for approval',
waiting_approvals).format(num=waiting_approvals),
buttons=[
messages.button(
reverse('wagtailapproval:index'),
_('Examine Now'))
]
)
return True
return False
class ApprovalAdminMenuItem(MenuItem):
'''The admin menu item that shows in the wagtail sidebar, for
administrating entire pipelines and manually dropping items into steps.'''
def __init__(
self, label=_('Approval Admin'),
url=reverse_lazy('wagtailapproval:admin_index'),
classnames='icon icon-cog', order=200, **kwargs):
super(ApprovalAdminMenuItem, self).__init__(
label,
url,
classnames=classnames,
order=order,
**kwargs)
def is_shown(self, request):
'''Only show the menu if the user is a superuser and any ApprovalStep
objects exist.'''
user = get_user(request)
if user.is_superuser:
return ApprovalStep.objects.exists()
return False
| bsd-2-clause |
40223240/2015cdb_g3_40223240 | static/Brython3.1.1-20150328-091302/Lib/logging/config.py | 739 | 35619 | # Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Configuration functions for the logging package for Python. The core package
is based on PEP 282 and comments thereto in comp.lang.python, and influenced
by Apache's log4j system.
Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, logging, logging.handlers, socket, struct, traceback, re
import io
try:
import _thread as thread
import threading
except ImportError: #pragma: no cover
thread = None
from socketserver import ThreadingTCPServer, StreamRequestHandler
DEFAULT_LOGGING_CONFIG_PORT = 9030
if sys.platform == "win32":
RESET_ERROR = 10054 #WSAECONNRESET
else:
RESET_ERROR = 104 #ECONNRESET
#
# The following code implements a socket listener for on-the-fly
# reconfiguration of logging.
#
# _listener holds the server object doing the listening
_listener = None
def fileConfig(fname, defaults=None, disable_existing_loggers=True):
"""
Read the logging configuration from a ConfigParser-format file.
This can be called several times from an application, allowing an end user
the ability to select from various pre-canned configurations (if the
developer provides a mechanism to present the choices and load the chosen
configuration).
"""
import configparser
cp = configparser.ConfigParser(defaults)
if hasattr(fname, 'readline'):
cp.read_file(fname)
else:
cp.read(fname)
formatters = _create_formatters(cp)
# critical section
logging._acquireLock()
try:
logging._handlers.clear()
del logging._handlerList[:]
# Handlers add themselves to logging._handlers
handlers = _install_handlers(cp, formatters)
_install_loggers(cp, handlers, disable_existing_loggers)
finally:
logging._releaseLock()
def _resolve(name):
"""Resolve a dotted name to a global object."""
name = name.split('.')
used = name.pop(0)
found = __import__(used)
for n in name:
used = used + '.' + n
try:
found = getattr(found, n)
except AttributeError:
__import__(used)
found = getattr(found, n)
return found
def _strip_spaces(alist):
return map(lambda x: x.strip(), alist)
def _create_formatters(cp):
"""Create and return formatters"""
flist = cp["formatters"]["keys"]
if not len(flist):
return {}
flist = flist.split(",")
flist = _strip_spaces(flist)
formatters = {}
for form in flist:
sectname = "formatter_%s" % form
fs = cp.get(sectname, "format", raw=True, fallback=None)
dfs = cp.get(sectname, "datefmt", raw=True, fallback=None)
c = logging.Formatter
class_name = cp[sectname].get("class")
if class_name:
c = _resolve(class_name)
f = c(fs, dfs)
formatters[form] = f
return formatters
def _install_handlers(cp, formatters):
"""Install and return handlers"""
hlist = cp["handlers"]["keys"]
if not len(hlist):
return {}
hlist = hlist.split(",")
hlist = _strip_spaces(hlist)
handlers = {}
fixups = [] #for inter-handler references
for hand in hlist:
section = cp["handler_%s" % hand]
klass = section["class"]
fmt = section.get("formatter", "")
try:
klass = eval(klass, vars(logging))
except (AttributeError, NameError):
klass = _resolve(klass)
args = section["args"]
args = eval(args, vars(logging))
h = klass(*args)
if "level" in section:
level = section["level"]
h.setLevel(logging._levelNames[level])
if len(fmt):
h.setFormatter(formatters[fmt])
if issubclass(klass, logging.handlers.MemoryHandler):
target = section.get("target", "")
if len(target): #the target handler may not be loaded yet, so keep for later...
fixups.append((h, target))
handlers[hand] = h
#now all handlers are loaded, fixup inter-handler references...
for h, t in fixups:
h.setTarget(handlers[t])
return handlers
def _handle_existing_loggers(existing, child_loggers, disable_existing):
"""
When (re)configuring logging, handle loggers which were in the previous
configuration but are not in the new configuration. There's no point
deleting them as other threads may continue to hold references to them;
and by disabling them, you stop them doing any logging.
However, don't disable children of named loggers, as that's probably not
what was intended by the user. Also, allow existing loggers to NOT be
disabled if disable_existing is false.
"""
root = logging.root
for log in existing:
logger = root.manager.loggerDict[log]
if log in child_loggers:
logger.level = logging.NOTSET
logger.handlers = []
logger.propagate = True
else:
logger.disabled = disable_existing
def _install_loggers(cp, handlers, disable_existing):
"""Create and install loggers"""
# configure the root first
llist = cp["loggers"]["keys"]
llist = llist.split(",")
llist = list(map(lambda x: x.strip(), llist))
llist.remove("root")
section = cp["logger_root"]
root = logging.root
log = root
if "level" in section:
level = section["level"]
log.setLevel(logging._levelNames[level])
for h in root.handlers[:]:
root.removeHandler(h)
hlist = section["handlers"]
if len(hlist):
hlist = hlist.split(",")
hlist = _strip_spaces(hlist)
for hand in hlist:
log.addHandler(handlers[hand])
#and now the others...
#we don't want to lose the existing loggers,
#since other threads may have pointers to them.
#existing is set to contain all existing loggers,
#and as we go through the new configuration we
#remove any which are configured. At the end,
#what's left in existing is the set of loggers
#which were in the previous configuration but
#which are not in the new configuration.
existing = list(root.manager.loggerDict.keys())
#The list needs to be sorted so that we can
#avoid disabling child loggers of explicitly
#named loggers. With a sorted list it is easier
#to find the child loggers.
existing.sort()
#We'll keep the list of existing loggers
#which are children of named loggers here...
child_loggers = []
#now set up the new ones...
for log in llist:
section = cp["logger_%s" % log]
qn = section["qualname"]
propagate = section.getint("propagate", fallback=1)
logger = logging.getLogger(qn)
if qn in existing:
i = existing.index(qn) + 1 # start with the entry after qn
prefixed = qn + "."
pflen = len(prefixed)
num_existing = len(existing)
while i < num_existing:
if existing[i][:pflen] == prefixed:
child_loggers.append(existing[i])
i += 1
existing.remove(qn)
if "level" in section:
level = section["level"]
logger.setLevel(logging._levelNames[level])
for h in logger.handlers[:]:
logger.removeHandler(h)
logger.propagate = propagate
logger.disabled = 0
hlist = section["handlers"]
if len(hlist):
hlist = hlist.split(",")
hlist = _strip_spaces(hlist)
for hand in hlist:
logger.addHandler(handlers[hand])
#Disable any old loggers. There's no point deleting
#them as other threads may continue to hold references
#and by disabling them, you stop them doing any logging.
#However, don't disable children of named loggers, as that's
#probably not what was intended by the user.
#for log in existing:
# logger = root.manager.loggerDict[log]
# if log in child_loggers:
# logger.level = logging.NOTSET
# logger.handlers = []
# logger.propagate = 1
# elif disable_existing_loggers:
# logger.disabled = 1
_handle_existing_loggers(existing, child_loggers, disable_existing)
IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
def valid_ident(s):
m = IDENTIFIER.match(s)
if not m:
raise ValueError('Not a valid Python identifier: %r' % s)
return True
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
class ConvertingDict(dict):
"""A converting dictionary wrapper."""
def __getitem__(self, key):
value = dict.__getitem__(self, key)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def get(self, key, default=None):
value = dict.get(self, key, default)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def pop(self, key, default=None):
value = dict.pop(self, key, default)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
class ConvertingList(list):
"""A converting list wrapper."""
def __getitem__(self, key):
value = list.__getitem__(self, key)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def pop(self, idx=-1):
value = list.pop(self, idx)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
return result
class ConvertingTuple(tuple):
"""A converting tuple wrapper."""
def __getitem__(self, key):
value = tuple.__getitem__(self, key)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
class BaseConfigurator(object):
"""
The configurator base class which defines some useful defaults.
"""
CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
DIGIT_PATTERN = re.compile(r'^\d+$')
value_converters = {
'ext' : 'ext_convert',
'cfg' : 'cfg_convert',
}
# We might want to use a different one, e.g. importlib
importer = staticmethod(__import__)
def __init__(self, config):
self.config = ConvertingDict(config)
self.config.configurator = self
def resolve(self, s):
"""
Resolve strings to objects using standard import and attribute
syntax.
"""
name = s.split('.')
used = name.pop(0)
try:
found = self.importer(used)
for frag in name:
used += '.' + frag
try:
found = getattr(found, frag)
except AttributeError:
self.importer(used)
found = getattr(found, frag)
return found
except ImportError:
e, tb = sys.exc_info()[1:]
v = ValueError('Cannot resolve %r: %s' % (s, e))
v.__cause__, v.__traceback__ = e, tb
raise v
def ext_convert(self, value):
"""Default converter for the ext:// protocol."""
return self.resolve(value)
def cfg_convert(self, value):
"""Default converter for the cfg:// protocol."""
rest = value
m = self.WORD_PATTERN.match(rest)
if m is None:
raise ValueError("Unable to convert %r" % value)
else:
rest = rest[m.end():]
d = self.config[m.groups()[0]]
#print d, rest
while rest:
m = self.DOT_PATTERN.match(rest)
if m:
d = d[m.groups()[0]]
else:
m = self.INDEX_PATTERN.match(rest)
if m:
idx = m.groups()[0]
if not self.DIGIT_PATTERN.match(idx):
d = d[idx]
else:
try:
n = int(idx) # try as number first (most likely)
d = d[n]
except TypeError:
d = d[idx]
if m:
rest = rest[m.end():]
else:
raise ValueError('Unable to convert '
'%r at %r' % (value, rest))
#rest should be empty
return d
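    # Illustrative note (not part of the original source): given the patterns
    # above, a value of 'cfg://handlers.console.level' resolves through
    # config['handlers'] -> ['console'] -> ['level'], and a bracketed index
    # like '[0]' is first tried as an integer key before falling back to a
    # string key.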
def convert(self, value):
"""
Convert values to an appropriate type. dicts, lists and tuples are
replaced by their converting alternatives. Strings are checked to
see if they have a conversion format and are converted if they do.
"""
if not isinstance(value, ConvertingDict) and isinstance(value, dict):
value = ConvertingDict(value)
value.configurator = self
elif not isinstance(value, ConvertingList) and isinstance(value, list):
value = ConvertingList(value)
value.configurator = self
elif not isinstance(value, ConvertingTuple) and\
isinstance(value, tuple):
value = ConvertingTuple(value)
value.configurator = self
elif isinstance(value, str): # str for py3k
m = self.CONVERT_PATTERN.match(value)
if m:
d = m.groupdict()
prefix = d['prefix']
converter = self.value_converters.get(prefix, None)
if converter:
suffix = d['suffix']
converter = getattr(self, converter)
value = converter(suffix)
return value
def configure_custom(self, config):
"""Configure an object with a user-supplied factory."""
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
props = config.pop('.', None)
# Check for valid identifiers
kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
result = c(**kwargs)
if props:
for name, value in props.items():
setattr(result, name, value)
return result
def as_tuple(self, value):
"""Utility function which converts lists to tuples."""
if isinstance(value, list):
value = tuple(value)
return value
class DictConfigurator(BaseConfigurator):
"""
Configure logging using a dictionary-like object to describe the
configuration.
"""
def configure(self):
"""Do the configuration."""
config = self.config
if 'version' not in config:
raise ValueError("dictionary doesn't specify a version")
if config['version'] != 1:
raise ValueError("Unsupported version: %s" % config['version'])
incremental = config.pop('incremental', False)
EMPTY_DICT = {}
logging._acquireLock()
try:
if incremental:
handlers = config.get('handlers', EMPTY_DICT)
for name in handlers:
if name not in logging._handlers:
raise ValueError('No handler found with '
'name %r' % name)
else:
try:
handler = logging._handlers[name]
handler_config = handlers[name]
level = handler_config.get('level', None)
if level:
handler.setLevel(logging._checkLevel(level))
except Exception as e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
try:
self.configure_logger(name, loggers[name], True)
except Exception as e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
root = config.get('root', None)
if root:
try:
self.configure_root(root, True)
except Exception as e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
else:
disable_existing = config.pop('disable_existing_loggers', True)
logging._handlers.clear()
del logging._handlerList[:]
# Do formatters first - they don't refer to anything else
formatters = config.get('formatters', EMPTY_DICT)
for name in formatters:
try:
formatters[name] = self.configure_formatter(
formatters[name])
except Exception as e:
raise ValueError('Unable to configure '
'formatter %r: %s' % (name, e))
# Next, do filters - they don't refer to anything else, either
filters = config.get('filters', EMPTY_DICT)
for name in filters:
try:
filters[name] = self.configure_filter(filters[name])
except Exception as e:
raise ValueError('Unable to configure '
'filter %r: %s' % (name, e))
# Next, do handlers - they refer to formatters and filters
# As handlers can refer to other handlers, sort the keys
# to allow a deterministic order of configuration
handlers = config.get('handlers', EMPTY_DICT)
deferred = []
for name in sorted(handlers):
try:
handler = self.configure_handler(handlers[name])
handler.name = name
handlers[name] = handler
except Exception as e:
if 'target not configured yet' in str(e):
deferred.append(name)
else:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
# Now do any that were deferred
for name in deferred:
try:
handler = self.configure_handler(handlers[name])
handler.name = name
handlers[name] = handler
except Exception as e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
# Next, do loggers - they refer to handlers and filters
#we don't want to lose the existing loggers,
#since other threads may have pointers to them.
#existing is set to contain all existing loggers,
#and as we go through the new configuration we
#remove any which are configured. At the end,
#what's left in existing is the set of loggers
#which were in the previous configuration but
#which are not in the new configuration.
root = logging.root
existing = list(root.manager.loggerDict.keys())
#The list needs to be sorted so that we can
#avoid disabling child loggers of explicitly
#named loggers. With a sorted list it is easier
#to find the child loggers.
existing.sort()
#We'll keep the list of existing loggers
#which are children of named loggers here...
child_loggers = []
#now set up the new ones...
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
if name in existing:
i = existing.index(name) + 1 # look after name
prefixed = name + "."
pflen = len(prefixed)
num_existing = len(existing)
while i < num_existing:
if existing[i][:pflen] == prefixed:
child_loggers.append(existing[i])
i += 1
existing.remove(name)
try:
self.configure_logger(name, loggers[name])
except Exception as e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
#Disable any old loggers. There's no point deleting
#them as other threads may continue to hold references
#and by disabling them, you stop them doing any logging.
#However, don't disable children of named loggers, as that's
#probably not what was intended by the user.
#for log in existing:
# logger = root.manager.loggerDict[log]
# if log in child_loggers:
# logger.level = logging.NOTSET
# logger.handlers = []
# logger.propagate = True
# elif disable_existing:
# logger.disabled = True
_handle_existing_loggers(existing, child_loggers,
disable_existing)
# And finally, do the root logger
root = config.get('root', None)
if root:
try:
self.configure_root(root)
except Exception as e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
finally:
logging._releaseLock()
def configure_formatter(self, config):
"""Configure a formatter from a dictionary."""
if '()' in config:
factory = config['()'] # for use in exception handler
try:
result = self.configure_custom(config)
except TypeError as te:
if "'format'" not in str(te):
raise
#Name of parameter changed from fmt to format.
#Retry with old name.
#This is so that code can be used with older Python versions
#(e.g. by Django)
config['fmt'] = config.pop('format')
config['()'] = factory
result = self.configure_custom(config)
else:
fmt = config.get('format', None)
dfmt = config.get('datefmt', None)
style = config.get('style', '%')
result = logging.Formatter(fmt, dfmt, style)
return result
def configure_filter(self, config):
"""Configure a filter from a dictionary."""
if '()' in config:
result = self.configure_custom(config)
else:
name = config.get('name', '')
result = logging.Filter(name)
return result
def add_filters(self, filterer, filters):
"""Add filters to a filterer from a list of names."""
for f in filters:
try:
filterer.addFilter(self.config['filters'][f])
except Exception as e:
raise ValueError('Unable to add filter %r: %s' % (f, e))
def configure_handler(self, config):
"""Configure a handler from a dictionary."""
config_copy = dict(config) # for restoring in case of error
formatter = config.pop('formatter', None)
if formatter:
try:
formatter = self.config['formatters'][formatter]
except Exception as e:
raise ValueError('Unable to set formatter '
'%r: %s' % (formatter, e))
level = config.pop('level', None)
filters = config.pop('filters', None)
if '()' in config:
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
factory = c
else:
cname = config.pop('class')
klass = self.resolve(cname)
#Special case for handler which refers to another handler
if issubclass(klass, logging.handlers.MemoryHandler) and\
'target' in config:
try:
th = self.config['handlers'][config['target']]
if not isinstance(th, logging.Handler):
config.update(config_copy) # restore for deferred cfg
raise TypeError('target not configured yet')
config['target'] = th
except Exception as e:
raise ValueError('Unable to set target handler '
'%r: %s' % (config['target'], e))
elif issubclass(klass, logging.handlers.SMTPHandler) and\
'mailhost' in config:
config['mailhost'] = self.as_tuple(config['mailhost'])
elif issubclass(klass, logging.handlers.SysLogHandler) and\
'address' in config:
config['address'] = self.as_tuple(config['address'])
factory = klass
kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
try:
result = factory(**kwargs)
except TypeError as te:
if "'stream'" not in str(te):
raise
#The argument name changed from strm to stream
#Retry with old name.
#This is so that code can be used with older Python versions
#(e.g. by Django)
kwargs['strm'] = kwargs.pop('stream')
result = factory(**kwargs)
if formatter:
result.setFormatter(formatter)
if level is not None:
result.setLevel(logging._checkLevel(level))
if filters:
self.add_filters(result, filters)
return result
def add_handlers(self, logger, handlers):
"""Add handlers to a logger from a list of names."""
for h in handlers:
try:
logger.addHandler(self.config['handlers'][h])
except Exception as e:
raise ValueError('Unable to add handler %r: %s' % (h, e))
def common_logger_config(self, logger, config, incremental=False):
"""
Perform configuration which is common to root and non-root loggers.
"""
level = config.get('level', None)
if level is not None:
logger.setLevel(logging._checkLevel(level))
if not incremental:
#Remove any existing handlers
for h in logger.handlers[:]:
logger.removeHandler(h)
handlers = config.get('handlers', None)
if handlers:
self.add_handlers(logger, handlers)
filters = config.get('filters', None)
if filters:
self.add_filters(logger, filters)
def configure_logger(self, name, config, incremental=False):
"""Configure a non-root logger from a dictionary."""
logger = logging.getLogger(name)
self.common_logger_config(logger, config, incremental)
propagate = config.get('propagate', None)
if propagate is not None:
logger.propagate = propagate
def configure_root(self, config, incremental=False):
"""Configure a root logger from a dictionary."""
root = logging.getLogger()
self.common_logger_config(root, config, incremental)
dictConfigClass = DictConfigurator
def dictConfig(config):
"""Configure logging using a dictionary."""
dictConfigClass(config).configure()
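# Illustrative sketch (not part of the original module): a minimal configuration
# dictionary accepted by dictConfig(). The formatter/handler names below
# ("brief", "console") are arbitrary examples, not required identifiers.
def _example_dict_config():  # pragma: no cover - documentation aid only
    example = {
        'version': 1,
        'formatters': {
            'brief': {'format': '%(levelname)s:%(name)s:%(message)s'},
        },
        'handlers': {
            'console': {'class': 'logging.StreamHandler', 'formatter': 'brief'},
        },
        'root': {'level': 'DEBUG', 'handlers': ['console']},
    }
    dictConfig(example)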
def listen(port=DEFAULT_LOGGING_CONFIG_PORT):
"""
Start up a socket server on the specified port, and listen for new
configurations.
These will be sent as a file suitable for processing by fileConfig().
Returns a Thread object on which you can call start() to start the server,
and which you can join() when appropriate. To stop the server, call
stopListening().
"""
if not thread: #pragma: no cover
raise NotImplementedError("listen() needs threading to work")
class ConfigStreamHandler(StreamRequestHandler):
"""
Handler for a logging configuration request.
It expects a completely new logging configuration and uses fileConfig
to install it.
"""
def handle(self):
"""
Handle a request.
Each request is expected to be a 4-byte length, packed using
struct.pack(">L", n), followed by the config file.
Uses fileConfig() to do the grunt work.
"""
try:
conn = self.connection
chunk = conn.recv(4)
if len(chunk) == 4:
slen = struct.unpack(">L", chunk)[0]
chunk = self.connection.recv(slen)
while len(chunk) < slen:
chunk = chunk + conn.recv(slen - len(chunk))
chunk = chunk.decode("utf-8")
try:
import json
                        d = json.loads(chunk)
assert isinstance(d, dict)
dictConfig(d)
except:
#Apply new configuration.
file = io.StringIO(chunk)
try:
fileConfig(file)
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
traceback.print_exc()
if self.server.ready:
self.server.ready.set()
except socket.error as e:
if not isinstance(e.args, tuple):
raise
else:
errcode = e.args[0]
if errcode != RESET_ERROR:
raise
class ConfigSocketReceiver(ThreadingTCPServer):
"""
A simple TCP socket-based logging config receiver.
"""
allow_reuse_address = 1
def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT,
handler=None, ready=None):
ThreadingTCPServer.__init__(self, (host, port), handler)
logging._acquireLock()
self.abort = 0
logging._releaseLock()
self.timeout = 1
self.ready = ready
def serve_until_stopped(self):
import select
abort = 0
while not abort:
rd, wr, ex = select.select([self.socket.fileno()],
[], [],
self.timeout)
if rd:
self.handle_request()
logging._acquireLock()
abort = self.abort
logging._releaseLock()
self.socket.close()
class Server(threading.Thread):
def __init__(self, rcvr, hdlr, port):
super(Server, self).__init__()
self.rcvr = rcvr
self.hdlr = hdlr
self.port = port
self.ready = threading.Event()
def run(self):
server = self.rcvr(port=self.port, handler=self.hdlr,
ready=self.ready)
if self.port == 0:
self.port = server.server_address[1]
self.ready.set()
global _listener
logging._acquireLock()
_listener = server
logging._releaseLock()
server.serve_until_stopped()
return Server(ConfigSocketReceiver, ConfigStreamHandler, port)
def stopListening():
"""
Stop the listening server which was created with a call to listen().
"""
global _listener
logging._acquireLock()
try:
if _listener:
_listener.abort = 1
_listener = None
finally:
logging._releaseLock()
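# Illustrative sketch (not part of the original module): the listen()/stopListening()
# workflow described in the docstrings above. The port value is an example; any
# free TCP port works.
def _example_listen_usage():  # pragma: no cover - documentation aid only
    t = listen(9030)   # build the configuration server thread
    t.start()          # start accepting new configurations
    # ... run the application; clients may push new configs to port 9030 ...
    stopListening()    # ask the server to shut down
    t.join()           # wait for the listener thread to finish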
| gpl-3.0 |
lulivi/debate_bot | bot.py | 1 | 5398 | #!/usr/bin/python3 -u
# -*- coding: utf-8 -*-
import sys
import time
import telebot # Bot API library.
from telebot import types # Types for the bot API.
from priv.__init__ import token as tk
bot = telebot.TeleBot(tk()) # Create our bot object.
###############################################################################
# commands
###############################################################################
# start: welcome message
@bot.message_handler(commands=['start'])
def command_start(m):
cid = m.chat.id
comando = m.text[7:]
if comando == 'reglas':
command_reglas(m)
else:
bot.send_message(cid,"¡Hola! Soy Debatebot.\nUsa el comando /ayuda para que te muestre mis demás comandos.\n\nEspero ser de utilidad.")
########################################
# show the visible commands
@bot.message_handler(commands=['ayuda'])
def command_ayuda(m):
bot.reply_to(m,"Guardo y doy información acerca de debates.\n/nuevo establezco el nuevo tema de debate.\n/actual muestro el tema actual de debate.\n/fin termino el debate actual.\n/reglas muestro las reglas actuales del grupo.")
########################################
# new debate
@bot.message_handler(commands=['nuevo'])
def command_nuevo(m):
pos = m.text.find(" ")
cid = m.chat.id
if pos == -1:
bot.send_message(cid,m.from_user.first_name+", escribe:\n/nuevo nuevo_tema_de_debate")
else:
if get_matter(cid) == "":
set_matter(cid, m.text[pos:])
fuid = m.from_user.id
set_matter_id(cid, fuid)
bot.send_message(cid,"El tema actual se ha guardado con éxito, "+m.from_user.first_name+".")
else:
            bot.send_message(cid,"Ya se está debatiendo un tema, "+m.from_user.first_name+".\n/fin para terminarlo.\n/actual para obtenerlo.")
########################################
# current debate
@bot.message_handler(commands=['actual'])
def command_actual(m):
cid = m.chat.id
actual = get_matter(cid)
if actual != "":
bot.send_message(cid,"\"* "+actual+" *\" es el tema actual.\n\n/fin para terminarlo.",parse_mode="Markdown")
else:
bot.send_message(cid,"No hay debate actualmente.\n/nuevo para comenzar uno.")
########################################
# end the debate
@bot.message_handler(commands=['fin'])
def command_fin(m):
cid = m.chat.id
if get_matter(cid) != "":
uid = get_matter_id(cid)
fuid = m.from_user.id
if uid == fuid:
set_matter(cid)
set_matter_id(cid,uid)
bot.send_message(cid,"Tema cerrado, "+m.from_user.first_name+".\n/nuevo para comenzar uno.")
else:
bot.send_message(cid,"No tiene permiso para terminar el debate, "+m.from_user.first_name+".")
else:
bot.send_message(cid, "No hay debate actualmente, "+m.from_user.first_name+".\n/nuevo para comenzar uno.")
########################################
REGLASID = ""
# rules
@bot.message_handler(commands=['reglas'])
def command_to_reglas(m):
    global REGLASID
    cid = m.chat.id
if cid < 0:
REGLASID = str(cid)
bot.send_message(cid,"Pulse [aquí](https://telegram.me/debate_bot?start=reglas)",parse_mode="Markdown")
else:
command_reglas(m)
def command_reglas(m):
if REGLASID != "":
reglas = get_reglas(REGLASID)
else:
cid = m.chat.id
reglas = get_reglas(cid)
if reglas != "":
bot.reply_to(m,"Reglas de participación en este grupo:\n\n"+reglas)
else:
        bot.reply_to(m,"No hay reglas definidas para este grupo.")
########################################
# define the rules
@bot.message_handler(commands=['definereglas'])
def command_definereglas(m):
cid = m.chat.id
text = m.text
pos = text.find(" ")
if pos != -1:
txt = m.text[pos+1:]
set_reglas(cid, txt)
else:
txt = ""
set_reglas(cid, txt)
###############################################################################
# functions
###############################################################################
##### matter #####
def set_matter(chatid,txt=""):
cid = str(chatid)
with open("./matter/"+cid+".mat",'w') as f:
f.write(txt)
def get_matter(chatid):
cid = str(chatid)
with open("./matter/"+cid+".mat",'a') as f:
pass
with open("./matter/"+cid+".mat",'r') as f:
matter = f.read()
return matter
##### reglas #####
def set_reglas(chatid, txt):
cid = str(chatid)
with open("./reglas/"+cid+".rul",'w') as f:
f.write(txt)
def get_reglas(chatid):
cid = str(chatid)
with open("./reglas/"+cid+".rul",'a') as f:
pass
with open("./reglas/"+cid+".rul",'r') as f:
reglas = f.read()
return reglas
##### matter id #####
def set_matter_id(chatid,userid):
cid = str(chatid)
uid = str(userid)
with open("./matter/"+cid+".matid",'w') as f:
f.write(uid)
def get_matter_id(chatid):
cid = str(chatid)
with open("./matter/"+cid+".matid",'a') as f:
pass
with open("./matter/"+cid+".matid",'r') as f:
uid = f.read()
if uid == "":
return -1
else:
return int(uid)
###############################################################################
bot.polling()
| gpl-2.0 |
jordancheah/zipline | tests/test_munge.py | 34 | 1794 | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import pandas as pd
import numpy as np
from numpy.testing import assert_almost_equal
from unittest import TestCase
from zipline.utils.munge import bfill, ffill
class MungeTests(TestCase):
def test_bfill(self):
# test ndim=1
N = 100
s = pd.Series(np.random.randn(N))
mask = random.sample(range(N), 10)
s.iloc[mask] = np.nan
correct = s.bfill().values
test = bfill(s.values)
assert_almost_equal(correct, test)
# test ndim=2
df = pd.DataFrame(np.random.randn(N, N))
df.iloc[mask] = np.nan
correct = df.bfill().values
test = bfill(df.values)
assert_almost_equal(correct, test)
def test_ffill(self):
# test ndim=1
N = 100
s = pd.Series(np.random.randn(N))
mask = random.sample(range(N), 10)
s.iloc[mask] = np.nan
correct = s.ffill().values
test = ffill(s.values)
assert_almost_equal(correct, test)
# test ndim=2
df = pd.DataFrame(np.random.randn(N, N))
df.iloc[mask] = np.nan
correct = df.ffill().values
test = ffill(df.values)
assert_almost_equal(correct, test)
| apache-2.0 |
chugunovyar/factoryForBuild | neuron/SaveClosedPossition.py | 1 | 31069 | # -*- coding: utf-8 -*-
import logging
from neuron.models import DataSet
import dateutil.parser as DP
loggermsg = logging.getLogger('django')
def saveClosedPossition(jsondata):
#loggermsg.info(len(jsondata))
    # Check whether this order already exists in the DB
ifExistOrdernum = DataSet.objects.filter(open_magicnum=jsondata['magicnum'])
    # If there is no such order, save it to the DB.
if len(ifExistOrdernum) == 0:
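        # A positive closed result marks the stored sample as effective (1), otherwise 0.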
if float(jsondata['result']) > 0:
effectivnes = 1
else:
effectivnes = 0
dataToSave = DataSet(
open_magicnum = jsondata['magicnum'],\
open_neuron_name = jsondata['neuron_name'],\
open_period = jsondata['period'],\
orderOpenPrice = jsondata['openprice'],\
open_type = jsondata['open_type'],\
open_time = DP.parse(jsondata['orderopentime']),\
open_close_1 = jsondata['open_close_1'],\
open_open_1 = jsondata['open_open_1'],\
open_high_1 = jsondata['open_high_1'],\
open_low_1 = jsondata['open_low_1'],
open_upband_1 = jsondata['open_upband_1'],
open_lowband_1 = jsondata['open_lowband_1'],
open_midleband_1 = jsondata['open_midleband_1'],
open_jaw_1 = jsondata['open_jaw_1'],
open_lips_1 = jsondata['open_lips_1'],
open_teeth_1 = jsondata['open_teeth_1'],
open_volume_1 = jsondata['open_volume_1'],
open_close_2 = jsondata['open_close_2'],
open_open_2 = jsondata['open_open_2'],
open_high_2 = jsondata['open_high_2'],
open_low_2 = jsondata['open_low_2'],
open_upband_2 = jsondata['open_upband_2'],
open_lowband_2 = jsondata['open_lowband_2'],
open_midleband_2 = jsondata['open_midleband_2'],
open_jaw_2 = jsondata['open_jaw_2'],
open_lips_2 = jsondata['open_lips_2'],
open_teeth_2 = jsondata['open_teeth_2'],
open_volume_2 = jsondata['open_volume_2'],
open_close_3 = jsondata['open_close_3'],
open_open_3 = jsondata['open_open_3'],
open_high_3 = jsondata['open_high_3'],
open_low_3 = jsondata['open_low_3'],
open_upband_3 = jsondata['open_upband_3'],
open_lowband_3 = jsondata['open_lowband_3'],
open_midleband_3 = jsondata['open_midleband_3'],
open_jaw_3 = jsondata['open_jaw_3'],
open_lips_3 = jsondata['open_lips_3'],
open_teeth_3 = jsondata['open_teeth_3'],
open_volume_3 = jsondata['open_volume_3'],
open_close_4 = jsondata['open_close_4'],
open_open_4 = jsondata['open_open_4'],
open_high_4 = jsondata['open_high_4'],
open_low_4 = jsondata['open_low_4'],
open_upband_4 = jsondata['open_upband_4'],
open_lowband_4 = jsondata['open_lowband_4'],
open_midleband_4 = jsondata['open_midleband_4'],
open_jaw_4 = jsondata['open_jaw_4'],
open_lips_4 = jsondata['open_lips_4'],
open_teeth_4 = jsondata['open_teeth_4'],
open_volume_4 = jsondata['open_volume_4'],
open_close_5 = jsondata['open_close_5'],
open_open_5 = jsondata['open_open_5'],
open_high_5 = jsondata['open_high_5'],
open_low_5 = jsondata['open_low_5'],
open_upband_5 = jsondata['open_upband_5'],
open_lowband_5 = jsondata['open_lowband_5'],
open_midleband_5 = jsondata['open_midleband_5'],
open_jaw_5 = jsondata['open_jaw_5'],
open_lips_5 = jsondata['open_lips_5'],
open_teeth_5 = jsondata['open_teeth_5'],
open_volume_5 = jsondata['open_volume_5'],
open_close_6 = jsondata['open_close_6'],
open_open_6 = jsondata['open_open_6'],
open_high_6 = jsondata['open_high_6'],
open_low_6 = jsondata['open_low_6'],
open_upband_6 = jsondata['open_upband_6'],
open_lowband_6 = jsondata['open_lowband_6'],
open_midleband_6 = jsondata['open_midleband_6'],
open_jaw_6 = jsondata['open_jaw_6'],
open_lips_6 = jsondata['open_lips_6'],
open_teeth_6 = jsondata['open_teeth_6'],
open_volume_6 = jsondata['open_volume_6'],
open_close_7 = jsondata['open_close_7'],
open_open_7 = jsondata['open_open_7'],
open_high_7 = jsondata['open_high_7'],
open_low_7 = jsondata['open_low_7'],
open_upband_7 = jsondata['open_upband_7'],
open_lowband_7 = jsondata['open_lowband_7'],
open_midleband_7 = jsondata['open_midleband_7'],
open_jaw_7 = jsondata['open_jaw_7'],
open_lips_7 = jsondata['open_lips_7'],
open_teeth_7 = jsondata['open_teeth_7'],
open_volume_7 = jsondata['open_volume_7'],
open_close_8 = jsondata['open_close_8'],
open_open_8 = jsondata['open_open_8'],
open_high_8 = jsondata['open_high_8'],
open_low_8 = jsondata['open_low_8'],
open_upband_8 = jsondata['open_upband_8'],
open_lowband_8 = jsondata['open_lowband_8'],
open_midleband_8 = jsondata['open_midleband_8'],
open_jaw_8 = jsondata['open_jaw_8'],
open_lips_8 = jsondata['open_lips_8'],
open_teeth_8 = jsondata['open_teeth_8'],
open_volume_8 = jsondata['open_volume_8'],
open_close_9 = jsondata['open_close_9'],
open_open_9 = jsondata['open_open_9'],
open_high_9 = jsondata['open_high_9'],
open_low_9 = jsondata['open_low_9'],
open_upband_9 = jsondata['open_upband_9'],
open_lowband_9 = jsondata['open_lowband_9'],
open_midleband_9 = jsondata['open_midleband_9'],
open_jaw_9 = jsondata['open_jaw_9'],
open_lips_9 = jsondata['open_lips_9'],
open_teeth_9 = jsondata['open_teeth_9'],
open_volume_9 = jsondata['open_volume_9'],
open_close_10 = jsondata['open_close_10'],
open_open_10 = jsondata['open_open_10'],
open_high_10 = jsondata['open_high_10'],
open_low_10 = jsondata['open_low_10'],
open_upband_10 = jsondata['open_upband_10'],
open_lowband_10 = jsondata['open_lowband_10'],
open_midleband_10 = jsondata['open_midleband_10'],
open_jaw_10 = jsondata['open_jaw_10'],
open_lips_10 = jsondata['open_lips_10'],
open_teeth_10 = jsondata['open_teeth_10'],
open_volume_10 = jsondata['open_volume_10'],
)
dataToSave.save()
DataSet.objects.filter(open_magicnum=jsondata['magicnum']).update(
open_close_11 = jsondata['open_close_11'],
open_open_11 = jsondata['open_open_11'],
open_high_11 = jsondata['open_high_11'],
open_low_11 = jsondata['open_low_11'],
open_upband_11 = jsondata['open_upband_11'],
open_lowband_11 = jsondata['open_lowband_11'],
open_midleband_11 = jsondata['open_midleband_11'],
open_jaw_11 = jsondata['open_jaw_11'],
open_lips_11 = jsondata['open_lips_11'],
open_teeth_11 = jsondata['open_teeth_11'],
open_volume_11 = jsondata['open_volume_11'],
open_close_12 = jsondata['open_close_12'],
open_open_12 = jsondata['open_open_12'],
open_high_12 = jsondata['open_high_12'],
open_low_12 = jsondata['open_low_12'],
open_upband_12 = jsondata['open_upband_12'],
open_lowband_12 = jsondata['open_lowband_12'],
open_midleband_12 = jsondata['open_midleband_12'],
open_jaw_12 = jsondata['open_jaw_12'],
open_lips_12 = jsondata['open_lips_12'],
open_teeth_12 = jsondata['open_teeth_12'],
open_volume_12 = jsondata['open_volume_12'],
open_close_13 = jsondata['open_close_13'],
open_open_13 = jsondata['open_open_13'],
open_high_13 = jsondata['open_high_13'],
open_low_13 = jsondata['open_low_13'],
open_upband_13 = jsondata['open_upband_13'],
open_lowband_13 = jsondata['open_lowband_13'],
open_midleband_13 = jsondata['open_midleband_13'],
open_jaw_13 = jsondata['open_jaw_13'],
open_lips_13 = jsondata['open_lips_13'],
open_teeth_13 = jsondata['open_teeth_13'],
open_volume_13 = jsondata['open_volume_13'],
open_close_14 = jsondata['open_close_14'],
open_open_14 = jsondata['open_open_14'],
open_high_14 = jsondata['open_high_14'],
open_low_14 = jsondata['open_low_14'],
open_upband_14 = jsondata['open_upband_14'],
open_lowband_14 = jsondata['open_lowband_14'],
open_midleband_14 = jsondata['open_midleband_14'],
open_jaw_14 = jsondata['open_jaw_14'],
open_lips_14 = jsondata['open_lips_14'],
open_teeth_14 = jsondata['open_teeth_14'],
open_volume_14 = jsondata['open_volume_14'],
open_close_15 = jsondata['open_close_15'],
open_open_15 = jsondata['open_open_15'],
open_high_15 = jsondata['open_high_15'],
open_low_15 = jsondata['open_low_15'],
open_upband_15 = jsondata['open_upband_15'],
open_lowband_15 = jsondata['open_lowband_15'],
open_midleband_15 = jsondata['open_midleband_15'],
open_jaw_15 = jsondata['open_jaw_15'],
open_lips_15 = jsondata['open_lips_15'],
open_teeth_15 = jsondata['open_teeth_15'],
open_volume_15 = jsondata['open_volume_15'],
open_close_16 = jsondata['open_close_16'],
open_open_16 = jsondata['open_open_16'],
open_high_16 = jsondata['open_high_16'],
open_low_16 = jsondata['open_low_16'],
open_upband_16 = jsondata['open_upband_16'],
open_lowband_16 = jsondata['open_lowband_16'],
open_midleband_16 = jsondata['open_midleband_16'],
open_jaw_16 = jsondata['open_jaw_16'],
open_lips_16 = jsondata['open_lips_16'],
open_teeth_16 = jsondata['open_teeth_16'],
open_volume_16 = jsondata['open_volume_16'],
open_close_17 = jsondata['open_close_17'],
open_open_17 = jsondata['open_open_17'],
open_high_17 = jsondata['open_high_17'],
open_low_17 = jsondata['open_low_17'],
open_upband_17 = jsondata['open_upband_17'],
open_lowband_17 = jsondata['open_lowband_17'],
open_midleband_17 = jsondata['open_midleband_17'],
open_jaw_17 = jsondata['open_jaw_17'],
open_lips_17 = jsondata['open_lips_17'],
open_teeth_17 = jsondata['open_teeth_17'],
open_volume_17 = jsondata['open_volume_17'],
open_close_18 = jsondata['open_close_18'],
open_open_18 = jsondata['open_open_18'],
open_high_18 = jsondata['open_high_18'],
open_low_18 = jsondata['open_low_18'],
open_upband_18 = jsondata['open_upband_18'],
open_lowband_18 = jsondata['open_lowband_18'],
open_midleband_18 = jsondata['open_midleband_18'],
open_jaw_18 = jsondata['open_jaw_18'],
open_lips_18 = jsondata['open_lips_18'],
open_teeth_18 = jsondata['open_teeth_18'],
open_volume_18 = jsondata['open_volume_18'],
open_close_19 = jsondata['open_close_19'],
open_open_19 = jsondata['open_open_19'],
open_high_19 = jsondata['open_high_19'],
open_low_19 = jsondata['open_low_19'],
open_upband_19 = jsondata['open_upband_19'],
open_lowband_19 = jsondata['open_lowband_19'],
open_midleband_19 = jsondata['open_midleband_19'],
open_jaw_19 = jsondata['open_jaw_19'],
open_lips_19 = jsondata['open_lips_19'],
open_teeth_19 = jsondata['open_teeth_19'],
open_volume_19 = jsondata['open_volume_19'],
open_close_20 = jsondata['open_close_20'],
open_open_20 = jsondata['open_open_20'],
open_high_20 = jsondata['open_high_20'],
open_low_20 = jsondata['open_low_20'],
open_upband_20 = jsondata['open_upband_20'],
open_lowband_20 = jsondata['open_lowband_20'],
open_midleband_20 = jsondata['open_midleband_20'],
open_jaw_20 = jsondata['open_jaw_20'],
open_lips_20 = jsondata['open_lips_20'],
open_teeth_20 = jsondata['open_teeth_20'],
open_volume_20 = jsondata['open_volume_20'],
open_close_21 = jsondata['open_close_21'],
open_open_21 = jsondata['open_open_21'],
open_high_21 = jsondata['open_high_21'],
open_low_21 = jsondata['open_low_21'],
open_upband_21 = jsondata['open_upband_21'],
open_lowband_21 = jsondata['open_lowband_21'],
open_midleband_21 = jsondata['open_midleband_21'],
open_jaw_21 = jsondata['open_jaw_21'],
open_lips_21 = jsondata['open_lips_21'],
open_teeth_21 = jsondata['open_teeth_21'],
open_volume_21 = jsondata['open_volume_21'],
open_close_22 = jsondata['open_close_22'],
open_open_22 = jsondata['open_open_22'],
open_high_22 = jsondata['open_high_22'],
open_low_22 = jsondata['open_low_22'],
open_upband_22 = jsondata['open_upband_22'],
open_lowband_22 = jsondata['open_lowband_22'],
open_midleband_22 = jsondata['open_midleband_22'],
open_jaw_22 = jsondata['open_jaw_22'],
open_lips_22 = jsondata['open_lips_22'],
open_teeth_22 = jsondata['open_teeth_22'],
open_volume_22 = jsondata['open_volume_22'],
open_close_23 = jsondata['open_close_23'],
open_open_23 = jsondata['open_open_23'],
open_high_23 = jsondata['open_high_23'],
open_low_23 = jsondata['open_low_23'],
open_upband_23 = jsondata['open_upband_23'],
open_lowband_23 = jsondata['open_lowband_23'],
open_midleband_23 = jsondata['open_midleband_23'],
open_jaw_23 = jsondata['open_jaw_23'],
open_lips_23 = jsondata['open_lips_23'],
open_teeth_23 = jsondata['open_teeth_23'],
open_volume_23 = jsondata['open_volume_23'],
open_close_24 = jsondata['open_close_24'],
open_open_24 = jsondata['open_open_24'],
open_high_24 = jsondata['open_high_24'],
open_low_24 = jsondata['open_low_24'],
open_upband_24 = jsondata['open_upband_24'],
open_lowband_24 = jsondata['open_lowband_24'],
open_midleband_24 = jsondata['open_midleband_24'],
open_jaw_24 = jsondata['open_jaw_24'],
open_lips_24 = jsondata['open_lips_24'],
open_teeth_24 = jsondata['open_teeth_24'],
open_volume_24 = jsondata['open_volume_24']
)
DataSet.objects.filter(open_magicnum=jsondata['magicnum']).update(
close_close_1 = jsondata['close_close_1'],
close_open_1 = jsondata['close_open_1'],
close_high_1 = jsondata['close_high_1'],
close_low_1 = jsondata['close_low_1'],
close_upband_1 = jsondata['close_upband_1'],
close_lowband_1 = jsondata['close_lowband_1'],
close_midleband_1 = jsondata['close_midleband_1'],
close_jaw_1 = jsondata['close_jaw_1'],
close_lips_1 = jsondata['close_lips_1'],
close_teeth_1 = jsondata['close_teeth_1'],
close_volume_1 = jsondata['close_volume_1'],
close_close_2 = jsondata['close_close_2'],
close_open_2 = jsondata['close_open_2'],
close_high_2 = jsondata['close_high_2'],
close_low_2 = jsondata['close_low_2'],
close_upband_2 = jsondata['close_upband_2'],
close_lowband_2 = jsondata['close_lowband_2'],
close_midleband_2 = jsondata['close_midleband_2'],
close_jaw_2 = jsondata['close_jaw_2'],
close_lips_2 = jsondata['close_lips_2'],
close_teeth_2 = jsondata['close_teeth_2'],
close_volume_2 = jsondata['close_volume_2'],
close_close_3 = jsondata['close_close_3'],
close_open_3 = jsondata['close_open_3'],
close_high_3 = jsondata['close_high_3'],
close_low_3 = jsondata['close_low_3'],
close_upband_3 = jsondata['close_upband_3'],
close_lowband_3 = jsondata['close_lowband_3'],
close_midleband_3 = jsondata['close_midleband_3'],
close_jaw_3 = jsondata['close_jaw_3'],
close_lips_3 = jsondata['close_lips_3'],
close_teeth_3 = jsondata['close_teeth_3'],
close_volume_3 = jsondata['close_volume_3'],
close_close_4 = jsondata['close_close_4'],
close_open_4 = jsondata['close_open_4'],
close_high_4 = jsondata['close_high_4'],
close_low_4 = jsondata['close_low_4'],
close_upband_4 = jsondata['close_upband_4'],
close_lowband_4 = jsondata['close_lowband_4'],
close_midleband_4 = jsondata['close_midleband_4'],
close_jaw_4 = jsondata['close_jaw_4'],
close_lips_4 = jsondata['close_lips_4'],
close_teeth_4 = jsondata['close_teeth_4'],
close_volume_4 = jsondata['close_volume_4'],
close_close_5 = jsondata['close_close_5'],
close_open_5 = jsondata['close_open_5'],
close_high_5 = jsondata['close_high_5'],
close_low_5 = jsondata['close_low_5'],
close_upband_5 = jsondata['close_upband_5'],
close_lowband_5 = jsondata['close_lowband_5'],
close_midleband_5 = jsondata['close_midleband_5'],
close_jaw_5 = jsondata['close_jaw_5'],
close_lips_5 = jsondata['close_lips_5'],
close_teeth_5 = jsondata['close_teeth_5'],
close_volume_5 = jsondata['close_volume_5'],
close_close_6 = jsondata['close_close_6'],
close_open_6 = jsondata['close_open_6'],
close_high_6 = jsondata['close_high_6'],
close_low_6 = jsondata['close_low_6'],
close_upband_6 = jsondata['close_upband_6'],
close_lowband_6 = jsondata['close_lowband_6'],
close_midleband_6 = jsondata['close_midleband_6'],
close_jaw_6 = jsondata['close_jaw_6'],
close_lips_6 = jsondata['close_lips_6'],
close_teeth_6 = jsondata['close_teeth_6'],
close_volume_6 = jsondata['close_volume_6'],
close_close_7 = jsondata['close_close_7'],
close_open_7 = jsondata['close_open_7'],
close_high_7 = jsondata['close_high_7'],
close_low_7 = jsondata['close_low_7'],
close_upband_7 = jsondata['close_upband_7'],
close_lowband_7 = jsondata['close_lowband_7'],
close_midleband_7 = jsondata['close_midleband_7'],
close_jaw_7 = jsondata['close_jaw_7'],
close_lips_7 = jsondata['close_lips_7'],
close_teeth_7 = jsondata['close_teeth_7'],
close_volume_7 = jsondata['close_volume_7'],
close_close_8 = jsondata['close_close_8'],
close_open_8 = jsondata['close_open_8'],
close_high_8 = jsondata['close_high_8'],
close_low_8 = jsondata['close_low_8'],
close_upband_8 = jsondata['close_upband_8'],
close_lowband_8 = jsondata['close_lowband_8'],
close_midleband_8 = jsondata['close_midleband_8'],
close_jaw_8 = jsondata['close_jaw_8'],
close_lips_8 = jsondata['close_lips_8'],
close_teeth_8 = jsondata['close_teeth_8'],
close_volume_8 = jsondata['close_volume_8'],
close_close_9 = jsondata['close_close_9'],
close_open_9 = jsondata['close_open_9'],
close_high_9 = jsondata['close_high_9'],
close_low_9 = jsondata['close_low_9'],
close_upband_9 = jsondata['close_upband_9'],
close_lowband_9 = jsondata['close_lowband_9'],
close_midleband_9 = jsondata['close_midleband_9'],
close_jaw_9 = jsondata['close_jaw_9'],
close_lips_9 = jsondata['close_lips_9'],
close_teeth_9 = jsondata['close_teeth_9'],
close_volume_9 = jsondata['close_volume_9'],
close_close_10 = jsondata['close_close_10'],
close_open_10 = jsondata['close_open_10'],
close_high_10 = jsondata['close_high_10'],
close_low_10 = jsondata['close_low_10'],
close_upband_10 = jsondata['close_upband_10'],
close_lowband_10 = jsondata['close_lowband_10'],
close_midleband_10 = jsondata['close_midleband_10'],
close_jaw_10 = jsondata['close_jaw_10'],
close_lips_10 = jsondata['close_lips_10'],
close_teeth_10 = jsondata['close_teeth_10'],
close_volume_10 = jsondata['close_volume_10'],
close_close_11 = jsondata['close_close_11'],
close_open_11 = jsondata['close_open_11'],
close_high_11 = jsondata['close_high_11'],
close_low_11 = jsondata['close_low_11'],
close_upband_11 = jsondata['close_upband_11'],
close_lowband_11 = jsondata['close_lowband_11'],
close_midleband_11 = jsondata['close_midleband_11'],
close_jaw_11 = jsondata['close_jaw_11'],
close_lips_11 = jsondata['close_lips_11'],
close_teeth_11 = jsondata['close_teeth_11'],
close_volume_11 = jsondata['close_volume_11'],
close_close_12 = jsondata['close_close_12'],
close_open_12 = jsondata['close_open_12'],
close_high_12 = jsondata['close_high_12'],
close_low_12 = jsondata['close_low_12'],
close_upband_12 = jsondata['close_upband_12'],
close_lowband_12 = jsondata['close_lowband_12'],
close_midleband_12 = jsondata['close_midleband_12'],
close_jaw_12 = jsondata['close_jaw_12'],
close_lips_12 = jsondata['close_lips_12'],
close_teeth_12 = jsondata['close_teeth_12'],
close_volume_12 = jsondata['close_volume_12'],
)
DataSet.objects.filter(open_magicnum=jsondata['magicnum']).update(
close_close_13 = jsondata['close_close_13'],
close_open_13 = jsondata['close_open_13'],
close_high_13 = jsondata['close_high_13'],
close_low_13 = jsondata['close_low_13'],
close_upband_13 = jsondata['close_upband_13'],
close_lowband_13 = jsondata['close_lowband_13'],
close_midleband_13 = jsondata['close_midleband_13'],
close_jaw_13 = jsondata['close_jaw_13'],
close_lips_13 = jsondata['close_lips_13'],
close_teeth_13 = jsondata['close_teeth_13'],
close_volume_13 = jsondata['close_volume_13'],
close_close_14 = jsondata['close_close_14'],
close_open_14 = jsondata['close_open_14'],
close_high_14 = jsondata['close_high_14'],
close_low_14 = jsondata['close_low_14'],
close_upband_14 = jsondata['close_upband_14'],
close_lowband_14 = jsondata['close_lowband_14'],
close_midleband_14 = jsondata['close_midleband_14'],
close_jaw_14 = jsondata['close_jaw_14'],
close_lips_14 = jsondata['close_lips_14'],
close_teeth_14 = jsondata['close_teeth_14'],
close_volume_14 = jsondata['close_volume_14'],
close_close_15 = jsondata['close_close_15'],
close_open_15 = jsondata['close_open_15'],
close_high_15 = jsondata['close_high_15'],
close_low_15 = jsondata['close_low_15'],
close_upband_15 = jsondata['close_upband_15'],
close_lowband_15 = jsondata['close_lowband_15'],
close_midleband_15 = jsondata['close_midleband_15'],
close_jaw_15 = jsondata['close_jaw_15'],
close_lips_15 = jsondata['close_lips_15'],
close_teeth_15 = jsondata['close_teeth_15'],
close_volume_15 = jsondata['close_volume_15'],
close_close_16 = jsondata['close_close_16'],
close_open_16 = jsondata['close_open_16'],
close_high_16 = jsondata['close_high_16'],
close_low_16 = jsondata['close_low_16'],
close_upband_16 = jsondata['close_upband_16'],
close_lowband_16 = jsondata['close_lowband_16'],
close_midleband_16 = jsondata['close_midleband_16'],
close_jaw_16 = jsondata['close_jaw_16'],
close_lips_16 = jsondata['close_lips_16'],
close_teeth_16 = jsondata['close_teeth_16'],
close_volume_16 = jsondata['close_volume_16'],
close_close_17 = jsondata['close_close_17'],
close_open_17 = jsondata['close_open_17'],
close_high_17 = jsondata['close_high_17'],
close_low_17 = jsondata['close_low_17'],
close_upband_17 = jsondata['close_upband_17'],
close_lowband_17 = jsondata['close_lowband_17'],
close_midleband_17 = jsondata['close_midleband_17'],
close_jaw_17 = jsondata['close_jaw_17'],
close_lips_17 = jsondata['close_lips_17'],
close_teeth_17 = jsondata['close_teeth_17'],
close_volume_17 = jsondata['close_volume_17'],
close_close_18 = jsondata['close_close_18'],
close_open_18 = jsondata['close_open_18'],
close_high_18 = jsondata['close_high_18'],
close_low_18 = jsondata['close_low_18'],
close_upband_18 = jsondata['close_upband_18'],
close_lowband_18 = jsondata['close_lowband_18'],
close_midleband_18 = jsondata['close_midleband_18'],
close_jaw_18 = jsondata['close_jaw_18'],
close_lips_18 = jsondata['close_lips_18'],
close_teeth_18 = jsondata['close_teeth_18'],
close_volume_18 = jsondata['close_volume_18'],
close_close_19 = jsondata['close_close_19'],
close_open_19 = jsondata['close_open_19'],
close_high_19 = jsondata['close_high_19'],
close_low_19 = jsondata['close_low_19'],
close_upband_19 = jsondata['close_upband_19'],
close_lowband_19 = jsondata['close_lowband_19'],
close_midleband_19 = jsondata['close_midleband_19'],
close_jaw_19 = jsondata['close_jaw_19'],
close_lips_19 = jsondata['close_lips_19'],
close_teeth_19 = jsondata['close_teeth_19'],
close_volume_19 = jsondata['close_volume_19'],
close_close_20 = jsondata['close_close_20'],
close_open_20 = jsondata['close_open_20'],
close_high_20 = jsondata['close_high_20'],
close_low_20 = jsondata['close_low_20'],
close_upband_20 = jsondata['close_upband_20'],
close_lowband_20 = jsondata['close_lowband_20'],
close_midleband_20 = jsondata['close_midleband_20'],
close_jaw_20 = jsondata['close_jaw_20'],
close_lips_20 = jsondata['close_lips_20'],
close_teeth_20 = jsondata['close_teeth_20'],
close_volume_20 = jsondata['close_volume_20'],
close_close_21 = jsondata['close_close_21'],
close_open_21 = jsondata['close_open_21'],
close_high_21 = jsondata['close_high_21'],
close_low_21 = jsondata['close_low_21'],
close_upband_21 = jsondata['close_upband_21'],
close_lowband_21 = jsondata['close_lowband_21'],
close_midleband_21 = jsondata['close_midleband_21'],
close_jaw_21 = jsondata['close_jaw_21'],
close_lips_21 = jsondata['close_lips_21'],
close_teeth_21 = jsondata['close_teeth_21'],
close_volume_21 = jsondata['close_volume_21'],
close_close_22 = jsondata['close_close_22'],
close_open_22 = jsondata['close_open_22'],
close_high_22 = jsondata['close_high_22'],
close_low_22 = jsondata['close_low_22'],
close_upband_22 = jsondata['close_upband_22'],
close_lowband_22 = jsondata['close_lowband_22'],
close_midleband_22 = jsondata['close_midleband_22'],
close_jaw_22 = jsondata['close_jaw_22'],
close_lips_22 = jsondata['close_lips_22'],
close_teeth_22 = jsondata['close_teeth_22'],
close_volume_22 = jsondata['close_volume_22'],
close_close_23 = jsondata['close_close_23'],
close_open_23 = jsondata['close_open_23'],
close_high_23 = jsondata['close_high_23'],
close_low_23 = jsondata['close_low_23'],
close_upband_23 = jsondata['close_upband_23'],
close_lowband_23 = jsondata['close_lowband_23'],
close_midleband_23 = jsondata['close_midleband_23'],
close_jaw_23 = jsondata['close_jaw_23'],
close_lips_23 = jsondata['close_lips_23'],
close_teeth_23 = jsondata['close_teeth_23'],
close_volume_23 = jsondata['close_volume_23'],
close_close_24 = jsondata['close_close_24'],
close_open_24 = jsondata['close_open_24'],
close_high_24 = jsondata['close_high_24'],
close_low_24 = jsondata['close_low_24'],
close_upband_24 = jsondata['close_upband_24'],
close_lowband_24 = jsondata['close_lowband_24'],
close_midleband_24 = jsondata['close_midleband_24'],
close_jaw_24 = jsondata['close_jaw_24'],
close_lips_24 = jsondata['close_lips_24'],
close_teeth_24 = jsondata['close_teeth_24'],
close_volume_24 = jsondata['close_volume_24'],
close_result = jsondata['result'],
close_effectivnes = effectivnes,
close_neuron_name = jsondata['neuron_name'],
close_closeprice = jsondata['closeprice'],
close_time = DP.parse(jsondata['orderclosetime'])
)
| gpl-3.0 |
rodrigc/buildbot | master/buildbot/test/unit/util/test_state.py | 6 | 2671 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.internet import defer
from twisted.trial import unittest
from buildbot.test.fake import fakemaster
from buildbot.test.util.misc import TestReactorMixin
from buildbot.util import state
class FakeObject(state.StateMixin):
name = "fake-name"
def __init__(self, master):
self.master = master
class TestStateMixin(TestReactorMixin, unittest.TestCase):
OBJECTID = 19
def setUp(self):
self.setUpTestReactor()
self.master = fakemaster.make_master(self, wantDb=True)
self.object = FakeObject(self.master)
@defer.inlineCallbacks
def test_getState(self):
self.master.db.state.fakeState('fake-name', 'FakeObject',
fav_color=['red', 'purple'])
res = yield self.object.getState('fav_color')
self.assertEqual(res, ['red', 'purple'])
@defer.inlineCallbacks
def test_getState_default(self):
res = yield self.object.getState('fav_color', 'black')
self.assertEqual(res, 'black')
def test_getState_KeyError(self):
self.master.db.state.fakeState('fake-name', 'FakeObject',
fav_color=['red', 'purple'])
d = self.object.getState('fav_book')
def cb(_):
self.fail("should not succeed")
def check_exc(f):
f.trap(KeyError)
d.addCallbacks(cb, check_exc)
return d
@defer.inlineCallbacks
def test_setState(self):
yield self.object.setState('y', 14)
self.master.db.state.assertStateByClass('fake-name', 'FakeObject',
y=14)
@defer.inlineCallbacks
def test_setState_existing(self):
self.master.db.state.fakeState('fake-name', 'FakeObject', x=13)
yield self.object.setState('x', 14)
self.master.db.state.assertStateByClass('fake-name', 'FakeObject',
x=14)
| gpl-2.0 |
campbe13/openhatch | vendor/packages/Django/django/http/utils.py | 36 | 1499 | """
Functions that modify an HTTP request or response in some way.
"""
# This group of functions are run as part of the response handling, after
# everything else, including all response middleware. Think of them as
# "compulsory response middleware". Be careful about what goes here, because
# it's a little fiddly to override this behavior, so they should be truly
# universally applicable.
def fix_location_header(request, response):
"""
Ensures that we always use an absolute URI in any location header in the
response. This is required by RFC 2616, section 14.30.
Code constructing response objects is free to insert relative paths, as
this function converts them to absolute paths.
"""
if 'Location' in response and request.get_host():
response['Location'] = request.build_absolute_uri(response['Location'])
return response
def conditional_content_removal(request, response):
"""
Removes the content of responses for HEAD requests, 1xx, 204 and 304
responses. Ensures compliance with RFC 2616, section 4.3.
"""
if 100 <= response.status_code < 200 or response.status_code in (204, 304):
if response.streaming:
response.streaming_content = []
else:
response.content = ''
response['Content-Length'] = '0'
if request.method == 'HEAD':
if response.streaming:
response.streaming_content = []
else:
response.content = ''
return response
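# Illustrative sketch (not part of the original module): how these "compulsory
# response middleware" functions are applied in sequence to a response before it
# is returned to the client. The request/response arguments are placeholders for
# whatever the handler produced.
def _example_apply_fixes(request, response):  # pragma: no cover - illustrative only
    for fix in (fix_location_header, conditional_content_removal):
        response = fix(request, response)
    return response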
| agpl-3.0 |
dustcloud/dustlink | SmartMeshSDK/IpMgrConnectorMux/IpMgrConnectorMuxInternal.py | 4 | 9593 | import threading
import socket
import select
import struct
import MuxMsg
from SmartMeshSDK import ApiException, \
ApiConnector
from SmartMeshSDK.ApiDefinition import IpMgrDefinition
class IpMgrConnectorMuxInternal(ApiConnector.ApiConnector ) :
'''
\ingroup ApiConnector
\brief Internal class for IP manager connector, through Serial Mux.
Members of class
acknowledgeBuf - binary payload of acknowledge
ackCmdId - command ID from acknowledge
sendSemaphor - semaphore to wait for acknowledgement of command (threading.Semaphore)
inputThread - thread for processing input packets (threading.Thread)
socket - TCP socket for connection with Serial Mux
'''
PARAM_HOST = 'host'
PARAM_PORT = 'port'
PARAM_ISSENDHELLO = 'isSendHello'
DEFAULT_PARAM_HOST = '127.0.0.1'
DEFAULT_PARAM_PORT = 9900
_RC_OK = 0
_RC_TIMEOUT = 5
def __init__(self, maxQSize = 100) :
ApiConnector.ApiConnector.__init__(self, maxQSize)
self.acknowledgeBuf = None
self.ackCmdId = -1
self.sendSemaphor = threading.BoundedSemaphore(1)
self.sendLock = threading.Lock()
self.socket = None
self.inputThread = None
self.muxMsg = MuxMsg.MuxMsg(self.processCmd)
self.apiDef = IpMgrDefinition.IpMgrDefinition()
self.notifIds = self.apiDef.getIds(self.apiDef.NOTIFICATION)
def connect(self, params = {}) :
'''
\brief Connect to device
\param params Dictionary of connection parameters:
- 'host' - IP address of Mux (default: '127.0.0.1')
- 'port' - port of Mux (default: 9900)
- 'isSendHello' - send Hello message after connection (default True)
'''
host = self.DEFAULT_PARAM_HOST
port = self.DEFAULT_PARAM_PORT
isSendHello = True
if self.PARAM_HOST in params and params[self.PARAM_HOST] :
host = params[self.PARAM_HOST]
if self.PARAM_PORT in params and params[self.PARAM_PORT] :
port = int(params[self.PARAM_PORT])
if self.PARAM_ISSENDHELLO in params :
isSendHello = params[self.PARAM_ISSENDHELLO]
if self.inputThread : # Wait finish disconnect process
try :
self.inputThread.join(1.0)
if self.inputThread.isAlive() :
raise ApiException.ConnectionError("Already connected")
except RuntimeError :
pass # Ignore join error
self.inputThread = None
try:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect( (host, port) )
self.socket.setblocking(1)
except socket.error as ex:
raise ApiException.ConnectionError(str(ex))
self.sendSemaphor.acquire(False) # Clear semaphore
# Start thread for processing input stream
self.inputThread = threading.Thread(target = self.inputProcess)
self.inputThread.name = "IpMgrConnectorMuxInternal"
self.inputThread.start()
ApiConnector.ApiConnector.connect(self)
if isSendHello :
self.sendHelloCmd()
def disconnect(self, reason="") :
if not self.isConnected :
return
try :
self.socket.send("stop")
self.socket.shutdown(socket.SHUT_RD) # start disconnection
self.socket.close()
except socket.error :
pass # Ignore socket error
ApiConnector.ApiConnector.disconnect(self, reason)
def send(self, cmdNames, params) :
self.sendLock.acquire()
try :
if not self.isConnected :
raise ApiException.ConnectionError("Disconnected")
# Send data
ApiConnector.log.debug("IO OUT. {0} : {1}".format(cmdNames, params))
(cmdId, paramsBinList) = self.apiDef.serialize(cmdNames, params)
paramsBin = struct.pack('!'+str(len(paramsBinList))+'B', *paramsBinList)
ApiConnector.logDump(paramsBin, "RawIO OUT. Command ID: {0}".format(cmdId))
packet = self.muxMsg.build_message(cmdId, paramsBin)
self.acknowledgeBuf = None
self.ackCmdId = -1
try :
self.socket.sendall(packet)
except socket.error, way:
# Socket error. Disconnect from device. Stop command processing
reason = "IO output error [{0}] {1}".format(way.args[0], way.args[1])
self.disconnect(reason)
raise ApiException.ConnectionError(reason)
            # Wait for the acknowledgement of the command
self.sendSemaphor.acquire()
if not self.isConnected : # Disconnect happened during waiting ack.
raise ApiException.ConnectionError(self.disconnectReason)
# Process acknowledge
cmdId = self.apiDef.nameToId(self.apiDef.COMMAND, (cmdNames[0],))
if self.ackCmdId != cmdId :
reason = "Unexpected acknowledge {0} for command {1} ({2})".format(self.ackCmdId, cmdId, cmdNames)
self.disconnect(reason)
raise ApiException.ConnectionError(reason)
# Parse acknowledge
ackList = struct.unpack('!'+str(len(self.acknowledgeBuf))+'B', self.acknowledgeBuf)
(resCmdName, resParams) = self.apiDef.deserialize(self.apiDef.COMMAND, self.ackCmdId, ackList)
ApiConnector.log.debug("IO INP. {0} : {1}".format(resCmdName, resParams))
if self.apiDef.RC in resParams and resParams[self.apiDef.RC] != self._RC_OK :
if resParams[self.apiDef.RC] == self._RC_TIMEOUT :
raise ApiException.CommandTimeoutError(resCmdName)
try:
desc = '({0})\n{1}'.format(
self.apiDef.responseFieldValueToDesc(
resCmdName,
self.apiDef.RC,
resParams[self.apiDef.RC],
),
self.apiDef.rcToDescription(
resParams[self.apiDef.RC],
resCmdName,
),
)
except:
desc = None
raise ApiException.APIError(
cmd=resCmdName,
rc=resParams[self.apiDef.RC],
desc=desc
)
self.ackCmdId = -1
self.acknowledgeBuf = None
finally:
self.sendLock.release()
return resParams
def ackSignal(self):
'''
\brief Send signal 'Acknowledge received'
'''
try :
self.sendSemaphor.release()
except ValueError :
pass
def inputProcess(self):
'''
\brief Processing device input
'''
try :
while True :
select.select([self.socket], [], [self.socket])
buf = self.socket.recv(4096)
if not buf :
raise socket.error(0, "Connection close")
self.muxMsg.parse(buf)
except socket.error, way:
# Disconnect process -------------------------------------------------
if way.args[0] == 9 : #
way = socket.error(0, "Connection close")
ApiConnector.ApiConnector.disconnect(self, "Disconnect. Reason: {0} [{1}]".format(way.args[1], way.args[0]))
self.acknowledgeBuf = None
self.ackCmdId = -1
self.ackSignal()
try :
self.socket.close()
except socket.error :
pass # Ignore socket error
def processCmd(self, reserved, cmdId, payload):
'''
\brief deserialize and process command
'''
ApiConnector.logDump(payload, "RawIO INP. Command ID: {0}".format(cmdId))
if cmdId in self.notifIds :
try :
payloadList = struct.unpack('!'+str(len(payload))+'B', payload)
(notifNames, params) = self.apiDef.deserialize(self.apiDef.NOTIFICATION, cmdId, payloadList)
ApiConnector.log.debug("IO INP. {0} : {1}".format(notifNames, params))
self.putNotification((notifNames, params))
except ApiException.ConnectionError as ex:
raise socket.error(0, ex.value) # Initiate disconnection
except Exception as ex :
ApiConnector.log.error("Deserialization command {0}. Error {1}".format(cmdId, ex))
else :
self.ackCmdId = cmdId
self.acknowledgeBuf = payload
self.ackSignal()
def sendHelloCmd(self):
'''
\brief Send Hello command
'''
res = self.send(["mux_hello"], {"version" : self.muxMsg.getVer(), "secret" : self.muxMsg.getAuth()})
return res
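# Illustrative sketch (not part of the original module): connecting this internal
# connector directly to a Serial Mux. In the SDK the public IpMgrConnectorMux
# wrapper is normally used instead; the host/port values below are examples only.
def _example_connect():  # pragma: no cover - illustrative only
    connector = IpMgrConnectorMuxInternal()
    connector.connect({
        IpMgrConnectorMuxInternal.PARAM_HOST: '127.0.0.1',
        IpMgrConnectorMuxInternal.PARAM_PORT: 9900,
    })
    # ... issue commands via connector.send([...], {...}) ...
    connector.disconnect()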
| bsd-3-clause |
diego-d5000/MisValesMd | env/lib/python2.7/site-packages/django/core/checks/model_checks.py | 1 | 2454 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import inspect
import types
from django.apps import apps
from django.core.checks import Error, Tags, register
@register(Tags.models)
def check_all_models(app_configs=None, **kwargs):
errors = []
for model in apps.get_models():
if app_configs is None or model._meta.app_config in app_configs:
if not inspect.ismethod(model.check):
errors.append(
Error(
"The '%s.check()' class method is "
"currently overridden by %r." % (
model.__name__, model.check),
hint=None,
obj=model,
id='models.E020'
)
)
else:
errors.extend(model.check(**kwargs))
return errors
@register(Tags.models, Tags.signals)
def check_model_signals(app_configs=None, **kwargs):
"""
Ensure lazily referenced model signals senders are installed.
"""
# Avoid circular import
from django.db import models
errors = []
for name in dir(models.signals):
obj = getattr(models.signals, name)
if isinstance(obj, models.signals.ModelSignal):
for reference, receivers in obj.unresolved_references.items():
for receiver, _, _ in receivers:
# The receiver is either a function or an instance of class
# defining a `__call__` method.
if isinstance(receiver, types.FunctionType):
description = "The '%s' function" % receiver.__name__
else:
description = "An instance of the '%s' class" % receiver.__class__.__name__
errors.append(
Error(
"%s was connected to the '%s' signal "
"with a lazy reference to the '%s' sender, "
"which has not been installed." % (
description, name, '.'.join(reference)
),
obj=receiver.__module__,
hint=None,
id='signals.E001'
)
)
return errors
| mit |
MM1nd/worldengine | worldengine/astar.py | 4 | 6331 | #!/usr/bin/env python
"""
A* works based on cost: the higher the cost, the less likely it is to travel
that path. There are no hard limits; it works from minus infinity to
positive infinity.
It will take a starting position and an end position, then find the path
between the two with the lowest cost.
This is perfect for height maps, for example, because you can use it to
find a path through mountains/hills between villages.
usage: You can use PathFinder.find(height_map, source, destination)
where height_map is a 2D numpy array, while source and destination are both
lists of two values [x, y].
author: Bret Curtis
"""
class Path:
""" A path object, containing the nodes and total cost."""
def __init__(self, nodes, total_cost):
self.nodes = nodes
self.totalCost = total_cost
def get_nodes(self):
return self.nodes
def get_total_movement_cost(self):
return self.totalCost
class Node:
""" The basic unit/pixel/location is a Node."""
def __init__(self, location, movement_cost, lid, parent=None):
self.location = location # where is this node located
self.mCost = movement_cost # total move cost to reach this node
self.parent = parent # parent node
self.score = 0 # calculated score for this node
self.lid = lid # location id unique for each location in the map
def __eq__(self, n):
if n.lid == self.lid:
return 1
else:
return 0
class AStar:
""" The "A* Star Search Algorithm" itself.
Have a read:
https://en.wikipedia.org/wiki/A*_search_algorithm
"""
def __init__(self, map_handler):
self.mh = map_handler
self.o = []
self.on = []
self.c = []
def _get_best_open_node(self):
best_node = None
for n in self.on:
if not best_node:
best_node = n
else:
if n.score <= best_node.score:
best_node = n
return best_node
@staticmethod
def _trace_path(n):
nodes = []
total_cost = n.mCost
p = n.parent
nodes.insert(0, n)
while 1:
if p.parent is None:
break
nodes.insert(0, p)
p = p.parent
return Path(nodes, total_cost)
def _handle_node(self, node, end):
i = self.o.index(node.lid)
self.on.pop(i)
self.o.pop(i)
self.c.append(node.lid)
nodes = self.mh.get_adjacent_nodes(node, end)
for n in nodes:
if n.location == end: # reached the destination
return n
elif n.lid in self.c: # already in close, skip this
continue
elif n.lid in self.o: # already in open, check if better score
i = self.o.index(n.lid)
on = self.on[i]
if n.mCost < on.mCost:
self.on.pop(i)
self.o.pop(i)
self.on.append(n)
self.o.append(n.lid)
else: # new node, append to open list
self.on.append(n)
self.o.append(n.lid)
return None
def find_path(self, from_location, to_location):
end = to_location
f_node = self.mh.get_node(from_location)
self.on.append(f_node)
self.o.append(f_node.lid)
next_node = f_node
counter = 0 # a bail-out counter
while next_node is not None:
if counter > 10000:
break # no path found under limit
finish = self._handle_node(next_node, end)
if finish:
return self._trace_path(finish)
next_node = self._get_best_open_node()
counter += 1
return None
class SQLocation:
"""A simple Square Map Location implementation"""
def __init__(self, x, y):
self.x = x
self.y = y
def __eq__(self, l):
if l.x == self.x and l.y == self.y:
return 1
else:
return 0
class SQMapHandler:
"""A simple Square Map implementation"""
def __init__(self, map_data, width, height):
self.m = map_data
self.w = width
self.h = height
def get_node(self, location):
x = location.x
y = location.y
if x < 0 or x >= self.w or y < 0 or y >= self.h:
return None
d = self.m[(y * self.w) + x]
return Node(location, d, ((y * self.w) + x))
def get_adjacent_nodes(self, cur_node, destination):
result = []
cl = cur_node.location
dl = destination
n = self._handle_node(cl.x + 1, cl.y, cur_node, dl.x, dl.y)
if n:
result.append(n)
n = self._handle_node(cl.x - 1, cl.y, cur_node, dl.x, dl.y)
if n:
result.append(n)
n = self._handle_node(cl.x, cl.y + 1, cur_node, dl.x, dl.y)
if n:
result.append(n)
n = self._handle_node(cl.x, cl.y - 1, cur_node, dl.x, dl.y)
if n:
result.append(n)
return result
def _handle_node(self, x, y, from_node, destination_x, destination_y):
n = self.get_node(SQLocation(x, y))
if n is not None:
dx = max(x, destination_x) - min(x, destination_x)
dy = max(y, destination_y) - min(y, destination_y)
em_cost = dx + dy
n.mCost += from_node.mCost
n.score = n.mCost + em_cost
n.parent = from_node
return n
return None
class PathFinder:
"""Using the a* algorithm we will try to find the best path between two
points.
"""
def __init__(self):
pass
@staticmethod
def find(height_map, source, destination):
sx, sy = source
dx, dy = destination
path = []
height, width = height_map.shape
graph = height_map.flatten('C') #flatten array (row-major)
pathfinder = AStar(SQMapHandler(graph, width, height))
start = SQLocation(sx, sy)
end = SQLocation(dx, dy)
p = pathfinder.find_path(start, end)
if not p:
return path
for node in p.nodes:
path.append([node.location.x, node.location.y])
return path
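# Illustrative sketch (not part of the original module): using PathFinder on a
# small numpy height map, as described in the module docstring. The terrain
# values are arbitrary; higher cells cost more to cross.
def _example_find_path():  # pragma: no cover - illustrative only
    import numpy
    height_map = numpy.array([[1, 1, 9],
                              [9, 1, 9],
                              [9, 1, 1]])
    # Walk from the top-left corner to the bottom-right corner.
    return PathFinder.find(height_map, [0, 0], [2, 2])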
| mit |
napalm-automation/napalm-yang | napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_external_reachability/prefixes/prefixes_/__init__.py | 1 | 42200 | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import state
from . import default_metric
from . import delay_metric
from . import expense_metric
from . import error_metric
class prefixes(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/ipv4-external-reachability/prefixes/prefixes. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: IPv4 external prefixes and reachability attributes.
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__state",
"__default_metric",
"__delay_metric",
"__expense_metric",
"__error_metric",
)
_yang_name = "prefixes"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
self.__default_metric = YANGDynClass(
base=default_metric.default_metric,
is_container="container",
yang_name="default-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
self.__delay_metric = YANGDynClass(
base=delay_metric.delay_metric,
is_container="container",
yang_name="delay-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
self.__expense_metric = YANGDynClass(
base=expense_metric.expense_metric,
is_container="container",
yang_name="expense-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
self.__error_metric = YANGDynClass(
base=error_metric.error_metric,
is_container="container",
yang_name="error-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"ipv4-external-reachability",
"prefixes",
"prefixes",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_external_reachability/prefixes/prefixes/state (container)
YANG Description: State parameters of IPv4 standard prefix.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_external_reachability/prefixes/prefixes/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters of IPv4 standard prefix.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
def _get_default_metric(self):
"""
Getter method for default_metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_external_reachability/prefixes/prefixes/default_metric (container)
YANG Description: This container defines ISIS Default Metric.
"""
return self.__default_metric
def _set_default_metric(self, v, load=False):
"""
Setter method for default_metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_external_reachability/prefixes/prefixes/default_metric (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_default_metric is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_default_metric() directly.
YANG Description: This container defines ISIS Default Metric.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=default_metric.default_metric,
is_container="container",
yang_name="default-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """default_metric must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=default_metric.default_metric, is_container='container', yang_name="default-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__default_metric = t
if hasattr(self, "_set"):
self._set()
def _unset_default_metric(self):
self.__default_metric = YANGDynClass(
base=default_metric.default_metric,
is_container="container",
yang_name="default-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
def _get_delay_metric(self):
"""
Getter method for delay_metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_external_reachability/prefixes/prefixes/delay_metric (container)
YANG Description: This container defines the ISIS delay metric.
"""
return self.__delay_metric
def _set_delay_metric(self, v, load=False):
"""
Setter method for delay_metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_external_reachability/prefixes/prefixes/delay_metric (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_delay_metric is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_delay_metric() directly.
YANG Description: This container defines the ISIS delay metric.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=delay_metric.delay_metric,
is_container="container",
yang_name="delay-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """delay_metric must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=delay_metric.delay_metric, is_container='container', yang_name="delay-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__delay_metric = t
if hasattr(self, "_set"):
self._set()
def _unset_delay_metric(self):
self.__delay_metric = YANGDynClass(
base=delay_metric.delay_metric,
is_container="container",
yang_name="delay-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
def _get_expense_metric(self):
"""
Getter method for expense_metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_external_reachability/prefixes/prefixes/expense_metric (container)
YANG Description: This container defines the ISIS expense metric.
"""
return self.__expense_metric
def _set_expense_metric(self, v, load=False):
"""
Setter method for expense_metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_external_reachability/prefixes/prefixes/expense_metric (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_expense_metric is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_expense_metric() directly.
YANG Description: This container defines the ISIS expense metric.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=expense_metric.expense_metric,
is_container="container",
yang_name="expense-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """expense_metric must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=expense_metric.expense_metric, is_container='container', yang_name="expense-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__expense_metric = t
if hasattr(self, "_set"):
self._set()
def _unset_expense_metric(self):
self.__expense_metric = YANGDynClass(
base=expense_metric.expense_metric,
is_container="container",
yang_name="expense-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
def _get_error_metric(self):
"""
Getter method for error_metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_external_reachability/prefixes/prefixes/error_metric (container)
YANG Description: This container defines the ISIS error metric.
"""
return self.__error_metric
def _set_error_metric(self, v, load=False):
"""
Setter method for error_metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_external_reachability/prefixes/prefixes/error_metric (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_error_metric is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_error_metric() directly.
YANG Description: This container defines the ISIS error metric.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=error_metric.error_metric,
is_container="container",
yang_name="error-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """error_metric must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=error_metric.error_metric, is_container='container', yang_name="error-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__error_metric = t
if hasattr(self, "_set"):
self._set()
def _unset_error_metric(self):
self.__error_metric = YANGDynClass(
base=error_metric.error_metric,
is_container="container",
yang_name="error-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
default_metric = __builtin__.property(_get_default_metric)
delay_metric = __builtin__.property(_get_delay_metric)
expense_metric = __builtin__.property(_get_expense_metric)
error_metric = __builtin__.property(_get_error_metric)
_pyangbind_elements = OrderedDict(
[
("state", state),
("default_metric", default_metric),
("delay_metric", delay_metric),
("expense_metric", expense_metric),
("error_metric", error_metric),
]
)
from . import state
from . import default_metric
from . import delay_metric
from . import expense_metric
from . import error_metric
class prefixes(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/ipv4-external-reachability/prefixes/prefixes. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: IPv4 external prefixes and reachability attributes.
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__state",
"__default_metric",
"__delay_metric",
"__expense_metric",
"__error_metric",
)
_yang_name = "prefixes"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
self.__default_metric = YANGDynClass(
base=default_metric.default_metric,
is_container="container",
yang_name="default-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
self.__delay_metric = YANGDynClass(
base=delay_metric.delay_metric,
is_container="container",
yang_name="delay-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
self.__expense_metric = YANGDynClass(
base=expense_metric.expense_metric,
is_container="container",
yang_name="expense-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
self.__error_metric = YANGDynClass(
base=error_metric.error_metric,
is_container="container",
yang_name="error-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"ipv4-external-reachability",
"prefixes",
"prefixes",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_external_reachability/prefixes/prefixes/state (container)
YANG Description: State parameters of IPv4 standard prefix.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_external_reachability/prefixes/prefixes/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters of IPv4 standard prefix.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
def _get_default_metric(self):
"""
Getter method for default_metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_external_reachability/prefixes/prefixes/default_metric (container)
YANG Description: This container defines ISIS Default Metric.
"""
return self.__default_metric
def _set_default_metric(self, v, load=False):
"""
Setter method for default_metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_external_reachability/prefixes/prefixes/default_metric (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_default_metric is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_default_metric() directly.
YANG Description: This container defines ISIS Default Metric.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=default_metric.default_metric,
is_container="container",
yang_name="default-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """default_metric must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=default_metric.default_metric, is_container='container', yang_name="default-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__default_metric = t
if hasattr(self, "_set"):
self._set()
def _unset_default_metric(self):
self.__default_metric = YANGDynClass(
base=default_metric.default_metric,
is_container="container",
yang_name="default-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
def _get_delay_metric(self):
"""
Getter method for delay_metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_external_reachability/prefixes/prefixes/delay_metric (container)
YANG Description: This container defines the ISIS delay metric.
"""
return self.__delay_metric
def _set_delay_metric(self, v, load=False):
"""
Setter method for delay_metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_external_reachability/prefixes/prefixes/delay_metric (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_delay_metric is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_delay_metric() directly.
YANG Description: This container defines the ISIS delay metric.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=delay_metric.delay_metric,
is_container="container",
yang_name="delay-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """delay_metric must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=delay_metric.delay_metric, is_container='container', yang_name="delay-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__delay_metric = t
if hasattr(self, "_set"):
self._set()
def _unset_delay_metric(self):
self.__delay_metric = YANGDynClass(
base=delay_metric.delay_metric,
is_container="container",
yang_name="delay-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
def _get_expense_metric(self):
"""
Getter method for expense_metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_external_reachability/prefixes/prefixes/expense_metric (container)
YANG Description: This container defines the ISIS expense metric.
"""
return self.__expense_metric
def _set_expense_metric(self, v, load=False):
"""
Setter method for expense_metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_external_reachability/prefixes/prefixes/expense_metric (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_expense_metric is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_expense_metric() directly.
YANG Description: This container defines the ISIS expense metric.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=expense_metric.expense_metric,
is_container="container",
yang_name="expense-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """expense_metric must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=expense_metric.expense_metric, is_container='container', yang_name="expense-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__expense_metric = t
if hasattr(self, "_set"):
self._set()
def _unset_expense_metric(self):
self.__expense_metric = YANGDynClass(
base=expense_metric.expense_metric,
is_container="container",
yang_name="expense-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
def _get_error_metric(self):
"""
Getter method for error_metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_external_reachability/prefixes/prefixes/error_metric (container)
YANG Description: This container defines the ISIS error metric.
"""
return self.__error_metric
def _set_error_metric(self, v, load=False):
"""
Setter method for error_metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/ipv4_external_reachability/prefixes/prefixes/error_metric (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_error_metric is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_error_metric() directly.
YANG Description: This container defines the ISIS error metric.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=error_metric.error_metric,
is_container="container",
yang_name="error-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """error_metric must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=error_metric.error_metric, is_container='container', yang_name="error-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__error_metric = t
if hasattr(self, "_set"):
self._set()
def _unset_error_metric(self):
self.__error_metric = YANGDynClass(
base=error_metric.error_metric,
is_container="container",
yang_name="error-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
default_metric = __builtin__.property(_get_default_metric)
delay_metric = __builtin__.property(_get_delay_metric)
expense_metric = __builtin__.property(_get_expense_metric)
error_metric = __builtin__.property(_get_error_metric)
_pyangbind_elements = OrderedDict(
[
("state", state),
("default_metric", default_metric),
("delay_metric", delay_metric),
("expense_metric", expense_metric),
("error_metric", error_metric),
]
)
| apache-2.0 |
rohitwaghchaure/erpnext_develop | erpnext/patches/v4_0/create_price_list_if_missing.py | 119 | 1087 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils.nestedset import get_root_of
def execute():
# setup not complete
if not frappe.db.sql("""select name from tabCompany limit 1"""):
return
if "shopping_cart" in frappe.get_installed_apps():
frappe.reload_doc("shopping_cart", "doctype", "shopping_cart_settings")
if not frappe.db.sql("select name from `tabPrice List` where buying=1"):
create_price_list(_("Standard Buying"), buying=1)
if not frappe.db.sql("select name from `tabPrice List` where selling=1"):
create_price_list(_("Standard Selling"), selling=1)
def create_price_list(pl_name, buying=0, selling=0):
price_list = frappe.get_doc({
"doctype": "Price List",
"price_list_name": pl_name,
"enabled": 1,
"buying": buying,
"selling": selling,
"currency": frappe.db.get_default("currency"),
"territories": [{
"territory": get_root_of("Territory")
}]
})
price_list.insert()
| gpl-3.0 |
mnach/suds-py3k | suds/serviceproxy.py | 2 | 2974 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
The service proxy provides access to web services.
Replaced by: L{client.Client}
"""
from logging import getLogger
from suds import *
from suds.client import Client
log = getLogger(__name__)
class ServiceProxy(object):
"""
A lightweight soap based web service proxy.
@ivar __client__: A client.
Everything is delegated to the 2nd generation API.
@type __client__: L{Client}
@note: Deprecated, replaced by L{Client}.
"""
def __init__(self, url, **kwargs):
"""
@param url: The URL for the WSDL.
@type url: str
@param kwargs: keyword arguments.
@keyword faults: Raise faults raised by server (default:True),
else return tuple from service method invocation as (http code, object).
@type faults: boolean
@keyword proxy: An http proxy to be specified on requests (default:{}).
The proxy is defined as {protocol:proxy,}
@type proxy: dict
"""
client = Client(url, **kwargs)
self.__client__ = client
def get_instance(self, name):
"""
Get an instance of a WSDL type by name
@param name: The name of a type defined in the WSDL.
@type name: str
@return: An instance on success, else None
@rtype: L{sudsobject.Object}
"""
return self.__client__.factory.create(name)
def get_enum(self, name):
"""
Get an instance of an enumeration defined in the WSDL by name.
@param name: The name of a enumeration defined in the WSDL.
@type name: str
@return: An instance on success, else None
@rtype: L{sudsobject.Object}
"""
return self.__client__.factory.create(name)
def __str__(self):
return str(self.__client__)
def __unicode__(self):
return str(self.__client__)
def __getattr__(self, name):
builtin = name.startswith('__') and name.endswith('__')
if builtin:
return self.__dict__[name]
else:
return getattr(self.__client__.service, name) | lgpl-3.0 |
JaDogg/__py_playground | reference/parsley/ometa/test/test_tube.py | 3 | 3004 | from __future__ import absolute_import, unicode_literals
import unittest
from ometa.grammar import OMeta
from ometa.tube import TrampolinedParser
def iterbytes(originalBytes):
for i in range(len(originalBytes)):
yield originalBytes[i:i+1]
class TrampolinedReceiver():
"""
    Receive and store the passed-in data.
"""
currentRule = 'initial'
def __init__(self):
self.received = []
def receive(self, data):
self.received.append(data)
class TrampolinedParserTestCase(unittest.TestCase):
"""
Tests for L{ometa.tube.TrampolinedParser}
"""
def _parseGrammar(self, grammar, name="Grammar"):
return OMeta(grammar).parseGrammar(name)
def setUp(self):
_grammar = r"""
delimiter = '\r\n'
initial = <(~delimiter anything)*>:val delimiter -> receiver.receive(val)
witharg :arg1 :arg2 = <(~delimiter anything)*>:a delimiter -> receiver.receive(arg1+arg2+a)
"""
self.grammar = self._parseGrammar(_grammar)
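    # For reference (illustrative, mirrors the grammar defined in setUp):
    # feeding the bytes of 'foo\r\n' one at a time matches the initial rule
    # with val = 'foo', so receiver.receive('foo') is called; any bytes after
    # the last '\r\n' stay buffered until a later delimiter completes the rule.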
def test_dataNotFullyReceived(self):
"""
Since the initial rule inside the grammar is not matched, the receiver
shouldn't receive any byte.
"""
receiver = TrampolinedReceiver()
trampolinedParser = TrampolinedParser(self.grammar, receiver, {})
buf = 'foobarandnotreachdelimiter'
for c in iterbytes(buf):
trampolinedParser.receive(c)
self.assertEqual(receiver.received, [])
def test_dataFullyReceived(self):
"""
The receiver should receive the data according to the grammar.
"""
receiver = TrampolinedReceiver()
trampolinedParser = TrampolinedParser(self.grammar, receiver, {})
buf = '\r\n'.join(('foo', 'bar', 'foo', 'bar'))
for c in iterbytes(buf):
trampolinedParser.receive(c)
self.assertEqual(receiver.received, ['foo', 'bar', 'foo'])
trampolinedParser.receive('\r\n')
self.assertEqual(receiver.received, ['foo', 'bar', 'foo', 'bar'])
def test_bindings(self):
"""
The passed-in bindings should be accessible inside the grammar.
"""
receiver = TrampolinedReceiver()
grammar = r"""
initial = digit:d (-> int(d)+SMALL_INT):val -> receiver.receive(val)
"""
bindings = {'SMALL_INT': 3}
TrampolinedParser(self._parseGrammar(grammar), receiver, bindings).receive('0')
self.assertEqual(receiver.received, [3])
def test_currentRuleWithArgs(self):
"""
        TrampolinedParser should be able to invoke the current rule with args.
"""
receiver = TrampolinedReceiver()
receiver.currentRule = "witharg", "nice ", "day"
trampolinedParser = TrampolinedParser(self.grammar, receiver, {})
buf = ' oh yes\r\n'
for c in iterbytes(buf):
trampolinedParser.receive(c)
self.assertEqual(receiver.received, ["nice day oh yes"])
| mit |
box/ClusterRunner | app/master/time_based_atom_grouper.py | 4 | 11090 | from collections import OrderedDict
from app.master.atom_grouper import AtomGrouper
class TimeBasedAtomGrouper(object):
"""
This class implements the algorithm to best split & group atoms based on historic time values. This algorithm is
somewhat complicated, so I'm going to give a summary here.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Let N be the number of concurrent executors allocated for this job.
Let T be the aggregate serial time to execute all atoms on a single executor.
Both N and T are known values at the beginning of this algorithm.
In the ideal subjob atom-grouping, we would have exactly N subjobs, each allocated with T/N amount of work that
would all end at the same time. However, in reality, there are a few factors that makes this solution unfeasible:
- There is a significant amount of variability in the times of running these atoms, so numbers are never exact.
- Certain builds will introduce new tests (for which we don't have historical time data for).
- Not all of the machines are exactly the same, so we can't expect identical performance.
We have two aims for this algorithm:
- Minimize the amount of framework overhead (time spent sending and retrieving subjobs) and maximize the amount of
time the slaves actually spend running the build.
- Don't overload any single executor with too much work--this will cause the whole build to wait on a single
executor. We want to try to get all of the executors to end as close to the same time as possible in order to
get rid of any inefficient use of slave machines.
In order to accomplish this, the algorithm implemented by this class tries to split up the majority of the atoms
into N buckets, and splits up the rest of the atoms into smaller buckets. Hopefully, the timeline graph of
executed subjobs for each of the executors would end up looking like this:
[========================================================================][===][==][==]
[===============================================================================][==]
[====================================================================][====][===][==][=]
[========================================================================][===][==][=]
[=====================================================================][====][==][==]
[==================================================================================][=]
[===================================================================][======][==][==]
The algorithm has two stages of subjob creation: the 'big chunk' stage and the 'small chunk' stage. The 'big chunk'
stage creates exactly N large subjob groupings that will consist of the majority of atoms (in terms of runtime).
The 'small chunk' stage creates ~2N short subjob groupings that will be used to fill in the gaps in order to aim for
having all of the executors end at similar times.
Notes:
- For new atoms that we don't have historic times for, we will assign it the highest atom time value in order to
avoid underestimating the length of unknown atoms.
- We will have to try tweaking the percentage of T that we want to be allocated for the initial large batch of
big subjobs. Same goes for the number and size of the smaller buckets.
"""
BIG_CHUNK_FRACTION = 0.8
def __init__(self, atoms, max_executors, atom_time_map, project_directory):
"""
:param atoms: the list of atoms for this build
:type atoms: list[app.master.atom.Atom]
:param max_executors: the maximum number of executors for this build
:type max_executors: int
:param atom_time_map: a dictionary containing the historic times for atoms for this particular job
:type atom_time_map: dict[str, float]
:type project_directory: str
"""
self._atoms = atoms
self._max_executors = max_executors
self._atom_time_map = atom_time_map
self._project_directory = project_directory
def groupings(self):
"""
Group the atoms into subjobs using historic timing data.
:return: a list of lists of atoms
:rtype: list[list[app.master.atom.Atom]]
"""
# 1). Coalesce the atoms with historic atom times, and also get total estimated runtime
try:
total_estimated_runtime = self._set_expected_atom_times(
self._atoms, self._atom_time_map, self._project_directory)
except _AtomTimingDataError:
grouper = AtomGrouper(self._atoms, self._max_executors)
return grouper.groupings()
# 2). Sort them by decreasing time, and add them to an OrderedDict
atoms_by_decreasing_time = sorted(self._atoms, key=lambda atom: atom.expected_time, reverse=True)
sorted_atom_times_left = OrderedDict([(atom, atom.expected_time) for atom in atoms_by_decreasing_time])
# 3). Group them!
# Calculate what the target 'big subjob' time is going to be for each executor's initial subjob
big_subjob_time = (total_estimated_runtime * self.BIG_CHUNK_FRACTION) / self._max_executors
# Calculate what the target 'small subjob' time is going to be
small_subjob_time = (total_estimated_runtime * (1.0 - self.BIG_CHUNK_FRACTION)) / (2 * self._max_executors)
# _group_atoms_into_sized_buckets() will remove elements from sorted_atom_times_left.
subjobs = self._group_atoms_into_sized_buckets(sorted_atom_times_left, big_subjob_time, self._max_executors)
small_subjobs = self._group_atoms_into_sized_buckets(sorted_atom_times_left, small_subjob_time, None)
subjobs.extend(small_subjobs)
return subjobs
def _set_expected_atom_times(self, new_atoms, old_atoms_with_times, project_directory):
"""
Set the expected runtime (new_atom.expected_time) of each atom in new_atoms using historic timing data.
Additionally, return the total estimated serial-runtime for this build. Although this seems like an odd thing
for this method to return, it is done here for efficiency. There can be thousands of atoms, and iterating
through them multiple times seems inefficient.
:param new_atoms: the list of atoms that will be run in this build
:type new_atoms: list[app.master.atom.Atom]
:param old_atoms_with_times: a dictionary containing the historic times for atoms for this particular job
:type old_atoms_with_times: dict[str, float]
:type project_directory: str
:return: the total estimated runtime in seconds
:rtype: float
"""
atoms_without_timing_data = []
total_time = 0
max_atom_time = 0
# Generate list for atoms that have timing data
for new_atom in new_atoms:
if new_atom.command_string not in old_atoms_with_times:
atoms_without_timing_data.append(new_atom)
continue
new_atom.expected_time = old_atoms_with_times[new_atom.command_string]
# Discover largest single atom time to use as conservative estimates for atoms with unknown times
if max_atom_time < new_atom.expected_time:
max_atom_time = new_atom.expected_time
# We want to return the atom with the project directory still in it, as this data will directly be
# sent to the slave to be run.
total_time += new_atom.expected_time
# For the atoms without historic timing data, assign them the largest atom time we have
for new_atom in atoms_without_timing_data:
new_atom.expected_time = max_atom_time
if len(new_atoms) == len(atoms_without_timing_data):
raise _AtomTimingDataError
total_time += (max_atom_time * len(atoms_without_timing_data))
return total_time
def _group_atoms_into_sized_buckets(self, sorted_atom_time_dict, target_group_time, max_groups_to_create):
"""
Given a sorted dictionary (Python FTW) of [atom, time] pairs in variable sorted_atom_time_dict, return a list
of lists of atoms that each are estimated to take target_group_time seconds. This method will generate at most
max_groups_to_create groupings, and will return once this limit is reached or when sorted_atom_time_dict is
empty.
Note, this method will modify sorted_atom_time_dict's state by removing elements as needed (often from the
middle of the collection).
        :param sorted_atom_time_dict: the sorted (longest first) OrderedDict containing [atom, time] pairs.
            This OrderedDict will have elements removed by this method.
:type sorted_atom_time_dict: OrderedDict[app.master.atom.Atom, float]
:param target_group_time: how long each subjob should approximately take
:type target_group_time: float
:param max_groups_to_create: the maximum number of subjobs to create. Once max_groups_to_create limit is
reached, this method will return the subjobs that have already been grouped. If set to None, then there
is no limit.
:type max_groups_to_create: int|None
:return: the groups of grouped atoms, with each group taking an estimated target_group_time
:rtype: list[list[app.master.atom.Atom]]
"""
subjobs = []
subjob_time_so_far = 0
subjob_atoms = []
while (max_groups_to_create is None or len(subjobs) < max_groups_to_create) and len(sorted_atom_time_dict) > 0:
for atom, time in sorted_atom_time_dict.items():
if len(subjob_atoms) == 0 or (time + subjob_time_so_far) <= target_group_time:
subjob_time_so_far += time
subjob_atoms.append(atom)
sorted_atom_time_dict.pop(atom)
# If (number of subjobs created so far + atoms left) is less than or equal to the total number of
# subjobs we need to create, then have each remaining atom be a subjob and return.
# The "+ 1" is here to account for the current subjob being generated, but that hasn't been
# appended to subjobs yet.
if max_groups_to_create is not None and (len(subjobs) + len(sorted_atom_time_dict) + 1) <= max_groups_to_create:
subjobs.append(subjob_atoms)
for atom, _ in sorted_atom_time_dict.items():
sorted_atom_time_dict.pop(atom)
subjobs.append([atom])
return subjobs
subjobs.append(subjob_atoms)
subjob_atoms = []
subjob_time_so_far = 0
return subjobs
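# Hypothetical usage sketch (variable names are illustrative only):
#
#     grouper = TimeBasedAtomGrouper(atoms, max_executors=8,
#                                    atom_time_map=historic_times,
#                                    project_directory='/tmp/project')
#     subjobs = grouper.groupings()  # the ~N big subjobs come first, then the small fillers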
class _AtomTimingDataError(Exception):
"""
An exception to represent the case where the atom timing data is either not present or incorrect.
"""
| apache-2.0 |
bgxavier/nova | nova/tests/unit/virt/hyperv/test_networkutils.py | 68 | 3245 | # Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import test
from nova.virt.hyperv import networkutils
from nova.virt.hyperv import vmutils
class NetworkUtilsTestCase(test.NoDBTestCase):
"""Unit tests for the Hyper-V NetworkUtils class."""
_FAKE_PORT = {'Name': mock.sentinel.FAKE_PORT_NAME}
_FAKE_RET_VALUE = 0
_MSVM_VIRTUAL_SWITCH = 'Msvm_VirtualSwitch'
def setUp(self):
self._networkutils = networkutils.NetworkUtils()
self._networkutils._conn = mock.MagicMock()
super(NetworkUtilsTestCase, self).setUp()
def test_get_external_vswitch(self):
mock_vswitch = mock.MagicMock()
mock_vswitch.path_.return_value = mock.sentinel.FAKE_VSWITCH_PATH
getattr(self._networkutils._conn,
self._MSVM_VIRTUAL_SWITCH).return_value = [mock_vswitch]
switch_path = self._networkutils.get_external_vswitch(
mock.sentinel.FAKE_VSWITCH_NAME)
self.assertEqual(mock.sentinel.FAKE_VSWITCH_PATH, switch_path)
def test_get_external_vswitch_not_found(self):
self._networkutils._conn.Msvm_VirtualEthernetSwitch.return_value = []
self.assertRaises(vmutils.HyperVException,
self._networkutils.get_external_vswitch,
mock.sentinel.FAKE_VSWITCH_NAME)
def test_get_external_vswitch_no_name(self):
mock_vswitch = mock.MagicMock()
mock_vswitch.path_.return_value = mock.sentinel.FAKE_VSWITCH_PATH
mock_ext_port = self._networkutils._conn.Msvm_ExternalEthernetPort()[0]
self._prepare_external_port(mock_vswitch, mock_ext_port)
switch_path = self._networkutils.get_external_vswitch(None)
self.assertEqual(mock.sentinel.FAKE_VSWITCH_PATH, switch_path)
def _prepare_external_port(self, mock_vswitch, mock_ext_port):
mock_lep = mock_ext_port.associators()[0]
mock_lep.associators.return_value = [mock_vswitch]
def test_create_vswitch_port(self):
svc = self._networkutils._conn.Msvm_VirtualSwitchManagementService()[0]
svc.CreateSwitchPort.return_value = (
self._FAKE_PORT, self._FAKE_RET_VALUE)
port = self._networkutils.create_vswitch_port(
mock.sentinel.FAKE_VSWITCH_PATH, mock.sentinel.FAKE_PORT_NAME)
svc.CreateSwitchPort.assert_called_once_with(
Name=mock.ANY, FriendlyName=mock.sentinel.FAKE_PORT_NAME,
ScopeOfResidence="", VirtualSwitch=mock.sentinel.FAKE_VSWITCH_PATH)
self.assertEqual(self._FAKE_PORT, port)
def test_vswitch_port_needed(self):
self.assertTrue(self._networkutils.vswitch_port_needed())
| apache-2.0 |
NeCTAR-RC/nova | nova/tests/unit/api/openstack/compute/test_services.py | 9 | 39890 | # Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import iso8601
import mock
from oslo_utils import fixture as utils_fixture
import webob.exc
from nova.api.openstack import api_version_request as api_version
from nova.api.openstack.compute.legacy_v2.contrib import services \
as services_v2
from nova.api.openstack.compute import services as services_v21
from nova.api.openstack import extensions
from nova.api.openstack import wsgi as os_wsgi
from nova import availability_zones
from nova.cells import utils as cells_utils
from nova.compute import cells_api
from nova import context
from nova import exception
from nova import objects
from nova.servicegroup.drivers import db as db_driver
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.objects import test_service
fake_services_list = [
dict(test_service.fake_service,
binary='nova-scheduler',
host='host1',
id=1,
disabled=True,
topic='scheduler',
updated_at=datetime.datetime(2012, 10, 29, 13, 42, 2),
created_at=datetime.datetime(2012, 9, 18, 2, 46, 27),
last_seen_up=datetime.datetime(2012, 10, 29, 13, 42, 2),
forced_down=False,
disabled_reason='test1'),
dict(test_service.fake_service,
binary='nova-compute',
host='host1',
id=2,
disabled=True,
topic='compute',
updated_at=datetime.datetime(2012, 10, 29, 13, 42, 5),
created_at=datetime.datetime(2012, 9, 18, 2, 46, 27),
last_seen_up=datetime.datetime(2012, 10, 29, 13, 42, 5),
forced_down=False,
disabled_reason='test2'),
dict(test_service.fake_service,
binary='nova-scheduler',
host='host2',
id=3,
disabled=False,
topic='scheduler',
updated_at=datetime.datetime(2012, 9, 19, 6, 55, 34),
created_at=datetime.datetime(2012, 9, 18, 2, 46, 28),
last_seen_up=datetime.datetime(2012, 9, 19, 6, 55, 34),
forced_down=False,
disabled_reason=None),
dict(test_service.fake_service,
binary='nova-compute',
host='host2',
id=4,
disabled=True,
topic='compute',
updated_at=datetime.datetime(2012, 9, 18, 8, 3, 38),
created_at=datetime.datetime(2012, 9, 18, 2, 46, 28),
last_seen_up=datetime.datetime(2012, 9, 18, 8, 3, 38),
forced_down=False,
disabled_reason='test4'),
    # NOTE(rpodolyaka): API services are a special case and must be filtered out
dict(test_service.fake_service,
binary='nova-osapi_compute',
host='host2',
id=5,
disabled=False,
topic=None,
updated_at=None,
created_at=datetime.datetime(2012, 9, 18, 2, 46, 28),
last_seen_up=None,
forced_down=False,
disabled_reason=None),
dict(test_service.fake_service,
binary='nova-metadata',
host='host2',
id=6,
disabled=False,
topic=None,
updated_at=None,
created_at=datetime.datetime(2012, 9, 18, 2, 46, 28),
last_seen_up=None,
forced_down=False,
disabled_reason=None),
]
class FakeRequest(object):
environ = {"nova.context": context.get_admin_context()}
GET = {}
    def __init__(self, version=os_wsgi.DEFAULT_API_VERSION):  # version='2.1'
super(FakeRequest, self).__init__()
self.api_version_request = api_version.APIVersionRequest(version)
class FakeRequestWithService(FakeRequest):
GET = {"binary": "nova-compute"}
class FakeRequestWithHost(FakeRequest):
GET = {"host": "host1"}
class FakeRequestWithHostService(FakeRequest):
GET = {"host": "host1", "binary": "nova-compute"}
def fake_service_get_all(services):
def service_get_all(context, filters=None, set_zones=False):
if set_zones or 'availability_zone' in filters:
return availability_zones.set_availability_zones(context,
services)
return services
return service_get_all
def fake_db_api_service_get_all(context, disabled=None):
return fake_services_list
def fake_db_service_get_by_host_binary(services):
def service_get_by_host_binary(context, host, binary):
for service in services:
if service['host'] == host and service['binary'] == binary:
return service
raise exception.HostBinaryNotFound(host=host, binary=binary)
return service_get_by_host_binary
def fake_service_get_by_host_binary(context, host, binary):
fake = fake_db_service_get_by_host_binary(fake_services_list)
return fake(context, host, binary)
def _service_get_by_id(services, value):
for service in services:
if service['id'] == value:
return service
return None
def fake_db_service_update(services):
def service_update(context, service_id, values):
service = _service_get_by_id(services, service_id)
if service is None:
raise exception.ServiceNotFound(service_id=service_id)
service = copy.deepcopy(service)
service.update(values)
return service
return service_update
def fake_service_update(context, service_id, values):
fake = fake_db_service_update(fake_services_list)
return fake(context, service_id, values)
def fake_utcnow():
return datetime.datetime(2012, 10, 29, 13, 42, 11)
class ServicesTestV21(test.TestCase):
service_is_up_exc = webob.exc.HTTPInternalServerError
bad_request = exception.ValidationError
wsgi_api_version = os_wsgi.DEFAULT_API_VERSION
def _set_up_controller(self):
self.controller = services_v21.ServiceController()
def setUp(self):
super(ServicesTestV21, self).setUp()
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
self._set_up_controller()
self.controller.host_api.service_get_all = (
mock.Mock(side_effect=fake_service_get_all(fake_services_list)))
self.useFixture(utils_fixture.TimeFixture(fake_utcnow()))
self.stub_out('nova.db.service_get_by_host_and_binary',
fake_db_service_get_by_host_binary(fake_services_list))
self.stub_out('nova.db.service_update',
fake_db_service_update(fake_services_list))
self.req = fakes.HTTPRequest.blank('')
def _process_output(self, services, has_disabled=False, has_id=False):
return services
def test_services_list(self):
req = FakeRequest()
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'nova-scheduler',
'host': 'host1',
'zone': 'internal',
'status': 'disabled',
'id': 1,
'state': 'up',
'disabled_reason': 'test1',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2)},
{'binary': 'nova-compute',
'host': 'host1',
'zone': 'nova',
'id': 2,
'status': 'disabled',
'disabled_reason': 'test2',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)},
{'binary': 'nova-scheduler',
'host': 'host2',
'zone': 'internal',
'id': 3,
'status': 'enabled',
'disabled_reason': None,
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34)},
{'binary': 'nova-compute',
'host': 'host2',
'zone': 'nova',
'id': 4,
'status': 'disabled',
'disabled_reason': 'test4',
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38)}]}
self._process_output(response)
self.assertEqual(res_dict, response)
def test_services_list_with_host(self):
req = FakeRequestWithHost()
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'nova-scheduler',
'host': 'host1',
'disabled_reason': 'test1',
'id': 1,
'zone': 'internal',
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2)},
{'binary': 'nova-compute',
'host': 'host1',
'zone': 'nova',
'disabled_reason': 'test2',
'id': 2,
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}]}
self._process_output(response)
self.assertEqual(res_dict, response)
def test_services_list_with_service(self):
req = FakeRequestWithService()
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'nova-compute',
'host': 'host1',
'disabled_reason': 'test2',
'id': 2,
'zone': 'nova',
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)},
{'binary': 'nova-compute',
'host': 'host2',
'zone': 'nova',
'disabled_reason': 'test4',
'id': 4,
'status': 'disabled',
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38)}]}
self._process_output(response)
self.assertEqual(res_dict, response)
def test_services_list_with_host_service(self):
req = FakeRequestWithHostService()
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'nova-compute',
'host': 'host1',
'zone': 'nova',
'disabled_reason': 'test2',
'id': 2,
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}]}
self._process_output(response)
self.assertEqual(res_dict, response)
def test_services_detail(self):
self.ext_mgr.extensions['os-extended-services'] = True
req = FakeRequest()
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'nova-scheduler',
'host': 'host1',
'zone': 'internal',
'status': 'disabled',
'id': 1,
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
'disabled_reason': 'test1'},
{'binary': 'nova-compute',
'host': 'host1',
'zone': 'nova',
'status': 'disabled',
'state': 'up',
'id': 2,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
'disabled_reason': 'test2'},
{'binary': 'nova-scheduler',
'host': 'host2',
'zone': 'internal',
'status': 'enabled',
'id': 3,
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34),
'disabled_reason': None},
{'binary': 'nova-compute',
'host': 'host2',
'zone': 'nova',
'id': 4,
'status': 'disabled',
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
'disabled_reason': 'test4'}]}
self._process_output(response, has_disabled=True)
self.assertEqual(res_dict, response)
def test_service_detail_with_host(self):
self.ext_mgr.extensions['os-extended-services'] = True
req = FakeRequestWithHost()
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'nova-scheduler',
'host': 'host1',
'zone': 'internal',
'id': 1,
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
'disabled_reason': 'test1'},
{'binary': 'nova-compute',
'host': 'host1',
'zone': 'nova',
'id': 2,
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
'disabled_reason': 'test2'}]}
self._process_output(response, has_disabled=True)
self.assertEqual(res_dict, response)
def test_service_detail_with_service(self):
self.ext_mgr.extensions['os-extended-services'] = True
req = FakeRequestWithService()
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'nova-compute',
'host': 'host1',
'zone': 'nova',
'id': 2,
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
'disabled_reason': 'test2'},
{'binary': 'nova-compute',
'host': 'host2',
'id': 4,
'zone': 'nova',
'status': 'disabled',
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
'disabled_reason': 'test4'}]}
self._process_output(response, has_disabled=True)
self.assertEqual(res_dict, response)
def test_service_detail_with_host_service(self):
self.ext_mgr.extensions['os-extended-services'] = True
req = FakeRequestWithHostService()
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'nova-compute',
'host': 'host1',
'zone': 'nova',
'status': 'disabled',
'id': 2,
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
'disabled_reason': 'test2'}]}
self._process_output(response, has_disabled=True)
self.assertEqual(res_dict, response)
def test_services_detail_with_delete_extension(self):
self.ext_mgr.extensions['os-extended-services-delete'] = True
req = FakeRequest()
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'nova-scheduler',
'host': 'host1',
'id': 1,
'zone': 'internal',
'disabled_reason': 'test1',
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2)},
{'binary': 'nova-compute',
'host': 'host1',
'id': 2,
'zone': 'nova',
'disabled_reason': 'test2',
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)},
{'binary': 'nova-scheduler',
'host': 'host2',
'disabled_reason': None,
'id': 3,
'zone': 'internal',
'status': 'enabled',
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34)},
{'binary': 'nova-compute',
'host': 'host2',
'id': 4,
'disabled_reason': 'test4',
'zone': 'nova',
'status': 'disabled',
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38)}]}
self._process_output(response, has_id=True)
self.assertEqual(res_dict, response)
def test_services_enable(self):
def _service_update(context, service_id, values):
self.assertIsNone(values['disabled_reason'])
return dict(test_service.fake_service, id=service_id, **values)
self.stub_out('nova.db.service_update', _service_update)
body = {'host': 'host1', 'binary': 'nova-compute'}
res_dict = self.controller.update(self.req, "enable", body=body)
self.assertEqual(res_dict['service']['status'], 'enabled')
self.assertNotIn('disabled_reason', res_dict['service'])
def test_services_enable_with_invalid_host(self):
body = {'host': 'invalid', 'binary': 'nova-compute'}
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update,
self.req,
"enable",
body=body)
def test_services_enable_with_invalid_binary(self):
body = {'host': 'host1', 'binary': 'invalid'}
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update,
self.req,
"enable",
body=body)
def test_services_disable(self):
body = {'host': 'host1', 'binary': 'nova-compute'}
res_dict = self.controller.update(self.req, "disable", body=body)
self.assertEqual(res_dict['service']['status'], 'disabled')
self.assertNotIn('disabled_reason', res_dict['service'])
def test_services_disable_with_invalid_host(self):
body = {'host': 'invalid', 'binary': 'nova-compute'}
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update,
self.req,
"disable",
body=body)
def test_services_disable_with_invalid_binary(self):
body = {'host': 'host1', 'binary': 'invalid'}
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update,
self.req,
"disable",
body=body)
def test_services_disable_log_reason(self):
self.ext_mgr.extensions['os-extended-services'] = True
body = {'host': 'host1',
'binary': 'nova-compute',
'disabled_reason': 'test-reason',
}
res_dict = self.controller.update(self.req,
"disable-log-reason",
body=body)
self.assertEqual(res_dict['service']['status'], 'disabled')
self.assertEqual(res_dict['service']['disabled_reason'], 'test-reason')
def test_mandatory_reason_field(self):
self.ext_mgr.extensions['os-extended-services'] = True
body = {'host': 'host1',
'binary': 'nova-compute',
}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, self.req, "disable-log-reason",
body=body)
def test_invalid_reason_field(self):
self.ext_mgr.extensions['os-extended-services'] = True
reason = 'a' * 256
body = {'host': 'host1',
'binary': 'nova-compute',
'disabled_reason': reason,
}
self.assertRaises(self.bad_request,
self.controller.update, self.req, "disable-log-reason",
body=body)
def test_services_delete(self):
self.ext_mgr.extensions['os-extended-services-delete'] = True
with mock.patch.object(self.controller.host_api,
'service_delete') as service_delete:
self.controller.delete(self.req, '1')
service_delete.assert_called_once_with(
self.req.environ['nova.context'], '1')
self.assertEqual(self.controller.delete.wsgi_code, 204)
def test_services_delete_not_found(self):
self.ext_mgr.extensions['os-extended-services-delete'] = True
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, self.req, 1234)
def test_services_delete_bad_request(self):
self.ext_mgr.extensions['os-extended-services-delete'] = True
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.delete, self.req, 'abc')
    # This test verifies that the servicegroup API is used when the
    # os-services API is called.
@mock.patch.object(db_driver.DbDriver, 'is_up', side_effect=KeyError)
def test_services_with_exception(self, mock_is_up):
req = FakeRequestWithHostService()
self.assertRaises(self.service_is_up_exc, self.controller.index, req)
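# Microversion 2.11 adds the 'forced_down' field to every service entry in
# the response, so the expected payloads below include it.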
class ServicesTestV211(ServicesTestV21):
wsgi_api_version = '2.11'
def test_services_list(self):
req = FakeRequest(self.wsgi_api_version)
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'nova-scheduler',
'host': 'host1',
'zone': 'internal',
'status': 'disabled',
'id': 1,
'state': 'up',
'forced_down': False,
'disabled_reason': 'test1',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2)},
{'binary': 'nova-compute',
'host': 'host1',
'zone': 'nova',
'id': 2,
'status': 'disabled',
'disabled_reason': 'test2',
'state': 'up',
'forced_down': False,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)},
{'binary': 'nova-scheduler',
'host': 'host2',
'zone': 'internal',
'id': 3,
'status': 'enabled',
'disabled_reason': None,
'state': 'down',
'forced_down': False,
'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34)},
{'binary': 'nova-compute',
'host': 'host2',
'zone': 'nova',
'id': 4,
'status': 'disabled',
'disabled_reason': 'test4',
'state': 'down',
'forced_down': False,
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38)}]}
self._process_output(response)
self.assertEqual(res_dict, response)
def test_services_list_with_host(self):
req = FakeRequestWithHost(self.wsgi_api_version)
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'nova-scheduler',
'host': 'host1',
'disabled_reason': 'test1',
'id': 1,
'zone': 'internal',
'status': 'disabled',
'state': 'up',
'forced_down': False,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2)},
{'binary': 'nova-compute',
'host': 'host1',
'zone': 'nova',
'disabled_reason': 'test2',
'id': 2,
'status': 'disabled',
'state': 'up',
'forced_down': False,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}]}
self._process_output(response)
self.assertEqual(res_dict, response)
def test_services_list_with_service(self):
req = FakeRequestWithService(self.wsgi_api_version)
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'nova-compute',
'host': 'host1',
'disabled_reason': 'test2',
'id': 2,
'zone': 'nova',
'status': 'disabled',
'state': 'up',
'forced_down': False,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)},
{'binary': 'nova-compute',
'host': 'host2',
'zone': 'nova',
'disabled_reason': 'test4',
'id': 4,
'status': 'disabled',
'state': 'down',
'forced_down': False,
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38)}]}
self._process_output(response)
self.assertEqual(res_dict, response)
def test_services_list_with_host_service(self):
req = FakeRequestWithHostService(self.wsgi_api_version)
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'nova-compute',
'host': 'host1',
'zone': 'nova',
'disabled_reason': 'test2',
'id': 2,
'status': 'disabled',
'state': 'up',
'forced_down': False,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}]}
self._process_output(response)
self.assertEqual(res_dict, response)
def test_services_detail(self):
self.ext_mgr.extensions['os-extended-services'] = True
req = FakeRequest(self.wsgi_api_version)
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'nova-scheduler',
'host': 'host1',
'zone': 'internal',
'status': 'disabled',
'id': 1,
'state': 'up',
'forced_down': False,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
'disabled_reason': 'test1'},
{'binary': 'nova-compute',
'host': 'host1',
'zone': 'nova',
'status': 'disabled',
'state': 'up',
'id': 2,
'forced_down': False,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
'disabled_reason': 'test2'},
{'binary': 'nova-scheduler',
'host': 'host2',
'zone': 'internal',
'status': 'enabled',
'id': 3,
'state': 'down',
'forced_down': False,
'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34),
'disabled_reason': None},
{'binary': 'nova-compute',
'host': 'host2',
'zone': 'nova',
'id': 4,
'status': 'disabled',
'state': 'down',
'forced_down': False,
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
'disabled_reason': 'test4'}]}
self._process_output(response, has_disabled=True)
self.assertEqual(res_dict, response)
def test_service_detail_with_host(self):
self.ext_mgr.extensions['os-extended-services'] = True
req = FakeRequestWithHost(self.wsgi_api_version)
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'nova-scheduler',
'host': 'host1',
'zone': 'internal',
'id': 1,
'status': 'disabled',
'state': 'up',
'forced_down': False,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
'disabled_reason': 'test1'},
{'binary': 'nova-compute',
'host': 'host1',
'zone': 'nova',
'id': 2,
'status': 'disabled',
'state': 'up',
'forced_down': False,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
'disabled_reason': 'test2'}]}
self._process_output(response, has_disabled=True)
self.assertEqual(res_dict, response)
def test_service_detail_with_service(self):
self.ext_mgr.extensions['os-extended-services'] = True
req = FakeRequestWithService(self.wsgi_api_version)
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'nova-compute',
'host': 'host1',
'zone': 'nova',
'id': 2,
'status': 'disabled',
'state': 'up',
'forced_down': False,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
'disabled_reason': 'test2'},
{'binary': 'nova-compute',
'host': 'host2',
'id': 4,
'zone': 'nova',
'status': 'disabled',
'state': 'down',
'forced_down': False,
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
'disabled_reason': 'test4'}]}
self._process_output(response, has_disabled=True)
self.assertEqual(res_dict, response)
def test_service_detail_with_host_service(self):
self.ext_mgr.extensions['os-extended-services'] = True
req = FakeRequestWithHostService(self.wsgi_api_version)
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'nova-compute',
'host': 'host1',
'zone': 'nova',
'status': 'disabled',
'id': 2,
'state': 'up',
'forced_down': False,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
'disabled_reason': 'test2'}]}
self._process_output(response, has_disabled=True)
self.assertEqual(res_dict, response)
def test_services_detail_with_delete_extension(self):
self.ext_mgr.extensions['os-extended-services-delete'] = True
req = FakeRequest(self.wsgi_api_version)
res_dict = self.controller.index(req)
response = {'services': [
{'binary': 'nova-scheduler',
'host': 'host1',
'id': 1,
'zone': 'internal',
'disabled_reason': 'test1',
'status': 'disabled',
'state': 'up',
'forced_down': False,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2)},
{'binary': 'nova-compute',
'host': 'host1',
'id': 2,
'zone': 'nova',
'disabled_reason': 'test2',
'status': 'disabled',
'state': 'up',
'forced_down': False,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)},
{'binary': 'nova-scheduler',
'host': 'host2',
'disabled_reason': None,
'id': 3,
'zone': 'internal',
'status': 'enabled',
'state': 'down',
'forced_down': False,
'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34)},
{'binary': 'nova-compute',
'host': 'host2',
'id': 4,
'disabled_reason': 'test4',
'zone': 'nova',
'status': 'disabled',
'state': 'down',
'forced_down': False,
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38)}]}
self._process_output(response, has_id=True)
self.assertEqual(res_dict, response)
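# Legacy v2.0 API tests: an admin context is required, and 'id' and
# 'disabled_reason' are hidden unless the corresponding extensions are
# enabled (see _process_output below).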
class ServicesTestV20(ServicesTestV21):
service_is_up_exc = KeyError
bad_request = webob.exc.HTTPBadRequest
def setUp(self):
super(ServicesTestV20, self).setUp()
self.req = fakes.HTTPRequest.blank('', use_admin_context=True)
self.non_admin_req = fakes.HTTPRequest.blank('')
def _set_up_controller(self):
self.controller = services_v2.ServiceController(self.ext_mgr)
def test_services_delete_not_enabled(self):
self.assertRaises(webob.exc.HTTPMethodNotAllowed,
self.controller.delete, self.req, '300')
def _process_output(self, services, has_disabled=False, has_id=False):
for service in services['services']:
if not has_disabled:
service.pop('disabled_reason')
if not has_id:
service.pop('id')
return services
def test_update_with_non_admin(self):
self.assertRaises(exception.AdminRequired, self.controller.update,
self.non_admin_req, fakes.FAKE_UUID, body={})
def test_delete_with_non_admin(self):
self.ext_mgr.extensions['os-extended-services-delete'] = True
self.assertRaises(exception.AdminRequired, self.controller.delete,
self.non_admin_req, fakes.FAKE_UUID)
def test_index_with_non_admin(self):
self.assertRaises(exception.AdminRequired, self.controller.index,
self.non_admin_req)
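# Cells variant: services are wrapped in cells_utils.ServiceProxy objects,
# so ids and hostnames carry the cell path prefix (e.g. 'cell1@1', 'cell1@host1').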
class ServicesCellsTestV21(test.TestCase):
def setUp(self):
super(ServicesCellsTestV21, self).setUp()
host_api = cells_api.HostAPI()
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
self._set_up_controller()
self.controller.host_api = host_api
self.useFixture(utils_fixture.TimeFixture(fake_utcnow()))
services_list = []
for service in fake_services_list:
service = service.copy()
del service['version']
service_obj = objects.Service(**service)
service_proxy = cells_utils.ServiceProxy(service_obj, 'cell1')
services_list.append(service_proxy)
host_api.cells_rpcapi.service_get_all = (
mock.Mock(side_effect=fake_service_get_all(services_list)))
def _set_up_controller(self):
self.controller = services_v21.ServiceController()
def _process_out(self, res_dict):
for res in res_dict['services']:
res.pop('disabled_reason')
def test_services_detail(self):
self.ext_mgr.extensions['os-extended-services-delete'] = True
req = FakeRequest()
res_dict = self.controller.index(req)
utc = iso8601.iso8601.Utc()
response = {'services': [
{'id': 'cell1@1',
'binary': 'nova-scheduler',
'host': 'cell1@host1',
'zone': 'internal',
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2,
tzinfo=utc)},
{'id': 'cell1@2',
'binary': 'nova-compute',
'host': 'cell1@host1',
'zone': 'nova',
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5,
tzinfo=utc)},
{'id': 'cell1@3',
'binary': 'nova-scheduler',
'host': 'cell1@host2',
'zone': 'internal',
'status': 'enabled',
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34,
tzinfo=utc)},
{'id': 'cell1@4',
'binary': 'nova-compute',
'host': 'cell1@host2',
'zone': 'nova',
'status': 'disabled',
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38,
tzinfo=utc)}]}
self._process_out(res_dict)
self.assertEqual(response, res_dict)
class ServicesCellsTestV20(ServicesCellsTestV21):
def _set_up_controller(self):
self.controller = services_v2.ServiceController(self.ext_mgr)
def _process_out(self, res_dict):
pass
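# Verify that the 'os_compute_api:os-services' policy rule is enforced for
# the update, delete and index actions.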
class ServicesPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(ServicesPolicyEnforcementV21, self).setUp()
self.controller = services_v21.ServiceController()
self.req = fakes.HTTPRequest.blank('')
def test_update_policy_failed(self):
rule_name = "os_compute_api:os-services"
self.policy.set_rules({rule_name: "project_id:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.update, self.req, fakes.FAKE_UUID,
body={'host': 'host1',
'binary': 'nova-compute'})
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_delete_policy_failed(self):
rule_name = "os_compute_api:os-services"
self.policy.set_rules({rule_name: "project_id:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.delete, self.req, fakes.FAKE_UUID)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_index_policy_failed(self):
rule_name = "os_compute_api:os-services"
self.policy.set_rules({rule_name: "project_id:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.index, self.req)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
| apache-2.0 |
Ujjwal29/ansible | test/units/module_utils/test_database.py | 325 | 5737 | import collections
import mock
import os
import re
from nose.tools import eq_
try:
from nose.tools import assert_raises_regexp
except ImportError:
# Python < 2.7
def assert_raises_regexp(expected, regexp, callable, *a, **kw):
try:
callable(*a, **kw)
except expected as e:
if isinstance(regexp, basestring):
regexp = re.compile(regexp)
if not regexp.search(str(e)):
raise Exception('"%s" does not match "%s"' %
(regexp.pattern, str(e)))
else:
if hasattr(expected,'__name__'): excName = expected.__name__
else: excName = str(expected)
raise AssertionError("%s not raised" % excName)
from ansible.module_utils.database import (
pg_quote_identifier,
SQLParseError,
)
# Note: Using nose's generator test cases here so we can't inherit from
# unittest.TestCase
class TestQuotePgIdentifier(object):
# These are all valid strings
# The results are based on interpreting the identifier as a table name
valid = {
# User quoted
'"public.table"': '"public.table"',
'"public"."table"': '"public"."table"',
'"schema test"."table test"': '"schema test"."table test"',
# We quote part
'public.table': '"public"."table"',
'"public".table': '"public"."table"',
'public."table"': '"public"."table"',
'schema test.table test': '"schema test"."table test"',
'"schema test".table test': '"schema test"."table test"',
'schema test."table test"': '"schema test"."table test"',
# Embedded double quotes
'table "test"': '"table ""test"""',
'public."table ""test"""': '"public"."table ""test"""',
'public.table "test"': '"public"."table ""test"""',
'schema "test".table': '"schema ""test"""."table"',
'"schema ""test""".table': '"schema ""test"""."table"',
'"""wat"""."""test"""': '"""wat"""."""test"""',
# Sigh, handle these as well:
'"no end quote': '"""no end quote"',
'schema."table': '"schema"."""table"',
'"schema.table': '"""schema"."table"',
'schema."table.something': '"schema"."""table"."something"',
# Embedded dots
'"schema.test"."table.test"': '"schema.test"."table.test"',
'"schema.".table': '"schema."."table"',
'"schema."."table"': '"schema."."table"',
'schema.".table"': '"schema".".table"',
'"schema".".table"': '"schema".".table"',
'"schema.".".table"': '"schema.".".table"',
# These are valid but maybe not what the user intended
'."table"': '".""table"""',
'table.': '"table."',
}
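    # Mapping of (identifier, id_type) to the expected SQLParseError message,
    # matched as a regexp by assert_raises_regexp.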
invalid = {
('test.too.many.dots', 'table'): 'PostgreSQL does not support table with more than 3 dots',
('"test.too".many.dots', 'database'): 'PostgreSQL does not support database with more than 1 dots',
('test.too."many.dots"', 'database'): 'PostgreSQL does not support database with more than 1 dots',
('"test"."too"."many"."dots"', 'database'): "PostgreSQL does not support database with more than 1 dots",
('"test"."too"."many"."dots"', 'schema'): "PostgreSQL does not support schema with more than 2 dots",
('"test"."too"."many"."dots"', 'table'): "PostgreSQL does not support table with more than 3 dots",
('"test"."too"."many"."dots"."for"."column"', 'column'): "PostgreSQL does not support column with more than 4 dots",
('"table "invalid" double quote"', 'table'): 'User escaped identifiers must escape extra quotes',
('"schema "invalid"""."table "invalid"', 'table'): 'User escaped identifiers must escape extra quotes',
('"schema."table"','table'): 'User escaped identifiers must escape extra quotes',
('"schema".', 'table'): 'Identifier name unspecified or unquoted trailing dot',
}
def check_valid_quotes(self, identifier, quoted_identifier):
eq_(pg_quote_identifier(identifier, 'table'), quoted_identifier)
def test_valid_quotes(self):
for identifier in self.valid:
yield self.check_valid_quotes, identifier, self.valid[identifier]
def check_invalid_quotes(self, identifier, id_type, msg):
assert_raises_regexp(SQLParseError, msg, pg_quote_identifier, *(identifier, id_type))
def test_invalid_quotes(self):
for test in self.invalid:
yield self.check_invalid_quotes, test[0], test[1], self.invalid[test]
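    # Each identifier type accepts a fixed maximum number of dotted parts;
    # exceeding it raises SQLParseError.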
def test_how_many_dots(self):
eq_(pg_quote_identifier('role', 'role'), '"role"')
assert_raises_regexp(SQLParseError, "PostgreSQL does not support role with more than 1 dots", pg_quote_identifier, *('role.more', 'role'))
eq_(pg_quote_identifier('db', 'database'), '"db"')
assert_raises_regexp(SQLParseError, "PostgreSQL does not support database with more than 1 dots", pg_quote_identifier, *('db.more', 'database'))
eq_(pg_quote_identifier('db.schema', 'schema'), '"db"."schema"')
assert_raises_regexp(SQLParseError, "PostgreSQL does not support schema with more than 2 dots", pg_quote_identifier, *('db.schema.more', 'schema'))
eq_(pg_quote_identifier('db.schema.table', 'table'), '"db"."schema"."table"')
assert_raises_regexp(SQLParseError, "PostgreSQL does not support table with more than 3 dots", pg_quote_identifier, *('db.schema.table.more', 'table'))
eq_(pg_quote_identifier('db.schema.table.column', 'column'), '"db"."schema"."table"."column"')
assert_raises_regexp(SQLParseError, "PostgreSQL does not support column with more than 4 dots", pg_quote_identifier, *('db.schema.table.column.more', 'column'))
| gpl-3.0 |
ojengwa/osmc | package/mediacenter-skin-osmc/files/usr/share/kodi/addons/script.module.unidecode/lib/unidecode/x0c5.py | 253 | 4581 | data = (
'sseum', # 0x00
'sseub', # 0x01
'sseubs', # 0x02
'sseus', # 0x03
'sseuss', # 0x04
'sseung', # 0x05
'sseuj', # 0x06
'sseuc', # 0x07
'sseuk', # 0x08
'sseut', # 0x09
'sseup', # 0x0a
'sseuh', # 0x0b
'ssyi', # 0x0c
'ssyig', # 0x0d
'ssyigg', # 0x0e
'ssyigs', # 0x0f
'ssyin', # 0x10
'ssyinj', # 0x11
'ssyinh', # 0x12
'ssyid', # 0x13
'ssyil', # 0x14
'ssyilg', # 0x15
'ssyilm', # 0x16
'ssyilb', # 0x17
'ssyils', # 0x18
'ssyilt', # 0x19
'ssyilp', # 0x1a
'ssyilh', # 0x1b
'ssyim', # 0x1c
'ssyib', # 0x1d
'ssyibs', # 0x1e
'ssyis', # 0x1f
'ssyiss', # 0x20
'ssying', # 0x21
'ssyij', # 0x22
'ssyic', # 0x23
'ssyik', # 0x24
'ssyit', # 0x25
'ssyip', # 0x26
'ssyih', # 0x27
'ssi', # 0x28
'ssig', # 0x29
'ssigg', # 0x2a
'ssigs', # 0x2b
'ssin', # 0x2c
'ssinj', # 0x2d
'ssinh', # 0x2e
'ssid', # 0x2f
'ssil', # 0x30
'ssilg', # 0x31
'ssilm', # 0x32
'ssilb', # 0x33
'ssils', # 0x34
'ssilt', # 0x35
'ssilp', # 0x36
'ssilh', # 0x37
'ssim', # 0x38
'ssib', # 0x39
'ssibs', # 0x3a
'ssis', # 0x3b
'ssiss', # 0x3c
'ssing', # 0x3d
'ssij', # 0x3e
'ssic', # 0x3f
'ssik', # 0x40
'ssit', # 0x41
'ssip', # 0x42
'ssih', # 0x43
'a', # 0x44
'ag', # 0x45
'agg', # 0x46
'ags', # 0x47
'an', # 0x48
'anj', # 0x49
'anh', # 0x4a
'ad', # 0x4b
'al', # 0x4c
'alg', # 0x4d
'alm', # 0x4e
'alb', # 0x4f
'als', # 0x50
'alt', # 0x51
'alp', # 0x52
'alh', # 0x53
'am', # 0x54
'ab', # 0x55
'abs', # 0x56
'as', # 0x57
'ass', # 0x58
'ang', # 0x59
'aj', # 0x5a
'ac', # 0x5b
'ak', # 0x5c
'at', # 0x5d
'ap', # 0x5e
'ah', # 0x5f
'ae', # 0x60
'aeg', # 0x61
'aegg', # 0x62
'aegs', # 0x63
'aen', # 0x64
'aenj', # 0x65
'aenh', # 0x66
'aed', # 0x67
'ael', # 0x68
'aelg', # 0x69
'aelm', # 0x6a
'aelb', # 0x6b
'aels', # 0x6c
'aelt', # 0x6d
'aelp', # 0x6e
'aelh', # 0x6f
'aem', # 0x70
'aeb', # 0x71
'aebs', # 0x72
'aes', # 0x73
'aess', # 0x74
'aeng', # 0x75
'aej', # 0x76
'aec', # 0x77
'aek', # 0x78
'aet', # 0x79
'aep', # 0x7a
'aeh', # 0x7b
'ya', # 0x7c
'yag', # 0x7d
'yagg', # 0x7e
'yags', # 0x7f
'yan', # 0x80
'yanj', # 0x81
'yanh', # 0x82
'yad', # 0x83
'yal', # 0x84
'yalg', # 0x85
'yalm', # 0x86
'yalb', # 0x87
'yals', # 0x88
'yalt', # 0x89
'yalp', # 0x8a
'yalh', # 0x8b
'yam', # 0x8c
'yab', # 0x8d
'yabs', # 0x8e
'yas', # 0x8f
'yass', # 0x90
'yang', # 0x91
'yaj', # 0x92
'yac', # 0x93
'yak', # 0x94
'yat', # 0x95
'yap', # 0x96
'yah', # 0x97
'yae', # 0x98
'yaeg', # 0x99
'yaegg', # 0x9a
'yaegs', # 0x9b
'yaen', # 0x9c
'yaenj', # 0x9d
'yaenh', # 0x9e
'yaed', # 0x9f
'yael', # 0xa0
'yaelg', # 0xa1
'yaelm', # 0xa2
'yaelb', # 0xa3
'yaels', # 0xa4
'yaelt', # 0xa5
'yaelp', # 0xa6
'yaelh', # 0xa7
'yaem', # 0xa8
'yaeb', # 0xa9
'yaebs', # 0xaa
'yaes', # 0xab
'yaess', # 0xac
'yaeng', # 0xad
'yaej', # 0xae
'yaec', # 0xaf
'yaek', # 0xb0
'yaet', # 0xb1
'yaep', # 0xb2
'yaeh', # 0xb3
'eo', # 0xb4
'eog', # 0xb5
'eogg', # 0xb6
'eogs', # 0xb7
'eon', # 0xb8
'eonj', # 0xb9
'eonh', # 0xba
'eod', # 0xbb
'eol', # 0xbc
'eolg', # 0xbd
'eolm', # 0xbe
'eolb', # 0xbf
'eols', # 0xc0
'eolt', # 0xc1
'eolp', # 0xc2
'eolh', # 0xc3
'eom', # 0xc4
'eob', # 0xc5
'eobs', # 0xc6
'eos', # 0xc7
'eoss', # 0xc8
'eong', # 0xc9
'eoj', # 0xca
'eoc', # 0xcb
'eok', # 0xcc
'eot', # 0xcd
'eop', # 0xce
'eoh', # 0xcf
'e', # 0xd0
'eg', # 0xd1
'egg', # 0xd2
'egs', # 0xd3
'en', # 0xd4
'enj', # 0xd5
'enh', # 0xd6
'ed', # 0xd7
'el', # 0xd8
'elg', # 0xd9
'elm', # 0xda
'elb', # 0xdb
'els', # 0xdc
'elt', # 0xdd
'elp', # 0xde
'elh', # 0xdf
'em', # 0xe0
'eb', # 0xe1
'ebs', # 0xe2
'es', # 0xe3
'ess', # 0xe4
'eng', # 0xe5
'ej', # 0xe6
'ec', # 0xe7
'ek', # 0xe8
'et', # 0xe9
'ep', # 0xea
'eh', # 0xeb
'yeo', # 0xec
'yeog', # 0xed
'yeogg', # 0xee
'yeogs', # 0xef
'yeon', # 0xf0
'yeonj', # 0xf1
'yeonh', # 0xf2
'yeod', # 0xf3
'yeol', # 0xf4
'yeolg', # 0xf5
'yeolm', # 0xf6
'yeolb', # 0xf7
'yeols', # 0xf8
'yeolt', # 0xf9
'yeolp', # 0xfa
'yeolh', # 0xfb
'yeom', # 0xfc
'yeob', # 0xfd
'yeobs', # 0xfe
'yeos', # 0xff
)
| gpl-2.0 |
Kamik423/uni_plan | plan/plan/lib64/python3.4/encodings/iso8859_10.py | 272 | 13589 | """ Python Character Mapping Codec iso8859_10 generated from 'MAPPINGS/ISO8859/8859-10.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-10',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK
'\u0112' # 0xA2 -> LATIN CAPITAL LETTER E WITH MACRON
'\u0122' # 0xA3 -> LATIN CAPITAL LETTER G WITH CEDILLA
'\u012a' # 0xA4 -> LATIN CAPITAL LETTER I WITH MACRON
'\u0128' # 0xA5 -> LATIN CAPITAL LETTER I WITH TILDE
'\u0136' # 0xA6 -> LATIN CAPITAL LETTER K WITH CEDILLA
'\xa7' # 0xA7 -> SECTION SIGN
'\u013b' # 0xA8 -> LATIN CAPITAL LETTER L WITH CEDILLA
'\u0110' # 0xA9 -> LATIN CAPITAL LETTER D WITH STROKE
'\u0160' # 0xAA -> LATIN CAPITAL LETTER S WITH CARON
'\u0166' # 0xAB -> LATIN CAPITAL LETTER T WITH STROKE
'\u017d' # 0xAC -> LATIN CAPITAL LETTER Z WITH CARON
'\xad' # 0xAD -> SOFT HYPHEN
'\u016a' # 0xAE -> LATIN CAPITAL LETTER U WITH MACRON
'\u014a' # 0xAF -> LATIN CAPITAL LETTER ENG
'\xb0' # 0xB0 -> DEGREE SIGN
'\u0105' # 0xB1 -> LATIN SMALL LETTER A WITH OGONEK
'\u0113' # 0xB2 -> LATIN SMALL LETTER E WITH MACRON
'\u0123' # 0xB3 -> LATIN SMALL LETTER G WITH CEDILLA
'\u012b' # 0xB4 -> LATIN SMALL LETTER I WITH MACRON
'\u0129' # 0xB5 -> LATIN SMALL LETTER I WITH TILDE
'\u0137' # 0xB6 -> LATIN SMALL LETTER K WITH CEDILLA
'\xb7' # 0xB7 -> MIDDLE DOT
'\u013c' # 0xB8 -> LATIN SMALL LETTER L WITH CEDILLA
'\u0111' # 0xB9 -> LATIN SMALL LETTER D WITH STROKE
'\u0161' # 0xBA -> LATIN SMALL LETTER S WITH CARON
'\u0167' # 0xBB -> LATIN SMALL LETTER T WITH STROKE
'\u017e' # 0xBC -> LATIN SMALL LETTER Z WITH CARON
'\u2015' # 0xBD -> HORIZONTAL BAR
'\u016b' # 0xBE -> LATIN SMALL LETTER U WITH MACRON
'\u014b' # 0xBF -> LATIN SMALL LETTER ENG
'\u0100' # 0xC0 -> LATIN CAPITAL LETTER A WITH MACRON
'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
'\u012e' # 0xC7 -> LATIN CAPITAL LETTER I WITH OGONEK
'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK
'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\u0116' # 0xCC -> LATIN CAPITAL LETTER E WITH DOT ABOVE
'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH (Icelandic)
'\u0145' # 0xD1 -> LATIN CAPITAL LETTER N WITH CEDILLA
'\u014c' # 0xD2 -> LATIN CAPITAL LETTER O WITH MACRON
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\u0168' # 0xD7 -> LATIN CAPITAL LETTER U WITH TILDE
'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
'\u0172' # 0xD9 -> LATIN CAPITAL LETTER U WITH OGONEK
'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN (Icelandic)
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German)
'\u0101' # 0xE0 -> LATIN SMALL LETTER A WITH MACRON
'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
'\u012f' # 0xE7 -> LATIN SMALL LETTER I WITH OGONEK
'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK
'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
'\u0117' # 0xEC -> LATIN SMALL LETTER E WITH DOT ABOVE
'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH (Icelandic)
'\u0146' # 0xF1 -> LATIN SMALL LETTER N WITH CEDILLA
'\u014d' # 0xF2 -> LATIN SMALL LETTER O WITH MACRON
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\u0169' # 0xF7 -> LATIN SMALL LETTER U WITH TILDE
'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
'\u0173' # 0xF9 -> LATIN SMALL LETTER U WITH OGONEK
'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
'\xfe' # 0xFE -> LATIN SMALL LETTER THORN (Icelandic)
'\u0138' # 0xFF -> LATIN SMALL LETTER KRA
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| apache-2.0 |
Thraxis/pymedusa | sickbeard/server/web/config/notifications.py | 1 | 17493 | # coding=utf-8
"""
Configure notifications
"""
from __future__ import unicode_literals
import os
from tornado.routes import route
import sickbeard
from sickbeard import (
config, logger, ui,
)
from sickrage.helper.common import try_int
from sickrage.helper.encoding import ek
from sickbeard.server.web.core import PageTemplate
from sickbeard.server.web.config.handler import Config
@route('/config/notifications(/?.*)')
class ConfigNotifications(Config):
"""
Handler for notification configuration
"""
def __init__(self, *args, **kwargs):
super(ConfigNotifications, self).__init__(*args, **kwargs)
def index(self):
"""
Render the notification configuration page
"""
t = PageTemplate(rh=self, filename='config_notifications.mako')
return t.render(submenu=self.ConfigMenu(), title='Config - Notifications',
header='Notifications', topmenu='config',
controller='config', action='notifications')
def saveNotifications(self, use_kodi=None, kodi_always_on=None, kodi_notify_onsnatch=None,
kodi_notify_ondownload=None,
kodi_notify_onsubtitledownload=None, kodi_update_onlyfirst=None,
kodi_update_library=None, kodi_update_full=None, kodi_host=None, kodi_username=None,
kodi_password=None,
use_plex_server=None, plex_notify_onsnatch=None, plex_notify_ondownload=None,
plex_notify_onsubtitledownload=None, plex_update_library=None,
plex_server_host=None, plex_server_token=None, plex_client_host=None, plex_server_username=None, plex_server_password=None,
use_plex_client=None, plex_client_username=None, plex_client_password=None,
plex_server_https=None, use_emby=None, emby_host=None, emby_apikey=None,
use_growl=None, growl_notify_onsnatch=None, growl_notify_ondownload=None,
growl_notify_onsubtitledownload=None, growl_host=None, growl_password=None,
use_freemobile=None, freemobile_notify_onsnatch=None, freemobile_notify_ondownload=None,
freemobile_notify_onsubtitledownload=None, freemobile_id=None, freemobile_apikey=None,
use_telegram=None, telegram_notify_onsnatch=None, telegram_notify_ondownload=None,
telegram_notify_onsubtitledownload=None, telegram_id=None, telegram_apikey=None,
use_prowl=None, prowl_notify_onsnatch=None, prowl_notify_ondownload=None,
prowl_notify_onsubtitledownload=None, prowl_api=None, prowl_priority=0,
prowl_show_list=None, prowl_show=None, prowl_message_title=None,
use_twitter=None, twitter_notify_onsnatch=None, twitter_notify_ondownload=None,
twitter_notify_onsubtitledownload=None, twitter_usedm=None, twitter_dmto=None,
use_boxcar2=None, boxcar2_notify_onsnatch=None, boxcar2_notify_ondownload=None,
boxcar2_notify_onsubtitledownload=None, boxcar2_accesstoken=None,
use_pushover=None, pushover_notify_onsnatch=None, pushover_notify_ondownload=None,
pushover_notify_onsubtitledownload=None, pushover_userkey=None, pushover_apikey=None, pushover_device=None, pushover_sound=None,
use_libnotify=None, libnotify_notify_onsnatch=None, libnotify_notify_ondownload=None,
libnotify_notify_onsubtitledownload=None,
use_nmj=None, nmj_host=None, nmj_database=None, nmj_mount=None, use_synoindex=None,
use_nmjv2=None, nmjv2_host=None, nmjv2_dbloc=None, nmjv2_database=None,
use_trakt=None, trakt_username=None, trakt_pin=None,
trakt_remove_watchlist=None, trakt_sync_watchlist=None, trakt_remove_show_from_sickrage=None, trakt_method_add=None,
trakt_start_paused=None, trakt_use_recommended=None, trakt_sync=None, trakt_sync_remove=None,
trakt_default_indexer=None, trakt_remove_serieslist=None, trakt_timeout=None, trakt_blacklist_name=None,
use_synologynotifier=None, synologynotifier_notify_onsnatch=None,
synologynotifier_notify_ondownload=None, synologynotifier_notify_onsubtitledownload=None,
use_pytivo=None, pytivo_notify_onsnatch=None, pytivo_notify_ondownload=None,
pytivo_notify_onsubtitledownload=None, pytivo_update_library=None,
pytivo_host=None, pytivo_share_name=None, pytivo_tivo_name=None,
use_nma=None, nma_notify_onsnatch=None, nma_notify_ondownload=None,
nma_notify_onsubtitledownload=None, nma_api=None, nma_priority=0,
use_pushalot=None, pushalot_notify_onsnatch=None, pushalot_notify_ondownload=None,
pushalot_notify_onsubtitledownload=None, pushalot_authorizationtoken=None,
use_pushbullet=None, pushbullet_notify_onsnatch=None, pushbullet_notify_ondownload=None,
pushbullet_notify_onsubtitledownload=None, pushbullet_api=None, pushbullet_device=None,
pushbullet_device_list=None,
use_email=None, email_notify_onsnatch=None, email_notify_ondownload=None,
email_notify_onsubtitledownload=None, email_host=None, email_port=25, email_from=None,
email_tls=None, email_user=None, email_password=None, email_list=None, email_subject=None, email_show_list=None,
email_show=None):
"""
        Save notification-related settings
"""
results = []
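        # Copy each notifier's submitted form values into the module-level
        # sickbeard settings, one block per notification service.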
sickbeard.USE_KODI = config.checkbox_to_value(use_kodi)
sickbeard.KODI_ALWAYS_ON = config.checkbox_to_value(kodi_always_on)
sickbeard.KODI_NOTIFY_ONSNATCH = config.checkbox_to_value(kodi_notify_onsnatch)
sickbeard.KODI_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(kodi_notify_ondownload)
sickbeard.KODI_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(kodi_notify_onsubtitledownload)
sickbeard.KODI_UPDATE_LIBRARY = config.checkbox_to_value(kodi_update_library)
sickbeard.KODI_UPDATE_FULL = config.checkbox_to_value(kodi_update_full)
sickbeard.KODI_UPDATE_ONLYFIRST = config.checkbox_to_value(kodi_update_onlyfirst)
sickbeard.KODI_HOST = config.clean_hosts(kodi_host)
sickbeard.KODI_USERNAME = kodi_username
sickbeard.KODI_PASSWORD = kodi_password
sickbeard.USE_PLEX_SERVER = config.checkbox_to_value(use_plex_server)
sickbeard.PLEX_NOTIFY_ONSNATCH = config.checkbox_to_value(plex_notify_onsnatch)
sickbeard.PLEX_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(plex_notify_ondownload)
sickbeard.PLEX_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(plex_notify_onsubtitledownload)
sickbeard.PLEX_UPDATE_LIBRARY = config.checkbox_to_value(plex_update_library)
sickbeard.PLEX_CLIENT_HOST = config.clean_hosts(plex_client_host)
sickbeard.PLEX_SERVER_HOST = config.clean_hosts(plex_server_host)
sickbeard.PLEX_SERVER_TOKEN = config.clean_host(plex_server_token)
sickbeard.PLEX_SERVER_USERNAME = plex_server_username
if plex_server_password != '*' * len(sickbeard.PLEX_SERVER_PASSWORD):
sickbeard.PLEX_SERVER_PASSWORD = plex_server_password
sickbeard.USE_PLEX_CLIENT = config.checkbox_to_value(use_plex_client)
sickbeard.PLEX_CLIENT_USERNAME = plex_client_username
if plex_client_password != '*' * len(sickbeard.PLEX_CLIENT_PASSWORD):
sickbeard.PLEX_CLIENT_PASSWORD = plex_client_password
sickbeard.PLEX_SERVER_HTTPS = config.checkbox_to_value(plex_server_https)
sickbeard.USE_EMBY = config.checkbox_to_value(use_emby)
sickbeard.EMBY_HOST = config.clean_host(emby_host)
sickbeard.EMBY_APIKEY = emby_apikey
sickbeard.USE_GROWL = config.checkbox_to_value(use_growl)
sickbeard.GROWL_NOTIFY_ONSNATCH = config.checkbox_to_value(growl_notify_onsnatch)
sickbeard.GROWL_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(growl_notify_ondownload)
sickbeard.GROWL_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(growl_notify_onsubtitledownload)
sickbeard.GROWL_HOST = config.clean_host(growl_host, default_port=23053)
sickbeard.GROWL_PASSWORD = growl_password
sickbeard.USE_FREEMOBILE = config.checkbox_to_value(use_freemobile)
sickbeard.FREEMOBILE_NOTIFY_ONSNATCH = config.checkbox_to_value(freemobile_notify_onsnatch)
sickbeard.FREEMOBILE_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(freemobile_notify_ondownload)
sickbeard.FREEMOBILE_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(freemobile_notify_onsubtitledownload)
sickbeard.FREEMOBILE_ID = freemobile_id
sickbeard.FREEMOBILE_APIKEY = freemobile_apikey
sickbeard.USE_TELEGRAM = config.checkbox_to_value(use_telegram)
sickbeard.TELEGRAM_NOTIFY_ONSNATCH = config.checkbox_to_value(telegram_notify_onsnatch)
sickbeard.TELEGRAM_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(telegram_notify_ondownload)
sickbeard.TELEGRAM_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(telegram_notify_onsubtitledownload)
sickbeard.TELEGRAM_ID = telegram_id
sickbeard.TELEGRAM_APIKEY = telegram_apikey
sickbeard.USE_PROWL = config.checkbox_to_value(use_prowl)
sickbeard.PROWL_NOTIFY_ONSNATCH = config.checkbox_to_value(prowl_notify_onsnatch)
sickbeard.PROWL_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(prowl_notify_ondownload)
sickbeard.PROWL_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(prowl_notify_onsubtitledownload)
sickbeard.PROWL_API = prowl_api
sickbeard.PROWL_PRIORITY = prowl_priority
sickbeard.PROWL_MESSAGE_TITLE = prowl_message_title
sickbeard.USE_TWITTER = config.checkbox_to_value(use_twitter)
sickbeard.TWITTER_NOTIFY_ONSNATCH = config.checkbox_to_value(twitter_notify_onsnatch)
sickbeard.TWITTER_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(twitter_notify_ondownload)
sickbeard.TWITTER_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(twitter_notify_onsubtitledownload)
sickbeard.TWITTER_USEDM = config.checkbox_to_value(twitter_usedm)
sickbeard.TWITTER_DMTO = twitter_dmto
sickbeard.USE_BOXCAR2 = config.checkbox_to_value(use_boxcar2)
sickbeard.BOXCAR2_NOTIFY_ONSNATCH = config.checkbox_to_value(boxcar2_notify_onsnatch)
sickbeard.BOXCAR2_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(boxcar2_notify_ondownload)
sickbeard.BOXCAR2_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(boxcar2_notify_onsubtitledownload)
sickbeard.BOXCAR2_ACCESSTOKEN = boxcar2_accesstoken
sickbeard.USE_PUSHOVER = config.checkbox_to_value(use_pushover)
sickbeard.PUSHOVER_NOTIFY_ONSNATCH = config.checkbox_to_value(pushover_notify_onsnatch)
sickbeard.PUSHOVER_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(pushover_notify_ondownload)
sickbeard.PUSHOVER_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(pushover_notify_onsubtitledownload)
sickbeard.PUSHOVER_USERKEY = pushover_userkey
sickbeard.PUSHOVER_APIKEY = pushover_apikey
sickbeard.PUSHOVER_DEVICE = pushover_device
sickbeard.PUSHOVER_SOUND = pushover_sound
sickbeard.USE_LIBNOTIFY = config.checkbox_to_value(use_libnotify)
sickbeard.LIBNOTIFY_NOTIFY_ONSNATCH = config.checkbox_to_value(libnotify_notify_onsnatch)
sickbeard.LIBNOTIFY_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(libnotify_notify_ondownload)
sickbeard.LIBNOTIFY_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(libnotify_notify_onsubtitledownload)
sickbeard.USE_NMJ = config.checkbox_to_value(use_nmj)
sickbeard.NMJ_HOST = config.clean_host(nmj_host)
sickbeard.NMJ_DATABASE = nmj_database
sickbeard.NMJ_MOUNT = nmj_mount
sickbeard.USE_NMJv2 = config.checkbox_to_value(use_nmjv2)
sickbeard.NMJv2_HOST = config.clean_host(nmjv2_host)
sickbeard.NMJv2_DATABASE = nmjv2_database
sickbeard.NMJv2_DBLOC = nmjv2_dbloc
sickbeard.USE_SYNOINDEX = config.checkbox_to_value(use_synoindex)
sickbeard.USE_SYNOLOGYNOTIFIER = config.checkbox_to_value(use_synologynotifier)
sickbeard.SYNOLOGYNOTIFIER_NOTIFY_ONSNATCH = config.checkbox_to_value(synologynotifier_notify_onsnatch)
sickbeard.SYNOLOGYNOTIFIER_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(synologynotifier_notify_ondownload)
sickbeard.SYNOLOGYNOTIFIER_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(
synologynotifier_notify_onsubtitledownload)
config.change_USE_TRAKT(use_trakt)
sickbeard.TRAKT_USERNAME = trakt_username
sickbeard.TRAKT_REMOVE_WATCHLIST = config.checkbox_to_value(trakt_remove_watchlist)
sickbeard.TRAKT_REMOVE_SERIESLIST = config.checkbox_to_value(trakt_remove_serieslist)
sickbeard.TRAKT_REMOVE_SHOW_FROM_SICKRAGE = config.checkbox_to_value(trakt_remove_show_from_sickrage)
sickbeard.TRAKT_SYNC_WATCHLIST = config.checkbox_to_value(trakt_sync_watchlist)
sickbeard.TRAKT_METHOD_ADD = int(trakt_method_add)
sickbeard.TRAKT_START_PAUSED = config.checkbox_to_value(trakt_start_paused)
sickbeard.TRAKT_USE_RECOMMENDED = config.checkbox_to_value(trakt_use_recommended)
sickbeard.TRAKT_SYNC = config.checkbox_to_value(trakt_sync)
sickbeard.TRAKT_SYNC_REMOVE = config.checkbox_to_value(trakt_sync_remove)
sickbeard.TRAKT_DEFAULT_INDEXER = int(trakt_default_indexer)
sickbeard.TRAKT_TIMEOUT = int(trakt_timeout)
sickbeard.TRAKT_BLACKLIST_NAME = trakt_blacklist_name
sickbeard.USE_EMAIL = config.checkbox_to_value(use_email)
sickbeard.EMAIL_NOTIFY_ONSNATCH = config.checkbox_to_value(email_notify_onsnatch)
sickbeard.EMAIL_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(email_notify_ondownload)
sickbeard.EMAIL_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(email_notify_onsubtitledownload)
sickbeard.EMAIL_HOST = config.clean_host(email_host)
sickbeard.EMAIL_PORT = try_int(email_port, 25)
sickbeard.EMAIL_FROM = email_from
sickbeard.EMAIL_TLS = config.checkbox_to_value(email_tls)
sickbeard.EMAIL_USER = email_user
sickbeard.EMAIL_PASSWORD = email_password
sickbeard.EMAIL_LIST = email_list
sickbeard.EMAIL_SUBJECT = email_subject
sickbeard.USE_PYTIVO = config.checkbox_to_value(use_pytivo)
sickbeard.PYTIVO_NOTIFY_ONSNATCH = config.checkbox_to_value(pytivo_notify_onsnatch)
sickbeard.PYTIVO_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(pytivo_notify_ondownload)
sickbeard.PYTIVO_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(pytivo_notify_onsubtitledownload)
sickbeard.PYTIVO_UPDATE_LIBRARY = config.checkbox_to_value(pytivo_update_library)
sickbeard.PYTIVO_HOST = config.clean_host(pytivo_host)
sickbeard.PYTIVO_SHARE_NAME = pytivo_share_name
sickbeard.PYTIVO_TIVO_NAME = pytivo_tivo_name
sickbeard.USE_NMA = config.checkbox_to_value(use_nma)
sickbeard.NMA_NOTIFY_ONSNATCH = config.checkbox_to_value(nma_notify_onsnatch)
sickbeard.NMA_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(nma_notify_ondownload)
sickbeard.NMA_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(nma_notify_onsubtitledownload)
sickbeard.NMA_API = nma_api
sickbeard.NMA_PRIORITY = nma_priority
sickbeard.USE_PUSHALOT = config.checkbox_to_value(use_pushalot)
sickbeard.PUSHALOT_NOTIFY_ONSNATCH = config.checkbox_to_value(pushalot_notify_onsnatch)
sickbeard.PUSHALOT_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(pushalot_notify_ondownload)
sickbeard.PUSHALOT_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(pushalot_notify_onsubtitledownload)
sickbeard.PUSHALOT_AUTHORIZATIONTOKEN = pushalot_authorizationtoken
sickbeard.USE_PUSHBULLET = config.checkbox_to_value(use_pushbullet)
sickbeard.PUSHBULLET_NOTIFY_ONSNATCH = config.checkbox_to_value(pushbullet_notify_onsnatch)
sickbeard.PUSHBULLET_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(pushbullet_notify_ondownload)
sickbeard.PUSHBULLET_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(pushbullet_notify_onsubtitledownload)
sickbeard.PUSHBULLET_API = pushbullet_api
sickbeard.PUSHBULLET_DEVICE = pushbullet_device_list
sickbeard.save_config()
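        # 'results' collects error messages for the notification shown below;
        # nothing in this method currently appends to it, so the success
        # branch always runs.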
if results:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br>\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek(os.path.join, sickbeard.CONFIG_FILE))
return self.redirect('/config/notifications/')
| gpl-3.0 |
chhao91/QGIS | python/plugins/processing/gui/CommanderWindow.py | 12 | 9065 | # -*- coding: utf-8 -*-
"""
***************************************************************************
CommanderWindow.py
---------------------
Date : April 2013
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'April 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import types
import os
import imp
from PyQt4.QtCore import Qt, QSize
from PyQt4.QtGui import QDialog, QLabel, QSpacerItem, QHBoxLayout, QVBoxLayout, QSizePolicy, QComboBox, QCompleter, QSortFilterProxyModel
from qgis.utils import iface
from processing.core.Processing import Processing
from processing.gui.MessageDialog import MessageDialog
from processing.gui.AlgorithmDialog import AlgorithmDialog
from processing.tools.system import userFolder, mkdir
ITEMHEIGHT = 30
OFFSET = 20
HEIGHT = 60
class CommanderWindow(QDialog):
def __init__(self, parent, canvas):
self.canvas = canvas
QDialog.__init__(self, parent, Qt.FramelessWindowHint)
self.commands = imp.load_source('commands', self.commandsFile())
self.initGui()
def commandsFolder(self):
folder = unicode(os.path.join(userFolder(), 'commander'))
mkdir(folder)
return os.path.abspath(folder)
def commandsFile(self):
f = os.path.join(self.commandsFolder(), 'commands.py')
if not os.path.exists(f):
out = open(f, 'w')
out.write('from qgis.core import *\n')
out.write('import processing\n\n')
out.write('def removeall():\n')
out.write('\tmapreg = QgsMapLayerRegistry.instance()\n')
out.write('\tmapreg.removeAllMapLayers()\n\n')
out.write('def load(*args):\n')
out.write('\tprocessing.load(args[0])\n')
out.close()
return f
def algsListHasChanged(self):
self.fillCombo()
def initGui(self):
self.combo = ExtendedComboBox()
self.fillCombo()
self.combo.setEditable(True)
self.label = QLabel('Enter command:')
self.errorLabel = QLabel('Enter command:')
self.vlayout = QVBoxLayout()
self.vlayout.setSpacing(2)
self.vlayout.setMargin(0)
self.vlayout.addSpacerItem(QSpacerItem(0, OFFSET,
QSizePolicy.Maximum, QSizePolicy.Expanding))
self.hlayout = QHBoxLayout()
self.hlayout.addWidget(self.label)
self.vlayout.addLayout(self.hlayout)
self.hlayout2 = QHBoxLayout()
self.hlayout2.addWidget(self.combo)
self.vlayout.addLayout(self.hlayout2)
self.vlayout.addSpacerItem(QSpacerItem(0, OFFSET,
QSizePolicy.Maximum, QSizePolicy.Expanding))
self.setLayout(self.vlayout)
self.combo.lineEdit().returnPressed.connect(self.run)
self.prepareGui()
def fillCombo(self):
self.combo.clear()
# Add algorithms
for providerName in Processing.algs.keys():
provider = Processing.algs[providerName]
algs = provider.values()
for alg in algs:
self.combo.addItem('Processing algorithm: ' + alg.name)
# Add functions
for command in dir(self.commands):
if isinstance(self.commands.__dict__.get(command),
types.FunctionType):
self.combo.addItem('Command: ' + command)
        # Add menu entries
menuActions = []
actions = iface.mainWindow().menuBar().actions()
for action in actions:
menuActions.extend(self.getActions(action))
for action in menuActions:
self.combo.addItem('Menu action: ' + unicode(action.text()))
def prepareGui(self):
self.combo.setEditText('')
self.combo.setMaximumSize(QSize(self.canvas.rect().width() - 2 * OFFSET, ITEMHEIGHT))
self.combo.view().setStyleSheet('min-height: 150px')
self.combo.setFocus(Qt.OtherFocusReason)
self.label.setMaximumSize(self.combo.maximumSize())
self.label.setVisible(False)
self.adjustSize()
pt = self.canvas.rect().topLeft()
absolutePt = self.canvas.mapToGlobal(pt)
self.move(absolutePt)
self.resize(self.canvas.rect().width(), HEIGHT)
self.setStyleSheet('CommanderWindow {background-color: #e7f5fe; \
border: 1px solid #b9cfe4;}')
def getActions(self, action):
menuActions = []
menu = action.menu()
if menu is None:
menuActions.append(action)
return menuActions
else:
actions = menu.actions()
for subaction in actions:
if subaction.menu() is not None:
menuActions.extend(self.getActions(subaction))
elif not subaction.isSeparator():
menuActions.append(subaction)
return menuActions
def run(self):
s = unicode(self.combo.currentText())
if s.startswith('Processing algorithm: '):
algName = s[len('Processing algorithm: '):]
alg = Processing.getAlgorithmFromFullName(algName)
if alg is not None:
self.close()
self.runAlgorithm(alg)
elif s.startswith("Command: "):
command = s[len("Command: "):]
try:
self.runCommand(command)
self.close()
except Exception as e:
self.label.setVisible(True)
                self.label.setText('Error: ' + unicode(e))
elif s.startswith('Menu action: '):
actionName = s[len('Menu action: '):]
menuActions = []
actions = iface.mainWindow().menuBar().actions()
for action in actions:
menuActions.extend(self.getActions(action))
for action in menuActions:
if action.text() == actionName:
self.close()
action.trigger()
return
else:
try:
self.runCommand(s)
self.close()
except Exception as e:
self.label.setVisible(True)
                self.label.setText('Error: ' + unicode(e))
def runCommand(self, command):
tokens = command.split(' ')
if len(tokens) == 1:
method = self.commands.__dict__.get(command)
if method is not None:
method()
else:
raise Exception('Wrong command')
else:
method = self.commands.__dict__.get(tokens[0])
if method is not None:
method(*tokens[1:])
else:
raise Exception('Wrong command')
def runAlgorithm(self, alg):
alg = alg.getCopy()
message = alg.checkBeforeOpeningParametersDialog()
if message:
dlg = MessageDialog()
dlg.setTitle(self.tr('Missing dependency'))
dlg.setMessage(message)
dlg.exec_()
return
dlg = alg.getCustomParametersDialog()
if not dlg:
dlg = AlgorithmDialog(alg)
canvas = iface.mapCanvas()
prevMapTool = canvas.mapTool()
dlg.show()
dlg.exec_()
if canvas.mapTool() != prevMapTool:
try:
canvas.mapTool().reset()
except:
pass
canvas.setMapTool(prevMapTool)
class ExtendedComboBox(QComboBox):
def __init__(self, parent=None):
super(ExtendedComboBox, self).__init__(parent)
self.setFocusPolicy(Qt.StrongFocus)
self.setEditable(True)
self.pFilterModel = QSortFilterProxyModel(self)
self.pFilterModel.setFilterCaseSensitivity(Qt.CaseInsensitive)
self.pFilterModel.setSourceModel(self.model())
self.completer = QCompleter(self.pFilterModel, self)
self.completer.setCompletionMode(QCompleter.UnfilteredPopupCompletion)
self.completer.popup().setStyleSheet('min-height: 150px')
self.completer.popup().setAlternatingRowColors(True)
self.setCompleter(self.completer)
self.lineEdit().textEdited[unicode].connect(self.pFilterModel.setFilterFixedString)
| gpl-2.0 |
poliastro/poliastro | tests/test_bodies.py | 1 | 2126 | import pytest
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from poliastro.bodies import Body, Earth, Jupiter, Sun
def test_body_has_k_given_in_constructor():
k = 3.98e5 * u.km ** 3 / u.s ** 2
earth = Body(None, k, "")
assert earth.k == k
def test_body_from_parameters_raises_valueerror_if_k_units_not_correct():
wrong_k = 4902.8 * u.kg
_name = _symbol = ""
_R = 0
with pytest.raises(u.UnitsError) as excinfo:
Body.from_parameters(None, wrong_k, _name, _symbol, _R)
assert (
"UnitsError: Argument 'k' to function 'from_parameters' must be in units convertible to 'km3 / s2'."
in excinfo.exconly()
)
def test_body_from_parameters_returns_body_object():
k = 1.26712763e17 * u.m ** 3 / u.s ** 2
R = 71492000 * u.m
_name = _symbol = "jupiter"
jupiter = Body.from_parameters(Sun, k, _name, _symbol, Jupiter.R)
assert jupiter.k == k
assert jupiter.R == R
def test_body_printing_has_name_and_symbol():
name = "2 Pallas"
symbol = u"\u26b4"
k = 1.41e10 * u.m ** 3 / u.s ** 2
pallas2 = Body(None, k, name, symbol)
assert name in str(pallas2)
assert symbol in str(pallas2)
def test_earth_has_k_given_in_literature():
expected_k = 3.986004418e14 * u.m ** 3 / u.s ** 2
k = Earth.k
assert_quantity_allclose(k.decompose([u.km, u.s]), expected_k)
def test_earth_has_angular_velocity_given_in_literature():
expected_k = 7.292114e-5 * u.rad / u.s
k = Earth.angular_velocity
assert_quantity_allclose(k.decompose([u.rad, u.s]), expected_k)
def test_from_relative():
TRAPPIST1 = Body.from_relative(
reference=Sun,
parent=None,
k=0.08, # Relative to the Sun
name="TRAPPIST",
symbol=None,
R=0.114,
) # Relative to the Sun
# Check values properly calculated
VALUECHECK = Body.from_relative(
reference=Earth,
parent=TRAPPIST1,
k=1,
name="VALUECHECK",
symbol=None,
R=1,
)
assert Earth.k == VALUECHECK.k
assert Earth.R == VALUECHECK.R
| mit |
cyx1231st/nova | nova/objects/build_request.py | 6 | 6724 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
import six
from nova.db.sqlalchemy import api as db
from nova.db.sqlalchemy import api_models
from nova import exception
from nova.i18n import _LE
from nova import objects
from nova.objects import base
from nova.objects import fields
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
OBJECT_FIELDS = ['info_cache', 'security_groups']
JSON_FIELDS = ['instance_metadata']
IP_FIELDS = ['access_ip_v4', 'access_ip_v6']
@base.NovaObjectRegistry.register
class BuildRequest(base.NovaObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'id': fields.IntegerField(),
'project_id': fields.StringField(),
'user_id': fields.StringField(),
'display_name': fields.StringField(nullable=True),
'instance_metadata': fields.DictOfStringsField(nullable=True),
'progress': fields.IntegerField(nullable=True),
'vm_state': fields.StringField(nullable=True),
'task_state': fields.StringField(nullable=True),
'image_ref': fields.StringField(nullable=True),
'access_ip_v4': fields.IPV4AddressField(nullable=True),
'access_ip_v6': fields.IPV6AddressField(nullable=True),
'info_cache': fields.ObjectField('InstanceInfoCache', nullable=True),
'security_groups': fields.ObjectField('SecurityGroupList'),
'config_drive': fields.BooleanField(default=False),
'key_name': fields.StringField(nullable=True),
'locked_by': fields.EnumField(['owner', 'admin'], nullable=True),
'request_spec': fields.ObjectField('RequestSpec'),
# NOTE(alaski): Normally these would come from the NovaPersistentObject
# mixin but they're being set explicitly because we only need
# created_at/updated_at. There is no soft delete for this object.
# These fields should be carried over to the instance when it is
# scheduled and created in a cell database.
'created_at': fields.DateTimeField(nullable=True),
'updated_at': fields.DateTimeField(nullable=True),
}
def _load_request_spec(self, db_spec):
self.request_spec = objects.RequestSpec._from_db_object(self._context,
objects.RequestSpec(), db_spec)
def _load_info_cache(self, db_info_cache):
self.info_cache = objects.InstanceInfoCache.obj_from_primitive(
jsonutils.loads(db_info_cache))
def _load_security_groups(self, db_sec_group):
self.security_groups = objects.SecurityGroupList.obj_from_primitive(
jsonutils.loads(db_sec_group))
@staticmethod
def _from_db_object(context, req, db_req):
for key in req.fields:
if isinstance(req.fields[key], fields.ObjectField):
try:
getattr(req, '_load_%s' % key)(db_req[key])
except AttributeError:
LOG.exception(_LE('No load handler for %s'), key)
elif key in JSON_FIELDS and db_req[key] is not None:
setattr(req, key, jsonutils.loads(db_req[key]))
else:
setattr(req, key, db_req[key])
req.obj_reset_changes()
req._context = context
return req
@staticmethod
@db.api_context_manager.reader
def _get_by_instance_uuid_from_db(context, instance_uuid):
db_req = (context.session.query(api_models.BuildRequest)
.join(api_models.RequestSpec)
.with_entities(api_models.BuildRequest,
api_models.RequestSpec)
.filter(
api_models.RequestSpec.instance_uuid == instance_uuid)
).first()
if not db_req:
raise exception.BuildRequestNotFound(uuid=instance_uuid)
        # db_req is a tuple (api_models.BuildRequest, api_models.RequestSpec)
build_req = db_req[0]
build_req['request_spec'] = db_req[1]
return build_req
@base.remotable_classmethod
def get_by_instance_uuid(cls, context, instance_uuid):
db_req = cls._get_by_instance_uuid_from_db(context, instance_uuid)
return cls._from_db_object(context, cls(), db_req)
@staticmethod
@db.api_context_manager.writer
def _create_in_db(context, updates):
db_req = api_models.BuildRequest()
db_req.update(updates)
db_req.save(context.session)
# NOTE: This is done because a later access will trigger a lazy load
# outside of the db session so it will fail. We don't lazy load
# request_spec on the object later because we never need a BuildRequest
# without the RequestSpec.
db_req.request_spec
return db_req
def _get_update_primitives(self):
updates = self.obj_get_changes()
for key, value in six.iteritems(updates):
if key in OBJECT_FIELDS and value is not None:
updates[key] = jsonutils.dumps(value.obj_to_primitive())
elif key in JSON_FIELDS and value is not None:
updates[key] = jsonutils.dumps(value)
elif key in IP_FIELDS and value is not None:
# These are stored as a string in the db and must be converted
updates[key] = str(value)
req_spec_obj = updates.pop('request_spec', None)
if req_spec_obj:
updates['request_spec_id'] = req_spec_obj.id
return updates
@base.remotable
def create(self):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
updates = self._get_update_primitives()
db_req = self._create_in_db(self._context, updates)
self._from_db_object(self._context, self, db_req)
@staticmethod
@db.api_context_manager.writer
def _destroy_in_db(context, id):
context.session.query(api_models.BuildRequest).filter_by(
id=id).delete()
@base.remotable
def destroy(self):
self._destroy_in_db(self._context, self.id)
| apache-2.0 |
eddyb/servo | tests/wpt/web-platform-tests/mathml/tools/stacks.py | 92 | 2243 | #!/usr/bin/python
from utils import mathfont
import fontforge
v = 7 * mathfont.em
f = mathfont.create("stack-axisheight%d" % v)
f.math.AxisHeight = v
f.math.StackBottomDisplayStyleShiftDown = 0
f.math.StackBottomShiftDown = 0
f.math.StackDisplayStyleGapMin = 0
f.math.StackGapMin = 0
f.math.StackTopDisplayStyleShiftUp = 0
f.math.StackTopShiftUp = 0
mathfont.save(f)
v = 5 * mathfont.em
f = mathfont.create("stack-bottomdisplaystyleshiftdown%d" % v)
f.math.AxisHeight = 0
f.math.StackBottomDisplayStyleShiftDown = v
f.math.StackBottomShiftDown = 0
f.math.StackDisplayStyleGapMin = 0
f.math.StackGapMin = 0
f.math.StackTopDisplayStyleShiftUp = 0
f.math.StackTopShiftUp = 0
mathfont.save(f)
v = 6 * mathfont.em
f = mathfont.create("stack-bottomshiftdown%d" % v)
f.math.AxisHeight = 0
f.math.StackBottomDisplayStyleShiftDown = 0
f.math.StackBottomShiftDown = v
f.math.StackDisplayStyleGapMin = 0
f.math.StackGapMin = 0
f.math.StackTopDisplayStyleShiftUp = 0
f.math.StackTopShiftUp = 0
mathfont.save(f)
v = 4 * mathfont.em
f = mathfont.create("stack-displaystylegapmin%d" % v)
f.math.AxisHeight = 0
f.math.StackBottomDisplayStyleShiftDown = 0
f.math.StackBottomShiftDown = 0
f.math.StackDisplayStyleGapMin = v
f.math.StackGapMin = 0
f.math.StackTopDisplayStyleShiftUp = 0
f.math.StackTopShiftUp = 0
mathfont.save(f)
v = 8 * mathfont.em
f = mathfont.create("stack-gapmin%d" % v)
f.math.AxisHeight = 0
f.math.StackBottomDisplayStyleShiftDown = 0
f.math.StackBottomShiftDown = 0
f.math.StackDisplayStyleGapMin = 0
f.math.StackGapMin = v
f.math.StackTopDisplayStyleShiftUp = 0
f.math.StackTopShiftUp = 0
mathfont.save(f)
v = 3 * mathfont.em
f = mathfont.create("stack-topdisplaystyleshiftup%d" % v)
f.math.AxisHeight = 0
f.math.StackBottomDisplayStyleShiftDown = 0
f.math.StackBottomShiftDown = 0
f.math.StackDisplayStyleGapMin = 0
f.math.StackGapMin = 0
f.math.StackTopDisplayStyleShiftUp = v
f.math.StackTopShiftUp = 0
mathfont.save(f)
v = 9 * mathfont.em
f = mathfont.create("stack-topshiftup%d" % v)
f.math.AxisHeight = 0
f.math.StackBottomDisplayStyleShiftDown = 0
f.math.StackBottomShiftDown = 0
f.math.StackDisplayStyleGapMin = 0
f.math.StackGapMin = 0
f.math.StackTopDisplayStyleShiftUp = 0
f.math.StackTopShiftUp = v
mathfont.save(f)
| mpl-2.0 |
lferr/charm | charm/test/schemes/dabenc_test.py | 1 | 3191 | from charm.schemes.dabe_aw11 import Dabe
from charm.adapters.dabenc_adapt_hybrid import HybridABEncMA
from charm.toolbox.pairinggroup import PairingGroup, GT
import unittest
debug = False
class DabeTest(unittest.TestCase):
def testDabe(self):
groupObj = PairingGroup('SS512')
dabe = Dabe(groupObj)
GP = dabe.setup()
#Setup an authority
auth_attrs= ['ONE', 'TWO', 'THREE', 'FOUR']
(SK, PK) = dabe.authsetup(GP, auth_attrs)
if debug: print("Authority SK")
if debug: print(SK)
#Setup a user and give him some keys
gid, K = "bob", {}
usr_attrs = ['THREE', 'ONE', 'TWO']
for i in usr_attrs: dabe.keygen(GP, SK, i, gid, K)
if debug: print('User credential list: %s' % usr_attrs)
if debug: print("\nSecret key:")
if debug: groupObj.debug(K)
#Encrypt a random element in GT
m = groupObj.random(GT)
policy = '((one or three) and (TWO or FOUR))'
        if debug: print('Access Policy: %s' % policy)
CT = dabe.encrypt(PK, GP, m, policy)
if debug: print("\nCiphertext...")
if debug: groupObj.debug(CT)
orig_m = dabe.decrypt(GP, K, CT)
assert m == orig_m, 'FAILED Decryption!!!'
if debug: print('Successful Decryption!')
class HybridABEncMATest(unittest.TestCase):
def testHybridABEncMA(self):
groupObj = PairingGroup('SS512')
dabe = Dabe(groupObj)
hyb_abema = HybridABEncMA(dabe, groupObj)
#Setup global parameters for all new authorities
gp = hyb_abema.setup()
        # Instantiate a few authorities.
        # Attribute names must be globally unique across authorities: two
        # authorities may not issue keys for the same attribute, otherwise
        # the decryption algorithm will not know which private key to use.
jhu_attributes = ['jhu.professor', 'jhu.staff', 'jhu.student']
jhmi_attributes = ['jhmi.doctor', 'jhmi.nurse', 'jhmi.staff', 'jhmi.researcher']
(jhuSK, jhuPK) = hyb_abema.authsetup(gp, jhu_attributes)
(jhmiSK, jhmiPK) = hyb_abema.authsetup(gp, jhmi_attributes)
allAuthPK = {}; allAuthPK.update(jhuPK); allAuthPK.update(jhmiPK)
#Setup a user with a few keys
bobs_gid = "20110615 [email protected] cryptokey"
K = {}
hyb_abema.keygen(gp, jhuSK,'jhu.professor', bobs_gid, K)
hyb_abema.keygen(gp, jhmiSK,'jhmi.researcher', bobs_gid, K)
msg = b'Hello World, I am a sensitive record!'
size = len(msg)
policy_str = "(jhmi.doctor or (jhmi.researcher and jhu.professor))"
ct = hyb_abema.encrypt(allAuthPK, gp, msg, policy_str)
if debug:
print("Ciphertext")
print("c1 =>", ct['c1'])
print("c2 =>", ct['c2'])
decrypted_msg = hyb_abema.decrypt(gp, K, ct)
if debug: print("Result =>", decrypted_msg)
assert decrypted_msg == msg, "Failed Decryption!!!"
if debug: print("Successful Decryption!!!")
del groupObj
if __name__ == "__main__":
unittest.main()
| lgpl-3.0 |
sem-geologist/hyperspy | hyperspy/samfire_utils/goodness_of_fit_tests/test_general.py | 6 | 1052 | # -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
class goodness_test(object):
_tolerance = None
@property
def tolerance(self):
return self._tolerance
@tolerance.setter
def tolerance(self, value):
if value is None:
self._tolerance = None
else:
self._tolerance = np.abs(value)
| gpl-3.0 |
rhdedgar/openshift-tools | openshift/installer/vendored/openshift-ansible-3.4.40/roles/lib_openshift/src/test/unit/test_oadm_manage_node.py | 17 | 10306 | '''
Unit tests for oadm_manage_node
'''
import os
import six
import sys
import unittest
import mock
# Removing invalid variable names for tests so that I can
# keep them brief
# pylint: disable=invalid-name,no-name-in-module
# Disable import-error b/c our libraries aren't loaded in jenkins
# pylint: disable=import-error
# place class in our python path
module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
sys.path.insert(0, module_path)
from oadm_manage_node import ManageNode, locate_oc_binary # noqa: E402
class ManageNodeTest(unittest.TestCase):
'''
Test class for oadm_manage_node
'''
@mock.patch('oadm_manage_node.Utils.create_tmpfile_copy')
@mock.patch('oadm_manage_node.ManageNode.openshift_cmd')
def test_list_pods(self, mock_openshift_cmd, mock_tmpfile_copy):
''' Testing a get '''
params = {'node': ['ip-172-31-49-140.ec2.internal'],
'schedulable': None,
'selector': None,
'pod_selector': None,
'list_pods': True,
'kubeconfig': '/etc/origin/master/admin.kubeconfig',
'evacuate': False,
'grace_period': False,
'dry_run': False,
'force': False}
pod_list = '''{
"metadata": {},
"items": [
{
"metadata": {
"name": "docker-registry-1-xuhik",
"generateName": "docker-registry-1-",
"namespace": "default",
"selfLink": "/api/v1/namespaces/default/pods/docker-registry-1-xuhik",
"uid": "ae2a25a2-e316-11e6-80eb-0ecdc51fcfc4",
"resourceVersion": "1501",
"creationTimestamp": "2017-01-25T15:55:23Z",
"labels": {
"deployment": "docker-registry-1",
"deploymentconfig": "docker-registry",
"docker-registry": "default"
},
"annotations": {
"openshift.io/deployment-config.latest-version": "1",
"openshift.io/deployment-config.name": "docker-registry",
"openshift.io/deployment.name": "docker-registry-1",
"openshift.io/scc": "restricted"
}
},
"spec": {}
},
{
"metadata": {
"name": "router-1-kp3m3",
"generateName": "router-1-",
"namespace": "default",
"selfLink": "/api/v1/namespaces/default/pods/router-1-kp3m3",
"uid": "9e71f4a5-e316-11e6-80eb-0ecdc51fcfc4",
"resourceVersion": "1456",
"creationTimestamp": "2017-01-25T15:54:56Z",
"labels": {
"deployment": "router-1",
"deploymentconfig": "router",
"router": "router"
},
"annotations": {
"openshift.io/deployment-config.latest-version": "1",
"openshift.io/deployment-config.name": "router",
"openshift.io/deployment.name": "router-1",
"openshift.io/scc": "hostnetwork"
}
},
"spec": {}
}]
}'''
mock_openshift_cmd.side_effect = [
{"cmd": "/usr/bin/oadm manage-node ip-172-31-49-140.ec2.internal --list-pods",
"results": pod_list,
"returncode": 0}
]
mock_tmpfile_copy.side_effect = [
'/tmp/mocked_kubeconfig',
]
results = ManageNode.run_ansible(params, False)
# returned a single node
self.assertTrue(len(results['results']['nodes']) == 1)
# returned 2 pods
self.assertTrue(len(results['results']['nodes']['ip-172-31-49-140.ec2.internal']) == 2)
@mock.patch('oadm_manage_node.Utils.create_tmpfile_copy')
@mock.patch('oadm_manage_node.ManageNode.openshift_cmd')
def test_schedulable_false(self, mock_openshift_cmd, mock_tmpfile_copy):
''' Testing a get '''
params = {'node': ['ip-172-31-49-140.ec2.internal'],
'schedulable': False,
'selector': None,
'pod_selector': None,
'list_pods': False,
'kubeconfig': '/etc/origin/master/admin.kubeconfig',
'evacuate': False,
'grace_period': False,
'dry_run': False,
'force': False}
node = [{
"apiVersion": "v1",
"kind": "Node",
"metadata": {
"creationTimestamp": "2017-01-26T14:34:43Z",
"labels": {
"beta.kubernetes.io/arch": "amd64",
"beta.kubernetes.io/instance-type": "m4.large",
"beta.kubernetes.io/os": "linux",
"failure-domain.beta.kubernetes.io/region": "us-east-1",
"failure-domain.beta.kubernetes.io/zone": "us-east-1c",
"hostname": "opstest-node-compute-0daaf",
"kubernetes.io/hostname": "ip-172-31-51-111.ec2.internal",
"ops_node": "old",
"region": "us-east-1",
"type": "compute"
},
"name": "ip-172-31-51-111.ec2.internal",
"resourceVersion": "6936",
"selfLink": "/api/v1/nodes/ip-172-31-51-111.ec2.internal",
"uid": "93d7fdfb-e3d4-11e6-a982-0e84250fc302"
},
"spec": {
"externalID": "i-06bb330e55c699b0f",
"providerID": "aws:///us-east-1c/i-06bb330e55c699b0f",
}}]
mock_openshift_cmd.side_effect = [
{"cmd": "/usr/bin/oc get node -o json ip-172-31-49-140.ec2.internal",
"results": node,
"returncode": 0},
{"cmd": "/usr/bin/oadm manage-node ip-172-31-49-140.ec2.internal --schedulable=False",
"results": "NAME STATUS AGE\n" +
"ip-172-31-49-140.ec2.internal Ready,SchedulingDisabled 5h\n",
"returncode": 0}]
mock_tmpfile_copy.side_effect = [
'/tmp/mocked_kubeconfig',
]
results = ManageNode.run_ansible(params, False)
self.assertTrue(results['changed'])
self.assertEqual(results['results']['nodes'][0]['name'], 'ip-172-31-49-140.ec2.internal')
self.assertEqual(results['results']['nodes'][0]['schedulable'], False)
@unittest.skipIf(six.PY3, 'py2 test only')
@mock.patch('os.path.exists')
@mock.patch('os.environ.get')
def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists):
''' Testing binary lookup fallback '''
mock_env_get.side_effect = lambda _v, _d: ''
mock_path_exists.side_effect = lambda _: False
self.assertEqual(locate_oc_binary(), 'oc')
@unittest.skipIf(six.PY2, 'py3 test only')
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which):
''' Testing binary lookup in path '''
oc_bin = '/usr/bin/oc'
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY3, 'py2 test only')
@mock.patch('os.path.exists')
@mock.patch('os.environ.get')
def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists):
''' Testing binary lookup in path '''
oc_bin = '/usr/bin/oc'
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_path_exists.side_effect = lambda f: f == oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY3, 'py2 test only')
@mock.patch('os.path.exists')
@mock.patch('os.environ.get')
def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists):
''' Testing binary lookup in /usr/local/bin '''
oc_bin = '/usr/local/bin/oc'
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_path_exists.side_effect = lambda f: f == oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY3, 'py2 test only')
@mock.patch('os.path.exists')
@mock.patch('os.environ.get')
def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists):
''' Testing binary lookup in ~/bin '''
oc_bin = os.path.expanduser('~/bin/oc')
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_path_exists.side_effect = lambda f: f == oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY2, 'py3 test only')
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which):
''' Testing binary lookup fallback '''
mock_env_get.side_effect = lambda _v, _d: ''
mock_shutil_which.side_effect = lambda _f, path=None: None
self.assertEqual(locate_oc_binary(), 'oc')
@unittest.skipIf(six.PY2, 'py3 test only')
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which):
''' Testing binary lookup in /usr/local/bin '''
oc_bin = '/usr/local/bin/oc'
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
@unittest.skipIf(six.PY2, 'py3 test only')
@mock.patch('shutil.which')
@mock.patch('os.environ.get')
def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which):
''' Testing binary lookup in ~/bin '''
oc_bin = os.path.expanduser('~/bin/oc')
mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
| apache-2.0 |
sankalpg/Essentia_tonicDebug_TEMP | test/src/unittest/sfx/test_aftermaxtobeforemaxenergyratio_streaming.py | 10 | 2179 | #!/usr/bin/env python
# Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
from essentia.streaming import AfterMaxToBeforeMaxEnergyRatio as \
sAfterMaxToBeforeMaxEnergyRatio
class TestAfterMaxToBeforeMaxEnergyRatio_Streaming(TestCase):
def testEmpty(self):
gen = VectorInput([])
strRatio = sAfterMaxToBeforeMaxEnergyRatio()
p = Pool()
gen.data >> strRatio.pitch
strRatio.afterMaxToBeforeMaxEnergyRatio >> (p, 'lowlevel.amtbmer')
run(gen)
self.assertRaises(KeyError, lambda: p['lowlevel.amtbmer'])
def testRegression(self):
        # This algorithm has a standard-mode implementation which has been
        # tested through the unit tests in python. Therefore we only test
        # that, for a certain input, standard == streaming.
pitch = readVector(join(filedir(), 'aftermaxtobeforemaxenergyratio', 'input.txt'))
p = Pool()
gen = VectorInput(pitch)
strRatio = sAfterMaxToBeforeMaxEnergyRatio()
gen.data >> strRatio.pitch
strRatio.afterMaxToBeforeMaxEnergyRatio >> (p, 'lowlevel.amtbmer')
run(gen)
stdResult = AfterMaxToBeforeMaxEnergyRatio()(pitch)
strResult = p['lowlevel.amtbmer']
self.assertAlmostEqual(strResult, stdResult, 5e-7)
suite = allTests(TestAfterMaxToBeforeMaxEnergyRatio_Streaming)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
| agpl-3.0 |
blankenberg/tools-iuc | data_managers/data_manager_diamond_database_builder/data_manager/data_manager_diamond_database_builder.py | 9 | 10627 | #!/usr/bin/env python
import bz2
import gzip
import json
import optparse
import os
import shutil
import subprocess
import sys
import tarfile
import tempfile
import urllib.error
import urllib.parse
import urllib.request
import zipfile
from ftplib import FTP
CHUNK_SIZE = 2**20 # 1mb
def cleanup_before_exit(tmp_dir):
if tmp_dir and os.path.exists(tmp_dir):
shutil.rmtree(tmp_dir)
def _get_files_in_ftp_path(ftp, path):
path_contents = []
ftp.retrlines('MLSD %s' % (path), path_contents.append)
return [line.split(';')[-1].lstrip() for line in path_contents]
def _get_stream_readers_for_tar(file_obj, tmp_dir):
fasta_tar = tarfile.open(fileobj=file_obj, mode='r:*')
return [fasta_tar.extractfile(member) for member in fasta_tar.getmembers()]
def _get_stream_readers_for_zip(file_obj, tmp_dir):
fasta_zip = zipfile.ZipFile(file_obj, 'r')
rval = []
for member in fasta_zip.namelist():
fasta_zip.extract(member, tmp_dir)
rval.append(open(os.path.join(tmp_dir, member), 'rb'))
return rval
def _get_stream_readers_for_gzip(file_obj, tmp_dir):
return [gzip.GzipFile(fileobj=file_obj, mode='rb')]
def _get_stream_readers_for_bz2(file_obj, tmp_dir):
return [bz2.BZ2File(file_obj.name, 'rb')]
def download_from_ncbi(data_manager_dict, params, target_directory,
database_id, database_name):
NCBI_FTP_SERVER = 'ftp.ncbi.nlm.nih.gov'
NCBI_DOWNLOAD_PATH = '/blast/db/FASTA/'
COMPRESSED_EXTENSIONS = [('.tar.gz', _get_stream_readers_for_tar),
('.tar.bz2', _get_stream_readers_for_tar),
('.zip', _get_stream_readers_for_zip),
('.gz', _get_stream_readers_for_gzip),
('.bz2', _get_stream_readers_for_bz2)]
ncbi_identifier = params['reference_source']['requested_identifier']
ftp = FTP(NCBI_FTP_SERVER)
ftp.login()
path_contents = _get_files_in_ftp_path(ftp, NCBI_DOWNLOAD_PATH)
ncbi_file_name = None
get_stream_reader = None
ext = None
for ext, get_stream_reader in COMPRESSED_EXTENSIONS:
if "%s%s" % (ncbi_identifier, ext) in path_contents:
ncbi_file_name = "%s%s%s" % (NCBI_DOWNLOAD_PATH, ncbi_identifier, ext)
break
if not ncbi_file_name:
raise Exception('Unable to determine filename for NCBI database for %s: %s' % (ncbi_identifier, path_contents))
tmp_dir = tempfile.mkdtemp(prefix='tmp-data-manager-ncbi-')
ncbi_fasta_filename = os.path.join(tmp_dir, "%s%s" % (ncbi_identifier, ext))
# fasta_base_filename = "%s.fa" % database_id
# fasta_filename = os.path.join(target_directory, fasta_base_filename)
# fasta_writer = open(fasta_filename, 'wb+')
tmp_extract_dir = os.path.join(tmp_dir, 'extracted_fasta')
os.mkdir(tmp_extract_dir)
tmp_fasta = open(ncbi_fasta_filename, 'wb+')
ftp.retrbinary('RETR %s' % ncbi_file_name, tmp_fasta.write)
tmp_fasta.flush()
tmp_fasta.seek(0)
fasta_readers = get_stream_reader(tmp_fasta, tmp_extract_dir)
data_table_entry = _stream_fasta_to_file(fasta_readers, target_directory, database_id, database_name, params)
_add_data_table_entry(data_manager_dict, data_table_entry)
for fasta_reader in fasta_readers:
fasta_reader.close()
tmp_fasta.close()
cleanup_before_exit(tmp_dir)
def download_from_url(data_manager_dict, params, target_directory, database_id, database_name):
# TODO: we should automatically do decompression here
urls = list(filter(bool, [x.strip() for x in params['reference_source']['user_url'].split('\n')]))
fasta_reader = [urllib.request.urlopen(url) for url in urls]
data_table_entry = _stream_fasta_to_file(fasta_reader, target_directory, database_id, database_name, params)
_add_data_table_entry(data_manager_dict, data_table_entry)
def download_from_history(data_manager_dict, params, target_directory, database_id, database_name):
# TODO: allow multiple FASTA input files
input_filename = params['reference_source']['input_fasta']
if isinstance(input_filename, list):
fasta_reader = [open(filename, 'rb') for filename in input_filename]
else:
fasta_reader = open(input_filename, 'rb')
data_table_entry = _stream_fasta_to_file(fasta_reader, target_directory, database_id, database_name, params)
_add_data_table_entry(data_manager_dict, data_table_entry)
def copy_from_directory(data_manager_dict, params, target_directory, database_id, database_name):
input_filename = params['reference_source']['fasta_filename']
create_symlink = params['reference_source']['create_symlink'] == 'create_symlink'
if create_symlink:
data_table_entry = _create_symlink(input_filename, target_directory, database_id, database_name)
else:
if isinstance(input_filename, list):
fasta_reader = [open(filename, 'rb') for filename in input_filename]
else:
            fasta_reader = open(input_filename, 'rb')  # binary, to match the other readers
data_table_entry = _stream_fasta_to_file(fasta_reader, target_directory, database_id, database_name, params)
_add_data_table_entry(data_manager_dict, data_table_entry)
def _add_data_table_entry(data_manager_dict, data_table_entry):
data_manager_dict['data_tables'] = data_manager_dict.get('data_tables', {})
data_manager_dict['data_tables']['diamond_database'] = data_manager_dict['data_tables'].get('diamond_database', [])
data_manager_dict['data_tables']['diamond_database'].append(data_table_entry)
return data_manager_dict
def _stream_fasta_to_file(fasta_stream, target_directory, database_id,
database_name, params, close_stream=True):
fasta_base_filename = "%s.fa" % database_id
fasta_filename = os.path.join(target_directory, fasta_base_filename)
temp_fasta = tempfile.NamedTemporaryFile(delete=False, suffix=".fasta")
temp_fasta.close()
fasta_writer = open(temp_fasta.name, 'wb+')
if not isinstance(fasta_stream, list):
fasta_stream = [fasta_stream]
    last_char = None
    for fh in fasta_stream:
        # The streams are read as bytes, so the last character is an integer
        # byte value; separate concatenated FASTA streams with a newline.
        if last_char not in (None, ord('\n'), ord('\r')):
            fasta_writer.write(b'\n')
while True:
data = fh.read(CHUNK_SIZE)
if data:
fasta_writer.write(data)
last_char = data[-1]
else:
break
if close_stream:
fh.close()
fasta_writer.close()
args = ['diamond', 'makedb',
'--in', temp_fasta.name,
'--db', fasta_filename]
if params['tax_cond']['tax_select'] == "history":
for i in ["taxonmap", "taxonnodes", "taxonnames"]:
args.extend(['--' + i, params['tax_cond'][i]])
elif params['tax_cond']['tax_select'] == "ncbi":
if os.path.isfile(os.path.join(params['tax_cond']['ncbi_tax'], 'prot.accession2taxid.FULL.gz')):
args.extend(['--taxonmap',
os.path.join(params['tax_cond']['ncbi_tax'], 'prot.accession2taxid.FULL.gz')])
elif os.path.isfile(os.path.join(params['tax_cond']['ncbi_tax'], 'prot.accession2taxid.FULL')):
args.extend(['--taxonmap',
os.path.join(params['tax_cond']['ncbi_tax'], 'prot.accession2taxid.FULL')])
elif os.path.isfile(os.path.join(params['tax_cond']['ncbi_tax'], 'prot.accession2taxid.gz')):
args.extend(['--taxonmap',
os.path.join(params['tax_cond']['ncbi_tax'], 'prot.accession2taxid.gz')])
elif os.path.isfile(os.path.join(params['tax_cond']['ncbi_tax'], 'prot.accession2taxid')):
args.extend(['--taxonmap',
os.path.join(params['tax_cond']['ncbi_tax'], 'prot.accession2taxid')])
else:
raise Exception('Unable to find prot.accession2taxid file in %s' % (params['tax_cond']['ncbi_tax']))
args.extend(['--taxonnodes',
os.path.join(params['tax_cond']['ncbi_tax'], 'nodes.dmp')])
args.extend(['--taxonnames',
os.path.join(params['tax_cond']['ncbi_tax'], 'names.dmp')])
tmp_stderr = tempfile.NamedTemporaryFile(prefix="tmp-data-manager-diamond-database-builder-stderr")
proc = subprocess.Popen(args=args, shell=False, cwd=target_directory,
stderr=tmp_stderr.fileno())
return_code = proc.wait()
if return_code:
tmp_stderr.flush()
tmp_stderr.seek(0)
print("Error building diamond database:", file=sys.stderr)
while True:
chunk = tmp_stderr.read(CHUNK_SIZE)
if not chunk:
break
sys.stderr.write(chunk.decode('utf-8'))
sys.exit(return_code)
tmp_stderr.close()
os.remove(temp_fasta.name)
return dict(value=database_id, name=database_name,
db_path="%s.dmnd" % fasta_base_filename)
def _create_symlink(input_filename, target_directory, database_id, database_name):
fasta_base_filename = "%s.fa" % database_id
fasta_filename = os.path.join(target_directory, fasta_base_filename)
os.symlink(input_filename, fasta_filename)
return dict(value=database_id, name=database_name, db_path=fasta_base_filename)
REFERENCE_SOURCE_TO_DOWNLOAD = dict(ncbi=download_from_ncbi,
url=download_from_url,
history=download_from_history,
directory=copy_from_directory)
def main():
# Parse Command Line
parser = optparse.OptionParser()
parser.add_option('-d', '--dbkey_description', dest='dbkey_description',
action='store', type="string", default=None,
help='dbkey_description')
(options, args) = parser.parse_args()
filename = args[0]
with open(filename) as fp:
params = json.load(fp)
target_directory = params['output_data'][0]['extra_files_path']
os.mkdir(target_directory)
data_manager_dict = {}
param_dict = params['param_dict']
database_id = param_dict['database_id']
database_name = param_dict['database_name']
if param_dict['tax_cond']['tax_select'] == "ncbi":
param_dict['tax_cond']['ncbi_tax'] = args[1]
# Fetch the FASTA
REFERENCE_SOURCE_TO_DOWNLOAD[param_dict['reference_source']['reference_source_selector']](data_manager_dict, param_dict, target_directory, database_id, database_name)
# save info to json file
open(filename, 'w').write(json.dumps(data_manager_dict, sort_keys=True))
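# Hedged illustration of the JSON parameter file passed as the first
# positional argument; the keys are inferred from the code above and the
# values are placeholders, not a real Galaxy request:
#   {
#     "output_data": [{"extra_files_path": "/tmp/diamond_db_out"}],
#     "param_dict": {
#       "database_id": "nr_demo",
#       "database_name": "NCBI nr (demo)",
#       "tax_cond": {"tax_select": "ncbi"},
#       "reference_source": {"reference_source_selector": "ncbi",
#                            "requested_identifier": "nr"}
#     }
#   }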
if __name__ == "__main__":
main()
| mit |
yuewko/neutron | neutron/agent/ovsdb/impl_idl.py | 4 | 7372 | # Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six.moves import queue as Queue
import time
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from ovs.db import idl
from neutron.agent.ovsdb import api
from neutron.agent.ovsdb.native import commands as cmd
from neutron.agent.ovsdb.native import connection
from neutron.agent.ovsdb.native import idlutils
from neutron.i18n import _LE
OPTS = [
cfg.StrOpt('ovsdb_connection',
default='tcp:127.0.0.1:6640',
help=_('The connection string for the native OVSDB backend')),
]
cfg.CONF.register_opts(OPTS, 'OVS')
# TODO(twilson) DEFAULT.ovs_vsctl_timeout should be OVS.vsctl_timeout
cfg.CONF.import_opt('ovs_vsctl_timeout', 'neutron.agent.common.ovs_lib')
LOG = logging.getLogger(__name__)
class Transaction(api.Transaction):
def __init__(self, api, ovsdb_connection, timeout,
check_error=False, log_errors=False):
self.api = api
self.check_error = check_error
self.log_errors = log_errors
self.commands = []
self.results = Queue.Queue(1)
self.ovsdb_connection = ovsdb_connection
self.timeout = timeout
def add(self, command):
"""Add a command to the transaction
returns The command passed as a convenience
"""
self.commands.append(command)
return command
def commit(self):
self.ovsdb_connection.queue_txn(self)
result = self.results.get()
if self.check_error:
if isinstance(result, idlutils.ExceptionResult):
if self.log_errors:
LOG.error(result.tb)
raise result.ex
return result
def do_commit(self):
start_time = time.time()
attempts = 0
while True:
elapsed_time = time.time() - start_time
if attempts > 0 and elapsed_time > self.timeout:
raise RuntimeError("OVS transaction timed out")
attempts += 1
# TODO(twilson) Make sure we don't loop longer than vsctl_timeout
txn = idl.Transaction(self.api.idl)
for i, command in enumerate(self.commands):
LOG.debug("Running txn command(idx=%(idx)s): %(cmd)s",
{'idx': i, 'cmd': command})
try:
command.run_idl(txn)
except Exception:
with excutils.save_and_reraise_exception() as ctx:
txn.abort()
if not self.check_error:
ctx.reraise = False
seqno = self.api.idl.change_seqno
status = txn.commit_block()
if status == txn.TRY_AGAIN:
LOG.debug("OVSDB transaction returned TRY_AGAIN, retrying")
if self.api.idl._session.rpc.status != 0:
LOG.debug("Lost connection to OVSDB, reconnecting!")
self.api.idl.force_reconnect()
idlutils.wait_for_change(
self.api.idl, self.timeout - elapsed_time,
seqno)
continue
elif status == txn.ERROR:
msg = _LE("OVSDB Error: %s") % txn.get_error()
if self.log_errors:
LOG.error(msg)
if self.check_error:
# For now, raise similar error to vsctl/utils.execute()
raise RuntimeError(msg)
return
elif status == txn.ABORTED:
LOG.debug("Transaction aborted")
return
elif status == txn.UNCHANGED:
LOG.debug("Transaction caused no change")
return [cmd.result for cmd in self.commands]
class OvsdbIdl(api.API):
ovsdb_connection = connection.Connection(cfg.CONF.OVS.ovsdb_connection,
cfg.CONF.ovs_vsctl_timeout,
'Open_vSwitch')
def __init__(self, context):
super(OvsdbIdl, self).__init__(context)
OvsdbIdl.ovsdb_connection.start()
self.idl = OvsdbIdl.ovsdb_connection.idl
@property
def _tables(self):
return self.idl.tables
@property
def _ovs(self):
return self._tables['Open_vSwitch'].rows.values()[0]
def transaction(self, check_error=False, log_errors=True, **kwargs):
return Transaction(self, OvsdbIdl.ovsdb_connection,
self.context.vsctl_timeout,
check_error, log_errors)
def add_br(self, name, may_exist=True):
return cmd.AddBridgeCommand(self, name, may_exist)
def del_br(self, name, if_exists=True):
return cmd.DelBridgeCommand(self, name, if_exists)
def br_exists(self, name):
return cmd.BridgeExistsCommand(self, name)
def port_to_br(self, name):
return cmd.PortToBridgeCommand(self, name)
def iface_to_br(self, name):
# For our purposes, ports and interfaces always have the same name
return cmd.PortToBridgeCommand(self, name)
def list_br(self):
return cmd.ListBridgesCommand(self)
def br_get_external_id(self, name, field):
return cmd.BrGetExternalIdCommand(self, name, field)
def br_set_external_id(self, name, field, value):
return cmd.BrSetExternalIdCommand(self, name, field, value)
def db_set(self, table, record, *col_values):
return cmd.DbSetCommand(self, table, record, *col_values)
def db_clear(self, table, record, column):
return cmd.DbClearCommand(self, table, record, column)
def db_get(self, table, record, column):
return cmd.DbGetCommand(self, table, record, column)
def db_list(self, table, records=None, columns=None, if_exists=False):
return cmd.DbListCommand(self, table, records, columns, if_exists)
def db_find(self, table, *conditions, **kwargs):
return cmd.DbFindCommand(self, table, *conditions, **kwargs)
def set_controller(self, bridge, controllers):
return cmd.SetControllerCommand(self, bridge, controllers)
def del_controller(self, bridge):
return cmd.DelControllerCommand(self, bridge)
def get_controller(self, bridge):
return cmd.GetControllerCommand(self, bridge)
def set_fail_mode(self, bridge, mode):
return cmd.SetFailModeCommand(self, bridge, mode)
def add_port(self, bridge, port, may_exist=True):
return cmd.AddPortCommand(self, bridge, port, may_exist)
def del_port(self, port, bridge=None, if_exists=True):
return cmd.DelPortCommand(self, port, bridge, if_exists)
def list_ports(self, bridge):
return cmd.ListPortsCommand(self, bridge)
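# A hedged usage sketch, not part of the upstream module: commands are queued
# on a transaction with add() and executed together on commit(). The context
# class and the bridge/port names below are hypothetical placeholders; a real
# caller supplies a context object exposing vsctl_timeout.
if __name__ == '__main__':
    class _DemoContext(object):
        vsctl_timeout = 10
    ovsdb = OvsdbIdl(_DemoContext())
    txn = ovsdb.transaction(check_error=True)
    txn.add(ovsdb.add_br('br-demo'))
    txn.add(ovsdb.add_port('br-demo', 'demo-port'))
    txn.commit()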
| apache-2.0 |
theilmbh/pyoperant | pyoperant/panels.py | 3 | 1654 | ## Panel classes
class BasePanel(object):
"""Returns a panel instance.
This class should be subclassed to define a local panel configuration.
To build a panel, do the following in the __init__() method of your local
subclass:
1. add instances of the necessary interfaces to the 'interfaces' dict
attribute:
>>> self.interfaces['comedi'] = comedi.ComediInterface(device_name='/dev/comedi0')
2. add inputs and outputs to the 'inputs' and 'outputs' list attributes:
>>> for in_chan in range(4):
self.inputs.append(hwio.BooleanInput(interface=self.interfaces['comedi'],
params = {'subdevice': 2,
'channel': in_chan
},
)
3. add components constructed from your inputs and outputs:
>>> self.hopper = components.Hopper(IR=self.inputs[3],solenoid=self.outputs[4])
4. assign panel methods needed for operant behavior, such as 'reward':
>>> self.reward = self.hopper.reward
5. finally, define a reset() method that will set the entire panel to a
neutral state:
>>> def reset(self):
>>> for output in self.outputs:
>>> output.set(False)
>>> self.house_light.write(True)
>>> return True
"""
def __init__(self, *args,**kwargs):
self.interfaces = {}
self.inputs = []
self.outputs = []
def reset(self):
raise NotImplementedError
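# Hedged example, not part of the original module: a minimal sketch of the
# subclassing recipe described in the BasePanel docstring. The hardware wiring
# of steps 1-4 is rig specific and is therefore only outlined in comments;
# reset() mirrors the neutral-state example given above.
class ExamplePanel(BasePanel):
    def __init__(self, *args, **kwargs):
        super(ExamplePanel, self).__init__(*args, **kwargs)
        # 1. add interface instances to self.interfaces
        # 2. append input/output objects to self.inputs and self.outputs
        # 3. build components (e.g. a hopper) from those inputs and outputs
        # 4. assign behavioral methods, e.g. self.reward = self.hopper.reward
    def reset(self):
        # 5. drive every output low to return the panel to a neutral state
        for output in self.outputs:
            output.set(False)
        return True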
| bsd-3-clause |
lattwood/phantomjs | src/breakpad/src/tools/gyp/tools/pretty_vcproj.py | 137 | 9479 | #!/usr/bin/python2.5
# Copyright 2009 Google Inc.
# All Rights Reserved.
"""Make the format of a vcproj really pretty.
This script normalizes and sorts an xml. It also fetches all the properties
inside linked vsprops and includes them explicitly in the vcproj.
It outputs the resulting xml to stdout.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import sys
from xml.dom.minidom import parse
from xml.dom.minidom import Node
REPLACEMENTS = dict()
ARGUMENTS = None
class CmpTuple:
"""Compare function between 2 tuple."""
def __call__(self, x, y):
(key1, value1) = x
(key2, value2) = y
return cmp(key1, key2)
class CmpNode:
"""Compare function between 2 xml nodes."""
def get_string(self, node):
node_string = "node"
node_string += node.nodeName
if node.nodeValue:
node_string += node.nodeValue
if node.attributes:
# We first sort by name, if present.
node_string += node.getAttribute("Name")
all_nodes = []
for (name, value) in node.attributes.items():
all_nodes.append((name, value))
all_nodes.sort(CmpTuple())
for (name, value) in all_nodes:
node_string += name
node_string += value
return node_string
def __call__(self, x, y):
return cmp(self.get_string(x), self.get_string(y))
def PrettyPrintNode(node, indent=0):
if node.nodeType == Node.TEXT_NODE:
if node.data.strip():
print '%s%s' % (' '*indent, node.data.strip())
return
if node.childNodes:
node.normalize()
# Get the number of attributes
attr_count = 0
if node.attributes:
attr_count = node.attributes.length
# Print the main tag
if attr_count == 0:
print '%s<%s>' % (' '*indent, node.nodeName)
else:
print '%s<%s' % (' '*indent, node.nodeName)
all_attributes = []
for (name, value) in node.attributes.items():
all_attributes.append((name, value))
all_attributes.sort(CmpTuple())
for (name, value) in all_attributes:
print '%s %s="%s"' % (' '*indent, name, value)
print '%s>' % (' '*indent)
if node.nodeValue:
print '%s %s' % (' '*indent, node.nodeValue)
for sub_node in node.childNodes:
PrettyPrintNode(sub_node, indent=indent+2)
print '%s</%s>' % (' '*indent, node.nodeName)
def FlattenFilter(node):
"""Returns a list of all the node and sub nodes."""
node_list = []
if (node.attributes and
node.getAttribute('Name') == '_excluded_files'):
# We don't add the "_excluded_files" filter.
return []
for current in node.childNodes:
if current.nodeName == 'Filter':
node_list.extend(FlattenFilter(current))
else:
node_list.append(current)
return node_list
def FixFilenames(filenames, current_directory):
new_list = []
for filename in filenames:
if filename:
for key in REPLACEMENTS:
filename = filename.replace(key, REPLACEMENTS[key])
os.chdir(current_directory)
filename = filename.strip('"\' ')
if filename.startswith('$'):
new_list.append(filename)
else:
new_list.append(os.path.abspath(filename))
return new_list
def AbsoluteNode(node):
# Make all the properties we know about in this node absolute.
if node.attributes:
for (name, value) in node.attributes.items():
if name in ['InheritedPropertySheets', 'RelativePath',
'AdditionalIncludeDirectories',
'IntermediateDirectory', 'OutputDirectory',
'AdditionalLibraryDirectories']:
# We want to fix up these paths
path_list = value.split(';')
new_list = FixFilenames(path_list, os.path.dirname(ARGUMENTS[1]))
node.setAttribute(name, ';'.join(new_list))
if not value:
node.removeAttribute(name)
def CleanupVcproj(node):
  # For each sub node, we call this function recursively.
for sub_node in node.childNodes:
AbsoluteNode(sub_node)
CleanupVcproj(sub_node)
  # Normalize the node, and remove all extraneous whitespace.
for sub_node in node.childNodes:
if sub_node.nodeType == Node.TEXT_NODE:
sub_node.data = sub_node.data.replace("\r", "")
sub_node.data = sub_node.data.replace("\n", "")
sub_node.data = sub_node.data.rstrip()
  # Fix all the semicolon-separated attributes to be sorted, and also
  # remove the duplicates.
if node.attributes:
for (name, value) in node.attributes.items():
sorted_list = sorted(value.split(';'))
unique_list = []
[unique_list.append(i) for i in sorted_list if not unique_list.count(i)]
node.setAttribute(name, ';'.join(unique_list))
if not value:
node.removeAttribute(name)
if node.childNodes:
node.normalize()
# For each node, take a copy, and remove it from the list.
node_array = []
while node.childNodes and node.childNodes[0]:
# Take a copy of the node and remove it from the list.
current = node.childNodes[0]
node.removeChild(current)
# If the child is a filter, we want to append all its children
# to this same list.
if current.nodeName == 'Filter':
node_array.extend(FlattenFilter(current))
else:
node_array.append(current)
# Sort the list.
node_array.sort(CmpNode())
# Insert the nodes in the correct order.
for new_node in node_array:
# But don't append empty tool node.
if new_node.nodeName == 'Tool':
if new_node.attributes and new_node.attributes.length == 1:
# This one was empty.
continue
if new_node.nodeName == 'UserMacro':
continue
node.appendChild(new_node)
def GetConfiguationNodes(vcproj):
#TODO(nsylvain): Find a better way to navigate the xml.
nodes = []
for node in vcproj.childNodes:
if node.nodeName == "Configurations":
for sub_node in node.childNodes:
if sub_node.nodeName == "Configuration":
nodes.append(sub_node)
return nodes
def GetChildrenVsprops(filename):
dom = parse(filename)
if dom.documentElement.attributes:
vsprops = dom.documentElement.getAttribute('InheritedPropertySheets')
return FixFilenames(vsprops.split(';'), os.path.dirname(filename))
return []
def SeekToNode(node1, child2):
# A text node does not have properties.
if child2.nodeType == Node.TEXT_NODE:
return None
# Get the name of the current node.
current_name = child2.getAttribute("Name")
if not current_name:
# There is no name. We don't know how to merge.
return None
# Look through all the nodes to find a match.
for sub_node in node1.childNodes:
if sub_node.nodeName == child2.nodeName:
name = sub_node.getAttribute("Name")
if name == current_name:
return sub_node
# No match. We give up.
return None
def MergeAttributes(node1, node2):
# No attributes to merge?
if not node2.attributes:
return
for (name, value2) in node2.attributes.items():
# Don't merge the 'Name' attribute.
if name == 'Name':
continue
value1 = node1.getAttribute(name)
if value1:
      # The attribute exists in the main node. If it's equal, we leave it
      # untouched, otherwise we concatenate it.
if value1 != value2:
node1.setAttribute(name, ';'.join([value1, value2]))
else:
      # The attribute does not exist in the main node. We append this one.
node1.setAttribute(name, value2)
    # If the attribute was a property sheet attribute, we remove it, since
    # it is useless.
if name == 'InheritedPropertySheets':
node1.removeAttribute(name)
def MergeProperties(node1, node2):
MergeAttributes(node1, node2)
for child2 in node2.childNodes:
child1 = SeekToNode(node1, child2)
if child1:
MergeProperties(child1, child2)
else:
node1.appendChild(child2.cloneNode(True))
def main(argv):
  """Main function of this vcproj prettifier."""
  global REPLACEMENTS
  global ARGUMENTS
  ARGUMENTS = argv
  # Check that we were given at least the path to a vcproj file.
if len(argv) < 2:
print ('Usage: %s "c:\\path\\to\\vcproj.vcproj" [key1=value1] '
'[key2=value2]' % argv[0])
return
# Parse the keys
for i in range(2, len(argv)):
(key, value) = argv[i].split('=')
REPLACEMENTS[key] = value
# Open the vcproj and parse the xml.
dom = parse(argv[1])
  # First thing we need to do is find the Configuration nodes and merge them
  # with the vsprops they include.
for configuration_node in GetConfiguationNodes(dom.documentElement):
# Get the property sheets associated with this configuration.
vsprops = configuration_node.getAttribute('InheritedPropertySheets')
# Fix the filenames to be absolute.
vsprops_list = FixFilenames(vsprops.strip().split(';'),
os.path.dirname(argv[1]))
# Extend the list of vsprops with all vsprops contained in the current
# vsprops.
for current_vsprops in vsprops_list:
vsprops_list.extend(GetChildrenVsprops(current_vsprops))
# Now that we have all the vsprops, we need to merge them.
for current_vsprops in vsprops_list:
MergeProperties(configuration_node,
parse(current_vsprops).documentElement)
# Now that everything is merged, we need to cleanup the xml.
CleanupVcproj(dom.documentElement)
  # Finally, we use the pretty print function to print the vcproj back to
  # the user.
#print dom.toprettyxml(newl="\n")
PrettyPrintNode(dom.documentElement)
if __name__ == '__main__':
main(sys.argv) | bsd-3-clause |
ahmetcemturan/SFACT | skeinforge_application/skeinforge_plugins/craft_plugins/limit.py | 1 | 8282 | #! /usr/bin/env python
"""
This page is in the table of contents.
This plugin limits the feed rate of the tool head, so that the stepper motors are not driven so fast that they skip steps.
The limit manual page is at:
http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Limit
The maximum z feed rate is defined in speed.
==Operation==
The default 'Activate Limit' checkbox is on. When it is on, the functions described below will work; when it is off, nothing will be done.
==Settings==
===Maximum Initial Feed Rate===
Default is one millimeter per second.
Defines the maximum speed of the initial tool head move.
==Examples==
The following examples limit the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and limit.py.
> python limit.py
This brings up the limit dialog.
> python limit.py Screw Holder Bottom.stl
The limit tool is parsing the file:
Screw Holder Bottom.stl
..
The limit tool has created the file:
.. Screw Holder Bottom_limit.gcode
"""
#Init has to be imported first because it has code to work around the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from datetime import date
from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import archive
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import gcodec
from fabmetheus_utilities import intercircle
from fabmetheus_utilities import settings
from skeinforge_application.skeinforge_utilities import skeinforge_craft
from skeinforge_application.skeinforge_utilities import skeinforge_polyfile
from skeinforge_application.skeinforge_utilities import skeinforge_profile
import math
import os
import sys
__author__ = 'Enrique Perez ([email protected])'
__date__ = '$Date: 2008/28/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def getCraftedText(fileName, gcodeText='', repository=None):
'Limit a gcode file or text.'
return getCraftedTextFromText( archive.getTextIfEmpty(fileName, gcodeText), repository )
def getCraftedTextFromText(gcodeText, repository=None):
'Limit a gcode text.'
if gcodec.isProcedureDoneOrFileIsEmpty(gcodeText, 'limit'):
return gcodeText
if repository == None:
repository = settings.getReadRepository(LimitRepository())
if not repository.activateLimit.value:
return gcodeText
return LimitSkein().getCraftedGcode(gcodeText, repository)
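# Hedged programmatic sketch (the file names are placeholders) of the same
# workflow the docstring above describes from the command line:
#   from fabmetheus_utilities import archive
#   limitedGcode = getCraftedText('Screw Holder Bottom_carve.gcode')
#   archive.writeFileText('Screw Holder Bottom_limit.gcode', limitedGcode)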
def getNewRepository():
'Get new repository.'
return LimitRepository()
def writeOutput(fileName, shouldAnalyze=True):
'Limit a gcode file.'
skeinforge_craft.writeChainTextWithNounMessage(fileName, 'limit', shouldAnalyze)
class LimitRepository:
'A class to handle the limit settings.'
def __init__(self):
'Set the default settings, execute title & settings fileName.'
skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_plugins.craft_plugins.limit.html', self )
self.fileNameInput = settings.FileNameInput().getFromFileName( fabmetheus_interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File for Limit', self, '')
self.openWikiManualHelpPage = settings.HelpPage().getOpenFromAbsolute('http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Limit')
self.activateLimit = settings.BooleanSetting().getFromValue('Activate Limit', self, False)
self.maximumInitialFeedRate = settings.FloatSpin().getFromValue(0.5, 'Maximum Initial Feed Rate (mm/s):', self, 10.0, 1.0)
self.executeTitle = 'Limit'
def execute(self):
'Limit button has been clicked.'
fileNames = skeinforge_polyfile.getFileOrDirectoryTypesUnmodifiedGcode(self.fileNameInput.value, fabmetheus_interpret.getImportPluginFileNames(), self.fileNameInput.wasCancelled)
for fileName in fileNames:
writeOutput(fileName)
class LimitSkein:
'A class to limit a skein of extrusions.'
def __init__(self):
self.distanceFeedRate = gcodec.DistanceFeedRate()
self.feedRateMinute = None
self.lineIndex = 0
self.maximumZDrillFeedRatePerSecond = 987654321.0
self.maximumZFeedRatePerSecond = 2.0
self.oldLocation = None
def getCraftedGcode(self, gcodeText, repository):
'Parse gcode text and store the limit gcode.'
self.repository = repository
self.lines = archive.getTextLines(gcodeText)
self.parseInitialization()
self.maximumZDrillFeedRatePerSecond = min(self.maximumZDrillFeedRatePerSecond, self.maximumZFeedRatePerSecond)
self.maximumZCurrentFeedRatePerSecond = self.maximumZFeedRatePerSecond
for lineIndex in xrange(self.lineIndex, len(self.lines)):
self.parseLine( lineIndex )
return self.distanceFeedRate.output.getvalue()
def getLimitedInitialMovement(self, line, splitLine):
'Get a limited linear movement.'
if self.oldLocation == None:
line = self.distanceFeedRate.getLineWithFeedRate(60.0 * self.repository.maximumInitialFeedRate.value, line, splitLine)
return line
def getZLimitedLine(self, deltaZ, distance, line, splitLine):
'Get a replaced z limited gcode movement line.'
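# The z component of this move's speed is feedRateMinute * deltaZ / distance, converted to mm/s below; when it exceeds the current z limit, the whole feed rate is scaled down proportionally so the z component lands exactly on the limit.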
zFeedRateSecond = self.feedRateMinute * deltaZ / distance / 60.0
if zFeedRateSecond <= self.maximumZCurrentFeedRatePerSecond:
return line
limitedFeedRateMinute = self.feedRateMinute * self.maximumZCurrentFeedRatePerSecond / zFeedRateSecond
return self.distanceFeedRate.getLineWithFeedRate(limitedFeedRateMinute, line, splitLine)
def getZLimitedLineArc(self, line, splitLine):
'Get a replaced z limited gcode arc movement line.'
self.feedRateMinute = gcodec.getFeedRateMinute(self.feedRateMinute, splitLine)
if self.feedRateMinute == None or self.oldLocation == None:
return line
relativeLocation = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
self.oldLocation += relativeLocation
deltaZ = abs(relativeLocation.z)
distance = gcodec.getArcDistance(relativeLocation, splitLine)
return self.getZLimitedLine(deltaZ, distance, line, splitLine)
def getZLimitedLineLinear(self, line, location, splitLine):
'Get a replaced z limited gcode linear movement line.'
self.feedRateMinute = gcodec.getFeedRateMinute(self.feedRateMinute, splitLine)
if location == self.oldLocation:
return ''
if self.feedRateMinute == None or self.oldLocation == None:
return line
deltaZ = abs(location.z - self.oldLocation.z)
distance = abs(location - self.oldLocation)
return self.getZLimitedLine(deltaZ, distance, line, splitLine)
def parseInitialization(self):
'Parse gcode initialization and store the parameters.'
for self.lineIndex in xrange(len(self.lines)):
line = self.lines[self.lineIndex]
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
self.distanceFeedRate.parseSplitLine(firstWord, splitLine)
if firstWord == '(</extruderInitialization>)':
self.distanceFeedRate.addTagBracketedProcedure('limit')
return
elif firstWord == '(<maximumZDrillFeedRatePerSecond>':
self.maximumZDrillFeedRatePerSecond = float(splitLine[1])
elif firstWord == '(<maximumZFeedRatePerSecond>':
self.maximumZFeedRatePerSecond = float(splitLine[1])
self.distanceFeedRate.addLine(line)
def parseLine( self, lineIndex ):
'Parse a gcode line and add it to the limit skein.'
line = self.lines[lineIndex].lstrip()
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
if len(splitLine) < 1:
return
firstWord = gcodec.getFirstWord(splitLine)
if firstWord == 'G1':
location = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
line = self.getLimitedInitialMovement(line, splitLine)
line = self.getZLimitedLineLinear(line, location, splitLine)
self.oldLocation = location
elif firstWord == 'G2' or firstWord == 'G3':
line = self.getZLimitedLineArc(line, splitLine)
elif firstWord == 'M101':
self.maximumZCurrentFeedRatePerSecond = self.maximumZDrillFeedRatePerSecond
elif firstWord == 'M103':
self.maximumZCurrentFeedRatePerSecond = self.maximumZFeedRatePerSecond
self.distanceFeedRate.addLine(line)
def main():
'Display the limit dialog.'
if len(sys.argv) > 1:
writeOutput(' '.join(sys.argv[1 :]))
else:
settings.startMainLoopFromConstructor(getNewRepository())
if __name__ == '__main__':
main()
| agpl-3.0 |
staranjeet/fjord | vendor/packages/mock/setup.py | 91 | 2134 | #! /usr/bin/env python
# Copyright (C) 2007-2012 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# http://www.voidspace.org.uk/python/mock/
from mock import __version__
import os
NAME = 'mock'
MODULES = ['mock']
DESCRIPTION = 'A Python Mocking and Patching Library for Testing'
URL = "http://www.voidspace.org.uk/python/mock/"
readme = os.path.join(os.path.dirname(__file__), 'README.txt')
LONG_DESCRIPTION = open(readme).read()
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Programming Language :: Python :: Implementation :: Jython',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Testing',
]
AUTHOR = 'Michael Foord'
AUTHOR_EMAIL = '[email protected]'
KEYWORDS = ("testing test mock mocking unittest patching "
"stubs fakes doubles").split(' ')
params = dict(
name=NAME,
version=__version__,
py_modules=MODULES,
# metadata for upload to PyPI
author=AUTHOR,
author_email=AUTHOR_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
keywords=KEYWORDS,
url=URL,
classifiers=CLASSIFIERS,
)
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
else:
params['tests_require'] = ['unittest2']
params['test_suite'] = 'unittest2.collector'
setup(**params)
| bsd-3-clause |
K-Constantine/Amaraki | core/deps/gyp/test/mac/gyptest-debuginfo.py | 349 | 1152 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Tests things related to debug information generation.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
test.run_gyp('test.gyp', chdir='debuginfo')
test.build('test.gyp', test.ALL, chdir='debuginfo')
test.built_file_must_exist('libnonbundle_shared_library.dylib.dSYM',
chdir='debuginfo')
test.built_file_must_exist('nonbundle_loadable_module.so.dSYM',
chdir='debuginfo')
test.built_file_must_exist('nonbundle_executable.dSYM',
chdir='debuginfo')
test.built_file_must_exist('bundle_shared_library.framework.dSYM',
chdir='debuginfo')
test.built_file_must_exist('bundle_loadable_module.bundle.dSYM',
chdir='debuginfo')
test.built_file_must_exist('My App.app.dSYM',
chdir='debuginfo')
test.pass_test()
| mit |
trankmichael/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 402 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vectors. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional to one another. The cosine
distance is invariant to a scaling of the data; as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveform 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (i.e. "cityblock" distance) is much smaller than its
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, which characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate waveforms 1 and 2 at all,
thus the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
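# Square-wave helper: the sign of a cosine is a +/-1 square wave with the same period.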
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
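# Only entries with |noise| >= .997 survive; with noise uniform on (-1, 1] that is just a handful of the n_features time points.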
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
| bsd-3-clause |
mfnch/pyrtist | old/web/in/examples/create_example.py | 1 | 2754 | import sys, os, os.path, commands, re
usage = "USAGE: python create_example.py box.example"
if len(sys.argv) != 2:
raise "Expected one argument.\n" + usage
example_file = sys.argv[1]
print "Working on '%s'..." % example_file
# Default values for variables which may be changed inside example_file
in_directory = ".."
box = "box -l g"
convert = "convert"
convert_opts = ""
highlight = "%s/../katehighlight/bin/highlight" % in_directory
rst_skeleton = "skeleton"
rst_out = None
title = None
description = None
figure_caption = None
box_source = None
out_eps = None
out_png = None
_f = open(example_file)
exec(_f)
_f.close()
if title == None:
title = "Box example: %s" % crumb
print "Removing old figure if present..."
if out_eps and os.access(out_eps, os.W_OK):
try:
os.remove(out_eps)
except:
print "Failed to remove the figure: continuing anyway..."
print "Executing the Box program..."
print commands.getoutput("%s %s" % (box, box_source))
have_figure = False
if out_eps and os.access(out_eps, os.R_OK):
print "Adjusting eps figure..."
out_png = os.path.splitext(out_eps)[0] + ".png"
print commands.getoutput("%s %s %s %s" %
(convert, convert_opts, out_eps, out_png))
print out_png
have_figure = os.access(out_png, os.R_OK)
if not have_figure:
raise "The figure '%s' has not been produced: stopping here!" % out_png
print "Highlighting the Box source..."
highlighted_source = "/tmp/h.html"
print commands.getoutput("%s Box %s %s" % (highlight, box_source, highlighted_source))
f = open(highlighted_source, "r")
htmlized_box_program = f.read()
f.close()
print "Opening the skeleton..."
f = open(rst_skeleton, "r")
data_skeleton = f.read()
f.close()
vars_dict = {
'title': title,
'description': description,
'crumb': crumb,
'box_file':box_source,
'figure_caption':figure_caption,
'image': out_png,
'htmlized_box_program': htmlized_box_program
}
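# Skeleton placeholders look like $name$; each match is replaced below with the matching vars_dict entry (unknown names are left untouched with a warning).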
r = re.compile("[$][^$]*[$]")
def substitutor(var):
try:
var_name = var.group(0)[1:-1]
except:
raise "Error when substituting variable."
if vars_dict.has_key(var_name):
return str(vars_dict[var_name])
print "WARNING: Variable '%s' not found!" % var_name
return var.group(0)
print "Filling the skeleton..."
out = re.sub(r, substitutor, data_skeleton)
f = open(rst_out, "w")
f.write(out)
f.close()
print "Output produced (%s)" % rst_out
print "Generating thumbnail..."
html_out = os.path.splitext(out_png)[0] + ".html"
out_thumb_png = "small_" + out_png
scale_opts = "-scale 100"
print commands.getoutput("%s %s %s %s"
% (convert, scale_opts, out_png, out_thumb_png))
f = open("thumbnails.dat", "a")
f.write("%s, %s\n" % (html_out, out_thumb_png))
f.close()
| lgpl-2.1 |
amir-qayyum-khan/edx-platform | cms/djangoapps/contentstore/management/commands/fix_not_found.py | 62 | 1152 | """
Script for fixing the item not found errors in a course
"""
from django.core.management.base import BaseCommand, CommandError
from opaque_keys.edx.keys import CourseKey
from xmodule.modulestore.django import modulestore
from xmodule.modulestore import ModuleStoreEnum
# To run from command line: ./manage.py cms fix_not_found course-v1:org+course+run
class Command(BaseCommand):
"""Fix a course's item not found errors"""
help = "Fix a course's ItemNotFound errors"
def add_arguments(self, parser):
parser.add_argument('course_id')
def handle(self, *args, **options):
"""Execute the command"""
course_id = options.get('course_id', None)
course_key = CourseKey.from_string(course_id)
# for now only support on split mongo
# pylint: disable=protected-access
owning_store = modulestore()._get_modulestore_for_courselike(course_key)
if hasattr(owning_store, 'fix_not_found'):
owning_store.fix_not_found(course_key, ModuleStoreEnum.UserID.mgmt_command)
else:
raise CommandError("The owning modulestore does not support this command.")
| agpl-3.0 |
Jgarcia-IAS/Fidelizacion_odoo | openerp/addons/project/res_partner.py | 334 | 1953 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
class res_partner(osv.osv):
def _task_count(self, cr, uid, ids, field_name, arg, context=None):
Task = self.pool['project.task']
return {
partner_id: Task.search_count(cr,uid, [('partner_id', '=', partner_id)], context=context)
for partner_id in ids
}
""" Inherits partner and adds Tasks information in the partner form """
_inherit = 'res.partner'
_columns = {
'task_ids': fields.one2many('project.task', 'partner_id', 'Tasks'),
'task_count': fields.function(_task_count, string='# Tasks', type='integer'),
}
def copy(self, cr, uid, record_id, default=None, context=None):
if default is None:
default = {}
default['task_ids'] = []
return super(res_partner, self).copy(
cr, uid, record_id, default=default, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
kohnle-lernmodule/exeLearningPlus1_04 | twisted/test/test_stateful.py | 20 | 1570 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for twisted.protocols.stateful
"""
from twisted.test import test_protocols
from twisted.protocols.stateful import StatefulProtocol
from struct import pack, unpack
class MyInt32StringReceiver(StatefulProtocol):
MAX_LENGTH = 99999
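# StatefulProtocol drives the parser with (callback, byte count) state tuples: each callback receives exactly that many bytes and returns the next state, so the length header and the string body alternate.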
def getInitialState(self):
return self._getHeader, 4
def _getHeader(self, msg):
length, = unpack("!i", msg)
if length > self.MAX_LENGTH:
self.transport.loseConnection()
return
return self._getString, length
def _getString(self, msg):
self.stringReceived(msg)
return self._getHeader, 4
def stringReceived(self, msg):
"""Override this.
"""
raise NotImplementedError
def sendString(self, data):
"""Send an int32-prefixed string to the other end of the connection.
"""
self.transport.write(pack("!i",len(data))+data)
class TestInt32(MyInt32StringReceiver):
def connectionMade(self):
self.received = []
def stringReceived(self, s):
self.received.append(s)
MAX_LENGTH = 50
closed = 0
def connectionLost(self, reason):
self.closed = 1
class Int32TestCase(test_protocols.Int32TestCase):
protocol = TestInt32
def testBigReceive(self):
r = self.getProtocol()
big = ""
for s in self.strings * 4:
big += pack("!i",len(s))+s
r.dataReceived(big)
self.assertEquals(r.received, self.strings * 4)
| gpl-2.0 |
PythonSanSebastian/epcon | p3/management/commands/attendify_speakers_xlsx.py | 2 | 6861 | # -*- coding: utf-8 -*-
""" Update an Attendify speakers XLSX file with the current list of
speakers.
Usage: manage.py attendify_speakers_xlsx ep2016 speakers.xlsx
Note that for Attendify you have to download the speakers before
running this script, since they add metadata to the downloaded
file which has to be kept around when uploading it again.
The script updates speakers.xlsx in place. Unfortunately, Attendify
currently has a bug in that it doesn't accept the file format
generated by openpyxl. Opening the file in LibreOffice and saving
it (without changes) fixes this as a work-around.
Attendify Worksheet "Speakers" format
-------------------------------------
Row A4: First Name, Last Name, Company (Optional), Position
(Optional), Group (Optional), Profile (Optional), Email
(Optional), Phone (Optional), Twitter (Optional), Facebook
(Optional), LinkedIn (Optional), Google+ (Optional), UID (do not
delete)
Row A6: Start of data
"""
from django.core.management.base import BaseCommand, CommandError
from django.core import urlresolvers
from django.conf import settings
from django.utils.html import strip_tags
from conference import models as cmodels
from conference import utils
from p3 import models
import datetime
from collections import defaultdict
from optparse import make_option
import operator
import markdown2
import openpyxl
### Globals
# Debug output ?
_debug = 1
# These must match the talk .type or .admin_type
from accepted_talks import TYPE_NAMES
### Helpers
def profile_url(user):
return urlresolvers.reverse('conference-profile',
args=[user.attendeeprofile.slug])
def format_text(text, remove_tags=False, output_html=True):
# Remove whitespace
text = text.strip()
if not text:
return text
# Remove links, tags, etc.
if remove_tags:
text = strip_tags(text)
# Remove quotes
if text[0] == '"' and text[-1] == '"':
text = text[1:-1]
# Convert markdown markup to HTML
if output_html:
text = markdown2.markdown(text)
return text
def add_speaker(data, speaker):
# Get speaker profile
user = speaker.user
profile = cmodels.AttendeeProfile.objects.get(user=user)
p3profile = models.P3Profile.objects.get(profile=profile)
# Skip speakers without public profile. Speaker profiles must be
# public, but you never know. See conference/models.py
if profile.visibility != 'p':
return
# Collect data
first_name = speaker.user.first_name.title()
last_name = speaker.user.last_name.title()
company = profile.company
position = profile.job_title
profile_text = (u'<a href="%s%s">Profile on EuroPython Website</a>' %
(settings.DEFAULT_URL_PREFIX, profile_url(user)))
twitter = p3profile.twitter
if twitter.startswith(('https://twitter.com/', 'http://twitter.com/')):
twitter = twitter.split('/')[-1]
# Skip special entries
full_name = first_name + last_name
if first_name == 'To Be' and last_name == 'Announced':
return
# UID
uid = u''
data.append((
first_name,
last_name,
company,
position,
u'', # group
profile_text,
u'', # email: not published
u'', # phone: not published
twitter,
u'', # facebook
u'', # linkedin
u'', # google+
uid))
# Start row of data in spreadsheet (Python 0-based index)
SPEAKERS_WS_START_DATA = 5
# Column number of the UID column (Python 0-based index)
SPEAKERS_UID_COLUMN = 12
# Number of columns to make row unique (first, last, company)
SPEAKERS_UNIQUE_COLS = 3
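# These offsets mirror the sheet layout described in the module docstring: data starts on spreadsheet row 6 (0-based index 5) and the UID sits in the 13th column (0-based index 12).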
def update_speakers(speakers_xlsx, new_data, updated_xlsx=None):
# Load workbook
wb = openpyxl.load_workbook(speakers_xlsx)
assert wb.sheetnames == [u'Instructions', u'Speakers', u'System']
ws = wb['Speakers']
# Extract data values
ws_data = list(ws.values)[SPEAKERS_WS_START_DATA:]
print ('read %i data lines' % len(ws_data))
print ('first line: %r' % ws_data[:1])
print ('last line: %r' % ws_data[-1:])
# Reconcile UIDs / talks
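# The UID column is populated by Attendify in the downloaded file; rows are keyed on (first name, last name, company) so regenerated entries keep their existing UID.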
uids = {}
for line in ws_data:
uid = line[SPEAKERS_UID_COLUMN]
if not uid:
continue
uids[tuple(line[:SPEAKERS_UNIQUE_COLS])] = uid
# Add UID to new data
new_speakers = []
for line in new_data:
key = tuple(line[:SPEAKERS_UNIQUE_COLS])
if key not in uids:
print ('New speaker %s found' % (key,))
uid = u''
else:
uid = uids[key]
line = tuple(line[:SPEAKERS_UID_COLUMN]) + (uid,)
new_speakers.append(line)
new_data = new_speakers
# Replace old data with new data
old_data_rows = len(ws_data)
new_data_rows = len(new_data)
print ('new data: %i data lines' % new_data_rows)
offset = SPEAKERS_WS_START_DATA + 1
print ('new_data = %i rows' % len(new_data))
for j, row in enumerate(ws[offset: offset + new_data_rows - 1]):
new_row = new_data[j]
if _debug:
print ('updating row %i with %r' % (j, new_row))
if len(row) > len(new_row):
row = row[:len(new_row)]
for i, cell in enumerate(row):
cell.value = new_row[i]
# Overwrite unused cells with None
if new_data_rows < old_data_rows:
for j, row in enumerate(ws[offset + new_data_rows + 1:
offset + old_data_rows + 1]):
if _debug:
print ('clearing row %i' % (j,))
for i, cell in enumerate(row):
cell.value = None
# Write updated data
if updated_xlsx is None:
updated_xlsx = speakers_xlsx
wb.save(updated_xlsx)
###
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
# make_option('--option',
# action='store',
# dest='option_attr',
# default=0,
# type='int',
# help='Help text',
# ),
)
def handle(self, *args, **options):
try:
conference = args[0]
except IndexError:
raise CommandError('conference not specified')
try:
speakers_xlsx = args[1]
except IndexError:
raise CommandError('XLSX file not specified')
# Get speaker records
speakers = set()
talks = cmodels.Talk.objects.accepted(conference)
for t in talks:
speakers |= set(t.get_all_speakers())
# Collect profiles
data = []
for speaker in speakers:
add_speaker(data, speaker)
data.sort()
# Update spreadsheet with new data
update_speakers(speakers_xlsx, data)
| bsd-2-clause |
modulexcite/PTVS | Python/Tests/TestData/VirtualEnv/env/Lib/encodings/iso8859_5.py | 93 | 13578 | """ Python Character Mapping Codec iso8859_5 generated from 'MAPPINGS/ISO8859/8859-5.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-5',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0401' # 0xA1 -> CYRILLIC CAPITAL LETTER IO
u'\u0402' # 0xA2 -> CYRILLIC CAPITAL LETTER DJE
u'\u0403' # 0xA3 -> CYRILLIC CAPITAL LETTER GJE
u'\u0404' # 0xA4 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
u'\u0405' # 0xA5 -> CYRILLIC CAPITAL LETTER DZE
u'\u0406' # 0xA6 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0407' # 0xA7 -> CYRILLIC CAPITAL LETTER YI
u'\u0408' # 0xA8 -> CYRILLIC CAPITAL LETTER JE
u'\u0409' # 0xA9 -> CYRILLIC CAPITAL LETTER LJE
u'\u040a' # 0xAA -> CYRILLIC CAPITAL LETTER NJE
u'\u040b' # 0xAB -> CYRILLIC CAPITAL LETTER TSHE
u'\u040c' # 0xAC -> CYRILLIC CAPITAL LETTER KJE
u'\xad' # 0xAD -> SOFT HYPHEN
u'\u040e' # 0xAE -> CYRILLIC CAPITAL LETTER SHORT U
u'\u040f' # 0xAF -> CYRILLIC CAPITAL LETTER DZHE
u'\u0410' # 0xB0 -> CYRILLIC CAPITAL LETTER A
u'\u0411' # 0xB1 -> CYRILLIC CAPITAL LETTER BE
u'\u0412' # 0xB2 -> CYRILLIC CAPITAL LETTER VE
u'\u0413' # 0xB3 -> CYRILLIC CAPITAL LETTER GHE
u'\u0414' # 0xB4 -> CYRILLIC CAPITAL LETTER DE
u'\u0415' # 0xB5 -> CYRILLIC CAPITAL LETTER IE
u'\u0416' # 0xB6 -> CYRILLIC CAPITAL LETTER ZHE
u'\u0417' # 0xB7 -> CYRILLIC CAPITAL LETTER ZE
u'\u0418' # 0xB8 -> CYRILLIC CAPITAL LETTER I
u'\u0419' # 0xB9 -> CYRILLIC CAPITAL LETTER SHORT I
u'\u041a' # 0xBA -> CYRILLIC CAPITAL LETTER KA
u'\u041b' # 0xBB -> CYRILLIC CAPITAL LETTER EL
u'\u041c' # 0xBC -> CYRILLIC CAPITAL LETTER EM
u'\u041d' # 0xBD -> CYRILLIC CAPITAL LETTER EN
u'\u041e' # 0xBE -> CYRILLIC CAPITAL LETTER O
u'\u041f' # 0xBF -> CYRILLIC CAPITAL LETTER PE
u'\u0420' # 0xC0 -> CYRILLIC CAPITAL LETTER ER
u'\u0421' # 0xC1 -> CYRILLIC CAPITAL LETTER ES
u'\u0422' # 0xC2 -> CYRILLIC CAPITAL LETTER TE
u'\u0423' # 0xC3 -> CYRILLIC CAPITAL LETTER U
u'\u0424' # 0xC4 -> CYRILLIC CAPITAL LETTER EF
u'\u0425' # 0xC5 -> CYRILLIC CAPITAL LETTER HA
u'\u0426' # 0xC6 -> CYRILLIC CAPITAL LETTER TSE
u'\u0427' # 0xC7 -> CYRILLIC CAPITAL LETTER CHE
u'\u0428' # 0xC8 -> CYRILLIC CAPITAL LETTER SHA
u'\u0429' # 0xC9 -> CYRILLIC CAPITAL LETTER SHCHA
u'\u042a' # 0xCA -> CYRILLIC CAPITAL LETTER HARD SIGN
u'\u042b' # 0xCB -> CYRILLIC CAPITAL LETTER YERU
u'\u042c' # 0xCC -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u042d' # 0xCD -> CYRILLIC CAPITAL LETTER E
u'\u042e' # 0xCE -> CYRILLIC CAPITAL LETTER YU
u'\u042f' # 0xCF -> CYRILLIC CAPITAL LETTER YA
u'\u0430' # 0xD0 -> CYRILLIC SMALL LETTER A
u'\u0431' # 0xD1 -> CYRILLIC SMALL LETTER BE
u'\u0432' # 0xD2 -> CYRILLIC SMALL LETTER VE
u'\u0433' # 0xD3 -> CYRILLIC SMALL LETTER GHE
u'\u0434' # 0xD4 -> CYRILLIC SMALL LETTER DE
u'\u0435' # 0xD5 -> CYRILLIC SMALL LETTER IE
u'\u0436' # 0xD6 -> CYRILLIC SMALL LETTER ZHE
u'\u0437' # 0xD7 -> CYRILLIC SMALL LETTER ZE
u'\u0438' # 0xD8 -> CYRILLIC SMALL LETTER I
u'\u0439' # 0xD9 -> CYRILLIC SMALL LETTER SHORT I
u'\u043a' # 0xDA -> CYRILLIC SMALL LETTER KA
u'\u043b' # 0xDB -> CYRILLIC SMALL LETTER EL
u'\u043c' # 0xDC -> CYRILLIC SMALL LETTER EM
u'\u043d' # 0xDD -> CYRILLIC SMALL LETTER EN
u'\u043e' # 0xDE -> CYRILLIC SMALL LETTER O
u'\u043f' # 0xDF -> CYRILLIC SMALL LETTER PE
u'\u0440' # 0xE0 -> CYRILLIC SMALL LETTER ER
u'\u0441' # 0xE1 -> CYRILLIC SMALL LETTER ES
u'\u0442' # 0xE2 -> CYRILLIC SMALL LETTER TE
u'\u0443' # 0xE3 -> CYRILLIC SMALL LETTER U
u'\u0444' # 0xE4 -> CYRILLIC SMALL LETTER EF
u'\u0445' # 0xE5 -> CYRILLIC SMALL LETTER HA
u'\u0446' # 0xE6 -> CYRILLIC SMALL LETTER TSE
u'\u0447' # 0xE7 -> CYRILLIC SMALL LETTER CHE
u'\u0448' # 0xE8 -> CYRILLIC SMALL LETTER SHA
u'\u0449' # 0xE9 -> CYRILLIC SMALL LETTER SHCHA
u'\u044a' # 0xEA -> CYRILLIC SMALL LETTER HARD SIGN
u'\u044b' # 0xEB -> CYRILLIC SMALL LETTER YERU
u'\u044c' # 0xEC -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u044d' # 0xED -> CYRILLIC SMALL LETTER E
u'\u044e' # 0xEE -> CYRILLIC SMALL LETTER YU
u'\u044f' # 0xEF -> CYRILLIC SMALL LETTER YA
u'\u2116' # 0xF0 -> NUMERO SIGN
u'\u0451' # 0xF1 -> CYRILLIC SMALL LETTER IO
u'\u0452' # 0xF2 -> CYRILLIC SMALL LETTER DJE
u'\u0453' # 0xF3 -> CYRILLIC SMALL LETTER GJE
u'\u0454' # 0xF4 -> CYRILLIC SMALL LETTER UKRAINIAN IE
u'\u0455' # 0xF5 -> CYRILLIC SMALL LETTER DZE
u'\u0456' # 0xF6 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0457' # 0xF7 -> CYRILLIC SMALL LETTER YI
u'\u0458' # 0xF8 -> CYRILLIC SMALL LETTER JE
u'\u0459' # 0xF9 -> CYRILLIC SMALL LETTER LJE
u'\u045a' # 0xFA -> CYRILLIC SMALL LETTER NJE
u'\u045b' # 0xFB -> CYRILLIC SMALL LETTER TSHE
u'\u045c' # 0xFC -> CYRILLIC SMALL LETTER KJE
u'\xa7' # 0xFD -> SECTION SIGN
u'\u045e' # 0xFE -> CYRILLIC SMALL LETTER SHORT U
u'\u045f' # 0xFF -> CYRILLIC SMALL LETTER DZHE
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| apache-2.0 |
Nitaco/ansible | test/units/modules/network/nxos/test_nxos_portchannel.py | 53 | 2843 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import _nxos_portchannel
from .nxos_module import TestNxosModule, set_module_args
class TestNxosPortchannelModule(TestNxosModule):
module = _nxos_portchannel
def setUp(self):
super(TestNxosPortchannelModule, self).setUp()
self.mock_run_commands = patch('ansible.modules.network.nxos._nxos_portchannel.run_commands')
self.run_commands = self.mock_run_commands.start()
self.mock_load_config = patch('ansible.modules.network.nxos._nxos_portchannel.load_config')
self.load_config = self.mock_load_config.start()
self.mock_get_config = patch('ansible.modules.network.nxos._nxos_portchannel.get_config')
self.get_config = self.mock_get_config.start()
self.mock_get_capabilities = patch('ansible.modules.network.nxos._nxos_portchannel.get_capabilities')
self.get_capabilities = self.mock_get_capabilities.start()
self.get_capabilities.return_value = {'network_api': 'cliconf'}
def tearDown(self):
super(TestNxosPortchannelModule, self).tearDown()
self.mock_run_commands.stop()
self.mock_load_config.stop()
self.mock_get_config.stop()
self.mock_get_capabilities.stop()
def load_fixtures(self, commands=None, device=''):
self.load_config.return_value = None
def test_nxos_portchannel(self):
set_module_args(dict(group='99',
members=['Ethernet2/1', 'Ethernet2/2'],
mode='active',
state='present'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['interface port-channel99',
'interface Ethernet2/1',
'channel-group 99 mode active',
'interface Ethernet2/2',
'channel-group 99 mode active'])
| gpl-3.0 |
skidzo/sympy | sympy/physics/quantum/cartesian.py | 98 | 8766 | """Operators and states for 1D cartesian position and momentum.
TODO:
* Add 3D classes to mappings in operatorset.py
"""
from __future__ import print_function, division
from sympy import DiracDelta, exp, I, Interval, pi, S, sqrt
from sympy.core.compatibility import range
from sympy.physics.quantum.constants import hbar
from sympy.physics.quantum.hilbert import L2
from sympy.physics.quantum.operator import DifferentialOperator, HermitianOperator
from sympy.physics.quantum.state import Ket, Bra, State
__all__ = [
'XOp',
'YOp',
'ZOp',
'PxOp',
'X',
'Y',
'Z',
'Px',
'XKet',
'XBra',
'PxKet',
'PxBra',
'PositionState3D',
'PositionKet3D',
'PositionBra3D'
]
#-------------------------------------------------------------------------
# Position operators
#-------------------------------------------------------------------------
class XOp(HermitianOperator):
"""1D cartesian position operator."""
@classmethod
def default_args(self):
return ("X",)
@classmethod
def _eval_hilbert_space(self, args):
return L2(Interval(S.NegativeInfinity, S.Infinity))
def _eval_commutator_PxOp(self, other):
return I*hbar
def _apply_operator_XKet(self, ket):
return ket.position*ket
def _apply_operator_PositionKet3D(self, ket):
return ket.position_x*ket
def _represent_PxKet(self, basis, **options):
index = options.pop("index", 1)
states = basis._enumerate_state(2, start_index=index)
coord1 = states[0].momentum
coord2 = states[1].momentum
d = DifferentialOperator(coord1)
delta = DiracDelta(coord1 - coord2)
return I*hbar*(d*delta)
class YOp(HermitianOperator):
""" Y cartesian coordinate operator (for 2D or 3D systems) """
@classmethod
def default_args(self):
return ("Y",)
@classmethod
def _eval_hilbert_space(self, args):
return L2(Interval(S.NegativeInfinity, S.Infinity))
def _apply_operator_PositionKet3D(self, ket):
return ket.position_y*ket
class ZOp(HermitianOperator):
""" Z cartesian coordinate operator (for 3D systems) """
@classmethod
def default_args(self):
return ("Z",)
@classmethod
def _eval_hilbert_space(self, args):
return L2(Interval(S.NegativeInfinity, S.Infinity))
def _apply_operator_PositionKet3D(self, ket):
return ket.position_z*ket
#-------------------------------------------------------------------------
# Momentum operators
#-------------------------------------------------------------------------
class PxOp(HermitianOperator):
"""1D cartesian momentum operator."""
@classmethod
def default_args(self):
return ("Px",)
@classmethod
def _eval_hilbert_space(self, args):
return L2(Interval(S.NegativeInfinity, S.Infinity))
def _apply_operator_PxKet(self, ket):
return ket.momentum*ket
def _represent_XKet(self, basis, **options):
index = options.pop("index", 1)
states = basis._enumerate_state(2, start_index=index)
coord1 = states[0].position
coord2 = states[1].position
d = DifferentialOperator(coord1)
delta = DiracDelta(coord1 - coord2)
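# Standard position-space matrix element of momentum: <x1|Px|x2> = -i*hbar * d/dx1 DiracDelta(x1 - x2).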
return -I*hbar*(d*delta)
X = XOp('X')
Y = YOp('Y')
Z = ZOp('Z')
Px = PxOp('Px')
#-------------------------------------------------------------------------
# Position eigenstates
#-------------------------------------------------------------------------
class XKet(Ket):
"""1D cartesian position eigenket."""
@classmethod
def _operators_to_state(self, op, **options):
return self.__new__(self, *_lowercase_labels(op), **options)
def _state_to_operators(self, op_class, **options):
return op_class.__new__(op_class,
*_uppercase_labels(self), **options)
@classmethod
def default_args(self):
return ("x",)
@classmethod
def dual_class(self):
return XBra
@property
def position(self):
"""The position of the state."""
return self.label[0]
def _enumerate_state(self, num_states, **options):
return _enumerate_continuous_1D(self, num_states, **options)
def _eval_innerproduct_XBra(self, bra, **hints):
return DiracDelta(self.position - bra.position)
def _eval_innerproduct_PxBra(self, bra, **hints):
return exp(-I*self.position*bra.momentum/hbar)/sqrt(2*pi*hbar)
class XBra(Bra):
"""1D cartesian position eigenbra."""
@classmethod
def default_args(self):
return ("x",)
@classmethod
def dual_class(self):
return XKet
@property
def position(self):
"""The position of the state."""
return self.label[0]
class PositionState3D(State):
""" Base class for 3D cartesian position eigenstates """
@classmethod
def _operators_to_state(self, op, **options):
return self.__new__(self, *_lowercase_labels(op), **options)
def _state_to_operators(self, op_class, **options):
return op_class.__new__(op_class,
*_uppercase_labels(self), **options)
@classmethod
def default_args(self):
return ("x", "y", "z")
@property
def position_x(self):
""" The x coordinate of the state """
return self.label[0]
@property
def position_y(self):
""" The y coordinate of the state """
return self.label[1]
@property
def position_z(self):
""" The z coordinate of the state """
return self.label[2]
class PositionKet3D(Ket, PositionState3D):
""" 3D cartesian position eigenket """
def _eval_innerproduct_PositionBra3D(self, bra, **options):
x_diff = self.position_x - bra.position_x
y_diff = self.position_y - bra.position_y
z_diff = self.position_z - bra.position_z
return DiracDelta(x_diff)*DiracDelta(y_diff)*DiracDelta(z_diff)
@classmethod
def dual_class(self):
return PositionBra3D
class PositionBra3D(Bra, PositionState3D):
""" 3D cartesian position eigenbra """
@classmethod
def dual_class(self):
return PositionKet3D
#-------------------------------------------------------------------------
# Momentum eigenstates
#-------------------------------------------------------------------------
class PxKet(Ket):
"""1D cartesian momentum eigenket."""
@classmethod
def _operators_to_state(self, op, **options):
return self.__new__(self, *_lowercase_labels(op), **options)
def _state_to_operators(self, op_class, **options):
return op_class.__new__(op_class,
*_uppercase_labels(self), **options)
@classmethod
def default_args(self):
return ("px",)
@classmethod
def dual_class(self):
return PxBra
@property
def momentum(self):
"""The momentum of the state."""
return self.label[0]
def _enumerate_state(self, *args, **options):
return _enumerate_continuous_1D(self, *args, **options)
def _eval_innerproduct_XBra(self, bra, **hints):
return exp(I*self.momentum*bra.position/hbar)/sqrt(2*pi*hbar)
def _eval_innerproduct_PxBra(self, bra, **hints):
return DiracDelta(self.momentum - bra.momentum)
class PxBra(Bra):
"""1D cartesian momentum eigenbra."""
@classmethod
def default_args(self):
return ("px",)
@classmethod
def dual_class(self):
return PxKet
@property
def momentum(self):
"""The momentum of the state."""
return self.label[0]
#-------------------------------------------------------------------------
# Global helper functions
#-------------------------------------------------------------------------
def _enumerate_continuous_1D(*args, **options):
state = args[0]
num_states = args[1]
state_class = state.__class__
index_list = options.pop('index_list', [])
if len(index_list) == 0:
start_index = options.pop('start_index', 1)
index_list = list(range(start_index, start_index + num_states))
enum_states = [0 for i in range(len(index_list))]
for i, ind in enumerate(index_list):
label = state.args[0]
enum_states[i] = state_class(str(label) + "_" + str(ind), **options)
return enum_states
def _lowercase_labels(ops):
if not isinstance(ops, set):
ops = [ops]
return [str(arg.label[0]).lower() for arg in ops]
def _uppercase_labels(ops):
if not isinstance(ops, set):
ops = [ops]
new_args = [str(arg.label[0])[0].upper() +
str(arg.label[0])[1:] for arg in ops]
return new_args
| bsd-3-clause |
pelodelfuego/word2vec-toolbox | toolbox/cpLib/test/testConcept.py | 1 | 2049 | #!/usr/bin/env python
# encoding: utf-8
import unittest
import cpLib.concept as cp
import cpLib.conceptDB as db
import numpy as np
class ConceptTest(unittest.TestCase):
def setUp(self):
self.d = db.DB('../data/voc/npy/googleNews_mini.npy')
def test_transform(self):
k = self.d.get('king')
norm = np.linalg.norm(k.vect)
k_p = k.polarVect()
k_a = k.angularVect()
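# polarVect() is expected to be the vector norm followed by the angular coordinates returned by angularVect().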
for a, b in zip(np.concatenate(([norm], k_a)), k_p):
self.assertAlmostEquals(a, b, places=5)
# DISTANCE
def test_cosSim(self):
k = self.d.get('king')
q = self.d.get('queen')
self.assertAlmostEquals(cp.cosSim(k, q), cp.cosSim(q, k), places=5)
self.assertAlmostEquals(cp.cosSim(k, k), 1.0, places=5)
def test_euclDist(self):
k = self.d.get('king')
q = self.d.get('queen')
self.assertEqual(cp.euclDist(k, q), cp.euclDist(q, k))
self.assertAlmostEquals(cp.euclDist(k, k), 0.0, places=5)
def test_manaDist(self):
k = self.d.get('king')
q = self.d.get('queen')
self.assertEqual(cp.manaDist(k, q), cp.manaDist(q, k))
self.assertAlmostEquals(cp.manaDist(k, k), 0.0, places=5)
# OPERATION
def test_arith(self):
# k - m = q - w
k = self.d.get('king')
q = self.d.get('queen')
m = self.d.get('man')
w = self.d.get('woman')
v1 = cp.add(k, w)
v1 = cp.sub(v1, m)
v2 = cp.sub(k, m)
v2 = cp.add(v2, w)
v3 = cp.addSub([k, w], [m])
v4 = cp.sub(k.normalized(), m.normalized())
v4 = cp.add(v4, w.normalized())
self.assertAlmostEquals(cp.cosSim(v1, v2), 1.0, places=5)
self.assertAlmostEquals(cp.cosSim(v3, v4), 1.0, places=5)
self.assertEquals(self.d.find_cosSim(v1)[0][1], 'queen')
self.assertEquals(self.d.find_cosSim(v2)[0][1], 'queen')
self.assertEquals(self.d.find_cosSim(v3)[0][1], 'queen')
self.assertEquals(self.d.find_cosSim(v4)[0][1], 'queen')
| gpl-3.0 |
ltilve/ChromiumGStreamerBackend | tools/telemetry/third_party/gsutilz/gslib/tests/test_cp.py | 11 | 93771 | # -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for cp command."""
from __future__ import absolute_import
import base64
import binascii
import datetime
import httplib
import logging
import os
import pickle
import pkgutil
import random
import re
import string
import sys
from apitools.base.py import exceptions as apitools_exceptions
import boto
from boto import storage_uri
from boto.exception import ResumableTransferDisposition
from boto.exception import ResumableUploadException
from boto.exception import StorageResponseError
from boto.storage_uri import BucketStorageUri
from gslib.cloud_api import ResumableDownloadException
from gslib.cloud_api import ResumableUploadException
from gslib.cloud_api import ResumableUploadStartOverException
from gslib.copy_helper import GetTrackerFilePath
from gslib.copy_helper import TrackerFileType
from gslib.cs_api_map import ApiSelector
from gslib.gcs_json_api import GcsJsonApi
from gslib.hashing_helper import CalculateMd5FromContents
from gslib.storage_url import StorageUrlFromString
import gslib.tests.testcase as testcase
from gslib.tests.testcase.base import NotParallelizable
from gslib.tests.testcase.integration_testcase import SkipForS3
from gslib.tests.util import GenerationFromURI as urigen
from gslib.tests.util import HAS_S3_CREDS
from gslib.tests.util import ObjectToURI as suri
from gslib.tests.util import PerformsFileToObjectUpload
from gslib.tests.util import SetBotoConfigForTest
from gslib.tests.util import unittest
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
from gslib.tracker_file import DeleteTrackerFile
from gslib.tracker_file import GetRewriteTrackerFilePath
from gslib.util import EIGHT_MIB
from gslib.util import IS_WINDOWS
from gslib.util import MakeHumanReadable
from gslib.util import ONE_KIB
from gslib.util import ONE_MIB
from gslib.util import Retry
from gslib.util import START_CALLBACK_PER_BYTES
from gslib.util import UTF8
# Custom test callbacks must be pickleable, and therefore at global scope.
class _HaltingCopyCallbackHandler(object):
"""Test callback handler for intentionally stopping a resumable transfer."""
def __init__(self, is_upload, halt_at_byte):
self._is_upload = is_upload
self._halt_at_byte = halt_at_byte
# pylint: disable=invalid-name
def call(self, total_bytes_transferred, total_size):
"""Forcibly exits if the transfer has passed the halting point."""
if total_bytes_transferred >= self._halt_at_byte:
sys.stderr.write(
'Halting transfer after byte %s. %s/%s transferred.\r\n' % (
self._halt_at_byte, MakeHumanReadable(total_bytes_transferred),
MakeHumanReadable(total_size)))
if self._is_upload:
raise ResumableUploadException('Artifically halting upload.')
else:
raise ResumableDownloadException('Artifically halting download.')
class _JSONForceHTTPErrorCopyCallbackHandler(object):
"""Test callback handler that raises an arbitrary HTTP error exception."""
def __init__(self, startover_at_byte, http_error_num):
self._startover_at_byte = startover_at_byte
self._http_error_num = http_error_num
self.started_over_once = False
# pylint: disable=invalid-name
def call(self, total_bytes_transferred, total_size):
"""Forcibly exits if the transfer has passed the halting point."""
if (total_bytes_transferred >= self._startover_at_byte
and not self.started_over_once):
sys.stderr.write(
'Forcing HTTP error %s after byte %s. '
'%s/%s transferred.\r\n' % (
self._http_error_num,
self._startover_at_byte,
MakeHumanReadable(total_bytes_transferred),
MakeHumanReadable(total_size)))
self.started_over_once = True
raise apitools_exceptions.HttpError(
{'status': self._http_error_num}, None, None)
class _XMLResumableUploadStartOverCopyCallbackHandler(object):
"""Test callback handler that raises start-over exception during upload."""
def __init__(self, startover_at_byte):
self._startover_at_byte = startover_at_byte
self.started_over_once = False
# pylint: disable=invalid-name
def call(self, total_bytes_transferred, total_size):
"""Forcibly exits if the transfer has passed the halting point."""
if (total_bytes_transferred >= self._startover_at_byte
and not self.started_over_once):
sys.stderr.write(
'Forcing ResumableUpload start over error after byte %s. '
'%s/%s transferred.\r\n' % (
self._startover_at_byte,
MakeHumanReadable(total_bytes_transferred),
MakeHumanReadable(total_size)))
self.started_over_once = True
raise boto.exception.ResumableUploadException(
'Forcing upload start over',
ResumableTransferDisposition.START_OVER)
class _DeleteBucketThenStartOverCopyCallbackHandler(object):
"""Test callback handler that deletes bucket then raises start-over."""
def __init__(self, startover_at_byte, bucket_uri):
self._startover_at_byte = startover_at_byte
self._bucket_uri = bucket_uri
self.started_over_once = False
# pylint: disable=invalid-name
def call(self, total_bytes_transferred, total_size):
"""Forcibly exits if the transfer has passed the halting point."""
if (total_bytes_transferred >= self._startover_at_byte
and not self.started_over_once):
sys.stderr.write('Deleting bucket (%s)' %(self._bucket_uri.bucket_name))
@Retry(StorageResponseError, tries=5, timeout_secs=1)
def DeleteBucket():
bucket_list = list(self._bucket_uri.list_bucket(all_versions=True))
for k in bucket_list:
self._bucket_uri.get_bucket().delete_key(k.name,
version_id=k.version_id)
self._bucket_uri.delete_bucket()
DeleteBucket()
sys.stderr.write(
'Forcing ResumableUpload start over error after byte %s. '
'%s/%s transferred.\r\n' % (
self._startover_at_byte,
MakeHumanReadable(total_bytes_transferred),
MakeHumanReadable(total_size)))
self.started_over_once = True
raise ResumableUploadStartOverException(
'Artificially forcing start-over')
class _RewriteHaltException(Exception):
pass
class _HaltingRewriteCallbackHandler(object):
"""Test callback handler for intentionally stopping a rewrite operation."""
def __init__(self, halt_at_byte):
self._halt_at_byte = halt_at_byte
# pylint: disable=invalid-name
def call(self, total_bytes_rewritten, unused_total_size):
"""Forcibly exits if the operation has passed the halting point."""
if total_bytes_rewritten >= self._halt_at_byte:
raise _RewriteHaltException('Artificially halting rewrite')
class _EnsureRewriteResumeCallbackHandler(object):
"""Test callback handler for ensuring a rewrite operation resumed."""
def __init__(self, required_byte):
self._required_byte = required_byte
# pylint: disable=invalid-name
def call(self, total_bytes_rewritten, unused_total_size):
"""Forcibly exits if the operation has passed the halting point."""
if total_bytes_rewritten <= self._required_byte:
raise _RewriteHaltException(
'Rewrite did not resume; %s bytes written, but %s bytes should '
'have already been written.' % (total_bytes_rewritten,
self._required_byte))
class _ResumableUploadRetryHandler(object):
"""Test callback handler for causing retries during a resumable transfer."""
def __init__(self, retry_at_byte, exception_to_raise, exc_args,
num_retries=1):
self._retry_at_byte = retry_at_byte
self._exception_to_raise = exception_to_raise
self._exception_args = exc_args
self._num_retries = num_retries
self._retries_made = 0
# pylint: disable=invalid-name
def call(self, total_bytes_transferred, unused_total_size):
"""Cause a single retry at the retry point."""
if (total_bytes_transferred >= self._retry_at_byte
and self._retries_made < self._num_retries):
self._retries_made += 1
raise self._exception_to_raise(*self._exception_args)
class TestCp(testcase.GsUtilIntegrationTestCase):
"""Integration tests for cp command."""
# For tests that artificially halt, we need to ensure at least one callback
# occurs.
halt_size = START_CALLBACK_PER_BYTES * 2
def _get_test_file(self, name):
contents = pkgutil.get_data('gslib', 'tests/test_data/%s' % name)
return self.CreateTempFile(file_name=name, contents=contents)
@PerformsFileToObjectUpload
def test_noclobber(self):
key_uri = self.CreateObject(contents='foo')
fpath = self.CreateTempFile(contents='bar')
stderr = self.RunGsUtil(['cp', '-n', fpath, suri(key_uri)],
return_stderr=True)
self.assertIn('Skipping existing item: %s' % suri(key_uri), stderr)
self.assertEqual(key_uri.get_contents_as_string(), 'foo')
stderr = self.RunGsUtil(['cp', '-n', suri(key_uri), fpath],
return_stderr=True)
with open(fpath, 'r') as f:
self.assertIn('Skipping existing item: %s' % suri(f), stderr)
self.assertEqual(f.read(), 'bar')
def test_dest_bucket_not_exist(self):
fpath = self.CreateTempFile(contents='foo')
invalid_bucket_uri = (
'%s://%s' % (self.default_provider, self.nonexistent_bucket_name))
stderr = self.RunGsUtil(['cp', fpath, invalid_bucket_uri],
expected_status=1, return_stderr=True)
self.assertIn('does not exist.', stderr)
def test_copy_in_cloud_noclobber(self):
bucket1_uri = self.CreateBucket()
bucket2_uri = self.CreateBucket()
key_uri = self.CreateObject(bucket_uri=bucket1_uri, contents='foo')
stderr = self.RunGsUtil(['cp', suri(key_uri), suri(bucket2_uri)],
return_stderr=True)
# Rewrite API may output an additional 'Copying' progress notification.
self.assertGreaterEqual(stderr.count('Copying'), 1)
self.assertLessEqual(stderr.count('Copying'), 2)
stderr = self.RunGsUtil(['cp', '-n', suri(key_uri), suri(bucket2_uri)],
return_stderr=True)
self.assertIn('Skipping existing item: %s' %
suri(bucket2_uri, key_uri.object_name), stderr)
@PerformsFileToObjectUpload
def test_streaming(self):
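    """Tests streaming an upload from stdin via 'cp -'."""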
bucket_uri = self.CreateBucket()
stderr = self.RunGsUtil(['cp', '-', '%s' % suri(bucket_uri, 'foo')],
stdin='bar', return_stderr=True)
self.assertIn('Copying from <STDIN>', stderr)
key_uri = bucket_uri.clone_replace_name('foo')
self.assertEqual(key_uri.get_contents_as_string(), 'bar')
def test_streaming_multiple_arguments(self):
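    """Tests that streaming uploads reject multiple source URL arguments."""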
bucket_uri = self.CreateBucket()
stderr = self.RunGsUtil(['cp', '-', '-', suri(bucket_uri)],
stdin='bar', return_stderr=True, expected_status=1)
self.assertIn('Multiple URL strings are not supported with streaming',
stderr)
# TODO: Implement a way to test both with and without using magic file.
@PerformsFileToObjectUpload
def test_detect_content_type(self):
"""Tests local detection of content type."""
bucket_uri = self.CreateBucket()
dsturi = suri(bucket_uri, 'foo')
self.RunGsUtil(['cp', self._get_test_file('test.mp3'), dsturi])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
if IS_WINDOWS:
self.assertTrue(
re.search(r'Content-Type:\s+audio/x-mpg', stdout) or
re.search(r'Content-Type:\s+audio/mpeg', stdout))
else:
self.assertRegexpMatches(stdout, r'Content-Type:\s+audio/mpeg')
_Check1()
self.RunGsUtil(['cp', self._get_test_file('test.gif'), dsturi])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check2():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegexpMatches(stdout, r'Content-Type:\s+image/gif')
_Check2()
def test_content_type_override_default(self):
"""Tests overriding content type with the default value."""
bucket_uri = self.CreateBucket()
dsturi = suri(bucket_uri, 'foo')
self.RunGsUtil(['-h', 'Content-Type:', 'cp',
self._get_test_file('test.mp3'), dsturi])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegexpMatches(stdout,
r'Content-Type:\s+application/octet-stream')
_Check1()
self.RunGsUtil(['-h', 'Content-Type:', 'cp',
self._get_test_file('test.gif'), dsturi])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check2():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegexpMatches(stdout,
r'Content-Type:\s+application/octet-stream')
_Check2()
def test_content_type_override(self):
"""Tests overriding content type with a value."""
bucket_uri = self.CreateBucket()
dsturi = suri(bucket_uri, 'foo')
self.RunGsUtil(['-h', 'Content-Type:text/plain', 'cp',
self._get_test_file('test.mp3'), dsturi])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegexpMatches(stdout, r'Content-Type:\s+text/plain')
_Check1()
self.RunGsUtil(['-h', 'Content-Type:text/plain', 'cp',
self._get_test_file('test.gif'), dsturi])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check2():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegexpMatches(stdout, r'Content-Type:\s+text/plain')
_Check2()
@unittest.skipIf(IS_WINDOWS, 'magicfile is not available on Windows.')
@PerformsFileToObjectUpload
def test_magicfile_override(self):
"""Tests content type override with magicfile value."""
bucket_uri = self.CreateBucket()
dsturi = suri(bucket_uri, 'foo')
fpath = self.CreateTempFile(contents='foo/bar\n')
self.RunGsUtil(['cp', fpath, dsturi])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
use_magicfile = boto.config.getbool('GSUtil', 'use_magicfile', False)
content_type = ('text/plain' if use_magicfile
else 'application/octet-stream')
self.assertRegexpMatches(stdout, r'Content-Type:\s+%s' % content_type)
_Check1()
@PerformsFileToObjectUpload
def test_content_type_mismatches(self):
"""Tests overriding content type when it does not match the file type."""
bucket_uri = self.CreateBucket()
dsturi = suri(bucket_uri, 'foo')
fpath = self.CreateTempFile(contents='foo/bar\n')
self.RunGsUtil(['-h', 'Content-Type:image/gif', 'cp',
self._get_test_file('test.mp3'), dsturi])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegexpMatches(stdout, r'Content-Type:\s+image/gif')
_Check1()
self.RunGsUtil(['-h', 'Content-Type:image/gif', 'cp',
self._get_test_file('test.gif'), dsturi])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check2():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegexpMatches(stdout, r'Content-Type:\s+image/gif')
_Check2()
self.RunGsUtil(['-h', 'Content-Type:image/gif', 'cp', fpath, dsturi])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check3():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegexpMatches(stdout, r'Content-Type:\s+image/gif')
_Check3()
@PerformsFileToObjectUpload
def test_content_type_header_case_insensitive(self):
"""Tests that content type header is treated with case insensitivity."""
bucket_uri = self.CreateBucket()
dsturi = suri(bucket_uri, 'foo')
fpath = self._get_test_file('test.gif')
self.RunGsUtil(['-h', 'content-Type:text/plain', 'cp',
fpath, dsturi])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegexpMatches(stdout, r'Content-Type:\s+text/plain')
self.assertNotRegexpMatches(stdout, r'image/gif')
_Check1()
self.RunGsUtil(['-h', 'CONTENT-TYPE:image/gif',
'-h', 'content-type:image/gif',
'cp', fpath, dsturi])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check2():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegexpMatches(stdout, r'Content-Type:\s+image/gif')
self.assertNotRegexpMatches(stdout, r'image/gif,\s*image/gif')
_Check2()
@PerformsFileToObjectUpload
def test_other_headers(self):
"""Tests that non-content-type headers are applied successfully on copy."""
bucket_uri = self.CreateBucket()
dst_uri = suri(bucket_uri, 'foo')
fpath = self._get_test_file('test.gif')
self.RunGsUtil(['-h', 'Cache-Control:public,max-age=12',
'-h', 'x-%s-meta-1:abcd' % self.provider_custom_meta, 'cp',
fpath, dst_uri])
stdout = self.RunGsUtil(['ls', '-L', dst_uri], return_stdout=True)
self.assertRegexpMatches(stdout, r'Cache-Control\s*:\s*public,max-age=12')
self.assertRegexpMatches(stdout, r'Metadata:\s*1:\s*abcd')
dst_uri2 = suri(bucket_uri, 'bar')
self.RunGsUtil(['cp', dst_uri, dst_uri2])
# Ensure metadata was preserved across copy.
stdout = self.RunGsUtil(['ls', '-L', dst_uri2], return_stdout=True)
self.assertRegexpMatches(stdout, r'Cache-Control\s*:\s*public,max-age=12')
self.assertRegexpMatches(stdout, r'Metadata:\s*1:\s*abcd')
@PerformsFileToObjectUpload
def test_versioning(self):
"""Tests copy with versioning."""
bucket_uri = self.CreateVersionedBucket()
k1_uri = self.CreateObject(bucket_uri=bucket_uri, contents='data2')
k2_uri = self.CreateObject(bucket_uri=bucket_uri, contents='data1')
g1 = urigen(k2_uri)
self.RunGsUtil(['cp', suri(k1_uri), suri(k2_uri)])
k2_uri = bucket_uri.clone_replace_name(k2_uri.object_name)
k2_uri = bucket_uri.clone_replace_key(k2_uri.get_key())
g2 = urigen(k2_uri)
k2_uri.set_contents_from_string('data3')
g3 = urigen(k2_uri)
fpath = self.CreateTempFile()
# Check to make sure current version is data3.
self.RunGsUtil(['cp', k2_uri.versionless_uri, fpath])
with open(fpath, 'r') as f:
self.assertEqual(f.read(), 'data3')
# Check contents of all three versions
self.RunGsUtil(['cp', '%s#%s' % (k2_uri.versionless_uri, g1), fpath])
with open(fpath, 'r') as f:
self.assertEqual(f.read(), 'data1')
self.RunGsUtil(['cp', '%s#%s' % (k2_uri.versionless_uri, g2), fpath])
with open(fpath, 'r') as f:
self.assertEqual(f.read(), 'data2')
self.RunGsUtil(['cp', '%s#%s' % (k2_uri.versionless_uri, g3), fpath])
with open(fpath, 'r') as f:
self.assertEqual(f.read(), 'data3')
# Copy first version to current and verify.
self.RunGsUtil(['cp', '%s#%s' % (k2_uri.versionless_uri, g1),
k2_uri.versionless_uri])
self.RunGsUtil(['cp', k2_uri.versionless_uri, fpath])
with open(fpath, 'r') as f:
self.assertEqual(f.read(), 'data1')
# Attempt to specify a version-specific URI for destination.
stderr = self.RunGsUtil(['cp', fpath, k2_uri.uri], return_stderr=True,
expected_status=1)
self.assertIn('cannot be the destination for gsutil cp', stderr)
@SkipForS3('S3 lists versioned objects in reverse timestamp order.')
def test_recursive_copying_versioned_bucket(self):
"""Tests that cp -R with versioned buckets copies all versions in order."""
bucket1_uri = self.CreateVersionedBucket()
bucket2_uri = self.CreateVersionedBucket()
# Write two versions of an object to the bucket1.
self.CreateObject(bucket_uri=bucket1_uri, object_name='k', contents='data0')
self.CreateObject(bucket_uri=bucket1_uri, object_name='k',
contents='longer_data1')
self.AssertNObjectsInBucket(bucket1_uri, 2, versioned=True)
self.AssertNObjectsInBucket(bucket2_uri, 0, versioned=True)
# Recursively copy to second versioned bucket.
self.RunGsUtil(['cp', '-R', suri(bucket1_uri, '*'), suri(bucket2_uri)])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check2():
"""Validates the results of the cp -R."""
listing1 = self.RunGsUtil(['ls', '-la', suri(bucket1_uri)],
return_stdout=True).split('\n')
listing2 = self.RunGsUtil(['ls', '-la', suri(bucket2_uri)],
return_stdout=True).split('\n')
# 2 lines of listing output, 1 summary line, 1 empty line from \n split.
self.assertEquals(len(listing1), 4)
self.assertEquals(len(listing2), 4)
# First object in each bucket should match in size and version-less name.
size1, _, uri_str1, _ = listing1[0].split()
self.assertEquals(size1, str(len('data0')))
self.assertEquals(storage_uri(uri_str1).object_name, 'k')
size2, _, uri_str2, _ = listing2[0].split()
self.assertEquals(size2, str(len('data0')))
self.assertEquals(storage_uri(uri_str2).object_name, 'k')
# Similarly for second object in each bucket.
size1, _, uri_str1, _ = listing1[1].split()
self.assertEquals(size1, str(len('longer_data1')))
self.assertEquals(storage_uri(uri_str1).object_name, 'k')
size2, _, uri_str2, _ = listing2[1].split()
self.assertEquals(size2, str(len('longer_data1')))
self.assertEquals(storage_uri(uri_str2).object_name, 'k')
_Check2()
@PerformsFileToObjectUpload
@SkipForS3('Preconditions not supported for S3.')
def test_cp_generation_zero_match(self):
"""Tests that cp handles an object-not-exists precondition header."""
bucket_uri = self.CreateBucket()
fpath1 = self.CreateTempFile(contents='data1')
# Match 0 means only write the object if it doesn't already exist.
gen_match_header = 'x-goog-if-generation-match:0'
# First copy should succeed.
# TODO: This can fail (rarely) if the server returns a 5xx but actually
# commits the bytes. If we add restarts on small uploads, handle this
# case.
self.RunGsUtil(['-h', gen_match_header, 'cp', fpath1, suri(bucket_uri)])
# Second copy should fail with a precondition error.
stderr = self.RunGsUtil(['-h', gen_match_header, 'cp', fpath1,
suri(bucket_uri)],
return_stderr=True, expected_status=1)
self.assertIn('PreconditionException', stderr)
@PerformsFileToObjectUpload
@SkipForS3('Preconditions not supported for S3.')
def test_cp_v_generation_match(self):
"""Tests that cp -v option handles the if-generation-match header."""
bucket_uri = self.CreateVersionedBucket()
k1_uri = self.CreateObject(bucket_uri=bucket_uri, contents='data1')
g1 = k1_uri.generation
tmpdir = self.CreateTempDir()
fpath1 = self.CreateTempFile(tmpdir=tmpdir, contents='data2')
gen_match_header = 'x-goog-if-generation-match:%s' % g1
# First copy should succeed.
self.RunGsUtil(['-h', gen_match_header, 'cp', fpath1, suri(k1_uri)])
# Second copy should fail the precondition.
stderr = self.RunGsUtil(['-h', gen_match_header, 'cp', fpath1,
suri(k1_uri)],
return_stderr=True, expected_status=1)
self.assertIn('PreconditionException', stderr)
    # Specifying a generation with -n should fail before the request hits the
# server.
stderr = self.RunGsUtil(['-h', gen_match_header, 'cp', '-n', fpath1,
suri(k1_uri)],
return_stderr=True, expected_status=1)
self.assertIn('ArgumentException', stderr)
self.assertIn('Specifying x-goog-if-generation-match is not supported '
'with cp -n', stderr)
@PerformsFileToObjectUpload
def test_cp_nv(self):
"""Tests that cp -nv works when skipping existing file."""
bucket_uri = self.CreateVersionedBucket()
k1_uri = self.CreateObject(bucket_uri=bucket_uri, contents='data1')
tmpdir = self.CreateTempDir()
fpath1 = self.CreateTempFile(tmpdir=tmpdir, contents='data2')
# First copy should succeed.
self.RunGsUtil(['cp', '-nv', fpath1, suri(k1_uri)])
# Second copy should skip copying.
stderr = self.RunGsUtil(['cp', '-nv', fpath1, suri(k1_uri)],
return_stderr=True)
self.assertIn('Skipping existing item:', stderr)
@PerformsFileToObjectUpload
@SkipForS3('S3 lists versioned objects in reverse timestamp order.')
def test_cp_v_option(self):
""""Tests that cp -v returns the created object's version-specific URI."""
bucket_uri = self.CreateVersionedBucket()
k1_uri = self.CreateObject(bucket_uri=bucket_uri, contents='data1')
k2_uri = self.CreateObject(bucket_uri=bucket_uri, contents='data2')
# Case 1: Upload file to object using one-shot PUT.
tmpdir = self.CreateTempDir()
fpath1 = self.CreateTempFile(tmpdir=tmpdir, contents='data1')
self._run_cp_minus_v_test('-v', fpath1, k2_uri.uri)
# Case 2: Upload file to object using resumable upload.
size_threshold = ONE_KIB
boto_config_for_test = ('GSUtil', 'resumable_threshold',
str(size_threshold))
with SetBotoConfigForTest([boto_config_for_test]):
file_as_string = os.urandom(size_threshold)
tmpdir = self.CreateTempDir()
fpath1 = self.CreateTempFile(tmpdir=tmpdir, contents=file_as_string)
self._run_cp_minus_v_test('-v', fpath1, k2_uri.uri)
# Case 3: Upload stream to object.
self._run_cp_minus_v_test('-v', '-', k2_uri.uri)
# Case 4: Download object to file. For this case we just expect output of
# gsutil cp -v to be the URI of the file.
tmpdir = self.CreateTempDir()
fpath1 = self.CreateTempFile(tmpdir=tmpdir)
dst_uri = storage_uri(fpath1)
stderr = self.RunGsUtil(['cp', '-v', suri(k1_uri), suri(dst_uri)],
return_stderr=True)
self.assertIn('Created: %s' % dst_uri.uri, stderr.split('\n')[-2])
# Case 5: Daisy-chain from object to object.
self._run_cp_minus_v_test('-Dv', k1_uri.uri, k2_uri.uri)
# Case 6: Copy object to object in-the-cloud.
self._run_cp_minus_v_test('-v', k1_uri.uri, k2_uri.uri)
def _run_cp_minus_v_test(self, opt, src_str, dst_str):
"""Runs cp -v with the options and validates the results."""
stderr = self.RunGsUtil(['cp', opt, src_str, dst_str], return_stderr=True)
match = re.search(r'Created: (.*)\n', stderr)
self.assertIsNotNone(match)
created_uri = match.group(1)
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-a', dst_str], return_stdout=True)
lines = stdout.split('\n')
      # Final (most recent) object should match the "Created:" URI. This is
      # in the second-to-last line (the last line is '\n').
self.assertGreater(len(lines), 2)
self.assertEqual(created_uri, lines[-2])
_Check1()
@PerformsFileToObjectUpload
def test_stdin_args(self):
"""Tests cp with the -I option."""
tmpdir = self.CreateTempDir()
fpath1 = self.CreateTempFile(tmpdir=tmpdir, contents='data1')
fpath2 = self.CreateTempFile(tmpdir=tmpdir, contents='data2')
bucket_uri = self.CreateBucket()
self.RunGsUtil(['cp', '-I', suri(bucket_uri)],
stdin='\n'.join((fpath1, fpath2)))
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', suri(bucket_uri)], return_stdout=True)
self.assertIn(os.path.basename(fpath1), stdout)
self.assertIn(os.path.basename(fpath2), stdout)
self.assertNumLines(stdout, 2)
_Check1()
def test_cross_storage_class_cloud_cp(self):
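    """Tests copy-in-the-cloud between buckets of different storage classes."""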
bucket1_uri = self.CreateBucket(storage_class='STANDARD')
bucket2_uri = self.CreateBucket(
storage_class='DURABLE_REDUCED_AVAILABILITY')
key_uri = self.CreateObject(bucket_uri=bucket1_uri, contents='foo')
# Server now allows copy-in-the-cloud across storage classes.
self.RunGsUtil(['cp', suri(key_uri), suri(bucket2_uri)])
@unittest.skipUnless(HAS_S3_CREDS, 'Test requires both S3 and GS credentials')
def test_cross_provider_cp(self):
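    """Tests copying objects between S3 and GS buckets."""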
s3_bucket = self.CreateBucket(provider='s3')
gs_bucket = self.CreateBucket(provider='gs')
s3_key = self.CreateObject(bucket_uri=s3_bucket, contents='foo')
gs_key = self.CreateObject(bucket_uri=gs_bucket, contents='bar')
self.RunGsUtil(['cp', suri(s3_key), suri(gs_bucket)])
self.RunGsUtil(['cp', suri(gs_key), suri(s3_bucket)])
@unittest.skipUnless(HAS_S3_CREDS, 'Test requires both S3 and GS credentials')
@unittest.skip('This test performs a large copy but remains here for '
'debugging purposes.')
def test_cross_provider_large_cp(self):
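    """Tests copying 1 MiB objects between S3 and GS buckets."""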
s3_bucket = self.CreateBucket(provider='s3')
gs_bucket = self.CreateBucket(provider='gs')
s3_key = self.CreateObject(bucket_uri=s3_bucket, contents='f'*1024*1024)
gs_key = self.CreateObject(bucket_uri=gs_bucket, contents='b'*1024*1024)
self.RunGsUtil(['cp', suri(s3_key), suri(gs_bucket)])
self.RunGsUtil(['cp', suri(gs_key), suri(s3_bucket)])
with SetBotoConfigForTest([
('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'json_resumable_chunk_size', str(ONE_KIB * 256))]):
# Ensure copy also works across json upload chunk boundaries.
self.RunGsUtil(['cp', suri(s3_key), suri(gs_bucket)])
@unittest.skip('This test is slow due to creating many objects, '
'but remains here for debugging purposes.')
def test_daisy_chain_cp_file_sizes(self):
"""Ensure daisy chain cp works with a wide of file sizes."""
bucket_uri = self.CreateBucket()
bucket2_uri = self.CreateBucket()
exponent_cap = 28 # Up to 256 MiB in size.
for i in range(exponent_cap):
one_byte_smaller = 2**i - 1
normal = 2**i
one_byte_larger = 2**i + 1
self.CreateObject(bucket_uri=bucket_uri, contents='a'*one_byte_smaller)
self.CreateObject(bucket_uri=bucket_uri, contents='b'*normal)
self.CreateObject(bucket_uri=bucket_uri, contents='c'*one_byte_larger)
self.AssertNObjectsInBucket(bucket_uri, exponent_cap*3)
self.RunGsUtil(['-m', 'cp', '-D', suri(bucket_uri, '**'),
suri(bucket2_uri)])
self.AssertNObjectsInBucket(bucket2_uri, exponent_cap*3)
def test_daisy_chain_cp(self):
"""Tests cp with the -D option."""
bucket1_uri = self.CreateBucket(storage_class='STANDARD')
bucket2_uri = self.CreateBucket(
storage_class='DURABLE_REDUCED_AVAILABILITY')
key_uri = self.CreateObject(bucket_uri=bucket1_uri, contents='foo')
# Set some headers on source object so we can verify that headers are
    # preserved by daisy-chain copy.
self.RunGsUtil(['setmeta', '-h', 'Cache-Control:public,max-age=12',
'-h', 'Content-Type:image/gif',
'-h', 'x-%s-meta-1:abcd' % self.provider_custom_meta,
suri(key_uri)])
# Set public-read (non-default) ACL so we can verify that cp -D -p works.
self.RunGsUtil(['acl', 'set', 'public-read', suri(key_uri)])
acl_json = self.RunGsUtil(['acl', 'get', suri(key_uri)], return_stdout=True)
# Perform daisy-chain copy and verify that source object headers and ACL
# were preserved. Also specify -n option to test that gsutil correctly
    # removes the x-goog-if-generation-match:0 header that was set at upload
    # time when updating the ACL.
stderr = self.RunGsUtil(['cp', '-Dpn', suri(key_uri), suri(bucket2_uri)],
return_stderr=True)
self.assertNotIn('Copy-in-the-cloud disallowed', stderr)
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check():
uri = suri(bucket2_uri, key_uri.object_name)
stdout = self.RunGsUtil(['ls', '-L', uri], return_stdout=True)
self.assertRegexpMatches(stdout, r'Cache-Control:\s+public,max-age=12')
self.assertRegexpMatches(stdout, r'Content-Type:\s+image/gif')
self.assertRegexpMatches(stdout, r'Metadata:\s+1:\s+abcd')
new_acl_json = self.RunGsUtil(['acl', 'get', uri], return_stdout=True)
self.assertEqual(acl_json, new_acl_json)
_Check()
def test_daisy_chain_cp_download_failure(self):
"""Tests cp with the -D option when the download thread dies."""
bucket1_uri = self.CreateBucket()
bucket2_uri = self.CreateBucket()
key_uri = self.CreateObject(bucket_uri=bucket1_uri,
contents='a' * self.halt_size)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(_HaltingCopyCallbackHandler(False, 5)))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['cp', '--testcallbackfile', test_callback_file,
'-D', suri(key_uri), suri(bucket2_uri)],
expected_status=1, return_stderr=True)
# Should have two exception traces; one from the download thread and
# one from the upload thread.
self.assertEqual(stderr.count(
'ResumableDownloadException: Artifically halting download'), 2)
def test_canned_acl_cp(self):
"""Tests copying with a canned ACL."""
bucket1_uri = self.CreateBucket()
bucket2_uri = self.CreateBucket()
key_uri = self.CreateObject(bucket_uri=bucket1_uri, contents='foo')
self.RunGsUtil(['cp', '-a', 'public-read', suri(key_uri),
suri(bucket2_uri)])
# Set public-read on the original key after the copy so we can compare
# the ACLs.
self.RunGsUtil(['acl', 'set', 'public-read', suri(key_uri)])
public_read_acl = self.RunGsUtil(['acl', 'get', suri(key_uri)],
return_stdout=True)
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check():
uri = suri(bucket2_uri, key_uri.object_name)
new_acl_json = self.RunGsUtil(['acl', 'get', uri], return_stdout=True)
self.assertEqual(public_read_acl, new_acl_json)
_Check()
@PerformsFileToObjectUpload
def test_canned_acl_upload(self):
"""Tests uploading a file with a canned ACL."""
bucket1_uri = self.CreateBucket()
key_uri = self.CreateObject(bucket_uri=bucket1_uri, contents='foo')
# Set public-read on the object so we can compare the ACLs.
self.RunGsUtil(['acl', 'set', 'public-read', suri(key_uri)])
public_read_acl = self.RunGsUtil(['acl', 'get', suri(key_uri)],
return_stdout=True)
file_name = 'bar'
fpath = self.CreateTempFile(file_name=file_name, contents='foo')
self.RunGsUtil(['cp', '-a', 'public-read', fpath, suri(bucket1_uri)])
new_acl_json = self.RunGsUtil(['acl', 'get', suri(bucket1_uri, file_name)],
return_stdout=True)
self.assertEqual(public_read_acl, new_acl_json)
resumable_size = ONE_KIB
boto_config_for_test = ('GSUtil', 'resumable_threshold',
str(resumable_size))
with SetBotoConfigForTest([boto_config_for_test]):
resumable_file_name = 'resumable_bar'
resumable_contents = os.urandom(resumable_size)
resumable_fpath = self.CreateTempFile(
file_name=resumable_file_name, contents=resumable_contents)
self.RunGsUtil(['cp', '-a', 'public-read', resumable_fpath,
suri(bucket1_uri)])
new_resumable_acl_json = self.RunGsUtil(
['acl', 'get', suri(bucket1_uri, resumable_file_name)],
return_stdout=True)
self.assertEqual(public_read_acl, new_resumable_acl_json)
def test_cp_key_to_local_stream(self):
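    """Tests downloading an object to stdout via 'cp ... -'."""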
bucket_uri = self.CreateBucket()
contents = 'foo'
key_uri = self.CreateObject(bucket_uri=bucket_uri, contents=contents)
stdout = self.RunGsUtil(['cp', suri(key_uri), '-'], return_stdout=True)
self.assertIn(contents, stdout)
def test_cp_local_file_to_local_stream(self):
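    """Tests streaming a local file to stdout via 'cp ... -'."""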
contents = 'content'
fpath = self.CreateTempFile(contents=contents)
stdout = self.RunGsUtil(['cp', fpath, '-'], return_stdout=True)
self.assertIn(contents, stdout)
@PerformsFileToObjectUpload
def test_cp_zero_byte_file(self):
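    """Tests that a zero-byte file can be uploaded and downloaded."""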
dst_bucket_uri = self.CreateBucket()
src_dir = self.CreateTempDir()
fpath = os.path.join(src_dir, 'zero_byte')
with open(fpath, 'w') as unused_out_file:
pass # Write a zero byte file
self.RunGsUtil(['cp', fpath, suri(dst_bucket_uri)])
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', suri(dst_bucket_uri)], return_stdout=True)
self.assertIn(os.path.basename(fpath), stdout)
_Check1()
download_path = os.path.join(src_dir, 'zero_byte_download')
self.RunGsUtil(['cp', suri(dst_bucket_uri, 'zero_byte'), download_path])
self.assertTrue(os.stat(download_path))
def test_copy_bucket_to_bucket(self):
"""Tests that recursively copying from bucket to bucket.
This should produce identically named objects (and not, in particular,
destination objects named by the version-specific URI from source objects).
"""
src_bucket_uri = self.CreateVersionedBucket()
dst_bucket_uri = self.CreateVersionedBucket()
self.CreateObject(bucket_uri=src_bucket_uri, object_name='obj0',
contents='abc')
self.CreateObject(bucket_uri=src_bucket_uri, object_name='obj1',
contents='def')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _CopyAndCheck():
self.RunGsUtil(['cp', '-R', suri(src_bucket_uri),
suri(dst_bucket_uri)])
stdout = self.RunGsUtil(['ls', '-R', dst_bucket_uri.uri],
return_stdout=True)
self.assertIn('%s%s/obj0\n' % (dst_bucket_uri,
src_bucket_uri.bucket_name), stdout)
self.assertIn('%s%s/obj1\n' % (dst_bucket_uri,
src_bucket_uri.bucket_name), stdout)
_CopyAndCheck()
def test_copy_bucket_to_dir(self):
"""Tests recursively copying from bucket to a directory.
This should produce identically named objects (and not, in particular,
    destination objects named by the version-specific URI from source objects).
"""
src_bucket_uri = self.CreateBucket()
dst_dir = self.CreateTempDir()
self.CreateObject(bucket_uri=src_bucket_uri, object_name='obj0',
contents='abc')
self.CreateObject(bucket_uri=src_bucket_uri, object_name='obj1',
contents='def')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _CopyAndCheck():
"""Copies the bucket recursively and validates the results."""
self.RunGsUtil(['cp', '-R', suri(src_bucket_uri), dst_dir])
dir_list = []
for dirname, _, filenames in os.walk(dst_dir):
for filename in filenames:
dir_list.append(os.path.join(dirname, filename))
dir_list = sorted(dir_list)
self.assertEqual(len(dir_list), 2)
self.assertEqual(os.path.join(dst_dir, src_bucket_uri.bucket_name,
'obj0'), dir_list[0])
self.assertEqual(os.path.join(dst_dir, src_bucket_uri.bucket_name,
'obj1'), dir_list[1])
_CopyAndCheck()
def test_recursive_download_with_leftover_dir_placeholder(self):
"""Tests that we correctly handle leftover dir placeholders."""
src_bucket_uri = self.CreateBucket()
dst_dir = self.CreateTempDir()
self.CreateObject(bucket_uri=src_bucket_uri, object_name='obj0',
contents='abc')
self.CreateObject(bucket_uri=src_bucket_uri, object_name='obj1',
contents='def')
# Create a placeholder like what can be left over by web GUI tools.
key_uri = src_bucket_uri.clone_replace_name('/')
key_uri.set_contents_from_string('')
self.AssertNObjectsInBucket(src_bucket_uri, 3)
stderr = self.RunGsUtil(['cp', '-R', suri(src_bucket_uri), dst_dir],
return_stderr=True)
self.assertIn('Skipping cloud sub-directory placeholder object', stderr)
dir_list = []
for dirname, _, filenames in os.walk(dst_dir):
for filename in filenames:
dir_list.append(os.path.join(dirname, filename))
dir_list = sorted(dir_list)
self.assertEqual(len(dir_list), 2)
self.assertEqual(os.path.join(dst_dir, src_bucket_uri.bucket_name,
'obj0'), dir_list[0])
self.assertEqual(os.path.join(dst_dir, src_bucket_uri.bucket_name,
'obj1'), dir_list[1])
def test_copy_quiet(self):
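    """Tests that the global -q option suppresses 'Copying' progress output."""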
bucket_uri = self.CreateBucket()
key_uri = self.CreateObject(bucket_uri=bucket_uri, contents='foo')
stderr = self.RunGsUtil(['-q', 'cp', suri(key_uri),
suri(bucket_uri.clone_replace_name('o2'))],
return_stderr=True)
self.assertEqual(stderr.count('Copying '), 0)
def test_cp_md5_match(self):
"""Tests that the uploaded object has the expected MD5.
    Note that while this does perform a file to object upload, MD5s are
not supported for composite objects so we don't use the decorator in this
case.
"""
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(contents='bar')
with open(fpath, 'r') as f_in:
file_md5 = base64.encodestring(binascii.unhexlify(
CalculateMd5FromContents(f_in))).rstrip('\n')
self.RunGsUtil(['cp', fpath, suri(bucket_uri)])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', suri(bucket_uri)],
return_stdout=True)
self.assertRegexpMatches(stdout,
r'Hash\s+\(md5\):\s+%s' % re.escape(file_md5))
_Check1()
@unittest.skipIf(IS_WINDOWS,
'Unicode handling on Windows requires mods to site-packages')
@PerformsFileToObjectUpload
def test_cp_manifest_upload_unicode(self):
return self._ManifestUpload('foo-unicöde', 'bar-unicöde',
'manifest-unicöde')
@PerformsFileToObjectUpload
def test_cp_manifest_upload(self):
"""Tests uploading with a mnifest file."""
return self._ManifestUpload('foo', 'bar', 'manifest')
def _ManifestUpload(self, file_name, object_name, manifest_name):
"""Tests uploading with a manifest file."""
bucket_uri = self.CreateBucket()
dsturi = suri(bucket_uri, object_name)
fpath = self.CreateTempFile(file_name=file_name, contents='bar')
logpath = self.CreateTempFile(file_name=manifest_name, contents='')
# Ensure the file is empty.
open(logpath, 'w').close()
self.RunGsUtil(['cp', '-L', logpath, fpath, dsturi])
with open(logpath, 'r') as f:
lines = f.readlines()
self.assertEqual(len(lines), 2)
expected_headers = ['Source', 'Destination', 'Start', 'End', 'Md5',
'UploadId', 'Source Size', 'Bytes Transferred',
'Result', 'Description']
self.assertEqual(expected_headers, lines[0].strip().split(','))
results = lines[1].strip().split(',')
self.assertEqual(results[0][:7], 'file://') # source
self.assertEqual(results[1][:5], '%s://' %
self.default_provider) # destination
date_format = '%Y-%m-%dT%H:%M:%S.%fZ'
start_date = datetime.datetime.strptime(results[2], date_format)
end_date = datetime.datetime.strptime(results[3], date_format)
    self.assertGreater(end_date, start_date)
if self.RunGsUtil == testcase.GsUtilIntegrationTestCase.RunGsUtil:
# Check that we didn't do automatic parallel uploads - compose doesn't
# calculate the MD5 hash. Since RunGsUtil is overriden in
# TestCpParallelUploads to force parallel uploads, we can check which
# method was used.
self.assertEqual(results[4], 'rL0Y20zC+Fzt72VPzMSk2A==') # md5
self.assertEqual(int(results[6]), 3) # Source Size
self.assertEqual(int(results[7]), 3) # Bytes Transferred
self.assertEqual(results[8], 'OK') # Result
@PerformsFileToObjectUpload
def test_cp_manifest_download(self):
"""Tests downloading with a manifest file."""
key_uri = self.CreateObject(contents='foo')
fpath = self.CreateTempFile(contents='')
logpath = self.CreateTempFile(contents='')
# Ensure the file is empty.
open(logpath, 'w').close()
self.RunGsUtil(['cp', '-L', logpath, suri(key_uri), fpath],
return_stdout=True)
with open(logpath, 'r') as f:
lines = f.readlines()
self.assertEqual(len(lines), 2)
expected_headers = ['Source', 'Destination', 'Start', 'End', 'Md5',
'UploadId', 'Source Size', 'Bytes Transferred',
'Result', 'Description']
self.assertEqual(expected_headers, lines[0].strip().split(','))
results = lines[1].strip().split(',')
self.assertEqual(results[0][:5], '%s://' %
self.default_provider) # source
self.assertEqual(results[1][:7], 'file://') # destination
date_format = '%Y-%m-%dT%H:%M:%S.%fZ'
start_date = datetime.datetime.strptime(results[2], date_format)
end_date = datetime.datetime.strptime(results[3], date_format)
    self.assertGreater(end_date, start_date)
self.assertEqual(results[4], 'rL0Y20zC+Fzt72VPzMSk2A==') # md5
self.assertEqual(int(results[6]), 3) # Source Size
# Bytes transferred might be more than 3 if the file was gzipped, since
# the minimum gzip header is 10 bytes.
self.assertGreaterEqual(int(results[7]), 3) # Bytes Transferred
self.assertEqual(results[8], 'OK') # Result
@PerformsFileToObjectUpload
def test_copy_unicode_non_ascii_filename(self):
key_uri = self.CreateObject(contents='foo')
# Make file large enough to cause a resumable upload (which hashes filename
# to construct tracker filename).
fpath = self.CreateTempFile(file_name=u'Аудиоархив',
contents='x' * 3 * 1024 * 1024)
fpath_bytes = fpath.encode(UTF8)
stderr = self.RunGsUtil(['cp', fpath_bytes, suri(key_uri)],
return_stderr=True)
self.assertIn('Copying file:', stderr)
  # Note: We originally implemented a test
  # (test_copy_invalid_unicode_filename) verifying that invalid unicode
  # filenames were skipped, but it turned out os.walk() on MacOS has no
  # problem with such files (so that test failed). Given that, we removed
  # the test.
def test_gzip_upload_and_download(self):
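    """Tests that cp -z gzips only the listed extensions on upload."""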
bucket_uri = self.CreateBucket()
contents = 'x' * 10000
tmpdir = self.CreateTempDir()
self.CreateTempFile(file_name='test.html', tmpdir=tmpdir, contents=contents)
self.CreateTempFile(file_name='test.js', tmpdir=tmpdir, contents=contents)
self.CreateTempFile(file_name='test.txt', tmpdir=tmpdir, contents=contents)
# Test that copying specifying only 2 of the 3 prefixes gzips the correct
# files, and test that including whitespace in the extension list works.
self.RunGsUtil(['cp', '-z', 'js, html',
os.path.join(tmpdir, 'test.*'), suri(bucket_uri)])
self.AssertNObjectsInBucket(bucket_uri, 3)
uri1 = suri(bucket_uri, 'test.html')
uri2 = suri(bucket_uri, 'test.js')
uri3 = suri(bucket_uri, 'test.txt')
stdout = self.RunGsUtil(['stat', uri1], return_stdout=True)
self.assertRegexpMatches(stdout, r'Content-Encoding:\s+gzip')
stdout = self.RunGsUtil(['stat', uri2], return_stdout=True)
self.assertRegexpMatches(stdout, r'Content-Encoding:\s+gzip')
stdout = self.RunGsUtil(['stat', uri3], return_stdout=True)
self.assertNotRegexpMatches(stdout, r'Content-Encoding:\s+gzip')
fpath4 = self.CreateTempFile()
for uri in (uri1, uri2, uri3):
self.RunGsUtil(['cp', uri, suri(fpath4)])
with open(fpath4, 'r') as f:
self.assertEqual(f.read(), contents)
def test_upload_with_subdir_and_unexpanded_wildcard(self):
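    """Tests cp -R with a wildcard that expands to a nested subdirectory."""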
fpath1 = self.CreateTempFile(file_name=('tmp', 'x', 'y', 'z'))
bucket_uri = self.CreateBucket()
wildcard_uri = '%s*' % fpath1[:-5]
stderr = self.RunGsUtil(['cp', '-R', wildcard_uri, suri(bucket_uri)],
return_stderr=True)
self.assertIn('Copying file:', stderr)
self.AssertNObjectsInBucket(bucket_uri, 1)
def test_cp_object_ending_with_slash(self):
"""Tests that cp works with object names ending with slash."""
tmpdir = self.CreateTempDir()
bucket_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket_uri,
object_name='abc/',
contents='dir')
self.CreateObject(bucket_uri=bucket_uri,
object_name='abc/def',
contents='def')
self.AssertNObjectsInBucket(bucket_uri, 2)
self.RunGsUtil(['cp', '-R', suri(bucket_uri), tmpdir])
# Check that files in the subdir got copied even though subdir object
# download was skipped.
with open(os.path.join(tmpdir, bucket_uri.bucket_name, 'abc', 'def')) as f:
self.assertEquals('def', '\n'.join(f.readlines()))
def test_cp_without_read_access(self):
"""Tests that cp fails without read access to the object."""
# TODO: With 401's triggering retries in apitools, this test will take
# a long time. Ideally, make apitools accept a num_retries config for this
# until we stop retrying the 401's.
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri, contents='foo')
# Use @Retry as hedge against bucket listing eventual consistency.
self.AssertNObjectsInBucket(bucket_uri, 1)
with self.SetAnonymousBotoCreds():
stderr = self.RunGsUtil(['cp', suri(object_uri), 'foo'],
return_stderr=True, expected_status=1)
self.assertIn('AccessDenied', stderr)
@unittest.skipIf(IS_WINDOWS, 'os.symlink() is not available on Windows.')
def test_cp_minus_e(self):
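    """Tests that cp -e skips symbolic links."""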
fpath_dir = self.CreateTempDir()
fpath1 = self.CreateTempFile(tmpdir=fpath_dir)
fpath2 = os.path.join(fpath_dir, 'cp_minus_e')
bucket_uri = self.CreateBucket()
os.symlink(fpath1, fpath2)
stderr = self.RunGsUtil(
['cp', '-e', '%s%s*' % (fpath_dir, os.path.sep),
suri(bucket_uri, 'files')],
return_stderr=True)
self.assertIn('Copying file', stderr)
self.assertIn('Skipping symbolic link file', stderr)
def test_cp_multithreaded_wildcard(self):
"""Tests that cp -m works with a wildcard."""
num_test_files = 5
tmp_dir = self.CreateTempDir(test_files=num_test_files)
bucket_uri = self.CreateBucket()
wildcard_uri = '%s%s*' % (tmp_dir, os.sep)
self.RunGsUtil(['-m', 'cp', wildcard_uri, suri(bucket_uri)])
self.AssertNObjectsInBucket(bucket_uri, num_test_files)
def test_cp_duplicate_source_args(self):
"""Tests that cp -m works when a source argument is provided twice."""
object_contents = 'edge'
object_uri = self.CreateObject(object_name='foo', contents=object_contents)
tmp_dir = self.CreateTempDir()
self.RunGsUtil(['-m', 'cp', suri(object_uri), suri(object_uri), tmp_dir])
with open(os.path.join(tmp_dir, 'foo'), 'r') as in_fp:
contents = in_fp.read()
    # Contents should not be duplicated.
self.assertEqual(contents, object_contents)
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_upload_break(self):
"""Tests that an upload can be resumed after a connection break."""
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(contents='a' * self.halt_size)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(_HaltingCopyCallbackHandler(True, 5)))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['cp', '--testcallbackfile', test_callback_file,
fpath, suri(bucket_uri)],
expected_status=1, return_stderr=True)
self.assertIn('Artifically halting upload', stderr)
stderr = self.RunGsUtil(['cp', fpath, suri(bucket_uri)],
return_stderr=True)
self.assertIn('Resuming upload', stderr)
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_upload_retry(self):
"""Tests that a resumable upload completes with one retry."""
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(contents='a' * self.halt_size)
# TODO: Raising an httplib or socket error blocks bucket teardown
# in JSON for 60-120s on a multiprocessing lock acquire. Figure out why;
# until then, raise an apitools retryable exception.
if self.test_api == ApiSelector.XML:
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(_ResumableUploadRetryHandler(
5, httplib.BadStatusLine, ('unused',))))
else:
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(_ResumableUploadRetryHandler(
5, apitools_exceptions.BadStatusCodeError,
('unused', 'unused', 'unused'))))
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['-D', 'cp', '--testcallbackfile',
test_callback_file, fpath, suri(bucket_uri)],
return_stderr=1)
if self.test_api == ApiSelector.XML:
self.assertIn('Got retryable failure', stderr)
else:
self.assertIn('Retrying', stderr)
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_streaming_upload_retry(self):
"""Tests that a streaming resumable upload completes with one retry."""
if self.test_api == ApiSelector.XML:
return unittest.skip('XML does not support resumable streaming uploads.')
bucket_uri = self.CreateBucket()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(_ResumableUploadRetryHandler(
5, apitools_exceptions.BadStatusCodeError,
('unused', 'unused', 'unused'))))
# Need to reduce the JSON chunk size since streaming uploads buffer a
# full chunk.
boto_configs_for_test = [('GSUtil', 'json_resumable_chunk_size',
str(256 * ONE_KIB)),
('Boto', 'num_retries', '2')]
with SetBotoConfigForTest(boto_configs_for_test):
stderr = self.RunGsUtil(
['-D', 'cp', '--testcallbackfile', test_callback_file, '-',
suri(bucket_uri, 'foo')],
stdin='a' * 512 * ONE_KIB, return_stderr=1)
self.assertIn('Retrying', stderr)
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_upload(self):
"""Tests that a basic resumable upload completes successfully."""
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(contents='a' * self.halt_size)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
self.RunGsUtil(['cp', fpath, suri(bucket_uri)])
@SkipForS3('No resumable upload support for S3.')
def test_resumable_upload_break_leaves_tracker(self):
"""Tests that a tracker file is created with a resumable upload."""
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(file_name='foo',
contents='a' * self.halt_size)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
tracker_filename = GetTrackerFilePath(
StorageUrlFromString(suri(bucket_uri, 'foo')),
TrackerFileType.UPLOAD, self.test_api)
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(_HaltingCopyCallbackHandler(True, 5)))
try:
stderr = self.RunGsUtil(['cp', '--testcallbackfile', test_callback_file,
fpath, suri(bucket_uri, 'foo')],
expected_status=1, return_stderr=True)
self.assertIn('Artifically halting upload', stderr)
self.assertTrue(os.path.exists(tracker_filename),
'Tracker file %s not present.' % tracker_filename)
finally:
if os.path.exists(tracker_filename):
os.unlink(tracker_filename)
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_upload_break_file_size_change(self):
"""Tests a resumable upload where the uploaded file changes size.
This should fail when we read the tracker data.
"""
bucket_uri = self.CreateBucket()
tmp_dir = self.CreateTempDir()
fpath = self.CreateTempFile(file_name='foo', tmpdir=tmp_dir,
contents='a' * self.halt_size)
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(_HaltingCopyCallbackHandler(True, 5)))
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['cp', '--testcallbackfile', test_callback_file,
fpath, suri(bucket_uri)],
expected_status=1, return_stderr=True)
self.assertIn('Artifically halting upload', stderr)
fpath = self.CreateTempFile(file_name='foo', tmpdir=tmp_dir,
contents='a' * self.halt_size * 2)
stderr = self.RunGsUtil(['cp', fpath, suri(bucket_uri)],
expected_status=1, return_stderr=True)
self.assertIn('ResumableUploadAbortException', stderr)
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_upload_break_file_content_change(self):
"""Tests a resumable upload where the uploaded file changes content."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'XML doesn\'t make separate HTTP calls at fixed-size boundaries for '
'resumable uploads, so we can\'t guarantee that the server saves a '
'specific part of the upload.')
bucket_uri = self.CreateBucket()
tmp_dir = self.CreateTempDir()
fpath = self.CreateTempFile(file_name='foo', tmpdir=tmp_dir,
contents='a' * ONE_KIB * 512)
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(_HaltingCopyCallbackHandler(True,
int(ONE_KIB) * 384)))
resumable_threshold_for_test = (
'GSUtil', 'resumable_threshold', str(ONE_KIB))
resumable_chunk_size_for_test = (
'GSUtil', 'json_resumable_chunk_size', str(ONE_KIB * 256))
with SetBotoConfigForTest([resumable_threshold_for_test,
resumable_chunk_size_for_test]):
stderr = self.RunGsUtil(['cp', '--testcallbackfile', test_callback_file,
fpath, suri(bucket_uri)],
expected_status=1, return_stderr=True)
self.assertIn('Artifically halting upload', stderr)
fpath = self.CreateTempFile(file_name='foo', tmpdir=tmp_dir,
contents='b' * ONE_KIB * 512)
stderr = self.RunGsUtil(['cp', fpath, suri(bucket_uri)],
expected_status=1, return_stderr=True)
self.assertIn('doesn\'t match cloud-supplied digest', stderr)
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_upload_break_file_smaller_size(self):
"""Tests a resumable upload where the uploaded file changes content.
This should fail hash validation.
"""
bucket_uri = self.CreateBucket()
tmp_dir = self.CreateTempDir()
fpath = self.CreateTempFile(file_name='foo', tmpdir=tmp_dir,
contents='a' * ONE_KIB * 512)
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(_HaltingCopyCallbackHandler(True,
int(ONE_KIB) * 384)))
resumable_threshold_for_test = (
'GSUtil', 'resumable_threshold', str(ONE_KIB))
resumable_chunk_size_for_test = (
'GSUtil', 'json_resumable_chunk_size', str(ONE_KIB * 256))
with SetBotoConfigForTest([resumable_threshold_for_test,
resumable_chunk_size_for_test]):
stderr = self.RunGsUtil(['cp', '--testcallbackfile', test_callback_file,
fpath, suri(bucket_uri)],
expected_status=1, return_stderr=True)
self.assertIn('Artifically halting upload', stderr)
fpath = self.CreateTempFile(file_name='foo', tmpdir=tmp_dir,
contents='a' * ONE_KIB)
stderr = self.RunGsUtil(['cp', fpath, suri(bucket_uri)],
expected_status=1, return_stderr=True)
self.assertIn('ResumableUploadAbortException', stderr)
# This temporarily changes the tracker directory to unwritable which
# interferes with any parallel running tests that use the tracker directory.
@NotParallelizable
@SkipForS3('No resumable upload support for S3.')
@unittest.skipIf(IS_WINDOWS, 'chmod on dir unsupported on Windows.')
@PerformsFileToObjectUpload
def test_cp_unwritable_tracker_file(self):
"""Tests a resumable upload with an unwritable tracker file."""
bucket_uri = self.CreateBucket()
tracker_filename = GetTrackerFilePath(
StorageUrlFromString(suri(bucket_uri, 'foo')),
TrackerFileType.UPLOAD, self.test_api)
tracker_dir = os.path.dirname(tracker_filename)
fpath = self.CreateTempFile(file_name='foo', contents='a' * ONE_KIB)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
save_mod = os.stat(tracker_dir).st_mode
try:
os.chmod(tracker_dir, 0)
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['cp', fpath, suri(bucket_uri)],
expected_status=1, return_stderr=True)
self.assertIn('Couldn\'t write tracker file', stderr)
finally:
os.chmod(tracker_dir, save_mod)
if os.path.exists(tracker_filename):
os.unlink(tracker_filename)
# This temporarily changes the tracker directory to unwritable which
# interferes with any parallel running tests that use the tracker directory.
@NotParallelizable
@unittest.skipIf(IS_WINDOWS, 'chmod on dir unsupported on Windows.')
def test_cp_unwritable_tracker_file_download(self):
"""Tests downloads with an unwritable tracker file."""
object_uri = self.CreateObject(contents='foo' * ONE_KIB)
tracker_filename = GetTrackerFilePath(
StorageUrlFromString(suri(object_uri)),
TrackerFileType.DOWNLOAD, self.test_api)
tracker_dir = os.path.dirname(tracker_filename)
fpath = self.CreateTempFile()
save_mod = os.stat(tracker_dir).st_mode
try:
os.chmod(tracker_dir, 0)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(EIGHT_MIB))
with SetBotoConfigForTest([boto_config_for_test]):
# Should succeed because we are below the threshold.
self.RunGsUtil(['cp', suri(object_uri), fpath])
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
expected_status=1, return_stderr=True)
self.assertIn('Couldn\'t write tracker file', stderr)
finally:
os.chmod(tracker_dir, save_mod)
if os.path.exists(tracker_filename):
os.unlink(tracker_filename)
def test_cp_resumable_download_break(self):
"""Tests that a download can be resumed after a connection break."""
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri, object_name='foo',
contents='a' * self.halt_size)
fpath = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(_HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['cp', '--testcallbackfile', test_callback_file,
suri(object_uri), fpath],
expected_status=1, return_stderr=True)
self.assertIn('Artifically halting download.', stderr)
tracker_filename = GetTrackerFilePath(
StorageUrlFromString(fpath), TrackerFileType.DOWNLOAD, self.test_api)
self.assertTrue(os.path.isfile(tracker_filename))
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True)
self.assertIn('Resuming download', stderr)
def test_cp_resumable_download_etag_differs(self):
"""Tests that download restarts the file when the source object changes.
This causes the etag not to match.
"""
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri, object_name='foo',
contents='a' * self.halt_size)
fpath = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(_HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
# This will create a tracker file with an ETag.
stderr = self.RunGsUtil(['cp', '--testcallbackfile', test_callback_file,
suri(object_uri), fpath],
expected_status=1, return_stderr=True)
self.assertIn('Artifically halting download.', stderr)
# Create a new object with different contents - it should have a
# different ETag since the content has changed.
object_uri = self.CreateObject(bucket_uri=bucket_uri, object_name='foo',
contents='b' * self.halt_size)
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True)
self.assertNotIn('Resuming download', stderr)
def test_cp_resumable_download_file_larger(self):
"""Tests download deletes the tracker file when existing file is larger."""
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile()
object_uri = self.CreateObject(bucket_uri=bucket_uri, object_name='foo',
contents='a' * self.halt_size)
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(_HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['cp', '--testcallbackfile', test_callback_file,
suri(object_uri), fpath],
expected_status=1, return_stderr=True)
self.assertIn('Artifically halting download.', stderr)
with open(fpath, 'w') as larger_file:
for _ in range(self.halt_size * 2):
larger_file.write('a')
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
expected_status=1, return_stderr=True)
self.assertNotIn('Resuming download', stderr)
self.assertIn('is larger', stderr)
self.assertIn('Deleting tracker file', stderr)
def test_cp_resumable_download_content_differs(self):
"""Tests that we do not re-download when tracker file matches existing file.
We only compare size, not contents, so re-download should not occur even
    though the contents are technically different. However, hash validation on
    the downloaded file still occurs, and the file is then deleted because the
    hashes differ.
"""
bucket_uri = self.CreateBucket()
tmp_dir = self.CreateTempDir()
fpath = self.CreateTempFile(tmpdir=tmp_dir, contents='abcd' * ONE_KIB)
object_uri = self.CreateObject(bucket_uri=bucket_uri, object_name='foo',
contents='efgh' * ONE_KIB)
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)], return_stdout=True)
etag_match = re.search(r'\s*ETag:\s*(.*)', stdout)
self.assertIsNotNone(etag_match, 'Could not get object ETag')
self.assertEqual(len(etag_match.groups()), 1,
'Did not match expected single ETag')
etag = etag_match.group(1)
tracker_filename = GetTrackerFilePath(
StorageUrlFromString(fpath), TrackerFileType.DOWNLOAD, self.test_api)
try:
with open(tracker_filename, 'w') as tracker_fp:
tracker_fp.write(etag)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True, expected_status=1)
self.assertIn('Download already complete for file', stderr)
self.assertIn('doesn\'t match cloud-supplied digest', stderr)
# File and tracker file should be deleted.
self.assertFalse(os.path.isfile(fpath))
self.assertFalse(os.path.isfile(tracker_filename))
finally:
if os.path.exists(tracker_filename):
os.unlink(tracker_filename)
def test_cp_resumable_download_content_matches(self):
"""Tests download no-ops when tracker file matches existing file."""
bucket_uri = self.CreateBucket()
tmp_dir = self.CreateTempDir()
matching_contents = 'abcd' * ONE_KIB
fpath = self.CreateTempFile(tmpdir=tmp_dir, contents=matching_contents)
object_uri = self.CreateObject(bucket_uri=bucket_uri, object_name='foo',
contents=matching_contents)
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)], return_stdout=True)
etag_match = re.search(r'\s*ETag:\s*(.*)', stdout)
self.assertIsNotNone(etag_match, 'Could not get object ETag')
self.assertEqual(len(etag_match.groups()), 1,
'Did not match expected single ETag')
etag = etag_match.group(1)
tracker_filename = GetTrackerFilePath(
StorageUrlFromString(fpath), TrackerFileType.DOWNLOAD, self.test_api)
with open(tracker_filename, 'w') as tracker_fp:
tracker_fp.write(etag)
try:
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True)
self.assertIn('Download already complete for file', stderr)
# Tracker file should be removed after successful hash validation.
self.assertFalse(os.path.isfile(tracker_filename))
finally:
if os.path.exists(tracker_filename):
os.unlink(tracker_filename)
def test_cp_resumable_download_tracker_file_not_matches(self):
"""Tests that download overwrites when tracker file etag does not match."""
bucket_uri = self.CreateBucket()
tmp_dir = self.CreateTempDir()
fpath = self.CreateTempFile(tmpdir=tmp_dir, contents='abcd' * ONE_KIB)
object_uri = self.CreateObject(bucket_uri=bucket_uri, object_name='foo',
contents='efgh' * ONE_KIB)
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)], return_stdout=True)
etag_match = re.search(r'\s*ETag:\s*(.*)', stdout)
self.assertIsNotNone(etag_match, 'Could not get object ETag')
self.assertEqual(len(etag_match.groups()), 1,
'Did not match regex for exactly one object ETag')
etag = etag_match.group(1)
etag += 'nonmatching'
tracker_filename = GetTrackerFilePath(
StorageUrlFromString(fpath), TrackerFileType.DOWNLOAD, self.test_api)
with open(tracker_filename, 'w') as tracker_fp:
tracker_fp.write(etag)
try:
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True)
self.assertNotIn('Resuming download', stderr)
# Ensure the file was overwritten.
with open(fpath, 'r') as in_fp:
contents = in_fp.read()
self.assertEqual(contents, 'efgh' * ONE_KIB,
'File not overwritten when it should have been '
'due to a non-matching tracker file.')
self.assertFalse(os.path.isfile(tracker_filename))
finally:
if os.path.exists(tracker_filename):
os.unlink(tracker_filename)
def test_cp_resumable_download_gzip(self):
"""Tests that download can be resumed successfully with a gzipped file."""
    # Generate some reasonably incompressible data. This compresses to roughly
    # 128K in practice, but we assert specifically below that it is
# larger than self.halt_size to guarantee that we can halt the download
# partway through.
object_uri = self.CreateObject()
random.seed(0)
contents = str([random.choice(string.ascii_letters)
for _ in xrange(ONE_KIB * 128)])
random.seed() # Reset the seed for any other tests.
fpath1 = self.CreateTempFile(file_name='unzipped.txt', contents=contents)
self.RunGsUtil(['cp', '-z', 'txt', suri(fpath1), suri(object_uri)])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _GetObjectSize():
stdout = self.RunGsUtil(['du', suri(object_uri)], return_stdout=True)
size_match = re.search(r'(\d+)\s+.*', stdout)
self.assertIsNotNone(size_match, 'Could not get object size')
self.assertEqual(len(size_match.groups()), 1,
'Did not match regex for exactly one object size.')
return long(size_match.group(1))
object_size = _GetObjectSize()
self.assertGreaterEqual(object_size, self.halt_size,
                            'Compressed object size was not large enough to '
'allow for a halted download, so the test results '
'would be invalid. Please increase the compressed '
'object size in the test.')
fpath2 = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(_HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['cp', '--testcallbackfile', test_callback_file,
suri(object_uri), suri(fpath2)],
return_stderr=True, expected_status=1)
self.assertIn('Artifically halting download.', stderr)
tracker_filename = GetTrackerFilePath(
StorageUrlFromString(fpath2), TrackerFileType.DOWNLOAD, self.test_api)
self.assertTrue(os.path.isfile(tracker_filename))
self.assertIn('Downloading to temp gzip filename', stderr)
# We should have a temporary gzipped file, a tracker file, and no
# final file yet.
self.assertTrue(os.path.isfile('%s_.gztmp' % fpath2))
stderr = self.RunGsUtil(['cp', suri(object_uri), suri(fpath2)],
return_stderr=True)
self.assertIn('Resuming download', stderr)
with open(fpath2, 'r') as f:
self.assertEqual(f.read(), contents, 'File contents did not match.')
self.assertFalse(os.path.isfile(tracker_filename))
self.assertFalse(os.path.isfile('%s_.gztmp' % fpath2))
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_upload_bucket_deleted(self):
"""Tests that a not found exception is raised if bucket no longer exists."""
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(contents='a' * 2 * ONE_KIB)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(
_DeleteBucketThenStartOverCopyCallbackHandler(5, bucket_uri)))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['cp', '--testcallbackfile', test_callback_file,
fpath, suri(bucket_uri)], return_stderr=True,
expected_status=1)
self.assertIn('Deleting bucket', stderr)
self.assertIn('bucket does not exist', stderr)
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_upload_start_over_http_error(self):
for start_over_error in (404, 410):
self.start_over_error_test_helper(start_over_error)
def start_over_error_test_helper(self, http_error_num):
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(contents='a' * 2 * ONE_KIB)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
if self.test_api == ApiSelector.JSON:
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(_JSONForceHTTPErrorCopyCallbackHandler(5, 404)))
elif self.test_api == ApiSelector.XML:
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(
_XMLResumableUploadStartOverCopyCallbackHandler(5)))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['cp', '--testcallbackfile', test_callback_file,
fpath, suri(bucket_uri)], return_stderr=True)
self.assertIn('Restarting upload from scratch', stderr)
def test_cp_minus_c(self):
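    """Tests that cp -c continues copying after an error on one source."""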
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri, object_name='foo',
contents='foo')
self.RunGsUtil(
['cp', '-c', suri(bucket_uri) + '/foo2', suri(object_uri),
suri(bucket_uri) + '/dir/'],
expected_status=1)
self.RunGsUtil(['stat', '%s/dir/foo' % suri(bucket_uri)])
def test_rewrite_cp(self):
"""Tests the JSON Rewrite API."""
if self.test_api == ApiSelector.XML:
return unittest.skip('Rewrite API is only supported in JSON.')
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri, object_name='foo',
contents='bar')
gsutil_api = GcsJsonApi(BucketStorageUri, logging.getLogger(),
self.default_provider)
key = object_uri.get_key()
src_obj_metadata = apitools_messages.Object(
name=key.name, bucket=key.bucket.name, contentType=key.content_type)
dst_obj_metadata = apitools_messages.Object(
bucket=src_obj_metadata.bucket,
name=self.MakeTempName('object'),
contentType=src_obj_metadata.contentType)
gsutil_api.CopyObject(src_obj_metadata, dst_obj_metadata)
self.assertEqual(
gsutil_api.GetObjectMetadata(src_obj_metadata.bucket,
src_obj_metadata.name,
fields=['md5Hash']).md5Hash,
gsutil_api.GetObjectMetadata(dst_obj_metadata.bucket,
dst_obj_metadata.name,
fields=['md5Hash']).md5Hash,
'Error: Rewritten object\'s hash doesn\'t match source object.')
def test_rewrite_cp_resume(self):
"""Tests the JSON Rewrite API, breaking and resuming via a tracker file."""
if self.test_api == ApiSelector.XML:
return unittest.skip('Rewrite API is only supported in JSON.')
bucket_uri = self.CreateBucket()
# Second bucket needs to be a different storage class so the service
# actually rewrites the bytes.
bucket_uri2 = self.CreateBucket(
storage_class='DURABLE_REDUCED_AVAILABILITY')
# maxBytesPerCall must be >= 1 MiB, so create an object > 2 MiB because we
    # need 2 responses from the service: 1 success, 1 failure prior to
# completion.
object_uri = self.CreateObject(bucket_uri=bucket_uri, object_name='foo',
contents=('12'*ONE_MIB) + 'bar',
prefer_json_api=True)
gsutil_api = GcsJsonApi(BucketStorageUri, logging.getLogger(),
self.default_provider)
key = object_uri.get_key()
src_obj_metadata = apitools_messages.Object(
name=key.name, bucket=key.bucket.name, contentType=key.content_type,
etag=key.etag.strip('"\''))
dst_obj_name = self.MakeTempName('object')
dst_obj_metadata = apitools_messages.Object(
bucket=bucket_uri2.bucket_name,
name=dst_obj_name,
contentType=src_obj_metadata.contentType)
tracker_file_name = GetRewriteTrackerFilePath(
src_obj_metadata.bucket, src_obj_metadata.name,
dst_obj_metadata.bucket, dst_obj_metadata.name, self.test_api)
try:
try:
gsutil_api.CopyObject(
src_obj_metadata, dst_obj_metadata,
progress_callback=_HaltingRewriteCallbackHandler(ONE_MIB*2).call,
max_bytes_per_call=ONE_MIB)
self.fail('Expected _RewriteHaltException.')
except _RewriteHaltException:
pass
# Tracker file should be left over.
self.assertTrue(os.path.exists(tracker_file_name))
# Now resume. Callback ensures we didn't start over.
gsutil_api.CopyObject(
src_obj_metadata, dst_obj_metadata,
progress_callback=_EnsureRewriteResumeCallbackHandler(ONE_MIB*2).call,
max_bytes_per_call=ONE_MIB)
# Copy completed; tracker file should be deleted.
self.assertFalse(os.path.exists(tracker_file_name))
self.assertEqual(
gsutil_api.GetObjectMetadata(src_obj_metadata.bucket,
src_obj_metadata.name,
fields=['md5Hash']).md5Hash,
gsutil_api.GetObjectMetadata(dst_obj_metadata.bucket,
dst_obj_metadata.name,
fields=['md5Hash']).md5Hash,
'Error: Rewritten object\'s hash doesn\'t match source object.')
finally:
# Clean up if something went wrong.
DeleteTrackerFile(tracker_file_name)
def test_rewrite_cp_resume_source_changed(self):
"""Tests that Rewrite starts over when the source object has changed."""
if self.test_api == ApiSelector.XML:
return unittest.skip('Rewrite API is only supported in JSON.')
bucket_uri = self.CreateBucket()
# Second bucket needs to be a different storage class so the service
# actually rewrites the bytes.
bucket_uri2 = self.CreateBucket(
storage_class='DURABLE_REDUCED_AVAILABILITY')
# maxBytesPerCall must be >= 1 MiB, so create an object > 2 MiB because we
    # need 2 responses from the service: 1 success, 1 failure prior to
# completion.
object_uri = self.CreateObject(bucket_uri=bucket_uri, object_name='foo',
contents=('12'*ONE_MIB) + 'bar',
prefer_json_api=True)
gsutil_api = GcsJsonApi(BucketStorageUri, logging.getLogger(),
self.default_provider)
key = object_uri.get_key()
src_obj_metadata = apitools_messages.Object(
name=key.name, bucket=key.bucket.name, contentType=key.content_type,
etag=key.etag.strip('"\''))
dst_obj_name = self.MakeTempName('object')
dst_obj_metadata = apitools_messages.Object(
bucket=bucket_uri2.bucket_name,
name=dst_obj_name,
contentType=src_obj_metadata.contentType)
tracker_file_name = GetRewriteTrackerFilePath(
src_obj_metadata.bucket, src_obj_metadata.name,
dst_obj_metadata.bucket, dst_obj_metadata.name, self.test_api)
try:
try:
gsutil_api.CopyObject(
src_obj_metadata, dst_obj_metadata,
progress_callback=_HaltingRewriteCallbackHandler(ONE_MIB*2).call,
max_bytes_per_call=ONE_MIB)
self.fail('Expected _RewriteHaltException.')
except _RewriteHaltException:
pass
# Overwrite the original object.
object_uri2 = self.CreateObject(bucket_uri=bucket_uri, object_name='foo',
contents='bar', prefer_json_api=True)
key2 = object_uri2.get_key()
src_obj_metadata2 = apitools_messages.Object(
name=key2.name, bucket=key2.bucket.name,
contentType=key2.content_type, etag=key2.etag.strip('"\''))
# Tracker file for original object should still exist.
self.assertTrue(os.path.exists(tracker_file_name))
# Copy the new object.
gsutil_api.CopyObject(src_obj_metadata2, dst_obj_metadata,
max_bytes_per_call=ONE_MIB)
# Copy completed; original tracker file should be deleted.
self.assertFalse(os.path.exists(tracker_file_name))
self.assertEqual(
gsutil_api.GetObjectMetadata(src_obj_metadata2.bucket,
src_obj_metadata2.name,
fields=['md5Hash']).md5Hash,
gsutil_api.GetObjectMetadata(dst_obj_metadata.bucket,
dst_obj_metadata.name,
fields=['md5Hash']).md5Hash,
'Error: Rewritten object\'s hash doesn\'t match source object.')
finally:
# Clean up if something went wrong.
DeleteTrackerFile(tracker_file_name)
def test_rewrite_cp_resume_command_changed(self):
"""Tests that Rewrite starts over when the arguments changed."""
if self.test_api == ApiSelector.XML:
return unittest.skip('Rewrite API is only supported in JSON.')
bucket_uri = self.CreateBucket()
# Second bucket needs to be a different storage class so the service
# actually rewrites the bytes.
bucket_uri2 = self.CreateBucket(
storage_class='DURABLE_REDUCED_AVAILABILITY')
# maxBytesPerCall must be >= 1 MiB, so create an object > 2 MiB because we
    # need 2 responses from the service: 1 success, 1 failure prior to
# completion.
object_uri = self.CreateObject(bucket_uri=bucket_uri, object_name='foo',
contents=('12'*ONE_MIB) + 'bar',
prefer_json_api=True)
gsutil_api = GcsJsonApi(BucketStorageUri, logging.getLogger(),
self.default_provider)
key = object_uri.get_key()
src_obj_metadata = apitools_messages.Object(
name=key.name, bucket=key.bucket.name, contentType=key.content_type,
etag=key.etag.strip('"\''))
dst_obj_name = self.MakeTempName('object')
dst_obj_metadata = apitools_messages.Object(
bucket=bucket_uri2.bucket_name,
name=dst_obj_name,
contentType=src_obj_metadata.contentType)
tracker_file_name = GetRewriteTrackerFilePath(
src_obj_metadata.bucket, src_obj_metadata.name,
dst_obj_metadata.bucket, dst_obj_metadata.name, self.test_api)
try:
try:
gsutil_api.CopyObject(
src_obj_metadata, dst_obj_metadata, canned_acl='private',
progress_callback=_HaltingRewriteCallbackHandler(ONE_MIB*2).call,
max_bytes_per_call=ONE_MIB)
self.fail('Expected _RewriteHaltException.')
except _RewriteHaltException:
pass
# Tracker file for original object should still exist.
self.assertTrue(os.path.exists(tracker_file_name))
# Copy the same object but with different call parameters.
gsutil_api.CopyObject(src_obj_metadata, dst_obj_metadata,
canned_acl='public-read',
max_bytes_per_call=ONE_MIB)
# Copy completed; original tracker file should be deleted.
self.assertFalse(os.path.exists(tracker_file_name))
new_obj_metadata = gsutil_api.GetObjectMetadata(
dst_obj_metadata.bucket, dst_obj_metadata.name,
fields=['acl,md5Hash'])
self.assertEqual(
gsutil_api.GetObjectMetadata(src_obj_metadata.bucket,
src_obj_metadata.name,
fields=['md5Hash']).md5Hash,
new_obj_metadata.md5Hash,
'Error: Rewritten object\'s hash doesn\'t match source object.')
# New object should have a public-read ACL from the second command.
found_public_acl = False
for acl_entry in new_obj_metadata.acl:
if acl_entry.entity == 'allUsers':
found_public_acl = True
self.assertTrue(found_public_acl,
'New object was not written with a public ACL.')
finally:
# Clean up if something went wrong.
DeleteTrackerFile(tracker_file_name)
class TestCpUnitTests(testcase.GsUtilUnitTestCase):
"""Unit tests for gsutil cp."""
def testDownloadWithNoHashAvailable(self):
"""Tests a download with no valid server-supplied hash."""
# S3 should have a special message for non-MD5 etags.
bucket_uri = self.CreateBucket(provider='s3')
object_uri = self.CreateObject(bucket_uri=bucket_uri, contents='foo')
object_uri.get_key().etag = '12345' # Not an MD5
dst_dir = self.CreateTempDir()
log_handler = self.RunCommand(
'cp', [suri(object_uri), dst_dir], return_log_handler=True)
warning_messages = log_handler.messages['warning']
self.assertEquals(2, len(warning_messages))
self.assertRegexpMatches(
warning_messages[0],
r'Non-MD5 etag \(12345\) present for key .*, '
r'data integrity checks are not possible')
self.assertIn('Integrity cannot be assured', warning_messages[1])
def test_object_and_prefix_same_name(self):
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri, object_name='foo',
contents='foo')
self.CreateObject(bucket_uri=bucket_uri,
object_name='foo/bar', contents='bar')
fpath = self.CreateTempFile()
# MockKey doesn't support hash_algs, so the MD5 will not match.
with SetBotoConfigForTest([('GSUtil', 'check_hashes', 'never')]):
self.RunCommand('cp', [suri(object_uri), fpath])
with open(fpath, 'r') as f:
self.assertEqual(f.read(), 'foo')
def test_cp_upload_respects_no_hashes(self):
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(contents='abcd')
with SetBotoConfigForTest([('GSUtil', 'check_hashes', 'never')]):
log_handler = self.RunCommand('cp', [fpath, suri(bucket_uri)],
return_log_handler=True)
warning_messages = log_handler.messages['warning']
self.assertEquals(1, len(warning_messages))
self.assertIn('Found no hashes to validate object upload',
warning_messages[0])
| bsd-3-clause |
sestrella/ansible | lib/ansible/modules/network/f5/bigip_firewall_log_profile.py | 23 | 28678 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2019, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_firewall_log_profile
short_description: Manages AFM logging profiles configured in the system
description:
  - Manages AFM logging profiles configured in the system.
version_added: 2.9
options:
name:
description:
- Specifies the name of the log profile.
type: str
required: True
description:
description:
- Description of the log profile.
type: str
dos_protection:
description:
- Configures DoS related settings of the log profile.
suboptions:
dns_publisher:
description:
- Specifies the name of the log publisher used for DNS DoS events.
- To specify the log_publisher on a different partition from the AFM log profile, specify the name in fullpath
format, e.g. C(/Foobar/log-publisher), otherwise the partition for log publisher
is inferred from C(partition) module parameter.
type: str
sip_publisher:
description:
- Specifies the name of the log publisher used for SIP DoS events.
- To specify the log_publisher on a different partition from the AFM log profile, specify the name in fullpath
format, e.g. C(/Foobar/log-publisher), otherwise the partition for log publisher
is inferred from C(partition) module parameter.
type: str
network_publisher:
description:
- Specifies the name of the log publisher used for DoS Network events.
- To specify the log_publisher on a different partition from the AFM log profile, specify the name in fullpath
format, e.g. C(/Foobar/log-publisher), otherwise the partition for log publisher
is inferred from C(partition) module parameter.
type: str
type: dict
ip_intelligence:
description:
- Configures IP Intelligence related settings of the log profile.
suboptions:
log_publisher:
description:
- Specifies the name of the log publisher used for IP Intelligence events.
- To specify the log_publisher on a different partition from the AFM log profile, specify the name in fullpath
format, e.g. C(/Foobar/log-publisher), otherwise the partition for log publisher
is inferred from C(partition) module parameter.
type: str
rate_limit:
description:
- Defines a rate limit for all combined IP intelligence log messages per second. Beyond this rate limit,
log messages are not logged until the threshold drops below the specified rate.
- To specify an indefinite rate, use the value C(indefinite).
- If specifying a numeric rate, the value must be between C(1) and C(4294967295).
type: str
log_rtbh:
description:
- Specifies, when C(yes), that remotely triggered blackholing events are logged.
type: bool
log_shun:
description:
- Specifies, when C(yes), that IP Intelligence shun list events are logged.
- This option can only be set on C(global-network) built-in profile
type: bool
log_translation_fields:
description:
- This option is used to enable or disable the logging of translated (i.e server side) fields in IP
Intelligence log messages.
- Translated fields include (but are not limited to) source address/port, destination address/port,
IP protocol, route domain, and VLAN.
type: bool
type: dict
port_misuse:
description:
- Port Misuse log configuration.
suboptions:
log_publisher:
description:
- Specifies the name of the log publisher used for Port Misuse events.
- To specify the log_publisher on a different partition from the AFM log profile, specify the name in fullpath
format, e.g. C(/Foobar/log-publisher), otherwise the partition for log publisher
is inferred from C(partition) module parameter.
type: str
rate_limit:
description:
- Defines a rate limit for all combined port misuse log messages per second. Beyond this rate limit,
log messages are not logged until the threshold drops below the specified rate.
- To specify an indefinite rate, use the value C(indefinite).
- If specifying a numeric rate, the value must be between C(1) and C(4294967295).
type: str
type: dict
partition:
description:
- Device partition to create log profile on.
- Parameter also used when specifying names for log publishers, unless log publisher names are in fullpath format.
type: str
default: Common
state:
description:
- When C(state) is C(present), ensures the resource exists.
- When C(state) is C(absent), ensures that resource is removed. Attempts to remove built-in system profiles are
ignored and no change is returned.
type: str
choices:
- present
- absent
default: present
extends_documentation_fragment: f5
author:
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create a basic log profile with port misuse
bigip_firewall_log_profile:
name: barbaz
port_misuse:
rate_limit: 30000
log_publisher: local-db-pub
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Change ip_intelligence settings, publisher on different partition, remove port misuse
bigip_firewall_log_profile:
name: barbaz
ip_intelligence:
rate_limit: 400000
log_translation_fields: yes
log_rtbh: yes
log_publisher: "/foobar/non-local-db"
port_misuse:
log_publisher: ""
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Create a log profile with dos protection, different partition
bigip_firewall_log_profile:
name: foobar
partition: foobar
dos_protection:
dns_publisher: "/Common/local-db-pub"
sip_publisher: "non-local-db"
network_publisher: "/Common/local-db-pub"
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Remove log profile
bigip_firewall_log_profile:
name: barbaz
partition: Common
state: absent
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
description:
description: New description of the AFM log profile.
returned: changed
type: str
sample: This is my description
dos_protection:
description: Log publishers used in DoS related settings of the log profile.
type: complex
returned: changed
contains:
dns_publisher:
description: The name of the log publisher used for DNS DoS events.
returned: changed
type: str
sample: "/Common/local-db-publisher"
sip_publisher:
description: The name of the log publisher used for SIP DoS events.
returned: changed
type: str
sample: "/Common/local-db-publisher"
network_publisher:
description: The name of the log publisher used for DoS Network events.
returned: changed
type: str
sample: "/Common/local-db-publisher"
sample: hash/dictionary of values
ip_intelligence:
description: IP Intelligence related settings of the log profile.
type: complex
returned: changed
contains:
log_publisher:
description: The name of the log publisher used for IP Intelligence events.
returned: changed
type: str
sample: "/Common/local-db-publisher"
rate_limit:
description: The rate limit for all combined IP intelligence log messages per second.
returned: changed
type: str
sample: "indefinite"
log_rtbh:
description: Logging of remotely triggered blackholing events.
returned: changed
type: bool
sample: yes
log_shun:
description: Logging of IP Intelligence shun list events.
returned: changed
type: bool
sample: no
log_translation_fields:
description: Logging of translated fields in IP Intelligence log messages.
returned: changed
type: bool
sample: no
sample: hash/dictionary of values
port_misuse:
description: Port Misuse related settings of the log profile.
type: complex
returned: changed
contains:
log_publisher:
description: The name of the log publisher used for Port Misuse events.
returned: changed
type: str
sample: "/Common/local-db-publisher"
rate_limit:
description: The rate limit for all combined Port Misuse log messages per second.
returned: changed
type: str
sample: "indefinite"
sample: hash/dictionary of values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import flatten_boolean
from library.module_utils.network.f5.compare import compare_dictionary
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import flatten_boolean
from ansible.module_utils.network.f5.compare import compare_dictionary
class Parameters(AnsibleF5Parameters):
api_map = {
'ipIntelligence': 'ip_intelligence',
'portMisuse': 'port_misuse',
'protocolDnsDosPublisher': 'dns_publisher',
'protocolSipDosPublisher': 'sip_publisher',
'dosNetworkPublisher': 'network_publisher',
}
api_attributes = [
'description',
'ipIntelligence',
'portMisuse',
'dosNetworkPublisher',
'protocolDnsDosPublisher',
'protocolSipDosPublisher',
]
returnables = [
'ip_intelligence',
'dns_publisher',
'sip_publisher',
'network_publisher',
'port_misuse',
'description',
'ip_log_publisher',
'ip_rate_limit',
        'ip_log_rtbh',
'ip_log_shun',
'ip_log_translation_fields',
'port_rate_limit',
'port_log_publisher',
]
updatables = [
'dns_publisher',
'sip_publisher',
'network_publisher',
'description',
'ip_log_publisher',
'ip_rate_limit',
        'ip_log_rtbh',
'ip_log_shun',
'ip_log_translation_fields',
'port_rate_limit',
'port_log_publisher',
]
class ApiParameters(Parameters):
@property
def ip_log_publisher(self):
result = self._values['ip_intelligence'].get('logPublisher', None)
return result
@property
def ip_rate_limit(self):
return self._values['ip_intelligence']['aggregateRate']
@property
def port_rate_limit(self):
return self._values['port_misuse']['aggregateRate']
@property
def port_log_publisher(self):
result = self._values['port_misuse'].get('logPublisher', None)
return result
@property
def ip_log_rtbh(self):
return self._values['ip_intelligence']['logRtbh']
@property
def ip_log_shun(self):
if self._values['name'] != 'global-network':
return None
return self._values['ip_intelligence']['logShun']
@property
def ip_log_translation_fields(self):
return self._values['ip_intelligence']['logTranslationFields']
class ModuleParameters(Parameters):
def _transform_log_publisher(self, log_publisher):
if log_publisher is None:
return None
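        # An empty string or 'none' maps to an empty dict, which clears the
        # publisher on the device (see the EXAMPLES section above).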
if log_publisher in ['', 'none']:
return {}
return fq_name(self.partition, log_publisher)
def _validate_rate_limit(self, rate_limit):
if rate_limit is None:
return None
if rate_limit == 'indefinite':
return 4294967295
if 0 <= int(rate_limit) <= 4294967295:
return int(rate_limit)
        raise F5ModuleError(
            "Valid 'rate_limit' must be in range 0 - 4294967295, or 'indefinite'."
        )
@property
def ip_log_rtbh(self):
if self._values['ip_intelligence'] is None:
return None
result = flatten_boolean(self._values['ip_intelligence']['log_rtbh'])
if result == 'yes':
return 'enabled'
if result == 'no':
return 'disabled'
return result
@property
def ip_log_shun(self):
if self._values['ip_intelligence'] is None:
return None
if 'global-network' not in self._values['name']:
return None
result = flatten_boolean(self._values['ip_intelligence']['log_shun'])
if result == 'yes':
return 'enabled'
if result == 'no':
return 'disabled'
return result
@property
def ip_log_translation_fields(self):
if self._values['ip_intelligence'] is None:
return None
result = flatten_boolean(self._values['ip_intelligence']['log_translation_fields'])
if result == 'yes':
return 'enabled'
if result == 'no':
return 'disabled'
return result
@property
def ip_log_publisher(self):
if self._values['ip_intelligence'] is None:
return None
result = self._transform_log_publisher(self._values['ip_intelligence']['log_publisher'])
return result
@property
def ip_rate_limit(self):
if self._values['ip_intelligence'] is None:
return None
return self._validate_rate_limit(self._values['ip_intelligence']['rate_limit'])
@property
def port_rate_limit(self):
if self._values['port_misuse'] is None:
return None
return self._validate_rate_limit(self._values['port_misuse']['rate_limit'])
@property
def port_log_publisher(self):
if self._values['port_misuse'] is None:
return None
result = self._transform_log_publisher(self._values['port_misuse']['log_publisher'])
return result
@property
def dns_publisher(self):
if self._values['dos_protection'] is None:
return None
result = self._transform_log_publisher(self._values['dos_protection']['dns_publisher'])
return result
@property
def sip_publisher(self):
if self._values['dos_protection'] is None:
return None
result = self._transform_log_publisher(self._values['dos_protection']['sip_publisher'])
return result
@property
def network_publisher(self):
if self._values['dos_protection'] is None:
return None
result = self._transform_log_publisher(self._values['dos_protection']['network_publisher'])
return result
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
@property
def ip_intelligence(self):
to_filter = dict(
logPublisher=self._values['ip_log_publisher'],
aggregateRate=self._values['ip_rate_limit'],
logRtbh=self._values['ip_log_rtbh'],
logShun=self._values['ip_log_shun'],
logTranslationFields=self._values['ip_log_translation_fields']
)
result = self._filter_params(to_filter)
if result:
return result
@property
def port_misuse(self):
to_filter = dict(
logPublisher=self._values['port_log_publisher'],
aggregateRate=self._values['port_rate_limit']
)
result = self._filter_params(to_filter)
if result:
return result
class ReportableChanges(Changes):
returnables = [
'ip_intelligence',
'port_misuse',
'description',
'dos_protection',
]
def _change_rate_limit_value(self, value):
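        # BIG-IP stores an 'indefinite' rate limit as the maximum 32-bit
        # value (the same mapping used by _validate_rate_limit above).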
if value == 4294967295:
return 'indefinite'
else:
return value
@property
    def ip_log_rtbh(self):
result = flatten_boolean(self._values['ip_log_rtbh'])
return result
@property
def ip_log_shun(self):
result = flatten_boolean(self._values['ip_log_shun'])
return result
@property
def ip_log_translation_fields(self):
result = flatten_boolean(self._values['ip_log_translation_fields'])
return result
@property
def ip_intelligence(self):
if self._values['ip_intelligence'] is None:
return None
to_filter = dict(
log_publisher=self._values['ip_log_publisher'],
rate_limit=self._change_rate_limit_value(self._values['ip_rate_limit']),
log_rtbh=self.ip_log_rtbh,
log_shun=self.ip_log_shun,
log_translation_fields=self.ip_log_translation_fields
)
result = self._filter_params(to_filter)
if result:
return result
@property
def port_misuse(self):
if self._values['port_misuse'] is None:
return None
to_filter = dict(
log_publisher=self._values['port_log_publisher'],
rate_limit=self._change_rate_limit_value(self._values['port_rate_limit']),
)
result = self._filter_params(to_filter)
if result:
return result
@property
def dos_protection(self):
to_filter = dict(
dns_publisher=self._values['dns_publisher'],
sip_publisher=self._values['sip_publisher'],
network_publisher=self._values['network_publisher'],
)
result = self._filter_params(to_filter)
return result
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def ip_log_publisher(self):
result = compare_dictionary(self.want.ip_log_publisher, self.have.ip_log_publisher)
return result
@property
def port_log_publisher(self):
result = compare_dictionary(self.want.port_log_publisher, self.have.port_log_publisher)
return result
@property
def dns_publisher(self):
result = compare_dictionary(self.want.dns_publisher, self.have.dns_publisher)
return result
@property
def sip_publisher(self):
result = compare_dictionary(self.want.sip_publisher, self.have.sip_publisher)
return result
@property
def network_publisher(self):
result = compare_dictionary(self.want.network_publisher, self.have.network_publisher)
return result
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def absent(self):
# Built-in profiles cannot be removed
built_ins = [
'Log all requests', 'Log illegal requests',
'global-network', 'local-dos'
]
if self.want.name in built_ins:
return False
if self.exists():
return self.remove()
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
return True
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/security/log/profile/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/security/log/profile/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 404, 409]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return True
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/security/log/profile/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 404, 409]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/security/log/profile/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
response = self.client.api.delete(uri)
if response.status == 200:
return True
raise F5ModuleError(response.content)
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/security/log/profile/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 404, 409]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(
required=True
),
description=dict(),
dos_protection=dict(
type='dict',
options=dict(
dns_publisher=dict(),
sip_publisher=dict(),
network_publisher=dict()
)
),
ip_intelligence=dict(
type='dict',
options=dict(
log_publisher=dict(),
log_translation_fields=dict(type='bool'),
rate_limit=dict(),
log_rtbh=dict(type='bool'),
log_shun=dict(type='bool')
)
),
port_misuse=dict(
type='dict',
options=dict(
log_publisher=dict(),
rate_limit=dict()
)
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
),
state=dict(
default='present',
choices=['present', 'absent']
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| gpl-3.0 |
Salat-Cx65/python-for-android | python3-alpha/python3-src/Tools/pybench/Dict.py | 92 | 9261 | from pybench import Test
class DictCreation(Test):
version = 2.0
operations = 5*(5 + 5)
rounds = 80000
def test(self):
for i in range(self.rounds):
d1 = {}
d2 = {}
d3 = {}
d4 = {}
d5 = {}
d1 = {1:2,3:4,5:6}
d2 = {2:3,4:5,6:7}
d3 = {3:4,5:6,7:8}
d4 = {4:5,6:7,8:9}
d5 = {6:7,8:9,10:11}
d1 = {}
d2 = {}
d3 = {}
d4 = {}
d5 = {}
d1 = {1:2,3:4,5:6}
d2 = {2:3,4:5,6:7}
d3 = {3:4,5:6,7:8}
d4 = {4:5,6:7,8:9}
d5 = {6:7,8:9,10:11}
d1 = {}
d2 = {}
d3 = {}
d4 = {}
d5 = {}
d1 = {1:2,3:4,5:6}
d2 = {2:3,4:5,6:7}
d3 = {3:4,5:6,7:8}
d4 = {4:5,6:7,8:9}
d5 = {6:7,8:9,10:11}
d1 = {}
d2 = {}
d3 = {}
d4 = {}
d5 = {}
d1 = {1:2,3:4,5:6}
d2 = {2:3,4:5,6:7}
d3 = {3:4,5:6,7:8}
d4 = {4:5,6:7,8:9}
d5 = {6:7,8:9,10:11}
d1 = {}
d2 = {}
d3 = {}
d4 = {}
d5 = {}
d1 = {1:2,3:4,5:6}
d2 = {2:3,4:5,6:7}
d3 = {3:4,5:6,7:8}
d4 = {4:5,6:7,8:9}
d5 = {6:7,8:9,10:11}
def calibrate(self):
for i in range(self.rounds):
pass
class DictWithStringKeys(Test):
version = 2.0
operations = 5*(6 + 6)
rounds = 200000
def test(self):
d = {}
for i in range(self.rounds):
d['abc'] = 1
d['def'] = 2
d['ghi'] = 3
d['jkl'] = 4
d['mno'] = 5
d['pqr'] = 6
d['abc']
d['def']
d['ghi']
d['jkl']
d['mno']
d['pqr']
d['abc'] = 1
d['def'] = 2
d['ghi'] = 3
d['jkl'] = 4
d['mno'] = 5
d['pqr'] = 6
d['abc']
d['def']
d['ghi']
d['jkl']
d['mno']
d['pqr']
d['abc'] = 1
d['def'] = 2
d['ghi'] = 3
d['jkl'] = 4
d['mno'] = 5
d['pqr'] = 6
d['abc']
d['def']
d['ghi']
d['jkl']
d['mno']
d['pqr']
d['abc'] = 1
d['def'] = 2
d['ghi'] = 3
d['jkl'] = 4
d['mno'] = 5
d['pqr'] = 6
d['abc']
d['def']
d['ghi']
d['jkl']
d['mno']
d['pqr']
d['abc'] = 1
d['def'] = 2
d['ghi'] = 3
d['jkl'] = 4
d['mno'] = 5
d['pqr'] = 6
d['abc']
d['def']
d['ghi']
d['jkl']
d['mno']
d['pqr']
def calibrate(self):
d = {}
for i in range(self.rounds):
pass
class DictWithFloatKeys(Test):
version = 2.0
operations = 5*(6 + 6)
rounds = 150000
def test(self):
d = {}
for i in range(self.rounds):
d[1.234] = 1
d[2.345] = 2
d[3.456] = 3
d[4.567] = 4
d[5.678] = 5
d[6.789] = 6
d[1.234]
d[2.345]
d[3.456]
d[4.567]
d[5.678]
d[6.789]
d[1.234] = 1
d[2.345] = 2
d[3.456] = 3
d[4.567] = 4
d[5.678] = 5
d[6.789] = 6
d[1.234]
d[2.345]
d[3.456]
d[4.567]
d[5.678]
d[6.789]
d[1.234] = 1
d[2.345] = 2
d[3.456] = 3
d[4.567] = 4
d[5.678] = 5
d[6.789] = 6
d[1.234]
d[2.345]
d[3.456]
d[4.567]
d[5.678]
d[6.789]
d[1.234] = 1
d[2.345] = 2
d[3.456] = 3
d[4.567] = 4
d[5.678] = 5
d[6.789] = 6
d[1.234]
d[2.345]
d[3.456]
d[4.567]
d[5.678]
d[6.789]
d[1.234] = 1
d[2.345] = 2
d[3.456] = 3
d[4.567] = 4
d[5.678] = 5
d[6.789] = 6
d[1.234]
d[2.345]
d[3.456]
d[4.567]
d[5.678]
d[6.789]
def calibrate(self):
d = {}
for i in range(self.rounds):
pass
class DictWithIntegerKeys(Test):
version = 2.0
operations = 5*(6 + 6)
rounds = 200000
def test(self):
d = {}
for i in range(self.rounds):
d[1] = 1
d[2] = 2
d[3] = 3
d[4] = 4
d[5] = 5
d[6] = 6
d[1]
d[2]
d[3]
d[4]
d[5]
d[6]
d[1] = 1
d[2] = 2
d[3] = 3
d[4] = 4
d[5] = 5
d[6] = 6
d[1]
d[2]
d[3]
d[4]
d[5]
d[6]
d[1] = 1
d[2] = 2
d[3] = 3
d[4] = 4
d[5] = 5
d[6] = 6
d[1]
d[2]
d[3]
d[4]
d[5]
d[6]
d[1] = 1
d[2] = 2
d[3] = 3
d[4] = 4
d[5] = 5
d[6] = 6
d[1]
d[2]
d[3]
d[4]
d[5]
d[6]
d[1] = 1
d[2] = 2
d[3] = 3
d[4] = 4
d[5] = 5
d[6] = 6
d[1]
d[2]
d[3]
d[4]
d[5]
d[6]
def calibrate(self):
d = {}
for i in range(self.rounds):
pass
class SimpleDictManipulation(Test):
version = 2.0
operations = 5*(6 + 6 + 6 + 6)
rounds = 100000
def test(self):
d = {}
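        # dict.has_key() was removed in Python 3; this lambda keeps the
        # membership-test benchmark equivalent.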
has_key = lambda key: key in d
for i in range(self.rounds):
d[0] = 3
d[1] = 4
d[2] = 5
d[3] = 3
d[4] = 4
d[5] = 5
x = d[0]
x = d[1]
x = d[2]
x = d[3]
x = d[4]
x = d[5]
has_key(0)
has_key(2)
has_key(4)
has_key(6)
has_key(8)
has_key(10)
del d[0]
del d[1]
del d[2]
del d[3]
del d[4]
del d[5]
d[0] = 3
d[1] = 4
d[2] = 5
d[3] = 3
d[4] = 4
d[5] = 5
x = d[0]
x = d[1]
x = d[2]
x = d[3]
x = d[4]
x = d[5]
has_key(0)
has_key(2)
has_key(4)
has_key(6)
has_key(8)
has_key(10)
del d[0]
del d[1]
del d[2]
del d[3]
del d[4]
del d[5]
d[0] = 3
d[1] = 4
d[2] = 5
d[3] = 3
d[4] = 4
d[5] = 5
x = d[0]
x = d[1]
x = d[2]
x = d[3]
x = d[4]
x = d[5]
has_key(0)
has_key(2)
has_key(4)
has_key(6)
has_key(8)
has_key(10)
del d[0]
del d[1]
del d[2]
del d[3]
del d[4]
del d[5]
d[0] = 3
d[1] = 4
d[2] = 5
d[3] = 3
d[4] = 4
d[5] = 5
x = d[0]
x = d[1]
x = d[2]
x = d[3]
x = d[4]
x = d[5]
has_key(0)
has_key(2)
has_key(4)
has_key(6)
has_key(8)
has_key(10)
del d[0]
del d[1]
del d[2]
del d[3]
del d[4]
del d[5]
d[0] = 3
d[1] = 4
d[2] = 5
d[3] = 3
d[4] = 4
d[5] = 5
x = d[0]
x = d[1]
x = d[2]
x = d[3]
x = d[4]
x = d[5]
has_key(0)
has_key(2)
has_key(4)
has_key(6)
has_key(8)
has_key(10)
del d[0]
del d[1]
del d[2]
del d[3]
del d[4]
del d[5]
def calibrate(self):
d = {}
has_key = lambda key: key in d
for i in range(self.rounds):
pass
| apache-2.0 |
priyaganti/rockstor-core | src/rockstor/system/iscsi.py | 2 | 2084 | """
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from osi import run_command
TGTADM_BIN = '/usr/sbin/tgtadm'
DD_BIN = '/bin/dd'
def create_target_device(tid, tname):
cmd = [TGTADM_BIN, '--lld', 'iscsi', '--mode', 'target', '--op', 'new',
'--tid', tid, '--targetname', tname]
return run_command(cmd)
def add_logical_unit(tid, lun, dev_name):
cmd = [TGTADM_BIN, '--lld', 'iscsi', '--mode', 'logicalunit', '--op',
'new', '--tid', tid, '--lun', lun, '-b', dev_name]
return run_command(cmd)
def ip_restrict(tid):
"""
no restrictions at all
"""
cmd = [TGTADM_BIN, '--lld', 'iscsi', '--mode', 'target', '--op', 'bind',
'--tid', tid, '-I', 'ALL']
return run_command(cmd)
def create_lun_file(dev_name, size):
"""
size in MB
"""
of = ('of=%s' % dev_name)
count = ('count=%d' % size)
cmd = [DD_BIN, 'if=/dev/zero', of, 'bs=1M', count]
return run_command(cmd)
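# For example (hypothetical path), create_lun_file('/mnt2/share1/lun0.img', 1024)
# runs: dd if=/dev/zero of=/mnt2/share1/lun0.img bs=1M count=1024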
def export_iscsi(tid, tname, lun, dev_name, size):
"""
    main method that does everything to a share to make it available as an iscsi
device. this should be called from the api view
1. create the dev_name file with the given size using dd
2. create target device
3. add logical unit
4. authentication??
"""
create_lun_file(dev_name, size)
create_target_device(tid, tname)
add_logical_unit(tid, lun, dev_name)
ip_restrict(tid)
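# Example usage (hypothetical target name and path; tid/lun are passed through
# to tgtadm as strings):
#   export_iscsi('1', 'iqn.2015-01.com.example:share1', '1',
#                '/mnt2/share1/lun0.img', 1024)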
| gpl-3.0 |
tkwon/dj-stripe | djstripe/migrations/0025_auto_20170322_0428.py | 1 | 3906 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-22 04:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djstripe', '0024_auto_20170308_0757'),
]
operations = [
migrations.AlterField(
model_name='account',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='account',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='charge',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='charge',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='customer',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='customer',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='event',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='event',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='eventprocessingexception',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='eventprocessingexception',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='invoice',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='invoice',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='invoiceitem',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='invoiceitem',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='plan',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='plan',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='stripesource',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='stripesource',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='subscription',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='subscription',
name='modified',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='transfer',
name='created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='transfer',
name='modified',
field=models.DateTimeField(auto_now=True),
),
]
| mit |
eqcorrscan/ci.testing | eqcorrscan/utils/stacking.py | 1 | 6254 | """
Utility module of the EQcorrscan package to allow for different methods of \
stacking of seismic signal in one place.
:copyright:
EQcorrscan developers.
:license:
GNU Lesser General Public License, Version 3
(https://www.gnu.org/copyleft/lesser.html)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from scipy.signal import hilbert
from copy import deepcopy
from eqcorrscan.core.match_filter import normxcorr2
def linstack(streams, normalize=True):
"""
Compute the linear stack of a series of seismic streams of \
multiplexed data.
:type streams: list
:param streams: List of streams to stack
:type normalize: bool
:param normalize: Normalize traces before stacking, normalizes by the RMS \
amplitude.
:returns: stacked data
:rtype: :class:`obspy.core.stream.Stream`
"""
stack = streams[np.argmax([len(stream) for stream in streams])].copy()
if normalize:
for tr in stack:
tr.data = tr.data / np.sqrt(np.mean(np.square(tr.data)))
tr.data = np.nan_to_num(tr.data)
for i in range(1, len(streams)):
for tr in stack:
matchtr = streams[i].select(station=tr.stats.station,
channel=tr.stats.channel)
if matchtr:
# Normalize the data before stacking
if normalize:
norm = matchtr[0].data /\
np.sqrt(np.mean(np.square(matchtr[0].data)))
norm = np.nan_to_num(norm)
else:
norm = matchtr[0].data
tr.data = np.sum((norm, tr.data), axis=0)
return stack
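# Minimal usage sketch for linstack (hypothetical file names; assumes obspy is
# installed and the streams share station/channel codes and sampling rates):
#   from obspy import read
#   streams = [read(fname) for fname in ('ev1.ms', 'ev2.ms', 'ev3.ms')]
#   stack = linstack(streams)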
def PWS_stack(streams, weight=2, normalize=True):
"""
Compute the phase weighted stack of a series of streams.
.. note:: It is recommended to align the traces before stacking.
:type streams: list
:param streams: List of :class:`obspy.core.stream.Stream` to stack.
:type weight: float
:param weight: Exponent to the phase stack used for weighting.
:type normalize: bool
:param normalize: Normalize traces before stacking.
:return: Stacked stream.
:rtype: :class:`obspy.core.stream.Stream`
"""
# First get the linear stack which we will weight by the phase stack
Linstack = linstack(streams)
# Compute the instantaneous phase
instaphases = []
print("Computing instantaneous phase")
for stream in streams:
instaphase = stream.copy()
for tr in instaphase:
analytic = hilbert(tr.data)
envelope = np.sqrt(np.sum((np.square(analytic),
np.square(tr.data)), axis=0))
tr.data = analytic / envelope
instaphases.append(instaphase)
# Compute the phase stack
print("Computing the phase stack")
Phasestack = linstack(instaphases, normalize=normalize)
# Compute the phase-weighted stack
for tr in Phasestack:
tr.data = Linstack.select(station=tr.stats.station)[0].data *\
np.abs(tr.data ** weight)
return Phasestack
def align_traces(trace_list, shift_len, master=False, positive=False,
plot=False):
"""
Align traces relative to each other based on their cross-correlation value.
Uses the :func:`obspy.signal.cross_correlation.xcorr` function to find the
optimum shift to align traces relative to a master event. Either uses a
given master to align traces, or uses the first trace in the list.
.. Note::
The cross-correlation function may yield an error/warning
about shift_len being too large: this is raised by the
:func:`obspy.signal.cross_correlation.xcorr` routine when the shift_len
is greater than half the length of either master or a trace, then
the correlation will not be robust. We may switch to a different
correlation routine later.
:type trace_list: list
:param trace_list: List of traces to align
:type shift_len: int
:param shift_len: Length to allow shifting within in samples
:type master: obspy.core.trace.Trace
:param master: Master trace to align to, if set to False will align to \
the largest amplitude trace (default)
:type positive: bool
:param positive: Return the maximum positive cross-correlation, or the \
absolute maximum, defaults to False (absolute maximum).
:type plot: bool
:param plot: If true, will plot each trace aligned with the master.
:returns: list of shifts and correlations for best alignment in seconds.
:rtype: list
"""
from eqcorrscan.utils.plotting import xcorr_plot
traces = deepcopy(trace_list)
if not master:
# Use trace with largest MAD amplitude as master
master = traces[0]
MAD_master = np.median(np.abs(master.data))
for i in range(1, len(traces)):
            if np.median(np.abs(traces[i].data)) > MAD_master:
master = traces[i]
MAD_master = np.median(np.abs(master.data))
else:
print('Using master given by user')
shifts = []
ccs = []
for i in range(len(traces)):
if not master.stats.sampling_rate == traces[i].stats.sampling_rate:
raise ValueError('Sampling rates not the same')
cc_vec = normxcorr2(template=traces[i].data.
astype(np.float32)[shift_len:-shift_len],
image=master.data.astype(np.float32))
cc_vec = cc_vec[0]
shift = np.abs(cc_vec).argmax()
cc = cc_vec[shift]
if plot:
xcorr_plot(template=traces[i].data.
astype(np.float32)[shift_len:-shift_len],
image=master.data.astype(np.float32), shift=shift,
cc=cc)
shift -= shift_len
if cc < 0 and positive:
cc = cc_vec.max()
shift = cc_vec.argmax() - shift_len
shifts.append(shift / master.stats.sampling_rate)
ccs.append(cc)
return shifts, ccs
if __name__ == "__main__":
import doctest
doctest.testmod()
| lgpl-3.0 |
ArtRand/signalAlign | externalTools/lastz-distrib-1.03.54/tools/any_to_qdna.py | 6 | 3214 | #!/usr/bin/env python
"""
Convert any file to a LASTZ quantum dna file, just by appending qdna headers
Qdna file format is shown below (omitting "named properties", which we don't
use). We simply create all the headers and copy the file as the "data
sequence".
offset 0x00: C4 B4 71 97 big endian magic number (97 71 B4 C4 => little endian)
offset 0x04: 00 00 02 00 version 2.0 (fourth byte is sub version)
offset 0x08: 00 00 00 14 header length (in bytes, including this field)
offset 0x0C: xx xx xx xx S, offset (from file start) to data sequence
offset 0x10: xx xx xx xx N, offset to name, 0 indicates no name
offset 0x14: xx xx xx xx length of data sequence (counted in 'items')
offset 0x18: 00 00 00 00 (offset to named properties, not used)
offset N: ... name (zero-terminated string)
offset S: ... data sequence
:Author: Bob Harris ([email protected])
"""
from sys import argv,stdin,stdout,exit
def usage(s=None):
message = """any_to_qdna [options] < any_file > qdna_file
Convert any file to a LASTZ quantum dna file.
options:
--name=<string> the name of the sequence
(by default, the sequence is unnamed)
--striplinebreaks strip line breaks from the file
(default is to include line breaks in the qdna file)
--simple create an "old-style" qdna file
(default is to create a version 2 qda file)"""
if (s == None): exit (message)
else: exit ("%s\n%s" % (s,message))
def main():
qdnaOldMagic = 0xF656659EL # big endian magic number for older qdna files
qdnaMagic = 0xC4B47197L # big endian magic number for qdna files
qdnaVersion = 0x00000200L
# parse args
name = None
strip = False
simple = False
for arg in argv[1:]:
if (arg.startswith("--name=")):
name = arg.split("=",1)[1]
elif (arg == "--striplinebreaks") or (arg == "--strip"):
strip = True
elif (arg == "--simple") or (arg == "--old"):
simple = True
elif (arg.startswith("--")):
usage("can't understand %s" % arg)
else:
usage("can't understand %s" % arg)
if (simple) and (name != None):
		usage("simple qdna file cannot carry a sequence name")
# === read the input file ===
seq = []
for line in stdin:
if (strip): line = line.rstrip()
seq += [line]
seq = "".join(seq)
# === write the qdna file ===
if (not simple):
headerLen = 20
if (name == None):
nameOffset = 0
seqOffset = headerLen + 8;
else:
nameOffset = headerLen + 8;
seqOffset = nameOffset + len(name) + 1
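			# e.g. a 3-character name gives nameOffset = 28 and seqOffset = 32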
# prepend magic number
if (simple): write_4(stdout,qdnaOldMagic)
else: write_4(stdout,qdnaMagic)
# write the rest of the header
if (not simple):
write_4(stdout,qdnaVersion)
write_4(stdout,headerLen)
write_4(stdout,seqOffset)
write_4(stdout,nameOffset)
write_4(stdout,len(seq))
write_4(stdout,0)
if (name != None):
stdout.write(name)
stdout.write(chr(0))
# write the sequence
stdout.write(seq)
def write_4(f,val):
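	# write a 32-bit value as four bytes, most significant byte first (big endian)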
f.write (chr((val >> 24) & 0xFF))
f.write (chr((val >> 16) & 0xFF))
f.write (chr((val >> 8) & 0xFF))
f.write (chr( val & 0xFF))
if __name__ == "__main__": main()
| mit |
gspilio/nova | nova/network/quantumv2/api.py | 1 | 41934 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved
# Copyright (c) 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import time
from oslo.config import cfg
from nova.compute import instance_types
from nova import conductor
from nova import context
from nova.db import base
from nova import exception
from nova.network import api as network_api
from nova.network import model as network_model
from nova.network import quantumv2
from nova.network.security_group import openstack_driver
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
quantum_opts = [
cfg.StrOpt('quantum_url',
default='http://127.0.0.1:9696',
help='URL for connecting to quantum'),
cfg.IntOpt('quantum_url_timeout',
default=30,
help='timeout value for connecting to quantum in seconds'),
cfg.StrOpt('quantum_admin_username',
help='username for connecting to quantum in admin context'),
cfg.StrOpt('quantum_admin_password',
help='password for connecting to quantum in admin context',
secret=True),
cfg.StrOpt('quantum_admin_tenant_name',
help='tenant name for connecting to quantum in admin context'),
cfg.StrOpt('quantum_region_name',
help='region name for connecting to quantum in admin context'),
cfg.StrOpt('quantum_admin_auth_url',
default='http://localhost:5000/v2.0',
help='auth url for connecting to quantum in admin context'),
cfg.BoolOpt('quantum_api_insecure',
default=False,
help='if set, ignore any SSL validation issues'),
cfg.StrOpt('quantum_auth_strategy',
default='keystone',
help='auth strategy for connecting to '
'quantum in admin context'),
# TODO(berrange) temporary hack until Quantum can pass over the
# name of the OVS bridge it is configured with
cfg.StrOpt('quantum_ovs_bridge',
default='br-int',
help='Name of Integration Bridge used by Open vSwitch'),
cfg.IntOpt('quantum_extension_sync_interval',
default=600,
help='Number of seconds before querying quantum for'
' extensions'),
]
CONF = cfg.CONF
CONF.register_opts(quantum_opts)
CONF.import_opt('default_floating_pool', 'nova.network.floating_ips')
CONF.import_opt('flat_injected', 'nova.network.manager')
LOG = logging.getLogger(__name__)
NET_EXTERNAL = 'router:external'
refresh_cache = network_api.refresh_cache
update_instance_info_cache = network_api.update_instance_cache_with_nw_info
class API(base.Base):
"""API for interacting with the quantum 2.x API."""
conductor_api = conductor.API()
security_group_api = openstack_driver.get_openstack_security_group_driver()
def __init__(self):
super(API, self).__init__()
self.last_quantum_extension_sync = None
self.extensions = {}
def setup_networks_on_host(self, context, instance, host=None,
teardown=False):
"""Setup or teardown the network structures."""
def _get_available_networks(self, context, project_id,
net_ids=None):
"""Return a network list available for the tenant.
The list contains networks owned by the tenant and public networks.
If net_ids specified, it searches networks with requested IDs only.
"""
quantum = quantumv2.get_client(context)
# If user has specified to attach instance only to specific
# networks, add them to **search_opts
# (1) Retrieve non-public network list owned by the tenant.
search_opts = {"tenant_id": project_id, 'shared': False}
if net_ids:
search_opts['id'] = net_ids
nets = quantum.list_networks(**search_opts).get('networks', [])
# (2) Retrieve public network list.
search_opts = {'shared': True}
if net_ids:
search_opts['id'] = net_ids
nets += quantum.list_networks(**search_opts).get('networks', [])
_ensure_requested_network_ordering(
lambda x: x['id'],
nets,
net_ids)
return nets
@refresh_cache
def allocate_for_instance(self, context, instance, **kwargs):
"""Allocate network resources for the instance.
TODO(someone): document the rest of these parameters.
:param macs: None or a set of MAC addresses that the instance
should use. macs is supplied by the hypervisor driver (contrast
with requested_networks which is user supplied).
NB: QuantumV2 currently assigns hypervisor supplied MAC addresses
to arbitrary networks, which requires openflow switches to
function correctly if more than one network is being used with
the bare metal hypervisor (which is the only one known to limit
MAC addresses).
"""
hypervisor_macs = kwargs.get('macs', None)
available_macs = None
if hypervisor_macs is not None:
# Make a copy we can mutate: records macs that have not been used
# to create a port on a network. If we find a mac with a
# pre-allocated port we also remove it from this set.
available_macs = set(hypervisor_macs)
quantum = quantumv2.get_client(context)
LOG.debug(_('allocate_for_instance() for %s'),
instance['display_name'])
if not instance['project_id']:
msg = _('empty project id for instance %s')
raise exception.InvalidInput(
reason=msg % instance['display_name'])
requested_networks = kwargs.get('requested_networks')
ports = {}
fixed_ips = {}
net_ids = []
if requested_networks:
for network_id, fixed_ip, port_id in requested_networks:
if port_id:
port = quantum.show_port(port_id)['port']
if hypervisor_macs is not None:
if port['mac_address'] not in hypervisor_macs:
raise exception.PortNotUsable(port_id=port_id,
instance=instance['display_name'])
else:
# Don't try to use this MAC if we need to create a
# port on the fly later. Identical MACs may be
# configured by users into multiple ports so we
# discard rather than popping.
available_macs.discard(port['mac_address'])
network_id = port['network_id']
ports[network_id] = port
elif fixed_ip and network_id:
fixed_ips[network_id] = fixed_ip
if network_id:
net_ids.append(network_id)
nets = self._get_available_networks(context, instance['project_id'],
net_ids)
security_groups = kwargs.get('security_groups', [])
security_group_ids = []
# TODO(arosen) Should optimize more to do direct query for security
# group if len(security_groups) == 1
if len(security_groups):
search_opts = {'tenant_id': instance['project_id']}
user_security_groups = quantum.list_security_groups(
**search_opts).get('security_groups')
for security_group in security_groups:
name_match = None
uuid_match = None
for user_security_group in user_security_groups:
if user_security_group['name'] == security_group:
if name_match:
msg = (_("Multiple security groups found matching"
" '%s'. Use an ID to be more specific."),
security_group)
raise exception.NoUniqueMatch(msg)
name_match = user_security_group['id']
if user_security_group['id'] == security_group:
uuid_match = user_security_group['id']
# If a user names the security group the same as
# another security group's uuid, the name takes priority.
if not name_match and not uuid_match:
raise exception.SecurityGroupNotFound(
security_group_id=security_group)
elif name_match:
security_group_ids.append(name_match)
elif uuid_match:
security_group_ids.append(uuid_match)
touched_port_ids = []
created_port_ids = []
for network in nets:
# If security groups are requested on an instance then the
# network must have a subnet associated with it. Some plugins
# implement the port-security extension which requires
# 'port_security_enabled' to be True for security groups.
# That is why True is returned if 'port_security_enabled'
# is not found.
if (security_groups and not (
network['subnets']
and network.get('port_security_enabled', True))):
raise exception.SecurityGroupCannotBeApplied()
network_id = network['id']
zone = 'compute:%s' % instance['availability_zone']
port_req_body = {'port': {'device_id': instance['uuid'],
'device_owner': zone}}
try:
port = ports.get(network_id)
if port:
quantum.update_port(port['id'], port_req_body)
touched_port_ids.append(port['id'])
else:
fixed_ip = fixed_ips.get(network_id)
if fixed_ip:
port_req_body['port']['fixed_ips'] = [{'ip_address':
fixed_ip}]
port_req_body['port']['network_id'] = network_id
port_req_body['port']['admin_state_up'] = True
port_req_body['port']['tenant_id'] = instance['project_id']
if security_group_ids:
port_req_body['port']['security_groups'] = (
security_group_ids)
if available_macs is not None:
if not available_macs:
raise exception.PortNotFree(
instance=instance['display_name'])
mac_address = available_macs.pop()
port_req_body['port']['mac_address'] = mac_address
self._populate_quantum_extension_values(instance,
port_req_body)
created_port_ids.append(
quantum.create_port(port_req_body)['port']['id'])
except Exception:
with excutils.save_and_reraise_exception():
for port_id in touched_port_ids:
port_in_server = quantum.show_port(port_id).get('port')
if not port_in_server:
raise Exception(_('Port not found'))
port_req_body = {'port': {'device_id': None}}
quantum.update_port(port_id, port_req_body)
for port_id in created_port_ids:
try:
quantum.delete_port(port_id)
except Exception as ex:
msg = _("Fail to delete port %(portid)s with"
" failure: %(exception)s")
LOG.debug(msg, {'portid': port_id,
'exception': ex})
self.trigger_security_group_members_refresh(context, instance)
self.trigger_instance_add_security_group_refresh(context, instance)
nw_info = self._get_instance_nw_info(context, instance, networks=nets)
# NOTE(danms): Only return info about ports we created in this run.
# In the initial allocation case, this will be everything we created,
# and in later runs will only be what was created that time. Thus,
# this only affects the attach case, not the original use for this
# method.
return network_model.NetworkInfo([port for port in nw_info
if port['id'] in created_port_ids +
touched_port_ids])
def _refresh_quantum_extensions_cache(self):
if (not self.last_quantum_extension_sync or
((time.time() - self.last_quantum_extension_sync)
>= CONF.quantum_extension_sync_interval)):
quantum = quantumv2.get_client(context.get_admin_context())
extensions_list = quantum.list_extensions()['extensions']
self.last_quantum_extension_sync = time.time()
self.extensions.clear()
self.extensions = dict((ext['name'], ext)
for ext in extensions_list)
def _populate_quantum_extension_values(self, instance, port_req_body):
self._refresh_quantum_extensions_cache()
if 'nvp-qos' in self.extensions:
instance_type = instance_types.extract_instance_type(instance)
rxtx_factor = instance_type.get('rxtx_factor')
port_req_body['port']['rxtx_factor'] = rxtx_factor
def deallocate_for_instance(self, context, instance, **kwargs):
"""Deallocate all network resources related to the instance."""
LOG.debug(_('deallocate_for_instance() for %s'),
instance['display_name'])
search_opts = {'device_id': instance['uuid']}
data = quantumv2.get_client(context).list_ports(**search_opts)
ports = data.get('ports', [])
for port in ports:
try:
quantumv2.get_client(context).delete_port(port['id'])
except Exception as ex:
LOG.exception(_("Failed to delete quantum port %(portid)s ")
% {'portid': port['id']})
self.trigger_security_group_members_refresh(context, instance)
self.trigger_instance_remove_security_group_refresh(context, instance)
@refresh_cache
def allocate_port_for_instance(self, context, instance, port_id,
network_id=None, requested_ip=None,
conductor_api=None):
return self.allocate_for_instance(context, instance,
requested_networks=[(network_id, requested_ip, port_id)],
conductor_api=conductor_api)
@refresh_cache
def deallocate_port_for_instance(self, context, instance, port_id,
conductor_api=None):
try:
quantumv2.get_client(context).delete_port(port_id)
except Exception as ex:
LOG.exception(_("Failed to delete quantum port %(port_id)s ") %
locals())
self.trigger_security_group_members_refresh(context, instance)
self.trigger_instance_remove_security_group_refresh(context, instance)
return self._get_instance_nw_info(context, instance)
def list_ports(self, context, **search_opts):
return quantumv2.get_client(context).list_ports(**search_opts)
def show_port(self, context, port_id):
return quantumv2.get_client(context).show_port(port_id)
def get_instance_nw_info(self, context, instance, conductor_api=None,
networks=None):
result = self._get_instance_nw_info(context, instance, networks)
update_instance_info_cache(self, context, instance, result,
conductor_api)
return result
def _get_instance_nw_info(self, context, instance, networks=None):
LOG.debug(_('get_instance_nw_info() for %s'),
instance['display_name'])
nw_info = self._build_network_info_model(context, instance, networks)
return network_model.NetworkInfo.hydrate(nw_info)
@refresh_cache
def add_fixed_ip_to_instance(self, context, instance, network_id,
conductor_api=None):
"""Add a fixed ip to the instance from specified network."""
search_opts = {'network_id': network_id}
data = quantumv2.get_client(context).list_subnets(**search_opts)
ipam_subnets = data.get('subnets', [])
if not ipam_subnets:
raise exception.NetworkNotFoundForInstance(
instance_id=instance['uuid'])
zone = 'compute:%s' % instance['availability_zone']
search_opts = {'device_id': instance['uuid'],
'device_owner': zone,
'network_id': network_id}
data = quantumv2.get_client(context).list_ports(**search_opts)
ports = data['ports']
for p in ports:
for subnet in ipam_subnets:
fixed_ips = p['fixed_ips']
fixed_ips.append({'subnet_id': subnet['id']})
port_req_body = {'port': {'fixed_ips': fixed_ips}}
try:
quantumv2.get_client(context).update_port(p['id'],
port_req_body)
return
except Exception as ex:
msg = _("Unable to update port %(portid)s on subnet "
"%(subnet_id)s with failure: %(exception)s")
LOG.debug(msg, {'portid': p['id'],
'subnet_id': subnet['id'],
'exception': ex})
raise exception.NetworkNotFoundForInstance(
instance_id=instance['uuid'])
@refresh_cache
def remove_fixed_ip_from_instance(self, context, instance, address,
conductor_api=None):
"""Remove a fixed ip from the instance."""
zone = 'compute:%s' % instance['availability_zone']
search_opts = {'device_id': instance['uuid'],
'device_owner': zone,
'fixed_ips': 'ip_address=%s' % address}
data = quantumv2.get_client(context).list_ports(**search_opts)
ports = data['ports']
for p in ports:
fixed_ips = p['fixed_ips']
new_fixed_ips = []
for fixed_ip in fixed_ips:
if fixed_ip['ip_address'] != address:
new_fixed_ips.append(fixed_ip)
port_req_body = {'port': {'fixed_ips': new_fixed_ips}}
try:
quantumv2.get_client(context).update_port(p['id'],
port_req_body)
except Exception as ex:
msg = _("Unable to update port %(portid)s with"
" failure: %(exception)s")
LOG.debug(msg, {'portid': p['id'], 'exception': ex})
return
raise exception.FixedIpNotFoundForSpecificInstance(
instance_uuid=instance['uuid'], ip=address)
def validate_networks(self, context, requested_networks):
"""Validate that the tenant can use the requested networks."""
LOG.debug(_('validate_networks() for %s'),
requested_networks)
if not requested_networks:
return
net_ids = []
for (net_id, _i, port_id) in requested_networks:
if not port_id:
net_ids.append(net_id)
continue
port = quantumv2.get_client(context).show_port(port_id).get('port')
if not port:
raise exception.PortNotFound(port_id=port_id)
if port.get('device_id', None):
raise exception.PortInUse(port_id=port_id)
net_id = port['network_id']
if net_id in net_ids:
raise exception.NetworkDuplicated(network_id=net_id)
net_ids.append(net_id)
nets = self._get_available_networks(context, context.project_id,
net_ids)
if len(nets) != len(net_ids):
requested_netid_set = set(net_ids)
returned_netid_set = set([net['id'] for net in nets])
lostid_set = requested_netid_set - returned_netid_set
id_str = ''
for _id in lostid_set:
id_str = id_str and id_str + ', ' + _id or _id
raise exception.NetworkNotFound(network_id=id_str)
def _get_instance_uuids_by_ip(self, context, address):
"""Retrieve instance uuids associated with the given ip address.
:returns: A list of dicts containing the uuids keyed by 'instance_uuid'
e.g. [{'instance_uuid': uuid}, ...]
"""
search_opts = {"fixed_ips": 'ip_address=%s' % address}
data = quantumv2.get_client(context).list_ports(**search_opts)
ports = data.get('ports', [])
return [{'instance_uuid': port['device_id']} for port in ports
if port['device_id']]
def get_instance_uuids_by_ip_filter(self, context, filters):
"""Return a list of dicts in the form of
[{'instance_uuid': uuid}] that matched the ip filter.
"""
# filters['ip'] is composed as '^%s$' % fixed_ip.replace('.', '\\.')
ip = filters.get('ip')
# we remove ^$\ in the ip filter
if ip[0] == '^':
ip = ip[1:]
if ip[-1] == '$':
ip = ip[:-1]
ip = ip.replace('\\.', '.')
return self._get_instance_uuids_by_ip(context, ip)
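# Illustrative example (added comment, not in the original source): with
# filters = {'ip': '^10\\.0\\.0\\.2$'} the stripping above yields the plain
# address '10.0.0.2', which is then matched against port fixed IPs.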
def trigger_instance_add_security_group_refresh(self, context,
instance_ref):
admin_context = context.elevated()
for group in instance_ref['security_groups']:
self.conductor_api.security_groups_trigger_handler(context,
'instance_add_security_group', instance_ref, group['name'])
def trigger_instance_remove_security_group_refresh(self, context,
instance_ref):
admin_context = context.elevated()
for group in instance_ref['security_groups']:
self.conductor_api.security_groups_trigger_handler(context,
'instance_remove_security_group', instance_ref, group['name'])
def trigger_security_group_members_refresh(self, context, instance_ref):
admin_context = context.elevated()
group_ids = [group['id'] for group in instance_ref['security_groups']]
self.conductor_api.security_groups_trigger_members_refresh(
admin_context, group_ids)
self.conductor_api.security_groups_trigger_handler(admin_context,
'security_group_members', group_ids)
def _get_port_id_by_fixed_address(self, client,
instance, address):
zone = 'compute:%s' % instance['availability_zone']
search_opts = {'device_id': instance['uuid'],
'device_owner': zone}
data = client.list_ports(**search_opts)
ports = data['ports']
port_id = None
for p in ports:
for ip in p['fixed_ips']:
if ip['ip_address'] == address:
port_id = p['id']
break
if not port_id:
raise exception.FixedIpNotFoundForAddress(address=address)
return port_id
@refresh_cache
def associate_floating_ip(self, context, instance,
floating_address, fixed_address,
affect_auto_assigned=False):
"""Associate a floating ip with a fixed ip."""
# Note(amotoki): 'affect_auto_assigned' is not respected
# since it is not used anywhere in nova code and I could
# not find why this parameter exists.
client = quantumv2.get_client(context)
port_id = self._get_port_id_by_fixed_address(client, instance,
fixed_address)
fip = self._get_floating_ip_by_address(client, floating_address)
param = {'port_id': port_id,
'fixed_ip_address': fixed_address}
client.update_floatingip(fip['id'], {'floatingip': param})
def get_all(self, context):
client = quantumv2.get_client(context)
networks = client.list_networks().get('networks') or {}
for network in networks:
network['label'] = network['name']
return networks
def get(self, context, network_uuid):
client = quantumv2.get_client(context)
network = client.show_network(network_uuid).get('network') or {}
network['label'] = network['name']
return network
def delete(self, context, network_uuid):
raise NotImplementedError()
def disassociate(self, context, network_uuid):
raise NotImplementedError()
def get_fixed_ip(self, context, id):
raise NotImplementedError()
def get_fixed_ip_by_address(self, context, address):
uuid_maps = self._get_instance_uuids_by_ip(context, address)
if len(uuid_maps) == 1:
return uuid_maps[0]
elif not uuid_maps:
raise exception.FixedIpNotFoundForAddress(address=address)
else:
raise exception.FixedIpAssociatedWithMultipleInstances(
address=address)
def _setup_net_dict(self, client, network_id):
if not network_id:
return {}
pool = client.show_network(network_id)['network']
return {pool['id']: pool}
def _setup_port_dict(self, client, port_id):
if not port_id:
return {}
port = client.show_port(port_id)['port']
return {port['id']: port}
def _setup_pools_dict(self, client):
pools = self._get_floating_ip_pools(client)
return dict([(i['id'], i) for i in pools])
def _setup_ports_dict(self, client, project_id=None):
search_opts = {'tenant_id': project_id} if project_id else {}
ports = client.list_ports(**search_opts)['ports']
return dict([(p['id'], p) for p in ports])
def get_floating_ip(self, context, id):
client = quantumv2.get_client(context)
fip = client.show_floatingip(id)['floatingip']
pool_dict = self._setup_net_dict(client,
fip['floating_network_id'])
port_dict = self._setup_port_dict(client, fip['port_id'])
return self._format_floating_ip_model(fip, pool_dict, port_dict)
def _get_floating_ip_pools(self, client, project_id=None):
search_opts = {NET_EXTERNAL: True}
if project_id:
search_opts.update({'tenant_id': project_id})
data = client.list_networks(**search_opts)
return data['networks']
def get_floating_ip_pools(self, context):
client = quantumv2.get_client(context)
pools = self._get_floating_ip_pools(client)
return [{'name': n['name'] or n['id']} for n in pools]
def _format_floating_ip_model(self, fip, pool_dict, port_dict):
pool = pool_dict[fip['floating_network_id']]
result = {'id': fip['id'],
'address': fip['floating_ip_address'],
'pool': pool['name'] or pool['id'],
'project_id': fip['tenant_id'],
# In Quantum v2, an exact fixed_ip_id does not exist.
'fixed_ip_id': fip['port_id'],
}
# In Quantum v2 API fixed_ip_address and instance uuid
# (= device_id) are known here, so pass it as a result.
result['fixed_ip'] = {'address': fip['fixed_ip_address']}
if fip['port_id']:
instance_uuid = port_dict[fip['port_id']]['device_id']
result['instance'] = {'uuid': instance_uuid}
else:
result['instance'] = None
return result
def get_floating_ip_by_address(self, context, address):
client = quantumv2.get_client(context)
fip = self._get_floating_ip_by_address(client, address)
pool_dict = self._setup_net_dict(client,
fip['floating_network_id'])
port_dict = self._setup_port_dict(client, fip['port_id'])
return self._format_floating_ip_model(fip, pool_dict, port_dict)
def get_floating_ips_by_project(self, context):
client = quantumv2.get_client(context)
project_id = context.project_id
fips = client.list_floatingips(tenant_id=project_id)['floatingips']
pool_dict = self._setup_pools_dict(client)
port_dict = self._setup_ports_dict(client, project_id)
return [self._format_floating_ip_model(fip, pool_dict, port_dict)
for fip in fips]
def get_floating_ips_by_fixed_address(self, context, fixed_address):
return []
def get_instance_id_by_floating_address(self, context, address):
"""Returns the instance id a floating ip's fixed ip is allocated to."""
client = quantumv2.get_client(context)
fip = self._get_floating_ip_by_address(client, address)
if not fip['port_id']:
return None
port = client.show_port(fip['port_id'])['port']
return port['device_id']
def get_vifs_by_instance(self, context, instance):
raise NotImplementedError()
def get_vif_by_mac_address(self, context, mac_address):
raise NotImplementedError()
def _get_floating_ip_pool_id_by_name_or_id(self, client, name_or_id):
search_opts = {NET_EXTERNAL: True, 'fields': 'id'}
if uuidutils.is_uuid_like(name_or_id):
search_opts.update({'id': name_or_id})
else:
search_opts.update({'name': name_or_id})
data = client.list_networks(**search_opts)
nets = data['networks']
if len(nets) == 1:
return nets[0]['id']
elif len(nets) == 0:
raise exception.FloatingIpPoolNotFound()
else:
msg = (_("Multiple floating IP pools matches found for name '%s'")
% name_or_id)
raise exception.NovaException(message=msg)
def allocate_floating_ip(self, context, pool=None):
"""Add a floating ip to a project from a pool."""
client = quantumv2.get_client(context)
pool = pool or CONF.default_floating_pool
pool_id = self._get_floating_ip_pool_id_by_name_or_id(client, pool)
# TODO(amotoki): handle exception during create_floatingip()
# At this point it is ensured that a network for the pool exists.
# quota error may be returned.
param = {'floatingip': {'floating_network_id': pool_id}}
fip = client.create_floatingip(param)
return fip['floatingip']['floating_ip_address']
def _get_floating_ip_by_address(self, client, address):
"""Get floatingip from floating ip address."""
data = client.list_floatingips(floating_ip_address=address)
fips = data['floatingips']
if len(fips) == 0:
raise exception.FloatingIpNotFoundForAddress(address=address)
elif len(fips) > 1:
raise exception.FloatingIpMultipleFoundForAddress(address=address)
return fips[0]
def _get_floating_ips_by_fixed_and_port(self, client, fixed_ip, port):
"""Get floatingips from fixed ip and port."""
data = client.list_floatingips(fixed_ip_address=fixed_ip, port_id=port)
return data['floatingips']
def release_floating_ip(self, context, address,
affect_auto_assigned=False):
"""Remove a floating ip with the given address from a project."""
# Note(amotoki): We cannot handle a case where multiple pools
# have overlapping IP address range. In this case we cannot use
# 'address' as a unique key.
# This is a limitation of the current nova.
# Note(amotoki): 'affect_auto_assigned' is not respected
# since it is not used anywhere in nova code and I could
# not find why this parameter exists.
client = quantumv2.get_client(context)
fip = self._get_floating_ip_by_address(client, address)
if fip['port_id']:
raise exception.FloatingIpAssociated(address=address)
client.delete_floatingip(fip['id'])
@refresh_cache
def disassociate_floating_ip(self, context, instance, address,
affect_auto_assigned=False):
"""Disassociate a floating ip from the instance."""
# Note(amotoki): 'affect_auto_assigned' is not respected
# since it is not used anywhere in nova code and I could
# not find why this parameter exists.
client = quantumv2.get_client(context)
fip = self._get_floating_ip_by_address(client, address)
client.update_floatingip(fip['id'], {'floatingip': {'port_id': None}})
def migrate_instance_start(self, context, instance, migration):
"""Start to migrate the network of an instance."""
# NOTE(wenjianhn): just pass so that migrating an instance doesn't
# raise for now.
pass
def migrate_instance_finish(self, context, instance, migration):
"""Finish migrating the network of an instance."""
# NOTE(wenjianhn): just pass so that migrating an instance doesn't
# raise for now.
pass
def add_network_to_project(self, context, project_id, network_uuid=None):
"""Force add a network to the project."""
raise NotImplementedError()
def _build_network_info_model(self, context, instance, networks=None):
search_opts = {'tenant_id': instance['project_id'],
'device_id': instance['uuid'], }
client = quantumv2.get_client(context, admin=True)
data = client.list_ports(**search_opts)
ports = data.get('ports', [])
if networks is None:
networks = self._get_available_networks(context,
instance['project_id'])
else:
# ensure ports are in preferred network order
_ensure_requested_network_ordering(
lambda x: x['network_id'],
ports,
[n['id'] for n in networks])
nw_info = network_model.NetworkInfo()
for port in ports:
network_name = None
for net in networks:
if port['network_id'] == net['id']:
network_name = net['name']
break
if network_name is None:
raise exception.NotFound(_('Network %(net)s for '
'port %(port_id)s not found!') %
{'net': port['network_id'],
'port': port['id']})
network_IPs = []
for fixed_ip in port['fixed_ips']:
fixed = network_model.FixedIP(address=fixed_ip['ip_address'])
floats = self._get_floating_ips_by_fixed_and_port(
client, fixed_ip['ip_address'], port['id'])
for ip in floats:
fip = network_model.IP(address=ip['floating_ip_address'],
type='floating')
fixed.add_floating_ip(fip)
network_IPs.append(fixed)
subnets = self._get_subnets_from_port(context, port)
for subnet in subnets:
subnet['ips'] = [fixed_ip for fixed_ip in network_IPs
if fixed_ip.is_in_subnet(subnet)]
bridge = None
ovs_interfaceid = None
# Network model metadata
should_create_bridge = None
vif_type = port.get('binding:vif_type')
# TODO(berrange) Quantum should pass the bridge name
# in another binding metadata field
if vif_type == network_model.VIF_TYPE_OVS:
bridge = CONF.quantum_ovs_bridge
ovs_interfaceid = port['id']
elif vif_type == network_model.VIF_TYPE_BRIDGE:
bridge = "brq" + port['network_id']
should_create_bridge = True
if bridge is not None:
bridge = bridge[:network_model.NIC_NAME_LEN]
devname = "tap" + port['id']
devname = devname[:network_model.NIC_NAME_LEN]
network = network_model.Network(
id=port['network_id'],
bridge=bridge,
injected=CONF.flat_injected,
label=network_name,
tenant_id=net['tenant_id']
)
network['subnets'] = subnets
if should_create_bridge is not None:
network['should_create_bridge'] = should_create_bridge
nw_info.append(network_model.VIF(
id=port['id'],
address=port['mac_address'],
network=network,
type=port.get('binding:vif_type'),
ovs_interfaceid=ovs_interfaceid,
devname=devname))
return nw_info
def _get_subnets_from_port(self, context, port):
"""Return the subnets for a given port."""
fixed_ips = port['fixed_ips']
# No fixed_ips for the port means there is no subnet associated
# with the network the port is created on.
# Since list_subnets(id=[]) returns all subnets visible for the
# current tenant, returned subnets may contain subnets which are not
# related to the port. To avoid this, the method returns an empty list here.
if not fixed_ips:
return []
search_opts = {'id': [ip['subnet_id'] for ip in fixed_ips]}
data = quantumv2.get_client(context).list_subnets(**search_opts)
ipam_subnets = data.get('subnets', [])
subnets = []
for subnet in ipam_subnets:
subnet_dict = {'cidr': subnet['cidr'],
'gateway': network_model.IP(
address=subnet['gateway_ip'],
type='gateway'),
}
# attempt to populate DHCP server field
search_opts = {'network_id': subnet['network_id'],
'device_owner': 'network:dhcp'}
data = quantumv2.get_client(context).list_ports(**search_opts)
dhcp_ports = data.get('ports', [])
for p in dhcp_ports:
for ip_pair in p['fixed_ips']:
if ip_pair['subnet_id'] == subnet['id']:
subnet_dict['dhcp_server'] = ip_pair['ip_address']
break
subnet_object = network_model.Subnet(**subnet_dict)
for dns in subnet.get('dns_nameservers', []):
subnet_object.add_dns(
network_model.IP(address=dns, type='dns'))
# TODO(gongysh) get the routes for this subnet
subnets.append(subnet_object)
return subnets
def get_dns_domains(self, context):
"""Return a list of available dns domains.
These can be used to create DNS entries for floating ips.
"""
raise NotImplementedError()
def add_dns_entry(self, context, address, name, dns_type, domain):
"""Create specified DNS entry for address."""
raise NotImplementedError()
def modify_dns_entry(self, context, name, address, domain):
"""Create specified DNS entry for address."""
raise NotImplementedError()
def delete_dns_entry(self, context, name, domain):
"""Delete the specified dns entry."""
raise NotImplementedError()
def delete_dns_domain(self, context, domain):
"""Delete the specified dns domain."""
raise NotImplementedError()
def get_dns_entries_by_address(self, context, address, domain):
"""Get entries for address and domain."""
raise NotImplementedError()
def get_dns_entries_by_name(self, context, name, domain):
"""Get entries for name and domain."""
raise NotImplementedError()
def create_private_dns_domain(self, context, domain, availability_zone):
"""Create a private DNS domain with nova availability zone."""
raise NotImplementedError()
def create_public_dns_domain(self, context, domain, project=None):
"""Create a private DNS domain with optional nova project."""
raise NotImplementedError()
def _ensure_requested_network_ordering(accessor, unordered, preferred):
"""Sort a list with respect to the preferred network ordering."""
if preferred:
unordered.sort(key=lambda i: preferred.index(accessor(i)))
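# Minimal sketch (added for illustration; not part of the original module):
# shows how _ensure_requested_network_ordering() sorts a list of network
# dicts in place to match a preferred id ordering.
def _example_requested_network_ordering():
    nets = [{'id': 'net-b'}, {'id': 'net-a'}]
    _ensure_requested_network_ordering(lambda x: x['id'],
                                       nets,
                                       ['net-a', 'net-b'])
    return nets  # now ordered as [{'id': 'net-a'}, {'id': 'net-b'}]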
| apache-2.0 |
dsbrown/FreeCAD | src/Mod/Path/PathScripts/PathCompoundExtended.py | 16 | 5492 | # -*- coding: utf-8 -*-
#***************************************************************************
#* *
#* Copyright (c) 2014 Yorik van Havre <[email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
import FreeCAD,FreeCADGui,Path,PathGui
from PySide import QtCore,QtGui
"""Path Compound Extended object and FreeCAD command"""
# Qt translation handling
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def translate(context, text, disambig=None):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def translate(context, text, disambig=None):
return QtGui.QApplication.translate(context, text, disambig)
class ObjectCompoundExtended:
def __init__(self,obj):
obj.addProperty("App::PropertyString","Description", "Path",translate("PathCompoundExtended","An optional description of this compounded operation"))
# obj.addProperty("App::PropertySpeed", "FeedRate", "Path",translate("PathCompoundExtended","The feed rate of the paths in these compounded operations"))
# obj.addProperty("App::PropertyFloat", "SpindleSpeed", "Path",translate("PathCompoundExtended","The spindle speed, in revolutions per minute, of the tool used in these compounded operations"))
obj.addProperty("App::PropertyLength","SafeHeight", "Path",translate("PathCompoundExtended","The safe height for this operation"))
obj.addProperty("App::PropertyLength","RetractHeight","Path",translate("PathCompoundExtended","The retract height, above top surface of part, between compounded operations inside clamping area"))
obj.Proxy = self
def __getstate__(self):
return None
def __setstate__(self,state):
return None
def execute(self,obj):
cmds = []
for child in obj.Group:
if child.isDerivedFrom("Path::Feature"):
cmds.extend(child.Path.Commands)
if cmds:
path = Path.Path(cmds)
obj.Path = path
class ViewProviderCompoundExtended:
def __init__(self,vobj):
vobj.Proxy = self
def attach(self,vobj):
self.Object = vobj.Object
return
def getIcon(self):
return ":/icons/Path-Compound.svg"
def __getstate__(self):
return None
def __setstate__(self,state):
return None
class CommandCompoundExtended:
def GetResources(self):
return {'Pixmap' : 'Path-Compound',
'MenuText': QtCore.QT_TRANSLATE_NOOP("PathCompoundExtended","Compound"),
'Accel': "P, C",
'ToolTip': QtCore.QT_TRANSLATE_NOOP("PathCompoundExtended","Creates a Path Compound object")}
def IsActive(self):
return FreeCAD.ActiveDocument is not None
def Activated(self):
FreeCAD.ActiveDocument.openTransaction(translate("PathCompoundExtended","Create Compound"))
FreeCADGui.addModule("PathScripts.PathCompoundExtended")
snippet = '''
import Path
import PathScripts
from PathScripts import PathUtils
incl = []
prjexists = False
sel = FreeCADGui.Selection.getSelection()
for s in sel:
if s.isDerivedFrom("Path::Feature"):
incl.append(s)
obj = FreeCAD.ActiveDocument.addObject("Path::FeatureCompoundPython","Compound")
PathScripts.PathCompoundExtended.ObjectCompoundExtended(obj)
PathScripts.PathCompoundExtended.ViewProviderCompoundExtended(obj.ViewObject)
project = PathUtils.addToProject(obj)
if incl:
children = []
p = project.Group
g = obj.Group
for child in incl:
p.remove(child)
children.append(FreeCAD.ActiveDocument.getObject(child.Name))
project.Group = p
g.append(children)
obj.Group = children
'''
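# NOTE (added comment, not in the original source): the snippet above is run
# through FreeCADGui.doCommand below; it creates a Path Compound object, adds
# it to the active project and moves any selected Path features out of the
# project group and into the new compound's Group.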
FreeCADGui.doCommand(snippet)
FreeCAD.ActiveDocument.commitTransaction()
FreeCAD.ActiveDocument.recompute()
if FreeCAD.GuiUp:
# register the FreeCAD command
FreeCADGui.addCommand('Path_CompoundExtended',CommandCompoundExtended())
FreeCAD.Console.PrintLog("Loading PathCompoundExtended... done\n")
| lgpl-2.1 |
kangkot/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/site-packages/pythonwin/pywin/Demos/dibdemo.py | 17 | 1930 | # A demo which creates a view and a frame which displays a PPM format bitmap
#
# This hasn't been run in a while, as I don't have many of that format around!
import win32ui
import win32con
import win32api
import string
class DIBView:
def __init__(self, doc, dib):
self.dib = dib
self.view = win32ui.CreateView(doc)
self.width = self.height = 0
# set up message handlers
# self.view.OnPrepareDC = self.OnPrepareDC
self.view.HookMessage (self.OnSize, win32con.WM_SIZE)
def OnSize (self, params):
lParam = params[3]
self.width = win32api.LOWORD(lParam)
self.height = win32api.HIWORD(lParam)
def OnDraw (self, ob, dc):
# set sizes used for "non strecth" mode.
self.view.SetScrollSizes(win32con.MM_TEXT, self.dib.GetSize())
dibSize = self.dib.GetSize()
dibRect = (0,0,dibSize[0], dibSize[1])
# stretch BMP.
#self.dib.Paint(dc, (0,0,self.width, self.height),dibRect)
# non stretch.
self.dib.Paint(dc)
class DIBDemo:
def __init__(self, filename, * bPBM):
# init data members
f = open(filename, 'rb')
dib=win32ui.CreateDIBitmap()
if len(bPBM)>0:
magic=f.readline()
if magic <> "P6\n":
print "The file is not a PBM format file"
raise "Failed"
# check magic?
rowcollist=string.split(f.readline())
cols=string.atoi(rowcollist[0])
rows=string.atoi(rowcollist[1])
f.readline() # skip the max color value line (e.g. "255")
dib.LoadPBMData(f,(cols,rows))
else:
dib.LoadWindowsFormatFile(f)
f.close()
# create doc/view
self.doc = win32ui.CreateDoc()
self.dibView = DIBView( self.doc, dib )
self.frame = win32ui.CreateMDIFrame()
self.frame.LoadFrame() # this will force OnCreateClient
self.doc.SetTitle ('DIB Demo')
self.frame.ShowWindow()
# display the sucka
self.frame.ActivateFrame()
def OnCreateClient( self, createparams, context ):
self.dibView.view.CreateWindow(self.frame)
return 1
if __name__=='__main__':
import demoutils
demoutils.NotAScript() | apache-2.0 |
sentient-energy/emsw-oe-mirror | contrib/mtnpatch.py | 45 | 2048 | #!/usr/bin/env python
import sys, os, string, getopt, re
mtncmd = "mtn"
def main(argv = None):
if argv is None:
argv = sys.argv
opts, list = getopt.getopt(sys.argv[1:], ':R')
if len(list) < 1:
print "You must specify a file"
return 2
reverse = False
for o, a in opts:
if o == "-R":
reverse = True
if os.path.exists(list[0]):
input = open(list[0], 'r')
renameFrom = ""
cmd = ""
for line in input:
if len(line) > 0:
if line[0] == '#':
matches = re.search("#\s+(\w+)\s+\"(.*)\"", line)
if matches is not None:
cmd = matches.group(1)
fileName = matches.group(2)
if cmd == "delete":
if reverse:
print "%s add %s" % (mtncmd, fileName)
else:
print "%s drop -e %s" % (mtncmd, fileName)
elif cmd == "add" or cmd == "add_file" or cmd == "add_dir":
if reverse:
print "%s drop -e %s" % (mtncmd, fileName)
else:
print "%s add %s" % (mtncmd, fileName)
elif cmd == "rename":
renameFrom = fileName
elif cmd == "to" and renameFrom != "":
if reverse:
print "%s rename -e %s %s" % (mtncmd, fileName, renameFrom)
else:
print "%s rename -e %s %s" % (mtncmd, renameFrom, fileName)
renameFrom = ""
else:
cmd = ""
if reverse:
print "patch -R -p0 < %s" % list[0]
else:
print "patch -p0 < %s" % list[0]
if __name__ == "__main__":
sys.exit(main())
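# Illustrative example (added comment, not part of the original script):
# given a patch whose monotone header contains
#   #   delete "old.c"
#   #   rename "a.c"
#   #       to "b.c"
# running this script without -R prints
#   mtn drop -e old.c
#   mtn rename -e a.c b.c
#   patch -p0 < <patchfile>
# and with -R the add/drop and rename directions are reversed.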
| mit |
osvalr/odoo | addons/resource/tests/common.py | 305 | 5016 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from openerp.tests import common
class TestResourceCommon(common.TransactionCase):
def setUp(self):
super(TestResourceCommon, self).setUp()
cr, uid = self.cr, self.uid
if not hasattr(self, 'context'):
self.context = {}
# Useful models
self.resource_resource = self.registry('resource.resource')
self.resource_calendar = self.registry('resource.calendar')
self.resource_attendance = self.registry('resource.calendar.attendance')
self.resource_leaves = self.registry('resource.calendar.leaves')
# Some demo data
self.date1 = datetime.strptime('2013-02-12 09:08:07', '%Y-%m-%d %H:%M:%S') # weekday() returns 1, isoweekday() returns 2
self.date2 = datetime.strptime('2013-02-15 10:11:12', '%Y-%m-%d %H:%M:%S') # weekday() returns 4, isoweekday() returns 5
# Leave1: 19/02/2013, from 9 to 12, is a day 1
self.leave1_start = datetime.strptime('2013-02-19 09:00:00', '%Y-%m-%d %H:%M:%S')
self.leave1_end = datetime.strptime('2013-02-19 12:00:00', '%Y-%m-%d %H:%M:%S')
# Leave2: 22/02/2013, from 9 to 15, is a day 4
self.leave2_start = datetime.strptime('2013-02-22 09:00:00', '%Y-%m-%d %H:%M:%S')
self.leave2_end = datetime.strptime('2013-02-22 15:00:00', '%Y-%m-%d %H:%M:%S')
# Leave3: 25/02/2013 (day0) -> 01/03/2013 (day4)
self.leave3_start = datetime.strptime('2013-02-25 13:00:00', '%Y-%m-%d %H:%M:%S')
self.leave3_end = datetime.strptime('2013-03-01 11:30:00', '%Y-%m-%d %H:%M:%S')
# Resource data
# Calendar working days: 1 (8-16 -> 8hours), 4 (8-13, 16-23 -> 12hours)
self.calendar_id = self.resource_calendar.create(
cr, uid, {
'name': 'TestCalendar',
}
)
self.att1_id = self.resource_attendance.create(
cr, uid, {
'name': 'Att1',
'dayofweek': '1',
'hour_from': 8,
'hour_to': 16,
'calendar_id': self.calendar_id,
}
)
self.att2_id = self.resource_attendance.create(
cr, uid, {
'name': 'Att2',
'dayofweek': '4',
'hour_from': 8,
'hour_to': 13,
'calendar_id': self.calendar_id,
}
)
self.att3_id = self.resource_attendance.create(
cr, uid, {
'name': 'Att3',
'dayofweek': '4',
'hour_from': 16,
'hour_to': 23,
'calendar_id': self.calendar_id,
}
)
self.resource1_id = self.resource_resource.create(
cr, uid, {
'name': 'TestResource1',
'resource_type': 'user',
'time_efficiency': 150.0,
'calendar_id': self.calendar_id,
}
)
self.leave1_id = self.resource_leaves.create(
cr, uid, {
'name': 'GenericLeave',
'calendar_id': self.calendar_id,
'date_from': self.leave1_start,
'date_to': self.leave1_end,
}
)
self.leave2_id = self.resource_leaves.create(
cr, uid, {
'name': 'ResourceLeave',
'calendar_id': self.calendar_id,
'resource_id': self.resource1_id,
'date_from': self.leave2_start,
'date_to': self.leave2_end,
}
)
self.leave3_id = self.resource_leaves.create(
cr, uid, {
'name': 'ResourceLeave2',
'calendar_id': self.calendar_id,
'resource_id': self.resource1_id,
'date_from': self.leave3_start,
'date_to': self.leave3_end,
}
)
# Some browse data
self.calendar = self.resource_calendar.browse(cr, uid, self.calendar_id)
| agpl-3.0 |
hlt-mt/tensorflow | tensorflow/python/framework/function.py | 4 | 15235 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Python front-end supports for functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import re
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
def _make_argname_from_tensor_name(name):
return re.sub(":0$", "", name).replace(":", "_o")
def _tensor_to_argdef(t):
arg = op_def_pb2.OpDef.ArgDef()
arg.name = _make_argname_from_tensor_name(t.name)
arg.type = t.dtype.as_datatype_enum
return arg
def _get_node_def_attr(op):
# pylint: disable=protected-access
return op._node_def.attr
# pylint: enable=protected-access
def _add_input_array(op, start, limit, dtype, func):
"""Adds a _ListToArray node in the func for op.inputs[start:limit]."""
node = function_pb2.FunctionDef.Node()
node.op = "_ListToArray"
ret_name = op.name + "_L2A_" + str(start)
node.ret.extend([ret_name])
node.arg.extend([_make_argname_from_tensor_name(x.name)
for x in op.inputs[start:limit]])
num = limit - start
node.attr["Tin"].CopyFrom(attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(type=[dtype] * num)))
node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtype))
node.attr["N"].CopyFrom(attr_value_pb2.AttrValue(i=num))
func.node.extend([node])
return ret_name
def _add_output_array(op, start, limit, dtype, func):
"""Adds a _ArrayToList node in the func for op.outputs[start:limit]."""
dtype_proto = attr_value_pb2.AttrValue(type=dtype)
# A node converting N*T to list(T)
node = function_pb2.FunctionDef.Node()
node.op = "_ArrayToList"
arg_name = op.name + "_A2L_" + str(start)
ret_name = arg_name + "_out"
node.ret.append(ret_name)
node.arg.append(arg_name)
node.attr["T"].CopyFrom(dtype_proto)
num = limit - start
node.attr["N"].CopyFrom(attr_value_pb2.AttrValue(i=num))
node.attr["out_types"].CopyFrom(attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(type=[dtype] * num)))
func.node.extend([node])
num = limit - start
# Adds an identity node for each element in the array N*T so that
# uses of each element can be added easily later. These Identity nodes
# will be eliminated before graph execution.
for i in xrange(num):
node = function_pb2.FunctionDef.Node()
node.op = "Identity"
node.arg.append(ret_name + ":" + str(i))
node.ret.append(_make_argname_from_tensor_name(op.outputs[i].name))
node.attr["T"].CopyFrom(dtype_proto)
func.node.extend([node])
return arg_name
def _add_output_list(op, start, limit, dtype_lst, func):
"""Adds a _ArrayToList node in the func for op.outputs[start:limit]."""
ret_name = op.name + "_Lst_" + str(start) + "_" + str(limit)
num = limit - start
assert len(dtype_lst) == num
# Adds an identity node for each element in the array N*T so that
# uses of each element can be added easily later. These Identity nodes
# will be eliminated before graph execution.
for i in xrange(num):
node = function_pb2.FunctionDef.Node()
node.op = "Identity"
node.arg.append(ret_name + ":" + str(i))
node.ret.append(_make_argname_from_tensor_name(op.outputs[i].name))
node.attr["T"].CopyFrom(attr_value_pb2.AttrValue(type=dtype_lst[i]))
func.node.extend([node])
return ret_name
def _add_op_node(graph, op, func):
"""Converts an op to a function def node and add it to `func`."""
node = function_pb2.FunctionDef.Node()
node.op = op.type
# pylint: disable=protected-access
if graph._is_function(op.type):
op_def = graph._get_function(op.type).signature
else:
op_def = op_def_registry.get_registered_ops()[op.type]
# pylint: enable=protected-access
attrs = _get_node_def_attr(op)
out_index = 0
for arg_def in op_def.output_arg:
if arg_def.number_attr:
dtype = arg_def.type or attrs[arg_def.type_attr].type
num = attrs[arg_def.number_attr].i
node.ret.append(_add_output_array(op, out_index, out_index + num, dtype,
func))
out_index += num
elif arg_def.type_list_attr:
dtype_lst = attrs[arg_def.type_list_attr].list.type
num = len(dtype_lst)
node.ret.append(_add_output_list(op, out_index, out_index + num,
dtype_lst, func))
out_index += num
else:
node.ret.append(_make_argname_from_tensor_name(op.outputs[
out_index].name))
out_index += 1
inp_index = 0
for arg_def in op_def.input_arg:
if arg_def.number_attr:
dtype = arg_def.type or attrs[arg_def.type_attr].type
num = attrs[arg_def.number_attr].i
node.arg.append(_add_input_array(op, inp_index, inp_index + num, dtype,
func))
inp_index += num
elif arg_def.type_list_attr:
num = len(attrs[arg_def.type_list_attr].list.type)
node.arg.extend([_make_argname_from_tensor_name(op.inputs[i].name)
for i in range(inp_index, inp_index + num)])
inp_index += num
else:
node.arg.append(_make_argname_from_tensor_name(op.inputs[inp_index].name))
inp_index += 1
node.dep.extend([_make_argname_from_tensor_name(x.name)
for x in op.control_inputs])
for k, v in _get_node_def_attr(op).iteritems():
node.attr[k].CopyFrom(v)
func.node.extend([node])
# pylint: disable=line-too-long
def graph_to_function_def(graph, name, inputs, outputs):
"""Returns `graph` as a `FunctionDef` protocol buffer.
This method creates a [`FunctionDef`](
https://www.tensorflow.org/code/tensorflow/core/framework/function.proto)
protocol buffer that contains all the ops present in the graph. The
graph effectively becomes the body of the function.
The arguments `inputs` and `outputs` will be listed as the input
and output tensors of the function. They must be lists of
tensors present in the graph. The lists can optionally be empty.
The returned protocol buffer can be passed to the
[`Graph.add_function()`](#Graph.add_function) method of a
different graph to make it available there.
Args:
graph: Graph. The graph whose ops will form the body of the function.
name: string. The name to use for the function.
inputs: List of tensors. Inputs to the function.
outputs: List of tensors. Outputs of the function.
Returns:
A FunctionDef protocol buffer.
"""
# pylint: enable=line-too-long
func = function_pb2.FunctionDef()
func.signature.name = name
func.signature.input_arg.extend([_tensor_to_argdef(graph.get_tensor_by_name(
i.name)) for i in inputs])
func.signature.output_arg.extend([_tensor_to_argdef(graph.get_tensor_by_name(
o.name)) for o in outputs])
func_arg_placeholders = set([i.name for i in inputs])
g = ops.get_default_graph()
for op in graph.get_operations():
tensor_name = op.values()[0].name
if tensor_name not in func_arg_placeholders:
_add_op_node(g, op, func)
return func
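# Minimal sketch (added for illustration; not part of the original module):
# builds a tiny one-op graph and converts it to a FunctionDef, mirroring what
# define_function() below does with the arguments of a Python function.
def _example_graph_to_function_def():
  temp_graph = ops.Graph()
  with temp_graph.as_default():
    x = array_ops.placeholder(dtypes.float32, name="x")
    y = array_ops.identity(x, name="y")
    return graph_to_function_def(temp_graph, "ExampleIdentity", [x], [y])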
def call_function(func_def, *inputs, **kwargs):
"""Calls the function described by `func_def`.
This adds a `call` op to the default graph that calls the function described
by `func_def` with the tensors listed in `inputs` as arguments. It returns
the outputs of the call, which are one or more tensors.
`func_def` is a
[`FunctionDef`](
https://www.tensorflow.org/code/tensorflow/core/framework/function.proto)
protocol buffer describing a
TensorFlow function. See [`define_function()`](#define_function) for an
easy way to create one from a Python function.
You can pass an optional keyword parameter `name=string` to name the
added operation.
`func_def` is automatically added to the function library of the graph if
needed.
Args:
func_def: A `FunctionDef` protocol buffer.
*inputs: A list of tensors
**kwargs: Optional keyword arguments. Can only contain 'name'.
Returns:
A list of tensors representing the outputs of the call to `func_def`.
Raises:
ValueError: if the arguments are invalid.
"""
name = kwargs.pop("name", None)
if kwargs:
raise ValueError("Unknown keyword arguments: %s" % kwargs.keys())
func_name = func_def.signature.name
with ops.op_scope(inputs, name, func_name) as name:
if len(inputs) != len(func_def.signature.input_arg):
raise ValueError("Expected number of arguments: %d" %
len(func_def.signature.input_arg))
output_types = [dtypes.DType(x.type) for x in func_def.signature.output_arg]
# TODO(touts): Pass compute_shapes as "try if function exists"
g = ops.get_default_graph()
op = g.create_op(func_name,
list(inputs),
output_types,
name=name,
compute_shapes=False)
if op.outputs:
if len(op.outputs) == 1:
return op.outputs[0]
else:
return tuple(op.outputs)
else:
return op
def define_function(func, input_types):
"""Creates a `FunctionDef` for a python function.
`func` is a Python function that receives zero or more tensors and returns at
least one tensor. It should add ops to the default graph the usual way by
calling TensorFlow functions such as `tf.constant()`, `tf.matmul()`, etc.
`input_types` is a dictionary of strings to `tf.DType` objects. Keys are
names of arguments to `func`. The values indicate the type of tensor expected
by the function.
The returned `FunctionDef` protocol buffer is also added to the
default graph library. After it has been added you can add calls to
the function by passing it to `tf.call_function()`, together with a
list of tensors to use as inputs for the function.
Notes:
* `func` is called once, with `placeholder` tensors of the types specified in
`input_types` as arguments.
* Values returned by `func` must be tensors and they are recorded as being
the output of the function def.
* While `func` is called, an empty graph is temporarily pushed as the
default graph. All ops added by `func` to that graph are part of the body
of the returned function def.
Example, but also see the [How To on functions](link_needed).
```python
# A function that receives two tensors x, y and returns their
# sum and difference.
def my_func(x, y):
return x + y, x - y
# Create a FunctionDef for 'my_func'. (This does not change the default
# graph.)
my_func_def = tf.define_function(my_func, {'x': tf.float32, 'y': tf.float32})
# Build the graph, calling the function.
a = tf.constant([1.0])
b = tf.constant([2.0])
c, d = tf.call_function(my_func_def, a, b, name='mycall')
```
Args:
func: a Python function.
input_types: dict. Keys are the names of the arguments of `func`, values
are their expected `tf.DType`.
Returns:
A FunctionDef protocol buffer.
Raises:
ValueError: if the arguments are invalid.
"""
# TODO(touts): Lift the limitation that func can only receive Tensor args.
if inspect.isfunction(func):
func_name = func.__name__
elif inspect.ismethod(func):
func_name = func.im_self.__name__ + "." + func.__name__
else:
raise ValueError("Argument must be a function")
argspec = inspect.getargspec(func)
if argspec.varargs or argspec.keywords or argspec.defaults:
raise ValueError("Only functions with plain arglists are supported.")
if inspect.isfunction(func):
if len(argspec.args) != len(input_types):
raise ValueError("The function must have the same number of arguments "
"as the number of specified input types.")
args = argspec.args
elif inspect.ismethod(func):
if len(argspec.args) != 1 + len(input_types):
raise ValueError(
"The class function must have the same number of arguments "
"as the number of specified input types.")
args = argspec.args[1:] # 1st argument is the "class" type.
# Create the func_def object.
temp_graph = ops.Graph()
with temp_graph.as_default():
# List of placeholders for the function_def.
inputs = []
# Arglist to call 'func'
kwargs = {}
for argname in args:
if argname not in input_types:
raise ValueError("Missing type for argument: " + argname)
argholder = array_ops.placeholder(input_types[argname], name=argname)
inputs.append(argholder)
kwargs[argname] = argholder
# Call func and gather the output tensors.
outputs = func(**kwargs)
if not outputs:
raise ValueError("Function must return at least one tensor")
# Convenience: if func only returned one value, make it a tuple.
if not isinstance(outputs, (list, tuple)):
outputs = (outputs,)
# Build the FunctionDef
func_def = graph_to_function_def(temp_graph, func_name, inputs, outputs)
g = ops.get_default_graph()
g._add_function(func_def) # pylint: disable=protected-access
return func_def
class Defun(object):
"""Decorator used to define TensorFlow functions.
Use this decorator to make a Python function usable directly as a TensorFlow
function.
The decorated function must add ops to the default graph and return zero or
more `Tensor` objects. Call the decorator with named arguments, one for each
argument of the function to decorate, with the expected type of the argument
as value.
For example, if the function to decorate accepts two `tf.float32` arguments
named `x` and `y`, call the decorator with:
@Defun(x=tf.float32, y=tf.float32)
def foo(x, y):
...
When you call the decorated function it will add `call` ops to the graph.
Example, but also see the [How To on functions](link_needed).
```python
# Defining the function.
@tf.Defun(x=tf.float32, y=tf.float32)
def MyFunc(x, y):
return x + y, x - y
# Building the graph.
a = tf.constant([1.0])
b = tf.constant([2.0])
c, d = MyFunc(a, b, name='mycall')
```
@@__init__
"""
def __init__(self, **input_types):
"""Create a `Defun` decorator.
Args:
**input_types: Dict mapping strings to `tf.DType` objects.
One key for each argument of the function to decorate.
"""
self._input_types = input_types
def __call__(self, f):
func_def = define_function(f, self._input_types)
return lambda *args, **kwargs: call_function(func_def, *args, **kwargs)
| apache-2.0 |
Chealion/yycbike | archive/weatherLoad.py | 1 | 6271 | #! /usr/bin/python
# :set tabstop=4 shiftwidth=4 expandtab
# Downloads Environment Canada data and sends the data to Graphite. Additionally logs the data to a file we can use to import later
import csv
import time
import graphitesend
import urllib2
from datetime import date, timedelta
import datetime
graphitesend.init(graphite_server='localhost',prefix='yycbike',system_name='')
metriclog = open('/home/ubuntu/devmetriclog.log', 'a')
# Watch out for timezones - this script fails to function past 5 PM MST.
yesterday = date.today() - timedelta(1)
year = yesterday.strftime('%Y')
month = yesterday.strftime('%m')
day = yesterday.strftime('%d')
#Installations
# URLs per ftp://ftp.tor.ec.gc.ca/Pub/Get_More_Data_Plus_de_donnees/Readme.txt
HOURLY_URL='http://climate.weather.gc.ca/climate_data/bulk_data_e.html?format=csv&stationID=50430&Year=' + year + '&Month=' + month + '&Day=' + day + '&submit=Download+Data&timeframe=1'
DAILY_URL= 'http://climate.weather.gc.ca/climate_data/bulk_data_e.html?format=csv&stationID=50430&Year=' + year + '&Month=' + month + '&Day=' + day + '&submit=Download+Data&timeframe=2'
## HOURLY
url = HOURLY_URL
print 'Loading Hourly Weather Data...'
response = urllib2.urlopen(url)
csv_data = response.read()
# Delete first 17 lines - up to and including header line
cleaned_data = '\n'.join(csv_data.split('\n')[17:])
# split into list, and use non unicode field names
csv_reader = csv.DictReader(cleaned_data.split('\n'), fieldnames=['Date', 'Year', 'Month', 'Day', 'Time', 'Quality', 'Temp', 'TempFlag', 'DewPoint', 'DewPointFlag', 'Humidity', 'HumFlag', 'WindDir', 'WindFlag', 'WindSpd', 'WindFlg', 'Visbility', 'VisFlag', 'Pressure', 'PressFlag', 'Humidex', 'HmdxFlag', 'WindChill', 'WindChillFlag', 'Weather'])
for row in csv_reader:
#Create timestamp
timestamp = time.mktime(datetime.datetime.strptime(row['Date'], "%Y-%m-%d %H:%M").timetuple())
yesterday_timestamp = float(yesterday.strftime('%s'))
#Ignore any data "newer" than yesterday. Data that doesn't exist yet.
if timestamp > yesterday_timestamp:
break
else:
timestamp = str(int(timestamp))
#print row
# Data Cleaning - Wind Chill or Humidex - merge
if row['Temp'] is None or row['Temp'] == '':
continue
if row['Humidex'] == '' and row['WindChill'] == '':
feelslike = row['Temp']
elif row['Humidex'] == '':
feelslike = row['WindChill']
else:
feelslike = row['Humidex']
if row['WindSpd'] == '':
row['WindSpd'] = 0
if row['WindDir'] == '':
row['WindDir'] = 0
metric_string = 'weather.hourly.temp ' + str(row['Temp']) + ' ' + timestamp
metriclog.write(metric_string + "\n")
graphitesend.send('weather.hourly.temp', str(row['Temp']), timestamp)
metric_string = 'weather.hourly.windspeed ' + str(row['WindSpd']) + ' ' + timestamp
metriclog.write(metric_string + "\n")
graphitesend.send('weather.hourly.windspeed', str(row['WindSpd']), timestamp)
metric_string = 'weather.hourly.winddir ' + str(row['WindDir']) + ' ' + timestamp
metriclog.write(metric_string + "\n")
graphitesend.send('weather.hourly.winddir', str(row['WindDir']), timestamp)
metric_string = 'weather.hourly.humidity ' + str(row['Humidity']) + ' ' + timestamp
metriclog.write(metric_string + "\n")
graphitesend.send('weather.hourly.humidity', str(row['Humidity']), timestamp)
metric_string = 'weather.hourly.feelslike ' + str(feelslike) + ' ' + timestamp
metriclog.write(metric_string + "\n")
graphitesend.send('weather.hourly.feelslike', str(feelslike), timestamp)
## DAILY
url = DAILY_URL
print 'Loading Daily Weather Data...'
response = urllib2.urlopen(url)
csv_data = response.read()
# Delete first 26 lines - up to and including header line
cleaned_data = '\n'.join(csv_data.split('\n')[26:])
# split into list, and use non unicode field names
csv_reader = csv.DictReader(cleaned_data.split('\n'), fieldnames=['Date', 'Year', 'Month', 'Day', 'Quality', 'Max', 'MaxFlag', 'Min', 'MinFlag', 'Mean', 'MeanFlag', 'Heat1', 'Heat2', 'Heat3', 'Heat4', 'Rain', 'RainFlag', 'Snow', 'SnowFlag', 'TotalPrecip', 'PrecipFlag', 'SnowonGround', 'SnowGroundFlag', 'Wind1', 'Wind2', 'Wind3', 'Wind4'])
for row in csv_reader:
#Create timestamp
timestamp = time.mktime(datetime.datetime.strptime(row['Date'], "%Y-%m-%d").timetuple())
yesterday_timestamp = float(yesterday.strftime('%s'))
#Ignore any data "newer" than yesterday. Data that doesn't exist yet.
if timestamp > yesterday_timestamp:
break
else:
timestamp = str(int(timestamp))
#print row
if row['Max'] is None or row['Max'] == '' or row['Min'] == '':
continue
metric_string = 'weather.daily.high ' + str(row['Max']) + ' ' + timestamp
metriclog.write(metric_string + "\n")
graphitesend.send('weather.daily.high', str(row['Max']), timestamp)
metric_string = 'weather.daily.low ' + str(row['Min']) + ' ' + timestamp
metriclog.write(metric_string + "\n")
graphitesend.send('weather.daily.low', str(row['Min']), timestamp)
metric_string = 'weather.daily.mean ' + str(row['Mean']) + ' ' + timestamp
metriclog.write(metric_string + "\n")
graphitesend.send('weather.daily.mean', str(row['Mean']), timestamp)
# Data Cleaning
if row['TotalPrecip'] == '':
row['TotalPrecip'] = 0
metric_string = 'weather.daily.precip ' + str(row['TotalPrecip']) + ' ' + timestamp
metriclog.write(metric_string + "\n")
graphitesend.send('weather.daily.precip', str(row['TotalPrecip']), timestamp)
# Data Cleaning
if row['SnowonGround'] == '':
row['SnowonGround'] = 0
metric_string = 'weather.daily.snowamt ' + str(row['SnowonGround']) + ' ' + timestamp
metriclog.write(metric_string + "\n")
graphitesend.send('weather.daily.snowamt', str(row['SnowonGround']), timestamp)
# OUTPUT FORMAT:
# <metric path> <metric value> <metric timestamp>
# yycbike.peacebridge.north.trips 5 123456789
metriclog.close()
print 'Done.'
| mit |
VirtusLab/ansible-modules-extras | system/kernel_blacklist.py | 153 | 3807 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Matthias Vogelgesang <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import re
DOCUMENTATION = '''
---
module: kernel_blacklist
author: "Matthias Vogelgesang (@matze)"
version_added: 1.4
short_description: Blacklist kernel modules
description:
- Add or remove kernel modules from blacklist.
options:
name:
required: true
description:
- Name of kernel module to black- or whitelist.
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the module should be present in the blacklist or absent.
blacklist_file:
required: false
description:
- If specified, use this blacklist file instead of
C(/etc/modprobe.d/blacklist-ansible.conf).
default: null
requirements: []
'''
EXAMPLES = '''
# Blacklist the nouveau driver module
- kernel_blacklist: name=nouveau state=present
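# Remove the nouveau module from a custom blacklist file
# (illustrative sketch of the blacklist_file option documented above)
- kernel_blacklist: name=nouveau state=absent blacklist_file=/etc/modprobe.d/custom-blacklist.conf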
'''
class Blacklist(object):
def __init__(self, module, filename):
if not os.path.exists(filename):
open(filename, 'a').close()
self.filename = filename
self.module = module
def get_pattern(self):
return '^blacklist\s*' + self.module + '$'
def readlines(self):
f = open(self.filename, 'r')
lines = f.readlines()
f.close()
return lines
def module_listed(self):
lines = self.readlines()
pattern = self.get_pattern()
for line in lines:
stripped = line.strip()
if stripped.startswith('#'):
continue
if re.match(pattern, stripped):
return True
return False
def remove_module(self):
lines = self.readlines()
pattern = self.get_pattern()
f = open(self.filename, 'w')
for line in lines:
if not re.match(pattern, line.strip()):
f.write(line)
f.close()
    def add_module(self):
        f = open(self.filename, 'a')
        f.write('blacklist %s\n' % self.module)
        f.close()
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
state=dict(required=False, choices=['present', 'absent'],
default='present'),
blacklist_file=dict(required=False, default=None)
),
supports_check_mode=False,
)
args = dict(changed=False, failed=False,
name=module.params['name'], state=module.params['state'])
filename = '/etc/modprobe.d/blacklist-ansible.conf'
if module.params['blacklist_file']:
filename = module.params['blacklist_file']
blacklist = Blacklist(args['name'], filename)
if blacklist.module_listed():
if args['state'] == 'absent':
blacklist.remove_module()
args['changed'] = True
else:
if args['state'] == 'present':
blacklist.add_module()
args['changed'] = True
module.exit_json(**args)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
gmimano/commcaretest | corehq/apps/api/util.py | 2 | 1330 | from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ugettext as _
from couchdbkit.exceptions import ResourceNotFound
def get_object_or_not_exist(cls, doc_id, domain, additional_doc_types=None):
"""
Given a Document class, id, and domain, get that object or raise
an ObjectDoesNotExist exception if it's not found, not the right
type, or doesn't belong to the domain.
"""
additional_doc_types = additional_doc_types or []
doc_type = getattr(cls, '_doc_type', cls.__name__)
additional_doc_types.append(doc_type)
try:
doc = cls.get(doc_id)
if doc and doc.domain == domain and doc.doc_type in additional_doc_types:
return doc
except ResourceNotFound:
pass # covered by the below
except AttributeError:
# there's a weird edge case if you reference a form with a case id
# that explodes on the "version" property. might as well swallow that
# too.
pass
raise object_does_not_exist(doc_type, doc_id)
def object_does_not_exist(doc_type, doc_id):
"""
Builds a 404 error message with standard, translated, verbiage
"""
return ObjectDoesNotExist(_("Could not find %(doc_type)s with id %(id)s") % \
{"doc_type": doc_type, "id": doc_id})
| bsd-3-clause |
Ziqi-Li/bknqgis | bokeh/bokeh/server/server.py | 1 | 10467 | ''' Provides a Server which instantiates Application instances as clients connect
'''
from __future__ import absolute_import, print_function
import atexit
import logging
log = logging.getLogger(__name__)
import signal
import tornado
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado import netutil
from .tornado import BokehTornado
from bokeh import __version__
from bokeh.application import Application
from bokeh.resources import DEFAULT_SERVER_PORT
def _create_hosts_whitelist(host_list, port):
if not host_list:
return ['localhost:' + str(port)]
hosts = []
for host in host_list:
if '*' in host:
log.warning(
"Host wildcard %r will allow websocket connections originating "
"from multiple (or possibly all) hostnames or IPs. Use non-wildcard "
"values to restrict access explicitly", host)
if host == '*':
# do not append the :80 port suffix in that case: any port is
# accepted
hosts.append(host)
continue
parts = host.split(':')
if len(parts) == 1:
if parts[0] == "":
raise ValueError("Empty host value")
hosts.append(host+":80")
elif len(parts) == 2:
try:
int(parts[1])
except ValueError:
raise ValueError("Invalid port in host value: %s" % host)
if parts[0] == "":
raise ValueError("Empty host value")
hosts.append(host)
else:
raise ValueError("Invalid host value: %s" % host)
return hosts
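# Worked examples of the rules above (derived from the code, illustrative only):
#   _create_hosts_whitelist(None, 5006)                      -> ['localhost:5006']
#   _create_hosts_whitelist(['example.com'], 5006)           -> ['example.com:80']
#   _create_hosts_whitelist(['example.com:8080', '*'], 5006) -> ['example.com:8080', '*']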
def _bind_sockets(address, port):
'''Like tornado.netutil.bind_sockets(), but also returns the
assigned port number.
'''
ss = netutil.bind_sockets(port=port or 0, address=address)
assert len(ss)
ports = {s.getsockname()[1] for s in ss}
assert len(ports) == 1, "Multiple ports assigned??"
actual_port = ports.pop()
if port:
assert actual_port == port
return ss, actual_port
class Server(object):
''' A Server which creates a new Session for each connection, using an Application to initialize each Session.
Args:
applications (dict of str: bokeh.application.Application) or bokeh.application.Application:
mapping from URL paths to Application instances, or a single Application to put at the root URL
The Application is a factory for Document, with a new Document initialized for each Session.
Each application should be identified by a path meant to go in a URL, like "/" or "/foo"
Kwargs:
        num_procs (int):
            Number of worker processes for an app. Defaults to 1. Using 0 will autodetect the number of cores.
tornado_server_kwargs (dict):
Additional arguments passed to tornado.httpserver.HTTPServer. E.g. max_buffer_size to
specify the maximum upload size. More details can be found at:
http://www.tornadoweb.org/en/stable/httpserver.html#http-server
'''
def __init__(self, applications, io_loop=None, tornado_server_kwargs=None, **kwargs):
log.info("Starting Bokeh server version %s (running on Tornado %s)" % (__version__, tornado.version))
if isinstance(applications, Application):
self._applications = { '/' : applications }
else:
self._applications = applications
tornado_kwargs = { key: kwargs[key] for key in ['extra_patterns',
'secret_key',
'sign_sessions',
'generate_session_ids',
'keep_alive_milliseconds',
'check_unused_sessions_milliseconds',
'unused_session_lifetime_milliseconds',
'stats_log_frequency_milliseconds',
]
if key in kwargs }
prefix = kwargs.get('prefix')
if prefix is None:
prefix = ""
prefix = prefix.strip("/")
if prefix:
prefix = "/" + prefix
self._prefix = prefix
self._started = False
self._stopped = False
port = kwargs.get('port', DEFAULT_SERVER_PORT)
self._address = kwargs.get('address') or None
if tornado_server_kwargs is None:
tornado_server_kwargs = {}
tornado_server_kwargs.setdefault('xheaders', kwargs.get('use_xheaders', False))
self._num_procs = kwargs.get('num_procs', 1)
if self._num_procs != 1:
assert all(app.safe_to_fork for app in self._applications.values()), (
                'User code has run before attempting to run multiple '
'processes. This is considered an unsafe operation.')
sockets, self._port = _bind_sockets(self._address, port)
try:
tornado_kwargs['extra_websocket_origins'] = _create_hosts_whitelist(kwargs.get('allow_websocket_origin'), self._port)
tornado_kwargs['use_index'] = kwargs.get('use_index', True)
tornado_kwargs['redirect_root'] = kwargs.get('redirect_root', True)
self._tornado = BokehTornado(self._applications, self.prefix, **tornado_kwargs)
self._http = HTTPServer(self._tornado, **tornado_server_kwargs)
self._http.start(self._num_procs)
self._http.add_sockets(sockets)
except Exception:
for s in sockets:
s.close()
raise
# Can only instantiate the IO loop after HTTPServer.start() was
# called because of `num_procs`, see issue #5524
if io_loop is None:
io_loop = IOLoop.current()
self._loop = io_loop
self._tornado.initialize(io_loop=io_loop, **tornado_kwargs)
@property
def port(self):
'''The actual port number the server is listening on for HTTP
requests.
'''
return self._port
@property
def address(self):
'''The address the server is listening on for HTTP requests
(may be empty or None).
'''
return self._address
@property
def prefix(self):
return self._prefix
@property
def io_loop(self):
return self._loop
def start(self):
''' Start the Bokeh Server and its background tasks.
Notes:
This method does not block and does not affect the state of
the Tornado I/O loop. You must start and stop the loop yourself.
'''
assert not self._started, "Already started"
self._started = True
self._tornado.start()
def stop(self, wait=True):
''' Stop the Bokeh Server.
Args:
            wait (boolean): whether to wait for orderly cleanup (default: True)
Returns:
None
'''
assert not self._stopped, "Already stopped"
self._stopped = True
self._tornado.stop(wait)
self._http.stop()
def run_until_shutdown(self):
''' Run the Bokeh Server until shutdown is requested by the user,
either via a Keyboard interrupt (Ctrl-C) or SIGTERM.
'''
if not self._started:
self.start()
# Install shutdown hooks
atexit.register(self._atexit)
signal.signal(signal.SIGTERM, self._sigterm)
try:
self._loop.start()
except KeyboardInterrupt:
print("\nInterrupted, shutting down")
self.stop()
_atexit_ran = False
def _atexit(self):
if self._atexit_ran:
return
self._atexit_ran = True
log.debug("Shutdown: cleaning up")
if not self._stopped:
self.stop(wait=False)
def _sigterm(self, signum, frame):
print("Received signal %d, shutting down" % (signum,))
# Tell self._loop.start() to return.
self._loop.add_callback_from_signal(self._loop.stop)
def unlisten(self):
'''Stop listening on ports (Server will no longer be usable after calling this)
Returns:
None
'''
self._http.close_all_connections()
self._http.stop()
def get_session(self, app_path, session_id):
'''Gets a session by name (session must already exist)'''
return self._tornado.get_session(app_path, session_id)
def get_sessions(self, app_path=None):
'''Gets all live sessions for an application.'''
if app_path is not None:
return self._tornado.get_sessions(app_path)
all_sessions = []
for path in self._tornado.app_paths:
all_sessions += self._tornado.get_sessions(path)
return all_sessions
def show(self, app_path, browser=None, new='tab'):
''' Opens an app in a browser window or tab.
Useful for testing server applications on your local desktop but
            should not be called when running bokeh-server on an actual server.
Args:
app_path (str) : the app path to open
The part of the URL after the hostname:port, with leading slash.
browser (str, optional) : browser to show with (default: None)
For systems that support it, the **browser** argument allows
specifying which browser to display in, e.g. "safari", "firefox",
"opera", "windows-default" (see the ``webbrowser`` module
documentation in the standard lib for more details).
new (str, optional) : window or tab (default: "tab")
If ``new`` is 'tab', then opens a new tab.
If ``new`` is 'window', then opens a new window.
Returns:
None
'''
if not app_path.startswith("/"):
raise ValueError("app_path must start with a /")
address_string = 'localhost'
if self.address is not None and self.address != '':
address_string = self.address
url = "http://%s:%d%s%s" % (address_string, self.port, self.prefix, app_path)
from bokeh.util.browser import view
view(url, browser=browser, new=new)
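# A minimal usage sketch (not part of this module): serving a single application on
# the default port. `make_doc` is an illustrative callback; FunctionHandler is assumed
# to be available from bokeh.application.handlers.
#
#   from bokeh.application import Application
#   from bokeh.application.handlers import FunctionHandler
#
#   def make_doc(doc):
#       doc.title = "Example"  # populate the new Document here
#
#   server = Server({'/': Application(FunctionHandler(make_doc))})
#   server.run_until_shutdown()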
| gpl-2.0 |
raedwulf/linux | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <[email protected]>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r ,g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
uwdata/termite-data-server | web2py/applications-original/admin/controllers/mercurial.py | 34 | 2545 | from gluon.fileutils import read_file, write_file
if DEMO_MODE or MULTI_USER_MODE:
session.flash = T('disabled in demo mode')
redirect(URL('default', 'site'))
if not have_mercurial:
session.flash = T("Sorry, could not find mercurial installed")
redirect(URL('default', 'design', args=request.args(0)))
_hgignore_content = """\
syntax: glob
*~
*.pyc
*.pyo
*.bak
*.bak2
cache/*
private/*
uploads/*
databases/*
sessions/*
errors/*
"""
def hg_repo(path):
import os
uio = ui.ui()
uio.quiet = True
if not os.environ.get('HGUSER') and not uio.config("ui", "username"):
os.environ['HGUSER'] = 'web2py@localhost'
try:
repo = hg.repository(ui=uio, path=path)
except:
repo = hg.repository(ui=uio, path=path, create=True)
hgignore = os.path.join(path, '.hgignore')
if not os.path.exists(hgignore):
write_file(hgignore, _hgignore_content)
return repo
def commit():
app = request.args(0)
path = apath(app, r=request)
repo = hg_repo(path)
form = FORM(T('Comment:'), INPUT(_name='comment', requires=IS_NOT_EMPTY()),
INPUT(_type='submit', _value=T('Commit')))
if form.accepts(request.vars, session):
oldid = repo[repo.lookup('.')]
addremove(repo)
repo.commit(text=form.vars.comment)
if repo[repo.lookup('.')] == oldid:
response.flash = T('no changes')
try:
files = TABLE(*[TR(file) for file in repo[repo.lookup('.')].files()])
changes = TABLE(TR(TH('revision'), TH('description')))
for change in repo.changelog:
ctx = repo.changectx(change)
revision, description = ctx.rev(), ctx.description()
changes.append(TR(A(revision, _href=URL('revision',
args=(app, revision))),
description))
except:
files = []
changes = []
return dict(form=form, files=files, changes=changes, repo=repo)
def revision():
app = request.args(0)
path = apath(app, r=request)
repo = hg_repo(path)
revision = request.args(1)
ctx = repo.changectx(revision)
form = FORM(INPUT(_type='submit', _value=T('Revert')))
if form.accepts(request.vars):
hg.update(repo, revision)
session.flash = T("reverted to revision %s") % ctx.rev()
redirect(URL('default', 'design', args=app))
return dict(
files=ctx.files(),
rev=str(ctx.rev()),
desc=ctx.description(),
form=form
)
| bsd-3-clause |
nikolasjansen/nija | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/lexers/_mapping.py | 189 | 38484 | # -*- coding: utf-8 -*-
"""
pygments.lexers._mapping
~~~~~~~~~~~~~~~~~~~~~~~~
    Lexer mapping definitions. This file is generated by itself. Every time
    you change something on a builtin lexer definition, run this script from
the lexers folder to update it.
Do not alter the LEXERS dictionary by hand.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
LEXERS = {
'ABAPLexer': ('pygments.lexers.other', 'ABAP', ('abap',), ('*.abap',), ('text/x-abap',)),
'ActionScript3Lexer': ('pygments.lexers.web', 'ActionScript 3', ('as3', 'actionscript3'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')),
'ActionScriptLexer': ('pygments.lexers.web', 'ActionScript', ('as', 'actionscript'), ('*.as',), ('application/x-actionscript3', 'text/x-actionscript3', 'text/actionscript3')),
'AdaLexer': ('pygments.lexers.compiled', 'Ada', ('ada', 'ada95ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)),
'AgdaLexer': ('pygments.lexers.functional', 'Agda', ('agda',), ('*.agda',), ('text/x-agda',)),
'AntlrActionScriptLexer': ('pygments.lexers.parsers', 'ANTLR With ActionScript Target', ('antlr-as', 'antlr-actionscript'), ('*.G', '*.g'), ()),
'AntlrCSharpLexer': ('pygments.lexers.parsers', 'ANTLR With C# Target', ('antlr-csharp', 'antlr-c#'), ('*.G', '*.g'), ()),
'AntlrCppLexer': ('pygments.lexers.parsers', 'ANTLR With CPP Target', ('antlr-cpp',), ('*.G', '*.g'), ()),
'AntlrJavaLexer': ('pygments.lexers.parsers', 'ANTLR With Java Target', ('antlr-java',), ('*.G', '*.g'), ()),
'AntlrLexer': ('pygments.lexers.parsers', 'ANTLR', ('antlr',), (), ()),
'AntlrObjectiveCLexer': ('pygments.lexers.parsers', 'ANTLR With ObjectiveC Target', ('antlr-objc',), ('*.G', '*.g'), ()),
'AntlrPerlLexer': ('pygments.lexers.parsers', 'ANTLR With Perl Target', ('antlr-perl',), ('*.G', '*.g'), ()),
'AntlrPythonLexer': ('pygments.lexers.parsers', 'ANTLR With Python Target', ('antlr-python',), ('*.G', '*.g'), ()),
'AntlrRubyLexer': ('pygments.lexers.parsers', 'ANTLR With Ruby Target', ('antlr-ruby', 'antlr-rb'), ('*.G', '*.g'), ()),
'ApacheConfLexer': ('pygments.lexers.text', 'ApacheConf', ('apacheconf', 'aconf', 'apache'), ('.htaccess', 'apache.conf', 'apache2.conf'), ('text/x-apacheconf',)),
'AppleScriptLexer': ('pygments.lexers.other', 'AppleScript', ('applescript',), ('*.applescript',), ()),
'AspectJLexer': ('pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)),
'AsymptoteLexer': ('pygments.lexers.other', 'Asymptote', ('asy', 'asymptote'), ('*.asy',), ('text/x-asymptote',)),
'AutoItLexer': ('pygments.lexers.other', 'AutoIt', ('autoit', 'Autoit'), ('*.au3',), ('text/x-autoit',)),
'AutohotkeyLexer': ('pygments.lexers.other', 'autohotkey', ('ahk', 'autohotkey'), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)),
'AwkLexer': ('pygments.lexers.other', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)),
'BBCodeLexer': ('pygments.lexers.text', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)),
'BaseMakefileLexer': ('pygments.lexers.text', 'Base Makefile', ('basemake',), (), ()),
'BashLexer': ('pygments.lexers.shell', 'Bash', ('bash', 'sh', 'ksh'), ('*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', '.bashrc', 'bashrc', '.bash_*', 'bash_*'), ('application/x-sh', 'application/x-shellscript')),
'BashSessionLexer': ('pygments.lexers.shell', 'Bash Session', ('console',), ('*.sh-session',), ('application/x-shell-session',)),
'BatchLexer': ('pygments.lexers.shell', 'Batchfile', ('bat', 'dosbatch', 'winbatch'), ('*.bat', '*.cmd'), ('application/x-dos-batch',)),
'BefungeLexer': ('pygments.lexers.other', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)),
'BlitzBasicLexer': ('pygments.lexers.compiled', 'BlitzBasic', ('blitzbasic', 'b3d', 'bplus'), ('*.bb', '*.decls'), ('text/x-bb',)),
'BlitzMaxLexer': ('pygments.lexers.compiled', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)),
'BooLexer': ('pygments.lexers.dotnet', 'Boo', ('boo',), ('*.boo',), ('text/x-boo',)),
'BrainfuckLexer': ('pygments.lexers.other', 'Brainfuck', ('brainfuck', 'bf'), ('*.bf', '*.b'), ('application/x-brainfuck',)),
'BroLexer': ('pygments.lexers.other', 'Bro', ('bro',), ('*.bro',), ()),
'BugsLexer': ('pygments.lexers.math', 'BUGS', ('bugs', 'winbugs', 'openbugs'), ('*.bug',), ()),
'CLexer': ('pygments.lexers.compiled', 'C', ('c',), ('*.c', '*.h', '*.idc'), ('text/x-chdr', 'text/x-csrc')),
'CMakeLexer': ('pygments.lexers.text', 'CMake', ('cmake',), ('*.cmake', 'CMakeLists.txt'), ('text/x-cmake',)),
'CObjdumpLexer': ('pygments.lexers.asm', 'c-objdump', ('c-objdump',), ('*.c-objdump',), ('text/x-c-objdump',)),
'CSharpAspxLexer': ('pygments.lexers.dotnet', 'aspx-cs', ('aspx-cs',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
'CSharpLexer': ('pygments.lexers.dotnet', 'C#', ('csharp', 'c#'), ('*.cs',), ('text/x-csharp',)),
'Ca65Lexer': ('pygments.lexers.asm', 'ca65', ('ca65',), ('*.s',), ()),
'CbmBasicV2Lexer': ('pygments.lexers.other', 'CBM BASIC V2', ('cbmbas',), ('*.bas',), ()),
'CeylonLexer': ('pygments.lexers.jvm', 'Ceylon', ('ceylon',), ('*.ceylon',), ('text/x-ceylon',)),
'Cfengine3Lexer': ('pygments.lexers.other', 'CFEngine3', ('cfengine3', 'cf3'), ('*.cf',), ()),
'CheetahHtmlLexer': ('pygments.lexers.templates', 'HTML+Cheetah', ('html+cheetah', 'html+spitfire', 'htmlcheetah'), (), ('text/html+cheetah', 'text/html+spitfire')),
'CheetahJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Cheetah', ('js+cheetah', 'javascript+cheetah', 'js+spitfire', 'javascript+spitfire'), (), ('application/x-javascript+cheetah', 'text/x-javascript+cheetah', 'text/javascript+cheetah', 'application/x-javascript+spitfire', 'text/x-javascript+spitfire', 'text/javascript+spitfire')),
'CheetahLexer': ('pygments.lexers.templates', 'Cheetah', ('cheetah', 'spitfire'), ('*.tmpl', '*.spt'), ('application/x-cheetah', 'application/x-spitfire')),
'CheetahXmlLexer': ('pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), (), ('application/xml+cheetah', 'application/xml+spitfire')),
'ClayLexer': ('pygments.lexers.compiled', 'Clay', ('clay',), ('*.clay',), ('text/x-clay',)),
'ClojureLexer': ('pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj',), ('text/x-clojure', 'application/x-clojure')),
'CobolFreeformatLexer': ('pygments.lexers.compiled', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()),
'CobolLexer': ('pygments.lexers.compiled', 'COBOL', ('cobol',), ('*.cob', '*.COB', '*.cpy', '*.CPY'), ('text/x-cobol',)),
'CoffeeScriptLexer': ('pygments.lexers.web', 'CoffeeScript', ('coffee-script', 'coffeescript', 'coffee'), ('*.coffee',), ('text/coffeescript',)),
'ColdfusionHtmlLexer': ('pygments.lexers.templates', 'Coldfusion HTML', ('cfm',), ('*.cfm', '*.cfml', '*.cfc'), ('application/x-coldfusion',)),
'ColdfusionLexer': ('pygments.lexers.templates', 'cfstatement', ('cfs',), (), ()),
'CommonLispLexer': ('pygments.lexers.functional', 'Common Lisp', ('common-lisp', 'cl', 'lisp'), ('*.cl', '*.lisp', '*.el'), ('text/x-common-lisp',)),
'CoqLexer': ('pygments.lexers.functional', 'Coq', ('coq',), ('*.v',), ('text/x-coq',)),
'CppLexer': ('pygments.lexers.compiled', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP'), ('text/x-c++hdr', 'text/x-c++src')),
'CppObjdumpLexer': ('pygments.lexers.asm', 'cpp-objdump', ('cpp-objdump', 'c++-objdumb', 'cxx-objdump'), ('*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump'), ('text/x-cpp-objdump',)),
'CrocLexer': ('pygments.lexers.agile', 'Croc', ('croc',), ('*.croc',), ('text/x-crocsrc',)),
'CssDjangoLexer': ('pygments.lexers.templates', 'CSS+Django/Jinja', ('css+django', 'css+jinja'), (), ('text/css+django', 'text/css+jinja')),
'CssErbLexer': ('pygments.lexers.templates', 'CSS+Ruby', ('css+erb', 'css+ruby'), (), ('text/css+ruby',)),
'CssGenshiLexer': ('pygments.lexers.templates', 'CSS+Genshi Text', ('css+genshitext', 'css+genshi'), (), ('text/css+genshi',)),
'CssLexer': ('pygments.lexers.web', 'CSS', ('css',), ('*.css',), ('text/css',)),
'CssPhpLexer': ('pygments.lexers.templates', 'CSS+PHP', ('css+php',), (), ('text/css+php',)),
'CssSmartyLexer': ('pygments.lexers.templates', 'CSS+Smarty', ('css+smarty',), (), ('text/css+smarty',)),
'CudaLexer': ('pygments.lexers.compiled', 'CUDA', ('cuda', 'cu'), ('*.cu', '*.cuh'), ('text/x-cuda',)),
'CythonLexer': ('pygments.lexers.compiled', 'Cython', ('cython', 'pyx', 'pyrex'), ('*.pyx', '*.pxd', '*.pxi'), ('text/x-cython', 'application/x-cython')),
'DLexer': ('pygments.lexers.compiled', 'D', ('d',), ('*.d', '*.di'), ('text/x-dsrc',)),
'DObjdumpLexer': ('pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)),
'DarcsPatchLexer': ('pygments.lexers.text', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()),
'DartLexer': ('pygments.lexers.web', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)),
'DebianControlLexer': ('pygments.lexers.text', 'Debian Control file', ('control', 'debcontrol'), ('control',), ()),
'DelphiLexer': ('pygments.lexers.compiled', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas',), ('text/x-pascal',)),
'DgLexer': ('pygments.lexers.agile', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)),
'DiffLexer': ('pygments.lexers.text', 'Diff', ('diff', 'udiff'), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch')),
'DjangoLexer': ('pygments.lexers.templates', 'Django/Jinja', ('django', 'jinja'), (), ('application/x-django-templating', 'application/x-jinja')),
'DtdLexer': ('pygments.lexers.web', 'DTD', ('dtd',), ('*.dtd',), ('application/xml-dtd',)),
'DuelLexer': ('pygments.lexers.web', 'Duel', ('duel', 'Duel Engine', 'Duel View', 'JBST', 'jbst', 'JsonML+BST'), ('*.duel', '*.jbst'), ('text/x-duel', 'text/x-jbst')),
'DylanConsoleLexer': ('pygments.lexers.compiled', 'Dylan session', ('dylan-console', 'dylan-repl'), ('*.dylan-console',), ('text/x-dylan-console',)),
'DylanLexer': ('pygments.lexers.compiled', 'Dylan', ('dylan',), ('*.dylan', '*.dyl', '*.intr'), ('text/x-dylan',)),
'DylanLidLexer': ('pygments.lexers.compiled', 'DylanLID', ('dylan-lid', 'lid'), ('*.lid', '*.hdp'), ('text/x-dylan-lid',)),
'ECLLexer': ('pygments.lexers.other', 'ECL', ('ecl',), ('*.ecl',), ('application/x-ecl',)),
'ECLexer': ('pygments.lexers.compiled', 'eC', ('ec',), ('*.ec', '*.eh'), ('text/x-echdr', 'text/x-ecsrc')),
'EbnfLexer': ('pygments.lexers.text', 'EBNF', ('ebnf',), ('*.ebnf',), ('text/x-ebnf',)),
'ElixirConsoleLexer': ('pygments.lexers.functional', 'Elixir iex session', ('iex',), (), ('text/x-elixir-shellsession',)),
'ElixirLexer': ('pygments.lexers.functional', 'Elixir', ('elixir', 'ex', 'exs'), ('*.ex', '*.exs'), ('text/x-elixir',)),
'ErbLexer': ('pygments.lexers.templates', 'ERB', ('erb',), (), ('application/x-ruby-templating',)),
'ErlangLexer': ('pygments.lexers.functional', 'Erlang', ('erlang',), ('*.erl', '*.hrl', '*.es', '*.escript'), ('text/x-erlang',)),
'ErlangShellLexer': ('pygments.lexers.functional', 'Erlang erl session', ('erl',), ('*.erl-sh',), ('text/x-erl-shellsession',)),
'EvoqueHtmlLexer': ('pygments.lexers.templates', 'HTML+Evoque', ('html+evoque',), ('*.html',), ('text/html+evoque',)),
'EvoqueLexer': ('pygments.lexers.templates', 'Evoque', ('evoque',), ('*.evoque',), ('application/x-evoque',)),
'EvoqueXmlLexer': ('pygments.lexers.templates', 'XML+Evoque', ('xml+evoque',), ('*.xml',), ('application/xml+evoque',)),
'FSharpLexer': ('pygments.lexers.dotnet', 'FSharp', ('fsharp',), ('*.fs', '*.fsi'), ('text/x-fsharp',)),
'FactorLexer': ('pygments.lexers.agile', 'Factor', ('factor',), ('*.factor',), ('text/x-factor',)),
'FancyLexer': ('pygments.lexers.agile', 'Fancy', ('fancy', 'fy'), ('*.fy', '*.fancypack'), ('text/x-fancysrc',)),
'FantomLexer': ('pygments.lexers.compiled', 'Fantom', ('fan',), ('*.fan',), ('application/x-fantom',)),
'FelixLexer': ('pygments.lexers.compiled', 'Felix', ('felix', 'flx'), ('*.flx', '*.flxh'), ('text/x-felix',)),
'FortranLexer': ('pygments.lexers.compiled', 'Fortran', ('fortran',), ('*.f', '*.f90', '*.F', '*.F90'), ('text/x-fortran',)),
'FoxProLexer': ('pygments.lexers.foxpro', 'FoxPro', ('Clipper', 'XBase'), ('*.PRG', '*.prg'), ()),
'GLShaderLexer': ('pygments.lexers.compiled', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)),
'GasLexer': ('pygments.lexers.asm', 'GAS', ('gas', 'asm'), ('*.s', '*.S'), ('text/x-gas',)),
'GenshiLexer': ('pygments.lexers.templates', 'Genshi', ('genshi', 'kid', 'xml+genshi', 'xml+kid'), ('*.kid',), ('application/x-genshi', 'application/x-kid')),
'GenshiTextLexer': ('pygments.lexers.templates', 'Genshi Text', ('genshitext',), (), ('application/x-genshi-text', 'text/x-genshi')),
'GettextLexer': ('pygments.lexers.text', 'Gettext Catalog', ('pot', 'po'), ('*.pot', '*.po'), ('application/x-gettext', 'text/x-gettext', 'text/gettext')),
'GherkinLexer': ('pygments.lexers.other', 'Gherkin', ('Cucumber', 'cucumber', 'Gherkin', 'gherkin'), ('*.feature',), ('text/x-gherkin',)),
'GnuplotLexer': ('pygments.lexers.other', 'Gnuplot', ('gnuplot',), ('*.plot', '*.plt'), ('text/x-gnuplot',)),
'GoLexer': ('pygments.lexers.compiled', 'Go', ('go',), ('*.go',), ('text/x-gosrc',)),
'GoodDataCLLexer': ('pygments.lexers.other', 'GoodData-CL', ('gooddata-cl',), ('*.gdc',), ('text/x-gooddata-cl',)),
'GosuLexer': ('pygments.lexers.jvm', 'Gosu', ('gosu',), ('*.gs', '*.gsx', '*.gsp', '*.vark'), ('text/x-gosu',)),
'GosuTemplateLexer': ('pygments.lexers.jvm', 'Gosu Template', ('gst',), ('*.gst',), ('text/x-gosu-template',)),
'GroffLexer': ('pygments.lexers.text', 'Groff', ('groff', 'nroff', 'man'), ('*.[1234567]', '*.man'), ('application/x-troff', 'text/troff')),
'GroovyLexer': ('pygments.lexers.jvm', 'Groovy', ('groovy',), ('*.groovy',), ('text/x-groovy',)),
'HamlLexer': ('pygments.lexers.web', 'Haml', ('haml', 'HAML'), ('*.haml',), ('text/x-haml',)),
'HaskellLexer': ('pygments.lexers.functional', 'Haskell', ('haskell', 'hs'), ('*.hs',), ('text/x-haskell',)),
'HaxeLexer': ('pygments.lexers.web', 'Haxe', ('hx', 'Haxe', 'haxe', 'haXe', 'hxsl'), ('*.hx', '*.hxsl'), ('text/haxe', 'text/x-haxe', 'text/x-hx')),
'HtmlDjangoLexer': ('pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja', 'htmldjango'), (), ('text/html+django', 'text/html+jinja')),
'HtmlGenshiLexer': ('pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)),
'HtmlLexer': ('pygments.lexers.web', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')),
'HtmlPhpLexer': ('pygments.lexers.templates', 'HTML+PHP', ('html+php',), ('*.phtml',), ('application/x-php', 'application/x-httpd-php', 'application/x-httpd-php3', 'application/x-httpd-php4', 'application/x-httpd-php5')),
'HtmlSmartyLexer': ('pygments.lexers.templates', 'HTML+Smarty', ('html+smarty',), (), ('text/html+smarty',)),
'HttpLexer': ('pygments.lexers.text', 'HTTP', ('http',), (), ()),
'HxmlLexer': ('pygments.lexers.text', 'Hxml', ('haxeml', 'hxml'), ('*.hxml',), ()),
'HybrisLexer': ('pygments.lexers.other', 'Hybris', ('hybris', 'hy'), ('*.hy', '*.hyb'), ('text/x-hybris', 'application/x-hybris')),
'IDLLexer': ('pygments.lexers.math', 'IDL', ('idl',), ('*.pro',), ('text/idl',)),
'IgorLexer': ('pygments.lexers.math', 'Igor', ('igor', 'igorpro'), ('*.ipf',), ('text/ipf',)),
'IniLexer': ('pygments.lexers.text', 'INI', ('ini', 'cfg', 'dosini'), ('*.ini', '*.cfg'), ('text/x-ini',)),
'IoLexer': ('pygments.lexers.agile', 'Io', ('io',), ('*.io',), ('text/x-iosrc',)),
'IokeLexer': ('pygments.lexers.jvm', 'Ioke', ('ioke', 'ik'), ('*.ik',), ('text/x-iokesrc',)),
'IrcLogsLexer': ('pygments.lexers.text', 'IRC logs', ('irc',), ('*.weechatlog',), ('text/x-irclog',)),
'JadeLexer': ('pygments.lexers.web', 'Jade', ('jade', 'JADE'), ('*.jade',), ('text/x-jade',)),
'JagsLexer': ('pygments.lexers.math', 'JAGS', ('jags',), ('*.jag', '*.bug'), ()),
'JavaLexer': ('pygments.lexers.jvm', 'Java', ('java',), ('*.java',), ('text/x-java',)),
'JavascriptDjangoLexer': ('pygments.lexers.templates', 'JavaScript+Django/Jinja', ('js+django', 'javascript+django', 'js+jinja', 'javascript+jinja'), (), ('application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja')),
'JavascriptErbLexer': ('pygments.lexers.templates', 'JavaScript+Ruby', ('js+erb', 'javascript+erb', 'js+ruby', 'javascript+ruby'), (), ('application/x-javascript+ruby', 'text/x-javascript+ruby', 'text/javascript+ruby')),
'JavascriptGenshiLexer': ('pygments.lexers.templates', 'JavaScript+Genshi Text', ('js+genshitext', 'js+genshi', 'javascript+genshitext', 'javascript+genshi'), (), ('application/x-javascript+genshi', 'text/x-javascript+genshi', 'text/javascript+genshi')),
'JavascriptLexer': ('pygments.lexers.web', 'JavaScript', ('js', 'javascript'), ('*.js',), ('application/javascript', 'application/x-javascript', 'text/x-javascript', 'text/javascript')),
'JavascriptPhpLexer': ('pygments.lexers.templates', 'JavaScript+PHP', ('js+php', 'javascript+php'), (), ('application/x-javascript+php', 'text/x-javascript+php', 'text/javascript+php')),
'JavascriptSmartyLexer': ('pygments.lexers.templates', 'JavaScript+Smarty', ('js+smarty', 'javascript+smarty'), (), ('application/x-javascript+smarty', 'text/x-javascript+smarty', 'text/javascript+smarty')),
'JsonLexer': ('pygments.lexers.web', 'JSON', ('json',), ('*.json',), ('application/json',)),
'JspLexer': ('pygments.lexers.templates', 'Java Server Page', ('jsp',), ('*.jsp',), ('application/x-jsp',)),
'JuliaConsoleLexer': ('pygments.lexers.math', 'Julia console', ('jlcon',), (), ()),
'JuliaLexer': ('pygments.lexers.math', 'Julia', ('julia', 'jl'), ('*.jl',), ('text/x-julia', 'application/x-julia')),
'KconfigLexer': ('pygments.lexers.other', 'Kconfig', ('kconfig', 'menuconfig', 'linux-config', 'kernel-config'), ('Kconfig', '*Config.in*', 'external.in*', 'standard-modules.in'), ('text/x-kconfig',)),
'KokaLexer': ('pygments.lexers.functional', 'Koka', ('koka',), ('*.kk', '*.kki'), ('text/x-koka',)),
'KotlinLexer': ('pygments.lexers.jvm', 'Kotlin', ('kotlin',), ('*.kt',), ('text/x-kotlin',)),
'LassoCssLexer': ('pygments.lexers.templates', 'CSS+Lasso', ('css+lasso',), (), ('text/css+lasso',)),
'LassoHtmlLexer': ('pygments.lexers.templates', 'HTML+Lasso', ('html+lasso',), (), ('text/html+lasso', 'application/x-httpd-lasso', 'application/x-httpd-lasso[89]')),
'LassoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Lasso', ('js+lasso', 'javascript+lasso'), (), ('application/x-javascript+lasso', 'text/x-javascript+lasso', 'text/javascript+lasso')),
'LassoLexer': ('pygments.lexers.web', 'Lasso', ('lasso', 'lassoscript'), ('*.lasso', '*.lasso[89]'), ('text/x-lasso',)),
'LassoXmlLexer': ('pygments.lexers.templates', 'XML+Lasso', ('xml+lasso',), (), ('application/xml+lasso',)),
'LighttpdConfLexer': ('pygments.lexers.text', 'Lighttpd configuration file', ('lighty', 'lighttpd'), (), ('text/x-lighttpd-conf',)),
'LiterateAgdaLexer': ('pygments.lexers.functional', 'Literate Agda', ('lagda', 'literate-agda'), ('*.lagda',), ('text/x-literate-agda',)),
'LiterateHaskellLexer': ('pygments.lexers.functional', 'Literate Haskell', ('lhs', 'literate-haskell', 'lhaskell'), ('*.lhs',), ('text/x-literate-haskell',)),
'LiveScriptLexer': ('pygments.lexers.web', 'LiveScript', ('live-script', 'livescript'), ('*.ls',), ('text/livescript',)),
'LlvmLexer': ('pygments.lexers.asm', 'LLVM', ('llvm',), ('*.ll',), ('text/x-llvm',)),
'LogosLexer': ('pygments.lexers.compiled', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)),
'LogtalkLexer': ('pygments.lexers.other', 'Logtalk', ('logtalk',), ('*.lgt',), ('text/x-logtalk',)),
'LuaLexer': ('pygments.lexers.agile', 'Lua', ('lua',), ('*.lua', '*.wlua'), ('text/x-lua', 'application/x-lua')),
'MOOCodeLexer': ('pygments.lexers.other', 'MOOCode', ('moocode', 'moo'), ('*.moo',), ('text/x-moocode',)),
'MakefileLexer': ('pygments.lexers.text', 'Makefile', ('make', 'makefile', 'mf', 'bsdmake'), ('*.mak', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'), ('text/x-makefile',)),
'MakoCssLexer': ('pygments.lexers.templates', 'CSS+Mako', ('css+mako',), (), ('text/css+mako',)),
'MakoHtmlLexer': ('pygments.lexers.templates', 'HTML+Mako', ('html+mako',), (), ('text/html+mako',)),
'MakoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Mako', ('js+mako', 'javascript+mako'), (), ('application/x-javascript+mako', 'text/x-javascript+mako', 'text/javascript+mako')),
'MakoLexer': ('pygments.lexers.templates', 'Mako', ('mako',), ('*.mao',), ('application/x-mako',)),
'MakoXmlLexer': ('pygments.lexers.templates', 'XML+Mako', ('xml+mako',), (), ('application/xml+mako',)),
'MaqlLexer': ('pygments.lexers.other', 'MAQL', ('maql',), ('*.maql',), ('text/x-gooddata-maql', 'application/x-gooddata-maql')),
'MasonLexer': ('pygments.lexers.templates', 'Mason', ('mason',), ('*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler'), ('application/x-mason',)),
'MatlabLexer': ('pygments.lexers.math', 'Matlab', ('matlab',), ('*.m',), ('text/matlab',)),
'MatlabSessionLexer': ('pygments.lexers.math', 'Matlab session', ('matlabsession',), (), ()),
'MiniDLexer': ('pygments.lexers.agile', 'MiniD', ('minid',), ('*.md',), ('text/x-minidsrc',)),
'ModelicaLexer': ('pygments.lexers.other', 'Modelica', ('modelica',), ('*.mo',), ('text/x-modelica',)),
'Modula2Lexer': ('pygments.lexers.compiled', 'Modula-2', ('modula2', 'm2'), ('*.def', '*.mod'), ('text/x-modula2',)),
'MoinWikiLexer': ('pygments.lexers.text', 'MoinMoin/Trac Wiki markup', ('trac-wiki', 'moin'), (), ('text/x-trac-wiki',)),
'MonkeyLexer': ('pygments.lexers.compiled', 'Monkey', ('monkey',), ('*.monkey',), ('text/x-monkey',)),
'MoonScriptLexer': ('pygments.lexers.agile', 'MoonScript', ('moon', 'moonscript'), ('*.moon',), ('text/x-moonscript', 'application/x-moonscript')),
'MscgenLexer': ('pygments.lexers.other', 'Mscgen', ('mscgen', 'msc'), ('*.msc',), ()),
'MuPADLexer': ('pygments.lexers.math', 'MuPAD', ('mupad',), ('*.mu',), ()),
'MxmlLexer': ('pygments.lexers.web', 'MXML', ('mxml',), ('*.mxml',), ()),
'MySqlLexer': ('pygments.lexers.sql', 'MySQL', ('mysql',), (), ('text/x-mysql',)),
'MyghtyCssLexer': ('pygments.lexers.templates', 'CSS+Myghty', ('css+myghty',), (), ('text/css+myghty',)),
'MyghtyHtmlLexer': ('pygments.lexers.templates', 'HTML+Myghty', ('html+myghty',), (), ('text/html+myghty',)),
'MyghtyJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Myghty', ('js+myghty', 'javascript+myghty'), (), ('application/x-javascript+myghty', 'text/x-javascript+myghty', 'text/javascript+mygthy')),
'MyghtyLexer': ('pygments.lexers.templates', 'Myghty', ('myghty',), ('*.myt', 'autodelegate'), ('application/x-myghty',)),
'MyghtyXmlLexer': ('pygments.lexers.templates', 'XML+Myghty', ('xml+myghty',), (), ('application/xml+myghty',)),
'NSISLexer': ('pygments.lexers.other', 'NSIS', ('nsis', 'nsi', 'nsh'), ('*.nsi', '*.nsh'), ('text/x-nsis',)),
'NasmLexer': ('pygments.lexers.asm', 'NASM', ('nasm',), ('*.asm', '*.ASM'), ('text/x-nasm',)),
'NemerleLexer': ('pygments.lexers.dotnet', 'Nemerle', ('nemerle',), ('*.n',), ('text/x-nemerle',)),
'NesCLexer': ('pygments.lexers.compiled', 'nesC', ('nesc',), ('*.nc',), ('text/x-nescsrc',)),
'NewLispLexer': ('pygments.lexers.functional', 'NewLisp', ('newlisp',), ('*.lsp', '*.nl'), ('text/x-newlisp', 'application/x-newlisp')),
'NewspeakLexer': ('pygments.lexers.other', 'Newspeak', ('newspeak',), ('*.ns2',), ('text/x-newspeak',)),
'NginxConfLexer': ('pygments.lexers.text', 'Nginx configuration file', ('nginx',), (), ('text/x-nginx-conf',)),
'NimrodLexer': ('pygments.lexers.compiled', 'Nimrod', ('nimrod', 'nim'), ('*.nim', '*.nimrod'), ('text/x-nimrod',)),
'NumPyLexer': ('pygments.lexers.math', 'NumPy', ('numpy',), (), ()),
'ObjdumpLexer': ('pygments.lexers.asm', 'objdump', ('objdump',), ('*.objdump',), ('text/x-objdump',)),
'ObjectiveCLexer': ('pygments.lexers.compiled', 'Objective-C', ('objective-c', 'objectivec', 'obj-c', 'objc'), ('*.m', '*.h'), ('text/x-objective-c',)),
'ObjectiveCppLexer': ('pygments.lexers.compiled', 'Objective-C++', ('objective-c++', 'objectivec++', 'obj-c++', 'objc++'), ('*.mm', '*.hh'), ('text/x-objective-c++',)),
'ObjectiveJLexer': ('pygments.lexers.web', 'Objective-J', ('objective-j', 'objectivej', 'obj-j', 'objj'), ('*.j',), ('text/x-objective-j',)),
'OcamlLexer': ('pygments.lexers.functional', 'OCaml', ('ocaml',), ('*.ml', '*.mli', '*.mll', '*.mly'), ('text/x-ocaml',)),
'OctaveLexer': ('pygments.lexers.math', 'Octave', ('octave',), ('*.m',), ('text/octave',)),
'OocLexer': ('pygments.lexers.compiled', 'Ooc', ('ooc',), ('*.ooc',), ('text/x-ooc',)),
'OpaLexer': ('pygments.lexers.functional', 'Opa', ('opa',), ('*.opa',), ('text/x-opa',)),
'OpenEdgeLexer': ('pygments.lexers.other', 'OpenEdge ABL', ('openedge', 'abl', 'progress'), ('*.p', '*.cls'), ('text/x-openedge', 'application/x-openedge')),
'Perl6Lexer': ('pygments.lexers.agile', 'Perl6', ('perl6', 'pl6'), ('*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', '*.6pm', '*.p6m', '*.pm6'), ('text/x-perl6', 'application/x-perl6')),
'PerlLexer': ('pygments.lexers.agile', 'Perl', ('perl', 'pl'), ('*.pl', '*.pm'), ('text/x-perl', 'application/x-perl')),
'PhpLexer': ('pygments.lexers.web', 'PHP', ('php', 'php3', 'php4', 'php5'), ('*.php', '*.php[345]', '*.inc'), ('text/x-php',)),
'PlPgsqlLexer': ('pygments.lexers.sql', 'PL/pgSQL', ('plpgsql',), (), ('text/x-plpgsql',)),
'PostScriptLexer': ('pygments.lexers.other', 'PostScript', ('postscript', 'postscr'), ('*.ps', '*.eps'), ('application/postscript',)),
'PostgresConsoleLexer': ('pygments.lexers.sql', 'PostgreSQL console (psql)', ('psql', 'postgresql-console', 'postgres-console'), (), ('text/x-postgresql-psql',)),
'PostgresLexer': ('pygments.lexers.sql', 'PostgreSQL SQL dialect', ('postgresql', 'postgres'), (), ('text/x-postgresql',)),
'PovrayLexer': ('pygments.lexers.other', 'POVRay', ('pov',), ('*.pov', '*.inc'), ('text/x-povray',)),
'PowerShellLexer': ('pygments.lexers.shell', 'PowerShell', ('powershell', 'posh', 'ps1', 'psm1'), ('*.ps1', '*.psm1'), ('text/x-powershell',)),
'PrologLexer': ('pygments.lexers.compiled', 'Prolog', ('prolog',), ('*.prolog', '*.pro', '*.pl'), ('text/x-prolog',)),
'PropertiesLexer': ('pygments.lexers.text', 'Properties', ('properties', 'jproperties'), ('*.properties',), ('text/x-java-properties',)),
'ProtoBufLexer': ('pygments.lexers.other', 'Protocol Buffer', ('protobuf', 'proto'), ('*.proto',), ()),
'PuppetLexer': ('pygments.lexers.other', 'Puppet', ('puppet',), ('*.pp',), ()),
'PyPyLogLexer': ('pygments.lexers.text', 'PyPy Log', ('pypylog', 'pypy'), ('*.pypylog',), ('application/x-pypylog',)),
'Python3Lexer': ('pygments.lexers.agile', 'Python 3', ('python3', 'py3'), (), ('text/x-python3', 'application/x-python3')),
'Python3TracebackLexer': ('pygments.lexers.agile', 'Python 3.0 Traceback', ('py3tb',), ('*.py3tb',), ('text/x-python3-traceback',)),
'PythonConsoleLexer': ('pygments.lexers.agile', 'Python console session', ('pycon',), (), ('text/x-python-doctest',)),
'PythonLexer': ('pygments.lexers.agile', 'Python', ('python', 'py', 'sage'), ('*.py', '*.pyw', '*.sc', 'SConstruct', 'SConscript', '*.tac', '*.sage'), ('text/x-python', 'application/x-python')),
'PythonTracebackLexer': ('pygments.lexers.agile', 'Python Traceback', ('pytb',), ('*.pytb',), ('text/x-python-traceback',)),
'QmlLexer': ('pygments.lexers.web', 'QML', ('qml', 'Qt Meta Language', 'Qt modeling Language'), ('*.qml',), ('application/x-qml',)),
'RConsoleLexer': ('pygments.lexers.math', 'RConsole', ('rconsole', 'rout'), ('*.Rout',), ()),
'RPMSpecLexer': ('pygments.lexers.other', 'RPMSpec', ('spec',), ('*.spec',), ('text/x-rpm-spec',)),
'RacketLexer': ('pygments.lexers.functional', 'Racket', ('racket', 'rkt'), ('*.rkt', '*.rktl'), ('text/x-racket', 'application/x-racket')),
'RagelCLexer': ('pygments.lexers.parsers', 'Ragel in C Host', ('ragel-c',), ('*.rl',), ()),
'RagelCppLexer': ('pygments.lexers.parsers', 'Ragel in CPP Host', ('ragel-cpp',), ('*.rl',), ()),
'RagelDLexer': ('pygments.lexers.parsers', 'Ragel in D Host', ('ragel-d',), ('*.rl',), ()),
'RagelEmbeddedLexer': ('pygments.lexers.parsers', 'Embedded Ragel', ('ragel-em',), ('*.rl',), ()),
'RagelJavaLexer': ('pygments.lexers.parsers', 'Ragel in Java Host', ('ragel-java',), ('*.rl',), ()),
'RagelLexer': ('pygments.lexers.parsers', 'Ragel', ('ragel',), (), ()),
'RagelObjectiveCLexer': ('pygments.lexers.parsers', 'Ragel in Objective C Host', ('ragel-objc',), ('*.rl',), ()),
'RagelRubyLexer': ('pygments.lexers.parsers', 'Ragel in Ruby Host', ('ragel-ruby', 'ragel-rb'), ('*.rl',), ()),
'RawTokenLexer': ('pygments.lexers.special', 'Raw token data', ('raw',), (), ('application/x-pygments-tokens',)),
'RdLexer': ('pygments.lexers.math', 'Rd', ('rd',), ('*.Rd',), ('text/x-r-doc',)),
'RebolLexer': ('pygments.lexers.other', 'REBOL', ('rebol',), ('*.r', '*.r3'), ('text/x-rebol',)),
'RedcodeLexer': ('pygments.lexers.other', 'Redcode', ('redcode',), ('*.cw',), ()),
'RegeditLexer': ('pygments.lexers.text', 'reg', ('registry',), ('*.reg',), ('text/x-windows-registry',)),
'RexxLexer': ('pygments.lexers.other', 'Rexx', ('rexx', 'ARexx', 'arexx'), ('*.rexx', '*.rex', '*.rx', '*.arexx'), ('text/x-rexx',)),
'RhtmlLexer': ('pygments.lexers.templates', 'RHTML', ('rhtml', 'html+erb', 'html+ruby'), ('*.rhtml',), ('text/html+ruby',)),
'RobotFrameworkLexer': ('pygments.lexers.other', 'RobotFramework', ('RobotFramework', 'robotframework'), ('*.txt', '*.robot'), ('text/x-robotframework',)),
'RstLexer': ('pygments.lexers.text', 'reStructuredText', ('rst', 'rest', 'restructuredtext'), ('*.rst', '*.rest'), ('text/x-rst', 'text/prs.fallenstein.rst')),
'RubyConsoleLexer': ('pygments.lexers.agile', 'Ruby irb session', ('rbcon', 'irb'), (), ('text/x-ruby-shellsession',)),
'RubyLexer': ('pygments.lexers.agile', 'Ruby', ('rb', 'ruby', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby'), ('text/x-ruby', 'application/x-ruby')),
'RustLexer': ('pygments.lexers.compiled', 'Rust', ('rust',), ('*.rs', '*.rc'), ('text/x-rustsrc',)),
'SLexer': ('pygments.lexers.math', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')),
'SMLLexer': ('pygments.lexers.functional', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')),
'SassLexer': ('pygments.lexers.web', 'Sass', ('sass', 'SASS'), ('*.sass',), ('text/x-sass',)),
'ScalaLexer': ('pygments.lexers.jvm', 'Scala', ('scala',), ('*.scala',), ('text/x-scala',)),
'ScamlLexer': ('pygments.lexers.web', 'Scaml', ('scaml', 'SCAML'), ('*.scaml',), ('text/x-scaml',)),
'SchemeLexer': ('pygments.lexers.functional', 'Scheme', ('scheme', 'scm'), ('*.scm', '*.ss'), ('text/x-scheme', 'application/x-scheme')),
'ScilabLexer': ('pygments.lexers.math', 'Scilab', ('scilab',), ('*.sci', '*.sce', '*.tst'), ('text/scilab',)),
'ScssLexer': ('pygments.lexers.web', 'SCSS', ('scss',), ('*.scss',), ('text/x-scss',)),
'ShellSessionLexer': ('pygments.lexers.shell', 'Shell Session', ('shell-session',), ('*.shell-session',), ('application/x-sh-session',)),
'SmaliLexer': ('pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)),
'SmalltalkLexer': ('pygments.lexers.other', 'Smalltalk', ('smalltalk', 'squeak', 'st'), ('*.st',), ('text/x-smalltalk',)),
'SmartyLexer': ('pygments.lexers.templates', 'Smarty', ('smarty',), ('*.tpl',), ('application/x-smarty',)),
'SnobolLexer': ('pygments.lexers.other', 'Snobol', ('snobol',), ('*.snobol',), ('text/x-snobol',)),
'SourcePawnLexer': ('pygments.lexers.other', 'SourcePawn', ('sp',), ('*.sp',), ('text/x-sourcepawn',)),
'SourcesListLexer': ('pygments.lexers.text', 'Debian Sourcelist', ('sourceslist', 'sources.list', 'debsources'), ('sources.list',), ()),
'SqlLexer': ('pygments.lexers.sql', 'SQL', ('sql',), ('*.sql',), ('text/x-sql',)),
'SqliteConsoleLexer': ('pygments.lexers.sql', 'sqlite3con', ('sqlite3',), ('*.sqlite3-console',), ('text/x-sqlite3-console',)),
'SquidConfLexer': ('pygments.lexers.text', 'SquidConf', ('squidconf', 'squid.conf', 'squid'), ('squid.conf',), ('text/x-squidconf',)),
'SspLexer': ('pygments.lexers.templates', 'Scalate Server Page', ('ssp',), ('*.ssp',), ('application/x-ssp',)),
'StanLexer': ('pygments.lexers.math', 'Stan', ('stan',), ('*.stan',), ()),
'SwigLexer': ('pygments.lexers.compiled', 'SWIG', ('Swig', 'swig'), ('*.swg', '*.i'), ('text/swig',)),
'SystemVerilogLexer': ('pygments.lexers.hdl', 'systemverilog', ('systemverilog', 'sv'), ('*.sv', '*.svh'), ('text/x-systemverilog',)),
'TclLexer': ('pygments.lexers.agile', 'Tcl', ('tcl',), ('*.tcl',), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')),
'TcshLexer': ('pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)),
'TeaTemplateLexer': ('pygments.lexers.templates', 'Tea', ('tea',), ('*.tea',), ('text/x-tea',)),
'TexLexer': ('pygments.lexers.text', 'TeX', ('tex', 'latex'), ('*.tex', '*.aux', '*.toc'), ('text/x-tex', 'text/x-latex')),
'TextLexer': ('pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)),
'TreetopLexer': ('pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()),
'TypeScriptLexer': ('pygments.lexers.web', 'TypeScript', ('ts',), ('*.ts',), ('text/x-typescript',)),
'UrbiscriptLexer': ('pygments.lexers.other', 'UrbiScript', ('urbiscript',), ('*.u',), ('application/x-urbiscript',)),
'VGLLexer': ('pygments.lexers.other', 'VGL', ('vgl',), ('*.rpf',), ()),
'ValaLexer': ('pygments.lexers.compiled', 'Vala', ('vala', 'vapi'), ('*.vala', '*.vapi'), ('text/x-vala',)),
'VbNetAspxLexer': ('pygments.lexers.dotnet', 'aspx-vb', ('aspx-vb',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
'VbNetLexer': ('pygments.lexers.dotnet', 'VB.net', ('vb.net', 'vbnet'), ('*.vb', '*.bas'), ('text/x-vbnet', 'text/x-vba')),
'VelocityHtmlLexer': ('pygments.lexers.templates', 'HTML+Velocity', ('html+velocity',), (), ('text/html+velocity',)),
'VelocityLexer': ('pygments.lexers.templates', 'Velocity', ('velocity',), ('*.vm', '*.fhtml'), ()),
'VelocityXmlLexer': ('pygments.lexers.templates', 'XML+Velocity', ('xml+velocity',), (), ('application/xml+velocity',)),
'VerilogLexer': ('pygments.lexers.hdl', 'verilog', ('verilog', 'v'), ('*.v',), ('text/x-verilog',)),
'VhdlLexer': ('pygments.lexers.hdl', 'vhdl', ('vhdl',), ('*.vhdl', '*.vhd'), ('text/x-vhdl',)),
'VimLexer': ('pygments.lexers.text', 'VimL', ('vim',), ('*.vim', '.vimrc', '.exrc', '.gvimrc', '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc'), ('text/x-vim',)),
'XQueryLexer': ('pygments.lexers.web', 'XQuery', ('xquery', 'xqy', 'xq', 'xql', 'xqm'), ('*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'), ('text/xquery', 'application/xquery')),
'XmlDjangoLexer': ('pygments.lexers.templates', 'XML+Django/Jinja', ('xml+django', 'xml+jinja'), (), ('application/xml+django', 'application/xml+jinja')),
'XmlErbLexer': ('pygments.lexers.templates', 'XML+Ruby', ('xml+erb', 'xml+ruby'), (), ('application/xml+ruby',)),
'XmlLexer': ('pygments.lexers.web', 'XML', ('xml',), ('*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl', '*.wsf'), ('text/xml', 'application/xml', 'image/svg+xml', 'application/rss+xml', 'application/atom+xml')),
'XmlPhpLexer': ('pygments.lexers.templates', 'XML+PHP', ('xml+php',), (), ('application/xml+php',)),
'XmlSmartyLexer': ('pygments.lexers.templates', 'XML+Smarty', ('xml+smarty',), (), ('application/xml+smarty',)),
'XsltLexer': ('pygments.lexers.web', 'XSLT', ('xslt',), ('*.xsl', '*.xslt', '*.xpl'), ('application/xsl+xml', 'application/xslt+xml')),
'XtendLexer': ('pygments.lexers.jvm', 'Xtend', ('xtend',), ('*.xtend',), ('text/x-xtend',)),
'YamlLexer': ('pygments.lexers.text', 'YAML', ('yaml',), ('*.yaml', '*.yml'), ('text/x-yaml',)),
}
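# Illustrative sketch only (hypothetical helper, not part of the pygments API):
# each LEXERS value is (module name, lexer name, aliases, filename patterns,
# mimetypes), so an alias can be resolved lazily to a lexer class like this.
# Real callers should prefer pygments.lexers.get_lexer_by_name(alias).
def _lexer_class_for_alias(alias):
    from importlib import import_module
    for class_name, (module_name, _name, aliases, _files, _mimes) in LEXERS.items():
        if alias in aliases:
            # import the defining module only when the alias is actually requested
            return getattr(import_module(module_name), class_name)
    return None
# Example: _lexer_class_for_alias('yaml') should return pygments.lexers.text.YamlLexer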
if __name__ == '__main__':
import sys
import os
# lookup lexers
found_lexers = []
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
for filename in os.listdir('.'):
if filename.endswith('.py') and not filename.startswith('_'):
module_name = 'pygments.lexers.%s' % filename[:-3]
print module_name
module = __import__(module_name, None, None, [''])
for lexer_name in module.__all__:
lexer = getattr(module, lexer_name)
found_lexers.append(
'%r: %r' % (lexer_name,
(module_name,
lexer.name,
tuple(lexer.aliases),
tuple(lexer.filenames),
tuple(lexer.mimetypes))))
# sort them, that should make the diff files for svn smaller
found_lexers.sort()
    # extract useful source code from this file
f = open(__file__)
try:
content = f.read()
finally:
f.close()
header = content[:content.find('LEXERS = {')]
footer = content[content.find("if __name__ == '__main__':"):]
# write new file
f = open(__file__, 'wb')
f.write(header)
f.write('LEXERS = {\n %s,\n}\n\n' % ',\n '.join(found_lexers))
f.write(footer)
f.close()
| mit |
JamesDickenson/aima-python | submissions/Dickenson/vacuum2Runner.py | 18 | 6345 | import agents as ag
import envgui as gui
# change this line ONLY to refer to your project
import submissions.Dickenson.vacuum2 as v2
# ______________________________________________________________________________
# Vacuum environment
class Dirt(ag.Thing):
pass
class VacuumEnvironment(ag.XYEnvironment):
"""The environment of [Ex. 2.12]. Agent perceives dirty or clean,
and bump (into obstacle) or not; 2D discrete world of unknown size;
    performance measure is 100 for each dirt cleaned, and -1 for
    each action taken other than 'NoOp'."""
def __init__(self, width=4, height=3):
super(VacuumEnvironment, self).__init__(width, height)
self.add_walls()
def thing_classes(self):
return [ag.Wall, Dirt,
# ReflexVacuumAgent, RandomVacuumAgent,
# TableDrivenVacuumAgent, ModelBasedVacuumAgent
]
def percept(self, agent):
"""The percept is a tuple of ('Dirty' or 'Clean', 'Bump' or 'None').
Unlike the TrivialVacuumEnvironment, location is NOT perceived."""
status = ('Dirty' if self.some_things_at(
agent.location, Dirt) else 'Clean')
        bump = ('Bump' if agent.bump else 'None')
return (bump, status)
def execute_action(self, agent, action):
if action == 'Suck':
dirt_list = self.list_things_at(agent.location, Dirt)
if dirt_list != []:
dirt = dirt_list[0]
agent.performance += 100
self.delete_thing(dirt)
else:
super(VacuumEnvironment, self).execute_action(agent, action)
if action != 'NoOp':
agent.performance -= 1
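# Illustrative sketch only (hypothetical, not the graded HW2Agent): a trivial
# agent program for the (bump, status) percept above, assuming the usual
# aima-python XYEnvironment actions 'Forward', 'TurnLeft', 'TurnRight' plus 'Suck'.
def SimpleReflexVacuumProgram():
    def program(percept):
        bump, status = percept
        if status == 'Dirty':
            return 'Suck'
        # on a bump, rotate until a clear heading is found; otherwise sweep forward
        return 'TurnRight' if bump == 'Bump' else 'Forward'
    return program
# Hypothetical usage: v.add_thing(ag.Agent(SimpleReflexVacuumProgram()), (1, 1))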
# # Launch a Text-Based Environment
# print('Two Cells, Agent on Left:')
# v = VacuumEnvironment(4, 3)
# v.add_thing(Dirt(), (1, 1))
# v.add_thing(Dirt(), (2, 1))
# a = v2.HW2Agent()
# a = ag.TraceAgent(a)
# v.add_thing(a, (1, 1))
# t = gui.EnvTUI(v)
# t.mapImageNames({
# ag.Wall: '#',
# Dirt: '@',
# ag.Agent: 'V',
# })
# t.step(0)
# t.list_things(Dirt)
# t.step(4)
# if len(t.env.get_things(Dirt)) > 0:
# t.list_things(Dirt)
# else:
# print('All clean!')
#
# # Check to continue
# if input('Do you want to continue [y/N]? ') != 'y':
# exit(0)
# else:
# print('----------------------------------------')
#
# # Repeat, but put Agent on the Right
# print('Two Cells, Agent on Right:')
# v = VacuumEnvironment(4, 3)
# v.add_thing(Dirt(), (1, 1))
# v.add_thing(Dirt(), (2, 1))
# a = v2.HW2Agent()
# a = ag.TraceAgent(a)
# v.add_thing(a, (2, 1))
# t = gui.EnvTUI(v)
# t.mapImageNames({
# ag.Wall: '#',
# Dirt: '@',
# ag.Agent: 'V',
# })
# t.step(0)
# t.list_things(Dirt)
# t.step(4)
# if len(t.env.get_things(Dirt)) > 0:
# t.list_things(Dirt)
# else:
# print('All clean!')
#
# # Check to continue
# if input('Do you want to continue [y/N]? ') != 'y':
# exit(0)
# else:
# print('----------------------------------------')
#
# # Repeat, but put Agent on the Right
# print('Two Cells, Agent on Top:')
# v = VacuumEnvironment(3, 4)
# v.add_thing(Dirt(), (1, 1))
# v.add_thing(Dirt(), (1, 2))
# a = v2.HW2Agent()
# a = ag.TraceAgent(a)
# v.add_thing(a, (1, 1))
# t = gui.EnvTUI(v)
# t.mapImageNames({
# ag.Wall: '#',
# Dirt: '@',
# ag.Agent: 'V',
# })
# t.step(0)
# t.list_things(Dirt)
# t.step(4)
# if len(t.env.get_things(Dirt)) > 0:
# t.list_things(Dirt)
# else:
# print('All clean!')
#
# # Check to continue
# if input('Do you want to continue [y/N]? ') != 'y':
# exit(0)
# else:
# print('----------------------------------------')
#
# # Repeat, but put Agent on the Right
# print('Two Cells, Agent on Bottom:')
# v = VacuumEnvironment(3, 4)
# v.add_thing(Dirt(), (1, 1))
# v.add_thing(Dirt(), (1, 2))
# a = v2.HW2Agent()
# a = ag.TraceAgent(a)
# v.add_thing(a, (1, 2))
# t = gui.EnvTUI(v)
# t.mapImageNames({
# ag.Wall: '#',
# Dirt: '@',
# ag.Agent: 'V',
# })
# t.step(0)
# t.list_things(Dirt)
# t.step(4)
# if len(t.env.get_things(Dirt)) > 0:
# t.list_things(Dirt)
# else:
# print('All clean!')
#
# # Check to continue
# if input('Do you want to continue [y/N]? ') != 'y':
# exit(0)
# else:
# print('----------------------------------------')
def testVacuum(label, w=4, h=3,
dloc=[(1,1),(2,1)],
vloc=(1,1),
limit=6):
print(label)
v = VacuumEnvironment(w, h)
for loc in dloc:
v.add_thing(Dirt(), loc)
a = v2.HW2Agent()
a = ag.TraceAgent(a)
v.add_thing(a, vloc)
t = gui.EnvTUI(v)
t.mapImageNames({
ag.Wall: '#',
Dirt: '@',
ag.Agent: 'V',
})
t.step(0)
t.list_things(Dirt)
t.step(limit)
if len(t.env.get_things(Dirt)) > 0:
t.list_things(Dirt)
else:
print('All clean!')
# Check to continue
if input('Do you want to continue [Y/n]? ') == 'n':
exit(0)
else:
print('----------------------------------------')
testVacuum('Two Cells, Agent on Left:')
testVacuum('Two Cells, Agent on Right:', vloc=(2,1))
testVacuum('Two Cells, Agent on Top:', w=3, h=4,
dloc=[(1,1), (1,2)], vloc=(1,1) )
testVacuum('Two Cells, Agent on Bottom:', w=3, h=4,
dloc=[(1,1), (1,2)], vloc=(1,2) )
testVacuum('Five Cells, Agent on Left:', w=7, h=3,
dloc=[(2,1), (4,1)], vloc=(1,1), limit=12)
testVacuum('Five Cells, Agent near Right:', w=7, h=3,
dloc=[(2,1), (3,1)], vloc=(4,1), limit=12)
testVacuum('Five Cells, Agent on Top:', w=3, h=7,
dloc=[(1,2), (1,4)], vloc=(1,1), limit=12 )
testVacuum('Five Cells, Agent Near Bottom:', w=3, h=7,
dloc=[(1,2), (1,3)], vloc=(1,4), limit=12 )
testVacuum('5x4 Grid, Agent in Top Left:', w=7, h=6,
dloc=[(1,4), (2,2), (3, 3), (4,1), (5,2)],
vloc=(1,1), limit=46 )
testVacuum('5x4 Grid, Agent near Bottom Right:', w=7, h=6,
dloc=[(1,3), (2,2), (3, 4), (4,1), (5,2)],
vloc=(4, 3), limit=46 )
v = VacuumEnvironment(6, 3)
a = v2.HW2Agent()
a = ag.TraceAgent(a)
loc = v.random_location_inbounds()
v.add_thing(a, location=loc)
v.scatter_things(Dirt)
g = gui.EnvGUI(v, 'Vacuum')
c = g.getCanvas()
c.mapImageNames({
ag.Wall: 'images/wall.jpg',
# Floor: 'images/floor.png',
Dirt: 'images/dirt.png',
ag.Agent: 'images/vacuum.png',
})
c.update()
g.mainloop() | mit |
jelly/calibre | src/calibre/db/cli/cmd_catalog.py | 2 | 3866 | #!/usr/bin/env python2
# vim:fileencoding=utf-8
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from calibre.customize.ui import available_catalog_formats, plugin_for_catalog_format
from calibre.db.cli import integers_from_string
readonly = True
version = 0 # change this if you change signature of implementation()
needs_srv_ctx = True
no_remote = True
def implementation(db, notify_changes, ctx):
raise NotImplementedError()
def option_parser(get_parser, args): # {{{
def add_plugin_parser_options(fmt, parser):
# Fetch the extension-specific CLI options from the plugin
# library.catalogs.<format>.py
plugin = plugin_for_catalog_format(fmt)
p = parser.add_option_group(_('{} OPTIONS').format(fmt.upper()))
for option in plugin.cli_options:
if option.action:
p.add_option(
option.option,
default=option.default,
dest=option.dest,
action=option.action,
help=option.help
)
else:
p.add_option(
option.option,
default=option.default,
dest=option.dest,
help=option.help
)
# Entry point
parser = get_parser(
_(
'''\
%prog catalog /path/to/destination.(csv|epub|mobi|xml...) [options]
Export a catalog in the format specified by the path/to/destination extension.
Options control how entries are displayed in the generated catalog output.
Note that different catalog formats support different sets of options.
'''
)
)
# Add options common to all catalog plugins
parser.add_option(
'-i',
'--ids',
default=None,
dest='ids',
help=_(
"Comma-separated list of database IDs to catalog.\n"
"If declared, --search is ignored.\n"
"Default: all"
)
)
parser.add_option(
'-s',
'--search',
default=None,
dest='search_text',
help=_(
"Filter the results by the search query. "
"For the format of the search query, please see "
"the search-related documentation in the User Manual.\n"
"Default: no filtering"
)
)
parser.add_option(
'-v',
'--verbose',
default=False,
action='store_true',
dest='verbose',
help=_('Show detailed output information. Useful for debugging')
)
fmt = 'epub'
if args and '.' in args[0]:
fmt = args[0].rpartition('.')[-1].lower()
if fmt not in available_catalog_formats():
fmt = 'epub'
# Add options specific to fmt plugin
add_plugin_parser_options(fmt, parser)
return parser
# }}}
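# Illustrative invocations only (hypothetical paths and IDs; options beyond
# --ids/--search/--verbose depend on the catalog plugin picked by the extension):
#
#   calibredb catalog /tmp/catalog.epub --search "tags:fiction" --verbose
#   calibredb catalog /tmp/catalog.csv --ids 1,5,9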
def main(opts, args, dbctx):
if len(args) < 1:
raise SystemExit(_('You must specify a catalog output file'))
if opts.ids:
opts.ids = list(integers_from_string(opts.ids))
fmt = args[0].rpartition('.')[-1]
if fmt not in available_catalog_formats():
raise SystemExit(
_('Cannot generate a catalog in the {} format').format(fmt.upper())
)
# No support for connected device in CLI environment
# Parallel initialization in calibre.gui2.tools:generate_catalog()
opts.connected_device = {
'is_device_connected': False,
'kind': None,
'name': None,
'save_template': None,
'serial': None,
'storage': None,
}
dest = os.path.abspath(os.path.expanduser(args[0]))
plugin = plugin_for_catalog_format(fmt)
with plugin:
plugin.run(dest, opts, dbctx.db)
return 0
| gpl-3.0 |
skiselev/upm | examples/python/aeotecsdg2.py | 7 | 3146 | #!/usr/bin/python
# Author: Jon Trulson <[email protected]>
# Copyright (c) 2016 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import time, sys, signal, atexit
from upm import pyupm_ozw as sensorObj
def main():
# This function lets you run code on exit
def exitHandler():
print("Turning switch off and sleeping for 5 seconds...")
sensor.off()
time.sleep(5)
print("Exiting")
sys.exit(0)
# Register exit handlers
atexit.register(exitHandler)
defaultDev = "/dev/ttyACM0"
if (len(sys.argv) > 1):
defaultDev = sys.argv[1]
print("Using device", defaultDev)
# Instantiate an Aeotec Smart Dimmer Gen2 instance, on device node
# 9. You will almost certainly need to change this to reflect your
# own network. Use the ozwdump example to see what nodes are
# available.
sensor = sensorObj.AeotecSDG2(9)
# The first thing to do is create options, then lock them when done.
sensor.optionsCreate()
sensor.optionsLock()
# Next, initialize it.
print("Initializing, this may take awhile depending on your ZWave network")
sensor.init(defaultDev)
print("Initialization complete")
# turn light on
print("Turning switch on, then sleeping for 5 secs")
    sensor.on()
    time.sleep(5)
    print("Querying data...")
    dim = False
    while True:
        # put on a light show...
        if dim:
            sensor.setLevel(25)
        else:
            sensor.on()
        dim = not dim
sensor.update()
print("Current Level:", end=' ')
print(sensor.getLevel())
print("Volts:", end=' ')
print(sensor.getVolts(), end=' ')
print("volts")
print("Energy Consumption:", end=' ')
print(sensor.getEnergy(), end=' ')
print("kWh")
print("Watts:", end=' ')
print(sensor.getWatts())
print("Current:", end=' ')
print(sensor.getCurrent(), end=' ')
print("amps")
print()
time.sleep(5)
if __name__ == '__main__':
main()
| mit |