repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
smartboyhw/ubuntu-saucy-rt | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <[email protected]>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulted trace is processed into a tree to produce a more human
view of the call stack by drawing textual but hierarchical tree of
calls. Only the functions' names and the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too long: the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
found in any parent, then create it as a new child of root.
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
| gpl-2.0 |
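A minimal sketch of the parse step that draw_functrace.py above applies to each trace line, run here on two hand-written sample lines instead of a live trace_pipe; the task names, function names and timestamps are hypothetical.

import re

# Two made-up lines in the shape emitted by the function tracer; the regex
# mirrors the one used in parseLine() above.
sample = [
    "            bash-4251  [001]  10152.583854: child_func <-parent_func",
    "            bash-4251  [001]  10152.583860: leaf_func <-child_func",
]

pattern = re.compile(r"[^]]+?\] +([0-9.]+): (\w+) <-(\w+)")

for line in sample:
    m = pattern.match(line.strip())
    if m is None:
        continue  # a broken or comment line would be skipped here
    calltime, callee, caller = m.group(1), m.group(2), m.group(3)
    # draw_functrace attaches callee under the nearest ancestor named caller;
    # here we just show the parsed triple.
    print("%s -> %s at %s" % (caller, callee, calltime))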
Jeff-Tian/mybnb | Python27/Tools/Scripts/diff.py | 2 | 2074 | #!/usr/bin/env python
""" Command line interface to difflib.py providing diffs in four formats:
* ndiff: lists every line and highlights interline changes.
* context: highlights clusters of changes in a before/after format.
* unified: highlights clusters of changes in an inline format.
* html: generates side by side comparison with change highlights.
"""
import sys, os, time, difflib, optparse
def main():
usage = "usage: %prog [options] fromfile tofile"
parser = optparse.OptionParser(usage)
parser.add_option("-c", action="store_true", default=False, help='Produce a context format diff (default)')
parser.add_option("-u", action="store_true", default=False, help='Produce a unified format diff')
parser.add_option("-m", action="store_true", default=False, help='Produce HTML side by side diff (can use -c and -l in conjunction)')
parser.add_option("-n", action="store_true", default=False, help='Produce a ndiff format diff')
parser.add_option("-l", "--lines", type="int", default=3, help='Set number of context lines (default 3)')
(options, args) = parser.parse_args()
if len(args) == 0:
parser.print_help()
sys.exit(1)
if len(args) != 2:
parser.error("need to specify both a fromfile and tofile")
n = options.lines
fromfile, tofile = args
fromdate = time.ctime(os.stat(fromfile).st_mtime)
todate = time.ctime(os.stat(tofile).st_mtime)
fromlines = open(fromfile, 'U').readlines()
tolines = open(tofile, 'U').readlines()
if options.u:
diff = difflib.unified_diff(fromlines, tolines, fromfile, tofile, fromdate, todate, n=n)
elif options.n:
diff = difflib.ndiff(fromlines, tolines)
elif options.m:
diff = difflib.HtmlDiff().make_file(fromlines,tolines,fromfile,tofile,context=options.c,numlines=n)
else:
diff = difflib.context_diff(fromlines, tolines, fromfile, tofile, fromdate, todate, n=n)
sys.stdout.writelines(diff)
if __name__ == '__main__':
main()
| apache-2.0 |
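For reference, a short self-contained sketch of the difflib calls that the diff.py script above dispatches to; the before/after lines are made up.

import difflib

fromlines = ["apple\n", "banana\n", "cherry\n"]
tolines = ["apple\n", "blueberry\n", "cherry\n"]

# Unified format, as selected by -u above.
for line in difflib.unified_diff(fromlines, tolines, "fromfile", "tofile", n=3):
    print(line.rstrip("\n"))

# ndiff format, as selected by -n above.
for line in difflib.ndiff(fromlines, tolines):
    print(line.rstrip("\n"))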
Denisolt/Tensorflow_Chat_Bot | local/lib/python2.7/site-packages/numpy/_globals.py | 63 | 1851 | """
Module defining global singleton classes.
This module raises a RuntimeError if an attempt to reload it is made. In that
way the identities of the classes defined here are fixed and will remain so
even if numpy itself is reloaded. In particular, a function like the following
will still work correctly after numpy is reloaded::
def foo(arg=np._NoValue):
if arg is np._NoValue:
...
That was not the case when the singleton classes were defined in the numpy
``__init__.py`` file. See gh-7844 for a discussion of the reload problem that
motivated this module.
"""
from __future__ import division, absolute_import, print_function
__ALL__ = [
'ModuleDeprecationWarning', 'VisibleDeprecationWarning', '_NoValue'
]
# Disallow reloading this module so as to preserve the identities of the
# classes defined here.
if '_is_loaded' in globals():
raise RuntimeError('Reloading numpy._globals is not allowed')
_is_loaded = True
class ModuleDeprecationWarning(DeprecationWarning):
"""Module deprecation warning.
The nose tester turns ordinary Deprecation warnings into test failures.
That makes it hard to deprecate whole modules, because they get
imported by default. So this is a special Deprecation warning that the
nose tester will let pass without making tests fail.
"""
pass
class VisibleDeprecationWarning(UserWarning):
"""Visible deprecation warning.
By default, python will not show deprecation warnings, so this class
can be used when a very visible warning is helpful, for example because
the usage is most likely a user bug.
"""
pass
class _NoValue:
"""Special keyword value.
This class may be used as the default value assigned to a deprecated
keyword in order to check if it has been given a user defined value.
"""
pass
| gpl-3.0 |
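A small, self-contained sketch of the sentinel-default pattern that _NoValue above exists to support; _MISSING and set_option are illustrative names, not part of numpy.

class _MISSING(object):
    """Stand-in for a module-private sentinel such as np._NoValue."""
    pass

def set_option(value=_MISSING):
    # Lets the callee distinguish "no argument given" from "None given".
    if value is _MISSING:
        return "default behaviour"
    return "explicit value: %r" % (value,)

print(set_option())      # default behaviour
print(set_option(None))  # explicit value: None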
Azure/azure-quickstart-templates | application-workloads/cloudera/cloudera-director-on-centos/scripts/marketing.py | 103 | 3151 | #! /usr/bin/env python
# Copyright (c) 2016 Cloudera, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Simple script that shows how to use the Cloudera Director API to initialize
# the environment and instance templates
import urllib
import urllib2
from optparse import OptionParser
import sys
import logging
# logging setup
logging.basicConfig(filename='/var/log/marketing.log', level=logging.DEBUG)
def parse_options():
parser = OptionParser()
parser.add_option('-e', '--email-address', dest='email', type="string", help='Set email address')
parser.add_option('-b', '--business-phone', dest='phone', type="string", help='Set phone')
parser.add_option('-f', '--first-name', dest='fname', type="string", help='Set first name')
parser.add_option('-l', '--last-name', dest='lname', type="string", help='Set last name')
parser.add_option('-r', '--job-role', dest='jobrole', type="string", help='Set job role')
parser.add_option('-j', '--job-function', dest='jobfunction', type="string", help='Set job function')
parser.add_option('-c', '--company', dest='company', type="string", help='Set company')
(options, args) = parser.parse_args()
if (options.email is None or options.phone is None or options.fname is None or options.lname is None or
options.jobrole is None or options.jobfunction is None or options.company is None):
logging.error("required parameter cannot be empty")
sys.exit(1)
return options
def postEulaInfo(firstName, lastName, emailAddress, company,jobRole, jobFunction, businessPhone):
elqFormName='Cloudera_Director_on_Azure_EULA'
elqSiteID='1465054361'
cid='701340000018RQV'
url = 'https://s1465054361.t.eloqua.com/e/f2'
data = urllib.urlencode({'elqFormName': elqFormName,
'elqSiteID': elqSiteID,
'cid': cid,
'firstName': firstName,
'lastName': lastName,
'company': company,
'emailAddress': emailAddress,
'jobRole': jobRole,
'jobFunction': jobFunction,
'businessPhone': businessPhone
})
results = urllib2.urlopen(url, data)
logging.info(results.read())
def main():
# Parse user options
logging.info("parse_options")
options = parse_options()
postEulaInfo(options.fname, options.lname, options.email, options.company, options.jobrole, options.jobfunction,
options.phone)
if __name__ == "__main__":
main()
| mit |
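A hedged sketch of the same form-POST construction using the Python 3 standard library rather than the Python 2 urllib/urllib2 pair used above; the endpoint and field names are placeholders, and the request is only built, not sent.

from urllib.parse import urlencode
from urllib.request import Request

url = "https://example.invalid/e/f2"   # placeholder, not the real Eloqua endpoint
fields = {
    "elqFormName": "Example_Form",
    "firstName": "Ada",
    "lastName": "Lovelace",
}

data = urlencode(fields).encode("utf-8")   # POST bodies must be bytes in Python 3
req = Request(url, data=data)              # supplying data makes this a POST
print(req.get_method(), req.full_url)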
lmregus/Portfolio | python/design_patterns/env/lib/python3.7/site-packages/backcall/backcall.py | 4 | 3752 | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 13 18:17:15 2014
@author: takluyver
"""
import sys
PY3 = (sys.version_info[0] >= 3)
try:
from inspect import signature, Parameter # Python >= 3.3
except ImportError:
from ._signatures import signature, Parameter
if PY3:
from functools import wraps
else:
from functools import wraps as _wraps
def wraps(f):
def dec(func):
_wraps(f)(func)
func.__wrapped__ = f
return func
return dec
def callback_prototype(prototype):
"""Decorator to process a callback prototype.
A callback prototype is a function whose signature includes all the values
that will be passed by the callback API in question.
The original function will be returned, with a ``prototype.adapt`` attribute
which can be used to prepare third party callbacks.
"""
protosig = signature(prototype)
positional, keyword = [], []
for name, param in protosig.parameters.items():
if param.kind in (Parameter.VAR_POSITIONAL, Parameter.VAR_KEYWORD):
raise TypeError("*args/**kwargs not supported in prototypes")
if (param.default is not Parameter.empty) \
or (param.kind == Parameter.KEYWORD_ONLY):
keyword.append(name)
else:
positional.append(name)
kwargs = dict.fromkeys(keyword)
def adapt(callback):
"""Introspect and prepare a third party callback."""
sig = signature(callback)
try:
# XXX: callback can have extra optional parameters - OK?
sig.bind(*positional, **kwargs)
return callback
except TypeError:
pass
# Match up arguments
unmatched_pos = positional[:]
unmatched_kw = kwargs.copy()
unrecognised = []
# TODO: unrecognised parameters with default values - OK?
for name, param in sig.parameters.items():
# print(name, param.kind) #DBG
if param.kind == Parameter.POSITIONAL_ONLY:
if len(unmatched_pos) > 0:
unmatched_pos.pop(0)
else:
unrecognised.append(name)
elif param.kind == Parameter.POSITIONAL_OR_KEYWORD:
if (param.default is not Parameter.empty) and (name in unmatched_kw):
unmatched_kw.pop(name)
elif len(unmatched_pos) > 0:
unmatched_pos.pop(0)
else:
unrecognised.append(name)
elif param.kind == Parameter.VAR_POSITIONAL:
unmatched_pos = []
elif param.kind == Parameter.KEYWORD_ONLY:
if name in unmatched_kw:
unmatched_kw.pop(name)
else:
unrecognised.append(name)
else: # VAR_KEYWORD
unmatched_kw = {}
# print(unmatched_pos, unmatched_kw, unrecognised) #DBG
if unrecognised:
raise TypeError("Function {!r} had unmatched arguments: {}".format(callback, unrecognised))
n_positional = len(positional) - len(unmatched_pos)
@wraps(callback)
def adapted(*args, **kwargs):
"""Wrapper for third party callbacks that discards excess arguments"""
# print(args, kwargs)
args = args[:n_positional]
for name in unmatched_kw:
# XXX: Could name not be in kwargs?
kwargs.pop(name)
# print(args, kwargs, unmatched_pos, cut_positional, unmatched_kw)
return callback(*args, **kwargs)
return adapted
prototype.adapt = adapt
    return prototype
| mit |
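A short usage sketch of the callback_prototype decorator defined above, assuming the backcall module shown here is importable; the event and argument names are made up.

from backcall import callback_prototype

@callback_prototype
def on_change(name, old, new=None):
    """Hypothetical prototype: real callbacks may accept any subset of these."""

def short_listener(name):
    print("changed: %s" % name)

# adapt() wraps the narrower callback so the extra arguments are discarded.
adapted = on_change.adapt(short_listener)
adapted("mode", "old-value", new="new-value")   # prints: changed: mode

# A callback whose signature already matches the prototype is returned as-is.
def full_listener(name, old, new=None):
    print("changed: %s %r -> %r" % (name, old, new))

assert on_change.adapt(full_listener) is full_listener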
umraniyeacil/umraniyeacil.github.io | node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/xcodeproj_file.py | 1366 | 120842 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Xcode project file generator.
This module is both an Xcode project file generator and a documentation of the
Xcode project file format. Knowledge of the project file format was gained
based on extensive experience with Xcode, and by making changes to projects in
Xcode.app and observing the resultant changes in the associated project files.
XCODE PROJECT FILES
The generator targets the file format as written by Xcode 3.2 (specifically,
3.2.6), but past experience has taught that the format has not changed
significantly in the past several years, and future versions of Xcode are able
to read older project files.
Xcode project files are "bundled": the project "file" from an end-user's
perspective is actually a directory with an ".xcodeproj" extension. The
project file from this module's perspective is actually a file inside this
directory, always named "project.pbxproj". This file contains a complete
description of the project and is all that is needed to use the xcodeproj.
Other files contained in the xcodeproj directory are simply used to store
per-user settings, such as the state of various UI elements in the Xcode
application.
The project.pbxproj file is a property list, stored in a format almost
identical to the NeXTstep property list format. The file is able to carry
Unicode data, and is encoded in UTF-8. The root element in the property list
is a dictionary that contains several properties of minimal interest, and two
properties of immense interest. The most important property is a dictionary
named "objects". The entire structure of the project is represented by the
children of this property. The objects dictionary is keyed by unique 96-bit
values represented by 24 uppercase hexadecimal characters. Each value in the
objects dictionary is itself a dictionary, describing an individual object.
Each object in the dictionary is a member of a class, which is identified by
the "isa" property of each object. A variety of classes are represented in a
project file. Objects can refer to other objects by ID, using the 24-character
hexadecimal object key. A project's objects form a tree, with a root object
of class PBXProject at the root. As an example, the PBXProject object serves
as parent to an XCConfigurationList object defining the build configurations
used in the project, a PBXGroup object serving as a container for all files
referenced in the project, and a list of target objects, each of which defines
a target in the project. There are several different types of target object,
such as PBXNativeTarget and PBXAggregateTarget. In this module, this
relationship is expressed by having each target type derive from an abstract
base named XCTarget.
The project.pbxproj file's root dictionary also contains a property, sibling to
the "objects" dictionary, named "rootObject". The value of rootObject is a
24-character object key referring to the root PBXProject object in the
objects dictionary.
In Xcode, every file used as input to a target or produced as a final product
of a target must appear somewhere in the hierarchy rooted at the PBXGroup
object referenced by the PBXProject's mainGroup property. A PBXGroup is
generally represented as a folder in the Xcode application. PBXGroups can
contain other PBXGroups as well as PBXFileReferences, which are pointers to
actual files.
Each XCTarget contains a list of build phases, represented in this module by
the abstract base XCBuildPhase. Examples of concrete XCBuildPhase derivations
are PBXSourcesBuildPhase and PBXFrameworksBuildPhase, which correspond to the
"Compile Sources" and "Link Binary With Libraries" phases displayed in the
Xcode application. Files used as input to these phases (for example, source
files in the former case and libraries and frameworks in the latter) are
represented by PBXBuildFile objects, referenced by elements of "files" lists
in XCTarget objects. Each PBXBuildFile object refers to a PBXBuildFile
object as a "weak" reference: it does not "own" the PBXBuildFile, which is
owned by the root object's mainGroup or a descendant group. In most cases, the
layer of indirection between an XCBuildPhase and a PBXFileReference via a
PBXBuildFile appears extraneous, but there's actually one reason for this:
file-specific compiler flags are added to the PBXBuildFile object so as to
allow a single file to be a member of multiple targets while having distinct
compiler flags for each. These flags can be modified in the Xcode application
in the "Build" tab of a File Info window.
When a project is open in the Xcode application, Xcode will rewrite it. As
such, this module is careful to adhere to the formatting used by Xcode, to
avoid insignificant changes appearing in the file when it is used in the
Xcode application. This will keep version control repositories happy, and
makes it possible to compare a project file used in Xcode to one generated by
this module to determine if any significant changes were made in the
application.
Xcode has its own way of assigning 24-character identifiers to each object,
which is not duplicated here. Because the identifier is only generated
once, when an object is created, and is then left unchanged, there is no need
to attempt to duplicate Xcode's behavior in this area. The generator is free
to select any identifier, even at random, to refer to the objects it creates,
and Xcode will retain those identifiers and use them when subsequently
rewriting the project file. However, the generator would choose new random
identifiers each time the project files are generated, leading to difficulties
comparing "used" project files to "pristine" ones produced by this module,
and causing the appearance of changes as every object identifier is changed
when updated projects are checked in to a version control repository. To
mitigate this problem, this module chooses identifiers in a more deterministic
way, by hashing a description of each object as well as its parent and ancestor
objects. This strategy should result in minimal "shift" in IDs as successive
generations of project files are produced.
THIS MODULE
This module introduces several classes, all derived from the XCObject class.
Nearly all of the "brains" are built into the XCObject class, which understands
how to create and modify objects, maintain the proper tree structure, compute
identifiers, and print objects. For the most part, classes derived from
XCObject need only provide a _schema class object, a dictionary that
expresses what properties objects of the class may contain.
Given this structure, it's possible to build a minimal project file by creating
objects of the appropriate types and making the proper connections:
config_list = XCConfigurationList()
group = PBXGroup()
project = PBXProject({'buildConfigurationList': config_list,
'mainGroup': group})
With the project object set up, it can be added to an XCProjectFile object.
XCProjectFile is a pseudo-class in the sense that it is a concrete XCObject
subclass that does not actually correspond to a class type found in a project
file. Rather, it is used to represent the project file's root dictionary.
Printing an XCProjectFile will print the entire project file, including the
full "objects" dictionary.
project_file = XCProjectFile({'rootObject': project})
project_file.ComputeIDs()
project_file.Print()
Xcode project files are always encoded in UTF-8. This module will accept
strings of either the str class or the unicode class. Strings of class str
are assumed to already be encoded in UTF-8. Obviously, if you're just using
ASCII, you won't encounter difficulties because ASCII is a UTF-8 subset.
Strings of class unicode are handled properly and encoded in UTF-8 when
a project file is output.
"""
import gyp.common
import posixpath
import re
import struct
import sys
# hashlib is supplied as of Python 2.5 as the replacement interface for sha
# and other secure hashes. In 2.6, sha is deprecated. Import hashlib if
# available, avoiding a deprecation warning under 2.6. Import sha otherwise,
# preserving 2.4 compatibility.
try:
import hashlib
_new_sha1 = hashlib.sha1
except ImportError:
import sha
_new_sha1 = sha.new
# See XCObject._EncodeString. This pattern is used to determine when a string
# can be printed unquoted. Strings that match this pattern may be printed
# unquoted. Strings that do not match must be quoted and may be further
# transformed to be properly encoded. Note that this expression matches the
# characters listed with "+", for 1 or more occurrences: if a string is empty,
# it must not match this pattern, because it needs to be encoded as "".
_unquoted = re.compile('^[A-Za-z0-9$./_]+$')
# Strings that match this pattern are quoted regardless of what _unquoted says.
# Oddly, Xcode will quote any string with a run of three or more underscores.
_quoted = re.compile('___')
# This pattern should match any character that needs to be escaped by
# XCObject._EncodeString. See that function.
_escaped = re.compile('[\\\\"]|[\x00-\x1f]')
# Used by SourceTreeAndPathFromPath
_path_leading_variable = re.compile(r'^\$\((.*?)\)(/(.*))?$')
def SourceTreeAndPathFromPath(input_path):
"""Given input_path, returns a tuple with sourceTree and path values.
Examples:
input_path (source_tree, output_path)
'$(VAR)/path' ('VAR', 'path')
'$(VAR)' ('VAR', None)
'path' (None, 'path')
"""
source_group_match = _path_leading_variable.match(input_path)
if source_group_match:
source_tree = source_group_match.group(1)
output_path = source_group_match.group(3) # This may be None.
else:
source_tree = None
output_path = input_path
return (source_tree, output_path)
def ConvertVariablesToShellSyntax(input_string):
return re.sub(r'\$\((.*?)\)', '${\\1}', input_string)
class XCObject(object):
"""The abstract base of all class types used in Xcode project files.
Class variables:
_schema: A dictionary defining the properties of this class. The keys to
_schema are string property keys as used in project files. Values
are a list of four or five elements:
[ is_list, property_type, is_strong, is_required, default ]
is_list: True if the property described is a list, as opposed
to a single element.
property_type: The type to use as the value of the property,
or if is_list is True, the type to use for each
element of the value's list. property_type must
be an XCObject subclass, or one of the built-in
types str, int, or dict.
is_strong: If property_type is an XCObject subclass, is_strong
is True to assert that this class "owns," or serves
as parent, to the property value (or, if is_list is
True, values). is_strong must be False if
property_type is not an XCObject subclass.
is_required: True if the property is required for the class.
Note that is_required being True does not preclude
an empty string ("", in the case of property_type
str) or list ([], in the case of is_list True) from
being set for the property.
default: Optional. If is_required is True, default may be set
to provide a default value for objects that do not supply
their own value. If is_required is True and default
is not provided, users of the class must supply their own
value for the property.
Note that although the values of the array are expressed in
boolean terms, subclasses provide values as integers to conserve
horizontal space.
_should_print_single_line: False in XCObject. Subclasses whose objects
should be written to the project file in the
alternate single-line format, such as
PBXFileReference and PBXBuildFile, should
set this to True.
_encode_transforms: Used by _EncodeString to encode unprintable characters.
The index into this list is the ordinal of the
character to transform; each value is a string
used to represent the character in the output. XCObject
provides an _encode_transforms list suitable for most
XCObject subclasses.
_alternate_encode_transforms: Provided for subclasses that wish to use
the alternate encoding rules. Xcode seems
to use these rules when printing objects in
single-line format. Subclasses that desire
this behavior should set _encode_transforms
to _alternate_encode_transforms.
_hashables: A list of XCObject subclasses that can be hashed by ComputeIDs
to construct this object's ID. Most classes that need custom
hashing behavior should do it by overriding Hashables,
but in some cases an object's parent may wish to push a
hashable value into its child, and it can do so by appending
to _hashables.
Attributes:
id: The object's identifier, a 24-character uppercase hexadecimal string.
Usually, objects being created should not set id until the entire
project file structure is built. At that point, UpdateIDs() should
be called on the root object to assign deterministic values for id to
each object in the tree.
parent: The object's parent. This is set by a parent XCObject when a child
object is added to it.
_properties: The object's property dictionary. An object's properties are
described by its class' _schema variable.
"""
_schema = {}
_should_print_single_line = False
# See _EncodeString.
_encode_transforms = []
i = 0
while i < ord(' '):
_encode_transforms.append('\\U%04x' % i)
i = i + 1
_encode_transforms[7] = '\\a'
_encode_transforms[8] = '\\b'
_encode_transforms[9] = '\\t'
_encode_transforms[10] = '\\n'
_encode_transforms[11] = '\\v'
_encode_transforms[12] = '\\f'
_encode_transforms[13] = '\\n'
_alternate_encode_transforms = list(_encode_transforms)
_alternate_encode_transforms[9] = chr(9)
_alternate_encode_transforms[10] = chr(10)
_alternate_encode_transforms[11] = chr(11)
def __init__(self, properties=None, id=None, parent=None):
self.id = id
self.parent = parent
self._properties = {}
self._hashables = []
self._SetDefaultsFromSchema()
self.UpdateProperties(properties)
def __repr__(self):
try:
name = self.Name()
except NotImplementedError:
return '<%s at 0x%x>' % (self.__class__.__name__, id(self))
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
def Copy(self):
"""Make a copy of this object.
The new object will have its own copy of lists and dicts. Any XCObject
objects owned by this object (marked "strong") will be copied in the
new object, even those found in lists. If this object has any weak
references to other XCObjects, the same references are added to the new
object without making a copy.
"""
that = self.__class__(id=self.id, parent=self.parent)
for key, value in self._properties.iteritems():
is_strong = self._schema[key][2]
if isinstance(value, XCObject):
if is_strong:
new_value = value.Copy()
new_value.parent = that
that._properties[key] = new_value
else:
that._properties[key] = value
elif isinstance(value, str) or isinstance(value, unicode) or \
isinstance(value, int):
that._properties[key] = value
elif isinstance(value, list):
if is_strong:
# If is_strong is True, each element is an XCObject, so it's safe to
# call Copy.
that._properties[key] = []
for item in value:
new_item = item.Copy()
new_item.parent = that
that._properties[key].append(new_item)
else:
that._properties[key] = value[:]
elif isinstance(value, dict):
# dicts are never strong.
if is_strong:
raise TypeError('Strong dict for key ' + key + ' in ' + \
self.__class__.__name__)
else:
that._properties[key] = value.copy()
else:
raise TypeError('Unexpected type ' + value.__class__.__name__ + \
' for key ' + key + ' in ' + self.__class__.__name__)
return that
def Name(self):
"""Return the name corresponding to an object.
Not all objects necessarily need to be nameable, and not all that do have
a "name" property. Override as needed.
"""
# If the schema indicates that "name" is required, try to access the
# property even if it doesn't exist. This will result in a KeyError
# being raised for the property that should be present, which seems more
# appropriate than NotImplementedError in this case.
if 'name' in self._properties or \
('name' in self._schema and self._schema['name'][3]):
return self._properties['name']
raise NotImplementedError(self.__class__.__name__ + ' must implement Name')
def Comment(self):
"""Return a comment string for the object.
Most objects just use their name as the comment, but PBXProject uses
different values.
The returned comment is not escaped and does not have any comment marker
strings applied to it.
"""
return self.Name()
def Hashables(self):
hashables = [self.__class__.__name__]
name = self.Name()
if name != None:
hashables.append(name)
hashables.extend(self._hashables)
return hashables
def HashablesForChild(self):
return None
def ComputeIDs(self, recursive=True, overwrite=True, seed_hash=None):
"""Set "id" properties deterministically.
An object's "id" property is set based on a hash of its class type and
name, as well as the class type and name of all ancestor objects. As
such, it is only advisable to call ComputeIDs once an entire project file
tree is built.
If recursive is True, recurse into all descendant objects and update their
hashes.
If overwrite is True, any existing value set in the "id" property will be
replaced.
"""
def _HashUpdate(hash, data):
"""Update hash with data's length and contents.
If the hash were updated only with the value of data, it would be
possible for clowns to induce collisions by manipulating the names of
their objects. By adding the length, it's exceedingly less likely that
ID collisions will be encountered, intentionally or not.
"""
hash.update(struct.pack('>i', len(data)))
hash.update(data)
if seed_hash is None:
seed_hash = _new_sha1()
hash = seed_hash.copy()
hashables = self.Hashables()
assert len(hashables) > 0
for hashable in hashables:
_HashUpdate(hash, hashable)
if recursive:
hashables_for_child = self.HashablesForChild()
if hashables_for_child is None:
child_hash = hash
else:
assert len(hashables_for_child) > 0
child_hash = seed_hash.copy()
for hashable in hashables_for_child:
_HashUpdate(child_hash, hashable)
for child in self.Children():
child.ComputeIDs(recursive, overwrite, child_hash)
if overwrite or self.id is None:
# Xcode IDs are only 96 bits (24 hex characters), but a SHA-1 digest is
# 160 bits. Instead of throwing out 64 bits of the digest, xor them
# into the portion that gets used.
assert hash.digest_size % 4 == 0
digest_int_count = hash.digest_size / 4
digest_ints = struct.unpack('>' + 'I' * digest_int_count, hash.digest())
id_ints = [0, 0, 0]
for index in xrange(0, digest_int_count):
id_ints[index % 3] ^= digest_ints[index]
self.id = '%08X%08X%08X' % tuple(id_ints)
def EnsureNoIDCollisions(self):
"""Verifies that no two objects have the same ID. Checks all descendants.
"""
ids = {}
descendants = self.Descendants()
for descendant in descendants:
if descendant.id in ids:
other = ids[descendant.id]
raise KeyError(
'Duplicate ID %s, objects "%s" and "%s" in "%s"' % \
(descendant.id, str(descendant._properties),
str(other._properties), self._properties['rootObject'].Name()))
ids[descendant.id] = descendant
def Children(self):
"""Returns a list of all of this object's owned (strong) children."""
children = []
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong) = attributes[0:3]
if is_strong and property in self._properties:
if not is_list:
children.append(self._properties[property])
else:
children.extend(self._properties[property])
return children
def Descendants(self):
"""Returns a list of all of this object's descendants, including this
object.
"""
children = self.Children()
descendants = [self]
for child in children:
descendants.extend(child.Descendants())
return descendants
def PBXProjectAncestor(self):
# The base case for recursion is defined at PBXProject.PBXProjectAncestor.
if self.parent:
return self.parent.PBXProjectAncestor()
return None
def _EncodeComment(self, comment):
"""Encodes a comment to be placed in the project file output, mimicing
Xcode behavior.
"""
# This mimics Xcode behavior by wrapping the comment in "/*" and "*/". If
# the string already contains a "*/", it is turned into "(*)/". This keeps
# the file writer from outputting something that would be treated as the
# end of a comment in the middle of something intended to be entirely a
# comment.
return '/* ' + comment.replace('*/', '(*)/') + ' */'
def _EncodeTransform(self, match):
# This function works closely with _EncodeString. It will only be called
# by re.sub with match.group(0) containing a character matched by the
# _escaped expression.
char = match.group(0)
# Backslashes (\) and quotation marks (") are always replaced with a
# backslash-escaped version of the same. Everything else gets its
# replacement from the class' _encode_transforms array.
if char == '\\':
return '\\\\'
if char == '"':
return '\\"'
return self._encode_transforms[ord(char)]
def _EncodeString(self, value):
"""Encodes a string to be placed in the project file output, mimicing
Xcode behavior.
"""
# Use quotation marks when any character outside of the range A-Z, a-z, 0-9,
# $ (dollar sign), . (period), and _ (underscore) is present. Also use
# quotation marks to represent empty strings.
#
# Escape " (double-quote) and \ (backslash) by preceding them with a
# backslash.
#
# Some characters below the printable ASCII range are encoded specially:
# 7 ^G BEL is encoded as "\a"
# 8 ^H BS is encoded as "\b"
# 11 ^K VT is encoded as "\v"
# 12 ^L NP is encoded as "\f"
# 127 ^? DEL is passed through as-is without escaping
# - In PBXFileReference and PBXBuildFile objects:
# 9 ^I HT is passed through as-is without escaping
# 10 ^J NL is passed through as-is without escaping
# 13 ^M CR is passed through as-is without escaping
# - In other objects:
# 9 ^I HT is encoded as "\t"
# 10 ^J NL is encoded as "\n"
# 13 ^M CR is encoded as "\n" rendering it indistinguishable from
# 10 ^J NL
# All other characters within the ASCII control character range (0 through
# 31 inclusive) are encoded as "\U001f" referring to the Unicode code point
# in hexadecimal. For example, character 14 (^N SO) is encoded as "\U000e".
# Characters above the ASCII range are passed through to the output encoded
# as UTF-8 without any escaping. These mappings are contained in the
# class' _encode_transforms list.
if _unquoted.search(value) and not _quoted.search(value):
return value
return '"' + _escaped.sub(self._EncodeTransform, value) + '"'
def _XCPrint(self, file, tabs, line):
file.write('\t' * tabs + line)
def _XCPrintableValue(self, tabs, value, flatten_list=False):
"""Returns a representation of value that may be printed in a project file,
mimicking Xcode's behavior.
_XCPrintableValue can handle str and int values, XCObjects (which are
made printable by returning their id property), and list and dict objects
composed of any of the above types. When printing a list or dict, and
_should_print_single_line is False, the tabs parameter is used to determine
how much to indent the lines corresponding to the items in the list or
dict.
If flatten_list is True, single-element lists will be transformed into
strings.
"""
printable = ''
comment = None
if self._should_print_single_line:
sep = ' '
element_tabs = ''
end_tabs = ''
else:
sep = '\n'
element_tabs = '\t' * (tabs + 1)
end_tabs = '\t' * tabs
if isinstance(value, XCObject):
printable += value.id
comment = value.Comment()
elif isinstance(value, str):
printable += self._EncodeString(value)
elif isinstance(value, unicode):
printable += self._EncodeString(value.encode('utf-8'))
elif isinstance(value, int):
printable += str(value)
elif isinstance(value, list):
if flatten_list and len(value) <= 1:
if len(value) == 0:
printable += self._EncodeString('')
else:
printable += self._EncodeString(value[0])
else:
printable = '(' + sep
for item in value:
printable += element_tabs + \
self._XCPrintableValue(tabs + 1, item, flatten_list) + \
',' + sep
printable += end_tabs + ')'
elif isinstance(value, dict):
printable = '{' + sep
for item_key, item_value in sorted(value.iteritems()):
printable += element_tabs + \
self._XCPrintableValue(tabs + 1, item_key, flatten_list) + ' = ' + \
self._XCPrintableValue(tabs + 1, item_value, flatten_list) + ';' + \
sep
printable += end_tabs + '}'
else:
raise TypeError("Can't make " + value.__class__.__name__ + ' printable')
if comment != None:
printable += ' ' + self._EncodeComment(comment)
return printable
def _XCKVPrint(self, file, tabs, key, value):
"""Prints a key and value, members of an XCObject's _properties dictionary,
to file.
tabs is an int identifying the indentation level. If the class'
_should_print_single_line variable is True, tabs is ignored and the
key-value pair will be followed by a space instead of a newline.
"""
if self._should_print_single_line:
printable = ''
after_kv = ' '
else:
printable = '\t' * tabs
after_kv = '\n'
# Xcode usually prints remoteGlobalIDString values in PBXContainerItemProxy
# objects without comments. Sometimes it prints them with comments, but
# the majority of the time, it doesn't. To avoid unnecessary changes to
# the project file after Xcode opens it, don't write comments for
# remoteGlobalIDString. This is a sucky hack and it would certainly be
# cleaner to extend the schema to indicate whether or not a comment should
# be printed, but since this is the only case where the problem occurs and
# Xcode itself can't seem to make up its mind, the hack will suffice.
#
# Also see PBXContainerItemProxy._schema['remoteGlobalIDString'].
if key == 'remoteGlobalIDString' and isinstance(self,
PBXContainerItemProxy):
value_to_print = value.id
else:
value_to_print = value
# PBXBuildFile's settings property is represented in the output as a dict,
# but a hack here has it represented as a string. Arrange to strip off the
# quotes so that it shows up in the output as expected.
if key == 'settings' and isinstance(self, PBXBuildFile):
strip_value_quotes = True
else:
strip_value_quotes = False
# In another one-off, let's set flatten_list on buildSettings properties
# of XCBuildConfiguration objects, because that's how Xcode treats them.
if key == 'buildSettings' and isinstance(self, XCBuildConfiguration):
flatten_list = True
else:
flatten_list = False
try:
printable_key = self._XCPrintableValue(tabs, key, flatten_list)
printable_value = self._XCPrintableValue(tabs, value_to_print,
flatten_list)
if strip_value_quotes and len(printable_value) > 1 and \
printable_value[0] == '"' and printable_value[-1] == '"':
printable_value = printable_value[1:-1]
printable += printable_key + ' = ' + printable_value + ';' + after_kv
except TypeError, e:
gyp.common.ExceptionAppend(e,
'while printing key "%s"' % key)
raise
self._XCPrint(file, 0, printable)
def Print(self, file=sys.stdout):
"""Prints a reprentation of this object to file, adhering to Xcode output
formatting.
"""
self.VerifyHasRequiredProperties()
if self._should_print_single_line:
# When printing an object in a single line, Xcode doesn't put any space
# between the beginning of a dictionary (or presumably a list) and the
# first contained item, so you wind up with snippets like
# ...CDEF = {isa = PBXFileReference; fileRef = 0123...
# If it were me, I would have put a space in there after the opening
# curly, but I guess this is just another one of those inconsistencies
# between how Xcode prints PBXFileReference and PBXBuildFile objects as
# compared to other objects. Mimic Xcode's behavior here by using an
# empty string for sep.
sep = ''
end_tabs = 0
else:
sep = '\n'
end_tabs = 2
# Start the object. For example, '\t\tPBXProject = {\n'.
self._XCPrint(file, 2, self._XCPrintableValue(2, self) + ' = {' + sep)
# "isa" isn't in the _properties dictionary, it's an intrinsic property
# of the class which the object belongs to. Xcode always outputs "isa"
# as the first element of an object dictionary.
self._XCKVPrint(file, 3, 'isa', self.__class__.__name__)
# The remaining elements of an object dictionary are sorted alphabetically.
for property, value in sorted(self._properties.iteritems()):
self._XCKVPrint(file, 3, property, value)
# End the object.
self._XCPrint(file, end_tabs, '};\n')
def UpdateProperties(self, properties, do_copy=False):
"""Merge the supplied properties into the _properties dictionary.
The input properties must adhere to the class schema or a KeyError or
TypeError exception will be raised. If adding an object of an XCObject
subclass and the schema indicates a strong relationship, the object's
parent will be set to this object.
If do_copy is True, then lists, dicts, strong-owned XCObjects, and
strong-owned XCObjects in lists will be copied instead of having their
references added.
"""
if properties is None:
return
for property, value in properties.iteritems():
# Make sure the property is in the schema.
if not property in self._schema:
raise KeyError(property + ' not in ' + self.__class__.__name__)
# Make sure the property conforms to the schema.
(is_list, property_type, is_strong) = self._schema[property][0:3]
if is_list:
if value.__class__ != list:
raise TypeError(
property + ' of ' + self.__class__.__name__ + \
' must be list, not ' + value.__class__.__name__)
for item in value:
if not isinstance(item, property_type) and \
not (item.__class__ == unicode and property_type == str):
# Accept unicode where str is specified. str is treated as
# UTF-8-encoded.
raise TypeError(
'item of ' + property + ' of ' + self.__class__.__name__ + \
' must be ' + property_type.__name__ + ', not ' + \
item.__class__.__name__)
elif not isinstance(value, property_type) and \
not (value.__class__ == unicode and property_type == str):
# Accept unicode where str is specified. str is treated as
# UTF-8-encoded.
raise TypeError(
property + ' of ' + self.__class__.__name__ + ' must be ' + \
property_type.__name__ + ', not ' + value.__class__.__name__)
# Checks passed, perform the assignment.
if do_copy:
if isinstance(value, XCObject):
if is_strong:
self._properties[property] = value.Copy()
else:
self._properties[property] = value
elif isinstance(value, str) or isinstance(value, unicode) or \
isinstance(value, int):
self._properties[property] = value
elif isinstance(value, list):
if is_strong:
# If is_strong is True, each element is an XCObject, so it's safe
# to call Copy.
self._properties[property] = []
for item in value:
self._properties[property].append(item.Copy())
else:
self._properties[property] = value[:]
elif isinstance(value, dict):
self._properties[property] = value.copy()
else:
raise TypeError("Don't know how to copy a " + \
value.__class__.__name__ + ' object for ' + \
property + ' in ' + self.__class__.__name__)
else:
self._properties[property] = value
# Set up the child's back-reference to this object. Don't use |value|
# any more because it may not be right if do_copy is true.
if is_strong:
if not is_list:
self._properties[property].parent = self
else:
for item in self._properties[property]:
item.parent = self
def HasProperty(self, key):
return key in self._properties
def GetProperty(self, key):
return self._properties[key]
def SetProperty(self, key, value):
self.UpdateProperties({key: value})
def DelProperty(self, key):
if key in self._properties:
del self._properties[key]
def AppendProperty(self, key, value):
# TODO(mark): Support ExtendProperty too (and make this call that)?
# Schema validation.
if not key in self._schema:
raise KeyError(key + ' not in ' + self.__class__.__name__)
(is_list, property_type, is_strong) = self._schema[key][0:3]
if not is_list:
raise TypeError(key + ' of ' + self.__class__.__name__ + ' must be list')
if not isinstance(value, property_type):
raise TypeError('item of ' + key + ' of ' + self.__class__.__name__ + \
' must be ' + property_type.__name__ + ', not ' + \
value.__class__.__name__)
# If the property doesn't exist yet, create a new empty list to receive the
# item.
if not key in self._properties:
self._properties[key] = []
# Set up the ownership link.
if is_strong:
value.parent = self
# Store the item.
self._properties[key].append(value)
def VerifyHasRequiredProperties(self):
"""Ensure that all properties identified as required by the schema are
set.
"""
# TODO(mark): A stronger verification mechanism is needed. Some
# subclasses need to perform validation beyond what the schema can enforce.
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong, is_required) = attributes[0:4]
if is_required and not property in self._properties:
raise KeyError(self.__class__.__name__ + ' requires ' + property)
def _SetDefaultsFromSchema(self):
"""Assign object default values according to the schema. This will not
overwrite properties that have already been set."""
defaults = {}
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong, is_required) = attributes[0:4]
if is_required and len(attributes) >= 5 and \
not property in self._properties:
default = attributes[4]
defaults[property] = default
if len(defaults) > 0:
# Use do_copy=True so that each new object gets its own copy of strong
# objects, lists, and dicts.
self.UpdateProperties(defaults, do_copy=True)
class XCHierarchicalElement(XCObject):
"""Abstract base for PBXGroup and PBXFileReference. Not represented in a
project file."""
# TODO(mark): Do name and path belong here? Probably so.
# If path is set and name is not, name may have a default value. Name will
# be set to the basename of path, if the basename of path is different from
# the full value of path. If path is already just a leaf name, name will
# not be set.
_schema = XCObject._schema.copy()
_schema.update({
'comments': [0, str, 0, 0],
'fileEncoding': [0, str, 0, 0],
'includeInIndex': [0, int, 0, 0],
'indentWidth': [0, int, 0, 0],
'lineEnding': [0, int, 0, 0],
'sourceTree': [0, str, 0, 1, '<group>'],
'tabWidth': [0, int, 0, 0],
'usesTabs': [0, int, 0, 0],
'wrapsLines': [0, int, 0, 0],
})
def __init__(self, properties=None, id=None, parent=None):
# super
XCObject.__init__(self, properties, id, parent)
if 'path' in self._properties and not 'name' in self._properties:
path = self._properties['path']
name = posixpath.basename(path)
if name != '' and path != name:
self.SetProperty('name', name)
if 'path' in self._properties and \
(not 'sourceTree' in self._properties or \
self._properties['sourceTree'] == '<group>'):
# If the pathname begins with an Xcode variable like "$(SDKROOT)/", take
# the variable out and make the path be relative to that variable by
# assigning the variable name as the sourceTree.
(source_tree, path) = SourceTreeAndPathFromPath(self._properties['path'])
if source_tree != None:
self._properties['sourceTree'] = source_tree
if path != None:
self._properties['path'] = path
if source_tree != None and path is None and \
not 'name' in self._properties:
# The path was of the form "$(SDKROOT)" with no path following it.
# This object is now relative to that variable, so it has no path
# attribute of its own. It does, however, keep a name.
del self._properties['path']
self._properties['name'] = source_tree
def Name(self):
if 'name' in self._properties:
return self._properties['name']
elif 'path' in self._properties:
return self._properties['path']
else:
# This happens in the case of the root PBXGroup.
return None
def Hashables(self):
"""Custom hashables for XCHierarchicalElements.
XCHierarchicalElements are special. Generally, their hashes shouldn't
change if the paths don't change. The normal XCObject implementation of
Hashables adds a hashable for each object, which means that if
the hierarchical structure changes (possibly due to changes caused when
TakeOverOnlyChild runs and encounters slight changes in the hierarchy),
the hashes will change. For example, if a project file initially contains
a/b/f1 and a/b becomes collapsed into a/b, f1 will have a single parent
a/b. If someone later adds a/f2 to the project file, a/b can no longer be
collapsed, and f1 winds up with parent b and grandparent a. That would
be sufficient to change f1's hash.
To counteract this problem, hashables for all XCHierarchicalElements except
for the main group (which has neither a name nor a path) are taken to be
just the set of path components. Because hashables are inherited from
parents, this provides assurance that a/b/f1 has the same set of hashables
whether its parent is b or a/b.
The main group is a special case. As it is permitted to have no name or
path, it is permitted to use the standard XCObject hash mechanism. This
is not considered a problem because there can be only one main group.
"""
if self == self.PBXProjectAncestor()._properties['mainGroup']:
# super
return XCObject.Hashables(self)
hashables = []
# Put the name in first, ensuring that if TakeOverOnlyChild collapses
# children into a top-level group like "Source", the name always goes
# into the list of hashables without interfering with path components.
if 'name' in self._properties:
# Make it less likely for people to manipulate hashes by following the
# pattern of always pushing an object type value onto the list first.
hashables.append(self.__class__.__name__ + '.name')
hashables.append(self._properties['name'])
# NOTE: This still has the problem that if an absolute path is encountered,
# including paths with a sourceTree, they'll still inherit their parents'
# hashables, even though the paths aren't relative to their parents. This
# is not expected to be much of a problem in practice.
path = self.PathFromSourceTreeAndPath()
if path != None:
components = path.split(posixpath.sep)
for component in components:
hashables.append(self.__class__.__name__ + '.path')
hashables.append(component)
hashables.extend(self._hashables)
return hashables
def Compare(self, other):
# Allow comparison of these types. PBXGroup has the highest sort rank;
# PBXVariantGroup is treated as equal to PBXFileReference.
valid_class_types = {
PBXFileReference: 'file',
PBXGroup: 'group',
PBXVariantGroup: 'file',
}
self_type = valid_class_types[self.__class__]
other_type = valid_class_types[other.__class__]
if self_type == other_type:
# If the two objects are of the same sort rank, compare their names.
return cmp(self.Name(), other.Name())
# Otherwise, sort groups before everything else.
if self_type == 'group':
return -1
return 1
def CompareRootGroup(self, other):
# This function should be used only to compare direct children of the
# containing PBXProject's mainGroup. These groups should appear in the
# listed order.
# TODO(mark): "Build" is used by gyp.generator.xcode, perhaps the
# generator should have a way of influencing this list rather than having
# to hardcode for the generator here.
order = ['Source', 'Intermediates', 'Projects', 'Frameworks', 'Products',
'Build']
# If the groups aren't in the listed order, do a name comparison.
# Otherwise, groups in the listed order should come before those that
# aren't.
self_name = self.Name()
other_name = other.Name()
self_in = isinstance(self, PBXGroup) and self_name in order
other_in = isinstance(other, PBXGroup) and other_name in order
if not self_in and not other_in:
return self.Compare(other)
if self_name in order and not other_name in order:
return -1
if other_name in order and not self_name in order:
return 1
# If both groups are in the listed order, go by the defined order.
self_index = order.index(self_name)
other_index = order.index(other_name)
if self_index < other_index:
return -1
if self_index > other_index:
return 1
return 0
def PathFromSourceTreeAndPath(self):
# Turn the object's sourceTree and path properties into a single flat
# string of a form comparable to the path parameter. If there's a
# sourceTree property other than "<group>", wrap it in $(...) for the
# comparison.
components = []
if self._properties['sourceTree'] != '<group>':
components.append('$(' + self._properties['sourceTree'] + ')')
if 'path' in self._properties:
components.append(self._properties['path'])
if len(components) > 0:
return posixpath.join(*components)
return None
def FullPath(self):
# Returns a full path to self relative to the project file, or relative
# to some other source tree. Start with self, and walk up the chain of
# parents prepending their paths, if any, until no more parents are
# available (project-relative path) or until a path relative to some
# source tree is found.
xche = self
path = None
while isinstance(xche, XCHierarchicalElement) and \
(path is None or \
(not path.startswith('/') and not path.startswith('$'))):
this_path = xche.PathFromSourceTreeAndPath()
if this_path != None and path != None:
path = posixpath.join(this_path, path)
elif this_path != None:
path = this_path
xche = xche.parent
return path
class PBXGroup(XCHierarchicalElement):
"""
Attributes:
_children_by_path: Maps pathnames of children of this PBXGroup to the
actual child XCHierarchicalElement objects.
_variant_children_by_name_and_path: Maps (name, path) tuples of
PBXVariantGroup children to the actual child PBXVariantGroup objects.
"""
_schema = XCHierarchicalElement._schema.copy()
_schema.update({
'children': [1, XCHierarchicalElement, 1, 1, []],
'name': [0, str, 0, 0],
'path': [0, str, 0, 0],
})
def __init__(self, properties=None, id=None, parent=None):
# super
XCHierarchicalElement.__init__(self, properties, id, parent)
self._children_by_path = {}
self._variant_children_by_name_and_path = {}
for child in self._properties.get('children', []):
self._AddChildToDicts(child)
def Hashables(self):
# super
hashables = XCHierarchicalElement.Hashables(self)
# It is not sufficient to just rely on name and parent to build a unique
# hashable: a node could have two child PBXGroups sharing a common name.
# To add entropy the hashable is enhanced with the names of all its
# children.
for child in self._properties.get('children', []):
child_name = child.Name()
if child_name != None:
hashables.append(child_name)
return hashables
def HashablesForChild(self):
# To avoid a circular reference the hashables used to compute a child id do
# not include the child names.
return XCHierarchicalElement.Hashables(self)
def _AddChildToDicts(self, child):
# Sets up this PBXGroup object's dicts to reference the child properly.
child_path = child.PathFromSourceTreeAndPath()
if child_path:
if child_path in self._children_by_path:
raise ValueError('Found multiple children with path ' + child_path)
self._children_by_path[child_path] = child
if isinstance(child, PBXVariantGroup):
child_name = child._properties.get('name', None)
key = (child_name, child_path)
if key in self._variant_children_by_name_and_path:
raise ValueError('Found multiple PBXVariantGroup children with ' + \
'name ' + str(child_name) + ' and path ' + \
str(child_path))
self._variant_children_by_name_and_path[key] = child
def AppendChild(self, child):
# Callers should use this instead of calling
# AppendProperty('children', child) directly because this function
# maintains the group's dicts.
self.AppendProperty('children', child)
self._AddChildToDicts(child)
def GetChildByName(self, name):
# This is not currently optimized with a dict as GetChildByPath is because
# it has few callers. Most callers probably want GetChildByPath. This
# function is only useful to get children that have names but no paths,
# which is rare. The children of the main group ("Source", "Products",
# etc.) are pretty much the only case where this is likely to come up.
#
# TODO(mark): Maybe this should raise an error if more than one child is
# present with the same name.
if not 'children' in self._properties:
return None
for child in self._properties['children']:
if child.Name() == name:
return child
return None
def GetChildByPath(self, path):
if not path:
return None
if path in self._children_by_path:
return self._children_by_path[path]
return None
def GetChildByRemoteObject(self, remote_object):
# This method is a little bit esoteric. Given a remote_object, which
# should be a PBXFileReference in another project file, this method will
# return this group's PBXReferenceProxy object serving as a local proxy
# for the remote PBXFileReference.
#
# This function might benefit from a dict optimization as GetChildByPath
# for some workloads, but profiling shows that it's not currently a
# problem.
if not 'children' in self._properties:
return None
for child in self._properties['children']:
if not isinstance(child, PBXReferenceProxy):
continue
container_proxy = child._properties['remoteRef']
if container_proxy._properties['remoteGlobalIDString'] == remote_object:
return child
return None
def AddOrGetFileByPath(self, path, hierarchical):
"""Returns an existing or new file reference corresponding to path.
If hierarchical is True, this method will create or use the necessary
hierarchical group structure corresponding to path. Otherwise, it will
look in and create an item in the current group only.
If an existing matching reference is found, it is returned, otherwise, a
new one will be created, added to the correct group, and returned.
If path identifies a directory by virtue of carrying a trailing slash,
this method returns a PBXFileReference of "folder" type. If path
identifies a variant, by virtue of it identifying a file inside a directory
with an ".lproj" extension, this method returns a PBXVariantGroup
containing the variant named by path, and possibly other variants. For
all other paths, a "normal" PBXFileReference will be returned.
"""
# Adding or getting a directory? Directories end with a trailing slash.
is_dir = False
if path.endswith('/'):
is_dir = True
path = posixpath.normpath(path)
if is_dir:
path = path + '/'
# Adding or getting a variant? Variants are files inside directories
# with an ".lproj" extension. Xcode uses variants for localization. For
# a variant path/to/Language.lproj/MainMenu.nib, put a variant group named
# MainMenu.nib inside path/to, and give it a variant named Language. In
# this example, grandparent would be set to path/to and parent_root would
# be set to Language.
variant_name = None
parent = posixpath.dirname(path)
grandparent = posixpath.dirname(parent)
parent_basename = posixpath.basename(parent)
(parent_root, parent_ext) = posixpath.splitext(parent_basename)
if parent_ext == '.lproj':
variant_name = parent_root
if grandparent == '':
grandparent = None
# Putting a directory inside a variant group is not currently supported.
assert not is_dir or variant_name is None
path_split = path.split(posixpath.sep)
if len(path_split) == 1 or \
((is_dir or variant_name != None) and len(path_split) == 2) or \
not hierarchical:
# The PBXFileReference or PBXVariantGroup will be added to or gotten from
# this PBXGroup, no recursion necessary.
if variant_name is None:
# Add or get a PBXFileReference.
file_ref = self.GetChildByPath(path)
if file_ref != None:
assert file_ref.__class__ == PBXFileReference
else:
file_ref = PBXFileReference({'path': path})
self.AppendChild(file_ref)
else:
# Add or get a PBXVariantGroup. The variant group name is the same
# as the basename (MainMenu.nib in the example above). grandparent
# specifies the path to the variant group itself, and path_split[-2:]
# is the path of the specific variant relative to its group.
variant_group_name = posixpath.basename(path)
variant_group_ref = self.AddOrGetVariantGroupByNameAndPath(
variant_group_name, grandparent)
variant_path = posixpath.sep.join(path_split[-2:])
variant_ref = variant_group_ref.GetChildByPath(variant_path)
if variant_ref != None:
assert variant_ref.__class__ == PBXFileReference
else:
variant_ref = PBXFileReference({'name': variant_name,
'path': variant_path})
variant_group_ref.AppendChild(variant_ref)
# The caller is interested in the variant group, not the specific
# variant file.
file_ref = variant_group_ref
return file_ref
else:
# Hierarchical recursion. Add or get a PBXGroup corresponding to the
# outermost path component, and then recurse into it, chopping off that
# path component.
next_dir = path_split[0]
group_ref = self.GetChildByPath(next_dir)
if group_ref != None:
assert group_ref.__class__ == PBXGroup
else:
group_ref = PBXGroup({'path': next_dir})
self.AppendChild(group_ref)
return group_ref.AddOrGetFileByPath(posixpath.sep.join(path_split[1:]),
hierarchical)
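  # Usage sketch (illustrative; the group and paths are hypothetical): adding
  # a nested path hierarchically creates intermediate PBXGroups, while a path
  # inside an ".lproj" directory yields a PBXVariantGroup rather than a plain
  # PBXFileReference.
  #
  #   group = PBXGroup({'name': 'Source'})
  #   ref = group.AddOrGetFileByPath('widgets/button.cc', hierarchical=True)
  #   # 'widgets' becomes a child PBXGroup; ref is a PBXFileReference.
  #   nib = group.AddOrGetFileByPath('en.lproj/MainMenu.nib', hierarchical=True)
  #   # nib is a PBXVariantGroup named 'MainMenu.nib' with an 'en' variant.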
def AddOrGetVariantGroupByNameAndPath(self, name, path):
"""Returns an existing or new PBXVariantGroup for name and path.
If a PBXVariantGroup identified by the name and path arguments is already
present as a child of this object, it is returned. Otherwise, a new
PBXVariantGroup with the correct properties is created, added as a child,
and returned.
This method will generally be called by AddOrGetFileByPath, which knows
when to create a variant group based on the structure of the pathnames
passed to it.
"""
key = (name, path)
if key in self._variant_children_by_name_and_path:
variant_group_ref = self._variant_children_by_name_and_path[key]
assert variant_group_ref.__class__ == PBXVariantGroup
return variant_group_ref
variant_group_properties = {'name': name}
if path != None:
variant_group_properties['path'] = path
variant_group_ref = PBXVariantGroup(variant_group_properties)
self.AppendChild(variant_group_ref)
return variant_group_ref
def TakeOverOnlyChild(self, recurse=False):
"""If this PBXGroup has only one child and it's also a PBXGroup, take
it over by making all of its children this object's children.
This function will continue to take over only children when those children
are groups. If there are three PBXGroups representing a, b, and c, with
c inside b and b inside a, and a and b have no other children, this will
result in a taking over both b and c, forming a PBXGroup for a/b/c.
If recurse is True, this function will recurse into children and ask them
to collapse themselves by taking over only children as well. Assuming
an example hierarchy with files at a/b/c/d1, a/b/c/d2, and a/b/c/d3/e/f
(d1, d2, and f are files, the rest are groups), recursion will result in
a group for a/b/c containing a group for d3/e.
"""
# At this stage, check that child class types are PBXGroup exactly,
# instead of using isinstance. The only subclass of PBXGroup,
# PBXVariantGroup, should not participate in reparenting in the same way:
# reparenting by merging different object types would be wrong.
while len(self._properties['children']) == 1 and \
self._properties['children'][0].__class__ == PBXGroup:
# Loop to take over the innermost only-child group possible.
child = self._properties['children'][0]
# Assume the child's properties, including its children. Save a copy
# of this object's old properties, because they'll still be needed.
# This object retains its existing id and parent attributes.
old_properties = self._properties
self._properties = child._properties
self._children_by_path = child._children_by_path
if not 'sourceTree' in self._properties or \
self._properties['sourceTree'] == '<group>':
# The child was relative to its parent. Fix up the path. Note that
# children with a sourceTree other than "<group>" are not relative to
# their parents, so no path fix-up is needed in that case.
if 'path' in old_properties:
if 'path' in self._properties:
# Both the original parent and child have paths set.
self._properties['path'] = posixpath.join(old_properties['path'],
self._properties['path'])
else:
# Only the original parent has a path, use it.
self._properties['path'] = old_properties['path']
if 'sourceTree' in old_properties:
# The original parent had a sourceTree set, use it.
self._properties['sourceTree'] = old_properties['sourceTree']
# If the original parent had a name set, keep using it. If the original
# parent didn't have a name but the child did, let the child's name
# live on. If the name attribute seems unnecessary now, get rid of it.
if 'name' in old_properties and old_properties['name'] != None and \
old_properties['name'] != self.Name():
self._properties['name'] = old_properties['name']
if 'name' in self._properties and 'path' in self._properties and \
self._properties['name'] == self._properties['path']:
del self._properties['name']
# Notify all children of their new parent.
for child in self._properties['children']:
child.parent = self
# If asked to recurse, recurse.
if recurse:
for child in self._properties['children']:
if child.__class__ == PBXGroup:
child.TakeOverOnlyChild(recurse)
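  # Illustrative sketch (hypothetical groups): given single-child groups
  # a -> b -> c where only c has file children, calling TakeOverOnlyChild on a
  # collapses the chain into one group whose path is 'a/b/c'.
  #
  #   a = PBXGroup({'path': 'a'})
  #   b = PBXGroup({'path': 'b'})
  #   c = PBXGroup({'path': 'c'})
  #   a.AppendChild(b)
  #   b.AppendChild(c)
  #   c.AppendChild(PBXFileReference({'path': 'file.cc'}))
  #   a.TakeOverOnlyChild()
  #   # a's path is now 'a/b/c' and file.cc is a direct child of a.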
def SortGroup(self):
self._properties['children'] = \
sorted(self._properties['children'], cmp=lambda x,y: x.Compare(y))
# Recurse.
for child in self._properties['children']:
if isinstance(child, PBXGroup):
child.SortGroup()
class XCFileLikeElement(XCHierarchicalElement):
# Abstract base for objects that can be used as the fileRef property of
# PBXBuildFile.
def PathHashables(self):
# A PBXBuildFile that refers to this object will call this method to
# obtain additional hashables specific to this XCFileLikeElement. Don't
# just use this object's hashables, they're not specific and unique enough
# on their own (without access to the parent hashables.) Instead, provide
# hashables that identify this object by path by getting its hashables as
# well as the hashables of ancestor XCHierarchicalElement objects.
hashables = []
xche = self
while xche != None and isinstance(xche, XCHierarchicalElement):
xche_hashables = xche.Hashables()
for index in xrange(0, len(xche_hashables)):
hashables.insert(index, xche_hashables[index])
xche = xche.parent
return hashables
class XCContainerPortal(XCObject):
# Abstract base for objects that can be used as the containerPortal property
# of PBXContainerItemProxy.
pass
class XCRemoteObject(XCObject):
# Abstract base for objects that can be used as the remoteGlobalIDString
# property of PBXContainerItemProxy.
pass
class PBXFileReference(XCFileLikeElement, XCContainerPortal, XCRemoteObject):
_schema = XCFileLikeElement._schema.copy()
_schema.update({
'explicitFileType': [0, str, 0, 0],
'lastKnownFileType': [0, str, 0, 0],
'name': [0, str, 0, 0],
'path': [0, str, 0, 1],
})
# Weird output rules for PBXFileReference.
_should_print_single_line = True
# super
_encode_transforms = XCFileLikeElement._alternate_encode_transforms
def __init__(self, properties=None, id=None, parent=None):
# super
XCFileLikeElement.__init__(self, properties, id, parent)
if 'path' in self._properties and self._properties['path'].endswith('/'):
self._properties['path'] = self._properties['path'][:-1]
is_dir = True
else:
is_dir = False
if 'path' in self._properties and \
not 'lastKnownFileType' in self._properties and \
not 'explicitFileType' in self._properties:
# TODO(mark): This is the replacement for a replacement for a quick hack.
# It is no longer incredibly sucky, but this list needs to be extended.
extension_map = {
'a': 'archive.ar',
'app': 'wrapper.application',
'bdic': 'file',
'bundle': 'wrapper.cfbundle',
'c': 'sourcecode.c.c',
'cc': 'sourcecode.cpp.cpp',
'cpp': 'sourcecode.cpp.cpp',
'css': 'text.css',
'cxx': 'sourcecode.cpp.cpp',
'dart': 'sourcecode',
'dylib': 'compiled.mach-o.dylib',
'framework': 'wrapper.framework',
'gyp': 'sourcecode',
'gypi': 'sourcecode',
'h': 'sourcecode.c.h',
'hxx': 'sourcecode.cpp.h',
'icns': 'image.icns',
'java': 'sourcecode.java',
'js': 'sourcecode.javascript',
'kext': 'wrapper.kext',
'm': 'sourcecode.c.objc',
'mm': 'sourcecode.cpp.objcpp',
'nib': 'wrapper.nib',
'o': 'compiled.mach-o.objfile',
'pdf': 'image.pdf',
'pl': 'text.script.perl',
'plist': 'text.plist.xml',
'pm': 'text.script.perl',
'png': 'image.png',
'py': 'text.script.python',
'r': 'sourcecode.rez',
'rez': 'sourcecode.rez',
's': 'sourcecode.asm',
'storyboard': 'file.storyboard',
'strings': 'text.plist.strings',
'swift': 'sourcecode.swift',
'ttf': 'file',
'xcassets': 'folder.assetcatalog',
'xcconfig': 'text.xcconfig',
'xcdatamodel': 'wrapper.xcdatamodel',
        'xcdatamodeld': 'wrapper.xcdatamodeld',
'xib': 'file.xib',
'y': 'sourcecode.yacc',
}
prop_map = {
'dart': 'explicitFileType',
'gyp': 'explicitFileType',
'gypi': 'explicitFileType',
}
if is_dir:
file_type = 'folder'
prop_name = 'lastKnownFileType'
else:
basename = posixpath.basename(self._properties['path'])
(root, ext) = posixpath.splitext(basename)
# Check the map using a lowercase extension.
# TODO(mark): Maybe it should try with the original case first and fall
# back to lowercase, in case there are any instances where case
# matters. There currently aren't.
if ext != '':
ext = ext[1:].lower()
# TODO(mark): "text" is the default value, but "file" is appropriate
# for unrecognized files not containing text. Xcode seems to choose
# based on content.
file_type = extension_map.get(ext, 'text')
prop_name = prop_map.get(ext, 'lastKnownFileType')
self._properties[prop_name] = file_type
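  # Illustrative sketch (hypothetical paths): when only 'path' is supplied,
  # the constructor above guesses the file type from the lowercased extension.
  #
  #   PBXFileReference({'path': 'main.mm'})    # lastKnownFileType:
  #                                            #   sourcecode.cpp.objcpp
  #   PBXFileReference({'path': 'rules.gyp'})  # explicitFileType: sourcecode
  #   PBXFileReference({'path': 'Stuff/'})     # folder type, path stored as
  #                                            #   'Stuff'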
class PBXVariantGroup(PBXGroup, XCFileLikeElement):
"""PBXVariantGroup is used by Xcode to represent localizations."""
# No additions to the schema relative to PBXGroup.
pass
# PBXReferenceProxy is also an XCFileLikeElement subclass. It is defined below
# because it uses PBXContainerItemProxy, defined below.
class XCBuildConfiguration(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'baseConfigurationReference': [0, PBXFileReference, 0, 0],
'buildSettings': [0, dict, 0, 1, {}],
'name': [0, str, 0, 1],
})
def HasBuildSetting(self, key):
return key in self._properties['buildSettings']
def GetBuildSetting(self, key):
return self._properties['buildSettings'][key]
def SetBuildSetting(self, key, value):
# TODO(mark): If a list, copy?
self._properties['buildSettings'][key] = value
def AppendBuildSetting(self, key, value):
if not key in self._properties['buildSettings']:
self._properties['buildSettings'][key] = []
self._properties['buildSettings'][key].append(value)
def DelBuildSetting(self, key):
if key in self._properties['buildSettings']:
del self._properties['buildSettings'][key]
def SetBaseConfiguration(self, value):
self._properties['baseConfigurationReference'] = value
class XCConfigurationList(XCObject):
# _configs is the default list of configurations.
_configs = [ XCBuildConfiguration({'name': 'Debug'}),
XCBuildConfiguration({'name': 'Release'}) ]
_schema = XCObject._schema.copy()
_schema.update({
'buildConfigurations': [1, XCBuildConfiguration, 1, 1, _configs],
'defaultConfigurationIsVisible': [0, int, 0, 1, 1],
'defaultConfigurationName': [0, str, 0, 1, 'Release'],
})
def Name(self):
return 'Build configuration list for ' + \
self.parent.__class__.__name__ + ' "' + self.parent.Name() + '"'
def ConfigurationNamed(self, name):
"""Convenience accessor to obtain an XCBuildConfiguration by name."""
for configuration in self._properties['buildConfigurations']:
if configuration._properties['name'] == name:
return configuration
raise KeyError(name)
def DefaultConfiguration(self):
"""Convenience accessor to obtain the default XCBuildConfiguration."""
return self.ConfigurationNamed(self._properties['defaultConfigurationName'])
def HasBuildSetting(self, key):
"""Determines the state of a build setting in all XCBuildConfiguration
child objects.
If all child objects have key in their build settings, and the value is the
same in all child objects, returns 1.
If no child objects have the key in their build settings, returns 0.
If some, but not all, child objects have the key in their build settings,
or if any children have different values for the key, returns -1.
"""
has = None
value = None
for configuration in self._properties['buildConfigurations']:
configuration_has = configuration.HasBuildSetting(key)
if has is None:
has = configuration_has
elif has != configuration_has:
return -1
if configuration_has:
configuration_value = configuration.GetBuildSetting(key)
if value is None:
value = configuration_value
elif value != configuration_value:
return -1
if not has:
return 0
return 1
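  # Illustrative sketch (hypothetical settings): the tri-state return value
  # distinguishes "set consistently everywhere" (1), "set nowhere" (0), and
  # "set in only some configurations or with differing values" (-1).
  #
  #   xcl = XCConfigurationList()
  #   xcl.SetBuildSetting('GCC_OPTIMIZATION_LEVEL', '0')
  #   xcl.HasBuildSetting('GCC_OPTIMIZATION_LEVEL')  # 1
  #   xcl.HasBuildSetting('OTHER_CFLAGS')            # 0
  #   xcl.ConfigurationNamed('Debug').SetBuildSetting('OTHER_CFLAGS', '-g')
  #   xcl.HasBuildSetting('OTHER_CFLAGS')            # -1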
def GetBuildSetting(self, key):
"""Gets the build setting for key.
All child XCConfiguration objects must have the same value set for the
setting, or a ValueError will be raised.
"""
# TODO(mark): This is wrong for build settings that are lists. The list
# contents should be compared (and a list copy returned?)
value = None
for configuration in self._properties['buildConfigurations']:
configuration_value = configuration.GetBuildSetting(key)
if value is None:
value = configuration_value
else:
if value != configuration_value:
raise ValueError('Variant values for ' + key)
return value
def SetBuildSetting(self, key, value):
"""Sets the build setting for key to value in all child
XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.SetBuildSetting(key, value)
def AppendBuildSetting(self, key, value):
"""Appends value to the build setting for key, which is treated as a list,
in all child XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.AppendBuildSetting(key, value)
def DelBuildSetting(self, key):
"""Deletes the build setting key from all child XCBuildConfiguration
objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.DelBuildSetting(key)
def SetBaseConfiguration(self, value):
"""Sets the build configuration in all child XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.SetBaseConfiguration(value)
class PBXBuildFile(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'fileRef': [0, XCFileLikeElement, 0, 1],
'settings': [0, str, 0, 0], # hack, it's a dict
})
# Weird output rules for PBXBuildFile.
_should_print_single_line = True
_encode_transforms = XCObject._alternate_encode_transforms
def Name(self):
# Example: "main.cc in Sources"
return self._properties['fileRef'].Name() + ' in ' + self.parent.Name()
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# It is not sufficient to just rely on Name() to get the
# XCFileLikeElement's name, because that is not a complete pathname.
# PathHashables returns hashables unique enough that no two
# PBXBuildFiles should wind up with the same set of hashables, unless
# someone adds the same file multiple times to the same target. That
# would be considered invalid anyway.
hashables.extend(self._properties['fileRef'].PathHashables())
return hashables
class XCBuildPhase(XCObject):
"""Abstract base for build phase classes. Not represented in a project
file.
Attributes:
_files_by_path: A dict mapping each path of a child in the files list by
path (keys) to the corresponding PBXBuildFile children (values).
_files_by_xcfilelikeelement: A dict mapping each XCFileLikeElement (keys)
to the corresponding PBXBuildFile children (values).
"""
# TODO(mark): Some build phase types, like PBXShellScriptBuildPhase, don't
# actually have a "files" list. XCBuildPhase should not have "files" but
# another abstract subclass of it should provide this, and concrete build
# phase types that do have "files" lists should be derived from that new
# abstract subclass. XCBuildPhase should only provide buildActionMask and
# runOnlyForDeploymentPostprocessing, and not files or the various
# file-related methods and attributes.
_schema = XCObject._schema.copy()
_schema.update({
'buildActionMask': [0, int, 0, 1, 0x7fffffff],
'files': [1, PBXBuildFile, 1, 1, []],
'runOnlyForDeploymentPostprocessing': [0, int, 0, 1, 0],
})
def __init__(self, properties=None, id=None, parent=None):
# super
XCObject.__init__(self, properties, id, parent)
self._files_by_path = {}
self._files_by_xcfilelikeelement = {}
for pbxbuildfile in self._properties.get('files', []):
self._AddBuildFileToDicts(pbxbuildfile)
def FileGroup(self, path):
# Subclasses must override this by returning a two-element tuple. The
# first item in the tuple should be the PBXGroup to which "path" should be
# added, either as a child or deeper descendant. The second item should
# be a boolean indicating whether files should be added into hierarchical
# groups or one single flat group.
raise NotImplementedError(
self.__class__.__name__ + ' must implement FileGroup')
def _AddPathToDict(self, pbxbuildfile, path):
"""Adds path to the dict tracking paths belonging to this build phase.
If the path is already a member of this build phase, raises an exception.
"""
if path in self._files_by_path:
raise ValueError('Found multiple build files with path ' + path)
self._files_by_path[path] = pbxbuildfile
def _AddBuildFileToDicts(self, pbxbuildfile, path=None):
"""Maintains the _files_by_path and _files_by_xcfilelikeelement dicts.
If path is specified, then it is the path that is being added to the
phase, and pbxbuildfile must contain either a PBXFileReference directly
referencing that path, or it must contain a PBXVariantGroup that itself
contains a PBXFileReference referencing the path.
If path is not specified, either the PBXFileReference's path or the paths
of all children of the PBXVariantGroup are taken as being added to the
phase.
If the path is already present in the phase, raises an exception.
    If the PBXFileReference or PBXVariantGroup referenced by pbxbuildfile
    is already present in the phase, referenced by a different PBXBuildFile
    object, raises an exception. This does not raise an exception when
    a PBXFileReference or PBXVariantGroup reappears and is referenced by the
    same PBXBuildFile that already introduced it, because in the case
of PBXVariantGroup objects, they may correspond to multiple paths that are
not all added simultaneously. When this situation occurs, the path needs
to be added to _files_by_path, but nothing needs to change in
_files_by_xcfilelikeelement, and the caller should have avoided adding
the PBXBuildFile if it is already present in the list of children.
"""
xcfilelikeelement = pbxbuildfile._properties['fileRef']
paths = []
if path != None:
# It's best when the caller provides the path.
if isinstance(xcfilelikeelement, PBXVariantGroup):
paths.append(path)
else:
# If the caller didn't provide a path, there can be either multiple
# paths (PBXVariantGroup) or one.
if isinstance(xcfilelikeelement, PBXVariantGroup):
for variant in xcfilelikeelement._properties['children']:
paths.append(variant.FullPath())
else:
paths.append(xcfilelikeelement.FullPath())
# Add the paths first, because if something's going to raise, the
# messages provided by _AddPathToDict are more useful owing to its
# having access to a real pathname and not just an object's Name().
for a_path in paths:
self._AddPathToDict(pbxbuildfile, a_path)
# If another PBXBuildFile references this XCFileLikeElement, there's a
# problem.
if xcfilelikeelement in self._files_by_xcfilelikeelement and \
self._files_by_xcfilelikeelement[xcfilelikeelement] != pbxbuildfile:
raise ValueError('Found multiple build files for ' + \
xcfilelikeelement.Name())
self._files_by_xcfilelikeelement[xcfilelikeelement] = pbxbuildfile
def AppendBuildFile(self, pbxbuildfile, path=None):
# Callers should use this instead of calling
# AppendProperty('files', pbxbuildfile) directly because this function
# maintains the object's dicts. Better yet, callers can just call AddFile
# with a pathname and not worry about building their own PBXBuildFile
# objects.
self.AppendProperty('files', pbxbuildfile)
self._AddBuildFileToDicts(pbxbuildfile, path)
def AddFile(self, path, settings=None):
(file_group, hierarchical) = self.FileGroup(path)
file_ref = file_group.AddOrGetFileByPath(path, hierarchical)
if file_ref in self._files_by_xcfilelikeelement and \
isinstance(file_ref, PBXVariantGroup):
# There's already a PBXBuildFile in this phase corresponding to the
# PBXVariantGroup. path just provides a new variant that belongs to
# the group. Add the path to the dict.
pbxbuildfile = self._files_by_xcfilelikeelement[file_ref]
self._AddBuildFileToDicts(pbxbuildfile, path)
else:
# Add a new PBXBuildFile to get file_ref into the phase.
if settings is None:
pbxbuildfile = PBXBuildFile({'fileRef': file_ref})
else:
pbxbuildfile = PBXBuildFile({'fileRef': file_ref, 'settings': settings})
self.AppendBuildFile(pbxbuildfile, path)
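  # Usage sketch (illustrative; 'native_target' is a hypothetical
  # PBXNativeTarget): AddFile routes the path through FileGroup() to find or
  # create the right file reference, then wraps it in a PBXBuildFile for this
  # phase. Two variants of the same .lproj resource share one PBXBuildFile.
  #
  #   native_target.SourcesPhase().AddFile('widgets/button.cc')
  #   resources = native_target.ResourcesPhase()
  #   resources.AddFile('en.lproj/MainMenu.nib')
  #   resources.AddFile('fr.lproj/MainMenu.nib')  # same PBXVariantGroup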
class PBXHeadersBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Headers'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXResourcesBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Resources'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXSourcesBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Sources'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXFrameworksBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Frameworks'
def FileGroup(self, path):
(root, ext) = posixpath.splitext(path)
if ext != '':
ext = ext[1:].lower()
if ext == 'o':
# .o files are added to Xcode Frameworks phases, but conceptually aren't
# frameworks, they're more like sources or intermediates. Redirect them
# to show up in one of those other groups.
return self.PBXProjectAncestor().RootGroupForPath(path)
else:
return (self.PBXProjectAncestor().FrameworksGroup(), False)
class PBXShellScriptBuildPhase(XCBuildPhase):
_schema = XCBuildPhase._schema.copy()
_schema.update({
'inputPaths': [1, str, 0, 1, []],
'name': [0, str, 0, 0],
'outputPaths': [1, str, 0, 1, []],
'shellPath': [0, str, 0, 1, '/bin/sh'],
'shellScript': [0, str, 0, 1],
'showEnvVarsInLog': [0, int, 0, 0],
})
def Name(self):
if 'name' in self._properties:
return self._properties['name']
return 'ShellScript'
class PBXCopyFilesBuildPhase(XCBuildPhase):
_schema = XCBuildPhase._schema.copy()
_schema.update({
'dstPath': [0, str, 0, 1],
'dstSubfolderSpec': [0, int, 0, 1],
'name': [0, str, 0, 0],
})
# path_tree_re matches "$(DIR)/path" or just "$(DIR)". Match group 1 is
# "DIR", match group 3 is "path" or None.
path_tree_re = re.compile('^\\$\\((.*)\\)(/(.*)|)$')
# path_tree_to_subfolder maps names of Xcode variables to the associated
# dstSubfolderSpec property value used in a PBXCopyFilesBuildPhase object.
path_tree_to_subfolder = {
'BUILT_FRAMEWORKS_DIR': 10, # Frameworks Directory
'BUILT_PRODUCTS_DIR': 16, # Products Directory
# Other types that can be chosen via the Xcode UI.
# TODO(mark): Map Xcode variable names to these.
# : 1, # Wrapper
# : 6, # Executables: 6
# : 7, # Resources
# : 15, # Java Resources
# : 11, # Shared Frameworks
# : 12, # Shared Support
# : 13, # PlugIns
}
def Name(self):
if 'name' in self._properties:
return self._properties['name']
return 'CopyFiles'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
def SetDestination(self, path):
"""Set the dstSubfolderSpec and dstPath properties from path.
path may be specified in the same notation used for XCHierarchicalElements,
specifically, "$(DIR)/path".
"""
path_tree_match = self.path_tree_re.search(path)
if path_tree_match:
# Everything else needs to be relative to an Xcode variable.
path_tree = path_tree_match.group(1)
relative_path = path_tree_match.group(3)
if path_tree in self.path_tree_to_subfolder:
subfolder = self.path_tree_to_subfolder[path_tree]
if relative_path is None:
relative_path = ''
else:
# The path starts with an unrecognized Xcode variable
# name like $(SRCROOT). Xcode will still handle this
# as an "absolute path" that starts with the variable.
subfolder = 0
relative_path = path
elif path.startswith('/'):
# Special case. Absolute paths are in dstSubfolderSpec 0.
subfolder = 0
relative_path = path[1:]
else:
raise ValueError('Can\'t use path %s in a %s' % \
(path, self.__class__.__name__))
self._properties['dstPath'] = relative_path
self._properties['dstSubfolderSpec'] = subfolder
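  # Illustrative sketch (hypothetical phase): SetDestination maps the
  # "$(DIR)/path" notation onto dstSubfolderSpec and dstPath.
  #
  #   copy_phase = PBXCopyFilesBuildPhase({'name': 'Copy Frameworks'})
  #   copy_phase.SetDestination('$(BUILT_PRODUCTS_DIR)/Libraries')
  #   # dstSubfolderSpec == 16, dstPath == 'Libraries'
  #   copy_phase.SetDestination('/usr/local/lib')
  #   # dstSubfolderSpec == 0, dstPath == 'usr/local/lib'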
class PBXBuildRule(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'compilerSpec': [0, str, 0, 1],
'filePatterns': [0, str, 0, 0],
'fileType': [0, str, 0, 1],
'isEditable': [0, int, 0, 1, 1],
'outputFiles': [1, str, 0, 1, []],
'script': [0, str, 0, 0],
})
def Name(self):
# Not very inspired, but it's what Xcode uses.
return self.__class__.__name__
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# Use the hashables of the weak objects that this object refers to.
hashables.append(self._properties['fileType'])
if 'filePatterns' in self._properties:
hashables.append(self._properties['filePatterns'])
return hashables
class PBXContainerItemProxy(XCObject):
# When referencing an item in this project file, containerPortal is the
# PBXProject root object of this project file. When referencing an item in
# another project file, containerPortal is a PBXFileReference identifying
# the other project file.
#
# When serving as a proxy to an XCTarget (in this project file or another),
# proxyType is 1. When serving as a proxy to a PBXFileReference (in another
# project file), proxyType is 2. Type 2 is used for references to the
  # products of the other project file's targets.
#
# Xcode is weird about remoteGlobalIDString. Usually, it's printed without
# a comment, indicating that it's tracked internally simply as a string, but
# sometimes it's printed with a comment (usually when the object is initially
# created), indicating that it's tracked as a project file object at least
# sometimes. This module always tracks it as an object, but contains a hack
# to prevent it from printing the comment in the project file output. See
# _XCKVPrint.
_schema = XCObject._schema.copy()
_schema.update({
'containerPortal': [0, XCContainerPortal, 0, 1],
'proxyType': [0, int, 0, 1],
'remoteGlobalIDString': [0, XCRemoteObject, 0, 1],
'remoteInfo': [0, str, 0, 1],
})
def __repr__(self):
props = self._properties
name = '%s.gyp:%s' % (props['containerPortal'].Name(), props['remoteInfo'])
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
def Name(self):
# Admittedly not the best name, but it's what Xcode uses.
return self.__class__.__name__
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# Use the hashables of the weak objects that this object refers to.
hashables.extend(self._properties['containerPortal'].Hashables())
hashables.extend(self._properties['remoteGlobalIDString'].Hashables())
return hashables
class PBXTargetDependency(XCObject):
# The "target" property accepts an XCTarget object, and obviously not
# NoneType. But XCTarget is defined below, so it can't be put into the
# schema yet. The definition of PBXTargetDependency can't be moved below
# XCTarget because XCTarget's own schema references PBXTargetDependency.
# Python doesn't deal well with this circular relationship, and doesn't have
# a real way to do forward declarations. To work around, the type of
# the "target" property is reset below, after XCTarget is defined.
#
# At least one of "name" and "target" is required.
_schema = XCObject._schema.copy()
_schema.update({
'name': [0, str, 0, 0],
'target': [0, None.__class__, 0, 0],
'targetProxy': [0, PBXContainerItemProxy, 1, 1],
})
def __repr__(self):
name = self._properties.get('name') or self._properties['target'].Name()
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
def Name(self):
# Admittedly not the best name, but it's what Xcode uses.
return self.__class__.__name__
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# Use the hashables of the weak objects that this object refers to.
hashables.extend(self._properties['targetProxy'].Hashables())
return hashables
class PBXReferenceProxy(XCFileLikeElement):
_schema = XCFileLikeElement._schema.copy()
_schema.update({
'fileType': [0, str, 0, 1],
'path': [0, str, 0, 1],
'remoteRef': [0, PBXContainerItemProxy, 1, 1],
})
class XCTarget(XCRemoteObject):
  # An XCTarget is really just an XCObject; the XCRemoteObject thing is just
  # to allow an XCTarget to be used in the remoteGlobalIDString property of
  # PBXContainerItemProxy.
#
# Setting a "name" property at instantiation may also affect "productName",
# which may in turn affect the "PRODUCT_NAME" build setting in children of
# "buildConfigurationList". See __init__ below.
_schema = XCRemoteObject._schema.copy()
_schema.update({
'buildConfigurationList': [0, XCConfigurationList, 1, 1,
XCConfigurationList()],
'buildPhases': [1, XCBuildPhase, 1, 1, []],
'dependencies': [1, PBXTargetDependency, 1, 1, []],
'name': [0, str, 0, 1],
'productName': [0, str, 0, 1],
})
def __init__(self, properties=None, id=None, parent=None,
force_outdir=None, force_prefix=None, force_extension=None):
# super
XCRemoteObject.__init__(self, properties, id, parent)
# Set up additional defaults not expressed in the schema. If a "name"
# property was supplied, set "productName" if it is not present. Also set
# the "PRODUCT_NAME" build setting in each configuration, but only if
# the setting is not present in any build configuration.
if 'name' in self._properties:
if not 'productName' in self._properties:
self.SetProperty('productName', self._properties['name'])
if 'productName' in self._properties:
if 'buildConfigurationList' in self._properties:
configs = self._properties['buildConfigurationList']
if configs.HasBuildSetting('PRODUCT_NAME') == 0:
configs.SetBuildSetting('PRODUCT_NAME',
self._properties['productName'])
def AddDependency(self, other):
pbxproject = self.PBXProjectAncestor()
other_pbxproject = other.PBXProjectAncestor()
if pbxproject == other_pbxproject:
# Add a dependency to another target in the same project file.
container = PBXContainerItemProxy({'containerPortal': pbxproject,
'proxyType': 1,
'remoteGlobalIDString': other,
'remoteInfo': other.Name()})
dependency = PBXTargetDependency({'target': other,
'targetProxy': container})
self.AppendProperty('dependencies', dependency)
else:
# Add a dependency to a target in a different project file.
other_project_ref = \
pbxproject.AddOrGetProjectReference(other_pbxproject)[1]
container = PBXContainerItemProxy({
'containerPortal': other_project_ref,
'proxyType': 1,
'remoteGlobalIDString': other,
'remoteInfo': other.Name(),
})
dependency = PBXTargetDependency({'name': other.Name(),
'targetProxy': container})
self.AppendProperty('dependencies', dependency)
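  # Usage sketch (illustrative; both targets are hypothetical): a dependency
  # on a target in the same project file carries a direct 'target' reference,
  # while a cross-project dependency is recorded by name and proxied through
  # the other project's PBXFileReference.
  #
  #   app_target.AddDependency(lib_target_in_same_project)
  #   app_target.AddDependency(lib_target_in_other_project)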
# Proxy all of these through to the build configuration list.
def ConfigurationNamed(self, name):
return self._properties['buildConfigurationList'].ConfigurationNamed(name)
def DefaultConfiguration(self):
return self._properties['buildConfigurationList'].DefaultConfiguration()
def HasBuildSetting(self, key):
return self._properties['buildConfigurationList'].HasBuildSetting(key)
def GetBuildSetting(self, key):
return self._properties['buildConfigurationList'].GetBuildSetting(key)
def SetBuildSetting(self, key, value):
return self._properties['buildConfigurationList'].SetBuildSetting(key, \
value)
def AppendBuildSetting(self, key, value):
return self._properties['buildConfigurationList'].AppendBuildSetting(key, \
value)
def DelBuildSetting(self, key):
return self._properties['buildConfigurationList'].DelBuildSetting(key)
# Redefine the type of the "target" property. See PBXTargetDependency._schema
# above.
PBXTargetDependency._schema['target'][1] = XCTarget
class PBXNativeTarget(XCTarget):
# buildPhases is overridden in the schema to be able to set defaults.
#
# NOTE: Contrary to most objects, it is advisable to set parent when
# constructing PBXNativeTarget. A parent of an XCTarget must be a PBXProject
# object. A parent reference is required for a PBXNativeTarget during
# construction to be able to set up the target defaults for productReference,
# because a PBXBuildFile object must be created for the target and it must
# be added to the PBXProject's mainGroup hierarchy.
_schema = XCTarget._schema.copy()
_schema.update({
'buildPhases': [1, XCBuildPhase, 1, 1,
[PBXSourcesBuildPhase(), PBXFrameworksBuildPhase()]],
'buildRules': [1, PBXBuildRule, 1, 1, []],
'productReference': [0, PBXFileReference, 0, 1],
'productType': [0, str, 0, 1],
})
# Mapping from Xcode product-types to settings. The settings are:
# filetype : used for explicitFileType in the project file
# prefix : the prefix for the file name
# suffix : the suffix for the file name
_product_filetypes = {
'com.apple.product-type.application': ['wrapper.application',
'', '.app'],
'com.apple.product-type.application.watchapp': ['wrapper.application',
'', '.app'],
'com.apple.product-type.watchkit-extension': ['wrapper.app-extension',
'', '.appex'],
'com.apple.product-type.app-extension': ['wrapper.app-extension',
'', '.appex'],
'com.apple.product-type.bundle': ['wrapper.cfbundle',
'', '.bundle'],
'com.apple.product-type.framework': ['wrapper.framework',
'', '.framework'],
'com.apple.product-type.library.dynamic': ['compiled.mach-o.dylib',
'lib', '.dylib'],
'com.apple.product-type.library.static': ['archive.ar',
'lib', '.a'],
'com.apple.product-type.tool': ['compiled.mach-o.executable',
'', ''],
'com.apple.product-type.bundle.unit-test': ['wrapper.cfbundle',
'', '.xctest'],
'com.googlecode.gyp.xcode.bundle': ['compiled.mach-o.dylib',
'', '.so'],
'com.apple.product-type.kernel-extension': ['wrapper.kext',
'', '.kext'],
}
def __init__(self, properties=None, id=None, parent=None,
force_outdir=None, force_prefix=None, force_extension=None):
# super
XCTarget.__init__(self, properties, id, parent)
if 'productName' in self._properties and \
'productType' in self._properties and \
not 'productReference' in self._properties and \
self._properties['productType'] in self._product_filetypes:
products_group = None
pbxproject = self.PBXProjectAncestor()
if pbxproject != None:
products_group = pbxproject.ProductsGroup()
if products_group != None:
(filetype, prefix, suffix) = \
self._product_filetypes[self._properties['productType']]
# Xcode does not have a distinct type for loadable modules that are
# pure BSD targets (not in a bundle wrapper). GYP allows such modules
# to be specified by setting a target type to loadable_module without
# having mac_bundle set. These are mapped to the pseudo-product type
# com.googlecode.gyp.xcode.bundle.
#
# By picking up this special type and converting it to a dynamic
# library (com.apple.product-type.library.dynamic) with fix-ups,
# single-file loadable modules can be produced.
#
# MACH_O_TYPE is changed to mh_bundle to produce the proper file type
# (as opposed to mh_dylib). In order for linking to succeed,
# DYLIB_CURRENT_VERSION and DYLIB_COMPATIBILITY_VERSION must be
# cleared. They are meaningless for type mh_bundle.
#
# Finally, the .so extension is forcibly applied over the default
# (.dylib), unless another forced extension is already selected.
# .dylib is plainly wrong, and .bundle is used by loadable_modules in
# bundle wrappers (com.apple.product-type.bundle). .so seems an odd
# choice because it's used as the extension on many other systems that
# don't distinguish between linkable shared libraries and non-linkable
# loadable modules, but there's precedent: Python loadable modules on
# Mac OS X use an .so extension.
if self._properties['productType'] == 'com.googlecode.gyp.xcode.bundle':
self._properties['productType'] = \
'com.apple.product-type.library.dynamic'
self.SetBuildSetting('MACH_O_TYPE', 'mh_bundle')
self.SetBuildSetting('DYLIB_CURRENT_VERSION', '')
self.SetBuildSetting('DYLIB_COMPATIBILITY_VERSION', '')
if force_extension is None:
force_extension = suffix[1:]
if self._properties['productType'] == \
           'com.apple.product-type.bundle.unit-test':
if force_extension is None:
force_extension = suffix[1:]
if force_extension is not None:
# If it's a wrapper (bundle), set WRAPPER_EXTENSION.
# Extension override.
suffix = '.' + force_extension
if filetype.startswith('wrapper.'):
self.SetBuildSetting('WRAPPER_EXTENSION', force_extension)
else:
self.SetBuildSetting('EXECUTABLE_EXTENSION', force_extension)
if filetype.startswith('compiled.mach-o.executable'):
product_name = self._properties['productName']
product_name += suffix
suffix = ''
self.SetProperty('productName', product_name)
self.SetBuildSetting('PRODUCT_NAME', product_name)
# Xcode handles most prefixes based on the target type, however there
# are exceptions. If a "BSD Dynamic Library" target is added in the
# Xcode UI, Xcode sets EXECUTABLE_PREFIX. This check duplicates that
# behavior.
if force_prefix is not None:
prefix = force_prefix
if filetype.startswith('wrapper.'):
self.SetBuildSetting('WRAPPER_PREFIX', prefix)
else:
self.SetBuildSetting('EXECUTABLE_PREFIX', prefix)
if force_outdir is not None:
self.SetBuildSetting('TARGET_BUILD_DIR', force_outdir)
# TODO(tvl): Remove the below hack.
# http://code.google.com/p/gyp/issues/detail?id=122
# Some targets include the prefix in the target_name. These targets
# really should just add a product_name setting that doesn't include
# the prefix. For example:
# target_name = 'libevent', product_name = 'event'
# This check cleans up for them.
product_name = self._properties['productName']
prefix_len = len(prefix)
if prefix_len and (product_name[:prefix_len] == prefix):
product_name = product_name[prefix_len:]
self.SetProperty('productName', product_name)
self.SetBuildSetting('PRODUCT_NAME', product_name)
ref_props = {
'explicitFileType': filetype,
'includeInIndex': 0,
'path': prefix + product_name + suffix,
'sourceTree': 'BUILT_PRODUCTS_DIR',
}
file_ref = PBXFileReference(ref_props)
products_group.AppendChild(file_ref)
self.SetProperty('productReference', file_ref)
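  # Illustrative sketch (the project and target names are hypothetical):
  # supplying 'name' and 'productType' at construction, together with a
  # PBXProject parent, is enough for the defaults above to create the
  # productReference in the project's Products group.
  #
  #   project = PBXProject(path='sample.xcodeproj')
  #   target = PBXNativeTarget(
  #       {'name': 'hello', 'productType': 'com.apple.product-type.tool'},
  #       parent=project)
  #   # target's productReference is a 'hello' PBXFileReference with
  #   # explicitFileType 'compiled.mach-o.executable' in Products.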
def GetBuildPhaseByType(self, type):
if not 'buildPhases' in self._properties:
return None
the_phase = None
for phase in self._properties['buildPhases']:
if isinstance(phase, type):
        # Some phases may be present in multiples in a well-formed project
        # file, but phases like PBXSourcesBuildPhase may only be present
        # singly, and accessors such as HeadersPhase and SourcesPhase rely on
        # that. Loop over the entire list of phases and assert if more than
        # one of the desired type is found.
assert the_phase is None
the_phase = phase
return the_phase
def HeadersPhase(self):
headers_phase = self.GetBuildPhaseByType(PBXHeadersBuildPhase)
if headers_phase is None:
headers_phase = PBXHeadersBuildPhase()
# The headers phase should come before the resources, sources, and
# frameworks phases, if any.
insert_at = len(self._properties['buildPhases'])
for index in xrange(0, len(self._properties['buildPhases'])):
phase = self._properties['buildPhases'][index]
if isinstance(phase, PBXResourcesBuildPhase) or \
isinstance(phase, PBXSourcesBuildPhase) or \
isinstance(phase, PBXFrameworksBuildPhase):
insert_at = index
break
self._properties['buildPhases'].insert(insert_at, headers_phase)
headers_phase.parent = self
return headers_phase
def ResourcesPhase(self):
resources_phase = self.GetBuildPhaseByType(PBXResourcesBuildPhase)
if resources_phase is None:
resources_phase = PBXResourcesBuildPhase()
# The resources phase should come before the sources and frameworks
# phases, if any.
insert_at = len(self._properties['buildPhases'])
for index in xrange(0, len(self._properties['buildPhases'])):
phase = self._properties['buildPhases'][index]
if isinstance(phase, PBXSourcesBuildPhase) or \
isinstance(phase, PBXFrameworksBuildPhase):
insert_at = index
break
self._properties['buildPhases'].insert(insert_at, resources_phase)
resources_phase.parent = self
return resources_phase
def SourcesPhase(self):
sources_phase = self.GetBuildPhaseByType(PBXSourcesBuildPhase)
if sources_phase is None:
sources_phase = PBXSourcesBuildPhase()
self.AppendProperty('buildPhases', sources_phase)
return sources_phase
def FrameworksPhase(self):
frameworks_phase = self.GetBuildPhaseByType(PBXFrameworksBuildPhase)
if frameworks_phase is None:
frameworks_phase = PBXFrameworksBuildPhase()
self.AppendProperty('buildPhases', frameworks_phase)
return frameworks_phase
def AddDependency(self, other):
# super
XCTarget.AddDependency(self, other)
static_library_type = 'com.apple.product-type.library.static'
shared_library_type = 'com.apple.product-type.library.dynamic'
framework_type = 'com.apple.product-type.framework'
if isinstance(other, PBXNativeTarget) and \
'productType' in self._properties and \
self._properties['productType'] != static_library_type and \
'productType' in other._properties and \
(other._properties['productType'] == static_library_type or \
((other._properties['productType'] == shared_library_type or \
other._properties['productType'] == framework_type) and \
((not other.HasBuildSetting('MACH_O_TYPE')) or
other.GetBuildSetting('MACH_O_TYPE') != 'mh_bundle'))):
file_ref = other.GetProperty('productReference')
pbxproject = self.PBXProjectAncestor()
other_pbxproject = other.PBXProjectAncestor()
if pbxproject != other_pbxproject:
other_project_product_group = \
pbxproject.AddOrGetProjectReference(other_pbxproject)[0]
file_ref = other_project_product_group.GetChildByRemoteObject(file_ref)
self.FrameworksPhase().AppendProperty('files',
PBXBuildFile({'fileRef': file_ref}))
class PBXAggregateTarget(XCTarget):
pass
class PBXProject(XCContainerPortal):
# A PBXProject is really just an XCObject, the XCContainerPortal thing is
# just to allow PBXProject to be used in the containerPortal property of
# PBXContainerItemProxy.
"""
Attributes:
path: "sample.xcodeproj". TODO(mark) Document me!
_other_pbxprojects: A dictionary, keyed by other PBXProject objects. Each
value is a reference to the dict in the
projectReferences list associated with the keyed
PBXProject.
"""
_schema = XCContainerPortal._schema.copy()
_schema.update({
'attributes': [0, dict, 0, 0],
'buildConfigurationList': [0, XCConfigurationList, 1, 1,
XCConfigurationList()],
'compatibilityVersion': [0, str, 0, 1, 'Xcode 3.2'],
'hasScannedForEncodings': [0, int, 0, 1, 1],
'mainGroup': [0, PBXGroup, 1, 1, PBXGroup()],
'projectDirPath': [0, str, 0, 1, ''],
'projectReferences': [1, dict, 0, 0],
'projectRoot': [0, str, 0, 1, ''],
'targets': [1, XCTarget, 1, 1, []],
})
def __init__(self, properties=None, id=None, parent=None, path=None):
self.path = path
self._other_pbxprojects = {}
# super
return XCContainerPortal.__init__(self, properties, id, parent)
def Name(self):
name = self.path
if name[-10:] == '.xcodeproj':
name = name[:-10]
return posixpath.basename(name)
def Path(self):
return self.path
def Comment(self):
return 'Project object'
def Children(self):
# super
children = XCContainerPortal.Children(self)
# Add children that the schema doesn't know about. Maybe there's a more
# elegant way around this, but this is the only case where we need to own
# objects in a dictionary (that is itself in a list), and three lines for
# a one-off isn't that big a deal.
if 'projectReferences' in self._properties:
for reference in self._properties['projectReferences']:
children.append(reference['ProductGroup'])
return children
def PBXProjectAncestor(self):
return self
def _GroupByName(self, name):
if not 'mainGroup' in self._properties:
self.SetProperty('mainGroup', PBXGroup())
main_group = self._properties['mainGroup']
group = main_group.GetChildByName(name)
if group is None:
group = PBXGroup({'name': name})
main_group.AppendChild(group)
return group
# SourceGroup and ProductsGroup are created by default in Xcode's own
# templates.
def SourceGroup(self):
return self._GroupByName('Source')
def ProductsGroup(self):
return self._GroupByName('Products')
# IntermediatesGroup is used to collect source-like files that are generated
# by rules or script phases and are placed in intermediate directories such
# as DerivedSources.
def IntermediatesGroup(self):
return self._GroupByName('Intermediates')
# FrameworksGroup and ProjectsGroup are top-level groups used to collect
# frameworks and projects.
def FrameworksGroup(self):
return self._GroupByName('Frameworks')
def ProjectsGroup(self):
return self._GroupByName('Projects')
def RootGroupForPath(self, path):
"""Returns a PBXGroup child of this object to which path should be added.
This method is intended to choose between SourceGroup and
IntermediatesGroup on the basis of whether path is present in a source
directory or an intermediates directory. For the purposes of this
determination, any path located within a derived file directory such as
PROJECT_DERIVED_FILE_DIR is treated as being in an intermediates
directory.
The returned value is a two-element tuple. The first element is the
PBXGroup, and the second element specifies whether that group should be
organized hierarchically (True) or as a single flat list (False).
"""
# TODO(mark): make this a class variable and bind to self on call?
# Also, this list is nowhere near exhaustive.
# INTERMEDIATE_DIR and SHARED_INTERMEDIATE_DIR are used by
# gyp.generator.xcode. There should probably be some way for that module
# to push the names in, rather than having to hard-code them here.
source_tree_groups = {
'DERIVED_FILE_DIR': (self.IntermediatesGroup, True),
'INTERMEDIATE_DIR': (self.IntermediatesGroup, True),
'PROJECT_DERIVED_FILE_DIR': (self.IntermediatesGroup, True),
'SHARED_INTERMEDIATE_DIR': (self.IntermediatesGroup, True),
}
(source_tree, path) = SourceTreeAndPathFromPath(path)
if source_tree != None and source_tree in source_tree_groups:
(group_func, hierarchical) = source_tree_groups[source_tree]
group = group_func()
return (group, hierarchical)
# TODO(mark): make additional choices based on file extension.
return (self.SourceGroup(), True)
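  # Illustrative sketch ('project' is a hypothetical PBXProject): generated
  # files rooted in a derived-files variable land in the Intermediates group,
  # while ordinary relative paths go to the Source group; both are organized
  # hierarchically.
  #
  #   project.RootGroupForPath('$(INTERMEDIATE_DIR)/generated.cc')
  #   # -> (Intermediates group, True)
  #   project.RootGroupForPath('widgets/button.cc')
  #   # -> (Source group, True)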
def AddOrGetFileInRootGroup(self, path):
"""Returns a PBXFileReference corresponding to path in the correct group
according to RootGroupForPath's heuristics.
If an existing PBXFileReference for path exists, it will be returned.
Otherwise, one will be created and returned.
"""
(group, hierarchical) = self.RootGroupForPath(path)
return group.AddOrGetFileByPath(path, hierarchical)
def RootGroupsTakeOverOnlyChildren(self, recurse=False):
"""Calls TakeOverOnlyChild for all groups in the main group."""
for group in self._properties['mainGroup']._properties['children']:
if isinstance(group, PBXGroup):
group.TakeOverOnlyChild(recurse)
def SortGroups(self):
# Sort the children of the mainGroup (like "Source" and "Products")
# according to their defined order.
self._properties['mainGroup']._properties['children'] = \
sorted(self._properties['mainGroup']._properties['children'],
cmp=lambda x,y: x.CompareRootGroup(y))
# Sort everything else by putting group before files, and going
# alphabetically by name within sections of groups and files. SortGroup
# is recursive.
for group in self._properties['mainGroup']._properties['children']:
if not isinstance(group, PBXGroup):
continue
if group.Name() == 'Products':
# The Products group is a special case. Instead of sorting
# alphabetically, sort things in the order of the targets that
# produce the products. To do this, just build up a new list of
# products based on the targets.
products = []
for target in self._properties['targets']:
if not isinstance(target, PBXNativeTarget):
continue
product = target._properties['productReference']
# Make sure that the product is already in the products group.
assert product in group._properties['children']
products.append(product)
# Make sure that this process doesn't miss anything that was already
# in the products group.
assert len(products) == len(group._properties['children'])
group._properties['children'] = products
else:
group.SortGroup()
def AddOrGetProjectReference(self, other_pbxproject):
"""Add a reference to another project file (via PBXProject object) to this
one.
Returns [ProductGroup, ProjectRef]. ProductGroup is a PBXGroup object in
this project file that contains a PBXReferenceProxy object for each
product of each PBXNativeTarget in the other project file. ProjectRef is
a PBXFileReference to the other project file.
If this project file already references the other project file, the
existing ProductGroup and ProjectRef are returned. The ProductGroup will
still be updated if necessary.
"""
if not 'projectReferences' in self._properties:
self._properties['projectReferences'] = []
product_group = None
project_ref = None
if not other_pbxproject in self._other_pbxprojects:
# This project file isn't yet linked to the other one. Establish the
# link.
product_group = PBXGroup({'name': 'Products'})
# ProductGroup is strong.
product_group.parent = self
# There's nothing unique about this PBXGroup, and if left alone, it will
# wind up with the same set of hashables as all other PBXGroup objects
# owned by the projectReferences list. Add the hashables of the
# remote PBXProject that it's related to.
product_group._hashables.extend(other_pbxproject.Hashables())
# The other project reports its path as relative to the same directory
# that this project's path is relative to. The other project's path
# is not necessarily already relative to this project. Figure out the
# pathname that this project needs to use to refer to the other one.
this_path = posixpath.dirname(self.Path())
projectDirPath = self.GetProperty('projectDirPath')
if projectDirPath:
if posixpath.isabs(projectDirPath[0]):
this_path = projectDirPath
else:
this_path = posixpath.join(this_path, projectDirPath)
other_path = gyp.common.RelativePath(other_pbxproject.Path(), this_path)
# ProjectRef is weak (it's owned by the mainGroup hierarchy).
project_ref = PBXFileReference({
'lastKnownFileType': 'wrapper.pb-project',
'path': other_path,
'sourceTree': 'SOURCE_ROOT',
})
self.ProjectsGroup().AppendChild(project_ref)
ref_dict = {'ProductGroup': product_group, 'ProjectRef': project_ref}
self._other_pbxprojects[other_pbxproject] = ref_dict
self.AppendProperty('projectReferences', ref_dict)
# Xcode seems to sort this list case-insensitively
self._properties['projectReferences'] = \
sorted(self._properties['projectReferences'], cmp=lambda x,y:
cmp(x['ProjectRef'].Name().lower(),
y['ProjectRef'].Name().lower()))
else:
      # The link already exists. Pull out the relevant data.
project_ref_dict = self._other_pbxprojects[other_pbxproject]
product_group = project_ref_dict['ProductGroup']
project_ref = project_ref_dict['ProjectRef']
self._SetUpProductReferences(other_pbxproject, product_group, project_ref)
inherit_unique_symroot = self._AllSymrootsUnique(other_pbxproject, False)
targets = other_pbxproject.GetProperty('targets')
if all(self._AllSymrootsUnique(t, inherit_unique_symroot) for t in targets):
dir_path = project_ref._properties['path']
product_group._hashables.extend(dir_path)
return [product_group, project_ref]
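  # Usage sketch (illustrative; both projects are hypothetical): the first
  # call creates the proxy 'Products' group and a PBXFileReference to the
  # other .xcodeproj; subsequent calls return the same pair and refresh the
  # reference proxies.
  #
  #   (product_group, project_ref) = \
  #       this_project.AddOrGetProjectReference(other_project)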
def _AllSymrootsUnique(self, target, inherit_unique_symroot):
# Returns True if all configurations have a unique 'SYMROOT' attribute.
    # The value of inherit_unique_symroot decides whether a configuration is
    # assumed to inherit a unique 'SYMROOT' attribute from its parent when it
    # doesn't define an explicit value for 'SYMROOT'.
symroots = self._DefinedSymroots(target)
for s in self._DefinedSymroots(target):
if (s is not None and not self._IsUniqueSymrootForTarget(s) or
s is None and not inherit_unique_symroot):
return False
return True if symroots else inherit_unique_symroot
def _DefinedSymroots(self, target):
# Returns all values for the 'SYMROOT' attribute defined in all
# configurations for this target. If any configuration doesn't define the
# 'SYMROOT' attribute, None is added to the returned set. If all
# configurations don't define the 'SYMROOT' attribute, an empty set is
# returned.
config_list = target.GetProperty('buildConfigurationList')
symroots = set()
for config in config_list.GetProperty('buildConfigurations'):
setting = config.GetProperty('buildSettings')
if 'SYMROOT' in setting:
symroots.add(setting['SYMROOT'])
else:
symroots.add(None)
if len(symroots) == 1 and None in symroots:
return set()
return symroots
def _IsUniqueSymrootForTarget(self, symroot):
    # Returns True if the given 'SYMROOT' value is considered unique for its
    # target. A value is unique if the Xcode macro '$SRCROOT' appears in it
    # in any form.
uniquifier = ['$SRCROOT', '$(SRCROOT)']
if any(x in symroot for x in uniquifier):
return True
return False
def _SetUpProductReferences(self, other_pbxproject, product_group,
project_ref):
# TODO(mark): This only adds references to products in other_pbxproject
# when they don't exist in this pbxproject. Perhaps it should also
# remove references from this pbxproject that are no longer present in
# other_pbxproject. Perhaps it should update various properties if they
# change.
for target in other_pbxproject._properties['targets']:
if not isinstance(target, PBXNativeTarget):
continue
other_fileref = target._properties['productReference']
if product_group.GetChildByRemoteObject(other_fileref) is None:
# Xcode sets remoteInfo to the name of the target and not the name
# of its product, despite this proxy being a reference to the product.
container_item = PBXContainerItemProxy({
'containerPortal': project_ref,
'proxyType': 2,
'remoteGlobalIDString': other_fileref,
'remoteInfo': target.Name()
})
# TODO(mark): Does sourceTree get copied straight over from the other
# project? Can the other project ever have lastKnownFileType here
# instead of explicitFileType? (Use it if so?) Can path ever be
# unset? (I don't think so.) Can other_fileref have name set, and
# does it impact the PBXReferenceProxy if so? These are the questions
# that perhaps will be answered one day.
reference_proxy = PBXReferenceProxy({
'fileType': other_fileref._properties['explicitFileType'],
'path': other_fileref._properties['path'],
'sourceTree': other_fileref._properties['sourceTree'],
'remoteRef': container_item,
})
product_group.AppendChild(reference_proxy)
def SortRemoteProductReferences(self):
# For each remote project file, sort the associated ProductGroup in the
# same order that the targets are sorted in the remote project file. This
# is the sort order used by Xcode.
def CompareProducts(x, y, remote_products):
# x and y are PBXReferenceProxy objects. Go through their associated
# PBXContainerItem to get the remote PBXFileReference, which will be
# present in the remote_products list.
x_remote = x._properties['remoteRef']._properties['remoteGlobalIDString']
y_remote = y._properties['remoteRef']._properties['remoteGlobalIDString']
x_index = remote_products.index(x_remote)
y_index = remote_products.index(y_remote)
# Use the order of each remote PBXFileReference in remote_products to
# determine the sort order.
return cmp(x_index, y_index)
for other_pbxproject, ref_dict in self._other_pbxprojects.iteritems():
# Build up a list of products in the remote project file, ordered the
# same as the targets that produce them.
remote_products = []
for target in other_pbxproject._properties['targets']:
if not isinstance(target, PBXNativeTarget):
continue
remote_products.append(target._properties['productReference'])
# Sort the PBXReferenceProxy children according to the list of remote
# products.
product_group = ref_dict['ProductGroup']
product_group._properties['children'] = sorted(
product_group._properties['children'],
cmp=lambda x, y, rp=remote_products: CompareProducts(x, y, rp))
class XCProjectFile(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'archiveVersion': [0, int, 0, 1, 1],
'classes': [0, dict, 0, 1, {}],
'objectVersion': [0, int, 0, 1, 46],
'rootObject': [0, PBXProject, 1, 1],
})
def ComputeIDs(self, recursive=True, overwrite=True, hash=None):
# Although XCProjectFile is implemented here as an XCObject, it's not a
# proper object in the Xcode sense, and it certainly doesn't have its own
# ID. Pass through an attempt to update IDs to the real root object.
if recursive:
self._properties['rootObject'].ComputeIDs(recursive, overwrite, hash)
def Print(self, file=sys.stdout):
self.VerifyHasRequiredProperties()
# Add the special "objects" property, which will be caught and handled
# separately during printing. This structure allows a fairly standard
    # loop to do the normal printing.
self._properties['objects'] = {}
self._XCPrint(file, 0, '// !$*UTF8*$!\n')
if self._should_print_single_line:
self._XCPrint(file, 0, '{ ')
else:
self._XCPrint(file, 0, '{\n')
for property, value in sorted(self._properties.iteritems(),
cmp=lambda x, y: cmp(x, y)):
if property == 'objects':
self._PrintObjects(file)
else:
self._XCKVPrint(file, 1, property, value)
self._XCPrint(file, 0, '}\n')
del self._properties['objects']
def _PrintObjects(self, file):
if self._should_print_single_line:
self._XCPrint(file, 0, 'objects = {')
else:
self._XCPrint(file, 1, 'objects = {\n')
objects_by_class = {}
for object in self.Descendants():
if object == self:
continue
class_name = object.__class__.__name__
if not class_name in objects_by_class:
objects_by_class[class_name] = []
objects_by_class[class_name].append(object)
for class_name in sorted(objects_by_class):
self._XCPrint(file, 0, '\n')
self._XCPrint(file, 0, '/* Begin ' + class_name + ' section */\n')
for object in sorted(objects_by_class[class_name],
cmp=lambda x, y: cmp(x.id, y.id)):
object.Print(file)
self._XCPrint(file, 0, '/* End ' + class_name + ' section */\n')
if self._should_print_single_line:
self._XCPrint(file, 0, '}; ')
else:
self._XCPrint(file, 1, '};\n')
| mit |
kbr/fritzconnection | fritzconnection/cli/fritzhosts.py | 1 | 1157 | """
fritzhosts.py
Module to inspect the FritzBox API for registered hosts.
CLI interface.
This module is part of the FritzConnection package.
https://github.com/kbr/fritzconnection
License: MIT (https://opensource.org/licenses/MIT)
Author: Klaus Bremer
"""
from ..lib.fritzhosts import FritzHosts
from . utils import get_cli_arguments, get_instance, print_header
def print_status(fh):
print('FritzHosts:')
print('List of registered hosts:\n')
print('{:>3}: {:<16} {:<28} {:<17} {}\n'.format(
'n', 'ip', 'name', 'mac', 'status'))
hosts = fh.get_hosts_info()
for index, host in enumerate(hosts, start=1):
status = 'active' if host['status'] else '-'
ip = host['ip'] if host['ip'] else '-'
mac = host['mac'] if host['mac'] else '-'
hn = host['name']
print(f'{index:>3}: {ip:<16} {hn:<28} {mac:<17} {status}')
print('\n')
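# Example of the resulting listing (illustrative values only):
#
#       n: ip               name                         mac               status
#
#       1: 192.168.178.20   fritz-repeater               C6:25:06:83:64:C5 active
#       2: 192.168.178.24   android-phone                -                 -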
def main():
args = get_cli_arguments()
if not args.password:
print('Exit: password required.')
else:
fh = get_instance(FritzHosts, args)
print_header(fh)
print_status(fh)
if __name__ == '__main__':
main()
| mit |
vikatory/kbengine | kbe/res/scripts/common/Lib/concurrent/futures/_base.py | 88 | 19638 | # Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
__author__ = 'Brian Quinlan ([email protected])'
import collections
import logging
import threading
import time
FIRST_COMPLETED = 'FIRST_COMPLETED'
FIRST_EXCEPTION = 'FIRST_EXCEPTION'
ALL_COMPLETED = 'ALL_COMPLETED'
_AS_COMPLETED = '_AS_COMPLETED'
# Possible future states (for internal use by the futures package).
PENDING = 'PENDING'
RUNNING = 'RUNNING'
# The future was cancelled by the user...
CANCELLED = 'CANCELLED'
# ...and _Waiter.add_cancelled() was called by a worker.
CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
FINISHED = 'FINISHED'
_FUTURE_STATES = [
PENDING,
RUNNING,
CANCELLED,
CANCELLED_AND_NOTIFIED,
FINISHED
]
_STATE_TO_DESCRIPTION_MAP = {
PENDING: "pending",
RUNNING: "running",
CANCELLED: "cancelled",
CANCELLED_AND_NOTIFIED: "cancelled",
FINISHED: "finished"
}
# Logger for internal use by the futures package.
LOGGER = logging.getLogger("concurrent.futures")
class Error(Exception):
"""Base class for all future-related exceptions."""
pass
class CancelledError(Error):
"""The Future was cancelled."""
pass
class TimeoutError(Error):
"""The operation exceeded the given deadline."""
pass
class _Waiter(object):
"""Provides the event that wait() and as_completed() block on."""
def __init__(self):
self.event = threading.Event()
self.finished_futures = []
def add_result(self, future):
self.finished_futures.append(future)
def add_exception(self, future):
self.finished_futures.append(future)
def add_cancelled(self, future):
self.finished_futures.append(future)
class _AsCompletedWaiter(_Waiter):
"""Used by as_completed()."""
def __init__(self):
super(_AsCompletedWaiter, self).__init__()
self.lock = threading.Lock()
def add_result(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_result(future)
self.event.set()
def add_exception(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_exception(future)
self.event.set()
def add_cancelled(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_cancelled(future)
self.event.set()
class _FirstCompletedWaiter(_Waiter):
"""Used by wait(return_when=FIRST_COMPLETED)."""
def add_result(self, future):
super().add_result(future)
self.event.set()
def add_exception(self, future):
super().add_exception(future)
self.event.set()
def add_cancelled(self, future):
super().add_cancelled(future)
self.event.set()
class _AllCompletedWaiter(_Waiter):
"""Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED)."""
def __init__(self, num_pending_calls, stop_on_exception):
self.num_pending_calls = num_pending_calls
self.stop_on_exception = stop_on_exception
self.lock = threading.Lock()
super().__init__()
def _decrement_pending_calls(self):
with self.lock:
self.num_pending_calls -= 1
if not self.num_pending_calls:
self.event.set()
def add_result(self, future):
super().add_result(future)
self._decrement_pending_calls()
def add_exception(self, future):
super().add_exception(future)
if self.stop_on_exception:
self.event.set()
else:
self._decrement_pending_calls()
def add_cancelled(self, future):
super().add_cancelled(future)
self._decrement_pending_calls()
class _AcquireFutures(object):
"""A context manager that does an ordered acquire of Future conditions."""
def __init__(self, futures):
self.futures = sorted(futures, key=id)
def __enter__(self):
for future in self.futures:
future._condition.acquire()
def __exit__(self, *args):
for future in self.futures:
future._condition.release()
def _create_and_install_waiters(fs, return_when):
if return_when == _AS_COMPLETED:
waiter = _AsCompletedWaiter()
elif return_when == FIRST_COMPLETED:
waiter = _FirstCompletedWaiter()
else:
pending_count = sum(
f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs)
if return_when == FIRST_EXCEPTION:
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True)
elif return_when == ALL_COMPLETED:
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False)
else:
raise ValueError("Invalid return condition: %r" % return_when)
for f in fs:
f._waiters.append(waiter)
return waiter
def as_completed(fs, timeout=None):
"""An iterator over the given futures that yields each as it completes.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
iterate over.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
An iterator that yields the given Futures as they complete (finished or
cancelled). If any given Futures are duplicated, they will be returned
once.
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
"""
if timeout is not None:
end_time = timeout + time.time()
fs = set(fs)
with _AcquireFutures(fs):
finished = set(
f for f in fs
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
pending = fs - finished
waiter = _create_and_install_waiters(fs, _AS_COMPLETED)
try:
yield from finished
while pending:
if timeout is None:
wait_timeout = None
else:
wait_timeout = end_time - time.time()
if wait_timeout < 0:
raise TimeoutError(
'%d (of %d) futures unfinished' % (
len(pending), len(fs)))
waiter.event.wait(wait_timeout)
with waiter.lock:
finished = waiter.finished_futures
waiter.finished_futures = []
waiter.event.clear()
for future in finished:
yield future
pending.remove(future)
finally:
for f in fs:
with f._condition:
f._waiters.remove(waiter)
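# A minimal usage sketch for as_completed() (assumes an executor such as
# ThreadPoolExecutor from concurrent.futures.thread; not part of this module):
#
#     from concurrent.futures import ThreadPoolExecutor
#     with ThreadPoolExecutor(max_workers=4) as executor:
#         futures = [executor.submit(pow, 2, n) for n in range(8)]
#         for future in as_completed(futures, timeout=60):
#             print(future.result())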
DoneAndNotDoneFutures = collections.namedtuple(
'DoneAndNotDoneFutures', 'done not_done')
def wait(fs, timeout=None, return_when=ALL_COMPLETED):
"""Wait for the futures in the given sequence to complete.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
wait upon.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
return_when: Indicates when this function should return. The options
are:
FIRST_COMPLETED - Return when any future finishes or is
cancelled.
FIRST_EXCEPTION - Return when any future finishes by raising an
exception. If no future raises an exception
then it is equivalent to ALL_COMPLETED.
ALL_COMPLETED - Return when all futures finish or are cancelled.
Returns:
A named 2-tuple of sets. The first set, named 'done', contains the
futures that completed (is finished or cancelled) before the wait
completed. The second set, named 'not_done', contains uncompleted
futures.
"""
with _AcquireFutures(fs):
done = set(f for f in fs
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
not_done = set(fs) - done
if (return_when == FIRST_COMPLETED) and done:
return DoneAndNotDoneFutures(done, not_done)
elif (return_when == FIRST_EXCEPTION) and done:
if any(f for f in done
if not f.cancelled() and f.exception() is not None):
return DoneAndNotDoneFutures(done, not_done)
if len(done) == len(fs):
return DoneAndNotDoneFutures(done, not_done)
waiter = _create_and_install_waiters(fs, return_when)
waiter.event.wait(timeout)
for f in fs:
with f._condition:
f._waiters.remove(waiter)
done.update(waiter.finished_futures)
return DoneAndNotDoneFutures(done, set(fs) - done)
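# A minimal usage sketch for wait() (futures obtained from any executor;
# illustrative only):
#
#     done, not_done = wait(futures, timeout=5, return_when=FIRST_COMPLETED)
#     for f in done:
#         print(f.result())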
class Future(object):
"""Represents the result of an asynchronous computation."""
def __init__(self):
"""Initializes the future. Should not be called by clients."""
self._condition = threading.Condition()
self._state = PENDING
self._result = None
self._exception = None
self._waiters = []
self._done_callbacks = []
def _invoke_callbacks(self):
for callback in self._done_callbacks:
try:
callback(self)
except Exception:
LOGGER.exception('exception calling callback for %r', self)
def __repr__(self):
with self._condition:
if self._state == FINISHED:
if self._exception:
return '<Future at %s state=%s raised %s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state],
self._exception.__class__.__name__)
else:
return '<Future at %s state=%s returned %s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state],
self._result.__class__.__name__)
return '<Future at %s state=%s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state])
def cancel(self):
"""Cancel the future if possible.
Returns True if the future was cancelled, False otherwise. A future
cannot be cancelled if it is running or has already completed.
"""
with self._condition:
if self._state in [RUNNING, FINISHED]:
return False
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
return True
self._state = CANCELLED
self._condition.notify_all()
self._invoke_callbacks()
return True
def cancelled(self):
"""Return True if the future was cancelled."""
with self._condition:
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]
def running(self):
"""Return True if the future is currently executing."""
with self._condition:
return self._state == RUNNING
def done(self):
"""Return True of the future was cancelled or finished executing."""
with self._condition:
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]
def __get_result(self):
if self._exception:
raise self._exception
else:
return self._result
def add_done_callback(self, fn):
"""Attaches a callable that will be called when the future finishes.
Args:
fn: A callable that will be called with this future as its only
argument when the future completes or is cancelled. The callable
will always be called by a thread in the same process in which
it was added. If the future has already completed or been
cancelled then the callable will be called immediately. These
callables are called in the order that they were added.
"""
with self._condition:
if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]:
self._done_callbacks.append(fn)
return
fn(self)
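    # A minimal callback sketch for add_done_callback() (illustrative only):
    #
    #     def _report(fut):
    #         if fut.cancelled():
    #             print('cancelled')
    #         elif fut.exception() is not None:
    #             print('failed:', fut.exception())
    #         else:
    #             print('result:', fut.result())
    #
    #     future.add_done_callback(_report)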
def result(self, timeout=None):
"""Return the result of the call that the future represents.
Args:
timeout: The number of seconds to wait for the result if the future
isn't done. If None, then there is no limit on the wait time.
Returns:
The result of the call that the future represents.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
Exception: If the call raised then that exception will be raised.
"""
with self._condition:
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self.__get_result()
self._condition.wait(timeout)
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self.__get_result()
else:
raise TimeoutError()
def exception(self, timeout=None):
"""Return the exception raised by the call that the future represents.
Args:
timeout: The number of seconds to wait for the exception if the
future isn't done. If None, then there is no limit on the wait
time.
Returns:
The exception raised by the call that the future represents or None
if the call completed without raising.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
"""
with self._condition:
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self._exception
self._condition.wait(timeout)
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self._exception
else:
raise TimeoutError()
# The following methods should only be used by Executors and in tests.
def set_running_or_notify_cancel(self):
"""Mark the future as running or process any cancel notifications.
Should only be used by Executor implementations and unit tests.
If the future has been cancelled (cancel() was called and returned
        True) then any threads waiting on the future completing (through calls
to as_completed() or wait()) are notified and False is returned.
If the future was not cancelled then it is put in the running state
(future calls to running() will return True) and True is returned.
This method should be called by Executor implementations before
executing the work associated with this future. If this method returns
False then the work should not be executed.
Returns:
False if the Future was cancelled, True otherwise.
Raises:
RuntimeError: if this method was already called or if set_result()
or set_exception() was called.
"""
with self._condition:
if self._state == CANCELLED:
self._state = CANCELLED_AND_NOTIFIED
for waiter in self._waiters:
waiter.add_cancelled(self)
# self._condition.notify_all() is not necessary because
# self.cancel() triggers a notification.
return False
elif self._state == PENDING:
self._state = RUNNING
return True
else:
LOGGER.critical('Future %s in unexpected state: %s',
id(self),
self._state)
raise RuntimeError('Future in unexpected state')
def set_result(self, result):
"""Sets the return value of work associated with the future.
Should only be used by Executor implementations and unit tests.
"""
with self._condition:
self._result = result
self._state = FINISHED
for waiter in self._waiters:
waiter.add_result(self)
self._condition.notify_all()
self._invoke_callbacks()
def set_exception(self, exception):
"""Sets the result of the future as being the given exception.
Should only be used by Executor implementations and unit tests.
"""
with self._condition:
self._exception = exception
self._state = FINISHED
for waiter in self._waiters:
waiter.add_exception(self)
self._condition.notify_all()
self._invoke_callbacks()
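    # Sketch of the executor-side protocol described above, written as a plain
    # synchronous function rather than a real Executor (names are hypothetical):
    #
    #     def run_in_caller(fn, *args, **kwargs):
    #         future = Future()
    #         if not future.set_running_or_notify_cancel():
    #             return future  # cancelled before it could start
    #         try:
    #             result = fn(*args, **kwargs)
    #         except BaseException as exc:
    #             future.set_exception(exc)
    #         else:
    #             future.set_result(result)
    #         return future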
class Executor(object):
"""This is an abstract base class for concrete asynchronous executors."""
def submit(self, fn, *args, **kwargs):
"""Submits a callable to be executed with the given arguments.
Schedules the callable to be executed as fn(*args, **kwargs) and returns
a Future instance representing the execution of the callable.
Returns:
A Future representing the given call.
"""
raise NotImplementedError()
def map(self, fn, *iterables, timeout=None):
"""Returns a iterator equivalent to map(fn, iter).
Args:
fn: A callable that will take as many arguments as there are
passed iterables.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
An iterator equivalent to: map(func, *iterables) but the calls may
be evaluated out-of-order.
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
Exception: If fn(*args) raises for any values.
"""
if timeout is not None:
end_time = timeout + time.time()
fs = [self.submit(fn, *args) for args in zip(*iterables)]
# Yield must be hidden in closure so that the futures are submitted
# before the first iterator value is required.
def result_iterator():
try:
for future in fs:
if timeout is None:
yield future.result()
else:
yield future.result(end_time - time.time())
finally:
for future in fs:
future.cancel()
return result_iterator()
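    # Example of the ordering guarantee documented above (illustrative):
    #
    #     results = executor.map(pow, (2, 3, 10), (5, 2, 3))
    #     list(results)  # -> [32, 9, 1000]; the calls may run out of order,
    #                    #    but results are yielded in input order.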
def shutdown(self, wait=True):
"""Clean-up the resources associated with the Executor.
It is safe to call this method several times. Otherwise, no other
methods can be called after this one.
Args:
wait: If True then shutdown will not return until all running
futures have finished executing and the resources used by the
executor have been reclaimed.
"""
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown(wait=True)
return False
| lgpl-3.0 |
walterreade/scikit-learn | sklearn/neighbors/tests/test_kde.py | 80 | 5560 | import numpy as np
from sklearn.utils.testing import (assert_allclose, assert_raises,
assert_equal)
from sklearn.neighbors import KernelDensity, KDTree, NearestNeighbors
from sklearn.neighbors.ball_tree import kernel_norm
from sklearn.pipeline import make_pipeline
from sklearn.datasets import make_blobs
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel) / X.shape[0]
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kernel_density(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_features)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for bandwidth in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, bandwidth)
def check_results(kernel, bandwidth, atol, rtol):
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth,
atol=atol, rtol=rtol)
log_dens = kde.fit(X).score_samples(Y)
assert_allclose(np.exp(log_dens), dens_true,
atol=atol, rtol=max(1E-7, rtol))
assert_allclose(np.exp(kde.score(Y)),
np.prod(dens_true),
atol=atol, rtol=max(1E-7, rtol))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, bandwidth, atol, rtol)
def test_kernel_density_sampling(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
bandwidth = 0.2
for kernel in ['gaussian', 'tophat']:
# draw a tophat sample
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
samp = kde.sample(100)
assert_equal(X.shape, samp.shape)
# check that samples are in the right range
nbrs = NearestNeighbors(n_neighbors=1).fit(X)
dist, ind = nbrs.kneighbors(X, return_distance=True)
if kernel == 'tophat':
assert np.all(dist < bandwidth)
elif kernel == 'gaussian':
# 5 standard deviations is safe for 100 samples, but there's a
# very small chance this test could fail.
assert np.all(dist < 5 * bandwidth)
# check unsupported kernels
for kernel in ['epanechnikov', 'exponential', 'linear', 'cosine']:
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
assert_raises(NotImplementedError, kde.sample, 100)
# non-regression test: used to return a scalar
X = rng.randn(4, 1)
kde = KernelDensity(kernel="gaussian").fit(X)
assert_equal(kde.sample().shape, (1, 1))
def test_kde_algorithm_metric_choice():
# Smoke test for various metrics and algorithms
rng = np.random.RandomState(0)
X = rng.randn(10, 2) # 2 features required for haversine dist.
Y = rng.randn(10, 2)
for algorithm in ['auto', 'ball_tree', 'kd_tree']:
for metric in ['euclidean', 'minkowski', 'manhattan',
'chebyshev', 'haversine']:
if algorithm == 'kd_tree' and metric not in KDTree.valid_metrics:
assert_raises(ValueError, KernelDensity,
algorithm=algorithm, metric=metric)
else:
kde = KernelDensity(algorithm=algorithm, metric=metric)
kde.fit(X)
y_dens = kde.score_samples(Y)
assert_equal(y_dens.shape, Y.shape[:1])
def test_kde_score(n_samples=100, n_features=3):
pass
#FIXME
#np.random.seed(0)
#X = np.random.random((n_samples, n_features))
#Y = np.random.random((n_samples, n_features))
def test_kde_badargs():
assert_raises(ValueError, KernelDensity,
algorithm='blah')
assert_raises(ValueError, KernelDensity,
bandwidth=0)
assert_raises(ValueError, KernelDensity,
kernel='blah')
assert_raises(ValueError, KernelDensity,
metric='blah')
assert_raises(ValueError, KernelDensity,
algorithm='kd_tree', metric='blah')
def test_kde_pipeline_gridsearch():
# test that kde plays nice in pipelines and grid-searches
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
pipe1 = make_pipeline(StandardScaler(with_mean=False, with_std=False),
KernelDensity(kernel="gaussian"))
params = dict(kerneldensity__bandwidth=[0.001, 0.01, 0.1, 1, 10])
search = GridSearchCV(pipe1, param_grid=params, cv=5)
search.fit(X)
assert_equal(search.best_params_['kerneldensity__bandwidth'], .1)
| bsd-3-clause |
bkimmig/clumpy | clumpy/functions_em.py | 1 | 6506 | import numpy as np
from .py3 import *
# Monotonic non increasing function
def pav(y):
"""
PAV uses the pair adjacent violators method to produce a monotonic
smoothing of y
translated from matlab by Sean Collins (2006) as part of the EMAP toolbox
Parameters
----------
y: list
Returns
-------
v: list
"""
y = np.asarray(y)
assert y.ndim == 1
n_samples = len(y)
v = y.copy()
lvls = np.arange(n_samples)
lvlsets = np.c_[lvls, lvls]
flag = 1
while flag:
deriv = np.diff(v)
if np.all(deriv <= 0):
break
viol = np.where(deriv > 0)[0]
start = lvlsets[viol[0], 0]
last = lvlsets[viol[0] + 1, 1]
s = 0
n = last - start + 1
for i in range(start, last + 1):
s += v[i]
val = s / n
for i in range(start, last + 1):
v[i] = val
lvlsets[i, 0] = start
lvlsets[i, 1] = last
return v
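# Worked example for pav() (illustrative; float input avoids integer
# truncation when pooled values are averaged):
#
#     >>> pav(np.array([3.0, 1.0, 2.0]))
#     array([3. , 1.5, 1.5])
#
# The single violating increase at indices 1 -> 2 is pooled to its mean (1.5),
# leaving a monotonically non-increasing sequence.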
###############################################################################
# analytical solutions to the equations in Walker 2009
# use these in the EM algorithm to construct the method
def mean(sig, p_m, vec, err_vec, model=None):
"""
Walker 2009 equation 13
sig is passed in differently every time. Used to iterate and find the mean
value of the data.
Parameters
----------
sig: float
p_m: np.array
vec: np.array
err_vec: np.array
model: np.array
Returns
-------
mean: float
"""
if model is None:
model = np.ones(len(vec))
divisor = (1. + (err_vec**2/(sig*model)**2))
numerator = (p_m*vec)/divisor
denominator = (p_m)/divisor
return np.sum(numerator)/np.sum(denominator)
def variance(mean, sig, p_m, vec, err_vec, model=None):
"""
Walker 2009 equation 14
mean and sig are passed in differently every time.
Parameters
----------
mean: float
sig: float
p_m: np.array
vec: np.array
err_vec: np.array
model: np.array
Returns
-------
variance: float
"""
if model is None:
model = np.ones(len(vec))
divisor = (1.0 + (err_vec**2/(sig*model)**2))
numerator = (p_m*(vec - mean)**2)/divisor**2
denominator = (p_m*model**2)/divisor
return np.sum(numerator)/np.sum(denominator)
def mean_non(sig, p_m, vec, err_vec, model=None):
"""
Walker 2009 equation 13
sig is passed in differently every time. Used to iterate and find the mean
value of the data.
Parameters
----------
sig: float
p_m: np.array
vec: np.array
err_vec: np.array
model: np.array
Returns
-------
mean: float
"""
if model is None:
model = np.ones(len(vec))
divisor = (1. + (err_vec**2/sig**2))
numerator = ((1.0 - p_m)*vec)/divisor
denominator = (1.0 - p_m)/divisor
return np.sum(numerator)/np.sum(denominator)
def variance_non(mean, sig, p_m, vec, err_vec, model=None):
"""
Walker 2009 equation 14
mean and sig are passed in differently every time.
Parameters
----------
mean: float
sig: float
p_m: np.array
vec: np.array
err_vec: np.array
model: np.array
Returns
-------
variance: float
"""
if model is None:
model = np.ones(len(vec))
divisor = (1.0 + (err_vec**2/(sig*model)**2))
numerator = ((1.0 - p_m)*(vec - mean)**2)/divisor**2
denominator = ((1.0 - p_m)*model**2)/divisor
return np.sum(numerator)/np.sum(denominator)
###############################################################################
# probability distributions 1d
def p_normalized(sig, mean, vec, err_vec, model=None):
"""
Walker 2009 equation 9
vbar and sig0 are passed in differently every time.
Parameters
----------
sig: float
mean: float
vec: np.array
err_vec: np.array
model: np.array
Returns
-------
membership: np.array
"""
if model is None:
model = np.ones(len(vec))
two_pi = np.pi*2.
v_sig = ((sig*model)**2 + err_vec**2)
norm = 1.0/(np.sqrt(two_pi*v_sig))
v_ = ((vec - mean)**2/((sig*model)**2 + err_vec**2))
expon = np.exp(-0.5*(v_))
return norm*expon
def _p_contamination(vec_i, contamination_model):
"""
Walker 2009 equation 7
Parameters
----------
    vec_i: float
    contamination_model: np.array
    Returns
    -------
    contamination_probability: float
"""
sig_model = 20.0 # 20 for your paper
n_model = len(contamination_model)
norm = 1.0/np.sqrt(2.*np.pi*sig_model**2)
expon = np.exp((-0.5*(contamination_model - vec_i)**2)/sig_model**2)
over_N = (1.0/n_model)
return over_N*np.sum(norm*expon)
def p_contamination_non(vec, contamination_model):
"""
Walker 2009 equation 10
Parameters
----------
vec: np.array
contamination_model: np.array
Returns
-------
non_member_probabilities: np.array
"""
p_model = []
for i in xrange(len(vec)):
P_ = _p_contamination(vec[i], contamination_model)
p_model.append(P_)
return np.array(p_model)
###############################################################################
# likelihood equations
def normalized_probs(p_mem, p_non, p_a):
"""
Walker 2009 equation 11
Parameters
----------
p_mem: np.array
p_non: np.array
p_a: np.array
Returns
-------
norm_probs: np.array
"""
p_m = (p_mem*p_a)/(p_mem*p_a + p_non*(1.0 - p_a))
return p_m
def neg_log_likelihood(p_m, p_a, p_mem, p_non):
"""
negative log likelihood function
Parameters
----------
p_m: np.array
p_a: np.array
p_mem: np.array
p_non: np.array
Returns
-------
neg_log_like: float
"""
mem = p_mem*p_a
non = p_non*(1.0-p_a)
log_like_term1 = np.sum(p_m*np.log(np.where(mem != 0, mem, 1)))
log_like_term2 = np.sum((1.0 - p_m)*np.log(np.where(non != 0, non, 1)))
return -(log_like_term1 + log_like_term2)
def log_likelihood(p_m, p_a, p_mem, p_non):
"""
log likelihood function
Parameters
----------
p_m: np.array
p_a: np.array
p_mem: np.array
p_non: np.array
Returns
-------
log_like: float
"""
mem = p_mem*p_a
non = p_non*(1.0-p_a)
log_like_term1 = np.sum(p_m*np.log(np.where(mem != 0, mem, 1)))
log_like_term2 = np.sum((1.0-p_m)*np.log(np.where(non != 0, non, 1)))
return log_like_term1 + log_like_term2
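# Sketch of one EM-style update using the pieces above (variable names are
# hypothetical; vec/err_vec are the measurements and their errors, bg_model a
# contamination sample, p_a the per-star membership fractions):
#
#     p_mem = p_normalized(sig, mu, vec, err_vec)            # eq. 9
#     p_non = p_contamination_non(vec, bg_model)             # eq. 10
#     p_m = normalized_probs(p_mem, p_non, p_a)              # eq. 11 (E-step)
#     mu = mean(sig, p_m, vec, err_vec)                      # eq. 13 (M-step)
#     sig = np.sqrt(variance(mu, sig, p_m, vec, err_vec))    # eq. 14
#     nll = neg_log_likelihood(p_m, p_a, p_mem, p_non)       # convergence check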
| mit |
twitter/pants | tests/python/pants_test/init/repro_mixin.py | 1 | 1600 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from builtins import object, open
from pants.util.dirutil import safe_mkdir_for
class ReproMixin(object):
""" Additional helper methods for use in Repro tests"""
def add_file(self, root, path, content):
"""Add a file with specified contents
:param str root: Root directory for path.
:param str path: Path relative to root.
:param str content: Content to write to file.
"""
fullpath = os.path.join(root, path)
safe_mkdir_for(fullpath)
with open(fullpath, 'w') as outfile:
outfile.write(content)
def assert_not_exists(self, root, path):
"""Assert a file at relpath doesn't exist
:param str root: Root directory of path.
:param str path: Path relative to tar.gz.
:return: bool
"""
fullpath = os.path.join(root, path)
self.assertFalse(os.path.exists(fullpath))
def assert_file(self, root, path, expected_content=None):
""" Assert that a file exists with the content specified
:param str root: Root directory of path.
:param str path: Path relative to tar.gz.
:param str expected_content: file contents.
:return: bool
"""
fullpath = os.path.join(root, path)
self.assertTrue(os.path.isfile(fullpath))
if expected_content:
with open(fullpath, 'r') as infile:
content = infile.read()
self.assertEqual(expected_content, content)
| apache-2.0 |
NetApp/cinder | cinder/volume/drivers/windows/windows.py | 6 | 12570 | # Copyright 2012 Pedro Navarro Perez
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for Windows Server 2012
This driver requires the iSCSI target role to be installed
"""
import contextlib
import os
from os_win import utilsfactory
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import fileutils
from oslo_utils import units
from oslo_utils import uuidutils
from cinder.image import image_utils
from cinder.volume import driver
from cinder.volume import utils
LOG = logging.getLogger(__name__)
windows_opts = [
cfg.StrOpt('windows_iscsi_lun_path',
default='C:\iSCSIVirtualDisks',
help='Path to store VHD backed volumes'),
]
CONF = cfg.CONF
CONF.register_opts(windows_opts)
class WindowsDriver(driver.ISCSIDriver):
"""Executes volume driver commands on Windows Storage server."""
VERSION = '1.0.0'
# ThirdPartySystems wiki page
CI_WIKI_NAME = "Microsoft_iSCSI_CI"
def __init__(self, *args, **kwargs):
super(WindowsDriver, self).__init__(*args, **kwargs)
self.configuration = kwargs.get('configuration', None)
if self.configuration:
self.configuration.append_config_values(windows_opts)
self._vhdutils = utilsfactory.get_vhdutils()
self._tgt_utils = utilsfactory.get_iscsi_target_utils()
self._hostutils = utilsfactory.get_hostutils()
def do_setup(self, context):
"""Setup the Windows Volume driver.
Called one time by the manager after the driver is loaded.
Validate the flags we care about
"""
fileutils.ensure_tree(self.configuration.windows_iscsi_lun_path)
fileutils.ensure_tree(CONF.image_conversion_dir)
def check_for_setup_error(self):
"""Check that the driver is working and can communicate."""
self._tgt_utils.get_portal_locations(available_only=True,
fail_if_none_found=True)
def _get_host_information(self, volume):
"""Getting the portal and port information."""
# TODO(lpetrut): properly handle multiple existing portals, also
# use the iSCSI traffic addresses config options.
target_name = self._get_target_name(volume)
available_portal_location = self._tgt_utils.get_portal_locations()[0]
properties = self._tgt_utils.get_target_information(target_name)
# Note(lpetrut): the WT_Host CHAPSecret field cannot be accessed
# for security reasons.
auth = volume['provider_auth']
if auth:
(auth_method, auth_username, auth_secret) = auth.split()
properties['auth_method'] = auth_method
properties['auth_username'] = auth_username
properties['auth_password'] = auth_secret
properties['target_discovered'] = False
properties['target_portal'] = available_portal_location
properties['target_lun'] = 0
properties['volume_id'] = volume['id']
return properties
def initialize_connection(self, volume, connector):
"""Driver entry point to attach a volume to an instance."""
initiator_name = connector['initiator']
target_name = volume['provider_location']
self._tgt_utils.associate_initiator_with_iscsi_target(initiator_name,
target_name)
properties = self._get_host_information(volume)
return {
'driver_volume_type': 'iscsi',
'data': properties,
}
def terminate_connection(self, volume, connector, **kwargs):
"""Driver entry point to unattach a volume from an instance.
Unmask the LUN on the storage system so the given initiator can no
longer access it.
"""
initiator_name = connector['initiator']
target_name = volume['provider_location']
self._tgt_utils.deassociate_initiator(initiator_name, target_name)
def create_volume(self, volume):
"""Driver entry point for creating a new volume."""
vhd_path = self.local_path(volume)
vol_name = volume['name']
vol_size_mb = volume['size'] * 1024
self._tgt_utils.create_wt_disk(vhd_path, vol_name,
size_mb=vol_size_mb)
def local_path(self, volume, disk_format=None):
base_vhd_folder = self.configuration.windows_iscsi_lun_path
if not disk_format:
disk_format = self._tgt_utils.get_supported_disk_format()
disk_fname = "%s.%s" % (volume['name'], disk_format)
return os.path.join(base_vhd_folder, disk_fname)
def delete_volume(self, volume):
"""Driver entry point for destroying existing volumes."""
vol_name = volume['name']
vhd_path = self.local_path(volume)
self._tgt_utils.remove_wt_disk(vol_name)
fileutils.delete_if_exists(vhd_path)
def create_snapshot(self, snapshot):
"""Driver entry point for creating a snapshot."""
# Getting WT_Snapshot class
vol_name = snapshot['volume_name']
snapshot_name = snapshot['name']
self._tgt_utils.create_snapshot(vol_name, snapshot_name)
def create_volume_from_snapshot(self, volume, snapshot):
"""Driver entry point for exporting snapshots as volumes."""
snapshot_name = snapshot['name']
vol_name = volume['name']
vhd_path = self.local_path(volume)
self._tgt_utils.export_snapshot(snapshot_name, vhd_path)
self._tgt_utils.import_wt_disk(vhd_path, vol_name)
def delete_snapshot(self, snapshot):
"""Driver entry point for deleting a snapshot."""
snapshot_name = snapshot['name']
self._tgt_utils.delete_snapshot(snapshot_name)
def ensure_export(self, context, volume):
# iSCSI targets exported by WinTarget persist after host reboot.
pass
def _get_target_name(self, volume):
return "%s%s" % (self.configuration.iscsi_target_prefix,
volume['name'])
def create_export(self, context, volume, connector):
"""Driver entry point to get the export info for a new volume."""
target_name = self._get_target_name(volume)
updates = {}
if not self._tgt_utils.iscsi_target_exists(target_name):
self._tgt_utils.create_iscsi_target(target_name)
updates['provider_location'] = target_name
if self.configuration.use_chap_auth:
chap_username = (self.configuration.chap_username or
utils.generate_username())
chap_password = (self.configuration.chap_password or
utils.generate_password())
self._tgt_utils.set_chap_credentials(target_name,
chap_username,
chap_password)
updates['provider_auth'] = ' '.join(('CHAP',
chap_username,
chap_password))
# This operation is idempotent
self._tgt_utils.add_disk_to_target(volume['name'], target_name)
return updates
def remove_export(self, context, volume):
"""Driver entry point to remove an export for a volume."""
target_name = self._get_target_name(volume)
self._tgt_utils.delete_iscsi_target(target_name)
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and create a volume using it."""
# Convert to VHD and file back to VHD
vhd_type = self._tgt_utils.get_supported_vhd_type()
with image_utils.temporary_file(suffix='.vhd') as tmp:
volume_path = self.local_path(volume)
image_utils.fetch_to_vhd(context, image_service, image_id, tmp,
self.configuration.volume_dd_blocksize)
# The vhd must be disabled and deleted before being replaced with
# the desired image.
self._tgt_utils.change_wt_disk_status(volume['name'],
enabled=False)
os.unlink(volume_path)
self._vhdutils.convert_vhd(tmp, volume_path,
vhd_type)
self._vhdutils.resize_vhd(volume_path,
volume['size'] << 30,
is_file_max_size=False)
self._tgt_utils.change_wt_disk_status(volume['name'],
enabled=True)
@contextlib.contextmanager
def _temporary_snapshot(self, volume_name):
try:
snap_uuid = uuidutils.generate_uuid()
snapshot_name = '%s-tmp-snapshot-%s' % (volume_name, snap_uuid)
self._tgt_utils.create_snapshot(volume_name, snapshot_name)
yield snapshot_name
finally:
self._tgt_utils.delete_snapshot(snapshot_name)
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
disk_format = self._tgt_utils.get_supported_disk_format()
temp_vhd_path = os.path.join(CONF.image_conversion_dir,
str(image_meta['id']) + '.' + disk_format)
try:
with self._temporary_snapshot(volume['name']) as tmp_snap_name:
# qemu-img cannot access VSS snapshots, for which reason it
# must be exported first.
self._tgt_utils.export_snapshot(tmp_snap_name, temp_vhd_path)
image_utils.upload_volume(context, image_service, image_meta,
temp_vhd_path, 'vhd')
finally:
fileutils.delete_if_exists(temp_vhd_path)
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
src_vol_name = src_vref['name']
vol_name = volume['name']
vol_size = volume['size']
new_vhd_path = self.local_path(volume)
with self._temporary_snapshot(src_vol_name) as tmp_snap_name:
self._tgt_utils.export_snapshot(tmp_snap_name, new_vhd_path)
self._vhdutils.resize_vhd(new_vhd_path, vol_size << 30,
is_file_max_size=False)
self._tgt_utils.import_wt_disk(new_vhd_path, vol_name)
def _get_capacity_info(self):
drive = os.path.splitdrive(
self.configuration.windows_iscsi_lun_path)[0]
(size, free_space) = self._hostutils.get_volume_info(drive)
total_gb = size / units.Gi
free_gb = free_space / units.Gi
return (total_gb, free_gb)
def _update_volume_stats(self):
"""Retrieve stats info for Windows device."""
LOG.debug("Updating volume stats")
total_gb, free_gb = self._get_capacity_info()
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data["volume_backend_name"] = backend_name or self.__class__.__name__
data["vendor_name"] = 'Microsoft'
data["driver_version"] = self.VERSION
data["storage_protocol"] = 'iSCSI'
data['total_capacity_gb'] = total_gb
data['free_capacity_gb'] = free_gb
data['reserved_percentage'] = self.configuration.reserved_percentage
data['QoS_support'] = False
self._stats = data
def extend_volume(self, volume, new_size):
"""Extend an Existing Volume."""
old_size = volume['size']
LOG.debug("Extend volume from %(old_size)s GB to %(new_size)s GB.",
{'old_size': old_size, 'new_size': new_size})
additional_size_mb = (new_size - old_size) * 1024
self._tgt_utils.extend_wt_disk(volume['name'], additional_size_mb)
| apache-2.0 |
rfdougherty/dipy | dipy/io/dpy.py | 11 | 4636 | ''' A class for handling large tractography datasets.
It is built using the pytables tools which in turn implement
key features of the HDF5 (hierarchical data format) API [1]_.
References
----------
.. [1] http://www.hdfgroup.org/HDF5/doc/H5.intro.html
'''
import numpy as np
# Conditional import machinery for pytables
from ..utils.optpkg import optional_package
# Allow import, but disable doctests, if we don't have pytables
tables, have_tables, setup_module = optional_package('tables')
# Make sure not to carry across setup module from * import
__all__ = ['Dpy']
class Dpy(object):
def __init__(self,fname,mode='r',compression=0):
''' Advanced storage system for tractography based on HDF5
Parameters
------------
fname : str, full filename
mode : 'r' read
'w' write
'r+' read and write only if file already exists
'a' read and write even if file doesn't exist (not used yet)
compression : 0 no compression to 9 maximum compression
Examples
----------
>>> import os
>>> from tempfile import mkstemp #temp file
>>> from dipy.io.dpy import Dpy
>>> fd,fname = mkstemp()
>>> fname = fname + '.dpy' #add correct extension
>>> dpw = Dpy(fname,'w')
>>> A=np.ones((5,3))
>>> B=2*A.copy()
>>> C=3*A.copy()
>>> dpw.write_track(A)
>>> dpw.write_track(B)
>>> dpw.write_track(C)
>>> dpw.close()
>>> dpr = Dpy(fname,'r')
>>> A=dpr.read_track()
>>> B=dpr.read_track()
>>> T=dpr.read_tracksi([0,1,2,0,0,2])
>>> dpr.close()
>>> os.remove(fname) #delete file from disk
'''
self.mode=mode
self.f = tables.openFile(fname, mode = self.mode)
self.N = 5*10**9
self.compression = compression
if self.mode=='w':
self.streamlines=self.f.createGroup(self.f.root,'streamlines')
#create a version number
self.version=self.f.createArray(self.f.root,'version',['0.0.1'],'Dpy Version Number')
self.tracks = self.f.createEArray(self.f.root.streamlines, 'tracks',tables.Float32Atom(), (0, 3),
"scalar Float32 earray", tables.Filters(self.compression),expectedrows=self.N)
self.offsets = self.f.createEArray(self.f.root.streamlines, 'offsets',tables.Int64Atom(), (0,),
"scalar Int64 earray", tables.Filters(self.compression), expectedrows=self.N+1)
self.curr_pos=0
self.offsets.append(np.array([self.curr_pos]).astype(np.int64))
if self.mode=='r':
self.tracks=self.f.root.streamlines.tracks
self.offsets=self.f.root.streamlines.offsets
self.track_no=len(self.offsets)-1
self.offs_pos=0
def version(self):
ver=self.f.root.version[:]
return ver[0]
def write_track(self,track):
        ''' write one track each time
'''
self.tracks.append(track.astype(np.float32))
self.curr_pos+=track.shape[0]
self.offsets.append(np.array([self.curr_pos]).astype(np.int64))
def write_tracks(self,T):
''' write many tracks together
'''
for track in T:
self.tracks.append(track.astype(np.float32))
self.curr_pos+=track.shape[0]
self.offsets.append(np.array([self.curr_pos]).astype(np.int64))
def read_track(self):
''' read one track each time
'''
off0,off1=self.offsets[self.offs_pos:self.offs_pos+2]
self.offs_pos+=1
return self.tracks[off0:off1]
def read_tracksi(self,indices):
''' read tracks with specific indices
'''
T=[]
for i in indices:
#print(self.offsets[i:i+2])
off0,off1=self.offsets[i:i+2]
T.append(self.tracks[off0:off1])
return T
def read_tracks(self):
''' read the entire tractography
'''
I=self.offsets[:]
TR=self.tracks[:]
T=[]
for i in range(len(I)-1):
off0,off1=I[i:i+2]
T.append(TR[off0:off1])
return T
def close(self):
self.f.close()
if __name__ == '__main__':
pass
| bsd-3-clause |
edx/edx-platform | common/djangoapps/track/management/tracked_command.py | 3 | 2220 | """Provides management command calling info to tracking context."""
from django.core.management.base import BaseCommand
from eventtracking import tracker
class TrackedCommand(BaseCommand): # lint-amnesty, pylint: disable=abstract-method
"""
Provides management command calling info to tracking context.
Information provided to context includes the following value:
'command': the program name and the subcommand used to run a management command.
In future, other values (such as args and options) could be added as needed.
An example tracking log entry resulting from running the 'create_user' management command:
{
"username": "anonymous",
"host": "",
"event_source": "server",
"event_type": "edx.course.enrollment.activated",
"context": {
"course_id": "edX/Open_DemoX/edx_demo_course",
"org_id": "edX",
"command": "./manage.py create_user",
},
"time": "2014-01-06T15:59:49.599522+00:00",
"ip": "",
"event": {
"course_id": "edX/Open_DemoX/edx_demo_course",
"user_id": 29,
"mode": "verified"
},
"agent": "",
"page": null
}
The name of the context used to add (and remove) these values is "edx.mgmt.command".
The context name is used to allow the context additions to be scoped, but doesn't
appear in the context itself.
"""
prog_name = 'unknown'
def create_parser(self, prog_name, subcommand): # lint-amnesty, pylint: disable=arguments-differ
"""Wraps create_parser to snag command line info."""
self.prog_name = f"{prog_name} {subcommand}"
return super().create_parser(prog_name, subcommand) # lint-amnesty, pylint: disable=super-with-arguments
def execute(self, *args, **options):
"""Wraps base execute() to add command line to tracking context."""
context = {
'command': self.prog_name,
}
COMMAND_CONTEXT_NAME = 'edx.mgmt.command'
with tracker.get_tracker().context(COMMAND_CONTEXT_NAME, context):
super().execute(*args, **options) # lint-amnesty, pylint: disable=super-with-arguments
| agpl-3.0 |
dmlc/mxnet | python/mxnet/gluon/model_zoo/vision/resnet.py | 5 | 19976 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable= arguments-differ
"""ResNets, implemented in Gluon."""
from __future__ import division
__all__ = ['ResNetV1', 'ResNetV2',
'BasicBlockV1', 'BasicBlockV2',
'BottleneckV1', 'BottleneckV2',
'resnet18_v1', 'resnet34_v1', 'resnet50_v1', 'resnet101_v1', 'resnet152_v1',
'resnet18_v2', 'resnet34_v2', 'resnet50_v2', 'resnet101_v2', 'resnet152_v2',
'get_resnet']
import os
from ....context import cpu
from ...block import HybridBlock
from ... import nn
from .... import base
# Helpers
def _conv3x3(channels, stride, in_channels):
return nn.Conv2D(channels, kernel_size=3, strides=stride, padding=1,
use_bias=False, in_channels=in_channels)
# Blocks
class BasicBlockV1(HybridBlock):
r"""BasicBlock V1 from `"Deep Residual Learning for Image Recognition"
<http://arxiv.org/abs/1512.03385>`_ paper.
This is used for ResNet V1 for 18, 34 layers.
Parameters
----------
channels : int
Number of output channels.
stride : int
Stride size.
downsample : bool, default False
Whether to downsample the input.
in_channels : int, default 0
Number of input channels. Default is 0, to infer from the graph.
"""
def __init__(self, channels, stride, downsample=False, in_channels=0, **kwargs):
super(BasicBlockV1, self).__init__(**kwargs)
self.body = nn.HybridSequential(prefix='')
self.body.add(_conv3x3(channels, stride, in_channels))
self.body.add(nn.BatchNorm())
self.body.add(nn.Activation('relu'))
self.body.add(_conv3x3(channels, 1, channels))
self.body.add(nn.BatchNorm())
if downsample:
self.downsample = nn.HybridSequential(prefix='')
self.downsample.add(nn.Conv2D(channels, kernel_size=1, strides=stride,
use_bias=False, in_channels=in_channels))
self.downsample.add(nn.BatchNorm())
else:
self.downsample = None
def hybrid_forward(self, F, x):
residual = x
x = self.body(x)
if self.downsample:
residual = self.downsample(residual)
x = F.Activation(residual+x, act_type='relu')
return x
class BottleneckV1(HybridBlock):
r"""Bottleneck V1 from `"Deep Residual Learning for Image Recognition"
<http://arxiv.org/abs/1512.03385>`_ paper.
This is used for ResNet V1 for 50, 101, 152 layers.
Parameters
----------
channels : int
Number of output channels.
stride : int
Stride size.
downsample : bool, default False
Whether to downsample the input.
in_channels : int, default 0
Number of input channels. Default is 0, to infer from the graph.
"""
def __init__(self, channels, stride, downsample=False, in_channels=0, **kwargs):
super(BottleneckV1, self).__init__(**kwargs)
self.body = nn.HybridSequential(prefix='')
self.body.add(nn.Conv2D(channels//4, kernel_size=1, strides=stride))
self.body.add(nn.BatchNorm())
self.body.add(nn.Activation('relu'))
self.body.add(_conv3x3(channels//4, 1, channels//4))
self.body.add(nn.BatchNorm())
self.body.add(nn.Activation('relu'))
self.body.add(nn.Conv2D(channels, kernel_size=1, strides=1))
self.body.add(nn.BatchNorm())
if downsample:
self.downsample = nn.HybridSequential(prefix='')
self.downsample.add(nn.Conv2D(channels, kernel_size=1, strides=stride,
use_bias=False, in_channels=in_channels))
self.downsample.add(nn.BatchNorm())
else:
self.downsample = None
def hybrid_forward(self, F, x):
residual = x
x = self.body(x)
if self.downsample:
residual = self.downsample(residual)
x = F.Activation(x + residual, act_type='relu')
return x
class BasicBlockV2(HybridBlock):
r"""BasicBlock V2 from
`"Identity Mappings in Deep Residual Networks"
<https://arxiv.org/abs/1603.05027>`_ paper.
This is used for ResNet V2 for 18, 34 layers.
Parameters
----------
channels : int
Number of output channels.
stride : int
Stride size.
downsample : bool, default False
Whether to downsample the input.
in_channels : int, default 0
Number of input channels. Default is 0, to infer from the graph.
"""
def __init__(self, channels, stride, downsample=False, in_channels=0, **kwargs):
super(BasicBlockV2, self).__init__(**kwargs)
self.bn1 = nn.BatchNorm()
self.conv1 = _conv3x3(channels, stride, in_channels)
self.bn2 = nn.BatchNorm()
self.conv2 = _conv3x3(channels, 1, channels)
if downsample:
self.downsample = nn.Conv2D(channels, 1, stride, use_bias=False,
in_channels=in_channels)
else:
self.downsample = None
def hybrid_forward(self, F, x):
residual = x
x = self.bn1(x)
x = F.Activation(x, act_type='relu')
if self.downsample:
residual = self.downsample(x)
x = self.conv1(x)
x = self.bn2(x)
x = F.Activation(x, act_type='relu')
x = self.conv2(x)
return x + residual
class BottleneckV2(HybridBlock):
r"""Bottleneck V2 from
`"Identity Mappings in Deep Residual Networks"
<https://arxiv.org/abs/1603.05027>`_ paper.
This is used for ResNet V2 for 50, 101, 152 layers.
Parameters
----------
channels : int
Number of output channels.
stride : int
Stride size.
downsample : bool, default False
Whether to downsample the input.
in_channels : int, default 0
Number of input channels. Default is 0, to infer from the graph.
"""
def __init__(self, channels, stride, downsample=False, in_channels=0, **kwargs):
super(BottleneckV2, self).__init__(**kwargs)
self.bn1 = nn.BatchNorm()
self.conv1 = nn.Conv2D(channels//4, kernel_size=1, strides=1, use_bias=False)
self.bn2 = nn.BatchNorm()
self.conv2 = _conv3x3(channels//4, stride, channels//4)
self.bn3 = nn.BatchNorm()
self.conv3 = nn.Conv2D(channels, kernel_size=1, strides=1, use_bias=False)
if downsample:
self.downsample = nn.Conv2D(channels, 1, stride, use_bias=False,
in_channels=in_channels)
else:
self.downsample = None
def hybrid_forward(self, F, x):
residual = x
x = self.bn1(x)
x = F.Activation(x, act_type='relu')
if self.downsample:
residual = self.downsample(x)
x = self.conv1(x)
x = self.bn2(x)
x = F.Activation(x, act_type='relu')
x = self.conv2(x)
x = self.bn3(x)
x = F.Activation(x, act_type='relu')
x = self.conv3(x)
return x + residual
# Nets
class ResNetV1(HybridBlock):
r"""ResNet V1 model from
`"Deep Residual Learning for Image Recognition"
<http://arxiv.org/abs/1512.03385>`_ paper.
Parameters
----------
block : HybridBlock
Class for the residual block. Options are BasicBlockV1, BottleneckV1.
layers : list of int
Numbers of layers in each block
channels : list of int
Numbers of channels in each block. Length should be one larger than layers list.
classes : int, default 1000
Number of classification classes.
thumbnail : bool, default False
Enable thumbnail.
"""
def __init__(self, block, layers, channels, classes=1000, thumbnail=False, **kwargs):
super(ResNetV1, self).__init__(**kwargs)
assert len(layers) == len(channels) - 1
with self.name_scope():
self.features = nn.HybridSequential(prefix='')
if thumbnail:
self.features.add(_conv3x3(channels[0], 1, 0))
else:
self.features.add(nn.Conv2D(channels[0], 7, 2, 3, use_bias=False))
self.features.add(nn.BatchNorm())
self.features.add(nn.Activation('relu'))
self.features.add(nn.MaxPool2D(3, 2, 1))
for i, num_layer in enumerate(layers):
stride = 1 if i == 0 else 2
self.features.add(self._make_layer(block, num_layer, channels[i+1],
stride, i+1, in_channels=channels[i]))
self.features.add(nn.GlobalAvgPool2D())
self.output = nn.Dense(classes, in_units=channels[-1])
def _make_layer(self, block, layers, channels, stride, stage_index, in_channels=0):
layer = nn.HybridSequential(prefix='stage%d_'%stage_index)
with layer.name_scope():
layer.add(block(channels, stride, channels != in_channels, in_channels=in_channels,
prefix=''))
for _ in range(layers-1):
layer.add(block(channels, 1, False, in_channels=channels, prefix=''))
return layer
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
class ResNetV2(HybridBlock):
r"""ResNet V2 model from
`"Identity Mappings in Deep Residual Networks"
<https://arxiv.org/abs/1603.05027>`_ paper.
Parameters
----------
block : HybridBlock
Class for the residual block. Options are BasicBlockV1, BottleneckV1.
layers : list of int
Numbers of layers in each block
channels : list of int
Numbers of channels in each block. Length should be one larger than layers list.
classes : int, default 1000
Number of classification classes.
thumbnail : bool, default False
Enable thumbnail.
"""
def __init__(self, block, layers, channels, classes=1000, thumbnail=False, **kwargs):
super(ResNetV2, self).__init__(**kwargs)
assert len(layers) == len(channels) - 1
with self.name_scope():
self.features = nn.HybridSequential(prefix='')
self.features.add(nn.BatchNorm(scale=False, center=False))
if thumbnail:
self.features.add(_conv3x3(channels[0], 1, 0))
else:
self.features.add(nn.Conv2D(channels[0], 7, 2, 3, use_bias=False))
self.features.add(nn.BatchNorm())
self.features.add(nn.Activation('relu'))
self.features.add(nn.MaxPool2D(3, 2, 1))
in_channels = channels[0]
for i, num_layer in enumerate(layers):
stride = 1 if i == 0 else 2
self.features.add(self._make_layer(block, num_layer, channels[i+1],
stride, i+1, in_channels=in_channels))
in_channels = channels[i+1]
self.features.add(nn.BatchNorm())
self.features.add(nn.Activation('relu'))
self.features.add(nn.GlobalAvgPool2D())
self.features.add(nn.Flatten())
self.output = nn.Dense(classes, in_units=in_channels)
def _make_layer(self, block, layers, channels, stride, stage_index, in_channels=0):
layer = nn.HybridSequential(prefix='stage%d_'%stage_index)
with layer.name_scope():
layer.add(block(channels, stride, channels != in_channels, in_channels=in_channels,
prefix=''))
for _ in range(layers-1):
layer.add(block(channels, 1, False, in_channels=channels, prefix=''))
return layer
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
# Specification
resnet_spec = {18: ('basic_block', [2, 2, 2, 2], [64, 64, 128, 256, 512]),
34: ('basic_block', [3, 4, 6, 3], [64, 64, 128, 256, 512]),
50: ('bottle_neck', [3, 4, 6, 3], [64, 256, 512, 1024, 2048]),
101: ('bottle_neck', [3, 4, 23, 3], [64, 256, 512, 1024, 2048]),
152: ('bottle_neck', [3, 8, 36, 3], [64, 256, 512, 1024, 2048])}
resnet_net_versions = [ResNetV1, ResNetV2]
resnet_block_versions = [{'basic_block': BasicBlockV1, 'bottle_neck': BottleneckV1},
{'basic_block': BasicBlockV2, 'bottle_neck': BottleneckV2}]
# Constructor
def get_resnet(version, num_layers, pretrained=False, ctx=cpu(),
root=os.path.join(base.data_dir(), 'models'), **kwargs):
r"""ResNet V1 model from `"Deep Residual Learning for Image Recognition"
<http://arxiv.org/abs/1512.03385>`_ paper.
ResNet V2 model from `"Identity Mappings in Deep Residual Networks"
<https://arxiv.org/abs/1603.05027>`_ paper.
Parameters
----------
version : int
Version of ResNet. Options are 1, 2.
num_layers : int
Numbers of layers. Options are 18, 34, 50, 101, 152.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default $MXNET_HOME/models
Location for keeping the model parameters.
"""
assert num_layers in resnet_spec, \
"Invalid number of layers: %d. Options are %s"%(
num_layers, str(resnet_spec.keys()))
block_type, layers, channels = resnet_spec[num_layers]
assert version >= 1 and version <= 2, \
"Invalid resnet version: %d. Options are 1 and 2."%version
resnet_class = resnet_net_versions[version-1]
block_class = resnet_block_versions[version-1][block_type]
net = resnet_class(block_class, layers, channels, **kwargs)
if pretrained:
from ..model_store import get_model_file
net.load_parameters(get_model_file('resnet%d_v%d'%(num_layers, version),
root=root), ctx=ctx)
return net
def resnet18_v1(**kwargs):
r"""ResNet-18 V1 model from `"Deep Residual Learning for Image Recognition"
<http://arxiv.org/abs/1512.03385>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
"""
return get_resnet(1, 18, **kwargs)
def resnet34_v1(**kwargs):
r"""ResNet-34 V1 model from `"Deep Residual Learning for Image Recognition"
<http://arxiv.org/abs/1512.03385>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
"""
return get_resnet(1, 34, **kwargs)
def resnet50_v1(**kwargs):
r"""ResNet-50 V1 model from `"Deep Residual Learning for Image Recognition"
<http://arxiv.org/abs/1512.03385>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
"""
return get_resnet(1, 50, **kwargs)
def resnet101_v1(**kwargs):
r"""ResNet-101 V1 model from `"Deep Residual Learning for Image Recognition"
<http://arxiv.org/abs/1512.03385>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
"""
return get_resnet(1, 101, **kwargs)
def resnet152_v1(**kwargs):
r"""ResNet-152 V1 model from `"Deep Residual Learning for Image Recognition"
<http://arxiv.org/abs/1512.03385>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
"""
return get_resnet(1, 152, **kwargs)
def resnet18_v2(**kwargs):
r"""ResNet-18 V2 model from `"Identity Mappings in Deep Residual Networks"
<https://arxiv.org/abs/1603.05027>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
"""
return get_resnet(2, 18, **kwargs)
def resnet34_v2(**kwargs):
r"""ResNet-34 V2 model from `"Identity Mappings in Deep Residual Networks"
<https://arxiv.org/abs/1603.05027>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
"""
return get_resnet(2, 34, **kwargs)
def resnet50_v2(**kwargs):
r"""ResNet-50 V2 model from `"Identity Mappings in Deep Residual Networks"
<https://arxiv.org/abs/1603.05027>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
"""
return get_resnet(2, 50, **kwargs)
def resnet101_v2(**kwargs):
r"""ResNet-101 V2 model from `"Identity Mappings in Deep Residual Networks"
<https://arxiv.org/abs/1603.05027>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
"""
return get_resnet(2, 101, **kwargs)
def resnet152_v2(**kwargs):
r"""ResNet-152 V2 model from `"Identity Mappings in Deep Residual Networks"
<https://arxiv.org/abs/1603.05027>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
"""
return get_resnet(2, 152, **kwargs)
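# --- Usage sketch (editorial addition, not part of the upstream module) ---
# A minimal, hedged example of how the constructors above are typically used.
# It assumes an MXNet installation where ``mxnet.nd.random.uniform`` and
# Gluon's ``initialize()`` are available (API names can differ slightly
# between MXNet versions); the (1, 3, 224, 224) input shape is only an
# illustrative ImageNet-style guess.
if __name__ == '__main__':
    from mxnet import nd
    # Build an 18-layer ResNet V1 with randomly initialized weights.
    net = get_resnet(1, 18)
    net.initialize()
    # One fake RGB image; the classifier output should have shape (1, 1000).
    x = nd.random.uniform(shape=(1, 3, 224, 224))
    print(net(x).shape)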
| apache-2.0 |
neumerance/cloudloon2 | .venv/lib/python2.7/site-packages/django/contrib/staticfiles/management/commands/runserver.py | 243 | 1343 | from optparse import make_option
from django.conf import settings
from django.core.management.commands.runserver import Command as RunserverCommand
from django.contrib.staticfiles.handlers import StaticFilesHandler
class Command(RunserverCommand):
option_list = RunserverCommand.option_list + (
make_option('--nostatic', action="store_false", dest='use_static_handler', default=True,
help='Tells Django to NOT automatically serve static files at STATIC_URL.'),
make_option('--insecure', action="store_true", dest='insecure_serving', default=False,
help='Allows serving static files even if DEBUG is False.'),
)
help = "Starts a lightweight Web server for development and also serves static files."
def get_handler(self, *args, **options):
"""
Returns the static files serving handler wrapping the default handler,
if static files should be served. Otherwise just returns the default
handler.
"""
handler = super(Command, self).get_handler(*args, **options)
use_static_handler = options.get('use_static_handler', True)
insecure_serving = options.get('insecure_serving', False)
if use_static_handler and (settings.DEBUG or insecure_serving):
return StaticFilesHandler(handler)
return handler
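# --- Usage sketch (editorial addition) ---
# From the command line this command is typically driven as
#     python manage.py runserver --nostatic
#     python manage.py runserver --insecure
# The wrapping performed in get_handler() is roughly what the hedged helper
# below does by hand.  get_wsgi_application() requires DJANGO_SETTINGS_MODULE
# to be configured, so this is illustrative rather than something Django
# ships in this module.
def _example_static_wsgi_app():
    from django.core.wsgi import get_wsgi_application
    return StaticFilesHandler(get_wsgi_application())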
| apache-2.0 |
Alwnikrotikz/cortex-vfx | test/IECore/ImageCompositeOpTest.py | 12 | 3848 | ##########################################################################
#
# Copyright (c) 2008-2009, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import math
import unittest
from IECore import *
class ImageCompositeOpTest( unittest.TestCase ) :
def __test( self, operation, expectedResultFileName ) :
op = ImageCompositeOp()
imageA = Reader.create( "test/IECore/data/exrFiles/checker1Premult.exr" ).read()
imageB = Reader.create( "test/IECore/data/exrFiles/checker2Premult.exr" ).read()
result = op(
input = imageB,
imageA = imageA,
operation = operation
)
expectedResult = Reader.create( expectedResultFileName ).read()
diffOp = ImageDiffOp()
diff = diffOp(
imageA = result,
imageB = expectedResult
)
self.failIf( diff.value )
def testConstruction( self ) :
op = ImageCompositeOp()
self.assertEqual( op.parameters()["operation"].getValue().value, ImageCompositeOp.Operation.Over )
self.assertEqual( op.parameters()["alphaChannelName"].getValue().value, "A" )
self.assertEqual( op.parameters()["channels"].getValue(), StringVectorData( [ "R", "G", "B" ] ) )
def testChannelSubset( self ):
op = ImageCompositeOp()
imageA = Reader.create( "test/IECore/data/exrFiles/checker1Premult.exr" ).read()
imageB = Reader.create( "test/IECore/data/exrFiles/checker2Premult.exr" ).read()
result = op(
input = imageB,
imageA = imageA,
channels = StringVectorData( [ "R", "G" ] ),
operation = ImageCompositeOp.Operation.Over
)
self.assert_( result.arePrimitiveVariablesValid() )
def testOver( self ) :
self.__test( ImageCompositeOp.Operation.Over, "test/IECore/data/expectedResults/imageCompositeOpOver.exr" )
def testMax( self ) :
self.__test( ImageCompositeOp.Operation.Max, "test/IECore/data/expectedResults/imageCompositeOpMax.exr" )
def testMin( self ) :
self.__test( ImageCompositeOp.Operation.Min, "test/IECore/data/expectedResults/imageCompositeOpMin.exr" )
def testMultiply( self ) :
self.__test( ImageCompositeOp.Operation.Multiply, "test/IECore/data/expectedResults/imageCompositeOpMultiply.exr" )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
avneesh91/django | django/contrib/gis/feeds.py | 62 | 5733 | from django.contrib.syndication.views import Feed as BaseFeed
from django.utils.feedgenerator import Atom1Feed, Rss201rev2Feed
class GeoFeedMixin:
"""
This mixin provides the necessary routines for SyndicationFeed subclasses
to produce simple GeoRSS or W3C Geo elements.
"""
def georss_coords(self, coords):
"""
In GeoRSS coordinate pairs are ordered by lat/lon and separated by
a single white space. Given a tuple of coordinates, return a string
GeoRSS representation.
"""
return ' '.join('%f %f' % (coord[1], coord[0]) for coord in coords)
def add_georss_point(self, handler, coords, w3c_geo=False):
"""
Adds a GeoRSS point with the given coords using the given handler.
Handles the differences between simple GeoRSS and the more popular
W3C Geo specification.
"""
if w3c_geo:
lon, lat = coords[:2]
handler.addQuickElement('geo:lat', '%f' % lat)
handler.addQuickElement('geo:lon', '%f' % lon)
else:
handler.addQuickElement('georss:point', self.georss_coords((coords,)))
def add_georss_element(self, handler, item, w3c_geo=False):
"""Add a GeoRSS XML element using the given item and handler."""
# Getting the Geometry object.
geom = item.get('geometry')
if geom is not None:
if isinstance(geom, (list, tuple)):
# Special case if a tuple/list was passed in. The tuple may be
# a point or a box
box_coords = None
if isinstance(geom[0], (list, tuple)):
# Box: ( (X0, Y0), (X1, Y1) )
if len(geom) == 2:
box_coords = geom
else:
raise ValueError('Only should be two sets of coordinates.')
else:
if len(geom) == 2:
# Point: (X, Y)
self.add_georss_point(handler, geom, w3c_geo=w3c_geo)
elif len(geom) == 4:
# Box: (X0, Y0, X1, Y1)
box_coords = (geom[:2], geom[2:])
else:
raise ValueError('Only should be 2 or 4 numeric elements.')
# If a GeoRSS box was given via tuple.
if box_coords is not None:
if w3c_geo:
raise ValueError('Cannot use simple GeoRSS box in W3C Geo feeds.')
handler.addQuickElement('georss:box', self.georss_coords(box_coords))
else:
# Getting the lower-case geometry type.
gtype = str(geom.geom_type).lower()
if gtype == 'point':
self.add_georss_point(handler, geom.coords, w3c_geo=w3c_geo)
else:
if w3c_geo:
raise ValueError('W3C Geo only supports Point geometries.')
# For formatting consistent w/the GeoRSS simple standard:
# http://georss.org/1.0#simple
if gtype in ('linestring', 'linearring'):
handler.addQuickElement('georss:line', self.georss_coords(geom.coords))
elif gtype in ('polygon',):
# Only support the exterior ring.
handler.addQuickElement('georss:polygon', self.georss_coords(geom[0].coords))
else:
raise ValueError('Geometry type "%s" not supported.' % geom.geom_type)
# ### SyndicationFeed subclasses ###
class GeoRSSFeed(Rss201rev2Feed, GeoFeedMixin):
def rss_attributes(self):
attrs = super().rss_attributes()
attrs['xmlns:georss'] = 'http://www.georss.org/georss'
return attrs
def add_item_elements(self, handler, item):
super().add_item_elements(handler, item)
self.add_georss_element(handler, item)
def add_root_elements(self, handler):
super().add_root_elements(handler)
self.add_georss_element(handler, self.feed)
class GeoAtom1Feed(Atom1Feed, GeoFeedMixin):
def root_attributes(self):
attrs = super().root_attributes()
attrs['xmlns:georss'] = 'http://www.georss.org/georss'
return attrs
def add_item_elements(self, handler, item):
super().add_item_elements(handler, item)
self.add_georss_element(handler, item)
def add_root_elements(self, handler):
super().add_root_elements(handler)
self.add_georss_element(handler, self.feed)
class W3CGeoFeed(Rss201rev2Feed, GeoFeedMixin):
def rss_attributes(self):
attrs = super().rss_attributes()
attrs['xmlns:geo'] = 'http://www.w3.org/2003/01/geo/wgs84_pos#'
return attrs
def add_item_elements(self, handler, item):
super().add_item_elements(handler, item)
self.add_georss_element(handler, item, w3c_geo=True)
def add_root_elements(self, handler):
super().add_root_elements(handler)
self.add_georss_element(handler, self.feed, w3c_geo=True)
# ### Feed subclass ###
class Feed(BaseFeed):
"""
This is a subclass of the `Feed` from `django.contrib.syndication`.
This allows users to define `geometry(obj)` and/or `item_geometry(item)`
methods on their own subclasses so that geo-referenced information may be
placed in the feed.
"""
feed_type = GeoRSSFeed
def feed_extra_kwargs(self, obj):
return {'geometry': self._get_dynamic_attr('geometry', obj)}
def item_extra_kwargs(self, item):
return {'geometry': self._get_dynamic_attr('item_geometry', item)}
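# --- Usage sketch (editorial addition) ---
# A hedged illustration of the subclassing pattern described in the Feed
# docstring above.  ``City`` and its ``point`` field are hypothetical; any
# model exposing a geometry (or a (lon, lat) tuple) would work the same way.
def _example_city_feed():
    class CityFeed(Feed):
        title = "Recently added cities"
        link = "/cities/"

        def items(self):
            from myapp.models import City  # hypothetical app and model
            return City.objects.order_by('-id')[:10]

        def item_geometry(self, item):
            # A geometry object or a (lon, lat) tuple; the GeoFeedMixin
            # renders it as a georss:point (or line/polygon) element.
            return item.point

    return CityFeed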
| bsd-3-clause |
zouyapeng/horizon_change | openstack_dashboard/dashboards/project/data_processing/clusters/views.py | 17 | 3693 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tables
from horizon import tabs
from horizon import workflows
from openstack_dashboard.api import sahara as saharaclient
import openstack_dashboard.dashboards.project.data_processing.clusters. \
tables as c_tables
import openstack_dashboard.dashboards.project.data_processing.clusters.tabs \
as _tabs
import openstack_dashboard.dashboards.project.data_processing.clusters. \
workflows.create as create_flow
import openstack_dashboard.dashboards.project.data_processing.clusters. \
workflows.scale as scale_flow
LOG = logging.getLogger(__name__)
class ClustersView(tables.DataTableView):
table_class = c_tables.ClustersTable
template_name = 'project/data_processing.clusters/clusters.html'
def get_data(self):
try:
clusters = saharaclient.cluster_list(self.request)
except Exception:
clusters = []
exceptions.handle(self.request,
_("Unable to fetch cluster list"))
return clusters
class ClusterDetailsView(tabs.TabView):
tab_group_class = _tabs.ClusterDetailsTabs
template_name = 'project/data_processing.clusters/details.html'
def get_context_data(self, **kwargs):
context = super(ClusterDetailsView, self)\
.get_context_data(**kwargs)
return context
class CreateClusterView(workflows.WorkflowView):
workflow_class = create_flow.CreateCluster
success_url = \
"horizon:project:data_processing.clusters:create-cluster"
classes = ("ajax-modal")
template_name = "project/data_processing.clusters/create.html"
class ConfigureClusterView(workflows.WorkflowView):
workflow_class = create_flow.ConfigureCluster
success_url = "horizon:project:data_processing.clusters"
template_name = "project/data_processing.clusters/configure.html"
class ScaleClusterView(workflows.WorkflowView):
workflow_class = scale_flow.ScaleCluster
success_url = "horizon:project:data_processing.clusters"
classes = ("ajax-modal")
template_name = "project/data_processing.clusters/scale.html"
def get_context_data(self, **kwargs):
context = super(ScaleClusterView, self)\
.get_context_data(**kwargs)
context["cluster_id"] = kwargs["cluster_id"]
return context
def get_object(self, *args, **kwargs):
if not hasattr(self, "_object"):
template_id = self.kwargs['cluster_id']
try:
template = saharaclient.cluster_template_get(self.request,
template_id)
except Exception:
template = None
exceptions.handle(self.request,
_("Unable to fetch cluster template."))
self._object = template
return self._object
def get_initial(self):
initial = super(ScaleClusterView, self).get_initial()
initial.update({'cluster_id': self.kwargs['cluster_id']})
return initial
| apache-2.0 |
wskplho/sl4a | python/src/Lib/test/test_cfgparser.py | 48 | 18620 | import ConfigParser
import StringIO
import unittest
import UserDict
from test import test_support
class SortedDict(UserDict.UserDict):
def items(self):
result = self.data.items()
result.sort()
return result
def keys(self):
result = self.data.keys()
result.sort()
return result
def values(self):
result = self.items()
return [i[1] for i in result]
def iteritems(self): return iter(self.items())
def iterkeys(self): return iter(self.keys())
__iter__ = iterkeys
def itervalues(self): return iter(self.values())
class TestCaseBase(unittest.TestCase):
def newconfig(self, defaults=None):
if defaults is None:
self.cf = self.config_class()
else:
self.cf = self.config_class(defaults)
return self.cf
def fromstring(self, string, defaults=None):
cf = self.newconfig(defaults)
sio = StringIO.StringIO(string)
cf.readfp(sio)
return cf
def test_basic(self):
cf = self.fromstring(
"[Foo Bar]\n"
"foo=bar\n"
"[Spacey Bar]\n"
"foo = bar\n"
"[Commented Bar]\n"
"foo: bar ; comment\n"
"[Long Line]\n"
"foo: this line is much, much longer than my editor\n"
" likes it.\n"
"[Section\\with$weird%characters[\t]\n"
"[Internationalized Stuff]\n"
"foo[bg]: Bulgarian\n"
"foo=Default\n"
"foo[en]=English\n"
"foo[de]=Deutsch\n"
"[Spaces]\n"
"key with spaces : value\n"
"another with spaces = splat!\n"
)
L = cf.sections()
L.sort()
eq = self.assertEqual
eq(L, [r'Commented Bar',
r'Foo Bar',
r'Internationalized Stuff',
r'Long Line',
r'Section\with$weird%characters[' '\t',
r'Spaces',
r'Spacey Bar',
])
# The use of spaces in the section names serves as a
# regression test for SourceForge bug #583248:
# http://www.python.org/sf/583248
eq(cf.get('Foo Bar', 'foo'), 'bar')
eq(cf.get('Spacey Bar', 'foo'), 'bar')
eq(cf.get('Commented Bar', 'foo'), 'bar')
eq(cf.get('Spaces', 'key with spaces'), 'value')
eq(cf.get('Spaces', 'another with spaces'), 'splat!')
self.failIf('__name__' in cf.options("Foo Bar"),
'__name__ "option" should not be exposed by the API!')
# Make sure the right things happen for remove_option();
# added to include check for SourceForge bug #123324:
self.failUnless(cf.remove_option('Foo Bar', 'foo'),
"remove_option() failed to report existance of option")
self.failIf(cf.has_option('Foo Bar', 'foo'),
"remove_option() failed to remove option")
self.failIf(cf.remove_option('Foo Bar', 'foo'),
"remove_option() failed to report non-existance of option"
" that was removed")
self.assertRaises(ConfigParser.NoSectionError,
cf.remove_option, 'No Such Section', 'foo')
eq(cf.get('Long Line', 'foo'),
'this line is much, much longer than my editor\nlikes it.')
def test_case_sensitivity(self):
cf = self.newconfig()
cf.add_section("A")
cf.add_section("a")
L = cf.sections()
L.sort()
eq = self.assertEqual
eq(L, ["A", "a"])
cf.set("a", "B", "value")
eq(cf.options("a"), ["b"])
eq(cf.get("a", "b"), "value",
"could not locate option, expecting case-insensitive option names")
self.failUnless(cf.has_option("a", "b"))
cf.set("A", "A-B", "A-B value")
for opt in ("a-b", "A-b", "a-B", "A-B"):
self.failUnless(
cf.has_option("A", opt),
"has_option() returned false for option which should exist")
eq(cf.options("A"), ["a-b"])
eq(cf.options("a"), ["b"])
cf.remove_option("a", "B")
eq(cf.options("a"), [])
# SF bug #432369:
cf = self.fromstring(
"[MySection]\nOption: first line\n\tsecond line\n")
eq(cf.options("MySection"), ["option"])
eq(cf.get("MySection", "Option"), "first line\nsecond line")
# SF bug #561822:
cf = self.fromstring("[section]\nnekey=nevalue\n",
defaults={"key":"value"})
self.failUnless(cf.has_option("section", "Key"))
def test_default_case_sensitivity(self):
cf = self.newconfig({"foo": "Bar"})
self.assertEqual(
cf.get("DEFAULT", "Foo"), "Bar",
"could not locate option, expecting case-insensitive option names")
cf = self.newconfig({"Foo": "Bar"})
self.assertEqual(
cf.get("DEFAULT", "Foo"), "Bar",
"could not locate option, expecting case-insensitive defaults")
def test_parse_errors(self):
self.newconfig()
self.parse_error(ConfigParser.ParsingError,
"[Foo]\n extra-spaces: splat\n")
self.parse_error(ConfigParser.ParsingError,
"[Foo]\n extra-spaces= splat\n")
self.parse_error(ConfigParser.ParsingError,
"[Foo]\noption-without-value\n")
self.parse_error(ConfigParser.ParsingError,
"[Foo]\n:value-without-option-name\n")
self.parse_error(ConfigParser.ParsingError,
"[Foo]\n=value-without-option-name\n")
self.parse_error(ConfigParser.MissingSectionHeaderError,
"No Section!\n")
def parse_error(self, exc, src):
sio = StringIO.StringIO(src)
self.assertRaises(exc, self.cf.readfp, sio)
def test_query_errors(self):
cf = self.newconfig()
self.assertEqual(cf.sections(), [],
"new ConfigParser should have no defined sections")
self.failIf(cf.has_section("Foo"),
"new ConfigParser should have no acknowledged sections")
self.assertRaises(ConfigParser.NoSectionError,
cf.options, "Foo")
self.assertRaises(ConfigParser.NoSectionError,
cf.set, "foo", "bar", "value")
self.get_error(ConfigParser.NoSectionError, "foo", "bar")
cf.add_section("foo")
self.get_error(ConfigParser.NoOptionError, "foo", "bar")
def get_error(self, exc, section, option):
try:
self.cf.get(section, option)
except exc, e:
return e
else:
self.fail("expected exception type %s.%s"
% (exc.__module__, exc.__name__))
def test_boolean(self):
cf = self.fromstring(
"[BOOLTEST]\n"
"T1=1\n"
"T2=TRUE\n"
"T3=True\n"
"T4=oN\n"
"T5=yes\n"
"F1=0\n"
"F2=FALSE\n"
"F3=False\n"
"F4=oFF\n"
"F5=nO\n"
"E1=2\n"
"E2=foo\n"
"E3=-1\n"
"E4=0.1\n"
"E5=FALSE AND MORE"
)
for x in range(1, 5):
self.failUnless(cf.getboolean('BOOLTEST', 't%d' % x))
self.failIf(cf.getboolean('BOOLTEST', 'f%d' % x))
self.assertRaises(ValueError,
cf.getboolean, 'BOOLTEST', 'e%d' % x)
def test_weird_errors(self):
cf = self.newconfig()
cf.add_section("Foo")
self.assertRaises(ConfigParser.DuplicateSectionError,
cf.add_section, "Foo")
def test_write(self):
cf = self.fromstring(
"[Long Line]\n"
"foo: this line is much, much longer than my editor\n"
" likes it.\n"
"[DEFAULT]\n"
"foo: another very\n"
" long line"
)
output = StringIO.StringIO()
cf.write(output)
self.assertEqual(
output.getvalue(),
"[DEFAULT]\n"
"foo = another very\n"
"\tlong line\n"
"\n"
"[Long Line]\n"
"foo = this line is much, much longer than my editor\n"
"\tlikes it.\n"
"\n"
)
def test_set_string_types(self):
cf = self.fromstring("[sect]\n"
"option1=foo\n")
# Check that we don't get an exception when setting values in
# an existing section using strings:
class mystr(str):
pass
cf.set("sect", "option1", "splat")
cf.set("sect", "option1", mystr("splat"))
cf.set("sect", "option2", "splat")
cf.set("sect", "option2", mystr("splat"))
try:
unicode
except NameError:
pass
else:
cf.set("sect", "option1", unicode("splat"))
cf.set("sect", "option2", unicode("splat"))
def test_read_returns_file_list(self):
file1 = test_support.findfile("cfgparser.1")
# check when we pass a mix of readable and non-readable files:
cf = self.newconfig()
parsed_files = cf.read([file1, "nonexistant-file"])
self.assertEqual(parsed_files, [file1])
self.assertEqual(cf.get("Foo Bar", "foo"), "newbar")
# check when we pass only a filename:
cf = self.newconfig()
parsed_files = cf.read(file1)
self.assertEqual(parsed_files, [file1])
self.assertEqual(cf.get("Foo Bar", "foo"), "newbar")
# check when we pass only missing files:
cf = self.newconfig()
parsed_files = cf.read(["nonexistant-file"])
self.assertEqual(parsed_files, [])
# check when we pass no files:
cf = self.newconfig()
parsed_files = cf.read([])
self.assertEqual(parsed_files, [])
# shared by subclasses
def get_interpolation_config(self):
return self.fromstring(
"[Foo]\n"
"bar=something %(with1)s interpolation (1 step)\n"
"bar9=something %(with9)s lots of interpolation (9 steps)\n"
"bar10=something %(with10)s lots of interpolation (10 steps)\n"
"bar11=something %(with11)s lots of interpolation (11 steps)\n"
"with11=%(with10)s\n"
"with10=%(with9)s\n"
"with9=%(with8)s\n"
"with8=%(With7)s\n"
"with7=%(WITH6)s\n"
"with6=%(with5)s\n"
"With5=%(with4)s\n"
"WITH4=%(with3)s\n"
"with3=%(with2)s\n"
"with2=%(with1)s\n"
"with1=with\n"
"\n"
"[Mutual Recursion]\n"
"foo=%(bar)s\n"
"bar=%(foo)s\n"
"\n"
"[Interpolation Error]\n"
"name=%(reference)s\n",
# no definition for 'reference'
defaults={"getname": "%(__name__)s"})
def check_items_config(self, expected):
cf = self.fromstring(
"[section]\n"
"name = value\n"
"key: |%(name)s| \n"
"getdefault: |%(default)s|\n"
"getname: |%(__name__)s|",
defaults={"default": "<default>"})
L = list(cf.items("section"))
L.sort()
self.assertEqual(L, expected)
class ConfigParserTestCase(TestCaseBase):
config_class = ConfigParser.ConfigParser
def test_interpolation(self):
cf = self.get_interpolation_config()
eq = self.assertEqual
eq(cf.get("Foo", "getname"), "Foo")
eq(cf.get("Foo", "bar"), "something with interpolation (1 step)")
eq(cf.get("Foo", "bar9"),
"something with lots of interpolation (9 steps)")
eq(cf.get("Foo", "bar10"),
"something with lots of interpolation (10 steps)")
self.get_error(ConfigParser.InterpolationDepthError, "Foo", "bar11")
def test_interpolation_missing_value(self):
cf = self.get_interpolation_config()
e = self.get_error(ConfigParser.InterpolationError,
"Interpolation Error", "name")
self.assertEqual(e.reference, "reference")
self.assertEqual(e.section, "Interpolation Error")
self.assertEqual(e.option, "name")
def test_items(self):
self.check_items_config([('default', '<default>'),
('getdefault', '|<default>|'),
('getname', '|section|'),
('key', '|value|'),
('name', 'value')])
def test_set_nonstring_types(self):
cf = self.newconfig()
cf.add_section('non-string')
cf.set('non-string', 'int', 1)
cf.set('non-string', 'list', [0, 1, 1, 2, 3, 5, 8, 13, '%('])
cf.set('non-string', 'dict', {'pi': 3.14159, '%(': 1,
'%(list)': '%(list)'})
cf.set('non-string', 'string_with_interpolation', '%(list)s')
self.assertEqual(cf.get('non-string', 'int', raw=True), 1)
self.assertRaises(TypeError, cf.get, 'non-string', 'int')
self.assertEqual(cf.get('non-string', 'list', raw=True),
[0, 1, 1, 2, 3, 5, 8, 13, '%('])
self.assertRaises(TypeError, cf.get, 'non-string', 'list')
self.assertEqual(cf.get('non-string', 'dict', raw=True),
{'pi': 3.14159, '%(': 1, '%(list)': '%(list)'})
self.assertRaises(TypeError, cf.get, 'non-string', 'dict')
self.assertEqual(cf.get('non-string', 'string_with_interpolation',
raw=True), '%(list)s')
self.assertRaises(ValueError, cf.get, 'non-string',
'string_with_interpolation', raw=False)
class RawConfigParserTestCase(TestCaseBase):
config_class = ConfigParser.RawConfigParser
def test_interpolation(self):
cf = self.get_interpolation_config()
eq = self.assertEqual
eq(cf.get("Foo", "getname"), "%(__name__)s")
eq(cf.get("Foo", "bar"),
"something %(with1)s interpolation (1 step)")
eq(cf.get("Foo", "bar9"),
"something %(with9)s lots of interpolation (9 steps)")
eq(cf.get("Foo", "bar10"),
"something %(with10)s lots of interpolation (10 steps)")
eq(cf.get("Foo", "bar11"),
"something %(with11)s lots of interpolation (11 steps)")
def test_items(self):
self.check_items_config([('default', '<default>'),
('getdefault', '|%(default)s|'),
('getname', '|%(__name__)s|'),
('key', '|%(name)s|'),
('name', 'value')])
def test_set_nonstring_types(self):
cf = self.newconfig()
cf.add_section('non-string')
cf.set('non-string', 'int', 1)
cf.set('non-string', 'list', [0, 1, 1, 2, 3, 5, 8, 13])
cf.set('non-string', 'dict', {'pi': 3.14159})
self.assertEqual(cf.get('non-string', 'int'), 1)
self.assertEqual(cf.get('non-string', 'list'),
[0, 1, 1, 2, 3, 5, 8, 13])
self.assertEqual(cf.get('non-string', 'dict'), {'pi': 3.14159})
class SafeConfigParserTestCase(ConfigParserTestCase):
config_class = ConfigParser.SafeConfigParser
def test_safe_interpolation(self):
# See http://www.python.org/sf/511737
cf = self.fromstring("[section]\n"
"option1=xxx\n"
"option2=%(option1)s/xxx\n"
"ok=%(option1)s/%%s\n"
"not_ok=%(option2)s/%%s")
self.assertEqual(cf.get("section", "ok"), "xxx/%s")
self.assertEqual(cf.get("section", "not_ok"), "xxx/xxx/%s")
def test_set_malformatted_interpolation(self):
cf = self.fromstring("[sect]\n"
"option1=foo\n")
self.assertEqual(cf.get('sect', "option1"), "foo")
self.assertRaises(ValueError, cf.set, "sect", "option1", "%foo")
self.assertRaises(ValueError, cf.set, "sect", "option1", "foo%")
self.assertRaises(ValueError, cf.set, "sect", "option1", "f%oo")
self.assertEqual(cf.get('sect', "option1"), "foo")
# bug #5741: double percents are *not* malformed
cf.set("sect", "option2", "foo%%bar")
self.assertEqual(cf.get("sect", "option2"), "foo%bar")
def test_set_nonstring_types(self):
cf = self.fromstring("[sect]\n"
"option1=foo\n")
# Check that we get a TypeError when setting non-string values
# in an existing section:
self.assertRaises(TypeError, cf.set, "sect", "option1", 1)
self.assertRaises(TypeError, cf.set, "sect", "option1", 1.0)
self.assertRaises(TypeError, cf.set, "sect", "option1", object())
self.assertRaises(TypeError, cf.set, "sect", "option2", 1)
self.assertRaises(TypeError, cf.set, "sect", "option2", 1.0)
self.assertRaises(TypeError, cf.set, "sect", "option2", object())
def test_add_section_default_1(self):
cf = self.newconfig()
self.assertRaises(ValueError, cf.add_section, "default")
def test_add_section_default_2(self):
cf = self.newconfig()
self.assertRaises(ValueError, cf.add_section, "DEFAULT")
class SortedTestCase(RawConfigParserTestCase):
def newconfig(self, defaults=None):
self.cf = self.config_class(defaults=defaults, dict_type=SortedDict)
return self.cf
def test_sorted(self):
self.fromstring("[b]\n"
"o4=1\n"
"o3=2\n"
"o2=3\n"
"o1=4\n"
"[a]\n"
"k=v\n")
output = StringIO.StringIO()
self.cf.write(output)
self.assertEquals(output.getvalue(),
"[a]\n"
"k = v\n\n"
"[b]\n"
"o1 = 4\n"
"o2 = 3\n"
"o3 = 2\n"
"o4 = 1\n\n")
def test_main():
test_support.run_unittest(
ConfigParserTestCase,
RawConfigParserTestCase,
SafeConfigParserTestCase,
SortedTestCase
)
if __name__ == "__main__":
test_main()
| apache-2.0 |
brython-dev/brython | www/src/Lib/_sysconfigdata.py | 731 | 18167 | build_time_vars={'HAVE_SYS_WAIT_H': 1, 'HAVE_UTIL_H': 0, 'HAVE_SYMLINKAT': 1, 'HAVE_LIBSENDFILE': 0, 'SRCDIRS': 'Parser Grammar Objects Python Modules Mac', 'SIZEOF_OFF_T': 8, 'BASECFLAGS': '-Wno-unused-result', 'HAVE_UTIME_H': 1, 'EXTRAMACHDEPPATH': '', 'HAVE_SYS_TIME_H': 1, 'CFLAGSFORSHARED': '-fPIC', 'HAVE_HYPOT': 1, 'PGSRCS': '\\', 'HAVE_LIBUTIL_H': 0, 'HAVE_COMPUTED_GOTOS': 1, 'HAVE_LUTIMES': 1, 'HAVE_MAKEDEV': 1, 'HAVE_REALPATH': 1, 'HAVE_LINUX_TIPC_H': 1, 'MULTIARCH': 'i386-linux-gnu', 'HAVE_GETWD': 1, 'HAVE_GCC_ASM_FOR_X64': 0, 'HAVE_INET_PTON': 1, 'HAVE_GETHOSTBYNAME_R_6_ARG': 1, 'SIZEOF__BOOL': 1, 'HAVE_ZLIB_COPY': 1, 'ASDLGEN': 'python3.3 ../Parser/asdl_c.py', 'GRAMMAR_INPUT': '../Grammar/Grammar', 'HOST_GNU_TYPE': 'i686-pc-linux-gnu', 'HAVE_SCHED_RR_GET_INTERVAL': 1, 'HAVE_BLUETOOTH_H': 0, 'HAVE_MKFIFO': 1, 'TIMEMODULE_LIB': 0, 'LIBM': '-lm', 'PGENOBJS': '\\ \\', 'PYTHONFRAMEWORK': '', 'GETPGRP_HAVE_ARG': 0, 'HAVE_MMAP': 1, 'SHLIB_SUFFIX': '.so', 'SIZEOF_FLOAT': 4, 'HAVE_RENAMEAT': 1, 'HAVE_LANGINFO_H': 1, 'HAVE_STDLIB_H': 1, 'PY_CORE_CFLAGS': '-Wno-unused-result -DNDEBUG -g -fwrapv -O2 -Wall -Wstrict-prototypes -g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security -I. -IInclude -I../Include -D_FORTIFY_SOURCE=2 -fPIC -DPy_BUILD_CORE', 'HAVE_BROKEN_PIPE_BUF': 0, 'HAVE_CONFSTR': 1, 'HAVE_SIGTIMEDWAIT': 1, 'HAVE_FTELLO': 1, 'READELF': 'readelf', 'HAVE_SIGALTSTACK': 1, 'TESTTIMEOUT': 3600, 'PYTHONPATH': ':plat-i386-linux-gnu', 'SIZEOF_WCHAR_T': 4, 'LIBOBJS': '', 'HAVE_SYSCONF': 1, 'MAKESETUP': '../Modules/makesetup', 'HAVE_UTIMENSAT': 1, 'HAVE_FCHOWNAT': 1, 'HAVE_WORKING_TZSET': 1, 'HAVE_FINITE': 1, 'HAVE_ASINH': 1, 'HAVE_SETEUID': 1, 'CONFIGFILES': 'configure configure.ac acconfig.h pyconfig.h.in Makefile.pre.in', 'HAVE_SETGROUPS': 1, 'PARSER_OBJS': '\\ Parser/myreadline.o Parser/parsetok.o Parser/tokenizer.o', 'HAVE_MBRTOWC': 1, 'SIZEOF_INT': 4, 'HAVE_STDARG_PROTOTYPES': 1, 'TM_IN_SYS_TIME': 0, 'HAVE_SYS_TIMES_H': 1, 'HAVE_LCHOWN': 1, 'HAVE_SSIZE_T': 1, 'HAVE_PAUSE': 1, 'SYSLIBS': '-lm', 'POSIX_SEMAPHORES_NOT_ENABLED': 0, 'HAVE_DEVICE_MACROS': 1, 'BLDSHARED': 'i686-linux-gnu-gcc -pthread -shared -Wl,-O1 -Wl,-Bsymbolic-functions -Wl,-Bsymbolic-functions -Wl,-z,relro -Wno-unused-result -DNDEBUG -g -fwrapv -O2 -Wall -Wstrict-prototypes -g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security ', 'LIBSUBDIRS': 'tkinter tkinter/test tkinter/test/test_tkinter \\', 'HAVE_SYS_UN_H': 1, 'HAVE_SYS_STAT_H': 1, 'VPATH': '..', 'INCLDIRSTOMAKE': '/usr/include /usr/include /usr/include/python3.3m /usr/include/python3.3m', 'HAVE_BROKEN_SEM_GETVALUE': 0, 'HAVE_TIMEGM': 1, 'PACKAGE_VERSION': 0, 'MAJOR_IN_SYSMACROS': 0, 'HAVE_ATANH': 1, 'HAVE_GAI_STRERROR': 1, 'HAVE_SYS_POLL_H': 1, 'SIZEOF_PTHREAD_T': 4, 'SIZEOF_FPOS_T': 16, 'HAVE_CTERMID': 1, 'HAVE_TMPFILE': 1, 'HAVE_SETUID': 1, 'CXX': 'i686-linux-gnu-g++ -pthread', 'srcdir': '..', 'HAVE_UINT32_T': 1, 'HAVE_ADDRINFO': 1, 'HAVE_GETSPENT': 1, 'SIZEOF_DOUBLE': 8, 'HAVE_INT32_T': 1, 'LIBRARY_OBJS_OMIT_FROZEN': '\\', 'HAVE_FUTIMES': 1, 'CONFINCLUDEPY': '/usr/include/python3.3m', 'HAVE_RL_COMPLETION_APPEND_CHARACTER': 1, 'LIBFFI_INCLUDEDIR': '', 'HAVE_SETGID': 1, 'HAVE_UINT64_T': 1, 'EXEMODE': 755, 'UNIVERSALSDK': '', 'HAVE_LIBDL': 1, 'HAVE_GETNAMEINFO': 1, 'HAVE_STDINT_H': 1, 'COREPYTHONPATH': ':plat-i386-linux-gnu', 'HAVE_SOCKADDR_STORAGE': 1, 'HAVE_WAITID': 1, 'EXTRAPLATDIR': '@EXTRAPLATDIR@', 'HAVE_ACCEPT4': 1, 'RUNSHARED': 
'LD_LIBRARY_PATH=/build/buildd/python3.3-3.3.1/build-shared:', 'EXE': '', 'HAVE_SIGACTION': 1, 'HAVE_CHOWN': 1, 'HAVE_GETLOGIN': 1, 'HAVE_TZNAME': 0, 'PACKAGE_NAME': 0, 'HAVE_GETPGID': 1, 'HAVE_GLIBC_MEMMOVE_BUG': 0, 'BUILD_GNU_TYPE': 'i686-pc-linux-gnu', 'HAVE_LINUX_CAN_H': 1, 'DYNLOADFILE': 'dynload_shlib.o', 'HAVE_PWRITE': 1, 'BUILDEXE': '', 'HAVE_OPENPTY': 1, 'HAVE_LOCKF': 1, 'HAVE_COPYSIGN': 1, 'HAVE_PREAD': 1, 'HAVE_DLOPEN': 1, 'HAVE_SYS_KERN_CONTROL_H': 0, 'PY_FORMAT_LONG_LONG': '"ll"', 'HAVE_TCSETPGRP': 1, 'HAVE_SETSID': 1, 'HAVE_STRUCT_STAT_ST_BIRTHTIME': 0, 'HAVE_STRING_H': 1, 'LDLIBRARY': 'libpython3.3m.so', 'INSTALL_SCRIPT': '/usr/bin/install -c', 'HAVE_SYS_XATTR_H': 1, 'HAVE_CURSES_IS_TERM_RESIZED': 1, 'HAVE_TMPNAM_R': 1, 'STRICT_SYSV_CURSES': "/* Don't use ncurses extensions */", 'WANT_SIGFPE_HANDLER': 1, 'HAVE_INT64_T': 1, 'HAVE_STAT_TV_NSEC': 1, 'HAVE_SYS_MKDEV_H': 0, 'HAVE_BROKEN_POLL': 0, 'HAVE_IF_NAMEINDEX': 1, 'HAVE_GETPWENT': 1, 'PSRCS': '\\', 'RANLIB': 'ranlib', 'HAVE_WCSCOLL': 1, 'WITH_NEXT_FRAMEWORK': 0, 'ASDLGEN_FILES': '../Parser/asdl.py ../Parser/asdl_c.py', 'HAVE_RL_PRE_INPUT_HOOK': 1, 'PACKAGE_URL': 0, 'SHLIB_EXT': 0, 'HAVE_SYS_LOADAVG_H': 0, 'HAVE_LIBIEEE': 0, 'HAVE_SEM_OPEN': 1, 'HAVE_TERM_H': 1, 'IO_OBJS': '\\', 'IO_H': 'Modules/_io/_iomodule.h', 'HAVE_STATVFS': 1, 'VERSION': '3.3', 'HAVE_GETC_UNLOCKED': 1, 'MACHDEPS': 'plat-i386-linux-gnu @EXTRAPLATDIR@', 'SUBDIRSTOO': 'Include Lib Misc', 'HAVE_SETREUID': 1, 'HAVE_ERFC': 1, 'HAVE_SETRESUID': 1, 'LINKFORSHARED': '-Xlinker -export-dynamic -Wl,-O1 -Wl,-Bsymbolic-functions', 'HAVE_SYS_TYPES_H': 1, 'HAVE_GETPAGESIZE': 1, 'HAVE_SETEGID': 1, 'HAVE_PTY_H': 1, 'HAVE_STRUCT_STAT_ST_FLAGS': 0, 'HAVE_WCHAR_H': 1, 'HAVE_FSEEKO': 1, 'Py_ENABLE_SHARED': 1, 'HAVE_SIGRELSE': 1, 'HAVE_PTHREAD_INIT': 0, 'FILEMODE': 644, 'HAVE_SYS_RESOURCE_H': 1, 'HAVE_READLINKAT': 1, 'PYLONG_BITS_IN_DIGIT': 0, 'LINKCC': 'i686-linux-gnu-gcc -pthread', 'HAVE_SETLOCALE': 1, 'HAVE_CHROOT': 1, 'HAVE_OPENAT': 1, 'HAVE_FEXECVE': 1, 'LDCXXSHARED': 'i686-linux-gnu-g++ -pthread -shared -Wl,-O1 -Wl,-Bsymbolic-functions', 'DIST': 'README ChangeLog configure configure.ac acconfig.h pyconfig.h.in Makefile.pre.in Include Lib Misc Ext-dummy', 'HAVE_MKNOD': 1, 'PY_LDFLAGS': '-Wl,-Bsymbolic-functions -Wl,-z,relro', 'HAVE_BROKEN_MBSTOWCS': 0, 'LIBRARY_OBJS': '\\', 'HAVE_LOG1P': 1, 'SIZEOF_VOID_P': 4, 'HAVE_FCHOWN': 1, 'PYTHONFRAMEWORKPREFIX': '', 'HAVE_LIBDLD': 0, 'HAVE_TGAMMA': 1, 'HAVE_ERRNO_H': 1, 'HAVE_IO_H': 0, 'OTHER_LIBTOOL_OPT': '', 'HAVE_POLL_H': 1, 'PY_CPPFLAGS': '-I. 
-IInclude -I../Include -D_FORTIFY_SOURCE=2', 'XMLLIBSUBDIRS': 'xml xml/dom xml/etree xml/parsers xml/sax', 'GRAMMAR_H': 'Include/graminit.h', 'TANH_PRESERVES_ZERO_SIGN': 1, 'HAVE_GETLOADAVG': 1, 'UNICODE_DEPS': '\\ \\', 'HAVE_GETCWD': 1, 'MANDIR': '/usr/share/man', 'MACHDESTLIB': '/usr/lib/python3.3', 'GRAMMAR_C': 'Python/graminit.c', 'PGOBJS': '\\', 'HAVE_DEV_PTMX': 1, 'HAVE_UINTPTR_T': 1, 'HAVE_SCHED_SETAFFINITY': 1, 'PURIFY': '', 'HAVE_DECL_ISINF': 1, 'HAVE_RL_CALLBACK': 1, 'HAVE_WRITEV': 1, 'HAVE_GETHOSTBYNAME_R_5_ARG': 0, 'HAVE_SYS_AUDIOIO_H': 0, 'EXT_SUFFIX': '.cpython-33m.so', 'SIZEOF_LONG_LONG': 8, 'DLINCLDIR': '.', 'HAVE_PATHCONF': 1, 'HAVE_UNLINKAT': 1, 'MKDIR_P': '/bin/mkdir -p', 'HAVE_ALTZONE': 0, 'SCRIPTDIR': '/usr/lib', 'OPCODETARGETGEN_FILES': '\\', 'HAVE_GETSPNAM': 1, 'HAVE_SYS_TERMIO_H': 0, 'HAVE_ATTRIBUTE_FORMAT_PARSETUPLE': 0, 'HAVE_PTHREAD_H': 1, 'Py_DEBUG': 0, 'HAVE_STRUCT_STAT_ST_BLOCKS': 1, 'X87_DOUBLE_ROUNDING': 1, 'SIZEOF_TIME_T': 4, 'HAVE_DYNAMIC_LOADING': 1, 'HAVE_DIRECT_H': 0, 'SRC_GDB_HOOKS': '../Tools/gdb/libpython.py', 'HAVE_GETADDRINFO': 1, 'HAVE_BROKEN_NICE': 0, 'HAVE_DIRENT_H': 1, 'HAVE_WCSXFRM': 1, 'HAVE_RL_COMPLETION_DISPLAY_MATCHES_HOOK': 1, 'HAVE_FSTATVFS': 1, 'PYTHON': 'python', 'HAVE_OSX105_SDK': 0, 'BINDIR': '/usr/bin', 'TESTPYTHON': 'LD_LIBRARY_PATH=/build/buildd/python3.3-3.3.1/build-shared: ./python', 'ARFLAGS': 'rc', 'PLATDIR': 'plat-i386-linux-gnu', 'HAVE_ASM_TYPES_H': 1, 'PY3LIBRARY': 'libpython3.so', 'HAVE_PLOCK': 0, 'FLOCK_NEEDS_LIBBSD': 0, 'WITH_TSC': 0, 'HAVE_LIBREADLINE': 1, 'MACHDEP': 'linux', 'HAVE_SELECT': 1, 'LDFLAGS': '-Wl,-Bsymbolic-functions -Wl,-z,relro', 'HAVE_HSTRERROR': 1, 'SOABI': 'cpython-33m', 'HAVE_GETTIMEOFDAY': 1, 'HAVE_LIBRESOLV': 0, 'HAVE_UNSETENV': 1, 'HAVE_TM_ZONE': 1, 'HAVE_GETPGRP': 1, 'HAVE_FLOCK': 1, 'HAVE_SYS_BSDTTY_H': 0, 'SUBDIRS': '', 'PYTHONFRAMEWORKINSTALLDIR': '', 'PACKAGE_BUGREPORT': 0, 'HAVE_CLOCK': 1, 'HAVE_GETPEERNAME': 1, 'SIZEOF_PID_T': 4, 'HAVE_CONIO_H': 0, 'HAVE_FSTATAT': 1, 'HAVE_NETPACKET_PACKET_H': 1, 'HAVE_WAIT3': 1, 'DESTPATH': '', 'HAVE_STAT_TV_NSEC2': 0, 'HAVE_GETRESGID': 1, 'HAVE_UCS4_TCL': 0, 'SIGNED_RIGHT_SHIFT_ZERO_FILLS': 0, 'HAVE_TIMES': 1, 'HAVE_UNAME': 1, 'HAVE_ERF': 1, 'SIZEOF_SHORT': 2, 'HAVE_NCURSES_H': 1, 'HAVE_SYS_SENDFILE_H': 1, 'HAVE_CTERMID_R': 0, 'HAVE_TMPNAM': 1, 'prefix': '/usr', 'HAVE_NICE': 1, 'WITH_THREAD': 1, 'LN': 'ln', 'TESTRUNNER': 'LD_LIBRARY_PATH=/build/buildd/python3.3-3.3.1/build-shared: ./python ../Tools/scripts/run_tests.py', 'HAVE_SIGINTERRUPT': 1, 'HAVE_SETPGID': 1, 'RETSIGTYPE': 'void', 'HAVE_SCHED_GET_PRIORITY_MAX': 1, 'HAVE_SYS_SYS_DOMAIN_H': 0, 'HAVE_SYS_DIR_H': 0, 'HAVE__GETPTY': 0, 'HAVE_BLUETOOTH_BLUETOOTH_H': 1, 'HAVE_BIND_TEXTDOMAIN_CODESET': 1, 'HAVE_POLL': 1, 'PYTHON_OBJS': '\\', 'HAVE_WAITPID': 1, 'USE_INLINE': 1, 'HAVE_FUTIMENS': 1, 'USE_COMPUTED_GOTOS': 1, 'MAINCC': 'i686-linux-gnu-gcc -pthread', 'HAVE_SOCKETPAIR': 1, 'HAVE_PROCESS_H': 0, 'HAVE_SETVBUF': 1, 'HAVE_FDOPENDIR': 1, 'CONFINCLUDEDIR': '/usr/include', 'BINLIBDEST': '/usr/lib/python3.3', 'HAVE_SYS_IOCTL_H': 1, 'HAVE_SYSEXITS_H': 1, 'LDLAST': '', 'HAVE_SYS_FILE_H': 1, 'HAVE_RL_COMPLETION_SUPPRESS_APPEND': 1, 'HAVE_RL_COMPLETION_MATCHES': 1, 'HAVE_TCGETPGRP': 1, 'SIZEOF_SIZE_T': 4, 'HAVE_EPOLL_CREATE1': 1, 'HAVE_SYS_SELECT_H': 1, 'HAVE_CLOCK_GETTIME': 1, 'CFLAGS': '-Wno-unused-result -DNDEBUG -g -fwrapv -O2 -Wall -Wstrict-prototypes -g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security ', 'HAVE_SNPRINTF': 1, 'BLDLIBRARY': '-lpython3.3m', 'PARSER_HEADERS': '\\', 
'SO': '.so', 'LIBRARY': 'libpython3.3m.a', 'HAVE_FPATHCONF': 1, 'HAVE_TERMIOS_H': 1, 'HAVE_BROKEN_PTHREAD_SIGMASK': 0, 'AST_H': 'Include/Python-ast.h', 'HAVE_GCC_UINT128_T': 0, 'HAVE_ACOSH': 1, 'MODOBJS': 'Modules/_threadmodule.o Modules/signalmodule.o Modules/arraymodule.o Modules/mathmodule.o Modules/_math.o Modules/_struct.o Modules/timemodule.o Modules/_randommodule.o Modules/atexitmodule.o Modules/_elementtree.o Modules/_pickle.o Modules/_datetimemodule.o Modules/_bisectmodule.o Modules/_heapqmodule.o Modules/unicodedata.o Modules/fcntlmodule.o Modules/spwdmodule.o Modules/grpmodule.o Modules/selectmodule.o Modules/socketmodule.o Modules/_posixsubprocess.o Modules/md5module.o Modules/sha1module.o Modules/sha256module.o Modules/sha512module.o Modules/syslogmodule.o Modules/binascii.o Modules/zlibmodule.o Modules/pyexpat.o Modules/posixmodule.o Modules/errnomodule.o Modules/pwdmodule.o Modules/_sre.o Modules/_codecsmodule.o Modules/_weakref.o Modules/_functoolsmodule.o Modules/operator.o Modules/_collectionsmodule.o Modules/itertoolsmodule.o Modules/_localemodule.o Modules/_iomodule.o Modules/iobase.o Modules/fileio.o Modules/bytesio.o Modules/bufferedio.o Modules/textio.o Modules/stringio.o Modules/zipimport.o Modules/faulthandler.o Modules/symtablemodule.o Modules/xxsubtype.o', 'AST_C': 'Python/Python-ast.c', 'HAVE_SYS_NDIR_H': 0, 'DESTDIRS': '/usr /usr/lib /usr/lib/python3.3 /usr/lib/python3.3/lib-dynload', 'HAVE_SIGNAL_H': 1, 'PACKAGE_TARNAME': 0, 'HAVE_GETPRIORITY': 1, 'INCLUDEDIR': '/usr/include', 'HAVE_INTTYPES_H': 1, 'SIGNAL_OBJS': '', 'HAVE_READV': 1, 'HAVE_SETHOSTNAME': 1, 'MODLIBS': '-lrt -lexpat -L/usr/lib -lz -lexpat', 'CC': 'i686-linux-gnu-gcc -pthread', 'HAVE_LCHMOD': 0, 'SIZEOF_UINTPTR_T': 4, 'LIBPC': '/usr/lib/i386-linux-gnu/pkgconfig', 'BYTESTR_DEPS': '\\', 'HAVE_MKDIRAT': 1, 'LIBPL': '/usr/lib/python3.3/config-3.3m-i386-linux-gnu', 'HAVE_SHADOW_H': 1, 'HAVE_SYS_EVENT_H': 0, 'INSTALL': '/usr/bin/install -c', 'HAVE_GCC_ASM_FOR_X87': 1, 'HAVE_BROKEN_UNSETENV': 0, 'BASECPPFLAGS': '', 'DOUBLE_IS_BIG_ENDIAN_IEEE754': 0, 'HAVE_STRUCT_STAT_ST_RDEV': 1, 'HAVE_SEM_UNLINK': 1, 'BUILDPYTHON': 'python', 'HAVE_RL_CATCH_SIGNAL': 1, 'HAVE_DECL_TZNAME': 0, 'RESSRCDIR': 'Mac/Resources/framework', 'HAVE_PTHREAD_SIGMASK': 1, 'HAVE_UTIMES': 1, 'DISTDIRS': 'Include Lib Misc Ext-dummy', 'HAVE_FDATASYNC': 1, 'HAVE_USABLE_WCHAR_T': 0, 'PY_FORMAT_SIZE_T': '"z"', 'HAVE_SCHED_SETSCHEDULER': 1, 'VA_LIST_IS_ARRAY': 0, 'HAVE_LINUX_NETLINK_H': 1, 'HAVE_SETREGID': 1, 'HAVE_STROPTS_H': 1, 'LDVERSION': '3.3m', 'abs_builddir': '/build/buildd/python3.3-3.3.1/build-shared', 'SITEPATH': '', 'HAVE_GETHOSTBYNAME': 0, 'HAVE_SIGPENDING': 1, 'HAVE_KQUEUE': 0, 'HAVE_SYNC': 1, 'HAVE_GETSID': 1, 'HAVE_ROUND': 1, 'HAVE_STRFTIME': 1, 'AST_H_DIR': 'Include', 'HAVE_PIPE2': 1, 'AST_C_DIR': 'Python', 'TESTPYTHONOPTS': '', 'HAVE_DEV_PTC': 0, 'GETTIMEOFDAY_NO_TZ': 0, 'HAVE_NET_IF_H': 1, 'HAVE_SENDFILE': 1, 'HAVE_SETPGRP': 1, 'HAVE_SEM_GETVALUE': 1, 'CONFIGURE_LDFLAGS': '-Wl,-Bsymbolic-functions -Wl,-z,relro', 'DLLLIBRARY': '', 'PYTHON_FOR_BUILD': './python -E', 'SETPGRP_HAVE_ARG': 0, 'HAVE_INET_ATON': 1, 'INSTALL_SHARED': '/usr/bin/install -c -m 555', 'WITH_DOC_STRINGS': 1, 'OPCODETARGETS_H': '\\', 'HAVE_INITGROUPS': 1, 'HAVE_LINKAT': 1, 'BASEMODLIBS': '', 'SGI_ABI': '', 'HAVE_SCHED_SETPARAM': 1, 'OPT': '-DNDEBUG -g -fwrapv -O2 -Wall -Wstrict-prototypes', 'HAVE_POSIX_FADVISE': 1, 'datarootdir': '/usr/share', 'HAVE_MEMRCHR': 1, 'HGTAG': '', 'HAVE_MEMMOVE': 1, 'HAVE_GETRESUID': 1, 'DOUBLE_IS_ARM_MIXED_ENDIAN_IEEE754': 0, 
'HAVE_LSTAT': 1, 'AR': 'ar', 'HAVE_WAIT4': 1, 'HAVE_SYS_MODEM_H': 0, 'INSTSONAME': 'libpython3.3m.so.1.0', 'HAVE_SYS_STATVFS_H': 1, 'HAVE_LGAMMA': 1, 'HAVE_PROTOTYPES': 1, 'HAVE_SYS_UIO_H': 1, 'MAJOR_IN_MKDEV': 0, 'QUICKTESTOPTS': '-x test_subprocess test_io test_lib2to3 \\', 'HAVE_SYS_DEVPOLL_H': 0, 'HAVE_CHFLAGS': 0, 'HAVE_FSYNC': 1, 'HAVE_FCHMOD': 1, 'INCLUDEPY': '/usr/include/python3.3m', 'HAVE_SEM_TIMEDWAIT': 1, 'LDLIBRARYDIR': '', 'HAVE_STRUCT_TM_TM_ZONE': 1, 'HAVE_CURSES_H': 1, 'TIME_WITH_SYS_TIME': 1, 'HAVE_DUP2': 1, 'ENABLE_IPV6': 1, 'WITH_VALGRIND': 0, 'HAVE_SETITIMER': 1, 'THREADOBJ': 'Python/thread.o', 'LOCALMODLIBS': '-lrt -lexpat -L/usr/lib -lz -lexpat', 'HAVE_MEMORY_H': 1, 'HAVE_GETITIMER': 1, 'HAVE_C99_BOOL': 1, 'INSTALL_DATA': '/usr/bin/install -c -m 644', 'PGEN': 'Parser/pgen', 'HAVE_GRP_H': 1, 'HAVE_WCSFTIME': 1, 'AIX_GENUINE_CPLUSPLUS': 0, 'HAVE_LIBINTL_H': 1, 'SHELL': '/bin/sh', 'HAVE_UNISTD_H': 1, 'EXTRATESTOPTS': '', 'HAVE_EXECV': 1, 'HAVE_FSEEK64': 0, 'MVWDELCH_IS_EXPRESSION': 1, 'DESTSHARED': '/usr/lib/python3.3/lib-dynload', 'OPCODETARGETGEN': '\\', 'LIBDEST': '/usr/lib/python3.3', 'CCSHARED': '-fPIC', 'HAVE_EXPM1': 1, 'HAVE_DLFCN_H': 1, 'exec_prefix': '/usr', 'HAVE_READLINK': 1, 'WINDOW_HAS_FLAGS': 1, 'HAVE_FTELL64': 0, 'HAVE_STRLCPY': 0, 'MACOSX_DEPLOYMENT_TARGET': '', 'HAVE_SYS_SYSCALL_H': 1, 'DESTLIB': '/usr/lib/python3.3', 'LDSHARED': 'i686-linux-gnu-gcc -pthread -shared -Wl,-O1 -Wl,-Bsymbolic-functions -Wl,-Bsymbolic-functions -Wl,-z,relro -Wno-unused-result -DNDEBUG -g -fwrapv -O2 -Wall -Wstrict-prototypes -g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security ', 'HGVERSION': '', 'PYTHON_HEADERS': '\\', 'HAVE_STRINGS_H': 1, 'DOUBLE_IS_LITTLE_ENDIAN_IEEE754': 1, 'HAVE_POSIX_FALLOCATE': 1, 'HAVE_DIRFD': 1, 'HAVE_LOG2': 1, 'HAVE_GETPID': 1, 'HAVE_ALARM': 1, 'MACHDEP_OBJS': '', 'HAVE_SPAWN_H': 1, 'HAVE_FORK': 1, 'HAVE_SETRESGID': 1, 'HAVE_FCHMODAT': 1, 'HAVE_CLOCK_GETRES': 1, 'MACHDEPPATH': ':plat-i386-linux-gnu', 'STDC_HEADERS': 1, 'HAVE_SETPRIORITY': 1, 'LIBC': '', 'HAVE_SYS_EPOLL_H': 1, 'HAVE_SYS_UTSNAME_H': 1, 'HAVE_PUTENV': 1, 'HAVE_CURSES_RESIZE_TERM': 1, 'HAVE_FUTIMESAT': 1, 'WITH_DYLD': 0, 'INSTALL_PROGRAM': '/usr/bin/install -c', 'LIBS': '-lpthread -ldl -lutil', 'HAVE_TRUNCATE': 1, 'TESTOPTS': '', 'PROFILE_TASK': '../Tools/pybench/pybench.py -n 2 --with-gc --with-syscheck', 'HAVE_CURSES_RESIZETERM': 1, 'ABIFLAGS': 'm', 'HAVE_GETGROUPLIST': 1, 'OBJECT_OBJS': '\\', 'HAVE_MKNODAT': 1, 'HAVE_ST_BLOCKS': 1, 'HAVE_STRUCT_STAT_ST_GEN': 0, 'SYS_SELECT_WITH_SYS_TIME': 1, 'SHLIBS': '-lpthread -ldl -lutil', 'HAVE_GETGROUPS': 1, 'MODULE_OBJS': '\\', 'PYTHONFRAMEWORKDIR': 'no-framework', 'HAVE_FCNTL_H': 1, 'HAVE_LINK': 1, 'HAVE_SIGWAIT': 1, 'HAVE_GAMMA': 1, 'HAVE_SYS_LOCK_H': 0, 'HAVE_FORKPTY': 1, 'HAVE_SOCKADDR_SA_LEN': 0, 'HAVE_TEMPNAM': 1, 'HAVE_STRUCT_STAT_ST_BLKSIZE': 1, 'HAVE_MKFIFOAT': 1, 'HAVE_SIGWAITINFO': 1, 'HAVE_FTIME': 1, 'HAVE_EPOLL': 1, 'HAVE_SYS_SOCKET_H': 1, 'HAVE_LARGEFILE_SUPPORT': 1, 'CONFIGURE_CFLAGS': '-g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security', 'HAVE_PTHREAD_DESTRUCTOR': 0, 'CONFIGURE_CPPFLAGS': '-D_FORTIFY_SOURCE=2', 'HAVE_SYMLINK': 1, 'HAVE_LONG_LONG': 1, 'HAVE_IEEEFP_H': 0, 'LIBDIR': '/usr/lib', 'HAVE_PTHREAD_KILL': 1, 'TESTPATH': '', 'HAVE_STRDUP': 1, 'POBJS': '\\', 'NO_AS_NEEDED': '-Wl,--no-as-needed', 'HAVE_LONG_DOUBLE': 1, 'HGBRANCH': '', 'DISTFILES': 'README ChangeLog configure configure.ac acconfig.h pyconfig.h.in Makefile.pre.in', 'PTHREAD_SYSTEM_SCHED_SUPPORTED': 1, 
'HAVE_FACCESSAT': 1, 'AST_ASDL': '../Parser/Python.asdl', 'CPPFLAGS': '-I. -IInclude -I../Include -D_FORTIFY_SOURCE=2', 'HAVE_MKTIME': 1, 'HAVE_NDIR_H': 0, 'PY_CFLAGS': '-Wno-unused-result -DNDEBUG -g -fwrapv -O2 -Wall -Wstrict-prototypes -g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security ', 'LIBOBJDIR': 'Python/', 'HAVE_LINUX_CAN_RAW_H': 1, 'HAVE_GETHOSTBYNAME_R_3_ARG': 0, 'PACKAGE_STRING': 0, 'GNULD': 'yes', 'LOG1P_DROPS_ZERO_SIGN': 0, 'HAVE_FTRUNCATE': 1, 'WITH_LIBINTL': 0, 'HAVE_MREMAP': 1, 'HAVE_DECL_ISNAN': 1, 'HAVE_KILLPG': 1, 'SIZEOF_LONG': 4, 'HAVE_DECL_ISFINITE': 1, 'HAVE_IPA_PURE_CONST_BUG': 0, 'WITH_PYMALLOC': 1, 'abs_srcdir': '/build/buildd/python3.3-3.3.1/build-shared/..', 'HAVE_FCHDIR': 1, 'HAVE_BROKEN_POSIX_SEMAPHORES': 0, 'AC_APPLE_UNIVERSAL_BUILD': 0, 'PGENSRCS': '\\ \\', 'DIRMODE': 755, 'HAVE_GETHOSTBYNAME_R': 1, 'HAVE_LCHFLAGS': 0, 'HAVE_SYS_PARAM_H': 1, 'SIZEOF_LONG_DOUBLE': 12, 'CONFIG_ARGS': "'--enable-shared' '--prefix=/usr' '--enable-ipv6' '--enable-loadable-sqlite-extensions' '--with-dbmliborder=bdb:gdbm' '--with-computed-gotos' '--with-system-expat' '--with-system-ffi' '--with-fpectl' 'CC=i686-linux-gnu-gcc' 'CFLAGS=-g -fstack-protector --param=ssp-buffer-size=4 -Wformat -Werror=format-security ' 'LDFLAGS=-Wl,-Bsymbolic-functions -Wl,-z,relro' 'CPPFLAGS=-D_FORTIFY_SOURCE=2'", 'HAVE_SCHED_H': 1, 'HAVE_KILL': 1}
| bsd-3-clause |
chenc10/Spark-PAF | dist/ec2/lib/boto-2.34.0/boto/vpc/vpc.py | 34 | 3200 | # Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents a Virtual Private Cloud.
"""
from boto.ec2.ec2object import TaggedEC2Object
class VPC(TaggedEC2Object):
def __init__(self, connection=None):
"""
Represents a VPC.
:ivar id: The unique ID of the VPC.
:ivar dhcp_options_id: The ID of the set of DHCP options you've associated with the VPC
(or default if the default options are associated with the VPC).
:ivar state: The current state of the VPC.
:ivar cidr_block: The CIDR block for the VPC.
:ivar is_default: Indicates whether the VPC is the default VPC.
:ivar instance_tenancy: The allowed tenancy of instances launched into the VPC.
"""
super(VPC, self).__init__(connection)
self.id = None
self.dhcp_options_id = None
self.state = None
self.cidr_block = None
self.is_default = None
self.instance_tenancy = None
def __repr__(self):
return 'VPC:%s' % self.id
def endElement(self, name, value, connection):
if name == 'vpcId':
self.id = value
elif name == 'dhcpOptionsId':
self.dhcp_options_id = value
elif name == 'state':
self.state = value
elif name == 'cidrBlock':
self.cidr_block = value
elif name == 'isDefault':
self.is_default = True if value == 'true' else False
elif name == 'instanceTenancy':
self.instance_tenancy = value
else:
setattr(self, name, value)
def delete(self):
return self.connection.delete_vpc(self.id)
def _update(self, updated):
self.__dict__.update(updated.__dict__)
def update(self, validate=False, dry_run=False):
vpc_list = self.connection.get_all_vpcs(
[self.id],
dry_run=dry_run
)
if len(vpc_list):
updated_vpc = vpc_list[0]
self._update(updated_vpc)
elif validate:
raise ValueError('%s is not a valid VPC ID' % (self.id,))
return self.state
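# --- Usage sketch (editorial addition) ---
# A hedged example of how VPC objects are usually obtained and used through
# boto's VPCConnection; credentials and region are assumed to come from the
# normal boto configuration, and the CIDR block is only illustrative.
def _example_vpc_usage():
    from boto.vpc import VPCConnection
    conn = VPCConnection()
    vpc = conn.create_vpc('10.0.0.0/16')      # returns a VPC instance
    print('%s %s %s' % (vpc.id, vpc.state, vpc.cidr_block))
    vpc.update()                              # refresh state from the API
    vpc.delete()                              # same as conn.delete_vpc(vpc.id)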
| apache-2.0 |
ykaneko/quantum | quantum/db/routedserviceinsertion_db.py | 5 | 3637 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Kaiwei Fan, VMware, Inc
import sqlalchemy as sa
from sqlalchemy import event
from quantum.common import exceptions as qexception
from quantum.db import model_base
from quantum.extensions import routedserviceinsertion as rsi
class ServiceRouterBinding(model_base.BASEV2):
resource_id = sa.Column(sa.String(36),
primary_key=True)
resource_type = sa.Column(sa.String(36),
primary_key=True)
router_id = sa.Column(sa.String(36),
sa.ForeignKey('routers.id'),
nullable=False)
class AttributeException(qexception.QuantumException):
message = _("Resource type '%(resource_type)s' is longer "
"than %(maxlen)d characters")
@event.listens_for(ServiceRouterBinding.resource_type, 'set', retval=True)
def validate_resource_type(target, value, oldvalue, initiator):
"""Make sure the resource type fit the resource_type column."""
maxlen = ServiceRouterBinding.resource_type.property.columns[0].type.length
if len(value) > maxlen:
raise AttributeException(resource_type=value, maxlen=maxlen)
return value
class RoutedServiceInsertionDbMixin(object):
"""Mixin class to add router service insertion."""
def _process_create_resource_router_id(self, context, resource, model):
with context.session.begin(subtransactions=True):
db = ServiceRouterBinding(
resource_id=resource['id'],
resource_type=model.__tablename__,
router_id=resource[rsi.ROUTER_ID])
context.session.add(db)
return self._make_resource_router_id_dict(db, model)
def _extend_resource_router_id_dict(self, context, resource, model):
binding = self._get_resource_router_id_binding(
context, resource['resource_id'], model)
resource[rsi.ROUTER_ID] = binding['router_id']
def _get_resource_router_id_binding(self, context, resource_id, model):
query = self._model_query(context, ServiceRouterBinding)
query = query.filter(
ServiceRouterBinding.resource_id == resource_id,
ServiceRouterBinding.resource_type == model.__tablename__)
return query.first()
def _make_resource_router_id_dict(self, resource_router_binding, model,
fields=None):
resource = {'resource_id': resource_router_binding['resource_id'],
'resource_type': model.__tablename__,
rsi.ROUTER_ID: resource_router_binding[rsi.ROUTER_ID]}
return self._fields(resource, fields)
def _delete_resource_router_id_binding(self, context, resource_id, model):
with context.session.begin(subtransactions=True):
binding = self._get_resource_router_id_binding(
context, resource_id, model)
if binding:
context.session.delete(binding)
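# --- Illustrative usage sketch (not part of the original module) ---
# A service plugin mixing this class in would typically bind a freshly created
# resource to a router and store the binding. MyServiceModel, BasePlugin and
# create_my_service are hypothetical names used only for illustration.
#
# class MyServicePlugin(RoutedServiceInsertionDbMixin, BasePlugin):
#     def create_my_service(self, context, body):
#         resource = self._create_my_service_db(context, body)  # hypothetical helper
#         resource[rsi.ROUTER_ID] = body['my_service'][rsi.ROUTER_ID]
#         return self._process_create_resource_router_id(
#             context, resource, MyServiceModel)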
| apache-2.0 |
vdrey/Toolbox | Python/GHP/Source/my_ioctl_fuzzer.py | 3 | 2856 | import pickle
import sys
import random
from ctypes import *
kernel32 = windll.kernel32
# Defines for Win32 API Calls
GENERIC_READ = 0x80000000
GENERIC_WRITE = 0x40000000
OPEN_EXISTING = 0x3
# Open the pickle and retrieve the dictionary
fd = open(sys.argv[1], "rb")
master_list = pickle.load(fd)
ioctl_list = master_list["ioctl_list"]
device_list = master_list["device_list"]
fd.close()
# Now test that we can retrieve valid handles to all
# device names, any that don't pass we remove from our test cases
valid_devices = []
for device_name in device_list:
# Make sure the device is accessed properly
device_file = u"\\\\.\\%s" % device_name.split("\\")[::-1][0]
print "[*] Testing for device: %s" % device_file
driver_handle = kernel32.CreateFileW(device_file,GENERIC_READ|
GENERIC_WRITE,0,None,OPEN_EXISTING,0,None)
if driver_handle:
print "[*] Success! %s is a valid device!"
if device_file not in valid_devices:
valid_devices.append( device_file )
kernel32.CloseHandle( driver_handle )
else:
print "[*] Failed! %s NOT a valid device."
if not len(valid_devices):
print "[*] No valid devices found. Exiting..."
sys.exit(0)
# Now let's begin feeding the driver test cases until we can't bear it anymore!
# CTRL-C to exit the loop and stop fuzzing
while 1:
# Open the log file first
fd = open("my_ioctl_fuzzer.log","a")
# Pick a random device name
current_device = valid_devices[ random.randint(0, len(valid_devices)-1 ) ]
fd.write("[*] Fuzzing: %s" % current_device)
# Pick a random IOCTL code
current_ioctl = ioctl_list[ random.randint(0, len(ioctl_list)-1)]
fd.write("[*] With IOCTL: 0x%08x" % current_ioctl)
# Choose a random length
current_length = random.randint(0, 10000)
fd.write("[*] Buffer length: %d" % current_length)
# Let's test with a buffer of repeating A's
# Feel free to create your own test cases here
in_buffer = "A" * current_length
# Give the IOCTL run an out_buffer
out_buf = (c_char * current_length)()
bytes_returned = c_ulong(current_length)
# Obtain a handle
driver_handle = kernel32.CreateFileW(current_device, GENERIC_READ|
GENERIC_WRITE,0,None,OPEN_EXISTING,0,None)
fd.write("!!FUZZ!!")
# Run the test case
kernel32.DeviceIoControl( driver_handle, current_ioctl, in_buffer,
current_length, byref(out_buf),
current_length, byref(bytes_returned),
None )
fd.write( "[*] Test case finished. %d bytes returned.\n" % bytes_returned.value )
# Close the handle and carry on!
kernel32.CloseHandle( driver_handle )
fd.close() | mit |
Shaswat27/sympy | sympy/physics/quantum/state.py | 58 | 29186 | """Dirac notation for states."""
from __future__ import print_function, division
from sympy import (cacheit, conjugate, Expr, Function, integrate, oo, sqrt,
Tuple)
from sympy.core.compatibility import u, range
from sympy.printing.pretty.stringpict import stringPict
from sympy.physics.quantum.qexpr import QExpr, dispatch_method
__all__ = [
'KetBase',
'BraBase',
'StateBase',
'State',
'Ket',
'Bra',
'TimeDepState',
'TimeDepBra',
'TimeDepKet',
'Wavefunction'
]
#-----------------------------------------------------------------------------
# States, bras and kets.
#-----------------------------------------------------------------------------
# ASCII brackets
_lbracket = "<"
_rbracket = ">"
_straight_bracket = "|"
# Unicode brackets
# MATHEMATICAL ANGLE BRACKETS
_lbracket_ucode = u("\N{MATHEMATICAL LEFT ANGLE BRACKET}")
_rbracket_ucode = u("\N{MATHEMATICAL RIGHT ANGLE BRACKET}")
# LIGHT VERTICAL BAR
_straight_bracket_ucode = u("\N{LIGHT VERTICAL BAR}")
# Other options for unicode printing of <, > and | for Dirac notation.
# LEFT-POINTING ANGLE BRACKET
# _lbracket = u"\u2329"
# _rbracket = u"\u232A"
# LEFT ANGLE BRACKET
# _lbracket = u"\u3008"
# _rbracket = u"\u3009"
# VERTICAL LINE
# _straight_bracket = u"\u007C"
class StateBase(QExpr):
"""Abstract base class for general abstract states in quantum mechanics.
All other state classes defined will need to inherit from this class. It
carries the basic structure for all other states such as dual, _eval_adjoint
and label.
This is an abstract base class and you should not instantiate it directly,
instead use State.
"""
@classmethod
def _operators_to_state(self, ops, **options):
""" Returns the eigenstate instance for the passed operators.
This method should be overridden in subclasses. It will handle being
passed either an Operator instance or set of Operator instances. It
should return the corresponding state INSTANCE or simply raise a
NotImplementedError. See cartesian.py for an example.
"""
raise NotImplementedError("Cannot map operators to states in this class. Method not implemented!")
def _state_to_operators(self, op_classes, **options):
""" Returns the operators which this state instance is an eigenstate
of.
This method should be overridden in subclasses. It will be called on
state instances and be passed the operator classes that we wish to make
into instances. The state instance will then transform the classes
appropriately, or raise a NotImplementedError if it cannot return
operator instances. See cartesian.py for examples.
"""
raise NotImplementedError(
"Cannot map this state to operators. Method not implemented!")
@property
def operators(self):
"""Return the operator(s) that this state is an eigenstate of"""
from .operatorset import state_to_operators # import internally to avoid circular import errors
return state_to_operators(self)
def _enumerate_state(self, num_states, **options):
raise NotImplementedError("Cannot enumerate this state!")
def _represent_default_basis(self, **options):
return self._represent(basis=self.operators)
#-------------------------------------------------------------------------
# Dagger/dual
#-------------------------------------------------------------------------
@property
def dual(self):
"""Return the dual state of this one."""
return self.dual_class()._new_rawargs(self.hilbert_space, *self.args)
@classmethod
def dual_class(self):
"""Return the class used to construt the dual."""
raise NotImplementedError(
'dual_class must be implemented in a subclass'
)
def _eval_adjoint(self):
"""Compute the dagger of this state using the dual."""
return self.dual
#-------------------------------------------------------------------------
# Printing
#-------------------------------------------------------------------------
def _pretty_brackets(self, height, use_unicode=True):
# Return pretty printed brackets for the state
# Ideally, this could be done by pform.parens but it does not support the angled < and >
# Setup for unicode vs ascii
if use_unicode:
lbracket, rbracket = self.lbracket_ucode, self.rbracket_ucode
slash, bslash, vert = u('\N{BOX DRAWINGS LIGHT DIAGONAL UPPER RIGHT TO LOWER LEFT}'), \
u('\N{BOX DRAWINGS LIGHT DIAGONAL UPPER LEFT TO LOWER RIGHT}'), \
u('\N{BOX DRAWINGS LIGHT VERTICAL}')
else:
lbracket, rbracket = self.lbracket, self.rbracket
slash, bslash, vert = '/', '\\', '|'
# If height is 1, just return brackets
if height == 1:
return stringPict(lbracket), stringPict(rbracket)
# Make height even
height += (height % 2)
brackets = []
for bracket in lbracket, rbracket:
# Create left bracket
if bracket in set([_lbracket, _lbracket_ucode]):
bracket_args = [ ' ' * (height//2 - i - 1) +
slash for i in range(height // 2)]
bracket_args.extend(
[ ' ' * i + bslash for i in range(height // 2)])
# Create right bracket
elif bracket in set([_rbracket, _rbracket_ucode]):
bracket_args = [ ' ' * i + bslash for i in range(height // 2)]
bracket_args.extend([ ' ' * (
height//2 - i - 1) + slash for i in range(height // 2)])
# Create straight bracket
elif bracket in set([_straight_bracket, _straight_bracket_ucode]):
bracket_args = [vert for i in range(height)]
else:
raise ValueError(bracket)
brackets.append(
stringPict('\n'.join(bracket_args), baseline=height//2))
return brackets
def _sympystr(self, printer, *args):
contents = self._print_contents(printer, *args)
return '%s%s%s' % (self.lbracket, contents, self.rbracket)
def _pretty(self, printer, *args):
from sympy.printing.pretty.stringpict import prettyForm
# Get brackets
pform = self._print_contents_pretty(printer, *args)
lbracket, rbracket = self._pretty_brackets(
pform.height(), printer._use_unicode)
# Put together state
pform = prettyForm(*pform.left(lbracket))
pform = prettyForm(*pform.right(rbracket))
return pform
def _latex(self, printer, *args):
contents = self._print_contents_latex(printer, *args)
# The extra {} brackets are needed to get matplotlib's latex
# rendered to render this properly.
return '{%s%s%s}' % (self.lbracket_latex, contents, self.rbracket_latex)
class KetBase(StateBase):
"""Base class for Kets.
This class defines the dual property and the brackets for printing. This is
an abstract base class and you should not instantiate it directly, instead
use Ket.
"""
lbracket = _straight_bracket
rbracket = _rbracket
lbracket_ucode = _straight_bracket_ucode
rbracket_ucode = _rbracket_ucode
lbracket_latex = r'\left|'
rbracket_latex = r'\right\rangle '
@classmethod
def default_args(self):
return ("psi",)
@classmethod
def dual_class(self):
return BraBase
def __mul__(self, other):
"""KetBase*other"""
from sympy.physics.quantum.operator import OuterProduct
if isinstance(other, BraBase):
return OuterProduct(self, other)
else:
return Expr.__mul__(self, other)
def __rmul__(self, other):
"""other*KetBase"""
from sympy.physics.quantum.innerproduct import InnerProduct
if isinstance(other, BraBase):
return InnerProduct(other, self)
else:
return Expr.__rmul__(self, other)
#-------------------------------------------------------------------------
# _eval_* methods
#-------------------------------------------------------------------------
def _eval_innerproduct(self, bra, **hints):
"""Evaluate the inner product betweeen this ket and a bra.
This is called to compute <bra|ket>, where the ket is ``self``.
This method will dispatch to sub-methods having the format::
``def _eval_innerproduct_BraClass(self, **hints):``
Subclasses should define these methods (one for each BraClass) to
teach the ket how to take inner products with bras.
"""
return dispatch_method(self, '_eval_innerproduct', bra, **hints)
def _apply_operator(self, op, **options):
"""Apply an Operator to this Ket.
This method will dispatch to methods having the format::
``def _apply_operator_OperatorName(op, **options):``
Subclasses should define these methods (one for each OperatorName) to
teach the Ket how operators act on it.
Parameters
==========
op : Operator
The Operator that is acting on the Ket.
options : dict
A dict of key/value pairs that control how the operator is applied
to the Ket.
"""
return dispatch_method(self, '_apply_operator', op, **options)
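# --- Illustrative sketch (not part of the original sympy source) ---
# Both dispatch hooks above look up a method named after the class of the other
# operand. A hypothetical subclass could wire them up like this:
#
# class MyKet(Ket):
#     def _eval_innerproduct_MyBra(self, bra, **hints):
#         # <MyBra|MyKet>; assumes an orthonormal labelling scheme
#         from sympy import Integer
#         return Integer(1) if bra.label == self.label else Integer(0)
#
#     def _apply_operator_MyOperator(self, op, **options):
#         # teach the hypothetical MyOperator how it acts on this ket
#         return 2*self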
class BraBase(StateBase):
"""Base class for Bras.
This class defines the dual property and the brackets for printing. This
is an abstract base class and you should not instantiate it directly,
instead use Bra.
"""
lbracket = _lbracket
rbracket = _straight_bracket
lbracket_ucode = _lbracket_ucode
rbracket_ucode = _straight_bracket_ucode
lbracket_latex = r'\left\langle '
rbracket_latex = r'\right|'
@classmethod
def _operators_to_state(self, ops, **options):
state = self.dual_class().operators_to_state(ops, **options)
return state.dual
def _state_to_operators(self, op_classes, **options):
return self.dual._state_to_operators(op_classes, **options)
def _enumerate_state(self, num_states, **options):
dual_states = self.dual._enumerate_state(num_states, **options)
return [x.dual for x in dual_states]
@classmethod
def default_args(self):
return self.dual_class().default_args()
@classmethod
def dual_class(self):
return KetBase
def __mul__(self, other):
"""BraBase*other"""
from sympy.physics.quantum.innerproduct import InnerProduct
if isinstance(other, KetBase):
return InnerProduct(self, other)
else:
return Expr.__mul__(self, other)
def __rmul__(self, other):
"""other*BraBase"""
from sympy.physics.quantum.operator import OuterProduct
if isinstance(other, KetBase):
return OuterProduct(other, self)
else:
return Expr.__rmul__(self, other)
def _represent(self, **options):
"""A default represent that uses the Ket's version."""
from sympy.physics.quantum.dagger import Dagger
return Dagger(self.dual._represent(**options))
class State(StateBase):
"""General abstract quantum state used as a base class for Ket and Bra."""
pass
class Ket(State, KetBase):
"""A general time-independent Ket in quantum mechanics.
Inherits from State and KetBase. This class should be used as the base
class for all physical, time-independent Kets in a system. This class
and its subclasses will be the main classes that users will use for
expressing Kets in Dirac notation [1]_.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the
ket. This will usually be its symbol or its quantum numbers. For
time-dependent state, this will include the time.
Examples
========
Create a simple Ket and look at its properties::
>>> from sympy.physics.quantum import Ket, Bra
>>> from sympy import symbols, I
>>> k = Ket('psi')
>>> k
|psi>
>>> k.hilbert_space
H
>>> k.is_commutative
False
>>> k.label
(psi,)
Kets know about their associated bra::
>>> k.dual
<psi|
>>> k.dual_class()
<class 'sympy.physics.quantum.state.Bra'>
Take a linear combination of two kets::
>>> k0 = Ket(0)
>>> k1 = Ket(1)
>>> 2*I*k0 - 4*k1
2*I*|0> - 4*|1>
Compound labels are passed as tuples::
>>> n, m = symbols('n,m')
>>> k = Ket(n,m)
>>> k
|nm>
References
==========
.. [1] http://en.wikipedia.org/wiki/Bra-ket_notation
"""
@classmethod
def dual_class(self):
return Bra
class Bra(State, BraBase):
"""A general time-independent Bra in quantum mechanics.
Inherits from State and BraBase. A Bra is the dual of a Ket [1]_. This
class and its subclasses will be the main classes that users will use for
expressing Bras in Dirac notation.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the
ket. This will usually be its symbol or its quantum numbers. For
time-dependent state, this will include the time.
Examples
========
Create a simple Bra and look at its properties::
>>> from sympy.physics.quantum import Ket, Bra
>>> from sympy import symbols, I
>>> b = Bra('psi')
>>> b
<psi|
>>> b.hilbert_space
H
>>> b.is_commutative
False
Bras know about their dual Kets::
>>> b.dual
|psi>
>>> b.dual_class()
<class 'sympy.physics.quantum.state.Ket'>
Like Kets, Bras can have compound labels and be manipulated in a similar
manner::
>>> n, m = symbols('n,m')
>>> b = Bra(n,m) - I*Bra(m,n)
>>> b
-I*<mn| + <nm|
Symbols in a Bra can be substituted using ``.subs``::
>>> b.subs(n,m)
<mm| - I*<mm|
References
==========
.. [1] http://en.wikipedia.org/wiki/Bra-ket_notation
"""
@classmethod
def dual_class(self):
return Ket
#-----------------------------------------------------------------------------
# Time dependent states, bras and kets.
#-----------------------------------------------------------------------------
class TimeDepState(StateBase):
"""Base class for a general time-dependent quantum state.
This class is used as a base class for any time-dependent state. The main
difference between this class and the time-independent state is that this
class takes a second argument that is the time in addition to the usual
label argument.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the ket. This
will usually be its symbol or its quantum numbers. For time-dependent
state, this will include the time as the final argument.
"""
#-------------------------------------------------------------------------
# Initialization
#-------------------------------------------------------------------------
@classmethod
def default_args(self):
return ("psi", "t")
#-------------------------------------------------------------------------
# Properties
#-------------------------------------------------------------------------
@property
def label(self):
"""The label of the state."""
return self.args[:-1]
@property
def time(self):
"""The time of the state."""
return self.args[-1]
#-------------------------------------------------------------------------
# Printing
#-------------------------------------------------------------------------
def _print_time(self, printer, *args):
return printer._print(self.time, *args)
_print_time_repr = _print_time
_print_time_latex = _print_time
def _print_time_pretty(self, printer, *args):
pform = printer._print(self.time, *args)
return pform
def _print_contents(self, printer, *args):
label = self._print_label(printer, *args)
time = self._print_time(printer, *args)
return '%s;%s' % (label, time)
def _print_label_repr(self, printer, *args):
label = self._print_sequence(self.label, ',', printer, *args)
time = self._print_time_repr(printer, *args)
return '%s,%s' % (label, time)
def _print_contents_pretty(self, printer, *args):
label = self._print_label_pretty(printer, *args)
time = self._print_time_pretty(printer, *args)
return printer._print_seq((label, time), delimiter=';')
def _print_contents_latex(self, printer, *args):
label = self._print_sequence(
self.label, self._label_separator, printer, *args)
time = self._print_time_latex(printer, *args)
return '%s;%s' % (label, time)
class TimeDepKet(TimeDepState, KetBase):
"""General time-dependent Ket in quantum mechanics.
This inherits from ``TimeDepState`` and ``KetBase`` and is the main class
that should be used for Kets that vary with time. Its dual is a
``TimeDepBra``.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the ket. This
will usually be its symbol or its quantum numbers. For time-dependent
state, this will include the time as the final argument.
Examples
========
Create a TimeDepKet and look at its attributes::
>>> from sympy.physics.quantum import TimeDepKet
>>> k = TimeDepKet('psi', 't')
>>> k
|psi;t>
>>> k.time
t
>>> k.label
(psi,)
>>> k.hilbert_space
H
TimeDepKets know about their dual bra::
>>> k.dual
<psi;t|
>>> k.dual_class()
<class 'sympy.physics.quantum.state.TimeDepBra'>
"""
@classmethod
def dual_class(self):
return TimeDepBra
class TimeDepBra(TimeDepState, BraBase):
"""General time-dependent Bra in quantum mechanics.
This inherits from TimeDepState and BraBase and is the main class that
should be used for Bras that vary with time. Its dual is a TimeDepKet.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the ket. This
will usually be its symbol or its quantum numbers. For time-dependent
state, this will include the time as the final argument.
Examples
========
>>> from sympy.physics.quantum import TimeDepBra
>>> from sympy import symbols, I
>>> b = TimeDepBra('psi', 't')
>>> b
<psi;t|
>>> b.time
t
>>> b.label
(psi,)
>>> b.hilbert_space
H
>>> b.dual
|psi;t>
"""
@classmethod
def dual_class(self):
return TimeDepKet
class Wavefunction(Function):
"""Class for representations in continuous bases
This class takes an expression and coordinates in its constructor. It can
be used to easily calculate normalizations and probabilities.
Parameters
==========
expr : Expr
The expression representing the functional form of the w.f.
coords : Symbol or tuple
The coordinates to be integrated over, and their bounds
Examples
========
Particle in a box, specifying bounds in the more primitive way of using
Piecewise:
>>> from sympy import Symbol, Piecewise, pi, N
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x = Symbol('x', real=True)
>>> n = 1
>>> L = 1
>>> g = Piecewise((0, x < 0), (0, x > L), (sqrt(2//L)*sin(n*pi*x/L), True))
>>> f = Wavefunction(g, x)
>>> f.norm
1
>>> f.is_normalized
True
>>> p = f.prob()
>>> p(0)
0
>>> p(L)
0
>>> p(0.5)
2
>>> p(0.85*L)
2*sin(0.85*pi)**2
>>> N(p(0.85*L))
0.412214747707527
Additionally, you can specify the bounds of the function and the indices in
a more compact way:
>>> from sympy import symbols, pi, diff
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sqrt(2/L)*sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.norm
1
>>> f(L+1)
0
>>> f(L-1)
sqrt(2)*sin(pi*n*(L - 1)/L)/sqrt(L)
>>> f(-1)
0
>>> f(0.85)
sqrt(2)*sin(0.85*pi*n/L)/sqrt(L)
>>> f(0.85, n=1, L=1)
sqrt(2)*sin(0.85*pi)
>>> f.is_commutative
False
All arguments are automatically sympified, so you can define the variables
as strings rather than symbols:
>>> expr = x**2
>>> f = Wavefunction(expr, 'x')
>>> type(f.variables[0])
<class 'sympy.core.symbol.Symbol'>
Derivatives of Wavefunctions will return Wavefunctions:
>>> diff(f, x)
Wavefunction(2*x, x)
"""
#Any passed tuples for coordinates and their bounds need to be
#converted to Tuples before Function's constructor is called, to
#avoid errors from calling is_Float in the constructor
def __new__(cls, *args, **options):
new_args = [None for i in args]
ct = 0
for arg in args:
if isinstance(arg, tuple):
new_args[ct] = Tuple(*arg)
else:
new_args[ct] = arg
ct += 1
return super(Function, cls).__new__(cls, *new_args, **options)
def __call__(self, *args, **options):
var = self.variables
if len(args) != len(var):
raise NotImplementedError(
"Incorrect number of arguments to function!")
ct = 0
#If the passed value is outside the specified bounds, return 0
for v in var:
lower, upper = self.limits[v]
#Do the comparison to limits only if the passed symbol is actually
#a symbol present in the limits;
#Had problems with a comparison of x > L
if isinstance(args[ct], Expr) and \
not (lower in args[ct].free_symbols
or upper in args[ct].free_symbols):
continue
if (args[ct] < lower) == True or (args[ct] > upper) == True:
return 0
ct += 1
expr = self.expr
#Allows user to make a call like f(2, 4, m=1, n=1)
for symbol in list(expr.free_symbols):
if str(symbol) in options.keys():
val = options[str(symbol)]
expr = expr.subs(symbol, val)
return expr.subs(zip(var, args))
def _eval_derivative(self, symbol):
expr = self.expr
deriv = expr._eval_derivative(symbol)
return Wavefunction(deriv, *self.args[1:])
def _eval_conjugate(self):
return Wavefunction(conjugate(self.expr), *self.args[1:])
def _eval_transpose(self):
return self
@property
def free_symbols(self):
return self.expr.free_symbols
@property
def is_commutative(self):
"""
Override Function's is_commutative so that order is preserved in
represented expressions
"""
return False
@classmethod
def eval(self, *args):
return None
@property
def variables(self):
"""
Return the coordinates which the wavefunction depends on
Examples
========
>>> from sympy.physics.quantum.state import Wavefunction
>>> from sympy import symbols
>>> x,y = symbols('x,y')
>>> f = Wavefunction(x*y, x, y)
>>> f.variables
(x, y)
>>> g = Wavefunction(x*y, x)
>>> g.variables
(x,)
"""
var = [g[0] if isinstance(g, Tuple) else g for g in self._args[1:]]
return tuple(var)
@property
def limits(self):
"""
Return the limits of the coordinates which the w.f. depends on. If no
limits are specified, defaults to ``(-oo, oo)``.
Examples
========
>>> from sympy.physics.quantum.state import Wavefunction
>>> from sympy import symbols
>>> x, y = symbols('x, y')
>>> f = Wavefunction(x**2, (x, 0, 1))
>>> f.limits
{x: (0, 1)}
>>> f = Wavefunction(x**2, x)
>>> f.limits
{x: (-oo, oo)}
>>> f = Wavefunction(x**2 + y**2, x, (y, -1, 2))
>>> f.limits
{x: (-oo, oo), y: (-1, 2)}
"""
limits = [(g[1], g[2]) if isinstance(g, Tuple) else (-oo, oo)
for g in self._args[1:]]
return dict(zip(self.variables, tuple(limits)))
@property
def expr(self):
"""
Return the expression which is the functional form of the Wavefunction
Examples
========
>>> from sympy.physics.quantum.state import Wavefunction
>>> from sympy import symbols
>>> x, y = symbols('x, y')
>>> f = Wavefunction(x**2, x)
>>> f.expr
x**2
"""
return self._args[0]
@property
def is_normalized(self):
"""
Returns true if the Wavefunction is properly normalized
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sqrt(2/L)*sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.is_normalized
True
"""
return (self.norm == 1.0)
@property
@cacheit
def norm(self):
"""
Return the normalization of the specified functional form.
This function integrates over the coordinates of the Wavefunction, with
the bounds specified.
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sqrt(2/L)*sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.norm
1
>>> g = sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.norm
sqrt(2)*sqrt(L)/2
"""
exp = self.expr*conjugate(self.expr)
var = self.variables
limits = self.limits
for v in var:
curr_limits = limits[v]
exp = integrate(exp, (v, curr_limits[0], curr_limits[1]))
return sqrt(exp)
def normalize(self):
"""
Return a normalized version of the Wavefunction
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x = symbols('x', real=True)
>>> L = symbols('L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.normalize()
Wavefunction(sqrt(2)*sin(pi*n*x/L)/sqrt(L), (x, 0, L))
"""
const = self.norm
if const == oo:
raise NotImplementedError("The function is not normalizable!")
else:
return Wavefunction((const)**(-1)*self.expr, *self.args[1:])
def prob(self):
"""
Return the absolute magnitude of the w.f., `|\psi(x)|^2`
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', real=True)
>>> n = symbols('n', integer=True)
>>> g = sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.prob()
Wavefunction(sin(pi*n*x/L)**2, x)
"""
return Wavefunction(self.expr*conjugate(self.expr), *self.variables)
| bsd-3-clause |
gawel/irc3 | irc3/plugins/feeds.py | 1 | 8429 | # -*- coding: utf-8 -*-
import os
import time
import irc3
import datetime
from irc3.compat import asyncio
from concurrent.futures import ThreadPoolExecutor
from operator import itemgetter
__doc__ = '''
==========================================
:mod:`irc3.plugins.feeds` Feeds plugin
==========================================
Send a notification on channel on new feed entry.
Your config must look like this:
.. code-block:: ini
[bot]
includes =
irc3.plugins.feeds
[irc3.plugins.feeds]
channels = #irc3 # global channel to notify
delay = 5 # delay to check feeds
directory = ~/.irc3/feeds # directory to store feeds
hook = irc3.plugins.feeds.default_hook # dotted name to a callable
fmt = [{name}] {entry.title} - {entry.link} # formatter
# some feeds: name = url
github/irc3 = https://github.com/gawel/irc3/commits/master.atom#irc3
# custom formatter for the feed
github/irc3.fmt = [{feed.name}] New commit: {entry.title} - {entry.link}
# custom channels
github/irc3.channels = #irc3dev #irc3
# custom delay
github/irc3.delay = 10
Hook is a dotted name referring to a callable (function or class) which takes a
list of entries as argument. It should yield only the entries you really want to show:
.. code-block:: python
>>> def hook(entries):
... for entry in entries:
... if 'something bad' not in entry.title:
... yield entry
>>> class Hook:
... def __init__(self, bot):
... self.bot = bot
... def __call__(self, entries):
... for entry in entries:
... if 'something bad' not in entry.title:
... yield entry
Here is a more complete hook used on freenode#irc3:
.. literalinclude:: ../../examples/freenode_irc3.py
:pyobject: FeedsHook
'''
def default_hook(entries):
"""Default hook called for each entry"""
return entries
def default_dispatcher(bot): # pragma: no cover
"""Default messages dispatcher"""
def dispatcher(messages):
bot.call_many('privmsg', messages)
return dispatcher
def fetch(args):
"""fetch a feed"""
session = args['session']
for feed, filename in zip(args['feeds'], args['filenames']):
try:
resp = session.get(feed, timeout=5)
content = resp.content
except Exception: # pragma: no cover
pass
else:
with open(filename, 'wb') as fd:
fd.write(content)
return args['name']
def parse(feedparser, args):
"""parse a feed using feedparser"""
entries = []
args = irc3.utils.Config(args)
max_date = datetime.datetime.now() - datetime.timedelta(days=2)
for filename in args['filenames']:
try:
with open(filename + '.updated') as fd:
updated = fd.read().strip()
except (OSError, IOError):
updated = '0'
feed = feedparser.parse(filename)
for e in feed.entries:
if e.updated <= updated:
# skip already sent entries
continue
try:
updated_parsed = e.updated_parsed
except AttributeError:
continue
if datetime.datetime(*updated_parsed[:7]) < max_date:
# skip entries older than 2 days
continue
e['filename'] = filename
e['feed'] = args
entries.append((e.updated, e))
if entries:
entries = sorted(entries, key=itemgetter(0))
with open(filename + '.updated', 'w') as fd:
fd.write(str(entries[-1][0]))
return entries
@irc3.plugin
class Feeds:
"""Feeds plugin"""
PoolExecutor = ThreadPoolExecutor
headers = {
'User-Agent': 'python-requests/irc3/feeds',
'Cache-Control': 'max-age=0',
'Pragma': 'no-cache',
}
def __init__(self, bot):
bot.feeds = self
self.bot = bot
config = bot.config.get(__name__, {})
self.directory = os.path.expanduser(
config.get('directory', '~/.irc3/feeds'))
if not os.path.isdir(self.directory):
os.makedirs(self.directory)
hook = config.get('hook', default_hook)
hook = irc3.utils.maybedotted(hook)
if isinstance(hook, type):
hook = hook(bot)
self.hook = hook
dispatcher = config.get('dispatcher', default_dispatcher)
dispatcher = irc3.utils.maybedotted(dispatcher)
self.dispatcher = dispatcher(bot)
self.max_workers = int(config.get('max_workers', 5))
delay = int(config.get('delay', 5))
self.delay = delay * 60
feed_config = dict(
fmt=config.get('fmt', '[{feed.name}] {entry.title} {entry.link}'),
delay=delay,
channels=config.get('channels', ''),
headers=self.headers,
time=0,
)
self.feeds = {}
for name, feed in config.items():
if str(feed).startswith('http'):
feeds = []
filenames = []
for i, feed in enumerate(irc3.utils.as_list(feed)):
filename = os.path.join(self.directory,
name.replace('/', '_'))
filenames.append('{0}.{1}.feed'.format(filename, i))
feeds.append(feed)
feed = dict(
feed_config,
name=str(name),
feeds=feeds,
filenames=filenames,
**irc3.utils.extract_config(config, str(name))
)
feed['delay'] = feed['delay'] * 60
channels = irc3.utils.as_list(feed['channels'])
feed['channels'] = [irc3.utils.as_channel(c) for c in channels]
self.bot.log.debug(feed)
self.feeds[name] = feed
self.imports()
def connection_made(self):
"""Initialize checkings"""
self.bot.loop.call_later(10, self.update)
def imports(self):
"""show some warnings if needed"""
try:
import feedparser
self.feedparser = feedparser
except ImportError: # pragma: no cover
self.bot.log.critical('feedparser is not installed')
self.feedparser = None
try:
import requests
except ImportError: # pragma: no cover
self.bot.log.critical('requests is not installed')
self.session = None
else:
self.session = requests.Session()
self.session.headers.update(self.headers)
def parse(self, *args):
"""parse pre-fetched feeds and notify new entries"""
entries = []
for feed in self.feeds.values():
self.bot.log.debug('Parsing feed %s', feed['name'])
entries.extend(parse(self.feedparser, feed))
def messages():
for entry in self.hook([e for u, e in sorted(entries)]):
if entry:
feed = entry.feed
message = feed['fmt'].format(feed=feed, entry=entry)
for c in feed['channels']:
yield c, message
self.dispatcher(messages())
def update_time(self, future):
name = future.result()
self.bot.log.debug('Feed %s fetched', name)
feed = self.feeds[name]
feed['time'] = time.time()
def update(self):
"""update feeds"""
loop = self.bot.loop
loop.call_later(self.delay, self.update)
now = time.time()
session = self.session
feeds = [dict(f, session=session) for f in self.feeds.values()
if f['time'] < now - f['delay']]
if feeds:
self.bot.log.info('Fetching feeds %s',
', '.join([f['name'] for f in feeds]))
tasks = []
for feed in feeds:
task = loop.run_in_executor(None, fetch, feed)
task.add_done_callback(self.update_time)
tasks.append(task)
task = self.bot.create_task(
asyncio.wait(tasks, timeout=len(feeds) * 2, loop=loop))
task.add_done_callback(self.parse)
| mit |
tortxof/OpenBazaar | features/test_util.py | 4 | 2112 | import json
import time
from tornado.ioloop import IOLoop
from tornado import gen
from tornado.websocket import websocket_connect
from node.db_store import Obdb
def ip_address(i):
return '127.0.0.%s' % str(i + 1)
def nickname(i):
return ''
def get_db_path(i):
return 'db/ob-test-%s.db' % i
def node_uri(node_index):
return 'tcp://127.0.0.%s:12345' % str(node_index + 1)
def set_store_description(i):
ws_send(i, 'update_settings',
{'settings':
{'storeDescription': store_description(i),
'nickname': nickname(i)}})
def store_description(i):
return 'store %s' % i
def remove_peers_from_db(i):
Obdb(get_db_path(i)).delete_entries('peers')
def node_to_ws_port(node_index):
return node_index + 8888
def ws_connect(node_index):
port = str(node_to_ws_port(node_index))
@gen.coroutine
def client():
client = yield websocket_connect('ws://localhost:%s/ws' % port)
message = yield client.read_message()
raise gen.Return(json.loads(message))
return IOLoop.current().run_sync(client)
def ws_send_raw(port, string):
@gen.coroutine
def client():
client = yield websocket_connect('ws://localhost:%s/ws' % port)
# skip myself message
message = yield client.read_message()
client.write_message(json.dumps(string))
message = yield client.read_message()
raise gen.Return(json.loads(message))
return IOLoop.current().run_sync(client)
def ws_send(node_index, command, params=None):
if params is None:
params = {}
port = node_to_ws_port(node_index)
cmd = {'command': command,
'id': 1,
'params': params}
ret = ws_send_raw(port, cmd)
time.sleep(0.1)
return ret
def ws_receive_myself(node_index):
port = node_to_ws_port(node_index)
@gen.coroutine
def client():
client = yield websocket_connect('ws://localhost:%s/ws' % port)
message = yield client.read_message()
raise gen.Return(json.loads(message))
return IOLoop.current().run_sync(client)
| mit |
liorvh/raspberry_pwn | src/pentest/fimap/language.py | 8 | 26222 | #
# This file is part of fimap.
#
# Copyright(c) 2009-2010 Iman Karim([email protected]).
# http://fimap.googlecode.com
#
# This file may be licensed under the terms of of the
# GNU General Public License Version 2 (the ``GPL'').
#
# Software distributed under the License is distributed
# on an ``AS IS'' basis, WITHOUT WARRANTY OF ANY KIND, either
# express or implied. See the GPL for the specific language
# governing rights and limitations.
#
# You should have received a copy of the GPL along with this
# program. If not, go to http://www.gnu.org/licenses/gpl.html
# or write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
import xml.dom.minidom
import base64
import sys, os
from baseClass import baseClass
from baseTools import baseTools
import random
def getXMLNode(item, nodename):
for child in item.childNodes:
if (child.nodeName != "#text"):
if (child.nodeName == nodename):
return(child)
return(None)
def getXMLNodes(item, nodename):
ret = []
for child in item.childNodes:
if (child.nodeName != "#text"):
if (child.nodeName == nodename):
ret.append(child)
return(ret)
def getText(nodelist):
rc = ""
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc = rc + node.data
return rc
def convertString(txt, isBase64):
ret = None
if isBase64:
ret = base64.b64decode(txt)
else:
ret = str(txt)
return(ret)
class XML2Config(baseClass):
def _load(self):
self.langsets = {}
self.xml_file = os.path.join(sys.path[0], "config", "generic.xml")
self.XML_Generic = None
self.XML_Rootitem = None
self.version = -1
self.relative_files = []
self.absolute_files = []
self.remote_files = []
self.log_files = []
self.blind_files = []
self.blind_min = 0
self.blind_max = 0
self.commandConcat_unix = None
self.shellquiz_code_unix = None
self.kernelversion_code_unix = None
self.currentdir_code_unix = None
self.currentuser_code_unix = None
self.cd_code_unix = None
self.commandConcat_win = None
self.shellquiz_code_win = None
self.kernelversion_code_win = None
self.currentdir_code_win = None
self.currentuser_code_win = None
self.cd_code_win = None
self.__init_xmlresult()
#sys.exit(0)
def __init_xmlresult(self):
xmlfile = self.xml_file
if (os.path.exists(xmlfile)):
self.XML_Generic = xml.dom.minidom.parse(xmlfile)
self.XML_Rootitem = self.XML_Generic.firstChild
self.version = int(self.XML_Rootitem.getAttribute("revision"))
rel_node = getXMLNode(self.XML_Rootitem, "relative_files")
rel_files = getXMLNodes(rel_node, "file")
for f in rel_files:
self.relative_files.append(fiFile(f, self.config))
abs_node = getXMLNode(self.XML_Rootitem, "absolute_files")
abs_files = getXMLNodes(abs_node, "file")
for f in abs_files:
self.absolute_files.append(fiFile(f, self.config))
rem_node = getXMLNode(self.XML_Rootitem, "remote_files")
rem_files = getXMLNodes(rem_node, "file")
for f in rem_files:
self.remote_files.append(fiFile(f, self.config))
log_node = getXMLNode(self.XML_Rootitem, "log_files")
log_files = getXMLNodes(log_node, "file")
for f in log_files:
self.log_files.append(fiFile(f, self.config))
blind_node = getXMLNode(self.XML_Rootitem, "blind_files")
mindepth = blind_node.getAttribute("mindepth")
maxdepth = blind_node.getAttribute("maxdepth")
try:
mindepth = int(mindepth)
maxdepth = int(maxdepth)
except:
print "Mindepth and Maxdepth for blindmode have non-integer values!"
print "Fix it in the generic.xml!"
print "Committing suicide..."
sys.exit(1)
if (mindepth > maxdepth):
print "Logic isn't your best friend eh?"
print "The mindepth value is greater than the maxdepth value!"
print "Fix that in the generic.xml!"
print "Committing suicide..."
sys.exit(1)
self._log("Mindepth (%d) and Maxdepth (%d) loaded from generic.xml."%(mindepth, maxdepth), self.LOG_DEBUG)
self.blind_min = mindepth
self.blind_max = maxdepth
blind_files = getXMLNodes(blind_node, "file")
for f in blind_files:
self.blind_files.append(fiFile(f, self.config))
methods_node = getXMLNode(self.XML_Rootitem, "methods")
unix_node = getXMLNode(methods_node, "unix")
self.commandConcat_unix = str(unix_node.getAttribute("concatcommand"))
quiz_node = getXMLNode(unix_node, "shellquiz")
self.shellquiz_code_unix = base64.b64decode(quiz_node.getAttribute("source"))
kernel_node = getXMLNode(unix_node, "kernelversion")
self.kernelversion_code_unix = str(kernel_node.getAttribute("source"))
curdir_node = getXMLNode(unix_node, "currentdir")
self.currentdir_code_unix = str(curdir_node.getAttribute("source"))
curusr_node = getXMLNode(unix_node, "currentuser")
self.currentuser_code_unix = str(curusr_node.getAttribute("source"))
cd_node = getXMLNode(unix_node, "cd")
self.cd_code_unix = str(cd_node.getAttribute("source"))
win_node = getXMLNode(methods_node, "windows")
self.commandConcat_win = str(win_node.getAttribute("concatcommand"))
quiz_node = getXMLNode(win_node, "shellquiz")
self.shellquiz_code_win = base64.b64decode(quiz_node.getAttribute("source"))
kernel_node = getXMLNode(win_node, "kernelversion")
self.kernelversion_code_win = str(kernel_node.getAttribute("source"))
curdir_node = getXMLNode(win_node, "currentdir")
self.currentdir_code_win = str(curdir_node.getAttribute("source"))
curusr_node = getXMLNode(win_node, "currentuser")
self.currentuser_code_win = str(curusr_node.getAttribute("source"))
cd_node = getXMLNode(win_node, "cd")
self.cd_code_win = str(cd_node.getAttribute("source"))
self.__loadLanguageSets()
else:
print "generic.xml file not found! This file is very important!"
sys.exit(1)
def getRealFile(self):
return(self.xml_file)
def __loadLanguageSets(self):
langnodes = getXMLNode(self.XML_Rootitem, "languagesets")
for c in langnodes.childNodes:
if (c.nodeName == "language"):
langname = str(c.getAttribute("name"))
langfile = str(c.getAttribute("langfile"))
langClass = baseLanguage(langname, langfile, self.config)
self.langsets[langname] = langClass
self._log("Loaded XML-LD for '%s' at revision %d by %s" %(langname, langClass.getRevision(), langClass.getAutor()), self.LOG_DEBUG)
def getVersion(self):
return(self.version)
def generateShellQuiz(self, isUnix=True):
ret = None
if (isUnix):
exec(self.shellquiz_code_unix)
else:
exec(self.shellquiz_code_win)
return(ret)
def getAllLangSets(self):
return(self.langsets)
def getAllReadfileRegex(self):
ret = []
langs = self.getAllLangSets()
for k,v in langs.items():
readfile_regex = v.getReadfileDetectors()
for reg in readfile_regex:
ret.append((k, reg))
return(ret)
def getAllSniperRegex(self):
ret = []
langs = self.getAllLangSets()
for k,v in langs.items():
readfile_regex = v.getSniper()
ret.append((k, readfile_regex))
return(ret)
def getKernelCode(self, isUnix=True):
if (isUnix):
return(self.kernelversion_code_unix)
else:
return(self.kernelversion_code_win)
def getRelativeFiles(self, lang=None):
ret = []
for f in self.relative_files:
ret.append(f)
if (lang != None):
for f in self.langsets[lang].getRelativeFiles():
ret.append(f)
return(ret)
def getAbsoluteFiles(self, lang=None):
ret = []
for f in self.absolute_files:
ret.append(f)
if (lang != None):
for f in self.langsets[lang].getAbsoluteFiles():
ret.append(f)
return(ret)
def getLogFiles(self, lang=None):
ret = []
for f in self.log_files:
ret.append(f)
if (lang != None):
for f in self.langsets[lang].getLogFiles():
ret.append(f)
return(ret)
def getRemoteFiles(self, lang=None):
ret = []
for f in self.remote_files:
ret.append(f)
if (lang != None):
for f in self.langsets[lang].getRemoteFiles():
ret.append(f)
return(ret)
def getBlindFiles(self):
ret = []
for f in self.blind_files:
ret.append(f)
return(ret)
def getBlindMax(self):
return(self.blind_max)
def getBlindMin(self):
return(self.blind_min)
def getCurrentDirCode(self, isUnix=True):
if (isUnix):
return(self.currentdir_code_unix)
else:
return(self.currentdir_code_win)
def getCurrentUserCode(self, isUnix=True):
if (isUnix):
return(self.currentuser_code_unix)
else:
return(self.currentuser_code_win)
def getConcatSymbol(self, isUnix=True):
if (isUnix):
return(self.commandConcat_unix)
else:
return(self.commandConcat_win)
def concatCommands(self, commands, isUnix=True):
symbol = " %s " %(self.getConcatSymbol(isUnix))
return(symbol.join(commands))
def generateChangeDirectoryCommand(self, Directory, isUnix=True):
code = self.cd_code_unix
if (not isUnix):
code = self.cd_code_win
code = code.replace("__DIR__", Directory)
return(code)
class baseLanguage(baseTools):
def __init__(self, langname, langfile, config):
self.initLog(config)
langfile = os.path.join(sys.path[0], "config", langfile)
self.RealFile = langfile
self.XML_Langfile = None
self.XML_Rootitem = None
if (os.path.exists(langfile)):
self.XML_Langfile = xml.dom.minidom.parse(langfile)
self.XML_Rootitem = self.XML_Langfile.firstChild
else:
print "%s file not found!" %(langfile)
sys.exit(1)
self.LanguageName = langname
self.XMLRevision = None
self.XMLAutor = None
self.relative_files = []
self.absolute_files = []
self.remote_files = []
self.log_files = []
self.exec_methods = []
self.payloads = []
self.sniper_regex = None
self.quiz_function = None
self.print_function = None
self.eval_kickstarter = None
self.write_file = None
self.detector_include = []
self.detector_readfile = []
self.detector_extentions = []
self.do_force_inclusion_test = False
self.__populate()
def getLangFile(self):
return(self.RealFile)
def getName(self):
return(self.LanguageName)
def getVersion(self):
return(self.XMLRevision)
def getRevision(self):
return(self.XMLRevision)
def getAutor(self):
return(self.XMLAutor)
def getSniper(self):
return(self.sniper_regex)
def doForceInclusionTest(self):
return(self.do_force_inclusion_test)
def getExecMethods(self):
return(self.exec_methods)
def getPayloads(self):
return(self.payloads)
def getRelativeFiles(self):
return(self.relative_files)
def getAbsoluteFiles(self):
return(self.absolute_files)
def getRemoteFiles(self):
return(self.remote_files)
def getLogFiles(self):
return(self.log_files)
def getIncludeDetectors(self):
return(self.detector_include)
def getReadfileDetectors(self):
return(self.detector_readfile)
def getExtentions(self):
return(self.detector_extentions)
def getQuizSource(self):
return(self.quiz_function)
def generateWriteFileCode(self, remotefilepath, mode, b64data):
code = self.write_file
code = code.replace("__FILE__", remotefilepath)
code = code.replace("__MODE__", mode)
code = code.replace("__B64_DATA__", b64data)
return(code)
def generateQuiz(self):
ret = None
try:
exec(self.quiz_function)
except:
boxarr = []
boxheader = "[!!!] BAAAAAAAAAAAAAAAANG - Welcome back to reality [!!!]"
boxarr.append("The quiz function defined in one of the XML-Language-Definition files")
boxarr.append("just failed! If you are coding your own XML then fix that!")
boxarr.append("If not please report this bug at http://fimap.googlecode.com (!) Thanks!")
self.drawBox(boxheader, boxarr)
raise
return(ret)
def generatePrint(self, data):
ret = self.print_function.replace("__PLACEHOLDER__", data)
return(ret)
def getEvalKickstarter(self):
return(self.eval_kickstarter)
def __populate(self):
self.XMLRevision = int(self.XML_Rootitem.getAttribute("revision"))
self.XMLAutor = self.XML_Rootitem.getAttribute("autor")
self.do_force_inclusion_test = self.XML_Rootitem.getAttribute("force_inclusion_test") == "1"
rel_node = getXMLNode(self.XML_Rootitem, "relative_files")
rel_files = getXMLNodes(rel_node, "file")
for f in rel_files:
self.relative_files.append(fiFile(f, self.config))
abs_node = getXMLNode(self.XML_Rootitem, "absolute_files")
abs_files = getXMLNodes(abs_node, "file")
for f in abs_files:
self.absolute_files.append(fiFile(f, self.config))
rem_node = getXMLNode(self.XML_Rootitem, "remote_files")
rem_files = getXMLNodes(rem_node, "file")
for f in rem_files:
self.remote_files.append(fiFile(f, self.config))
log_node = getXMLNode(self.XML_Rootitem, "log_files")
log_files = getXMLNodes(log_node, "file")
for f in log_files:
self.log_files.append(fiFile(f, self.config))
exec_methods = getXMLNode(self.XML_Rootitem, "exec_methods")
exec_nodes = getXMLNodes(exec_methods, "exec")
for f in exec_nodes:
self.exec_methods.append(fiExecMethod(f, self.config))
if (len(self.exec_methods) == 0):
self._log("XML-LD has no exec-method(s) defined!", self.LOG_ERROR)
self._log(" This XML-LD can't be used to go into exploit mode!", self.LOG_ERROR)
payloads = getXMLNode(self.XML_Rootitem, "payloads")
payload_nodes = getXMLNodes(payloads, "payload")
for f in payload_nodes:
self.payloads.append(fiPayload(f, self.config, self.getName()))
if (len(self.payloads) == 0):
self._log("XML-LD has no payload(s) defined!", self.LOG_DEBUG)
self.sniper_regex = str(getXMLNode(self.XML_Rootitem, "snipe").getAttribute("regex"))
if (self.sniper_regex == None or self.sniper_regex.strip() == ""):
self._log("XML-LD has no sniper regex! So this XML-LD can only be used in blind-mode!", self.LOG_WARN)
methods_node = getXMLNode(self.XML_Rootitem, "methods")
quiz_node = getXMLNode(methods_node, "quiz")
if (quiz_node == None):
self._log("FATAL! XML-Language-Definition (%s) has no quiz function defined!"%(self.getName()), self.LOG_ERROR)
self._log("Please fix that in order to run fimap without problems!", self.LOG_ERROR)
self._log("Committing suicide :-O", self.LOG_ERROR)
sys.exit(1)
else:
isbase64 = quiz_node.getAttribute("isbase64")=="1"
quiz_code = quiz_node.getAttribute("source")
quiz_code = convertString(quiz_code, isbase64)
if (quiz_code == None or quiz_code.strip() == ""):
self._log("FATAL! XML-Language-Definition (%s) has no quiz function defined!"%(self.getName()), self.LOG_ERROR)
self._log("Please fix that in order to run fimap without problems!", self.LOG_ERROR)
self._log("Committing suicide :-O", self.LOG_ERROR)
sys.exit(1)
self.quiz_function = str(quiz_code)
print_node = getXMLNode(methods_node, "print")
if (print_node == None):
self._log("FATAL! XML-Language-Definition (%s) has no print function defined!"%(self.getName()), self.LOG_ERROR)
self._log("Please fix that in order to run fimap without problems!", self.LOG_ERROR)
self._log("Committing suicide :-O", self.LOG_ERROR)
sys.exit(1)
else:
isbase64 = print_node.getAttribute("isbase64")=="1"
print_code = print_node.getAttribute("source")
print_code = convertString(print_code, isbase64)
if (print_code == None or print_code.strip() == ""):
self._log("FATAL! XML-Language-Definition (%s) has no print function defined!"%(self.getName()), self.LOG_ERROR)
self._log("Please fix that in order to run fimap without problems!", self.LOG_ERROR)
self._log("Committing suicide :-O", self.LOG_ERROR)
sys.exit(1)
self.print_function = str(print_code)
eval_node = getXMLNode(methods_node, "eval_kickstarter")
if (eval_node == None):
self._log("XML-LD (%s) has no eval_kickstarter method defined."%(self.getName()), self.LOG_DEBUG)
self._log("Language will not be able to use logfile-injection.", self.LOG_DEBUG)
else:
isbase64 = eval_node.getAttribute("isbase64")=="1"
eval_code = eval_node.getAttribute("source")
eval_code = convertString(eval_code, isbase64)
if (eval_code == None or eval_code.strip() == ""):
self._log("XML-LD (%s) has no eval_kickstarter method defined."%(self.getName()), self.LOG_DEBUG)
self._log("Language will not be able to use logfile-injection."%(self.getName()), self.LOG_DEBUG)
self.eval_kickstarter = str(eval_code)
write_node = getXMLNode(methods_node, "write_file")
if (write_node == None):
self._log("XML-LD (%s) has no write_file method defined."%(self.getName()), self.LOG_DEBUG)
self._log("Language will not be able to write files.", self.LOG_DEBUG)
else:
isbase64 = write_node.getAttribute("isbase64")=="1"
write_code = write_node.getAttribute("source")
write_code = convertString(write_code, isbase64)
if (write_code == None or write_code.strip() == ""):
self._log("XML-LD (%s) has no eval_kickstarter method defined."%(self.getName()), self.LOG_DEBUG)
self._log("Language will not be able to use logfile-injection."%(self.getName()), self.LOG_DEBUG)
self.write_file = str(write_code)
detectors_node = getXMLNode(self.XML_Rootitem, "detectors")
include_patterns = getXMLNode(detectors_node, "include_patterns")
pattern_nodes = getXMLNodes(include_patterns, "pattern")
for f in pattern_nodes:
self.detector_include.append(str(f.getAttribute("regex")))
if (len(self.detector_include) == 0):
self._log("XML-LD has no include patterns defined!", self.LOG_WARN)
self._log(" Only blindmode will work because they are used to retrieve informations out of the error message!", self.LOG_DEBUG)
readfile_patterns = getXMLNode(detectors_node, "readfile_patterns")
pattern_nodes = getXMLNodes(readfile_patterns, "pattern")
for f in pattern_nodes:
self.detector_readfile.append(str(f.getAttribute("regex")))
if (len(self.detector_readfile) == 0):
self._log("XML-LD has no readfile patterns defined!", self.LOG_DEBUG)
self._log(" No readfile bugs can be scanned if this is not defined.", self.LOG_DEBUG)
extentions = getXMLNode(detectors_node, "extentions")
extention_nodes = getXMLNodes(extentions, "extention")
for f in extention_nodes:
self.detector_extentions.append(str(f.getAttribute("ext")))
if (len(self.detector_extentions) == 0):
self._log("XML-LD has no extentions defined!", self.LOG_DEBUG)
class fiPayload(baseTools):
def __init__(self, xmlPayload, config, ParentName):
self.initLog(config)
self.name = xmlPayload.getAttribute("name")
self.doBase64 = (xmlPayload.getAttribute("dobase64") == "1")
self.inshell = (xmlPayload.getAttribute("inshell") == "1")
self.unix = (xmlPayload.getAttribute("unix") == "1")
self.win = (xmlPayload.getAttribute("win") == "1")
self.inputlist = getXMLNodes(xmlPayload, "input")
self.source = str(getXMLNode(xmlPayload, "code").getAttribute("source"))
self.ParentName = ParentName
self._log("fimap PayloadObject loaded: %s" %(self.name), self.LOG_DEVEL)
def isForWindows(self):
return(self.win)
def isForUnix(self):
return(self.unix)
def getParentName(self):
return(self.ParentName)
def doInShell(self):
return(self.inshell)
def getName(self):
return(self.name)
def getSource(self):
return(self.source)
def generatePayload(self):
ret = self.source
for q in self.inputlist:
type_ = q.getAttribute("type")
if (type_ == "question"):
question = q.getAttribute("text")
placeholder = q.getAttribute("placeholder")
inp = raw_input(question)
if (self.doBase64):
inp = base64.b64encode(inp)
ret = ret.replace(placeholder, inp)
elif (type_ == "info"):
info = q.getAttribute("text")
print info
elif (type_ == "wait"):
info = q.getAttribute("text")
raw_input(info)
return(ret)
class fiExecMethod(baseTools):
def __init__(self, xmlExecMethod, config):
self.initLog(config)
self.execname = xmlExecMethod.getAttribute("name")
self.execsource = xmlExecMethod.getAttribute("source")
self.dobase64 = xmlExecMethod.getAttribute("dobase64")=="1"
self.isunix = xmlExecMethod.getAttribute("unix")=="1"
self.iswin = xmlExecMethod.getAttribute("win")=="1"
self._log("fimap ExecObject loaded: %s" %(self.execname), self.LOG_DEVEL)
def getSource(self):
return(self.execsource)
def getName(self):
return(self.execname)
def generatePayload(self, command):
if (self.dobase64):
command = base64.b64encode(command)
payload = self.getSource().replace("__PAYLOAD__", command)
return(payload)
def isUnix(self):
return(self.isunix)
def isWindows(self):
return(self.iswin)
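# --- Illustrative sketch (not part of the original fimap source) ---
# generatePayload() only does template substitution: given an exec method whose
# XML source is, say, "system(base64_decode('__PAYLOAD__'));" with dobase64="1",
# generatePayload("id") returns that snippet with __PAYLOAD__ replaced by
# base64("id"). The attribute values here are hypothetical examples.
#
# exec_method = language.getExecMethods()[0]
# snippet = exec_method.generatePayload("id")  # __PAYLOAD__ filled in, ready to inject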
class fiFile(baseTools):
def __init__(self, xmlFile, config):
self.initLog(config)
self.filepath = str(xmlFile.getAttribute("path"))
self.postdata = str(xmlFile.getAttribute("post"))
self.findstr = str(xmlFile.getAttribute("find"))
self.flags = str(xmlFile.getAttribute("flags"))
self.isunix = str(xmlFile.getAttribute("unix")) == "1"
self.iswin = str(xmlFile.getAttribute("windows")) == "1"
self._log("fimap FileObject loaded: %s" %(self.filepath), self.LOG_DEVEL)
def getFilepath(self):
return(self.filepath)
def getPostData(self):
return(self.postdata)
def getFindStr(self):
return(self.findstr)
def getFlags(self):
return(self.flags)
def containsFlag(self, flag):
return (flag in self.flags)
def isInjected(self, content):
return (content.find(self.findstr) != -1)
def isUnix(self):
return(self.isunix)
def isBreakable(self):
return(self.filepath.find("://") == -1)
def isWindows(self):
return(self.iswin)
def getBackSymbols(self):
if (self.isUnix()):
return("/..")
else:
return("\\..") | gpl-3.0 |
wdzhou/mantid | Framework/PythonInterface/test/python/mantid/kernel/UnitFactoryTest.py | 3 | 1396 | from __future__ import (absolute_import, division, print_function)
import unittest
from mantid.kernel import UnitFactory, UnitFactoryImpl, Unit
class UnitFactoryTest(unittest.TestCase):
def test_alias_is_of_type_UnitFactoryImpl(self):
self.assertTrue(isinstance(UnitFactory, UnitFactoryImpl))
def test_known_unit_can_be_created(self):
energy = UnitFactory.create("Energy")
self.assertTrue(isinstance(energy, Unit))
def test_unknown_unit_raises_error(self):
self.assertRaises(RuntimeError, UnitFactory.create,
"NotAUnit")
def test_keys_returns_a_non_empty_python_list_of_unit_keys(self):
known_units = UnitFactory.getKeys()
self.assertEquals(type(known_units), list)
# Check length is at least the known core units
# but allow for others to be added
core_units = ['Empty', 'Label', 'TOF', 'Wavelength','Energy',
'Energy_inWavenumber', 'dSpacing', 'MomentumTransfer',
'QSquared', 'DeltaE', 'DeltaE_inWavenumber',
'DeltaE_inFrequency', 'Momentum', 'dSpacingPerpendicular']
self.assertTrue(len(core_units) <= len(known_units))
for unit in core_units:
self.assertTrue(unit in known_units, "%s unit not found in UnitFactory keys" % unit)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
ESOedX/edx-platform | common/djangoapps/student/tests/test_events.py | 2 | 7039 | # -*- coding: utf-8 -*-
"""
Test that various events are fired for models in the student app.
"""
from __future__ import absolute_import
import mock
from django.db.utils import IntegrityError
from django.test import TestCase
from django_countries.fields import Country
from student.models import CourseEnrollmentAllowed
from student.tests.factories import CourseEnrollmentAllowedFactory, UserFactory
from student.tests.tests import UserSettingsEventTestMixin
class TestUserProfileEvents(UserSettingsEventTestMixin, TestCase):
"""
Test that we emit field change events when UserProfile models are changed.
"""
def setUp(self):
super(TestUserProfileEvents, self).setUp()
self.table = 'auth_userprofile'
self.user = UserFactory.create()
self.profile = self.user.profile
self.reset_tracker()
def test_change_one_field(self):
"""
Verify that we emit an event when a single field changes on the user
profile.
"""
self.profile.year_of_birth = 1900
self.profile.save()
self.assert_user_setting_event_emitted(setting='year_of_birth', old=None, new=self.profile.year_of_birth)
# Verify that we remove the temporary `_changed_fields` property from
# the model after we're done emitting events.
with self.assertRaises(AttributeError):
self.profile._changed_fields # pylint: disable=pointless-statement, protected-access
def test_change_many_fields(self):
"""
Verify that we emit one event per field when many fields change on the
user profile in one transaction.
"""
self.profile.gender = u'o'
self.profile.bio = 'test bio'
self.profile.save()
self.assert_user_setting_event_emitted(setting='bio', old=None, new=self.profile.bio)
self.assert_user_setting_event_emitted(setting='gender', old=u'm', new=u'o')
def test_unicode(self):
"""
Verify that the events we emit can handle unicode characters.
"""
old_name = self.profile.name
self.profile.name = u'Dånîél'
self.profile.save()
self.assert_user_setting_event_emitted(setting='name', old=old_name, new=self.profile.name)
def test_country(self):
"""
Verify that we properly serialize the JSON-unfriendly Country field.
"""
self.profile.country = Country(u'AL', 'dummy_flag_url')
self.profile.save()
self.assert_user_setting_event_emitted(setting='country', old=None, new=self.profile.country)
def test_excluded_field(self):
"""
Verify that we don't emit events for ignored fields.
"""
self.profile.meta = {u'foo': u'bar'}
self.profile.save()
self.assert_no_events_were_emitted()
@mock.patch('student.models.UserProfile.save', side_effect=IntegrityError)
def test_no_event_if_save_failed(self, _save_mock):
"""
Verify no event is triggered if the save does not complete. Note that the pre_save
signal is not called in this case either, but the intent is to make it clear that this model
should never emit an event if save fails.
"""
self.profile.gender = "unknown"
with self.assertRaises(IntegrityError):
self.profile.save()
self.assert_no_events_were_emitted()
class TestUserEvents(UserSettingsEventTestMixin, TestCase):
"""
Test that we emit field change events when User models are changed.
"""
def setUp(self):
super(TestUserEvents, self).setUp()
self.user = UserFactory.create()
self.reset_tracker()
self.table = 'auth_user'
def test_change_one_field(self):
"""
Verify that we emit an event when a single field changes on the user.
"""
old_username = self.user.username
self.user.username = u'new username'
self.user.save()
self.assert_user_setting_event_emitted(setting='username', old=old_username, new=self.user.username)
def test_change_many_fields(self):
"""
Verify that we emit one event per field when many fields change on the
user in one transaction.
"""
old_email = self.user.email
old_is_staff = self.user.is_staff
self.user.email = u'[email protected]'
self.user.is_staff = True
self.user.save()
self.assert_user_setting_event_emitted(setting='email', old=old_email, new=self.user.email)
self.assert_user_setting_event_emitted(setting='is_staff', old=old_is_staff, new=self.user.is_staff)
def test_password(self):
"""
Verify that password values are not included in the event payload.
"""
self.user.password = u'new password'
self.user.save()
self.assert_user_setting_event_emitted(setting='password', old=None, new=None)
def test_related_fields_ignored(self):
"""
Verify that we don't emit events for related fields.
"""
self.user.loginfailures_set.create()
self.user.save()
self.assert_no_events_were_emitted()
@mock.patch('django.contrib.auth.models.User.save', side_effect=IntegrityError)
def test_no_event_if_save_failed(self, _save_mock):
"""
Verify no event is triggered if the save does not complete. Note that the pre_save
signal is not called in this case either, but the intent is to make it clear that this model
should never emit an event if save fails.
"""
self.user.password = u'new password'
with self.assertRaises(IntegrityError):
self.user.save()
self.assert_no_events_were_emitted()
def test_no_first_and_last_name_events(self):
"""
Verify that first_name and last_name events are not emitted.
"""
self.user.first_name = "Donald"
self.user.last_name = "Duck"
self.user.save()
self.assert_no_events_were_emitted()
def test_enrolled_after_email_change(self):
"""
Test that when a user's email changes, the user is enrolled in pending courses.
"""
pending_enrollment = CourseEnrollmentAllowedFactory(auto_enroll=True)
# the e-mail will change to [email protected] (from something else)
self.assertNotEquals(self.user.email, '[email protected]')
# there's a CEA for the new e-mail
self.assertEquals(CourseEnrollmentAllowed.objects.count(), 1)
self.assertEquals(CourseEnrollmentAllowed.objects.filter(email='[email protected]').count(), 1)
# Changing the e-mail to the enrollment-allowed e-mail should enroll
self.user.email = '[email protected]'
self.user.save()
self.assert_user_enrollment_occurred('edX/toy/2012_Fall')
# CEAs shouldn't have been affected
self.assertEquals(CourseEnrollmentAllowed.objects.count(), 1)
self.assertEquals(CourseEnrollmentAllowed.objects.filter(email='[email protected]').count(), 1)
| agpl-3.0 |
to266/hyperspy | hyperspy/io_plugins/hdf5.py | 1 | 24002 | # -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
from distutils.version import StrictVersion
import warnings
import datetime
import logging
import h5py
import numpy as np
from traits.api import Undefined
from hyperspy.misc.utils import ensure_unicode
from hyperspy.axes import AxesManager
_logger = logging.getLogger(__name__)
# Plugin characteristics
# ----------------------
format_name = 'HDF5'
description = \
'The default file format for HyperSpy based on the HDF5 standard'
full_support = False
# Recognised file extension
file_extensions = ['hdf', 'h4', 'hdf4', 'h5', 'hdf5', 'he4', 'he5']
default_extension = 4
# Writing capabilities
writes = True
version = "2.0"
# -----------------------
# File format description
# -----------------------
# The root must contain a group called Experiments
# The experiments group can contain any number of subgroups
# Each subgroup is an experiment or signal
# Each subgroup must contain at least one dataset called data
# The data is an array of arbitrary dimension
# In addition a number equal to the number of dimensions of the data
# dataset + 1 of empty groups called coordinates followed by a number
# must exist with the following attributes:
# 'name'
# 'offset'
# 'scale'
# 'units'
# 'size'
# 'index_in_array'
# The experiment group contains a number of attributes that will be
# directly assigned as class attributes of the Signal instance. In
# addition the experiment groups may contain 'original_metadata' and
# 'metadata' subgroups that will be
# assigned to the same name attributes of the Signal instance as
# Dictionary Browsers.
# The Experiments group can contain attributes that may be common to all
# the experiments and that will be accessible as attributes of the
# Experiments instance
#
# New in v1.3
# -----------
# - Added support for lists, tuples and binary strings
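# For orientation, a minimal layout consistent with what file_reader() and
# write_signal() below actually read and write (the experiment name
# 'my_signal' is hypothetical):
#
#   /Experiments
#       /my_signal
#           data                      <- N-dimensional dataset
#           /axis-0 ... /axis-(N-1)   <- one group per data dimension, carrying
#                                        the 'name', 'offset', 'scale', 'units',
#                                        'size' and 'index_in_array' attributes
#           /metadata
#           /original_metadata
#           /learning_results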
not_valid_format = 'The file is not a valid HyperSpy hdf5 file'
current_file_version = None # Format version of the file being read
default_version = StrictVersion(version)
def get_hspy_format_version(f):
if "file_format_version" in f.attrs:
version = f.attrs["file_format_version"]
if isinstance(version, bytes):
version = version.decode()
if isinstance(version, float):
version = str(round(version, 2))
elif "Experiments" in f:
# Chances are that this is a HSpy hdf5 file version 1.0
version = "1.0"
elif "Analysis" in f:
# Starting version 2.0 we have "Analysis" field as well
version = "2.0"
else:
raise IOError(not_valid_format)
return StrictVersion(version)
def file_reader(filename, record_by, mode='r', driver='core',
backing_store=False, load_to_memory=True, **kwds):
f = h5py.File(filename, mode=mode, driver=driver, **kwds)
# Getting the format version here also checks if it is a valid HSpy
# hdf5 file, so the following two lines must not be deleted or moved
# elsewhere.
global current_file_version
current_file_version = get_hspy_format_version(f)
global default_version
if current_file_version > default_version:
warnings.warn(
"This file was written using a newer version of the "
"HyperSpy hdf5 file format. I will attempt to load it, but, "
"if I fail, it is likely that I will be more successful at "
"this and other tasks if you upgrade me.")
models_with_signals = []
standalone_models = []
if 'Analysis/models' in f:
try:
m_gr = f.require_group('Analysis/models')
for model_name in m_gr:
if '_signal' in m_gr[model_name].attrs:
key = m_gr[model_name].attrs['_signal']
# del m_gr[model_name].attrs['_signal']
res = hdfgroup2dict(
m_gr[model_name],
load_to_memory=load_to_memory)
del res['_signal']
models_with_signals.append((key, {model_name: res}))
else:
standalone_models.append(
{model_name: hdfgroup2dict(
m_gr[model_name], load_to_memory=load_to_memory)})
except TypeError:
raise IOError(not_valid_format)
experiments = []
exp_dict_list = []
if 'Experiments' in f:
for ds in f['Experiments']:
if isinstance(f['Experiments'][ds], h5py.Group):
if 'data' in f['Experiments'][ds]:
experiments.append(ds)
# Parse the file
for experiment in experiments:
exg = f['Experiments'][experiment]
exp = hdfgroup2signaldict(exg, load_to_memory)
# assign correct models, if found:
_tmp = {}
for (key, _dict) in reversed(models_with_signals):
if key == exg.name:
_tmp.update(_dict)
models_with_signals.remove((key, _dict))
exp['models'] = _tmp
exp_dict_list.append(exp)
for _, m in models_with_signals:
standalone_models.append(m)
exp_dict_list.extend(standalone_models)
if not len(exp_dict_list):
raise IOError('This is not a valid HyperSpy HDF5 file. '
'You can still load the data using a hdf5 reader, '
'e.g. h5py, and manually create a Signal. '
'Please, refer to the User Guide for details')
if load_to_memory:
f.close()
return exp_dict_list
def hdfgroup2signaldict(group, load_to_memory=True):
global current_file_version
global default_version
if current_file_version < StrictVersion("1.2"):
metadata = "mapped_parameters"
original_metadata = "original_parameters"
else:
metadata = "metadata"
original_metadata = "original_metadata"
exp = {'metadata': hdfgroup2dict(
group[metadata], load_to_memory=load_to_memory),
'original_metadata': hdfgroup2dict(
group[original_metadata], load_to_memory=load_to_memory)
}
data = group['data']
if load_to_memory:
data = np.asanyarray(data)
exp['data'] = data
axes = []
for i in range(len(exp['data'].shape)):
try:
axes.append(dict(group['axis-%i' % i].attrs))
axis = axes[-1]
for key, item in axis.items():
axis[key] = ensure_unicode(item)
except KeyError:
break
if len(axes) != len(exp['data'].shape): # broke from the previous loop
try:
axes = [i for k, i in sorted(iter(hdfgroup2dict(
group['_list_' + str(len(exp['data'].shape)) + '_axes'],
load_to_memory=load_to_memory).items()))]
except KeyError:
raise IOError(not_valid_format)
exp['axes'] = axes
exp['attributes'] = {}
if 'learning_results' in group.keys():
exp['attributes']['learning_results'] = \
hdfgroup2dict(
group['learning_results'],
load_to_memory=load_to_memory)
if 'peak_learning_results' in group.keys():
exp['attributes']['peak_learning_results'] = \
hdfgroup2dict(
group['peak_learning_results'],
load_to_memory=load_to_memory)
# If the title was not defined on writing the Experiment is
# then called __unnamed__. The next "if" simply sets the title
# back to the empty string
if "General" in exp["metadata"] and "title" in exp["metadata"]["General"]:
if '__unnamed__' == exp['metadata']['General']['title']:
exp['metadata']["General"]['title'] = ''
if current_file_version < StrictVersion("1.1"):
# Load the decomposition results written with the old name,
# mva_results
if 'mva_results' in group.keys():
exp['attributes']['learning_results'] = hdfgroup2dict(
group['mva_results'], load_to_memory=load_to_memory)
if 'peak_mva_results' in group.keys():
exp['attributes']['peak_learning_results'] = hdfgroup2dict(
group['peak_mva_results'], load_to_memory=load_to_memory)
# Replace the old signal and name keys with their current names
if 'signal' in exp['metadata']:
if "Signal" not in exp["metadata"]:
exp["metadata"]["Signal"] = {}
exp['metadata']["Signal"]['signal_type'] = \
exp['metadata']['signal']
del exp['metadata']['signal']
if 'name' in exp['metadata']:
if "General" not in exp["metadata"]:
exp["metadata"]["General"] = {}
exp['metadata']['General']['title'] = \
exp['metadata']['name']
del exp['metadata']['name']
if current_file_version < StrictVersion("1.2"):
if '_internal_parameters' in exp['metadata']:
exp['metadata']['_HyperSpy'] = \
exp['metadata']['_internal_parameters']
del exp['metadata']['_internal_parameters']
if 'stacking_history' in exp['metadata']['_HyperSpy']:
exp['metadata']['_HyperSpy']["Stacking_history"] = \
exp['metadata']['_HyperSpy']['stacking_history']
del exp['metadata']['_HyperSpy']["stacking_history"]
if 'folding' in exp['metadata']['_HyperSpy']:
exp['metadata']['_HyperSpy']["Folding"] = \
exp['metadata']['_HyperSpy']['folding']
del exp['metadata']['_HyperSpy']["folding"]
if 'Variance_estimation' in exp['metadata']:
if "Noise_properties" not in exp["metadata"]:
exp["metadata"]["Noise_properties"] = {}
exp['metadata']['Noise_properties']["Variance_linear_model"] = \
exp['metadata']['Variance_estimation']
del exp['metadata']['Variance_estimation']
if "TEM" in exp["metadata"]:
if "Acquisition_instrument" not in exp["metadata"]:
exp["metadata"]["Acquisition_instrument"] = {}
exp["metadata"]["Acquisition_instrument"]["TEM"] = \
exp["metadata"]["TEM"]
del exp["metadata"]["TEM"]
tem = exp["metadata"]["Acquisition_instrument"]["TEM"]
if "EELS" in tem:
if "dwell_time" in tem:
tem["EELS"]["dwell_time"] = tem["dwell_time"]
del tem["dwell_time"]
if "dwell_time_units" in tem:
tem["EELS"]["dwell_time_units"] = tem["dwell_time_units"]
del tem["dwell_time_units"]
if "exposure" in tem:
tem["EELS"]["exposure"] = tem["exposure"]
del tem["exposure"]
if "exposure_units" in tem:
tem["EELS"]["exposure_units"] = tem["exposure_units"]
del tem["exposure_units"]
if "Detector" not in tem:
tem["Detector"] = {}
tem["Detector"] = tem["EELS"]
del tem["EELS"]
if "EDS" in tem:
if "Detector" not in tem:
tem["Detector"] = {}
if "EDS" not in tem["Detector"]:
tem["Detector"]["EDS"] = {}
tem["Detector"]["EDS"] = tem["EDS"]
del tem["EDS"]
del tem
if "SEM" in exp["metadata"]:
if "Acquisition_instrument" not in exp["metadata"]:
exp["metadata"]["Acquisition_instrument"] = {}
exp["metadata"]["Acquisition_instrument"]["SEM"] = \
exp["metadata"]["SEM"]
del exp["metadata"]["SEM"]
sem = exp["metadata"]["Acquisition_instrument"]["SEM"]
if "EDS" in sem:
if "Detector" not in sem:
sem["Detector"] = {}
if "EDS" not in sem["Detector"]:
sem["Detector"]["EDS"] = {}
sem["Detector"]["EDS"] = sem["EDS"]
del sem["EDS"]
del sem
if "Sample" in exp["metadata"] and "Xray_lines" in exp[
"metadata"]["Sample"]:
exp["metadata"]["Sample"]["xray_lines"] = exp[
"metadata"]["Sample"]["Xray_lines"]
del exp["metadata"]["Sample"]["Xray_lines"]
for key in ["title", "date", "time", "original_filename"]:
if key in exp["metadata"]:
if "General" not in exp["metadata"]:
exp["metadata"]["General"] = {}
exp["metadata"]["General"][key] = exp["metadata"][key]
del exp["metadata"][key]
for key in ["record_by", "signal_origin", "signal_type"]:
if key in exp["metadata"]:
if "Signal" not in exp["metadata"]:
exp["metadata"]["Signal"] = {}
exp["metadata"]["Signal"][key] = exp["metadata"][key]
del exp["metadata"][key]
return exp
def dict2hdfgroup(dictionary, group, compression=None):
from hyperspy.misc.utils import DictionaryTreeBrowser
from hyperspy.signal import Signal
def parse_structure(key, group, value, _type, compression):
try:
# Here we check if there are any signals in the container, as
# casting a long list of signals to a numpy array takes a very long
# time. So we check if there are any, and save numpy the trouble
if np.any([isinstance(t, Signal) for t in value]):
tmp = np.array([[0]])
else:
tmp = np.array(value)
except ValueError:
tmp = np.array([[0]])
        if tmp.dtype == np.dtype('O') or tmp.ndim != 1:
dict2hdfgroup(dict(zip(
[str(i) for i in range(len(value))], value)),
group.create_group(_type + str(len(value)) + '_' + key),
compression=compression)
elif tmp.dtype.type is np.unicode_:
group.create_dataset(_type + key,
tmp.shape,
dtype=h5py.special_dtype(vlen=str),
compression=compression)
group[_type + key][:] = tmp[:]
else:
group.create_dataset(
_type + key,
data=tmp,
compression=compression)
for key, value in dictionary.items():
if isinstance(value, dict):
dict2hdfgroup(value, group.create_group(key),
compression=compression)
elif isinstance(value, DictionaryTreeBrowser):
dict2hdfgroup(value.as_dictionary(),
group.create_group(key),
compression=compression)
elif isinstance(value, Signal):
if key.startswith('_sig_'):
try:
write_signal(value, group[key])
except:
write_signal(value, group.create_group(key))
else:
write_signal(value, group.create_group('_sig_' + key))
elif isinstance(value, np.ndarray):
group.create_dataset(key,
data=value,
compression=compression)
elif value is None:
group.attrs[key] = '_None_'
elif isinstance(value, bytes):
try:
# binary string if has any null characters (otherwise not
# supported by hdf5)
value.index(b'\x00')
group.attrs['_bs_' + key] = np.void(value)
except ValueError:
group.attrs[key] = value.decode()
elif isinstance(value, str):
group.attrs[key] = value
elif isinstance(value, AxesManager):
dict2hdfgroup(value.as_dictionary(),
group.create_group('_hspy_AxesManager_' + key),
compression=compression)
elif isinstance(value, (datetime.date, datetime.time)):
group.attrs["_datetime_" + key] = repr(value)
elif isinstance(value, list):
if len(value):
parse_structure(key, group, value, '_list_', compression)
else:
group.attrs['_list_empty_' + key] = '_None_'
elif isinstance(value, tuple):
if len(value):
parse_structure(key, group, value, '_tuple_', compression)
else:
group.attrs['_tuple_empty_' + key] = '_None_'
elif value is Undefined:
continue
else:
try:
group.attrs[key] = value
except:
_logger.exception(
"The hdf5 writer could not write the following "
"information in the file: %s : %s", key, value)
def hdfgroup2dict(group, dictionary=None, load_to_memory=True):
if dictionary is None:
dictionary = {}
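    # Attribute keys follow the writer's prefix conventions (see dict2hdfgroup
    # above): '_sig_' marks a nested signal, '_list_empty_'/'_tuple_empty_'
    # mark empty containers, '_bs_' a binary string stored as np.void, and
    # '_datetime_' the repr() of a date/time object.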
for key, value in group.attrs.items():
if isinstance(value, bytes):
value = value.decode()
if isinstance(value, (np.string_, str)):
if value == '_None_':
value = None
elif isinstance(value, np.bool_):
value = bool(value)
elif isinstance(value, np.ndarray) and value.dtype.char == "S":
# Convert strings to unicode
value = value.astype("U")
if value.dtype.str.endswith("U1"):
value = value.tolist()
# skip signals - these are handled below.
if key.startswith('_sig_'):
pass
elif key.startswith('_list_empty_'):
dictionary[key[len('_list_empty_'):]] = []
elif key.startswith('_tuple_empty_'):
dictionary[key[len('_tuple_empty_'):]] = ()
elif key.startswith('_bs_'):
dictionary[key[len('_bs_'):]] = value.tostring()
elif key.startswith('_datetime_'):
dictionary[key.replace("_datetime_", "")] = eval(value)
else:
dictionary[key] = value
if not isinstance(group, h5py.Dataset):
for key in group.keys():
if key.startswith('_sig_'):
from hyperspy.io import dict2signal
dictionary[key[len('_sig_'):]] = (
dict2signal(hdfgroup2signaldict(
group[key], load_to_memory=load_to_memory)))
elif isinstance(group[key], h5py.Dataset):
ans = np.array(group[key])
if ans.dtype.char == "S":
try:
ans = ans.astype("U")
except UnicodeDecodeError:
# There are some strings that must stay in binary,
# for example dill pickles. This will obviously also
# let "wrong" binary string fail somewhere else...
pass
kn = key
if key.startswith("_list_"):
ans = ans.tolist()
kn = key[6:]
elif key.startswith("_tuple_"):
ans = tuple(ans.tolist())
kn = key[7:]
elif load_to_memory:
kn = key
else:
# leave as h5py dataset
ans = group[key]
kn = key
dictionary[kn] = ans
elif key.startswith('_hspy_AxesManager_'):
dictionary[key[len('_hspy_AxesManager_'):]] = AxesManager(
[i for k, i in sorted(iter(
hdfgroup2dict(
group[key], load_to_memory=load_to_memory).items()
))])
elif key.startswith('_list_'):
dictionary[key[7 + key[6:].find('_'):]] = \
[i for k, i in sorted(iter(
hdfgroup2dict(
group[key], load_to_memory=load_to_memory).items()
))]
elif key.startswith('_tuple_'):
dictionary[key[8 + key[7:].find('_'):]] = tuple(
[i for k, i in sorted(iter(
hdfgroup2dict(
group[key], load_to_memory=load_to_memory).items()
))])
else:
dictionary[key] = {}
hdfgroup2dict(
group[key],
dictionary[key],
load_to_memory=load_to_memory)
return dictionary
def write_signal(signal, group, compression='gzip'):
if default_version < StrictVersion("1.2"):
metadata = "mapped_parameters"
original_metadata = "original_parameters"
else:
metadata = "metadata"
original_metadata = "original_metadata"
group.create_dataset('data',
data=signal.data,
compression=compression)
for axis in signal.axes_manager._axes:
axis_dict = axis.get_axis_dictionary()
# For the moment we don't store the navigate attribute
del(axis_dict['navigate'])
coord_group = group.create_group(
'axis-%s' % axis.index_in_array)
dict2hdfgroup(axis_dict, coord_group, compression=compression)
mapped_par = group.create_group(metadata)
metadata_dict = signal.metadata.as_dictionary()
if default_version < StrictVersion("1.2"):
metadata_dict["_internal_parameters"] = \
metadata_dict.pop("_HyperSpy")
dict2hdfgroup(metadata_dict,
mapped_par, compression=compression)
original_par = group.create_group(original_metadata)
dict2hdfgroup(signal.original_metadata.as_dictionary(),
original_par, compression=compression)
learning_results = group.create_group('learning_results')
dict2hdfgroup(signal.learning_results.__dict__,
learning_results, compression=compression)
if hasattr(signal, 'peak_learning_results'):
peak_learning_results = group.create_group(
'peak_learning_results')
dict2hdfgroup(signal.peak_learning_results.__dict__,
peak_learning_results, compression=compression)
if len(signal.models):
model_group = group.file.require_group('Analysis/models')
dict2hdfgroup(signal.models._models.as_dictionary(),
model_group,
compression=compression)
for model in model_group.values():
model.attrs['_signal'] = group.name
def file_writer(filename,
signal,
compression='gzip',
*args, **kwds):
with h5py.File(filename, mode='w') as f:
f.attrs['file_format'] = "HyperSpy"
f.attrs['file_format_version'] = version
exps = f.create_group('Experiments')
group_name = signal.metadata.General.title if \
signal.metadata.General.title else '__unnamed__'
expg = exps.create_group(group_name)
write_signal(signal, expg, compression=compression)
| gpl-3.0 |
vvovo/vvo | model/reply.py | 1 | 2457 | #!/usr/bin/env python
# coding=utf-8
#
# Copyright 2014 vvovo.com
# Very way to victory.
# Let the dream set sail.
import time
from lib.query import Query
class ReplyModel(Query):
def __init__(self, db):
self.db = db
self.table_name = "reply"
super(ReplyModel, self).__init__()
def get_all_replies_by_topic_id(self, topic_id, num = 16, current_page = 1):
where = "topic_id = %s" % topic_id
join = "LEFT JOIN user ON reply.author_id = user.uid LEFT JOIN college ON reply.college_id = college.id"
order = "id ASC"
field = "reply.*, \
user.username as author_username, \
user.nickname as author_nickname, \
user.avatar as author_avatar,\
college.id as author_collegeid,\
college.name as author_collegename"
return self.where(where).order(order).join(join).field(field).pages(current_page = current_page, list_rows = num)
def add_new_reply(self, reply_info):
return self.data(reply_info).add()
def get_user_all_replies_count(self, uid):
where = "author_id = %s" % uid
return self.where(where).count()
def get_all_replies_count(self):
return self.count()
def get_user_all_replies(self, uid, num = 16, current_page = 1):
where = "reply.author_id = %s" % uid
join = "LEFT JOIN topic ON reply.topic_id = topic.id \
LEFT JOIN user AS topic_author_user ON topic.author_id = topic_author_user.uid"
order = "reply.id DESC"
field = "reply.*, \
topic.title as topic_title, \
topic_author_user.username as topic_author_username, \
topic_author_user.nickname as topic_author_nickname, \
topic_author_user.avatar as topic_author_avatar"
group = "reply.topic_id"
return self.where(where).field(field).join(join).group(group).order(order).pages(current_page = current_page, list_rows = num)
def get_user_reply_by_topic_id(self, uid, topic_id):
where = "author_id = %s AND topic_id = %s" % (uid, topic_id)
return self.where(where).find()
def get_reply_by_reply_id(self, reply_id):
where = "id = %s" % reply_id
return self.where(where).find()
def update_reply_by_reply_id(self, reply_id, reply_info):
where = "id = %s" % reply_id
return self.where(where).data(reply_info).save()
| bsd-3-clause |
tbeadle/django | django/contrib/admin/actions.py | 66 | 3365 | """
Built-in, globally-available admin actions.
"""
from django.contrib import messages
from django.contrib.admin import helpers
from django.contrib.admin.utils import get_deleted_objects, model_ngettext
from django.core.exceptions import PermissionDenied
from django.db import router
from django.template.response import TemplateResponse
from django.utils.encoding import force_text
from django.utils.translation import ugettext as _, ugettext_lazy
def delete_selected(modeladmin, request, queryset):
"""
Default action which deletes the selected objects.
    This action first displays a confirmation page which shows all the
    deletable objects, or, if the user has no permission for one of the related
    children (foreign keys), a "permission denied" message.
Next, it deletes all selected objects and redirects back to the change list.
"""
opts = modeladmin.model._meta
app_label = opts.app_label
# Check that the user has delete permission for the actual model
if not modeladmin.has_delete_permission(request):
raise PermissionDenied
using = router.db_for_write(modeladmin.model)
# Populate deletable_objects, a data structure of all related objects that
# will also be deleted.
deletable_objects, model_count, perms_needed, protected = get_deleted_objects(
queryset, opts, request.user, modeladmin.admin_site, using)
# The user has already confirmed the deletion.
# Do the deletion and return a None to display the change list view again.
if request.POST.get('post') and not protected:
if perms_needed:
raise PermissionDenied
n = queryset.count()
if n:
for obj in queryset:
obj_display = force_text(obj)
modeladmin.log_deletion(request, obj, obj_display)
queryset.delete()
modeladmin.message_user(request, _("Successfully deleted %(count)d %(items)s.") % {
"count": n, "items": model_ngettext(modeladmin.opts, n)
}, messages.SUCCESS)
# Return None to display the change list page again.
return None
if len(queryset) == 1:
objects_name = force_text(opts.verbose_name)
else:
objects_name = force_text(opts.verbose_name_plural)
if perms_needed or protected:
title = _("Cannot delete %(name)s") % {"name": objects_name}
else:
title = _("Are you sure?")
context = dict(
modeladmin.admin_site.each_context(request),
title=title,
objects_name=objects_name,
deletable_objects=[deletable_objects],
model_count=dict(model_count).items(),
queryset=queryset,
perms_lacking=perms_needed,
protected=protected,
opts=opts,
action_checkbox_name=helpers.ACTION_CHECKBOX_NAME,
media=modeladmin.media,
)
request.current_app = modeladmin.admin_site.name
# Display the confirmation page
return TemplateResponse(request, modeladmin.delete_selected_confirmation_template or [
"admin/%s/%s/delete_selected_confirmation.html" % (app_label, opts.model_name),
"admin/%s/delete_selected_confirmation.html" % app_label,
"admin/delete_selected_confirmation.html"
], context)
delete_selected.short_description = ugettext_lazy("Delete selected %(verbose_name_plural)s")
| bsd-3-clause |
buntyke/Flask | microblog/flask/lib/python2.7/site-packages/pbr/find_package.py | 101 | 1043 | # Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import setuptools
def smart_find_packages(package_list):
"""Run find_packages the way we intend."""
packages = []
for pkg in package_list.strip().split("\n"):
pkg_path = pkg.replace('.', os.path.sep)
packages.append(pkg)
packages.extend(['%s.%s' % (pkg, f)
for f in setuptools.find_packages(pkg_path)])
return "\n".join(set(packages))
| mit |
lmco/dart | missions/extras/helpers/analytics.py | 1 | 7283 | # Copyright 2017 Lockheed Martin Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import division
from collections import Counter, defaultdict
import logging
from missions.models import TestDetail
logger = logging.getLogger(__name__)
class MissionAnalytics(object):
def __init__(self, mission_id):
self.mission_id = mission_id
# All analytics exclude hidden test cases so we don't report
# numbers of tests greater than what the customer ultimately sees
self.testcases = TestDetail.objects.filter(mission=self.mission_id).exclude(test_case_include_flag=False)
self.test_case_types_by_mission_week = self._count_of_test_case_types_by_mission_week()
def count_of_findings(self):
count = self.testcases.exclude(findings=u'').count()
return count
def count_of_test_cases(self):
count = self.testcases.count()
return count
def count_of_executed_test_cases(self):
count = self.testcases.exclude(execution_status=u'N').count()
return count
def count_of_test_cases_approved(self):
count = self.testcases.filter(test_case_status=u'FINAL').count()
return count
def mission_execution_percentage(self):
if self.count_of_test_cases() == 0:
# prevent division by 0
percentage = 0
else:
percentage = self.count_of_executed_test_cases() / self.count_of_test_cases()
return '{:.0%}'.format(percentage)
def mission_completion_percentage(self):
if self.count_of_test_cases() == 0:
# prevent division by 0
percentage = 0
else:
percentage = self.count_of_test_cases_approved() / self.count_of_test_cases()
return '{:.0%}'.format(percentage)
def count_of_test_cases_by_result(self):
"""
Summation of test cases which have each result type.
:return: A list containing 2 lists:
            - a list of test result type display names
- an identical length list of integer quantities of TCs which have the corresponding result
"""
# Get the test result type tuple & convert to list
output_list = [[], [], []] # Lookup value list, Display text list, Count list
def nested_tuple_to_shallow_list(tup):
output_list[0].append(tup[0])
output_list[1].append(tup[1])
output_list[2].append(0)
for result in TestDetail.EXECUTION_STATUS_OPTIONS:
nested_tuple_to_shallow_list(result)
for tc_result in self.testcases.values('execution_status'):
index = output_list[0].index(tc_result['execution_status'])
output_list[2][index] += 1
return output_list[1:] # No need to return the lookup values list
def count_of_test_cases_by_mission_week(self):
"""
Counts total test cases executed per week.
:return: A zero-indexed list of the number of test cases executed per week.
"""
if self.count_of_executed_test_cases() == 0:
return [0]
# Get the execution date for each test case in the mission
tc_dates = self.testcases.exclude(execution_status=u'N').values('attack_time_date')
# Create a hashmap of the count of TCs per iso calendar week
weekly_count = Counter()
for tc_date in tc_dates:
isocalendar_week = tc_date['attack_time_date'].isocalendar()[1] # Grab the isocalendar Week #
weekly_count[isocalendar_week] += 1
# Get the lowest & highest key values - these are week 1 and the last week respectively
# This allows for skipped weeks (just in case)
first_week = min(weekly_count.keys())
last_week = max(weekly_count.keys())
week_delta = last_week - first_week + 1
# Build a hashmap of week # and the associated count
zero_indexed_weekly_count = [0] * week_delta
for week, count in weekly_count.items():
zero_indexed_weekly_count[week - first_week] = count
return zero_indexed_weekly_count
def _count_of_test_case_types_by_mission_week(self):
"""
Weekly totals broken out by attack phase; includes total row.
:return: A list of lists: Each list within the containing list is guaranteed to be the same length as all others
and will have the test case Phase at index 0, followed by the number of test cases of that type
completed each week of the mission's execution. The last inner list is a total row.
Example of 2 week execution:
[['R&D', 2, 0],['EXP', 1, 2],['TOTAL', 3, 2]]
"""
if self.count_of_executed_test_cases() == 0:
return [['No TCs have been executed yet!']]
# Get the execution date & type for each executed test case in the mission
tc_records = self.testcases.exclude(execution_status=u'N').values('attack_time_date', 'attack_phase')
# Create a hashmap of the count of TCs per iso calendar week
weekly_count = defaultdict(Counter)
for tc_record in tc_records:
isocalendar_week = tc_record['attack_time_date'].isocalendar()[1] # Grab the isocalendar Week #
attack_phase = tc_record['attack_phase']
weekly_count[isocalendar_week][attack_phase] += 1
# Get the lowest & highest key values - these are week 1 and the last week respectively
# This allows for skipped weeks (just in case)
first_week = min(weekly_count.keys())
last_week = max(weekly_count.keys())
week_delta = last_week - first_week + 1
# Build a hashmap of week # and the associated count
zero_indexed_phase_count_by_week = []
header_row = list()
header_row.append('')
header_row.extend(range(1, week_delta))
total_row = list()
total_row.append('TOTAL')
total_row.extend([0] * week_delta)
for phase_tuple in TestDetail.ATTACK_PHASES:
phase = phase_tuple[0]
phase_row = [0] * (week_delta + 1)
phase_row[0] = phase
for week, attack_phase_counter in weekly_count.items():
column = week - first_week + 1
phase_row[column] = attack_phase_counter[phase]
total_row[column] += attack_phase_counter[phase]
zero_indexed_phase_count_by_week.append(phase_row)
zero_indexed_phase_count_by_week.append(total_row)
logger.debug(zero_indexed_phase_count_by_week)
return zero_indexed_phase_count_by_week
def count_of_test_case_types_by_mission_week(self):
return self.test_case_types_by_mission_week
| apache-2.0 |
wskplho/sl4a | python/gdata/tests/gdata_tests/webmastertools_test.py | 87 | 19356 | #!/usr/bin/python
#
# Copyright (C) 2008 Yu-Jie Lin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'livibetter (Yu-Jie Lin)'
import unittest
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import gdata
from gdata import test_data
import gdata.webmastertools as webmastertools
class IndexedTest(unittest.TestCase):
def setUp(self):
self.indexed = webmastertools.Indexed()
def testToAndFromString(self):
self.indexed.text = 'true'
self.assert_(self.indexed.text == 'true')
new_indexed = webmastertools.IndexedFromString(self.indexed.ToString())
self.assert_(self.indexed.text == new_indexed.text)
class CrawledTest(unittest.TestCase):
def setUp(self):
self.crawled = webmastertools.Crawled()
def testToAndFromString(self):
self.crawled.text = 'true'
self.assert_(self.crawled.text == 'true')
new_crawled = webmastertools.CrawledFromString(self.crawled.ToString())
self.assert_(self.crawled.text == new_crawled.text)
class GeoLocationTest(unittest.TestCase):
def setUp(self):
self.geolocation = webmastertools.GeoLocation()
def testToAndFromString(self):
self.geolocation.text = 'US'
self.assert_(self.geolocation.text == 'US')
new_geolocation = webmastertools.GeoLocationFromString(
self.geolocation.ToString())
self.assert_(self.geolocation.text == new_geolocation.text)
class PreferredDomainTest(unittest.TestCase):
def setUp(self):
self.preferred_domain = webmastertools.PreferredDomain()
def testToAndFromString(self):
self.preferred_domain.text = 'none'
self.assert_(self.preferred_domain.text == 'none')
new_preferred_domain = webmastertools.PreferredDomainFromString(
self.preferred_domain.ToString())
self.assert_(self.preferred_domain.text == new_preferred_domain.text)
class CrawlRateTest(unittest.TestCase):
def setUp(self):
self.crawl_rate = webmastertools.CrawlRate()
def testToAndFromString(self):
self.crawl_rate.text = 'normal'
self.assert_(self.crawl_rate.text == 'normal')
new_crawl_rate = webmastertools.CrawlRateFromString(
self.crawl_rate.ToString())
self.assert_(self.crawl_rate.text == new_crawl_rate.text)
class EnhancedImageSearchTest(unittest.TestCase):
def setUp(self):
self.enhanced_image_search = webmastertools.EnhancedImageSearch()
def testToAndFromString(self):
self.enhanced_image_search.text = 'true'
self.assert_(self.enhanced_image_search.text == 'true')
new_enhanced_image_search = webmastertools.EnhancedImageSearchFromString(
self.enhanced_image_search.ToString())
self.assert_(self.enhanced_image_search.text ==
new_enhanced_image_search.text)
class VerifiedTest(unittest.TestCase):
def setUp(self):
self.verified = webmastertools.Verified()
def testToAndFromString(self):
self.verified.text = 'true'
self.assert_(self.verified.text == 'true')
new_verified = webmastertools.VerifiedFromString(self.verified.ToString())
self.assert_(self.verified.text == new_verified.text)
class VerificationMethodMetaTest(unittest.TestCase):
def setUp(self):
self.meta = webmastertools.VerificationMethodMeta()
def testToAndFromString(self):
self.meta.name = 'verify-vf1'
self.meta.content = 'a2Ai'
self.assert_(self.meta.name == 'verify-vf1')
self.assert_(self.meta.content == 'a2Ai')
new_meta = webmastertools.VerificationMethodMetaFromString(
self.meta.ToString())
self.assert_(self.meta.name == new_meta.name)
self.assert_(self.meta.content == new_meta.content)
class VerificationMethodTest(unittest.TestCase):
def setUp(self):
pass
def testMetaTagToAndFromString(self):
self.method = webmastertools.VerificationMethod()
self.method.type = 'metatag'
self.method.in_use = 'false'
self.assert_(self.method.type == 'metatag')
self.assert_(self.method.in_use == 'false')
self.method.meta = webmastertools.VerificationMethodMeta(name='verify-vf1',
content='a2Ai')
self.assert_(self.method.meta.name == 'verify-vf1')
self.assert_(self.method.meta.content == 'a2Ai')
new_method = webmastertools.VerificationMethodFromString(
self.method.ToString())
self.assert_(self.method.type == new_method.type)
self.assert_(self.method.in_use == new_method.in_use)
self.assert_(self.method.meta.name == new_method.meta.name)
self.assert_(self.method.meta.content == new_method.meta.content)
def testHtmlPageToAndFromString(self):
self.method = webmastertools.VerificationMethod()
self.method.type = 'htmlpage'
self.method.in_use = 'false'
self.method.text = '456456-google.html'
self.assert_(self.method.type == 'htmlpage')
self.assert_(self.method.in_use == 'false')
self.assert_(self.method.text == '456456-google.html')
self.assert_(self.method.meta is None)
new_method = webmastertools.VerificationMethodFromString(
self.method.ToString())
self.assert_(self.method.type == new_method.type)
self.assert_(self.method.in_use == new_method.in_use)
self.assert_(self.method.text == new_method.text)
self.assert_(self.method.meta is None)
def testConvertActualData(self):
feed = webmastertools.SitesFeedFromString(test_data.SITES_FEED)
self.assert_(len(feed.entry[0].verification_method) == 2)
check = 0
for method in feed.entry[0].verification_method:
self.assert_(isinstance(method, webmastertools.VerificationMethod))
if method.type == 'metatag':
self.assert_(method.in_use == 'false')
self.assert_(method.text is None)
self.assert_(method.meta.name == 'verify-v1')
self.assert_(method.meta.content == 'a2Ai')
check = check | 1
elif method.type == 'htmlpage':
self.assert_(method.in_use == 'false')
self.assert_(method.text == '456456-google.html')
check = check | 2
else:
self.fail('Wrong Verification Method: %s' % method.type)
self.assert_(check == 2 ** 2 - 1,
'Should only have two Verification Methods, metatag and htmlpage')
class MarkupLanguageTest(unittest.TestCase):
def setUp(self):
self.markup_language = webmastertools.MarkupLanguage()
def testToAndFromString(self):
self.markup_language.text = 'HTML'
self.assert_(self.markup_language.text == 'HTML')
new_markup_language = webmastertools.MarkupLanguageFromString(
self.markup_language.ToString())
self.assert_(self.markup_language.text == new_markup_language.text)
class SitemapMobileTest(unittest.TestCase):
def setUp(self):
self.sitemap_mobile = webmastertools.SitemapMobile()
def testToAndFromString(self):
self.sitemap_mobile.markup_language.append(webmastertools.MarkupLanguage(
text = 'HTML'))
self.assert_(self.sitemap_mobile.text is None)
self.assert_(self.sitemap_mobile.markup_language[0].text == 'HTML')
new_sitemap_mobile = webmastertools.SitemapMobileFromString(
self.sitemap_mobile.ToString())
self.assert_(new_sitemap_mobile.text is None)
self.assert_(self.sitemap_mobile.markup_language[0].text ==
new_sitemap_mobile.markup_language[0].text)
def testConvertActualData(self):
feed = webmastertools.SitemapsFeedFromString(test_data.SITEMAPS_FEED)
self.assert_(feed.sitemap_mobile.text.strip() == '')
self.assert_(len(feed.sitemap_mobile.markup_language) == 2)
check = 0
for markup_language in feed.sitemap_mobile.markup_language:
self.assert_(isinstance(markup_language, webmastertools.MarkupLanguage))
if markup_language.text == "HTML":
check = check | 1
elif markup_language.text == "WAP":
check = check | 2
else:
self.fail('Unexpected markup language: %s' % markup_language.text)
self.assert_(check == 2 ** 2 - 1, "Something is wrong with markup language")
class SitemapMobileMarkupLanguageTest(unittest.TestCase):
def setUp(self):
self.sitemap_mobile_markup_language =\
webmastertools.SitemapMobileMarkupLanguage()
def testToAndFromString(self):
self.sitemap_mobile_markup_language.text = 'HTML'
self.assert_(self.sitemap_mobile_markup_language.text == 'HTML')
new_sitemap_mobile_markup_language =\
webmastertools.SitemapMobileMarkupLanguageFromString(
self.sitemap_mobile_markup_language.ToString())
self.assert_(self.sitemap_mobile_markup_language.text ==\
new_sitemap_mobile_markup_language.text)
class PublicationLabelTest(unittest.TestCase):
def setUp(self):
self.publication_label = webmastertools.PublicationLabel()
def testToAndFromString(self):
self.publication_label.text = 'Value1'
self.assert_(self.publication_label.text == 'Value1')
new_publication_label = webmastertools.PublicationLabelFromString(
self.publication_label.ToString())
self.assert_(self.publication_label.text == new_publication_label.text)
class SitemapNewsTest(unittest.TestCase):
def setUp(self):
self.sitemap_news = webmastertools.SitemapNews()
def testToAndFromString(self):
self.sitemap_news.publication_label.append(webmastertools.PublicationLabel(
text = 'Value1'))
self.assert_(self.sitemap_news.text is None)
self.assert_(self.sitemap_news.publication_label[0].text == 'Value1')
new_sitemap_news = webmastertools.SitemapNewsFromString(
self.sitemap_news.ToString())
self.assert_(new_sitemap_news.text is None)
self.assert_(self.sitemap_news.publication_label[0].text ==
new_sitemap_news.publication_label[0].text)
def testConvertActualData(self):
feed = webmastertools.SitemapsFeedFromString(test_data.SITEMAPS_FEED)
self.assert_(len(feed.sitemap_news.publication_label) == 3)
check = 0
for publication_label in feed.sitemap_news.publication_label:
if publication_label.text == "Value1":
check = check | 1
elif publication_label.text == "Value2":
check = check | 2
elif publication_label.text == "Value3":
check = check | 4
else:
        self.fail('Unexpected publication label: %s' % publication_label.text)
self.assert_(check == 2 ** 3 - 1,
'Something is wrong with publication label')
class SitemapNewsPublicationLabelTest(unittest.TestCase):
def setUp(self):
self.sitemap_news_publication_label =\
webmastertools.SitemapNewsPublicationLabel()
def testToAndFromString(self):
self.sitemap_news_publication_label.text = 'LabelValue'
self.assert_(self.sitemap_news_publication_label.text == 'LabelValue')
new_sitemap_news_publication_label =\
webmastertools.SitemapNewsPublicationLabelFromString(
self.sitemap_news_publication_label.ToString())
self.assert_(self.sitemap_news_publication_label.text ==\
new_sitemap_news_publication_label.text)
class SitemapLastDownloadedTest(unittest.TestCase):
def setUp(self):
self.sitemap_last_downloaded = webmastertools.SitemapLastDownloaded()
def testToAndFromString(self):
self.sitemap_last_downloaded.text = '2006-11-18T19:27:32.543Z'
self.assert_(self.sitemap_last_downloaded.text ==\
'2006-11-18T19:27:32.543Z')
new_sitemap_last_downloaded =\
webmastertools.SitemapLastDownloadedFromString(
self.sitemap_last_downloaded.ToString())
self.assert_(self.sitemap_last_downloaded.text ==\
new_sitemap_last_downloaded.text)
class SitemapTypeTest(unittest.TestCase):
def setUp(self):
self.sitemap_type = webmastertools.SitemapType()
def testToAndFromString(self):
self.sitemap_type.text = 'WEB'
self.assert_(self.sitemap_type.text == 'WEB')
new_sitemap_type = webmastertools.SitemapTypeFromString(
self.sitemap_type.ToString())
self.assert_(self.sitemap_type.text == new_sitemap_type.text)
class SitemapStatusTest(unittest.TestCase):
def setUp(self):
self.sitemap_status = webmastertools.SitemapStatus()
def testToAndFromString(self):
self.sitemap_status.text = 'Pending'
self.assert_(self.sitemap_status.text == 'Pending')
new_sitemap_status = webmastertools.SitemapStatusFromString(
self.sitemap_status.ToString())
self.assert_(self.sitemap_status.text == new_sitemap_status.text)
class SitemapUrlCountTest(unittest.TestCase):
def setUp(self):
self.sitemap_url_count = webmastertools.SitemapUrlCount()
def testToAndFromString(self):
self.sitemap_url_count.text = '0'
self.assert_(self.sitemap_url_count.text == '0')
new_sitemap_url_count = webmastertools.SitemapUrlCountFromString(
self.sitemap_url_count.ToString())
self.assert_(self.sitemap_url_count.text == new_sitemap_url_count.text)
class SitesEntryTest(unittest.TestCase):
def setUp(self):
pass
def testToAndFromString(self):
entry = webmastertools.SitesEntry(
indexed=webmastertools.Indexed(text='true'),
crawled=webmastertools.Crawled(text='2008-09-14T08:59:28.000'),
geolocation=webmastertools.GeoLocation(text='US'),
preferred_domain=webmastertools.PreferredDomain(text='none'),
crawl_rate=webmastertools.CrawlRate(text='normal'),
enhanced_image_search=webmastertools.EnhancedImageSearch(text='true'),
verified=webmastertools.Verified(text='false'),
)
self.assert_(entry.indexed.text == 'true')
self.assert_(entry.crawled.text == '2008-09-14T08:59:28.000')
self.assert_(entry.geolocation.text == 'US')
self.assert_(entry.preferred_domain.text == 'none')
self.assert_(entry.crawl_rate.text == 'normal')
self.assert_(entry.enhanced_image_search.text == 'true')
self.assert_(entry.verified.text == 'false')
new_entry = webmastertools.SitesEntryFromString(entry.ToString())
self.assert_(new_entry.indexed.text == 'true')
self.assert_(new_entry.crawled.text == '2008-09-14T08:59:28.000')
self.assert_(new_entry.geolocation.text == 'US')
self.assert_(new_entry.preferred_domain.text == 'none')
self.assert_(new_entry.crawl_rate.text == 'normal')
self.assert_(new_entry.enhanced_image_search.text == 'true')
self.assert_(new_entry.verified.text == 'false')
def testConvertActualData(self):
feed = webmastertools.SitesFeedFromString(test_data.SITES_FEED)
self.assert_(len(feed.entry) == 1)
entry = feed.entry[0]
self.assert_(isinstance(entry, webmastertools.SitesEntry))
self.assert_(entry.indexed.text == 'true')
self.assert_(entry.crawled.text == '2008-09-14T08:59:28.000')
self.assert_(entry.geolocation.text == 'US')
self.assert_(entry.preferred_domain.text == 'none')
self.assert_(entry.crawl_rate.text == 'normal')
self.assert_(entry.enhanced_image_search.text == 'true')
self.assert_(entry.verified.text == 'false')
class SitesFeedTest(unittest.TestCase):
def setUp(self):
self.feed = gdata.webmastertools.SitesFeedFromString(test_data.SITES_FEED)
def testToAndFromString(self):
self.assert_(len(self.feed.entry) == 1)
for entry in self.feed.entry:
self.assert_(isinstance(entry, webmastertools.SitesEntry))
new_feed = webmastertools.SitesFeedFromString(self.feed.ToString())
self.assert_(len(new_feed.entry) == 1)
for entry in new_feed.entry:
self.assert_(isinstance(entry, webmastertools.SitesEntry))
class SitemapsEntryTest(unittest.TestCase):
def testRegularToAndFromString(self):
entry = webmastertools.SitemapsEntry(
sitemap_type=webmastertools.SitemapType(text='WEB'),
sitemap_status=webmastertools.SitemapStatus(text='Pending'),
sitemap_last_downloaded=webmastertools.SitemapLastDownloaded(
text='2006-11-18T19:27:32.543Z'),
sitemap_url_count=webmastertools.SitemapUrlCount(text='102'),
)
self.assert_(entry.sitemap_type.text == 'WEB')
self.assert_(entry.sitemap_status.text == 'Pending')
self.assert_(entry.sitemap_last_downloaded.text ==\
'2006-11-18T19:27:32.543Z')
self.assert_(entry.sitemap_url_count.text == '102')
new_entry = webmastertools.SitemapsEntryFromString(entry.ToString())
self.assert_(new_entry.sitemap_type.text == 'WEB')
self.assert_(new_entry.sitemap_status.text == 'Pending')
self.assert_(new_entry.sitemap_last_downloaded.text ==\
'2006-11-18T19:27:32.543Z')
self.assert_(new_entry.sitemap_url_count.text == '102')
def testConvertActualData(self):
feed = gdata.webmastertools.SitemapsFeedFromString(test_data.SITEMAPS_FEED)
self.assert_(len(feed.entry) == 3)
for entry in feed.entry:
self.assert_(entry, webmastertools.SitemapsEntry)
self.assert_(entry.sitemap_status, webmastertools.SitemapStatus)
self.assert_(entry.sitemap_last_downloaded,
webmastertools.SitemapLastDownloaded)
self.assert_(entry.sitemap_url_count, webmastertools.SitemapUrlCount)
self.assert_(entry.sitemap_status.text == 'StatusValue')
self.assert_(entry.sitemap_last_downloaded.text ==\
'2006-11-18T19:27:32.543Z')
self.assert_(entry.sitemap_url_count.text == '102')
if entry.id.text == 'http://www.example.com/sitemap-index.xml':
self.assert_(entry.sitemap_type, webmastertools.SitemapType)
self.assert_(entry.sitemap_type.text == 'WEB')
self.assert_(entry.sitemap_mobile_markup_language is None)
self.assert_(entry.sitemap_news_publication_label is None)
elif entry.id.text == 'http://www.example.com/mobile/sitemap-index.xml':
self.assert_(entry.sitemap_mobile_markup_language,
webmastertools.SitemapMobileMarkupLanguage)
self.assert_(entry.sitemap_mobile_markup_language.text == 'HTML')
self.assert_(entry.sitemap_type is None)
self.assert_(entry.sitemap_news_publication_label is None)
elif entry.id.text == 'http://www.example.com/news/sitemap-index.xml':
self.assert_(entry.sitemap_news_publication_label,
webmastertools.SitemapNewsPublicationLabel)
self.assert_(entry.sitemap_news_publication_label.text == 'LabelValue')
self.assert_(entry.sitemap_type is None)
self.assert_(entry.sitemap_mobile_markup_language is None)
class SitemapsFeedTest(unittest.TestCase):
def setUp(self):
self.feed = gdata.webmastertools.SitemapsFeedFromString(
test_data.SITEMAPS_FEED)
def testToAndFromString(self):
self.assert_(len(self.feed.entry) == 3)
for entry in self.feed.entry:
self.assert_(isinstance(entry, webmastertools.SitemapsEntry))
new_feed = webmastertools.SitemapsFeedFromString(self.feed.ToString())
self.assert_(len(new_feed.entry) == 3)
for entry in new_feed.entry:
self.assert_(isinstance(entry, webmastertools.SitemapsEntry))
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
edac-epscor/nmepscor-data-collection-form | application/builder/forms.py | 1 | 2579 |
import warnings
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth import authenticate
UNMASKED_DIGITS_TO_SHOW = 6
mask_password = lambda p: "%s%s" % (p[:UNMASKED_DIGITS_TO_SHOW], "*" * max(len(p) - UNMASKED_DIGITS_TO_SHOW, 0))
# Originally from django.contrib.auth.forms
class AuthenticationForm(forms.Form):
"""
Base class for authenticating users. Extend this to get a form that accepts
username/password logins.
"""
username = forms.CharField(max_length=30)
password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
error_messages = {
'invalid_login': _("Please enter a correct username and password. "
"Note that both fields are case-sensitive."),
'inactive': _("This account is inactive."),
}
def __init__(self, request=None, *args, **kwargs):
"""
If request is passed in, the form will validate that cookies are
enabled. Note that the request (a HttpRequest object) must have set a
cookie with the key TEST_COOKIE_NAME and value TEST_COOKIE_VALUE before
running this validation.
"""
self.request = request
self.user_cache = None
super(AuthenticationForm, self).__init__(*args, **kwargs)
self.fields['username'].label = 'User Name'
def clean(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if username and password:
# Now -- instead we use authDrupal
self.user_cache = authenticate(username=username,
password=password)
if self.user_cache is None:
raise forms.ValidationError(
self.error_messages['invalid_login'],
code='invalid_login',
                    params={'username': self.fields['username'].label},
)
elif not self.user_cache.is_active:
raise forms.ValidationError(
self.error_messages['inactive'],
code='inactive',
)
return self.cleaned_data
def check_for_test_cookie(self):
warnings.warn("check_for_test_cookie is deprecated; ensure your login "
"view is CSRF-protected.", DeprecationWarning)
def get_user_id(self):
if self.user_cache:
return self.user_cache.id
return None
def get_user(self):
return self.user_cache
| mit |
geobricks/pgeo | pgeo/thread/bulk_download_threads_manager.py | 1 | 6781 | from ftplib import FTP
from threading import Thread
from threading import Lock
import Queue
import os
import time
from threading import Timer
from pgeo.utils import log
from pgeo.utils.filesystem import create_filesystem
from pgeo.gis.gdal_calc import calc_layers
log = log.logger('bulk_download_threads_manager.py')
progress_map = {}
exit_flags = {}
class BulkDownloadThread(Thread):
bulk_download_object = None
total_files = 0
downloaded_files = 0
aggregation = None
target_folder = None
def __init__(self, source, thread_name, queue, queue_lock, tab_id, aggregation):
Thread.__init__(self)
self.source = source
self.thread_name = thread_name
self.queue = queue
self.queue_lock = queue_lock
self.tab_id = tab_id
self.aggregation = aggregation
progress_map[self.tab_id] = {}
progress_map[self.tab_id]['status'] = 'WAITING'
def run(self):
while not exit_flags[self.tab_id]:
self.queue_lock.acquire()
if not self.queue.empty():
self.bulk_download_object = self.queue.get()
self.total_files = len(self.bulk_download_object['file_list'])
progress_map[self.tab_id]['total_files'] = self.total_files
progress_map[self.tab_id]['downloaded_files'] = 0
progress_map[self.tab_id]['status'] = 'START'
progress_map[self.tab_id]['progress'] = 0
self.queue_lock.release()
self.target_folder = create_filesystem(self.source, self.bulk_download_object['filesystem_structure'])
ftp = FTP(self.bulk_download_object['ftp_base_url'])
try:
ftp.login()
except Exception, e:
progress_map[self.tab_id]['status'] = 'ERROR'
exit_flags[self.tab_id] = 1
log.error(e)
continue
ftp.cwd(self.bulk_download_object['ftp_data_dir'])
remote_files = ftp.nlst()
for file_name in self.bulk_download_object['file_list']:
log.info('Downloading: ' + file_name)
if file_name in remote_files:
ftp.sendcmd('TYPE i')
file_obj = file_name
local_file = os.path.join(self.target_folder, file_obj)
progress_map[self.tab_id]['status'] = 'ONGOING'
if not os.path.isfile(local_file):
with open(local_file, 'w') as f:
def callback(chunk):
f.write(chunk)
ftp.retrbinary('RETR %s' % file_obj, callback)
self.downloaded_files += 1
progress_map[self.tab_id]['status'] = 'COMPLETE'
progress_map[self.tab_id]['progress'] = self.percent_done()
else:
self.downloaded_files += 1
progress_map[self.tab_id]['status'] = 'COMPLETE'
progress_map[self.tab_id]['progress'] = self.percent_done()
ftp.quit()
log.info('Download Complete. Start aggregation.')
self.aggregate_layers()
else:
self.queue_lock.release()
time.sleep(1)
def percent_done(self):
return float('{0:.2f}'.format(float(self.downloaded_files) / float(self.total_files) * 100))
def aggregate_layers(self):
if self.aggregation is not None:
file_name = self.target_folder + '/'
file_name += self.bulk_download_object['filesystem_structure']['year']
file_name += self.bulk_download_object['filesystem_structure']['month']
file_name += self.bulk_download_object['filesystem_structure']['day']
file_name += '_' + self.aggregation.upper()
file_name += '.geotif'
input_files = [self.target_folder + '/' + x for x in self.bulk_download_object['file_list'] if '.tif' in x]
calc_layers(input_files, file_name, self.aggregation)
class BulkDownloadManager(Thread):
def __init__(self, source, bulk_download_objects, tab_id, aggregation):
"""
@param source: Data provider's name, must match a configuration file, e.g. 'trmm2'
@param bulk_download_objects: An array of objects with the following fields:
'ftp_base_url', 'ftp_data_dir', 'file_list', 'filesystem_structure'. Field 'filesystem_structure' must
describe the target filesystem structure, e.g. e.g. {'product': 'MOD13Q1', 'year': '2014', 'day': '033'}
@param tab_id: An identifier to be used to monitor the progress, e.g. '23'
@param aggregation: 'None', 'sum', 'avg', 'ratio' or 'diff'
"""
Thread.__init__(self)
self.bulk_download_objects = bulk_download_objects
self.tab_id = tab_id
self.source = source
self.target_folder = 'WORK IN PROGRESS'
self.aggregation = aggregation
def run(self):
t = Timer(1, self.start_manager)
t.start()
target_folders = []
for obj in self.bulk_download_objects:
target_folders.append(create_filesystem(self.source, obj['filesystem_structure']))
return target_folders
def start_manager(self):
exit_flags[self.tab_id] = 0
log.info('START | Bulk Download Manager')
thread_list = ['Alpha']
queue_lock = Lock()
work_queue = Queue.Queue(len(self.bulk_download_objects))
threads = []
for thread_name in thread_list:
thread = BulkDownloadThread(self.source, thread_name, work_queue, queue_lock, self.tab_id, self.aggregation)
thread.start()
threads.append(thread)
queue_lock.acquire()
for obj in self.bulk_download_objects:
work_queue.put(obj)
queue_lock.release()
while not work_queue.empty():
pass
exit_flags[self.tab_id] = 1
for t in threads:
t.join()
log.info('END | Bulk Download Manager')
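# --- Illustrative usage sketch (hypothetical values, not part of the module) ---
# Queues a single download; the FTP host, directory, file name, source name and
# tab id are all invented for this example. The threads read the descriptor with
# dict-style subscripts, so a plain dict with the documented keys is used here.
def _example_bulk_download():
    descriptor = {
        'ftp_base_url': 'ftp.example.org',
        'ftp_data_dir': '/data/products/2014',
        'file_list': ['product.20140101.tif'],
        'filesystem_structure': {'product': 'EXAMPLE', 'year': '2014',
                                 'month': '01', 'day': '01'}
    }
    manager = BulkDownloadManager('example_source', [descriptor], '23', None)
    manager.start()
    return progress_map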
class BulkDownloadObject():
ftp_base_url = None
ftp_data_dir = None
file_list = []
def __init__(self, ftp_base_url, ftp_data_dir, file_list):
self.ftp_base_url = ftp_base_url
self.ftp_data_dir = ftp_data_dir
self.file_list = file_list
def __str__(self):
s = ''
s += str(self.ftp_base_url) + '\n'
s += str(self.ftp_data_dir) + '\n'
s += str(self.file_list)
return s | gpl-2.0 |
djgagne/scikit-learn | examples/linear_model/plot_logistic.py | 312 | 1426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Shown in the plot is how the logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logit-curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
| bsd-3-clause |
arch1tect0r/root | tutorials/pyroot/hsum.py | 10 | 2306 | #
# To see the output of this macro, click begin_html <a href="gif/hsum.gif" >here</a> end_html
# Simple example illustrating how to use the C++ interpreter
# to fill histograms in a loop and show the graphics results
#
from ROOT import TCanvas, TH1F, TSlider
from ROOT import gROOT, gBenchmark, gRandom
# Create a new canvas, and customize it.
c1 = TCanvas( 'c1', 'The HSUM example', 200, 10, 600, 400 )
c1.SetGrid();
gBenchmark.Start( 'hsum' )
# Create some histograms.
total = TH1F( 'total', 'This is the total distribution', 100, -4, 4 )
main = TH1F( 'main', 'Main contributor', 100, -4, 4 )
s1 = TH1F( 's1', 'This is the first signal', 100, -4, 4 )
s2 = TH1F( 's2', 'This is the second signal', 100, -4, 4 )
total.Sumw2() # this makes sure that the sum of squares of weights will be stored
# Set canvas/frame attributes.
total.SetMarkerStyle( 21 )
total.SetMarkerSize( 0.7 )
main.SetFillColor( 16 )
s1.SetFillColor( 42 )
s2.SetFillColor( 46 )
# Initialize random number generator.
gRandom.SetSeed()
gauss, landau = gRandom.Gaus, gRandom.Landau
# for speed, bind and cache the Fill member functions
histos = [ 'total', 'main', 's1', 's2' ]
for name in histos:
exec '%sFill = %s.Fill' % (name,name)
# Fill histograms randomly
kUPDATE = 500
for i in xrange( 10000 ):
# Generate random values.
xmain = gauss( -1, 1.5 )
xs1 = gauss( -0.5, 0.5 )
xs2 = landau( 1, 0.15 )
mainFill( xmain )
# Fill histograms.
s1Fill( xs1, 0.3 )
s2Fill( xs2, 0.2 )
totalFill( xmain )
totalFill( xs1, 0.3 )
totalFill( xs2, 0.2 )
# Update display every kUPDATE events.
if i and (i%kUPDATE) == 0 :
if i == kUPDATE :
total.Draw( 'e1p' )
main.Draw( 'same' )
s1.Draw( 'same' )
s2.Draw( 'same' )
c1.Update()
slider = TSlider( 'slider', 'test', 4.2, 0, 4.6, total.GetMaximum(), 38 )
slider.SetFillColor( 46 )
if slider:
slider.SetRange( 0, float(i) / 10000. )
c1.Modified()
c1.Update()
# Destroy member functions cache.
for name in histos:
exec 'del %sFill' % name
del histos
# Done, finalized and trigger an update.
slider.SetRange( 0, 1 )
total.Draw( 'sameaxis' ) # to redraw axis hidden by the fill area
c1.Modified()
c1.Update()
gBenchmark.Show( 'hsum' )
| lgpl-2.1 |
johankaito/fufuka | microblog/old-flask/venv/lib/python2.7/site-packages/werkzeug/contrib/lint.py | 295 | 12282 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.lint
~~~~~~~~~~~~~~~~~~~~~
.. versionadded:: 0.5
This module provides a middleware that performs sanity checks of the WSGI
application. It checks that :pep:`333` is properly implemented and warns
on some common HTTP errors such as non-empty responses for 304 status
codes.
This module provides a middleware, the :class:`LintMiddleware`. Wrap your
application with it and it will warn about common problems with WSGI and
HTTP while your application is running.
It's strongly recommended to use it during development.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from urlparse import urlparse
from warnings import warn
from werkzeug.datastructures import Headers
from werkzeug.http import is_entity_header
from werkzeug.wsgi import FileWrapper
from werkzeug._compat import string_types
class WSGIWarning(Warning):
"""Warning class for WSGI warnings."""
class HTTPWarning(Warning):
"""Warning class for HTTP warnings."""
def check_string(context, obj, stacklevel=3):
if type(obj) is not str:
warn(WSGIWarning('%s requires bytestrings, got %s' %
(context, obj.__class__.__name__)))
class InputStream(object):
def __init__(self, stream):
self._stream = stream
def read(self, *args):
if len(args) == 0:
warn(WSGIWarning('wsgi does not guarantee an EOF marker on the '
'input stream, thus making calls to '
'wsgi.input.read() unsafe. Conforming servers '
'may never return from this call.'),
stacklevel=2)
elif len(args) != 1:
warn(WSGIWarning('too many parameters passed to wsgi.input.read()'),
stacklevel=2)
return self._stream.read(*args)
def readline(self, *args):
if len(args) == 0:
warn(WSGIWarning('Calls to wsgi.input.readline() without arguments'
' are unsafe. Use wsgi.input.read() instead.'),
stacklevel=2)
elif len(args) == 1:
warn(WSGIWarning('wsgi.input.readline() was called with a size hint. '
'WSGI does not support this, although it\'s available '
'on all major servers.'),
stacklevel=2)
else:
raise TypeError('too many arguments passed to wsgi.input.readline()')
return self._stream.readline(*args)
def __iter__(self):
try:
return iter(self._stream)
except TypeError:
warn(WSGIWarning('wsgi.input is not iterable.'), stacklevel=2)
return iter(())
def close(self):
warn(WSGIWarning('application closed the input stream!'),
stacklevel=2)
self._stream.close()
class ErrorStream(object):
def __init__(self, stream):
self._stream = stream
def write(self, s):
check_string('wsgi.error.write()', s)
self._stream.write(s)
def flush(self):
self._stream.flush()
def writelines(self, seq):
for line in seq:
self.write(seq)
def close(self):
warn(WSGIWarning('application closed the error stream!'),
stacklevel=2)
self._stream.close()
class GuardedWrite(object):
def __init__(self, write, chunks):
self._write = write
self._chunks = chunks
def __call__(self, s):
check_string('write()', s)
self._write.write(s)
self._chunks.append(len(s))
class GuardedIterator(object):
def __init__(self, iterator, headers_set, chunks):
self._iterator = iterator
self._next = iter(iterator).next
self.closed = False
self.headers_set = headers_set
self.chunks = chunks
def __iter__(self):
return self
def next(self):
if self.closed:
warn(WSGIWarning('iterated over closed app_iter'),
stacklevel=2)
rv = self._next()
if not self.headers_set:
warn(WSGIWarning('Application returned before it '
'started the response'), stacklevel=2)
check_string('application iterator items', rv)
self.chunks.append(len(rv))
return rv
def close(self):
self.closed = True
if hasattr(self._iterator, 'close'):
self._iterator.close()
if self.headers_set:
status_code, headers = self.headers_set
bytes_sent = sum(self.chunks)
content_length = headers.get('content-length', type=int)
if status_code == 304:
for key, value in headers:
key = key.lower()
if key not in ('expires', 'content-location') and \
is_entity_header(key):
warn(HTTPWarning('entity header %r found in 304 '
'response' % key))
if bytes_sent:
warn(HTTPWarning('304 responses must not have a body'))
elif 100 <= status_code < 200 or status_code == 204:
if content_length != 0:
                    warn(HTTPWarning('%r responses must have an empty '
                                     'content length' % status_code))
if bytes_sent:
warn(HTTPWarning('%r responses must not have a body' %
status_code))
elif content_length is not None and content_length != bytes_sent:
warn(WSGIWarning('Content-Length and the number of bytes '
'sent to the client do not match.'))
def __del__(self):
if not self.closed:
try:
warn(WSGIWarning('Iterator was garbage collected before '
'it was closed.'))
except Exception:
pass
class LintMiddleware(object):
"""This middleware wraps an application and warns on common errors.
Among other thing it currently checks for the following problems:
- invalid status codes
- non-bytestrings sent to the WSGI server
- strings returned from the WSGI application
- non-empty conditional responses
- unquoted etags
- relative URLs in the Location header
- unsafe calls to wsgi.input
- unclosed iterators
Detected errors are emitted using the standard Python :mod:`warnings`
system and usually end up on :data:`stderr`.
::
from werkzeug.contrib.lint import LintMiddleware
app = LintMiddleware(app)
:param app: the application to wrap
"""
def __init__(self, app):
self.app = app
def check_environ(self, environ):
if type(environ) is not dict:
warn(WSGIWarning('WSGI environment is not a standard python dict.'),
stacklevel=4)
for key in ('REQUEST_METHOD', 'SERVER_NAME', 'SERVER_PORT',
'wsgi.version', 'wsgi.input', 'wsgi.errors',
'wsgi.multithread', 'wsgi.multiprocess',
'wsgi.run_once'):
if key not in environ:
warn(WSGIWarning('required environment key %r not found'
% key), stacklevel=3)
if environ['wsgi.version'] != (1, 0):
warn(WSGIWarning('environ is not a WSGI 1.0 environ'),
stacklevel=3)
script_name = environ.get('SCRIPT_NAME', '')
if script_name and script_name[:1] != '/':
warn(WSGIWarning('SCRIPT_NAME does not start with a slash: %r'
% script_name), stacklevel=3)
path_info = environ.get('PATH_INFO', '')
if path_info[:1] != '/':
warn(WSGIWarning('PATH_INFO does not start with a slash: %r'
% path_info), stacklevel=3)
def check_start_response(self, status, headers, exc_info):
check_string('status', status)
status_code = status.split(None, 1)[0]
if len(status_code) != 3 or not status_code.isdigit():
warn(WSGIWarning('Status code must be three digits'), stacklevel=3)
if len(status) < 4 or status[3] != ' ':
            warn(WSGIWarning('Invalid value for status %r. Valid '
                             'status strings are three digits, a space '
                             'and a status explanation' % status), stacklevel=3)
status_code = int(status_code)
if status_code < 100:
warn(WSGIWarning('status code < 100 detected'), stacklevel=3)
if type(headers) is not list:
warn(WSGIWarning('header list is not a list'), stacklevel=3)
for item in headers:
if type(item) is not tuple or len(item) != 2:
                warn(WSGIWarning('Headers must be 2-item tuples'),
stacklevel=3)
name, value = item
if type(name) is not str or type(value) is not str:
warn(WSGIWarning('header items must be strings'),
stacklevel=3)
if name.lower() == 'status':
warn(WSGIWarning('The status header is not supported due to '
'conflicts with the CGI spec.'),
stacklevel=3)
if exc_info is not None and not isinstance(exc_info, tuple):
warn(WSGIWarning('invalid value for exc_info'), stacklevel=3)
headers = Headers(headers)
self.check_headers(headers)
return status_code, headers
def check_headers(self, headers):
etag = headers.get('etag')
if etag is not None:
if etag.startswith('w/'):
etag = etag[2:]
if not (etag[:1] == etag[-1:] == '"'):
warn(HTTPWarning('unquoted etag emitted.'), stacklevel=4)
location = headers.get('location')
if location is not None:
if not urlparse(location).netloc:
warn(HTTPWarning('absolute URLs required for location header'),
stacklevel=4)
def check_iterator(self, app_iter):
if isinstance(app_iter, string_types):
warn(WSGIWarning('application returned string. Response will '
'send character for character to the client '
'which will kill the performance. Return a '
'list or iterable instead.'), stacklevel=3)
def __call__(self, *args, **kwargs):
if len(args) != 2:
warn(WSGIWarning('Two arguments to WSGI app required'), stacklevel=2)
if kwargs:
warn(WSGIWarning('No keyword arguments to WSGI app allowed'),
stacklevel=2)
environ, start_response = args
self.check_environ(environ)
environ['wsgi.input'] = InputStream(environ['wsgi.input'])
environ['wsgi.errors'] = ErrorStream(environ['wsgi.errors'])
# hook our own file wrapper in so that applications will always
# iterate to the end and we can check the content length
environ['wsgi.file_wrapper'] = FileWrapper
headers_set = []
chunks = []
def checking_start_response(*args, **kwargs):
if len(args) not in (2, 3):
                warn(WSGIWarning('Invalid number of arguments: %s, expected '
                                 '2 or 3' % len(args)), stacklevel=2)
if kwargs:
warn(WSGIWarning('no keyword arguments allowed.'))
status, headers = args[:2]
if len(args) == 3:
exc_info = args[2]
else:
exc_info = None
headers_set[:] = self.check_start_response(status, headers,
exc_info)
return GuardedWrite(start_response(status, headers, exc_info),
chunks)
app_iter = self.app(environ, checking_start_response)
self.check_iterator(app_iter)
return GuardedIterator(app_iter, headers_set, chunks)
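# --- Illustrative usage sketch (not part of the original module) ---
# Wraps a trivial WSGI application; the response below is deliberately
# well-formed, so serving it through the linted app should emit no warnings.
def _example_app(environ, start_response):
    body = 'Hello, World!'
    start_response('200 OK', [('Content-Type', 'text/plain'),
                              ('Content-Length', str(len(body)))])
    return [body]
_linted_example_app = LintMiddleware(_example_app)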
| apache-2.0 |
balister/GNU-Radio | grc/grc_gnuradio/blks2/selector.py | 7 | 5833 | #
# Copyright 2008,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
class selector(gr.hier_block2):
"""A hier2 block with N inputs and M outputs, where data is only forwarded through input n to output m."""
def __init__(self, item_size, num_inputs, num_outputs, input_index, output_index):
"""
Selector constructor.
Args:
item_size: the size of the gr data stream in bytes
num_inputs: the number of inputs (integer)
num_outputs: the number of outputs (integer)
input_index: the index for the source data
output_index: the index for the destination data
"""
gr.hier_block2.__init__(
self, 'selector',
gr.io_signature(num_inputs, num_inputs, item_size),
gr.io_signature(num_outputs, num_outputs, item_size),
)
#terminator blocks for unused inputs and outputs
self.input_terminators = [blocks.null_sink(item_size) for i in range(num_inputs)]
self.output_terminators = [blocks.head(item_size, 0) for i in range(num_outputs)]
self.copy = blocks.copy(item_size)
#connections
for i in range(num_inputs): self.connect((self, i), self.input_terminators[i])
for i in range(num_outputs): self.connect(blocks.null_source(item_size),
self.output_terminators[i], (self, i))
self.item_size = item_size
self.input_index = input_index
self.output_index = output_index
self.num_inputs = num_inputs
self.num_outputs = num_outputs
self._connect_current()
def _indexes_valid(self):
"""
Are the input and output indexes within range of the number of inputs and outputs?
Returns:
true if input index and output index are in range
"""
return self.input_index in range(self.num_inputs) and self.output_index in range(self.num_outputs)
def _connect_current(self):
"""If the input and output indexes are valid:
disconnect the blocks at the input and output index from their terminators,
and connect them to one another. Then connect the terminators to one another."""
if self._indexes_valid():
self.disconnect((self, self.input_index), self.input_terminators[self.input_index])
self.disconnect(self.output_terminators[self.output_index], (self, self.output_index))
self.connect((self, self.input_index), self.copy)
self.connect(self.copy, (self, self.output_index))
self.connect(self.output_terminators[self.output_index], self.input_terminators[self.input_index])
def _disconnect_current(self):
"""If the input and output indexes are valid:
disconnect the blocks at the input and output index from one another,
and the terminators at the input and output index from one another.
Reconnect the blocks to the terminators."""
if self._indexes_valid():
self.disconnect((self, self.input_index), self.copy)
self.disconnect(self.copy, (self, self.output_index))
self.disconnect(self.output_terminators[self.output_index], self.input_terminators[self.input_index])
self.connect((self, self.input_index), self.input_terminators[self.input_index])
self.connect(self.output_terminators[self.output_index], (self, self.output_index))
def set_input_index(self, input_index):
"""
Change the block to the new input index if the index changed.
Args:
input_index: the new input index
"""
if self.input_index != input_index:
self.lock()
self._disconnect_current()
self.input_index = input_index
self._connect_current()
self.unlock()
def set_output_index(self, output_index):
"""
Change the block to the new output index if the index changed.
Args:
output_index: the new output index
"""
if self.output_index != output_index:
self.lock()
self._disconnect_current()
self.output_index = output_index
self._connect_current()
self.unlock()
class valve(selector):
"""Wrapper for selector with 1 input and 1 output."""
def __init__(self, item_size, open):
"""
Constructor for valve.
Args:
item_size: the size of the gr data stream in bytes
open: true if initial valve state is open
"""
if open: output_index = -1
else: output_index = 0
selector.__init__(self, item_size, 1, 1, 0, output_index)
def set_open(self, open):
"""
Callback to set open state.
Args:
open: true to set valve state to open
"""
if open: output_index = -1
else: output_index = 0
self.set_output_index(output_index)
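# --- Illustrative usage sketch (hypothetical flowgraph, not part of the module) ---
# Routes one of two float streams through the selector; the sources, sink and
# sample values are assumptions for the example. With input_index=0 and
# output_index=0 only src0 reaches the sink; the other input is terminated
# internally.
def _example_selector_flowgraph():
    tb = gr.top_block()
    sel = selector(gr.sizeof_float, 2, 1, 0, 0)
    src0 = blocks.vector_source_f([1.0] * 100)
    src1 = blocks.vector_source_f([2.0] * 100)
    sink = blocks.vector_sink_f()
    tb.connect(src0, (sel, 0))
    tb.connect(src1, (sel, 1))
    tb.connect((sel, 0), sink)
    tb.run()
    return sink.data()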
| gpl-3.0 |
mskrzypkows/servo | tests/wpt/update/update.py | 224 | 1348 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
from wptrunner.update.base import Step, StepRunner
from wptrunner.update.update import LoadConfig, SyncFromUpstream, UpdateMetadata
from wptrunner.update.tree import NoVCSTree
from .tree import GitTree, HgTree, GeckoCommit
from .upstream import SyncToUpstream
class LoadTrees(Step):
"""Load gecko tree and sync tree containing web-platform-tests"""
provides = ["local_tree", "sync_tree"]
def create(self, state):
if os.path.exists(state.sync["path"]):
sync_tree = GitTree(root=state.sync["path"])
else:
sync_tree = None
if GitTree.is_type():
local_tree = GitTree(commit_cls=GeckoCommit)
elif HgTree.is_type():
local_tree = HgTree(commit_cls=GeckoCommit)
else:
local_tree = NoVCSTree()
state.update({"local_tree": local_tree,
"sync_tree": sync_tree})
class UpdateRunner(StepRunner):
"""Overall runner for updating web-platform-tests in Gecko."""
steps = [LoadConfig,
LoadTrees,
SyncToUpstream,
SyncFromUpstream,
UpdateMetadata]
| mpl-2.0 |
danielthesoto/stagger | test/alltests.py | 16 | 2119 | #!/usr/bin/env python3
#
# alltests.py
# From the stagger project: http://code.google.com/p/stagger/
#
# Copyright (c) 2009-2011 Karoly Lorentey <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import unittest
import warnings
import stagger
import test.fileutil
import test.conversion
import test.specs
import test.samples
import test.tag
import test.friendly
import test.id3v1
import test.id3v1_fileop
suite = unittest.TestSuite()
suite.addTest(test.fileutil.suite)
suite.addTest(test.conversion.suite)
suite.addTest(test.specs.suite)
suite.addTest(test.samples.suite)
suite.addTest(test.tag.suite)
suite.addTest(test.friendly.suite)
suite.addTest(test.id3v1.suite)
suite.addTest(test.id3v1_fileop.suite)
if __name__ == "__main__":
warnings.simplefilter("always", stagger.Warning)
unittest.main(defaultTest="suite")
| bsd-2-clause |
lancezlin/ml_template_py | lib/python2.7/site-packages/IPython/lib/demo.py | 7 | 20470 | """Module for interactive demos using IPython.
This module implements a few classes for running Python scripts interactively
in IPython for demonstrations. With very simple markup (a few tags in
comments), you can control points where the script stops executing and returns
control to IPython.
Provided classes
----------------
The classes are (see their docstrings for further details):
- Demo: pure python demos
- IPythonDemo: demos with input to be processed by IPython as if it had been
typed interactively (so magics work, as well as any other special syntax you
may have added via input prefilters).
- LineDemo: single-line version of the Demo class. These demos are executed
one line at a time, and require no markup.
- IPythonLineDemo: IPython version of the LineDemo class (the demo is
executed a line at a time, but processed via IPython).
- ClearMixin: mixin to make Demo classes with less visual clutter. It
declares an empty marquee and a pre_cmd that clears the screen before each
block (see Subclassing below).
- ClearDemo, ClearIPDemo: mixin-enabled versions of the Demo and IPythonDemo
classes.
Inheritance diagram:
.. inheritance-diagram:: IPython.lib.demo
:parts: 3
Subclassing
-----------
The classes here all include a few methods meant to make customization by
subclassing more convenient. Their docstrings below have some more details:
- marquee(): generates a marquee to provide visible on-screen markers at each
block start and end.
- pre_cmd(): run right before the execution of each block.
- post_cmd(): run right after the execution of each block. If the block
raises an exception, this is NOT called.
Operation
---------
The file is run in its own empty namespace (though you can pass it a string of
arguments as if in a command line environment, and it will see those as
sys.argv). But at each stop, the global IPython namespace is updated with the
current internal demo namespace, so you can work interactively with the data
accumulated so far.
By default, each block of code is printed (with syntax highlighting) before
executing it and you have to confirm execution. This is intended to show the
code to an audience first so you can discuss it, and only proceed with
execution once you agree. There are a few tags which allow you to modify this
behavior.
The supported tags are:
# <demo> stop
Defines block boundaries, the points where IPython stops execution of the
file and returns to the interactive prompt.
You can optionally mark the stop tag with extra dashes before and after the
word 'stop', to help visually distinguish the blocks in a text editor:
# <demo> --- stop ---
# <demo> silent
Make a block execute silently (and hence automatically). Typically used in
cases where you have some boilerplate or initialization code which you need
executed but do not want to be seen in the demo.
# <demo> auto
Make a block execute automatically, but still being printed. Useful for
simple code which does not warrant discussion, since it avoids the extra
manual confirmation.
# <demo> auto_all
This tag can _only_ be in the first block, and if given it overrides the
individual auto tags to make the whole demo fully automatic (no block asks
for confirmation). It can also be given at creation time (or the attribute
set later) to override what's in the file.
While _any_ python file can be run as a Demo instance, if there are no stop
tags the whole file will run in a single block (no different that calling
first %pycat and then %run). The minimal markup to make this useful is to
place a set of stop tags; the other tags are only there to let you fine-tune
the execution.
This is probably best explained with the simple example file below. You can
copy this into a file named ex_demo.py, and try running it via::
from IPython.demo import Demo
d = Demo('ex_demo.py')
d()
Each time you call the demo object, it runs the next block. The demo object
has a few useful methods for navigation, like again(), edit(), jump(), seek()
and back(). It can be reset for a new run via reset() or reloaded from disk
(in case you've edited the source) via reload(). See their docstrings below.
Note: To make this simpler to explore, a file called "demo-exercizer.py" has
been added to the "docs/examples/core" directory. Just cd to this directory in
an IPython session, and type::
%run demo-exercizer.py
and then follow the directions.
Example
-------
The following is a very simple example of a valid demo file.
::
#################### EXAMPLE DEMO <ex_demo.py> ###############################
'''A simple interactive demo to illustrate the use of IPython's Demo class.'''
print 'Hello, welcome to an interactive IPython demo.'
# The mark below defines a block boundary, which is a point where IPython will
# stop execution and return to the interactive prompt. The dashes are actually
# optional and used only as a visual aid to clearly separate blocks while
# editing the demo code.
# <demo> stop
x = 1
y = 2
# <demo> stop
# the mark below makes this block as silent
# <demo> silent
print 'This is a silent block, which gets executed but not printed.'
# <demo> stop
# <demo> auto
print 'This is an automatic block.'
print 'It is executed without asking for confirmation, but printed.'
z = x+y
    print 'z=',z
# <demo> stop
# This is just another normal block.
print 'z is now:', z
print 'bye!'
################### END EXAMPLE DEMO <ex_demo.py> ############################
"""
from __future__ import unicode_literals
#*****************************************************************************
# Copyright (C) 2005-2006 Fernando Perez. <[email protected]>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#
#*****************************************************************************
from __future__ import print_function
import os
import re
import shlex
import sys
from IPython.utils import io
from IPython.utils.text import marquee
from IPython.utils import openpy
from IPython.utils import py3compat
__all__ = ['Demo','IPythonDemo','LineDemo','IPythonLineDemo','DemoError']
class DemoError(Exception): pass
def re_mark(mark):
return re.compile(r'^\s*#\s+<demo>\s+%s\s*$' % mark,re.MULTILINE)
class Demo(object):
re_stop = re_mark('-*\s?stop\s?-*')
re_silent = re_mark('silent')
re_auto = re_mark('auto')
re_auto_all = re_mark('auto_all')
def __init__(self,src,title='',arg_str='',auto_all=None):
"""Make a new demo object. To run the demo, simply call the object.
See the module docstring for full details and an example (you can use
IPython.Demo? in IPython to see it).
Inputs:
- src is either a file, or file-like object, or a
string that can be resolved to a filename.
Optional inputs:
- title: a string to use as the demo name. Of most use when the demo
you are making comes from an object that has no filename, or if you
want an alternate denotation distinct from the filename.
- arg_str(''): a string of arguments, internally converted to a list
just like sys.argv, so the demo script can see a similar
environment.
- auto_all(None): global flag to run all blocks automatically without
confirmation. This attribute overrides the block-level tags and
applies to the whole demo. It is an attribute of the object, and
can be changed at runtime simply by reassigning it to a boolean
value.
"""
if hasattr(src, "read"):
# It seems to be a file or a file-like object
self.fname = "from a file-like object"
if title == '':
self.title = "from a file-like object"
else:
self.title = title
else:
# Assume it's a string or something that can be converted to one
self.fname = src
if title == '':
(filepath, filename) = os.path.split(src)
self.title = filename
else:
self.title = title
self.sys_argv = [src] + shlex.split(arg_str)
self.auto_all = auto_all
self.src = src
# get a few things from ipython. While it's a bit ugly design-wise,
# it ensures that things like color scheme and the like are always in
# sync with the ipython mode being used. This class is only meant to
# be used inside ipython anyways, so it's OK.
ip = get_ipython() # this is in builtins whenever IPython is running
self.ip_ns = ip.user_ns
self.ip_colorize = ip.pycolorize
self.ip_showtb = ip.showtraceback
self.ip_run_cell = ip.run_cell
self.shell = ip
# load user data and initialize data structures
self.reload()
def fload(self):
"""Load file object."""
# read data and parse into blocks
if hasattr(self, 'fobj') and self.fobj is not None:
self.fobj.close()
if hasattr(self.src, "read"):
# It seems to be a file or a file-like object
self.fobj = self.src
else:
# Assume it's a string or something that can be converted to one
self.fobj = openpy.open(self.fname)
def reload(self):
"""Reload source from disk and initialize state."""
self.fload()
self.src = "".join(openpy.strip_encoding_cookie(self.fobj))
src_b = [b.strip() for b in self.re_stop.split(self.src) if b]
self._silent = [bool(self.re_silent.findall(b)) for b in src_b]
self._auto = [bool(self.re_auto.findall(b)) for b in src_b]
# if auto_all is not given (def. None), we read it from the file
if self.auto_all is None:
self.auto_all = bool(self.re_auto_all.findall(src_b[0]))
else:
self.auto_all = bool(self.auto_all)
# Clean the sources from all markup so it doesn't get displayed when
# running the demo
src_blocks = []
auto_strip = lambda s: self.re_auto.sub('',s)
for i,b in enumerate(src_b):
if self._auto[i]:
src_blocks.append(auto_strip(b))
else:
src_blocks.append(b)
# remove the auto_all marker
src_blocks[0] = self.re_auto_all.sub('',src_blocks[0])
self.nblocks = len(src_blocks)
self.src_blocks = src_blocks
# also build syntax-highlighted source
self.src_blocks_colored = list(map(self.ip_colorize,self.src_blocks))
# ensure clean namespace and seek offset
self.reset()
def reset(self):
"""Reset the namespace and seek pointer to restart the demo"""
self.user_ns = {}
self.finished = False
self.block_index = 0
def _validate_index(self,index):
if index<0 or index>=self.nblocks:
raise ValueError('invalid block index %s' % index)
def _get_index(self,index):
"""Get the current block index, validating and checking status.
Returns None if the demo is finished"""
if index is None:
if self.finished:
print('Demo finished. Use <demo_name>.reset() if you want to rerun it.')
return None
index = self.block_index
else:
self._validate_index(index)
return index
def seek(self,index):
"""Move the current seek pointer to the given block.
You can use negative indices to seek from the end, with identical
semantics to those of Python lists."""
if index<0:
index = self.nblocks + index
self._validate_index(index)
self.block_index = index
self.finished = False
def back(self,num=1):
"""Move the seek pointer back num blocks (default is 1)."""
self.seek(self.block_index-num)
def jump(self,num=1):
"""Jump a given number of blocks relative to the current one.
The offset can be positive or negative, defaults to 1."""
self.seek(self.block_index+num)
def again(self):
"""Move the seek pointer back one block and re-execute."""
self.back(1)
self()
def edit(self,index=None):
"""Edit a block.
If no number is given, use the last block executed.
This edits the in-memory copy of the demo, it does NOT modify the
original source file. If you want to do that, simply open the file in
an editor and use reload() when you make changes to the file. This
method is meant to let you change a block during a demonstration for
explanatory purposes, without damaging your original script."""
index = self._get_index(index)
if index is None:
return
# decrease the index by one (unless we're at the very beginning), so
# that the default demo.edit() call opens up the sblock we've last run
if index>0:
index -= 1
filename = self.shell.mktempfile(self.src_blocks[index])
self.shell.hooks.editor(filename,1)
with open(filename, 'r') as f:
new_block = f.read()
# update the source and colored block
self.src_blocks[index] = new_block
self.src_blocks_colored[index] = self.ip_colorize(new_block)
self.block_index = index
# call to run with the newly edited index
self()
def show(self,index=None):
"""Show a single block on screen"""
index = self._get_index(index)
if index is None:
return
print(self.marquee('<%s> block # %s (%s remaining)' %
(self.title,index,self.nblocks-index-1)))
print(self.src_blocks_colored[index])
sys.stdout.flush()
def show_all(self):
"""Show entire demo on screen, block by block"""
fname = self.title
title = self.title
nblocks = self.nblocks
silent = self._silent
marquee = self.marquee
for index,block in enumerate(self.src_blocks_colored):
if silent[index]:
print(marquee('<%s> SILENT block # %s (%s remaining)' %
(title,index,nblocks-index-1)))
else:
print(marquee('<%s> block # %s (%s remaining)' %
(title,index,nblocks-index-1)))
print(block, end=' ')
sys.stdout.flush()
def run_cell(self,source):
"""Execute a string with one or more lines of code"""
exec(source, self.user_ns)
def __call__(self,index=None):
"""run a block of the demo.
If index is given, it should be an integer >=1 and <= nblocks. This
means that the calling convention is one off from typical Python
lists. The reason for the inconsistency is that the demo always
prints 'Block n/N, and N is the total, so it would be very odd to use
zero-indexing here."""
index = self._get_index(index)
if index is None:
return
try:
marquee = self.marquee
next_block = self.src_blocks[index]
self.block_index += 1
if self._silent[index]:
print(marquee('Executing silent block # %s (%s remaining)' %
(index,self.nblocks-index-1)))
else:
self.pre_cmd()
self.show(index)
if self.auto_all or self._auto[index]:
print(marquee('output:'))
else:
print(marquee('Press <q> to quit, <Enter> to execute...'), end=' ')
ans = py3compat.input().strip()
if ans:
print(marquee('Block NOT executed'))
return
try:
save_argv = sys.argv
sys.argv = self.sys_argv
self.run_cell(next_block)
self.post_cmd()
finally:
sys.argv = save_argv
except:
self.ip_showtb(filename=self.fname)
else:
self.ip_ns.update(self.user_ns)
if self.block_index == self.nblocks:
mq1 = self.marquee('END OF DEMO')
if mq1:
# avoid spurious print if empty marquees are used
print()
print(mq1)
print(self.marquee('Use <demo_name>.reset() if you want to rerun it.'))
self.finished = True
# These methods are meant to be overridden by subclasses who may wish to
# customize the behavior of of their demos.
def marquee(self,txt='',width=78,mark='*'):
"""Return the input string centered in a 'marquee'."""
return marquee(txt,width,mark)
def pre_cmd(self):
"""Method called before executing each block."""
pass
def post_cmd(self):
"""Method called after executing each block."""
pass
class IPythonDemo(Demo):
"""Class for interactive demos with IPython's input processing applied.
This subclasses Demo, but instead of executing each block by the Python
interpreter (via exec), it actually calls IPython on it, so that any input
filters which may be in place are applied to the input block.
If you have an interactive environment which exposes special input
processing, you can use this class instead to write demo scripts which
operate exactly as if you had typed them interactively. The default Demo
class requires the input to be valid, pure Python code.
"""
def run_cell(self,source):
"""Execute a string with one or more lines of code"""
self.shell.run_cell(source)
class LineDemo(Demo):
"""Demo where each line is executed as a separate block.
The input script should be valid Python code.
This class doesn't require any markup at all, and it's meant for simple
scripts (with no nesting or any kind of indentation) which consist of
multiple lines of input to be executed, one at a time, as if they had been
typed in the interactive prompt.
Note: the input can not have *any* indentation, which means that only
single-lines of input are accepted, not even function definitions are
valid."""
def reload(self):
"""Reload source from disk and initialize state."""
# read data and parse into blocks
self.fload()
lines = self.fobj.readlines()
src_b = [l for l in lines if l.strip()]
nblocks = len(src_b)
self.src = ''.join(lines)
self._silent = [False]*nblocks
self._auto = [True]*nblocks
self.auto_all = True
self.nblocks = nblocks
self.src_blocks = src_b
# also build syntax-highlighted source
self.src_blocks_colored = map(self.ip_colorize,self.src_blocks)
# ensure clean namespace and seek offset
self.reset()
class IPythonLineDemo(IPythonDemo,LineDemo):
"""Variant of the LineDemo class whose input is processed by IPython."""
pass
class ClearMixin(object):
"""Use this mixin to make Demo classes with less visual clutter.
Demos using this mixin will clear the screen before every block and use
blank marquees.
Note that in order for the methods defined here to actually override those
of the classes it's mixed with, it must go /first/ in the inheritance
tree. For example:
class ClearIPDemo(ClearMixin,IPythonDemo): pass
will provide an IPythonDemo class with the mixin's features.
"""
def marquee(self,txt='',width=78,mark='*'):
"""Blank marquee that returns '' no matter what the input."""
return ''
def pre_cmd(self):
"""Method called before executing each block.
This one simply clears the screen."""
from IPython.utils.terminal import term_clear
term_clear()
class ClearDemo(ClearMixin,Demo):
pass
class ClearIPDemo(ClearMixin,IPythonDemo):
pass
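# --- Illustrative usage sketch (not part of the original module) ---
# LineDemo needs no markup: every non-blank line of the target file becomes its
# own block. The file name below is an assumption; run this inside an IPython
# session, since Demo objects rely on get_ipython().
#
#   from IPython.lib.demo import LineDemo
#   d = LineDemo('ex_lines.py')
#   d()       # executes the next line each time the object is called
#   d.back()  # step back one block; see also seek(), jump() and reset()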
| mit |
josenavas/labman | labman/gui/handlers/process_handlers/sequencing_process.py | 1 | 3665 | # ----------------------------------------------------------------------------
# Copyright (c) 2017-, labman development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import re
import zipfile
from io import BytesIO
from tornado.web import authenticated
from tornado.escape import json_decode
from labman.gui.handlers.base import BaseHandler
from labman.db.user import User
from labman.db.composition import PoolComposition
from labman.db.equipment import Equipment
from labman.db.process import SequencingProcess
class SequencingProcessHandler(BaseHandler):
@authenticated
def get(self):
sequencers = []
for model, lanes in SequencingProcess.sequencer_lanes.items():
for sequencer in Equipment.list_equipment(model):
sequencer['lanes'] = lanes
sequencers.append(sequencer)
self.render('sequencing.html', users=User.list_users(),
sequencers=sequencers)
@authenticated
def post(self):
pools = self.get_argument('pools')
run_name = self.get_argument('run_name')
experiment = self.get_argument('experiment')
sequencer_id = self.get_argument('sequencer')
fwd_cycles = int(self.get_argument('fwd_cycles'))
rev_cycles = int(self.get_argument('rev_cycles'))
pi = self.get_argument('principal_investigator')
contacts = self.get_argument('additional_contacts')
pools = [PoolComposition(x) for x in json_decode(pools)]
contacts = [User(x) for x in json_decode(contacts)]
process = SequencingProcess.create(
self.current_user, pools, run_name, experiment,
Equipment(sequencer_id), fwd_cycles, rev_cycles, User(pi),
contacts)
self.write({'process': process.id})
class DownloadSampleSheetHandler(BaseHandler):
@authenticated
def get(self, process_id):
process = SequencingProcess(int(process_id))
text = process.generate_sample_sheet()
filename = 'SampleSheet_%s_%s.csv' % (
re.sub('[^0-9a-zA-Z\-\_]+', '_', process.run_name), process.id)
self.set_header('Content-Type', 'text/csv')
self.set_header('Expires', '0')
self.set_header('Cache-Control', 'no-cache')
self.set_header('Content-Disposition',
'attachment; filename=%s' % filename)
self.write(text)
self.finish()
class DownloadPreparationSheetsHandler(BaseHandler):
@authenticated
def get(self, process_id):
process = SequencingProcess(int(process_id))
with BytesIO() as content:
with zipfile.ZipFile(content, mode='w',
compression=zipfile.ZIP_DEFLATED) as zf:
for study, prep in process.generate_prep_information().items():
name = 'PrepSheet_process_%s_study_%s.csv' % (process.id,
study.id)
zf.writestr(name, prep)
zip_name = (re.sub('[^0-9a-zA-Z\-\_]+', '_', process.run_name) +
'_PrepSheets.zip')
self.set_header('Content-Type', 'application/zip')
self.set_header('Expires', '0')
self.set_header('Cache-Control', 'no-cache')
self.set_header("Content-Disposition", "attachment; filename=%s" %
zip_name)
self.write(content.getvalue())
self.finish()
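# --- Illustrative payload sketch (hypothetical values, not part of the module) ---
# Form fields the POST handler above expects; the ids, names and addresses are
# invented, and 'pools' / 'additional_contacts' are JSON-encoded lists because
# the handler runs them through json_decode before wrapping them in db objects.
EXAMPLE_SEQUENCING_POST = {
    'pools': '[1, 2]',
    'run_name': 'Run_2018_01_05',
    'experiment': 'TestExperiment',
    'sequencer': '18',
    'fwd_cycles': '151',
    'rev_cycles': '151',
    'principal_investigator': 'admin@example.com',
    'additional_contacts': '["contact@example.com"]',
}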
| bsd-3-clause |
Fat-Zer/FreeCAD_sf_master | src/Mod/TechDraw/TDTest/DHatchTest.py | 27 | 1824 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# test script for TechDraw module
# creates a page and 1 views
# adds a hatch area to view1
from __future__ import print_function
import FreeCAD
import Part
import Measure
import TechDraw
import os
def DHatchTest():
path = os.path.dirname(os.path.abspath(__file__))
print ('TDHatch path: ' + path)
templateFileSpec = path+'/TestTemplate.svg'
hatchFileSpec = path + '/TestHatch.svg'
FreeCAD.newDocument("TDHatch")
FreeCAD.setActiveDocument("TDHatch")
FreeCAD.ActiveDocument=FreeCAD.getDocument("TDHatch")
#make source feature
box = FreeCAD.ActiveDocument.addObject("Part::Box","Box")
#make a page
page = FreeCAD.ActiveDocument.addObject('TechDraw::DrawPage','Page')
FreeCAD.ActiveDocument.addObject('TechDraw::DrawSVGTemplate','Template')
FreeCAD.ActiveDocument.Template.Template = templateFileSpec
FreeCAD.ActiveDocument.Page.Template = FreeCAD.ActiveDocument.Template
page.Scale = 5.0
# page.ViewObject.show() #unit tests run in console mode
#make Views
view1 = FreeCAD.ActiveDocument.addObject('TechDraw::DrawViewPart','View')
FreeCAD.ActiveDocument.View.Source = [box]
rc = page.addView(view1)
FreeCAD.ActiveDocument.recompute()
#make hatch
print("making hatch")
hatch = FreeCAD.ActiveDocument.addObject('TechDraw::DrawHatch','Hatch')
hatch.Source = (view1,["Face0"])
hatch.HatchPattern = hatchFileSpec #comment out to use default from preferences
print("adding hatch to page")
rc = page.addView(hatch)
print("finished hatch")
FreeCAD.ActiveDocument.recompute()
rc = False
if ("Up-to-date" in hatch.State):
rc = True
FreeCAD.closeDocument("TDHatch")
return rc
if __name__ == '__main__':
DHatchTest()
| lgpl-2.1 |
JeffHoogland/bodhi3packages | python-neet/usr/lib/python2.7/dist-packages/ecfg/parser.py | 4 | 8831 | #!/usr/bin/env python
'''
An Enlightenment config parser.
See: http://wiki.openmoko.org/wiki/Enlightenment_.cfg
Requires pyparsing: https://pyparsing.wikispaces.com/.
Author: Jimmy Campbell <[email protected]>
Version: 0.1.0
License: MIT
'''
import json
import decimal
from collections import OrderedDict
import pyparsing as PP
def indent(text, indent=' '):
'''Indent each line of text.'''
return ''.join(map(lambda line: indent + line, text.splitlines(True)))
class Struct(object):
def __init__(self, name, lists, values):
'''Create a Struct object.
:param name: Struct name.
:type name: string
:param lists: List of List objects in this Struct.
:type lists: list
:param values: List of Value objects in this Struct.
:type values: list
'''
self.name = name
self.lists = lists
self.values = values
for i, _list in enumerate(self.lists):
self.lists[i] = List(*_list)
for i, value in enumerate(self.values):
self.values[i] = Value(*value)
def __repr__(self):
return 'Struct(name=%s, lists=[%s], values=[%s])' % (
repr(self.name),
len(self.lists),
len(self.values)
)
def dict(self):
'''Return the Struct as an OrderedDict.'''
return OrderedDict((
('name', self.name),
('lists', [_list.dict() for _list in self.lists]),
('values', [value.dict() for value in self.values])
))
def text(self):
'''Return the Struct as a config text block.'''
text = 'group "%s" struct {\n%s%s\n}'
lists = '\n'.join(_list.text() for _list in self.lists)
if lists:
lists = indent(lists, ' ')
values = '\n'.join(val.text() for val in self.values)
if values:
values = indent(values, ' ')
if lists:
values = '\n' + values
return text % (self.name, lists, values)
class List(object):
def __init__(self, name, items, values):
'''Create a List object.
:param name: List name.
:type name: string
:param items: List of Struct objects in this List.
:type items: list
:param values: List of Value objects in this List.
:type values: list
'''
self.name = name
self.items = items
self.values = values
for i, items in enumerate(self.items):
self.items[i] = Struct(*items)
for i, value in enumerate(self.values):
self.values[i] = Value(*value)
def __repr__(self):
return 'List(name=%s, items=[%s], values=[%s])' % (
repr(self.name),
len(self.items),
len(self.values)
)
def dict(self):
'''Return the List as an OrderedDict.'''
return OrderedDict((
('name', self.name),
('items', [item.dict() for item in self.items]),
('values', [value.dict() for value in self.values])
))
def text(self):
'''Return the List as a config text block.'''
text = 'group "%s" list {\n%s%s\n}'
items = '\n'.join(item.text() for item in self.items)
if items:
items = indent(items, ' ')
values = '\n'.join(val.text() for val in self.values)
if values:
values = indent(values, ' ')
if items:
values = '\n' + values
return text % (self.name, items, values)
class Value(object):
def __init__(self, name, type, data):
'''Create a Value object.
:param name: Value name.
:type name: string
:param type: Value type: uchar, uint, int, float, double, string.
:type type: string
:param data: The string data as represented in the e.cfg text block.
:type data: string
'''
self.name = name
self.type = type
self.data = data
def __repr__(self):
return 'Value(name=%s, type=%s, data=%s)' % (
repr(self.name),
repr(self.type),
repr(self.data)
)
def dict(self):
'''Return the Value as an OrderedDict.'''
return OrderedDict((
('name', self.name),
('type', self.type),
('data', self.data)
))
def text(self):
''' Return the Value as a config text block.'''
data = self.data
if self.type == 'string':
data = '"%s"' % self.data
return 'value "%s" %s: %s;' % (self.name, self.type, data)
# TODO: We really should do some type enforcement
@property
def value(self):
'''A getter which returns the Value data as its actual Python data
type. Uses the following mapping for each type:
- "uchar", "uint", "int" -> ``int``
- "float", "double" -> ``decimal.Decimal``.
- "string" -> ``str``.
'''
if self.type in ('uchar', 'uint', 'int'):
return int(self.data)
elif self.type in ('float', 'double'):
return decimal.Decimal(self.data)
return self.data
class ParserError(Exception):
pass
class ECfgParser(object):
'''A pyparsing parser for the e.cfg text format.'''
# PRIMITIVES
digits = PP.Word('0123456789')
type_uint = PP.Combine(digits)
type_int = PP.Combine(PP.Optional('-') + digits)
type_float = PP.Combine(type_int + '.' + type_int)
type_str = PP.QuotedString('"')
# VALUES
value_uchar = (
PP.Keyword('uchar').setResultsName('type') +
PP.Suppress(':') +
type_uint.setResultsName('data')
)
value_uint = (
PP.Keyword('uint').setResultsName('type') +
PP.Suppress(':') +
type_uint.setResultsName('data')
)
value_int = (
PP.Keyword('int').setResultsName('type') +
PP.Suppress(':') +
type_int.setResultsName('data')
)
value_float = (
PP.Keyword('float').setResultsName('type') +
PP.Suppress(':') +
type_float.setResultsName('data')
)
value_double = (
PP.Keyword('double').setResultsName('type') +
PP.Suppress(':') +
type_float.setResultsName('data')
)
value_string = (
PP.Keyword('string').setResultsName('type') +
PP.Suppress(':') +
type_str.setResultsName('data')
)
type_value = PP.Group(
PP.Keyword('value').suppress() +
type_str.setResultsName('name') +
PP.MatchFirst((
value_uchar,
value_uint,
value_int,
value_float,
value_double,
value_string
)) +
PP.Suppress(';')
).setResultsName('value')
# STRUCTURES
# placeholder for a later declaration
type_struct = PP.Forward()
type_list = PP.Group(
PP.Keyword('group').suppress() +
type_str.setResultsName('name') +
PP.Keyword('list').suppress() +
PP.Suppress('{') +
PP.Group(PP.ZeroOrMore(type_struct)).setResultsName('items') +
PP.Group(PP.ZeroOrMore(type_value)).setResultsName('values') +
PP.Suppress('}')
).setResultsName('list')
type_struct << PP.Group(
PP.Keyword('group').suppress() +
type_str.setResultsName('name') +
PP.Keyword('struct').suppress() +
PP.Suppress('{') +
PP.Group(PP.ZeroOrMore(type_list)).setResultsName('lists') +
PP.Group(PP.ZeroOrMore(type_value)).setResultsName('values') +
PP.Suppress('}')
).setResultsName('struct')
@classmethod
def parse(cls, text):
'''Create a pyparsing ParseResults object.
:param text: Enlightenment config text.
:type text: string
'''
try:
return cls.type_struct.parseString(text)
except PP.ParseException as e:
raise ParserError(str(e))
class ECfg(object):
'''An Enlightenment config object.'''
def __init__(self, text, parser=ECfgParser):
'''Create an ECfgParser object.
:param text: Enlightenment config text.
:type text: string
:param parser: A Parser class (expects a `parse(text)` method.
:type parser: class
'''
self._parser = parser()
self._parsed = self._parser.parse(text)
self.root = Struct(*self._parsed.asList()[0])
def text(self):
'''Return the Enlightenment config text.'''
return self.root.text()
def xml(self):
'''Return the XML representation of the config.'''
return self._parsed.asXML()
def json(self, **kwargs):
'''Return the JSON representation of the config.'''
return json.dumps(self.root.dict(), **kwargs)
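# --- Illustrative usage sketch (hypothetical config text, not part of the module) ---
# Parses a small, made-up Enlightenment-style block and dumps it as JSON; the
# group and value names below are invented for the example.
if __name__ == '__main__':
    SAMPLE_CFG = '''
    group "config" struct {
        group "options" list {
            group "E_Config_Option" struct {
                value "name" string: "grp:alt_shift_toggle";
            }
        }
        value "engine" int: 1;
    }
    '''
    cfg = ECfg(SAMPLE_CFG)
    print(cfg.root)
    print(cfg.json(indent=2))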
| bsd-3-clause |
kxepal/viivakoodi | barcode/codex.py | 1 | 7633 | # -*- coding: utf-8 -*-
"""Module: barcode.codex
:Provided barcodes: Code 39, Code 128, PZN
"""
from __future__ import unicode_literals
from barcode.base import Barcode
from barcode.charsets import code128, code39
from barcode.errors import *
__docformat__ = 'restructuredtext en'
# Sizes
MIN_SIZE = 0.2
MIN_QUIET_ZONE = 2.54
def check_code(code, name, allowed):
wrong = []
for char in code:
if char not in allowed:
wrong.append(char)
if wrong:
raise IllegalCharacterError(
'The following characters are not '
'valid for {name}: {wrong}'.format(name=name,
wrong=', '.join(wrong)))
class Code39(Barcode):
"""Initializes a new Code39 instance.
:parameters:
code : String
Code 39 string without \* and checksum (added automatically if
`add_checksum` is True).
writer : barcode.writer Instance
The writer to render the barcode (default: SVGWriter).
add_checksum : Boolean
Add the checksum to code or not (default: True).
"""
name = 'Code 39'
def __init__(self, code, writer=None, add_checksum=True):
self.code = code.upper()
if add_checksum:
self.code += self.calculate_checksum()
self.writer = writer or Barcode.default_writer()
check_code(self.code, self.name, code39.REF)
def __unicode__(self):
return self.code
__str__ = __unicode__
def get_fullcode(self):
return self.code
def calculate_checksum(self):
check = sum([code39.MAP[x][0] for x in self.code]) % 43
for k, v in code39.MAP.items():
if check == v[0]:
return k
def build(self):
chars = [code39.EDGE]
for char in self.code:
chars.append(code39.MAP[char][1])
chars.append(code39.EDGE)
return [code39.MIDDLE.join(chars)]
def render(self, writer_options):
options = dict(module_width=MIN_SIZE, quiet_zone=MIN_QUIET_ZONE)
options.update(writer_options or {})
return Barcode.render(self, options)
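# Illustrative usage sketch (not part of the original module). Assuming
# code39.MAP follows the standard Code 39 value table (0-9 -> 0-9, A-Z ->
# 10-35, then "-", ".", " ", "$", "/", "+", "%"), the check character is the
# one whose value equals sum(values) % 43. For "ABC": 10 + 11 + 12 = 33 and
# 33 % 43 = 33, which is "X", so:
#
#   code = Code39("ABC")       # default SVGWriter, checksum appended
#   code.get_fullcode()        # -> "ABCX"
#
# Rendering/saving is done through the Barcode base class (e.g. a save()
# method), which is assumed here rather than shown in this file.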
class PZN(Code39):
"""Initializes new German number for pharmaceutical products.
:parameters:
pzn : String
Code to render.
writer : barcode.writer Instance
The writer to render the barcode (default: SVGWriter).
"""
name = 'Pharmazentralnummer'
digits = 6
def __init__(self, pzn, writer=None):
pzn = pzn[:self.digits]
if not pzn.isdigit():
raise IllegalCharacterError('PZN can only contain numbers.')
if len(pzn) != self.digits:
raise NumberOfDigitsError('PZN must have {0} digits, not '
'{1}.'.format(self.digits, len(pzn)))
self.pzn = pzn
self.pzn = '{0}{1}'.format(pzn, self.calculate_checksum())
Code39.__init__(self, 'PZN-{0}'.format(self.pzn), writer,
add_checksum=False)
def get_fullcode(self):
return 'PZN-{0}'.format(self.pzn)
def calculate_checksum(self):
sum_ = sum([int(x) * int(y) for x, y in enumerate(self.pzn, start=2)])
checksum = sum_ % 11
if checksum == 10:
raise BarcodeError('Checksum can not be 10 for PZN.')
else:
return checksum
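# Worked example (illustrative, not part of the original module): the
# checksum above weights the six digits with 2..7 and takes the sum modulo
# 11. For pzn = "123456":
#
#   2*1 + 3*2 + 4*3 + 5*4 + 6*5 + 7*6 = 112,  112 % 11 = 2
#
# so PZN("123456").get_fullcode() would return "PZN-1234562". A sum that
# leaves remainder 10 is rejected with BarcodeError above.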
class Code128(Barcode):
"""Initializes a new Code128 instance. The checksum is added automatically
when building the bars.
:parameters:
code : String
Code 128 string without checksum (added automatically).
writer : barcode.writer Instance
The writer to render the barcode (default: SVGWriter).
"""
name = 'Code 128'
def __init__(self, code, writer=None):
self.code = code
self.writer = writer or Barcode.default_writer()
self._charset = 'B'
self._buffer = ''
check_code(self.code, self.name, code128.ALL)
def __unicode__(self):
return self.code
__str__ = __unicode__
@property
def encoded(self):
return self._build()
def get_fullcode(self):
return self.code
def _new_charset(self, which):
if which == 'A':
code = self._convert('TO_A')
elif which == 'B':
code = self._convert('TO_B')
elif which == 'C':
code = self._convert('TO_C')
self._charset = which
return [code]
def _maybe_switch_charset(self, pos):
char = self.code[pos]
next_ = self.code[pos:pos + 10]
def look_next():
digits = 0
for c in next_:
if c.isdigit():
digits += 1
else:
break
return digits > 3
codes = []
if self._charset == 'C' and not char.isdigit():
if char in code128.B:
codes = self._new_charset('B')
elif char in code128.A:
codes = self._new_charset('A')
if len(self._buffer) == 1:
codes.append(self._convert(self._buffer[0]))
self._buffer = ''
elif self._charset == 'B':
if look_next():
codes = self._new_charset('C')
elif char not in code128.B:
if char in code128.A:
codes = self._new_charset('A')
elif self._charset == 'A':
if look_next():
codes = self._new_charset('C')
elif char not in code128.A:
if char in code128.B:
codes = self._new_charset('B')
return codes
def _convert(self, char):
if self._charset == 'A':
return code128.A[char]
elif self._charset == 'B':
return code128.B[char]
elif self._charset == 'C':
if char in code128.C:
return code128.C[char]
elif char.isdigit():
self._buffer += char
if len(self._buffer) == 2:
value = int(self._buffer)
self._buffer = ''
return value
def _try_to_optimize(self, encoded):
if encoded[1] in code128.TO:
encoded[:2] = [code128.TO[encoded[1]]]
return encoded
def _calculate_checksum(self, encoded):
cs = [encoded[0]]
for i, code_num in enumerate(encoded[1:], start=1):
cs.append(i * code_num)
return sum(cs) % 103
def _build(self):
encoded = [code128.START_CODES[self._charset]]
for i, char in enumerate(self.code):
encoded.extend(self._maybe_switch_charset(i))
code_num = self._convert(char)
if code_num is not None:
encoded.append(code_num)
# Finally look in the buffer
if len(self._buffer) == 1:
encoded.extend(self._new_charset('B'))
encoded.append(self._convert(self._buffer[0]))
self._buffer = ''
encoded = self._try_to_optimize(encoded)
return encoded
def build(self):
encoded = self._build()
encoded.append(self._calculate_checksum(encoded))
code = ''
for code_num in encoded:
code += code128.CODES[code_num]
code += code128.STOP
code += '11'
return [code]
def render(self, writer_options):
options = dict(module_width=MIN_SIZE, quiet_zone=MIN_QUIET_ZONE)
options.update(writer_options or {})
return Barcode.render(self, options)
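# Illustrative sketch (not part of the original module): Code128 starts in
# charset B and _maybe_switch_charset() switches to charset C whenever a run
# of more than three digits follows, so _convert() can pack digit pairs into
# single code values via the two character buffer:
#
#   code = Code128("AB123456")
#   code.get_fullcode()   # -> "AB123456"
#   code.build()          # encodes START B, "A", "B", switch to C, then the
#                         # pairs "12", "34", "56", checksum and STOP
#
# The concrete code values come from the tables in barcode.charsets.code128,
# which are not shown in this file.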
| mit |
co/TheLastRogue | menu.py | 1 | 15853 | import equipactions
import equipment
import graphic
import inputhandler
import geometry as geo
import colors
import gui
import inventory
import menufactory
import messenger
import rectfactory
import settings
import style
def clamp(n, minn, maxn):
return max(min(maxn, n), minn)
class Menu(gui.UIElement):
def __init__(self, offset, state_stack,
margin=geo.zero2d(), vertical_space=1, may_escape=True, vi_keys_accepted=True, selected_payload_callback=None):
super(Menu, self).__init__(margin)
self.menu_items = []
self._state_stack = state_stack
self._selected_index = None
self.offset = offset
self._wrap = True
self.may_escape = may_escape
self._item_stack_panel = gui.StackPanelVertical((0, 0), vertical_space=vertical_space)
self.vi_keys_accepted = vi_keys_accepted
self.selected_payload_callback = selected_payload_callback
@property
def rect(self):
return geo.Rect(self.offset, self.width, self.height)
@property
def width(self):
return self._item_stack_panel.width
@property
def height(self):
return self._item_stack_panel.height
@property
def selected_index(self):
return self._selected_index
@selected_index.setter
def selected_index(self, value):
        if value is not self._selected_index and value is not None:
self._selected_index = value
self._signal_new_index()
def update(self):
self._recreate_option_list()
if not self.has_valid_option_selected():
self.try_set_index_to_valid_value()
#self._recreate_option_list()
inputhandler.handler.update_keys()
key = inputhandler.handler.get_keypress()
if (key == inputhandler.UP or (self.vi_keys_accepted and key == inputhandler.VI_NORTH)
or (key == inputhandler.TAB and inputhandler.handler.is_special_key_pressed(inputhandler.KEY_SHIFT))):
self.index_decrease()
if(key == inputhandler.DOWN or (self.vi_keys_accepted and key == inputhandler.VI_SOUTH)
or key == inputhandler.TAB):
self.index_increase()
if key == inputhandler.ENTER or key == inputhandler.SPACE:
self.activate()
if key == inputhandler.ESCAPE and self.may_escape:
self._state_stack.pop()
def try_set_index_to_valid_value(self):
if not any(menu_item.can_activate() for menu_item in self.menu_items):
self.selected_index = None
self.selected_index = 0
if not self.has_valid_option_selected():
self.index_increase()
def has_valid_option_selected(self):
return (0 <= self.selected_index < len(self.menu_items) and
self.menu_items[self.selected_index].can_activate())
def _update_menu_items(self):
pass
def _recreate_option_list(self):
self._update_menu_items()
self._item_stack_panel.clear()
for index, item in enumerate(self.menu_items):
if index == self.selected_index:
menu_item = item.selected_ui_representation()
elif item.can_activate():
menu_item = item.unselected_ui_representation()
else:
menu_item = item.inactive_ui_representation()
self._item_stack_panel.append(menu_item)
def can_activate(self):
        return (self.selected_index is not None and len(self.menu_items) > 0 and
self.menu_items[self.selected_index].can_activate())
def activate(self):
if self.can_activate():
selected_option = self.menu_items[self.selected_index]
selected_option.activate()
def index_increase(self):
if(not any(item.can_activate() for item in self.menu_items) or
self.selected_index is None):
self.selected_index = None
return
self._offset_index(1)
if not self.menu_items[self.selected_index].can_activate():
self.index_increase()
def index_decrease(self):
if(not any(item.can_activate() for item in self.menu_items) or
self.selected_index is None):
self.selected_index = None
return
self._offset_index(-1)
if not self.menu_items[self.selected_index].can_activate():
self.index_decrease()
def _offset_index(self, offset):
if len(self.menu_items) == 0 or self.selected_index is None:
return
if self._wrap:
# Will behave strangely for when offset is less than -menu_size
self.selected_index = (offset + self.selected_index + len(self.menu_items)) % len(self.menu_items)
else:
self.selected_index = clamp(offset + self.selected_index, 0, len(self.menu_items) - 1)
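    # Note on the wrap-around arithmetic above (illustrative): with
    # len(self.menu_items) == 4 and selected_index == 3, an offset of +1
    # gives (1 + 3 + 4) % 4 == 0, wrapping back to the top, while an offset
    # of -1 from index 0 gives (-1 + 0 + 4) % 4 == 3. Offsets smaller than
    # -len(self.menu_items) are not handled, as the comment above warns.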
def draw(self, offset=geo.zero2d()):
real_offset = geo.int_2d(geo.add_2d(geo.add_2d(self.offset, offset), self.margin))
self._item_stack_panel.draw(real_offset)
def call_payload_callback(self):
if self.selected_payload_callback and self.has_valid_option_selected():
payload = self.menu_items[self.selected_index].payload
self.selected_payload_callback(payload)
def _signal_new_index(self):
self.call_payload_callback()
class MenuOption(gui.UIElement):
def __init__(self, text, functions, can_activate=(lambda: True), payload=None):
self._functions = functions
self.can_activate = can_activate
self._selected = gui.TextBox(text, geo.zero2d(), colors.TEXT_SELECTED)
self._unselected = gui.TextBox(text, geo.zero2d(), colors.TEXT_UNSELECTED)
self._inactive = gui.TextBox(text, geo.zero2d(), colors.TEXT_INACTIVE)
self.payload = payload
def activate(self):
for function in self._functions:
function()
return
@property
def width(self):
return self._selected.width
@property
def height(self):
return self._selected.height
def selected_ui_representation(self):
return self._selected
def unselected_ui_representation(self):
return self._unselected
def inactive_ui_representation(self):
return self._inactive
#TODO MenuOption should probably have a graphic representation object
# this should not be solved by subclassing!
class MenuOptionWithSymbols(MenuOption):
def __init__(self, text, selected_graphic_char, unselected_graphic_char,
functions, can_activate=(lambda: True), payload=None):
super(MenuOptionWithSymbols, self).__init__(text, functions, can_activate, payload=payload)
self.selected_graphic_char = selected_graphic_char
self.unselected_graphic_char = unselected_graphic_char
self._selected = gui.StackPanelHorizontal(geo.zero2d(), horizontal_space=1)
self._selected.append(gui.SymbolUIElement(geo.zero2d(), self.selected_graphic_char))
self._selected.append(gui.TextBox(text, geo.zero2d(), colors.TEXT_SELECTED))
self._unselected = gui.StackPanelHorizontal(geo.zero2d(), horizontal_space=1)
self._unselected.append(gui.SymbolUIElement(geo.zero2d(), self.unselected_graphic_char))
self._unselected.append(gui.TextBox(text, geo.zero2d(), colors.TEXT_UNSELECTED))
self._inactive = gui.StackPanelHorizontal(geo.zero2d(), horizontal_space=1)
self._inactive.append(gui.SymbolUIElement(geo.zero2d(), self.unselected_graphic_char))
self._inactive.append(gui.TextBox(text, geo.zero2d(), colors.TEXT_INACTIVE))
class StaticMenu(Menu):
def __init__(self, offset, menu_items, state_stack, margin=geo.zero2d(),
vertical_space=1, may_escape=True, vi_keys_accepted=True, selected_payload_callback=None):
super(StaticMenu, self).__init__(offset, state_stack, margin=margin, vertical_space=vertical_space,
may_escape=may_escape, vi_keys_accepted=vi_keys_accepted,
selected_payload_callback=selected_payload_callback)
self.menu_items = menu_items
self._recreate_option_list()
self.try_set_index_to_valid_value()
class EquipmentMenu(Menu):
def __init__(self, offset, player, state_stack, selected_payload_callback,
margin=geo.zero2d(), may_escape=True):
super(EquipmentMenu, self).__init__(offset, state_stack, margin=margin,
may_escape=may_escape, selected_payload_callback=selected_payload_callback)
self.player = player
def _update_menu_items(self):
self.menu_items = []
for slot in equipment.EquipmentSlots.ALL:
slot_menu = menufactory.equipment_slot_menu(self.player, slot, self._state_stack)
option_func = menufactory.DelayedStatePush(self._state_stack, slot_menu)
item_in_slot = self.player.equipment.get(slot)
if item_in_slot is None:
item_name = "-"
item_graphic = graphic.GraphicChar(None, colors.NOT_EQUIPPED_FG, slot.icon)
else:
item_name = item_in_slot.description.name
item_graphic = item_in_slot.graphic_char
self.menu_items.append(MenuOptionWithSymbols(item_name, item_graphic, item_graphic, [option_func],
payload=item_in_slot))
class EquipSlotMenu(Menu):
def __init__(self, offset, player, equipment_slot, state_stack, selected_payload_callback,
margin=geo.zero2d(), may_escape=True):
super(EquipSlotMenu, self).__init__(offset, state_stack, margin=margin,
may_escape=may_escape, selected_payload_callback=selected_payload_callback)
self.player = player
self.try_set_index_to_valid_value()
self.equipment_slot = equipment_slot
def _update_menu_items(self):
items = self.player.inventory.items_of_equipment_type(self.equipment_slot.equipment_type)
self.menu_items = []
for item in items:
reequip_function = item.reequip_action.delayed_act(source_entity=self.player, target_entity=self.player,
equipment_slot=self.equipment_slot)
stack_pop_function = BackToGameFunction(self._state_stack)
functions = [reequip_function, stack_pop_function]
self.menu_items.append(MenuOptionWithSymbols(item.description.name, item.graphic_char,
item.graphic_char, functions, payload=item))
unequip_function = equipactions.UnequipAction().delayed_act(source_entity=self.player, target_entity=self.player,
equipment_slot=self.equipment_slot)
stack_pop_function = BackToGameFunction(self._state_stack)
unequip_functions = [unequip_function, stack_pop_function]
none_item_graphic = graphic.GraphicChar(None, colors.NOT_EQUIPPED_FG, self.equipment_slot.icon)
self.menu_items.append(MenuOptionWithSymbols("- None -", none_item_graphic,
none_item_graphic, unequip_functions))
#if self.selected_payload_callback.description is None:
#self.call_payload_callback()
self._item_stack_panel.vertical_space = 1 if len(items) * 2 + 2 <= inventory.ITEM_CAPACITY else 0
class OpenItemActionMenuAction(object):
def __init__(self, state_stack, item, player):
self._item = item
self._player = player
self._state_stack = state_stack
def __call__(self):
item_actions_menu = menufactory.item_actions_menu(self._item,
self._player,
self._state_stack)
self._state_stack.push(item_actions_menu)
class ItemActionsMenu(Menu):
def __init__(self, offset, item, player, state_stack,
margin=geo.zero2d(), vertical_space=1, may_escape=True):
super(ItemActionsMenu, self).__init__(offset, state_stack, margin=margin,
vertical_space=vertical_space, may_escape=may_escape)
self._actions = sorted(item.get_children_with_tag("user_action"), key=lambda action: action.display_order)
self._player = player
self.update()
def _update_menu_items(self):
game_state = self._player.game_state.value
self.menu_items = []
for item_action in self._actions:
action_act = item_action.delayed_act(source_entity=self._player,
target_entity=self._player,
game_state=game_state,
target_position=self._player.position.value)
action_can_act = item_action.delayed_can_act(source_entity=self._player,
target_entity=self._player,
game_state=game_state,
target_position=self._player.position.value)
back_to_game_function = BackToGameFunction(self._state_stack)
functions = [action_act, back_to_game_function]
option = MenuOption(item_action.name, functions, can_activate=action_can_act)
self.menu_items.append(option)
class StackPopFunction(object):
def __init__(self, state_stack, states_to_pop):
self._state_stack = state_stack
self._states_to_pop = states_to_pop
def __call__(self):
for _ in range(self._states_to_pop):
self._state_stack.pop()
class BackToGameFunction(object):
def __init__(self, state_stack):
self._state_stack = state_stack
def __call__(self):
self._state_stack.pop_to_game_state()
class AcceptRejectPrompt(gui.UIElement):
def __init__(self, state_stack, message, width=settings.MINIMUM_WIDTH * 0.8,
max_height=settings.MINIMUM_HEIGHT * 0.8):
self.message = message
self._width = width
self.max_height = max_height
self._state_stack = state_stack
margin = style.interface_theme.margin
self.text_stack_panel = gui.StackPanelVertical((-1, -1), vertical_space=1)
self.text_stack_panel.append(gui.TextBoxWrap(message, (0, 0), colors.GRAY, self.width, max_height))
self.text_stack_panel.append(gui.TextBoxWrap(messenger.PRESS_ENTER_TO_ACCEPT, (0, 0),
colors.LIGHT_ORANGE, self.width, max_height))
self.text_stack_panel.update()
rect = rectfactory.ratio_of_screen_rect(self.text_stack_panel.width + margin[0] * 2,
self.text_stack_panel.height + margin[1] * 2,
0.5, 0.3)
self.text_stack_panel.offset = geo.add_2d(rect.top_left, (2, 2))
self.bg_rectangle = gui.StyledRectangle(rect, style.interface_theme.rect_style)
self.result = False
@property
def width(self):
return self._width
def draw(self, offset=geo.zero2d()):
self.bg_rectangle.draw(offset)
self.text_stack_panel.draw(offset)
def update(self):
inputhandler.handler.update_keys()
key = inputhandler.handler.get_keypress()
if key == inputhandler.ENTER or key == inputhandler.SPACE:
self.result = True
self._state_stack.pop()
elif key:
self._state_stack.pop()
| bsd-2-clause |
schwarty/nignore | externals/markdown/extensions/headerid.py | 44 | 5926 | """
HeaderID Extension for Python-Markdown
======================================
Auto-generate id attributes for HTML headers.
Basic usage:
>>> import markdown
>>> text = "# Some Header #"
>>> md = markdown.markdown(text, ['headerid'])
>>> print md
<h1 id="some-header">Some Header</h1>
All header IDs are unique:
>>> text = '''
... #Header
... #Header
... #Header'''
>>> md = markdown.markdown(text, ['headerid'])
>>> print md
<h1 id="header">Header</h1>
<h1 id="header_1">Header</h1>
<h1 id="header_2">Header</h1>
To fit within a html template's hierarchy, set the header base level:
>>> text = '''
... #Some Header
... ## Next Level'''
>>> md = markdown.markdown(text, ['headerid(level=3)'])
>>> print md
<h3 id="some-header">Some Header</h3>
<h4 id="next-level">Next Level</h4>
Works with inline markup.
>>> text = '#Some *Header* with [markup](http://example.com).'
>>> md = markdown.markdown(text, ['headerid'])
>>> print md
<h1 id="some-header-with-markup">Some <em>Header</em> with <a href="http://example.com">markup</a>.</h1>
Turn off auto generated IDs:
>>> text = '''
... # Some Header
... # Another Header'''
>>> md = markdown.markdown(text, ['headerid(forceid=False)'])
>>> print md
<h1>Some Header</h1>
<h1>Another Header</h1>
Use with MetaData extension:
>>> text = '''header_level: 2
... header_forceid: Off
...
... # A Header'''
>>> md = markdown.markdown(text, ['headerid', 'meta'])
>>> print md
<h2>A Header</h2>
Copyright 2007-2011 [Waylan Limberg](http://achinghead.com/).
Project website: <http://packages.python.org/Markdown/extensions/header_id.html>
Contact: [email protected]
License: BSD (see ../docs/LICENSE for details)
Dependencies:
* [Python 2.3+](http://python.org)
* [Markdown 2.0+](http://packages.python.org/Markdown/)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..treeprocessors import Treeprocessor
import re
import logging
import unicodedata
logger = logging.getLogger('MARKDOWN')
IDCOUNT_RE = re.compile(r'^(.*)_([0-9]+)$')
def slugify(value, separator):
""" Slugify a string, to make it URL friendly. """
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
value = re.sub('[^\w\s-]', '', value.decode('ascii')).strip().lower()
return re.sub('[%s\s]+' % separator, separator, value)
def unique(id, ids):
""" Ensure id is unique in set of ids. Append '_1', '_2'... if not """
while id in ids or not id:
m = IDCOUNT_RE.match(id)
if m:
id = '%s_%d'% (m.group(1), int(m.group(2))+1)
else:
id = '%s_%d'% (id, 1)
ids.add(id)
return id
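# Illustrative examples (not part of the original module):
#
#   ids = set()
#   slugify('Some Header', '-')    # -> 'some-header'
#   unique('some-header', ids)     # -> 'some-header'   (first occurrence)
#   unique('some-header', ids)     # -> 'some-header_1' (second occurrence)
#   unique('some-header', ids)     # -> 'some-header_2' (third occurrence)
#
# which matches the doctests in the module docstring above.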
def itertext(elem):
""" Loop through all children and return text only.
Reimplements method of same name added to ElementTree in Python 2.7
"""
if elem.text:
yield elem.text
for e in elem:
for s in itertext(e):
yield s
if e.tail:
yield e.tail
class HeaderIdTreeprocessor(Treeprocessor):
""" Assign IDs to headers. """
IDs = set()
def run(self, doc):
start_level, force_id = self._get_meta()
slugify = self.config['slugify']
sep = self.config['separator']
for elem in doc.getiterator():
if elem.tag in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']:
if force_id:
if "id" in elem.attrib:
id = elem.get('id')
else:
id = slugify(''.join(itertext(elem)), sep)
elem.set('id', unique(id, self.IDs))
if start_level:
level = int(elem.tag[-1]) + start_level
if level > 6:
level = 6
elem.tag = 'h%d' % level
def _get_meta(self):
""" Return meta data suported by this ext as a tuple """
level = int(self.config['level']) - 1
force = self._str2bool(self.config['forceid'])
if hasattr(self.md, 'Meta'):
if 'header_level' in self.md.Meta:
level = int(self.md.Meta['header_level'][0]) - 1
if 'header_forceid' in self.md.Meta:
force = self._str2bool(self.md.Meta['header_forceid'][0])
return level, force
def _str2bool(self, s, default=False):
""" Convert a string to a booleen value. """
s = str(s)
if s.lower() in ['0', 'f', 'false', 'off', 'no', 'n']:
return False
elif s.lower() in ['1', 't', 'true', 'on', 'yes', 'y']:
return True
return default
class HeaderIdExtension(Extension):
def __init__(self, configs):
# set defaults
self.config = {
'level' : ['1', 'Base level for headers.'],
'forceid' : ['True', 'Force all headers to have an id.'],
'separator' : ['-', 'Word separator.'],
'slugify' : [slugify, 'Callable to generate anchors'],
}
for key, value in configs:
self.setConfig(key, value)
def extendMarkdown(self, md, md_globals):
md.registerExtension(self)
self.processor = HeaderIdTreeprocessor()
self.processor.md = md
self.processor.config = self.getConfigs()
if 'attr_list' in md.treeprocessors.keys():
# insert after attr_list treeprocessor
md.treeprocessors.add('headerid', self.processor, '>attr_list')
else:
# insert after 'prettify' treeprocessor.
md.treeprocessors.add('headerid', self.processor, '>prettify')
def reset(self):
self.processor.IDs = set()
def makeExtension(configs=None):
return HeaderIdExtension(configs=configs)
| bsd-3-clause |
agreen/scrapy | scrapy/utils/log.py | 108 | 6012 | # -*- coding: utf-8 -*-
import sys
import logging
import warnings
from logging.config import dictConfig
from twisted.python.failure import Failure
from twisted.python import log as twisted_log
import scrapy
from scrapy.settings import overridden_settings, Settings
from scrapy.exceptions import ScrapyDeprecationWarning
logger = logging.getLogger(__name__)
def failure_to_exc_info(failure):
"""Extract exc_info from Failure instances"""
if isinstance(failure, Failure):
return (failure.type, failure.value, failure.getTracebackObject())
class TopLevelFormatter(logging.Filter):
"""Keep only top level loggers's name (direct children from root) from
records.
This filter will replace Scrapy loggers' names with 'scrapy'. This mimics
the old Scrapy log behaviour and helps shortening long names.
Since it can't be set for just one logger (it won't propagate for its
children), it's going to be set in the root handler, with a parametrized
`loggers` list where it should act.
"""
def __init__(self, loggers=None):
self.loggers = loggers or []
def filter(self, record):
if any(record.name.startswith(l + '.') for l in self.loggers):
record.name = record.name.split('.', 1)[0]
return True
DEFAULT_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'loggers': {
'scrapy': {
'level': 'DEBUG',
},
'twisted': {
'level': 'ERROR',
},
}
}
def configure_logging(settings=None, install_root_handler=True):
"""
Initialize logging defaults for Scrapy.
:param settings: settings used to create and configure a handler for the
root logger (default: None).
:type settings: dict, :class:`~scrapy.settings.Settings` object or ``None``
:param install_root_handler: whether to install root logging handler
(default: True)
:type install_root_handler: bool
This function does:
- Route warnings and twisted logging through Python standard logging
- Assign DEBUG and ERROR level to Scrapy and Twisted loggers respectively
- Route stdout to log if LOG_STDOUT setting is True
When ``install_root_handler`` is True (default), this function also
creates a handler for the root logger according to given settings
(see :ref:`topics-logging-settings`). You can override default options
using ``settings`` argument. When ``settings`` is empty or None, defaults
are used.
"""
if not sys.warnoptions:
# Route warnings through python logging
logging.captureWarnings(True)
observer = twisted_log.PythonLoggingObserver('twisted')
observer.start()
dictConfig(DEFAULT_LOGGING)
if isinstance(settings, dict) or settings is None:
settings = Settings(settings)
if settings.getbool('LOG_STDOUT'):
sys.stdout = StreamLogger(logging.getLogger('stdout'))
if install_root_handler:
logging.root.setLevel(logging.NOTSET)
handler = _get_handler(settings)
logging.root.addHandler(handler)
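# Minimal usage sketch (illustrative, not part of the original module):
#
#   from scrapy.utils.log import configure_logging
#   configure_logging({'LOG_LEVEL': 'INFO',
#                      'LOG_FORMAT': '%(levelname)s: %(message)s'})
#
# A plain dict is accepted because it is wrapped in a Settings object above;
# with install_root_handler left at True, the handler built by _get_handler()
# below is attached to the root logger.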
def _get_handler(settings):
""" Return a log handler object according to settings """
filename = settings.get('LOG_FILE')
if filename:
encoding = settings.get('LOG_ENCODING')
handler = logging.FileHandler(filename, encoding=encoding)
elif settings.getbool('LOG_ENABLED'):
handler = logging.StreamHandler()
else:
handler = logging.NullHandler()
formatter = logging.Formatter(
fmt=settings.get('LOG_FORMAT'),
datefmt=settings.get('LOG_DATEFORMAT')
)
handler.setFormatter(formatter)
handler.setLevel(settings.get('LOG_LEVEL'))
handler.addFilter(TopLevelFormatter(['scrapy']))
return handler
def log_scrapy_info(settings):
logger.info("Scrapy %(version)s started (bot: %(bot)s)",
{'version': scrapy.__version__, 'bot': settings['BOT_NAME']})
logger.info("Optional features available: %(features)s",
{'features': ", ".join(scrapy.optional_features)})
d = dict(overridden_settings(settings))
logger.info("Overridden settings: %(settings)r", {'settings': d})
class StreamLogger(object):
"""Fake file-like stream object that redirects writes to a logger instance
Taken from:
http://www.electricmonk.nl/log/2011/08/14/redirect-stdout-and-stderr-to-a-logger-in-python/
"""
def __init__(self, logger, log_level=logging.INFO):
self.logger = logger
self.log_level = log_level
self.linebuf = ''
def write(self, buf):
for line in buf.rstrip().splitlines():
self.logger.log(self.log_level, line.rstrip())
class LogCounterHandler(logging.Handler):
"""Record log levels count into a crawler stats"""
def __init__(self, crawler, *args, **kwargs):
super(LogCounterHandler, self).__init__(*args, **kwargs)
self.crawler = crawler
def emit(self, record):
sname = 'log_count/{}'.format(record.levelname)
self.crawler.stats.inc_value(sname)
def logformatter_adapter(logkws):
"""
Helper that takes the dictionary output from the methods in LogFormatter
and adapts it into a tuple of positional arguments for logger.log calls,
handling backward compatibility as well.
"""
if not {'level', 'msg', 'args'} <= set(logkws):
warnings.warn('Missing keys in LogFormatter method',
ScrapyDeprecationWarning)
if 'format' in logkws:
warnings.warn('`format` key in LogFormatter methods has been '
'deprecated, use `msg` instead',
ScrapyDeprecationWarning)
level = logkws.get('level', logging.INFO)
message = logkws.get('format', logkws.get('msg'))
# NOTE: This also handles 'args' being an empty dict, that case doesn't
# play well in logger.log calls
args = logkws if not logkws.get('args') else logkws['args']
return (level, message, args)
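# Illustrative example (not part of the original module): a LogFormatter
# method returning
#
#   {'level': logging.INFO, 'msg': 'Crawled %(n)d pages', 'args': {'n': 10}}
#
# is adapted to (logging.INFO, 'Crawled %(n)d pages', {'n': 10}), which can
# be passed straight on as logger.log(*logformatter_adapter(logkws)).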
| bsd-3-clause |
rosswhitfield/mantid | Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/TransformToIqt.py | 3 | 12292 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# pylint: disable=no-init,too-many-instance-attributes
from mantid.simpleapi import *
from mantid.api import (PythonAlgorithm, AlgorithmFactory, MatrixWorkspaceProperty,
ITableWorkspaceProperty, PropertyMode, Progress)
from mantid.kernel import Direction, logger, IntBoundedValidator
DEFAULT_ITERATIONS = 50
DEFAULT_SEED = 89631139
class TransformToIqt(PythonAlgorithm):
_sample = None
_resolution = None
_e_min = None
_e_max = None
_e_width = None
_number_points_per_bin = None
_parameter_table = None
_output_workspace = None
_dry_run = None
_calculate_errors = None
_number_of_iterations = None
_seed = None
def category(self):
return "Workflow\\Inelastic;Workflow\\MIDAS"
def summary(self):
return 'Transforms an inelastic reduction to I(Q, t)'
def PyInit(self):
self.declareProperty(MatrixWorkspaceProperty('SampleWorkspace', '',
optional=PropertyMode.Mandatory,
direction=Direction.Input),
doc="Name for the sample workspace.")
self.declareProperty(MatrixWorkspaceProperty('ResolutionWorkspace', '',
optional=PropertyMode.Mandatory,
direction=Direction.Input),
doc="Name for the resolution workspace.")
self.declareProperty(name='EnergyMin', defaultValue=-0.5,
doc='Minimum energy for fit. Default=-0.5')
self.declareProperty(name='EnergyMax', defaultValue=0.5,
doc='Maximum energy for fit. Default=0.5')
self.declareProperty(name='BinReductionFactor', defaultValue=10.0,
doc='Decrease total number of spectrum points by this ratio through merging of '
                                 'intensities from neighbouring bins. Default=10')
self.declareProperty('NumberOfIterations', DEFAULT_ITERATIONS, IntBoundedValidator(lower=1),
doc="Number of randomised simulations for monte-carlo error calculation.")
self.declareProperty('SeedValue', DEFAULT_SEED, IntBoundedValidator(lower=1),
doc="Seed for pseudo-random number generator in monte-carlo error calculation.")
self.declareProperty(ITableWorkspaceProperty('ParameterWorkspace', '',
direction=Direction.Output,
optional=PropertyMode.Optional),
doc='Table workspace for saving TransformToIqt properties')
self.declareProperty(MatrixWorkspaceProperty('OutputWorkspace', '',
direction=Direction.Output,
optional=PropertyMode.Optional),
doc='Output workspace')
self.declareProperty(name='DryRun', defaultValue=False,
doc='Only calculate and output the parameters')
self.declareProperty('CalculateErrors', defaultValue=True,
doc="Calculate monte-carlo errors.")
def PyExec(self):
self._setup()
self._check_analysers_and_reflection()
self._calculate_parameters()
if not self._dry_run:
self._output_workspace = self._transform()
self._add_logs()
else:
skip_prog = Progress(self, start=0.3, end=1.0, nreports=2)
skip_prog.report('skipping transform')
skip_prog.report('skipping add logs')
logger.information('Dry run, will not run TransformToIqt')
self.setProperty('ParameterWorkspace', self._parameter_table)
self.setProperty('OutputWorkspace', self._output_workspace)
def _setup(self):
"""
Gets algorithm properties.
"""
from IndirectCommon import getWSprefix
self._sample = self.getPropertyValue('SampleWorkspace')
self._resolution = self.getPropertyValue('ResolutionWorkspace')
self._e_min = self.getProperty('EnergyMin').value
self._e_max = self.getProperty('EnergyMax').value
self._number_points_per_bin = self.getProperty(
'BinReductionFactor').value
self._parameter_table = self.getPropertyValue('ParameterWorkspace')
if self._parameter_table == '':
self._parameter_table = getWSprefix(
self._sample) + 'TransformToIqtParameters'
self._calculate_errors = self.getProperty("CalculateErrors").value
self._number_of_iterations = self.getProperty(
"NumberOfIterations").value
self._seed = self.getProperty("SeedValue").value
self._output_workspace = self.getPropertyValue('OutputWorkspace')
if self._output_workspace == '':
self._output_workspace = getWSprefix(self._sample) + 'iqt'
self._dry_run = self.getProperty('DryRun').value
def validateInputs(self):
"""
Validate input properties.
"""
issues = dict()
e_min = self.getProperty('EnergyMin').value
e_max = self.getProperty('EnergyMax').value
# Check for swapped energy values
if e_min > e_max:
energy_swapped = 'EnergyMin is greater than EnergyMax'
issues['EnergyMin'] = energy_swapped
issues['EnergyMax'] = energy_swapped
return issues
def _calculate_parameters(self):
"""
Calculates the TransformToIqt parameters and saves in a table workspace.
"""
from IndirectCommon import getEfixed
end_prog = 0.3 if self._calculate_errors else 0.9
workflow_prog = Progress(self, start=0.0, end=end_prog, nreports=8)
workflow_prog.report('Cropping Workspace')
CropWorkspace(InputWorkspace=self._sample,
OutputWorkspace='__TransformToIqt_sample_cropped',
Xmin=self._e_min,
Xmax=self._e_max)
workflow_prog.report('Calculating table properties')
x_data = mtd['__TransformToIqt_sample_cropped'].readX(0)
number_input_points = len(x_data) - 1
num_bins = int(number_input_points / self._number_points_per_bin)
self._e_width = (abs(self._e_min) + abs(self._e_max)) / num_bins
workflow_prog.report('Attempting to Access IPF')
try:
workflow_prog.report('Access IPF')
instrument = mtd[self._sample].getInstrument()
analyserName = instrument.getStringParameter('analyser')[0]
analyser = instrument.getComponentByName(analyserName)
if analyser is not None:
logger.debug('Found %s component in instrument %s, will look for resolution there'
% (analyserName, instrument))
resolution = analyser.getNumberParameter('resolution')[0]
else:
logger.debug('No %s component found on instrument %s, will look for resolution in top level instrument'
% (analyserName, instrument))
resolution = instrument.getNumberParameter('resolution')[0]
logger.information('Got resolution from IPF: %f' % resolution)
workflow_prog.report('IPF resolution obtained')
except (AttributeError, IndexError):
workflow_prog.report('Resorting to Default')
resolution = getEfixed(self._sample) * 0.01
logger.warning('Could not get the resolution from the IPF, using 1% of the E Fixed value for the '
'resolution: {0}'.format(resolution))
resolution_bins = int(round((2 * resolution) / self._e_width))
if resolution_bins < 5:
logger.warning(
'Resolution curve has <5 points. Results may be unreliable.')
workflow_prog.report('Creating Parameter table')
param_table = CreateEmptyTableWorkspace(
OutputWorkspace=self._parameter_table)
workflow_prog.report('Populating Parameter table')
param_table.addColumn('int', 'SampleInputBins')
param_table.addColumn('float', 'BinReductionFactor')
param_table.addColumn('int', 'SampleOutputBins')
param_table.addColumn('float', 'EnergyMin')
param_table.addColumn('float', 'EnergyMax')
param_table.addColumn('float', 'EnergyWidth')
param_table.addColumn('float', 'Resolution')
param_table.addColumn('int', 'ResolutionBins')
param_table.addRow([number_input_points, self._number_points_per_bin, num_bins,
self._e_min, self._e_max, self._e_width,
resolution, resolution_bins])
workflow_prog.report('Deleting temp Workspace')
if mtd.doesExist('__TransformToIqt_sample_cropped'):
DeleteWorkspace('__TransformToIqt_sample_cropped')
self.setProperty('ParameterWorkspace', param_table)
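    # Worked example of the binning above (illustrative): with EnergyMin=-0.5,
    # EnergyMax=0.5, 1000 input points and BinReductionFactor=10, this gives
    # num_bins = int(1000 / 10) = 100 and
    # self._e_width = (0.5 + 0.5) / 100 = 0.01 in the workspace's energy
    # units; the check above then warns if fewer than 5 such bins span twice
    # the instrument resolution.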
def _add_logs(self):
sample_logs = [('iqt_sample_workspace', self._sample),
('iqt_resolution_workspace', self._resolution),
('iqt_binning', '%f,%f,%f' % (self._e_min, self._e_width, self._e_max))]
log_alg = self.createChildAlgorithm(name='AddSampleLogMultiple', startProgress=0.9,
endProgress=1.0, enableLogging=True)
log_alg.setProperty('Workspace', self._output_workspace)
log_alg.setProperty('LogNames', [item[0] for item in sample_logs])
log_alg.setProperty('LogValues', [item[1] for item in sample_logs])
log_alg.execute()
def _transform(self):
"""
Run TransformToIqt.
"""
from IndirectCommon import CheckHistZero, CheckHistSame
# Process resolution data
res_number_of_histograms = CheckHistZero(self._resolution)[0]
sample_number_of_histograms = CheckHistZero(self._sample)[0]
        if res_number_of_histograms > 1 and sample_number_of_histograms != res_number_of_histograms:
CheckHistSame(
self._sample,
'Sample',
self._resolution,
'Resolution')
calculateiqt_alg = self.createChildAlgorithm(name='CalculateIqt', startProgress=0.3,
endProgress=1.0, enableLogging=True)
calculateiqt_alg.setAlwaysStoreInADS(False)
args = {"InputWorkspace": self._sample, "OutputWorkspace": "iqt", "ResolutionWorkspace": self._resolution,
"EnergyMin": self._e_min, "EnergyMax": self._e_max, "EnergyWidth": self._e_width,
"CalculateErrors": self._calculate_errors, "NumberOfIterations": self._number_of_iterations,
"SeedValue": self._seed}
for key, value in args.items():
calculateiqt_alg.setProperty(key, value)
calculateiqt_alg.execute()
iqt = calculateiqt_alg.getProperty("OutputWorkspace").value
# Set Y axis unit and label
iqt.setYUnit('')
iqt.setYUnitLabel('Intensity')
return iqt
def _check_analysers_and_reflection(self):
from IndirectCommon import CheckAnalysersOrEFixed
try:
CheckAnalysersOrEFixed(self._sample, self._resolution)
except ValueError:
# A genuine error the shows that the two runs are incompatible
raise
except BaseException:
# Checking could not be performed due to incomplete or no
# instrument
logger.warning(
'Could not check for matching analyser and reflection')
# Register algorithm with Mantid
AlgorithmFactory.subscribe(TransformToIqt)
| gpl-3.0 |
alexander-barabash/My-QEMU-mirror | scripts/qapi-commands.py | 7 | 11725 | #
# QAPI command marshaller generator
#
# Copyright IBM, Corp. 2011
#
# Authors:
# Anthony Liguori <[email protected]>
# Michael Roth <[email protected]>
#
# This work is licensed under the terms of the GNU GPLv2.
# See the COPYING.LIB file in the top-level directory.
from ordereddict import OrderedDict
from qapi import *
import sys
import os
import getopt
import errno
def type_visitor(name):
if type(name) == list:
return 'visit_type_%sList' % name[0]
else:
return 'visit_type_%s' % name
def generate_decl_enum(name, members, genlist=True):
return mcgen('''
void %(visitor)s(Visitor *m, %(name)s * obj, const char *name, Error **errp);
''',
visitor=type_visitor(name))
def generate_command_decl(name, args, ret_type):
arglist=""
for argname, argtype, optional, structured in parse_args(args):
argtype = c_type(argtype)
if argtype == "char *":
argtype = "const char *"
if optional:
arglist += "bool has_%s, " % c_var(argname)
arglist += "%s %s, " % (argtype, c_var(argname))
return mcgen('''
%(ret_type)s qmp_%(name)s(%(args)sError **errp);
''',
ret_type=c_type(ret_type), name=c_var(name), args=arglist).strip()
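# Illustrative example (not part of the original script): for a schema entry
# such as { 'command': 'block-resize', 'data': {'device': 'str', 'size': 'int'} }
# generate_command_decl() would emit roughly
#
#   void qmp_block_resize(const char * device, int64_t size, Error **errp);
#
# The exact C types come from c_type() and parse_args() in qapi.py, which are
# not shown here, so the spelling of the types is an assumption.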
def gen_sync_call(name, args, ret_type, indent=0):
ret = ""
arglist=""
retval=""
if ret_type:
retval = "retval = "
for argname, argtype, optional, structured in parse_args(args):
if optional:
arglist += "has_%s, " % c_var(argname)
arglist += "%s, " % (c_var(argname))
push_indent(indent)
ret = mcgen('''
%(retval)sqmp_%(name)s(%(args)serrp);
''',
name=c_var(name), args=arglist, retval=retval).rstrip()
if ret_type:
ret += "\n" + mcgen(''''
if (!error_is_set(errp)) {
%(marshal_output_call)s
}
''',
marshal_output_call=gen_marshal_output_call(name, ret_type)).rstrip()
pop_indent(indent)
return ret.rstrip()
def gen_marshal_output_call(name, ret_type):
if not ret_type:
return ""
return "qmp_marshal_output_%s(retval, ret, errp);" % c_var(name)
def gen_visitor_output_containers_decl(ret_type):
ret = ""
push_indent()
if ret_type:
ret += mcgen('''
QmpOutputVisitor *mo;
QapiDeallocVisitor *md;
Visitor *v;
''')
pop_indent()
return ret
def gen_visitor_input_containers_decl(args):
ret = ""
push_indent()
if len(args) > 0:
ret += mcgen('''
QmpInputVisitor *mi;
QapiDeallocVisitor *md;
Visitor *v;
''')
pop_indent()
return ret.rstrip()
def gen_visitor_input_vars_decl(args):
ret = ""
push_indent()
for argname, argtype, optional, structured in parse_args(args):
if optional:
ret += mcgen('''
bool has_%(argname)s = false;
''',
argname=c_var(argname))
if c_type(argtype).endswith("*"):
ret += mcgen('''
%(argtype)s %(argname)s = NULL;
''',
argname=c_var(argname), argtype=c_type(argtype))
else:
ret += mcgen('''
%(argtype)s %(argname)s;
''',
argname=c_var(argname), argtype=c_type(argtype))
pop_indent()
return ret.rstrip()
def gen_visitor_input_block(args, obj, dealloc=False):
ret = ""
if len(args) == 0:
return ret
push_indent()
if dealloc:
ret += mcgen('''
md = qapi_dealloc_visitor_new();
v = qapi_dealloc_get_visitor(md);
''')
else:
ret += mcgen('''
mi = qmp_input_visitor_new(%(obj)s);
v = qmp_input_get_visitor(mi);
''',
obj=obj)
for argname, argtype, optional, structured in parse_args(args):
if optional:
ret += mcgen('''
visit_start_optional(v, &has_%(c_name)s, "%(name)s", errp);
if (has_%(c_name)s) {
''',
c_name=c_var(argname), name=argname)
push_indent()
ret += mcgen('''
%(visitor)s(v, &%(c_name)s, "%(name)s", errp);
''',
c_name=c_var(argname), name=argname, argtype=argtype,
visitor=type_visitor(argtype))
if optional:
pop_indent()
ret += mcgen('''
}
visit_end_optional(v, errp);
''')
if dealloc:
ret += mcgen('''
qapi_dealloc_visitor_cleanup(md);
''')
else:
ret += mcgen('''
qmp_input_visitor_cleanup(mi);
''')
pop_indent()
return ret.rstrip()
def gen_marshal_output(name, args, ret_type, middle_mode):
if not ret_type:
return ""
ret = mcgen('''
static void qmp_marshal_output_%(c_name)s(%(c_ret_type)s ret_in, QObject **ret_out, Error **errp)
{
QapiDeallocVisitor *md = qapi_dealloc_visitor_new();
QmpOutputVisitor *mo = qmp_output_visitor_new();
Visitor *v;
v = qmp_output_get_visitor(mo);
%(visitor)s(v, &ret_in, "unused", errp);
if (!error_is_set(errp)) {
*ret_out = qmp_output_get_qobject(mo);
}
qmp_output_visitor_cleanup(mo);
v = qapi_dealloc_get_visitor(md);
%(visitor)s(v, &ret_in, "unused", errp);
qapi_dealloc_visitor_cleanup(md);
}
''',
c_ret_type=c_type(ret_type), c_name=c_var(name),
visitor=type_visitor(ret_type))
return ret
def gen_marshal_input_decl(name, args, ret_type, middle_mode):
if middle_mode:
return 'int qmp_marshal_input_%s(Monitor *mon, const QDict *qdict, QObject **ret)' % c_var(name)
else:
return 'static void qmp_marshal_input_%s(QDict *args, QObject **ret, Error **errp)' % c_var(name)
def gen_marshal_input(name, args, ret_type, middle_mode):
hdr = gen_marshal_input_decl(name, args, ret_type, middle_mode)
ret = mcgen('''
%(header)s
{
''',
header=hdr)
if middle_mode:
ret += mcgen('''
Error *local_err = NULL;
Error **errp = &local_err;
QDict *args = (QDict *)qdict;
''')
if ret_type:
if c_type(ret_type).endswith("*"):
retval = " %s retval = NULL;" % c_type(ret_type)
else:
retval = " %s retval;" % c_type(ret_type)
ret += mcgen('''
%(retval)s
''',
retval=retval)
if len(args) > 0:
ret += mcgen('''
%(visitor_input_containers_decl)s
%(visitor_input_vars_decl)s
%(visitor_input_block)s
''',
visitor_input_containers_decl=gen_visitor_input_containers_decl(args),
visitor_input_vars_decl=gen_visitor_input_vars_decl(args),
visitor_input_block=gen_visitor_input_block(args, "QOBJECT(args)"))
else:
ret += mcgen('''
(void)args;
''')
ret += mcgen('''
if (error_is_set(errp)) {
goto out;
}
%(sync_call)s
''',
sync_call=gen_sync_call(name, args, ret_type, indent=4))
ret += mcgen('''
out:
''')
ret += mcgen('''
%(visitor_input_block_cleanup)s
''',
visitor_input_block_cleanup=gen_visitor_input_block(args, None,
dealloc=True))
if middle_mode:
ret += mcgen('''
if (local_err) {
qerror_report_err(local_err);
error_free(local_err);
return -1;
}
return 0;
''')
else:
ret += mcgen('''
return;
''')
ret += mcgen('''
}
''')
return ret
def gen_registry(commands):
registry=""
push_indent()
for cmd in commands:
registry += mcgen('''
qmp_register_command("%(name)s", qmp_marshal_input_%(c_name)s);
''',
name=cmd['command'], c_name=c_var(cmd['command']))
pop_indent()
ret = mcgen('''
static void qmp_init_marshal(void)
{
%(registry)s
}
qapi_init(qmp_init_marshal);
''',
registry=registry.rstrip())
return ret
def gen_command_decl_prologue(header, guard, prefix=""):
ret = mcgen('''
/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
* schema-defined QAPI function prototypes
*
* Copyright IBM, Corp. 2011
*
* Authors:
* Anthony Liguori <[email protected]>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
#ifndef %(guard)s
#define %(guard)s
#include "%(prefix)sqapi-types.h"
#include "error.h"
''',
header=basename(header), guard=guardname(header), prefix=prefix)
return ret
def gen_command_def_prologue(prefix="", proxy=False):
ret = mcgen('''
/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
* schema-defined QMP->QAPI command dispatch
*
* Copyright IBM, Corp. 2011
*
* Authors:
* Anthony Liguori <[email protected]>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
#include "qemu-objects.h"
#include "qapi/qmp-core.h"
#include "qapi/qapi-visit-core.h"
#include "qapi/qmp-output-visitor.h"
#include "qapi/qmp-input-visitor.h"
#include "qapi/qapi-dealloc-visitor.h"
#include "%(prefix)sqapi-types.h"
#include "%(prefix)sqapi-visit.h"
''',
prefix=prefix)
if not proxy:
ret += '#include "%sqmp-commands.h"' % prefix
return ret + "\n\n"
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], "chp:o:m",
["source", "header", "prefix=",
"output-dir=", "type=", "middle"])
except getopt.GetoptError, err:
print str(err)
sys.exit(1)
output_dir = ""
prefix = ""
dispatch_type = "sync"
c_file = 'qmp-marshal.c'
h_file = 'qmp-commands.h'
middle_mode = False
do_c = False
do_h = False
for o, a in opts:
if o in ("-p", "--prefix"):
prefix = a
elif o in ("-o", "--output-dir"):
output_dir = a + "/"
elif o in ("-t", "--type"):
dispatch_type = a
elif o in ("-m", "--middle"):
middle_mode = True
elif o in ("-c", "--source"):
do_c = True
elif o in ("-h", "--header"):
do_h = True
if not do_c and not do_h:
do_c = True
do_h = True
c_file = output_dir + prefix + c_file
h_file = output_dir + prefix + h_file
def maybe_open(really, name, opt):
if really:
return open(name, opt)
else:
import StringIO
return StringIO.StringIO()
try:
os.makedirs(output_dir)
except os.error, e:
if e.errno != errno.EEXIST:
raise
exprs = parse_schema(sys.stdin)
commands = filter(lambda expr: expr.has_key('command'), exprs)
commands = filter(lambda expr: not expr.has_key('gen'), commands)
if dispatch_type == "sync":
fdecl = maybe_open(do_h, h_file, 'w')
fdef = maybe_open(do_c, c_file, 'w')
ret = gen_command_decl_prologue(header=basename(h_file), guard=guardname(h_file), prefix=prefix)
fdecl.write(ret)
ret = gen_command_def_prologue(prefix=prefix)
fdef.write(ret)
for cmd in commands:
arglist = []
ret_type = None
if cmd.has_key('data'):
arglist = cmd['data']
if cmd.has_key('returns'):
ret_type = cmd['returns']
ret = generate_command_decl(cmd['command'], arglist, ret_type) + "\n"
fdecl.write(ret)
if ret_type:
ret = gen_marshal_output(cmd['command'], arglist, ret_type, middle_mode) + "\n"
fdef.write(ret)
if middle_mode:
fdecl.write('%s;\n' % gen_marshal_input_decl(cmd['command'], arglist, ret_type, middle_mode))
ret = gen_marshal_input(cmd['command'], arglist, ret_type, middle_mode) + "\n"
fdef.write(ret)
fdecl.write("\n#endif\n");
if not middle_mode:
ret = gen_registry(commands)
fdef.write(ret)
fdef.flush()
fdef.close()
fdecl.flush()
fdecl.close()
| gpl-2.0 |
trueblue2704/AskMeAnything | lib/python2.7/site-packages/gunicorn/http/message.py | 35 | 11293 | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import re
import socket
from errno import ENOTCONN
from gunicorn._compat import bytes_to_str
from gunicorn.http.unreader import SocketUnreader
from gunicorn.http.body import ChunkedReader, LengthReader, EOFReader, Body
from gunicorn.http.errors import (InvalidHeader, InvalidHeaderName, NoMoreData,
InvalidRequestLine, InvalidRequestMethod, InvalidHTTPVersion,
LimitRequestLine, LimitRequestHeaders)
from gunicorn.http.errors import InvalidProxyLine, ForbiddenProxyRequest
from gunicorn.six import BytesIO
from gunicorn.six.moves.urllib.parse import urlsplit
MAX_REQUEST_LINE = 8190
MAX_HEADERS = 32768
MAX_HEADERFIELD_SIZE = 8190
HEADER_RE = re.compile("[\x00-\x1F\x7F()<>@,;:\[\]={} \t\\\\\"]")
METH_RE = re.compile(r"[A-Z0-9$-_.]{3,20}")
VERSION_RE = re.compile(r"HTTP/(\d+).(\d+)")
class Message(object):
def __init__(self, cfg, unreader):
self.cfg = cfg
self.unreader = unreader
self.version = None
self.headers = []
self.trailers = []
self.body = None
# set headers limits
self.limit_request_fields = cfg.limit_request_fields
if (self.limit_request_fields <= 0
or self.limit_request_fields > MAX_HEADERS):
self.limit_request_fields = MAX_HEADERS
self.limit_request_field_size = cfg.limit_request_field_size
if (self.limit_request_field_size < 0
or self.limit_request_field_size > MAX_HEADERFIELD_SIZE):
self.limit_request_field_size = MAX_HEADERFIELD_SIZE
# set max header buffer size
max_header_field_size = self.limit_request_field_size or MAX_HEADERFIELD_SIZE
self.max_buffer_headers = self.limit_request_fields * \
(max_header_field_size + 2) + 4
unused = self.parse(self.unreader)
self.unreader.unread(unused)
self.set_body_reader()
def parse(self):
raise NotImplementedError()
def parse_headers(self, data):
headers = []
# Split lines on \r\n keeping the \r\n on each line
lines = [bytes_to_str(line) + "\r\n" for line in data.split(b"\r\n")]
# Parse headers into key/value pairs paying attention
# to continuation lines.
while len(lines):
if len(headers) >= self.limit_request_fields:
raise LimitRequestHeaders("limit request headers fields")
# Parse initial header name : value pair.
curr = lines.pop(0)
header_length = len(curr)
if curr.find(":") < 0:
raise InvalidHeader(curr.strip())
name, value = curr.split(":", 1)
name = name.rstrip(" \t").upper()
if HEADER_RE.search(name):
raise InvalidHeaderName(name)
name, value = name.strip(), [value.lstrip()]
# Consume value continuation lines
while len(lines) and lines[0].startswith((" ", "\t")):
curr = lines.pop(0)
header_length += len(curr)
if header_length > self.limit_request_field_size > 0:
raise LimitRequestHeaders("limit request headers "
+ "fields size")
value.append(curr)
value = ''.join(value).rstrip()
if header_length > self.limit_request_field_size > 0:
raise LimitRequestHeaders("limit request headers fields size")
headers.append((name, value))
return headers
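    # Illustrative example (not part of the original module): given the raw
    # header block
    #
    #   b"Host: example.org\r\nX-Note: first line\r\n\tcontinued"
    #
    # parse_headers() returns [("HOST", "example.org"),
    # ("X-NOTE", "first line\r\n\tcontinued")]: names are upper-cased and
    # lines starting with a space or tab are folded into the previous value.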
def set_body_reader(self):
chunked = False
content_length = None
for (name, value) in self.headers:
if name == "CONTENT-LENGTH":
content_length = value
elif name == "TRANSFER-ENCODING":
chunked = value.lower() == "chunked"
elif name == "SEC-WEBSOCKET-KEY1":
content_length = 8
if chunked:
self.body = Body(ChunkedReader(self, self.unreader))
elif content_length is not None:
try:
content_length = int(content_length)
except ValueError:
raise InvalidHeader("CONTENT-LENGTH", req=self)
if content_length < 0:
raise InvalidHeader("CONTENT-LENGTH", req=self)
self.body = Body(LengthReader(self.unreader, content_length))
else:
self.body = Body(EOFReader(self.unreader))
def should_close(self):
for (h, v) in self.headers:
if h == "CONNECTION":
v = v.lower().strip()
if v == "close":
return True
elif v == "keep-alive":
return False
break
return self.version <= (1, 0)
class Request(Message):
def __init__(self, cfg, unreader, req_number=1):
self.method = None
self.uri = None
self.path = None
self.query = None
self.fragment = None
# get max request line size
self.limit_request_line = cfg.limit_request_line
if (self.limit_request_line < 0
or self.limit_request_line >= MAX_REQUEST_LINE):
self.limit_request_line = MAX_REQUEST_LINE
self.req_number = req_number
self.proxy_protocol_info = None
super(Request, self).__init__(cfg, unreader)
def get_data(self, unreader, buf, stop=False):
data = unreader.read()
if not data:
if stop:
raise StopIteration()
raise NoMoreData(buf.getvalue())
buf.write(data)
def parse(self, unreader):
buf = BytesIO()
self.get_data(unreader, buf, stop=True)
# get request line
line, rbuf = self.read_line(unreader, buf, self.limit_request_line)
# proxy protocol
if self.proxy_protocol(bytes_to_str(line)):
# get next request line
buf = BytesIO()
buf.write(rbuf)
line, rbuf = self.read_line(unreader, buf, self.limit_request_line)
self.parse_request_line(bytes_to_str(line))
buf = BytesIO()
buf.write(rbuf)
# Headers
data = buf.getvalue()
idx = data.find(b"\r\n\r\n")
done = data[:2] == b"\r\n"
while True:
idx = data.find(b"\r\n\r\n")
done = data[:2] == b"\r\n"
if idx < 0 and not done:
self.get_data(unreader, buf)
data = buf.getvalue()
if len(data) > self.max_buffer_headers:
raise LimitRequestHeaders("max buffer headers")
else:
break
if done:
self.unreader.unread(data[2:])
return b""
self.headers = self.parse_headers(data[:idx])
ret = data[idx + 4:]
buf = BytesIO()
return ret
def read_line(self, unreader, buf, limit=0):
data = buf.getvalue()
while True:
idx = data.find(b"\r\n")
if idx >= 0:
# check if the request line is too large
if idx > limit > 0:
raise LimitRequestLine(idx, limit)
break
elif len(data) - 2 > limit > 0:
raise LimitRequestLine(len(data), limit)
self.get_data(unreader, buf)
data = buf.getvalue()
return (data[:idx], # request line,
data[idx + 2:]) # residue in the buffer, skip \r\n
def proxy_protocol(self, line):
"""\
Detect, check and parse proxy protocol.
:raises: ForbiddenProxyRequest, InvalidProxyLine.
:return: True for proxy protocol line else False
"""
if not self.cfg.proxy_protocol:
return False
if self.req_number != 1:
return False
if not line.startswith("PROXY"):
return False
self.proxy_protocol_access_check()
self.parse_proxy_protocol(line)
return True
def proxy_protocol_access_check(self):
# check in allow list
if isinstance(self.unreader, SocketUnreader):
try:
remote_host = self.unreader.sock.getpeername()[0]
except socket.error as e:
if e.args[0] == ENOTCONN:
raise ForbiddenProxyRequest("UNKNOW")
raise
if ("*" not in self.cfg.proxy_allow_ips and
remote_host not in self.cfg.proxy_allow_ips):
raise ForbiddenProxyRequest(remote_host)
def parse_proxy_protocol(self, line):
bits = line.split()
if len(bits) != 6:
raise InvalidProxyLine(line)
# Extract data
proto = bits[1]
s_addr = bits[2]
d_addr = bits[3]
# Validation
if proto not in ["TCP4", "TCP6"]:
raise InvalidProxyLine("protocol '%s' not supported" % proto)
if proto == "TCP4":
try:
socket.inet_pton(socket.AF_INET, s_addr)
socket.inet_pton(socket.AF_INET, d_addr)
except socket.error:
raise InvalidProxyLine(line)
elif proto == "TCP6":
try:
socket.inet_pton(socket.AF_INET6, s_addr)
socket.inet_pton(socket.AF_INET6, d_addr)
except socket.error:
raise InvalidProxyLine(line)
try:
s_port = int(bits[4])
d_port = int(bits[5])
except ValueError:
raise InvalidProxyLine("invalid port %s" % line)
if not ((0 <= s_port <= 65535) and (0 <= d_port <= 65535)):
raise InvalidProxyLine("invalid port %s" % line)
# Set data
self.proxy_protocol_info = {
"proxy_protocol": proto,
"client_addr": s_addr,
"client_port": s_port,
"proxy_addr": d_addr,
"proxy_port": d_port
}
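    # Illustrative example (not part of the original module): the line
    #
    #   "PROXY TCP4 192.168.0.1 192.168.0.11 56324 443"
    #
    # passes validation and sets self.proxy_protocol_info to
    # {'proxy_protocol': 'TCP4', 'client_addr': '192.168.0.1',
    #  'client_port': 56324, 'proxy_addr': '192.168.0.11', 'proxy_port': 443}.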
def parse_request_line(self, line):
bits = line.split(None, 2)
if len(bits) != 3:
raise InvalidRequestLine(line)
# Method
if not METH_RE.match(bits[0]):
raise InvalidRequestMethod(bits[0])
self.method = bits[0].upper()
# URI
# When the path starts with //, urlsplit considers it as a
        # relative uri while the RFC says it shouldn't
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
# considers it as an absolute url.
# fix issue #297
if bits[1].startswith("//"):
self.uri = bits[1][1:]
else:
self.uri = bits[1]
parts = urlsplit(self.uri)
self.path = parts.path or ""
self.query = parts.query or ""
self.fragment = parts.fragment or ""
# Version
match = VERSION_RE.match(bits[2])
if match is None:
raise InvalidHTTPVersion(bits[2])
self.version = (int(match.group(1)), int(match.group(2)))
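    # Illustrative example (not part of the original module): for the line
    # "GET /search?q=gunicorn HTTP/1.1" this method sets self.method = "GET",
    # self.uri = "/search?q=gunicorn", self.path = "/search",
    # self.query = "q=gunicorn", self.fragment = "" and self.version = (1, 1).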
def set_body_reader(self):
super(Request, self).set_body_reader()
if isinstance(self.body.reader, EOFReader):
self.body = Body(LengthReader(self.unreader, 0))
| mit |
mpvismer/pyqtgraph | pyqtgraph/graphicsItems/PlotItem/PlotItem.py | 17 | 47201 | # -*- coding: utf-8 -*-
"""
PlotItem.py - Graphics item implementing a scalable ViewBox with plotting powers.
Copyright 2010 Luke Campagnola
Distributed under MIT/X11 license. See license.txt for more infomation.
This class is one of the workhorses of pyqtgraph. It implements a graphics item with
plots, labels, and scales which can be viewed inside a QGraphicsScene. If you want
a widget that can be added to your GUI, see PlotWidget instead.
This class is very heavily featured:
- Automatically creates and manages PlotCurveItems
- Fast display and update of plots
- Manages zoom/pan ViewBox, scale, and label elements
- Automatic scaling when data changes
- Control panel with a huge feature set including averaging, decimation,
display, power spectrum, svg/png export, plot linking, and more.
"""
import sys
import weakref
import numpy as np
import os
from ...Qt import QtGui, QtCore, QT_LIB
from ... import pixmaps
from ... import functions as fn
from ...widgets.FileDialog import FileDialog
from .. PlotDataItem import PlotDataItem
from .. ViewBox import ViewBox
from .. AxisItem import AxisItem
from .. LabelItem import LabelItem
from .. LegendItem import LegendItem
from .. GraphicsWidget import GraphicsWidget
from .. ButtonItem import ButtonItem
from .. InfiniteLine import InfiniteLine
from ...WidgetGroup import WidgetGroup
from ...python2_3 import basestring
if QT_LIB == 'PyQt4':
from .plotConfigTemplate_pyqt import *
elif QT_LIB == 'PySide':
from .plotConfigTemplate_pyside import *
elif QT_LIB == 'PyQt5':
from .plotConfigTemplate_pyqt5 import *
__all__ = ['PlotItem']
try:
from metaarray import *
HAVE_METAARRAY = True
except:
HAVE_METAARRAY = False
class PlotItem(GraphicsWidget):
"""
**Bases:** :class:`GraphicsWidget <pyqtgraph.GraphicsWidget>`
Plot graphics item that can be added to any graphics scene. Implements axes, titles, and interactive viewbox.
PlotItem also provides some basic analysis functionality that may be accessed from the context menu.
Use :func:`plot() <pyqtgraph.PlotItem.plot>` to create a new PlotDataItem and add it to the view.
Use :func:`addItem() <pyqtgraph.PlotItem.addItem>` to add any QGraphicsItem to the view.
This class wraps several methods from its internal ViewBox:
:func:`setXRange <pyqtgraph.ViewBox.setXRange>`,
:func:`setYRange <pyqtgraph.ViewBox.setYRange>`,
:func:`setRange <pyqtgraph.ViewBox.setRange>`,
:func:`autoRange <pyqtgraph.ViewBox.autoRange>`,
:func:`setXLink <pyqtgraph.ViewBox.setXLink>`,
:func:`setYLink <pyqtgraph.ViewBox.setYLink>`,
:func:`setAutoPan <pyqtgraph.ViewBox.setAutoPan>`,
:func:`setAutoVisible <pyqtgraph.ViewBox.setAutoVisible>`,
:func:`setLimits <pyqtgraph.ViewBox.setLimits>`,
:func:`viewRect <pyqtgraph.ViewBox.viewRect>`,
:func:`viewRange <pyqtgraph.ViewBox.viewRange>`,
:func:`setMouseEnabled <pyqtgraph.ViewBox.setMouseEnabled>`,
:func:`enableAutoRange <pyqtgraph.ViewBox.enableAutoRange>`,
:func:`disableAutoRange <pyqtgraph.ViewBox.disableAutoRange>`,
:func:`setAspectLocked <pyqtgraph.ViewBox.setAspectLocked>`,
:func:`invertY <pyqtgraph.ViewBox.invertY>`,
:func:`invertX <pyqtgraph.ViewBox.invertX>`,
:func:`register <pyqtgraph.ViewBox.register>`,
:func:`unregister <pyqtgraph.ViewBox.unregister>`
The ViewBox itself can be accessed by calling :func:`getViewBox() <pyqtgraph.PlotItem.getViewBox>`
==================== =======================================================================
**Signals:**
sigYRangeChanged wrapped from :class:`ViewBox <pyqtgraph.ViewBox>`
sigXRangeChanged wrapped from :class:`ViewBox <pyqtgraph.ViewBox>`
sigRangeChanged wrapped from :class:`ViewBox <pyqtgraph.ViewBox>`
==================== =======================================================================
"""
sigRangeChanged = QtCore.Signal(object, object) ## Emitted when the ViewBox range has changed
sigYRangeChanged = QtCore.Signal(object, object) ## Emitted when the ViewBox Y range has changed
sigXRangeChanged = QtCore.Signal(object, object) ## Emitted when the ViewBox X range has changed
lastFileDir = None
def __init__(self, parent=None, name=None, labels=None, title=None, viewBox=None, axisItems=None, enableMenu=True, **kargs):
"""
Create a new PlotItem. All arguments are optional.
Any extra keyword arguments are passed to PlotItem.plot().
============== ==========================================================================================
**Arguments:**
*title* Title to display at the top of the item. Html is allowed.
*labels* A dictionary specifying the axis labels to display::
{'left': (args), 'bottom': (args), ...}
The name of each axis and the corresponding arguments are passed to
:func:`PlotItem.setLabel() <pyqtgraph.PlotItem.setLabel>`
                        Optionally, PlotItem may also be initialized with the keyword arguments left,
right, top, or bottom to achieve the same effect.
*name* Registers a name for this view so that others may link to it
*viewBox* If specified, the PlotItem will be constructed with this as its ViewBox.
*axisItems* Optional dictionary instructing the PlotItem to use pre-constructed items
for its axes. The dict keys must be axis names ('left', 'bottom', 'right', 'top')
and the values must be instances of AxisItem (or at least compatible with AxisItem).
============== ==========================================================================================
"""
GraphicsWidget.__init__(self, parent)
self.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
## Set up control buttons
path = os.path.dirname(__file__)
#self.autoImageFile = os.path.join(path, 'auto.png')
#self.lockImageFile = os.path.join(path, 'lock.png')
self.autoBtn = ButtonItem(pixmaps.getPixmap('auto'), 14, self)
self.autoBtn.mode = 'auto'
self.autoBtn.clicked.connect(self.autoBtnClicked)
#self.autoBtn.hide()
self.buttonsHidden = False ## whether the user has requested buttons to be hidden
self.mouseHovering = False
self.layout = QtGui.QGraphicsGridLayout()
self.layout.setContentsMargins(1,1,1,1)
self.setLayout(self.layout)
self.layout.setHorizontalSpacing(0)
self.layout.setVerticalSpacing(0)
if viewBox is None:
viewBox = ViewBox(parent=self)
self.vb = viewBox
self.vb.sigStateChanged.connect(self.viewStateChanged)
self.setMenuEnabled(enableMenu, enableMenu) ## en/disable plotitem and viewbox menus
if name is not None:
self.vb.register(name)
self.vb.sigRangeChanged.connect(self.sigRangeChanged)
self.vb.sigXRangeChanged.connect(self.sigXRangeChanged)
self.vb.sigYRangeChanged.connect(self.sigYRangeChanged)
self.layout.addItem(self.vb, 2, 1)
self.alpha = 1.0
self.autoAlpha = True
self.spectrumMode = False
self.legend = None
## Create and place axis items
if axisItems is None:
axisItems = {}
self.axes = {}
for k, pos in (('top', (1,1)), ('bottom', (3,1)), ('left', (2,0)), ('right', (2,2))):
if k in axisItems:
axis = axisItems[k]
else:
axis = AxisItem(orientation=k, parent=self)
axis.linkToView(self.vb)
self.axes[k] = {'item': axis, 'pos': pos}
self.layout.addItem(axis, *pos)
axis.setZValue(-1000)
axis.setFlag(axis.ItemNegativeZStacksBehindParent)
self.titleLabel = LabelItem('', size='11pt', parent=self)
self.layout.addItem(self.titleLabel, 0, 1)
self.setTitle(None) ## hide
for i in range(4):
self.layout.setRowPreferredHeight(i, 0)
self.layout.setRowMinimumHeight(i, 0)
self.layout.setRowSpacing(i, 0)
self.layout.setRowStretchFactor(i, 1)
for i in range(3):
self.layout.setColumnPreferredWidth(i, 0)
self.layout.setColumnMinimumWidth(i, 0)
self.layout.setColumnSpacing(i, 0)
self.layout.setColumnStretchFactor(i, 1)
self.layout.setRowStretchFactor(2, 100)
self.layout.setColumnStretchFactor(1, 100)
self.items = []
self.curves = []
self.itemMeta = weakref.WeakKeyDictionary()
self.dataItems = []
self.paramList = {}
self.avgCurves = {}
### Set up context menu
w = QtGui.QWidget()
self.ctrl = c = Ui_Form()
c.setupUi(w)
dv = QtGui.QDoubleValidator(self)
menuItems = [
('Transforms', c.transformGroup),
('Downsample', c.decimateGroup),
('Average', c.averageGroup),
('Alpha', c.alphaGroup),
('Grid', c.gridGroup),
('Points', c.pointsGroup),
]
self.ctrlMenu = QtGui.QMenu()
self.ctrlMenu.setTitle('Plot Options')
self.subMenus = []
for name, grp in menuItems:
sm = QtGui.QMenu(name)
act = QtGui.QWidgetAction(self)
act.setDefaultWidget(grp)
sm.addAction(act)
self.subMenus.append(sm)
self.ctrlMenu.addMenu(sm)
self.stateGroup = WidgetGroup()
for name, w in menuItems:
self.stateGroup.autoAdd(w)
self.fileDialog = None
c.alphaGroup.toggled.connect(self.updateAlpha)
c.alphaSlider.valueChanged.connect(self.updateAlpha)
c.autoAlphaCheck.toggled.connect(self.updateAlpha)
c.xGridCheck.toggled.connect(self.updateGrid)
c.yGridCheck.toggled.connect(self.updateGrid)
c.gridAlphaSlider.valueChanged.connect(self.updateGrid)
c.fftCheck.toggled.connect(self.updateSpectrumMode)
c.logXCheck.toggled.connect(self.updateLogMode)
c.logYCheck.toggled.connect(self.updateLogMode)
c.downsampleSpin.valueChanged.connect(self.updateDownsampling)
c.downsampleCheck.toggled.connect(self.updateDownsampling)
c.autoDownsampleCheck.toggled.connect(self.updateDownsampling)
c.subsampleRadio.toggled.connect(self.updateDownsampling)
c.meanRadio.toggled.connect(self.updateDownsampling)
c.clipToViewCheck.toggled.connect(self.updateDownsampling)
self.ctrl.avgParamList.itemClicked.connect(self.avgParamListClicked)
self.ctrl.averageGroup.toggled.connect(self.avgToggled)
self.ctrl.maxTracesCheck.toggled.connect(self.updateDecimation)
self.ctrl.maxTracesSpin.valueChanged.connect(self.updateDecimation)
self.hideAxis('right')
self.hideAxis('top')
self.showAxis('left')
self.showAxis('bottom')
if labels is None:
labels = {}
for label in list(self.axes.keys()):
if label in kargs:
labels[label] = kargs[label]
del kargs[label]
for k in labels:
if isinstance(labels[k], basestring):
labels[k] = (labels[k],)
self.setLabel(k, *labels[k])
if title is not None:
self.setTitle(title)
if len(kargs) > 0:
self.plot(**kargs)
def implements(self, interface=None):
return interface in ['ViewBoxWrapper']
def getViewBox(self):
"""Return the :class:`ViewBox <pyqtgraph.ViewBox>` contained within."""
return self.vb
## Wrap a few methods from viewBox.
    #Important: don't use setattr(m, getattr(self.vb, m)) as we'd be leaving the viewbox alive
    #because we'd hold a reference to an instance method (so we create wrapper methods at runtime instead).
for m in ['setXRange', 'setYRange', 'setXLink', 'setYLink', 'setAutoPan', # NOTE:
'setAutoVisible', 'setRange', 'autoRange', 'viewRect', 'viewRange', # If you update this list, please
'setMouseEnabled', 'setLimits', 'enableAutoRange', 'disableAutoRange', # update the class docstring
'setAspectLocked', 'invertY', 'invertX', 'register', 'unregister']: # as well.
def _create_method(name):
def method(self, *args, **kwargs):
return getattr(self.vb, name)(*args, **kwargs)
method.__name__ = name
return method
locals()[m] = _create_method(m)
del _create_method
def setLogMode(self, x=None, y=None):
"""
Set log scaling for x and/or y axes.
This informs PlotDataItems to transform logarithmically and switches
the axes to use log ticking.
Note that *no other items* in the scene will be affected by
this; there is (currently) no generic way to redisplay a GraphicsItem
with log coordinates.
"""
if x is not None:
self.ctrl.logXCheck.setChecked(x)
if y is not None:
self.ctrl.logYCheck.setChecked(y)
def showGrid(self, x=None, y=None, alpha=None):
"""
Show or hide the grid for either axis.
============== =====================================
**Arguments:**
x (bool) Whether to show the X grid
y (bool) Whether to show the Y grid
alpha (0.0-1.0) Opacity of the grid
============== =====================================
"""
if x is None and y is None and alpha is None:
raise Exception("Must specify at least one of x, y, or alpha.") ## prevent people getting confused if they just call showGrid()
if x is not None:
self.ctrl.xGridCheck.setChecked(x)
if y is not None:
self.ctrl.yGridCheck.setChecked(y)
if alpha is not None:
v = np.clip(alpha, 0, 1)*self.ctrl.gridAlphaSlider.maximum()
self.ctrl.gridAlphaSlider.setValue(v)
#def paint(self, *args):
#prof = debug.Profiler()
#QtGui.QGraphicsWidget.paint(self, *args)
## bad idea.
#def __getattr__(self, attr): ## wrap ms
#return getattr(self.vb, attr)
def close(self):
#print "delete", self
## Most of this crap is needed to avoid PySide trouble.
## The problem seems to be whenever scene.clear() leads to deletion of widgets (either through proxies or qgraphicswidgets)
## the solution is to manually remove all widgets before scene.clear() is called
if self.ctrlMenu is None: ## already shut down
return
self.ctrlMenu.setParent(None)
self.ctrlMenu = None
self.autoBtn.setParent(None)
self.autoBtn = None
for k in self.axes:
i = self.axes[k]['item']
i.close()
self.axes = None
self.scene().removeItem(self.vb)
self.vb = None
def registerPlot(self, name): ## for backward compatibility
self.vb.register(name)
def updateGrid(self, *args):
alpha = self.ctrl.gridAlphaSlider.value()
x = alpha if self.ctrl.xGridCheck.isChecked() else False
y = alpha if self.ctrl.yGridCheck.isChecked() else False
self.getAxis('top').setGrid(x)
self.getAxis('bottom').setGrid(x)
self.getAxis('left').setGrid(y)
self.getAxis('right').setGrid(y)
def viewGeometry(self):
"""Return the screen geometry of the viewbox"""
v = self.scene().views()[0]
b = self.vb.mapRectToScene(self.vb.boundingRect())
wr = v.mapFromScene(b).boundingRect()
pos = v.mapToGlobal(v.pos())
wr.adjust(pos.x(), pos.y(), pos.x(), pos.y())
return wr
def avgToggled(self, b):
if b:
self.recomputeAverages()
for k in self.avgCurves:
self.avgCurves[k][1].setVisible(b)
def avgParamListClicked(self, item):
name = str(item.text())
self.paramList[name] = (item.checkState() == QtCore.Qt.Checked)
self.recomputeAverages()
def recomputeAverages(self):
if not self.ctrl.averageGroup.isChecked():
return
for k in self.avgCurves:
self.removeItem(self.avgCurves[k][1])
self.avgCurves = {}
for c in self.curves:
self.addAvgCurve(c)
self.replot()
def addAvgCurve(self, curve):
## Add a single curve into the pool of curves averaged together
## If there are plot parameters, then we need to determine which to average together.
remKeys = []
addKeys = []
if self.ctrl.avgParamList.count() > 0:
### First determine the key of the curve to which this new data should be averaged
for i in range(self.ctrl.avgParamList.count()):
item = self.ctrl.avgParamList.item(i)
if item.checkState() == QtCore.Qt.Checked:
remKeys.append(str(item.text()))
else:
addKeys.append(str(item.text()))
if len(remKeys) < 1: ## In this case, there would be 1 average plot for each data plot; not useful.
return
p = self.itemMeta.get(curve,{}).copy()
for k in p:
if type(k) is tuple:
p['.'.join(k)] = p[k]
del p[k]
for rk in remKeys:
if rk in p:
del p[rk]
for ak in addKeys:
if ak not in p:
p[ak] = None
key = tuple(p.items())
### Create a new curve if needed
if key not in self.avgCurves:
plot = PlotDataItem()
plot.setPen(fn.mkPen([0, 200, 0]))
plot.setShadowPen(fn.mkPen([0, 0, 0, 100], width=3))
plot.setAlpha(1.0, False)
plot.setZValue(100)
self.addItem(plot, skipAverage=True)
self.avgCurves[key] = [0, plot]
self.avgCurves[key][0] += 1
(n, plot) = self.avgCurves[key]
### Average data together
(x, y) = curve.getData()
stepMode = curve.opts['stepMode']
if plot.yData is not None and y.shape == plot.yData.shape:
# note that if shapes do not match, then the average resets.
newData = plot.yData * (n-1) / float(n) + y * 1.0 / float(n)
plot.setData(plot.xData, newData, stepMode=stepMode)
else:
plot.setData(x, y, stepMode=stepMode)
def autoBtnClicked(self):
if self.autoBtn.mode == 'auto':
self.enableAutoRange()
self.autoBtn.hide()
else:
self.disableAutoRange()
def viewStateChanged(self):
self.updateButtons()
def enableAutoScale(self):
"""
Enable auto-scaling. The plot will continuously scale to fit the boundaries of its data.
"""
print("Warning: enableAutoScale is deprecated. Use enableAutoRange(axis, enable) instead.")
self.vb.enableAutoRange(self.vb.XYAxes)
def addItem(self, item, *args, **kargs):
"""
Add a graphics item to the view box.
If the item has plot data (PlotDataItem, PlotCurveItem, ScatterPlotItem), it may
be included in analysis performed by the PlotItem.
"""
self.items.append(item)
vbargs = {}
if 'ignoreBounds' in kargs:
vbargs['ignoreBounds'] = kargs['ignoreBounds']
self.vb.addItem(item, *args, **vbargs)
name = None
if hasattr(item, 'implements') and item.implements('plotData'):
name = item.name()
self.dataItems.append(item)
#self.plotChanged()
params = kargs.get('params', {})
self.itemMeta[item] = params
#item.setMeta(params)
self.curves.append(item)
#self.addItem(c)
if hasattr(item, 'setLogMode'):
item.setLogMode(self.ctrl.logXCheck.isChecked(), self.ctrl.logYCheck.isChecked())
if isinstance(item, PlotDataItem):
## configure curve for this plot
(alpha, auto) = self.alphaState()
item.setAlpha(alpha, auto)
item.setFftMode(self.ctrl.fftCheck.isChecked())
item.setDownsampling(*self.downsampleMode())
item.setClipToView(self.clipToViewMode())
item.setPointMode(self.pointMode())
## Hide older plots if needed
self.updateDecimation()
## Add to average if needed
self.updateParamList()
if self.ctrl.averageGroup.isChecked() and 'skipAverage' not in kargs:
self.addAvgCurve(item)
#c.connect(c, QtCore.SIGNAL('plotChanged'), self.plotChanged)
#item.sigPlotChanged.connect(self.plotChanged)
#self.plotChanged()
#name = kargs.get('name', getattr(item, 'opts', {}).get('name', None))
if name is not None and hasattr(self, 'legend') and self.legend is not None:
self.legend.addItem(item, name=name)
def addDataItem(self, item, *args):
print("PlotItem.addDataItem is deprecated. Use addItem instead.")
self.addItem(item, *args)
def listDataItems(self):
"""Return a list of all data items (PlotDataItem, PlotCurveItem, ScatterPlotItem, etc)
contained in this PlotItem."""
return self.dataItems[:]
def addCurve(self, c, params=None):
print("PlotItem.addCurve is deprecated. Use addItem instead.")
self.addItem(c, params)
def addLine(self, x=None, y=None, z=None, **kwds):
"""
Create an InfiniteLine and add to the plot.
If *x* is specified,
the line will be vertical. If *y* is specified, the line will be
horizontal. All extra keyword arguments are passed to
:func:`InfiniteLine.__init__() <pyqtgraph.InfiniteLine.__init__>`.
Returns the item created.
"""
pos = kwds.get('pos', x if x is not None else y)
angle = kwds.get('angle', 0 if x is None else 90)
line = InfiniteLine(pos, angle, **kwds)
self.addItem(line)
if z is not None:
line.setZValue(z)
return line
def removeItem(self, item):
"""
Remove an item from the internal ViewBox.
"""
if not item in self.items:
return
self.items.remove(item)
if item in self.dataItems:
self.dataItems.remove(item)
if item.scene() is not None:
self.vb.removeItem(item)
if item in self.curves:
self.curves.remove(item)
self.updateDecimation()
self.updateParamList()
#item.connect(item, QtCore.SIGNAL('plotChanged'), self.plotChanged)
#item.sigPlotChanged.connect(self.plotChanged)
def clear(self):
"""
Remove all items from the ViewBox.
"""
for i in self.items[:]:
self.removeItem(i)
self.avgCurves = {}
def clearPlots(self):
for i in self.curves[:]:
self.removeItem(i)
self.avgCurves = {}
def plot(self, *args, **kargs):
"""
Add and return a new plot.
See :func:`PlotDataItem.__init__ <pyqtgraph.PlotDataItem.__init__>` for data arguments
Extra allowed arguments are:
clear - clear all plots before displaying new data
params - meta-parameters to associate with this data
"""
clear = kargs.get('clear', False)
params = kargs.get('params', None)
if clear:
self.clear()
item = PlotDataItem(*args, **kargs)
if params is None:
params = {}
self.addItem(item, params=params)
return item
def addLegend(self, size=None, offset=(30, 30)):
"""
Create a new LegendItem and anchor it over the internal ViewBox.
Plots will be automatically displayed in the legend if they
are created with the 'name' argument.
"""
self.legend = LegendItem(size, offset)
self.legend.setParentItem(self.vb)
return self.legend
def scatterPlot(self, *args, **kargs):
if 'pen' in kargs:
kargs['symbolPen'] = kargs['pen']
kargs['pen'] = None
if 'brush' in kargs:
kargs['symbolBrush'] = kargs['brush']
del kargs['brush']
if 'size' in kargs:
kargs['symbolSize'] = kargs['size']
del kargs['size']
return self.plot(*args, **kargs)
def replot(self):
self.update()
def updateParamList(self):
self.ctrl.avgParamList.clear()
## Check to see that each parameter for each curve is present in the list
for c in self.curves:
for p in list(self.itemMeta.get(c, {}).keys()):
if type(p) is tuple:
p = '.'.join(p)
## If the parameter is not in the list, add it.
matches = self.ctrl.avgParamList.findItems(p, QtCore.Qt.MatchExactly)
if len(matches) == 0:
i = QtGui.QListWidgetItem(p)
if p in self.paramList and self.paramList[p] is True:
i.setCheckState(QtCore.Qt.Checked)
else:
i.setCheckState(QtCore.Qt.Unchecked)
self.ctrl.avgParamList.addItem(i)
else:
i = matches[0]
self.paramList[p] = (i.checkState() == QtCore.Qt.Checked)
## Qt's SVG-writing capabilities are pretty terrible.
def writeSvgCurves(self, fileName=None):
if fileName is None:
self.fileDialog = FileDialog()
if PlotItem.lastFileDir is not None:
self.fileDialog.setDirectory(PlotItem.lastFileDir)
self.fileDialog.setFileMode(QtGui.QFileDialog.AnyFile)
self.fileDialog.setAcceptMode(QtGui.QFileDialog.AcceptSave)
self.fileDialog.show()
self.fileDialog.fileSelected.connect(self.writeSvg)
return
#if fileName is None:
#fileName = QtGui.QFileDialog.getSaveFileName()
if isinstance(fileName, tuple):
raise Exception("Not implemented yet..")
fileName = str(fileName)
PlotItem.lastFileDir = os.path.dirname(fileName)
rect = self.vb.viewRect()
xRange = rect.left(), rect.right()
svg = ""
fh = open(fileName, 'w')
dx = max(rect.right(),0) - min(rect.left(),0)
ymn = min(rect.top(), rect.bottom())
ymx = max(rect.top(), rect.bottom())
dy = max(ymx,0) - min(ymn,0)
sx = 1.
sy = 1.
while dx*sx < 10:
sx *= 1000
while dy*sy < 10:
sy *= 1000
sy *= -1
#fh.write('<svg viewBox="%f %f %f %f">\n' % (rect.left()*sx, rect.top()*sx, rect.width()*sy, rect.height()*sy))
fh.write('<svg>\n')
fh.write('<path fill="none" stroke="#000000" stroke-opacity="0.5" stroke-width="1" d="M%f,0 L%f,0"/>\n' % (rect.left()*sx, rect.right()*sx))
fh.write('<path fill="none" stroke="#000000" stroke-opacity="0.5" stroke-width="1" d="M0,%f L0,%f"/>\n' % (rect.top()*sy, rect.bottom()*sy))
for item in self.curves:
if isinstance(item, PlotCurveItem):
color = fn.colorStr(item.pen.color())
opacity = item.pen.color().alpha() / 255.
color = color[:6]
x, y = item.getData()
mask = (x > xRange[0]) * (x < xRange[1])
mask[:-1] += mask[1:]
m2 = mask.copy()
mask[1:] += m2[:-1]
x = x[mask]
y = y[mask]
x *= sx
y *= sy
#fh.write('<g fill="none" stroke="#%s" stroke-opacity="1" stroke-width="1">\n' % color)
fh.write('<path fill="none" stroke="#%s" stroke-opacity="%f" stroke-width="1" d="M%f,%f ' % (color, opacity, x[0], y[0]))
for i in range(1, len(x)):
fh.write('L%f,%f ' % (x[i], y[i]))
fh.write('"/>')
#fh.write("</g>")
for item in self.dataItems:
if isinstance(item, ScatterPlotItem):
pRect = item.boundingRect()
vRect = pRect.intersected(rect)
for point in item.points():
pos = point.pos()
if not rect.contains(pos):
continue
color = fn.colorStr(point.brush.color())
opacity = point.brush.color().alpha() / 255.
color = color[:6]
x = pos.x() * sx
y = pos.y() * sy
fh.write('<circle cx="%f" cy="%f" r="1" fill="#%s" stroke="none" fill-opacity="%f"/>\n' % (x, y, color, opacity))
fh.write("</svg>\n")
def writeSvg(self, fileName=None):
if fileName is None:
fileName = QtGui.QFileDialog.getSaveFileName()
fileName = str(fileName)
PlotItem.lastFileDir = os.path.dirname(fileName)
from ...exporters import SVGExporter
ex = SVGExporter(self)
ex.export(fileName)
def writeImage(self, fileName=None):
if fileName is None:
self.fileDialog = FileDialog()
if PlotItem.lastFileDir is not None:
self.fileDialog.setDirectory(PlotItem.lastFileDir)
self.fileDialog.setFileMode(QtGui.QFileDialog.AnyFile)
self.fileDialog.setAcceptMode(QtGui.QFileDialog.AcceptSave)
self.fileDialog.show()
self.fileDialog.fileSelected.connect(self.writeImage)
return
#if fileName is None:
#fileName = QtGui.QFileDialog.getSaveFileName()
if isinstance(fileName, tuple):
raise Exception("Not implemented yet..")
fileName = str(fileName)
PlotItem.lastFileDir = os.path.dirname(fileName)
self.png = QtGui.QImage(int(self.size().width()), int(self.size().height()), QtGui.QImage.Format_ARGB32)
painter = QtGui.QPainter(self.png)
painter.setRenderHints(painter.Antialiasing | painter.TextAntialiasing)
self.scene().render(painter, QtCore.QRectF(), self.mapRectToScene(self.boundingRect()))
painter.end()
self.png.save(fileName)
def writeCsv(self, fileName=None):
if fileName is None:
self.fileDialog = FileDialog()
if PlotItem.lastFileDir is not None:
self.fileDialog.setDirectory(PlotItem.lastFileDir)
self.fileDialog.setFileMode(QtGui.QFileDialog.AnyFile)
self.fileDialog.setAcceptMode(QtGui.QFileDialog.AcceptSave)
self.fileDialog.show()
self.fileDialog.fileSelected.connect(self.writeCsv)
return
#if fileName is None:
#fileName = QtGui.QFileDialog.getSaveFileName()
fileName = str(fileName)
PlotItem.lastFileDir = os.path.dirname(fileName)
fd = open(fileName, 'w')
data = [c.getData() for c in self.curves]
i = 0
while True:
done = True
for d in data:
if i < len(d[0]):
fd.write('%g,%g,'%(d[0][i], d[1][i]))
done = False
else:
fd.write(' , ,')
fd.write('\n')
if done:
break
i += 1
fd.close()
def saveState(self):
state = self.stateGroup.state()
state['paramList'] = self.paramList.copy()
state['view'] = self.vb.getState()
return state
def restoreState(self, state):
if 'paramList' in state:
self.paramList = state['paramList'].copy()
self.stateGroup.setState(state)
self.updateSpectrumMode()
self.updateDownsampling()
self.updateAlpha()
self.updateDecimation()
if 'powerSpectrumGroup' in state:
state['fftCheck'] = state['powerSpectrumGroup']
if 'gridGroup' in state:
state['xGridCheck'] = state['gridGroup']
state['yGridCheck'] = state['gridGroup']
self.stateGroup.setState(state)
self.updateParamList()
if 'view' not in state:
r = [[float(state['xMinText']), float(state['xMaxText'])], [float(state['yMinText']), float(state['yMaxText'])]]
state['view'] = {
'autoRange': [state['xAutoRadio'], state['yAutoRadio']],
'linkedViews': [state['xLinkCombo'], state['yLinkCombo']],
'targetRange': r,
'viewRange': r,
}
self.vb.setState(state['view'])
def widgetGroupInterface(self):
return (None, PlotItem.saveState, PlotItem.restoreState)
def updateSpectrumMode(self, b=None):
if b is None:
b = self.ctrl.fftCheck.isChecked()
for c in self.curves:
c.setFftMode(b)
self.enableAutoRange()
self.recomputeAverages()
def updateLogMode(self):
x = self.ctrl.logXCheck.isChecked()
y = self.ctrl.logYCheck.isChecked()
for i in self.items:
if hasattr(i, 'setLogMode'):
i.setLogMode(x,y)
self.getAxis('bottom').setLogMode(x)
self.getAxis('top').setLogMode(x)
self.getAxis('left').setLogMode(y)
self.getAxis('right').setLogMode(y)
self.enableAutoRange()
self.recomputeAverages()
def setDownsampling(self, ds=None, auto=None, mode=None):
"""Change the default downsampling mode for all PlotDataItems managed by this plot.
=============== =================================================================
**Arguments:**
ds (int) Reduce visible plot samples by this factor, or
(bool) To enable/disable downsampling without changing the value.
auto (bool) If True, automatically pick *ds* based on visible range
mode 'subsample': Downsample by taking the first of N samples.
This method is fastest and least accurate.
'mean': Downsample by taking the mean of N samples.
'peak': Downsample by drawing a saw wave that follows the min
and max of the original data. This method produces the best
visual representation of the data but is slower.
=============== =================================================================
"""
if ds is not None:
if ds is False:
self.ctrl.downsampleCheck.setChecked(False)
elif ds is True:
self.ctrl.downsampleCheck.setChecked(True)
else:
self.ctrl.downsampleCheck.setChecked(True)
self.ctrl.downsampleSpin.setValue(ds)
if auto is not None:
if auto and ds is not False:
self.ctrl.downsampleCheck.setChecked(True)
self.ctrl.autoDownsampleCheck.setChecked(auto)
if mode is not None:
if mode == 'subsample':
self.ctrl.subsampleRadio.setChecked(True)
elif mode == 'mean':
self.ctrl.meanRadio.setChecked(True)
elif mode == 'peak':
self.ctrl.peakRadio.setChecked(True)
else:
raise ValueError("mode argument must be 'subsample', 'mean', or 'peak'.")
def updateDownsampling(self):
ds, auto, method = self.downsampleMode()
clip = self.ctrl.clipToViewCheck.isChecked()
for c in self.curves:
c.setDownsampling(ds, auto, method)
c.setClipToView(clip)
self.recomputeAverages()
def downsampleMode(self):
if self.ctrl.downsampleCheck.isChecked():
ds = self.ctrl.downsampleSpin.value()
else:
ds = 1
auto = self.ctrl.downsampleCheck.isChecked() and self.ctrl.autoDownsampleCheck.isChecked()
if self.ctrl.subsampleRadio.isChecked():
method = 'subsample'
elif self.ctrl.meanRadio.isChecked():
method = 'mean'
elif self.ctrl.peakRadio.isChecked():
method = 'peak'
return ds, auto, method
def setClipToView(self, clip):
"""Set the default clip-to-view mode for all PlotDataItems managed by this plot.
If *clip* is True, then PlotDataItems will attempt to draw only points within the visible
range of the ViewBox."""
self.ctrl.clipToViewCheck.setChecked(clip)
def clipToViewMode(self):
return self.ctrl.clipToViewCheck.isChecked()
def updateDecimation(self):
if self.ctrl.maxTracesCheck.isChecked():
numCurves = self.ctrl.maxTracesSpin.value()
else:
numCurves = -1
curves = self.curves[:]
split = len(curves) - numCurves
for i in range(len(curves)):
if numCurves == -1 or i >= split:
curves[i].show()
else:
if self.ctrl.forgetTracesCheck.isChecked():
curves[i].clear()
self.removeItem(curves[i])
else:
curves[i].hide()
def updateAlpha(self, *args):
(alpha, auto) = self.alphaState()
for c in self.curves:
c.setAlpha(alpha**2, auto)
def alphaState(self):
enabled = self.ctrl.alphaGroup.isChecked()
auto = self.ctrl.autoAlphaCheck.isChecked()
alpha = float(self.ctrl.alphaSlider.value()) / self.ctrl.alphaSlider.maximum()
if auto:
alpha = 1.0 ## should be 1/number of overlapping plots
if not enabled:
auto = False
alpha = 1.0
return (alpha, auto)
def pointMode(self):
if self.ctrl.pointsGroup.isChecked():
if self.ctrl.autoPointsCheck.isChecked():
mode = None
else:
mode = True
else:
mode = False
return mode
def resizeEvent(self, ev):
if self.autoBtn is None: ## already closed down
return
btnRect = self.mapRectFromItem(self.autoBtn, self.autoBtn.boundingRect())
y = self.size().height() - btnRect.height()
self.autoBtn.setPos(0, y)
def getMenu(self):
return self.ctrlMenu
def getContextMenus(self, event):
## called when another item is displaying its context menu; we get to add extras to the end of the menu.
if self.menuEnabled():
return self.ctrlMenu
else:
return None
def setMenuEnabled(self, enableMenu=True, enableViewBoxMenu='same'):
"""
Enable or disable the context menu for this PlotItem.
By default, the ViewBox's context menu will also be affected.
(use enableViewBoxMenu=None to leave the ViewBox unchanged)
"""
self._menuEnabled = enableMenu
if enableViewBoxMenu is None:
return
        if enableViewBoxMenu == 'same':
enableViewBoxMenu = enableMenu
self.vb.setMenuEnabled(enableViewBoxMenu)
def menuEnabled(self):
return self._menuEnabled
def hoverEvent(self, ev):
if ev.enter:
self.mouseHovering = True
if ev.exit:
self.mouseHovering = False
self.updateButtons()
def getLabel(self, key):
pass
def _checkScaleKey(self, key):
if key not in self.axes:
raise Exception("Scale '%s' not found. Scales are: %s" % (key, str(list(self.axes.keys()))))
def getScale(self, key):
return self.getAxis(key)
def getAxis(self, name):
"""Return the specified AxisItem.
*name* should be 'left', 'bottom', 'top', or 'right'."""
self._checkScaleKey(name)
return self.axes[name]['item']
def setLabel(self, axis, text=None, units=None, unitPrefix=None, **args):
"""
Set the label for an axis. Basic HTML formatting is allowed.
============== =================================================================
**Arguments:**
axis must be one of 'left', 'bottom', 'right', or 'top'
text text to display along the axis. HTML allowed.
units units to display after the title. If units are given,
then an SI prefix will be automatically appended
and the axis values will be scaled accordingly.
(ie, use 'V' instead of 'mV'; 'm' will be added automatically)
============== =================================================================
"""
self.getAxis(axis).setLabel(text=text, units=units, **args)
self.showAxis(axis)
def setLabels(self, **kwds):
"""
Convenience function allowing multiple labels and/or title to be set in one call.
Keyword arguments can be 'title', 'left', 'bottom', 'right', or 'top'.
Values may be strings or a tuple of arguments to pass to setLabel.
"""
for k,v in kwds.items():
if k == 'title':
self.setTitle(v)
else:
if isinstance(v, basestring):
v = (v,)
self.setLabel(k, *v)
def showLabel(self, axis, show=True):
"""
Show or hide one of the plot's axis labels (the axis itself will be unaffected).
axis must be one of 'left', 'bottom', 'right', or 'top'
"""
self.getScale(axis).showLabel(show)
def setTitle(self, title=None, **args):
"""
Set the title of the plot. Basic HTML formatting is allowed.
If title is None, then the title will be hidden.
"""
if title is None:
self.titleLabel.setVisible(False)
self.layout.setRowFixedHeight(0, 0)
self.titleLabel.setMaximumHeight(0)
else:
self.titleLabel.setMaximumHeight(30)
self.layout.setRowFixedHeight(0, 30)
self.titleLabel.setVisible(True)
self.titleLabel.setText(title, **args)
def showAxis(self, axis, show=True):
"""
Show or hide one of the plot's axes.
axis must be one of 'left', 'bottom', 'right', or 'top'
"""
s = self.getScale(axis)
p = self.axes[axis]['pos']
if show:
s.show()
else:
s.hide()
def hideAxis(self, axis):
"""Hide one of the PlotItem's axes. ('left', 'bottom', 'right', or 'top')"""
self.showAxis(axis, False)
def showScale(self, *args, **kargs):
print("Deprecated. use showAxis() instead")
return self.showAxis(*args, **kargs)
def hideButtons(self):
"""Causes auto-scale button ('A' in lower-left corner) to be hidden for this PlotItem"""
#self.ctrlBtn.hide()
self.buttonsHidden = True
self.updateButtons()
def showButtons(self):
"""Causes auto-scale button ('A' in lower-left corner) to be visible for this PlotItem"""
#self.ctrlBtn.hide()
self.buttonsHidden = False
self.updateButtons()
def updateButtons(self):
try:
if self._exportOpts is False and self.mouseHovering and not self.buttonsHidden and not all(self.vb.autoRangeEnabled()):
self.autoBtn.show()
else:
self.autoBtn.hide()
except RuntimeError:
pass # this can happen if the plot has been deleted.
def _plotArray(self, arr, x=None, **kargs):
if arr.ndim != 1:
raise Exception("Array must be 1D to plot (shape is %s)" % arr.shape)
if x is None:
x = np.arange(arr.shape[0])
if x.ndim != 1:
raise Exception("X array must be 1D to plot (shape is %s)" % x.shape)
c = PlotCurveItem(arr, x=x, **kargs)
return c
def _plotMetaArray(self, arr, x=None, autoLabel=True, **kargs):
inf = arr.infoCopy()
if arr.ndim != 1:
raise Exception('can only automatically plot 1 dimensional arrays.')
## create curve
try:
xv = arr.xvals(0)
except:
if x is None:
xv = np.arange(arr.shape[0])
else:
xv = x
c = PlotCurveItem(**kargs)
c.setData(x=xv, y=arr.view(np.ndarray))
if autoLabel:
name = arr._info[0].get('name', None)
units = arr._info[0].get('units', None)
self.setLabel('bottom', text=name, units=units)
name = arr._info[1].get('name', None)
units = arr._info[1].get('units', None)
self.setLabel('left', text=name, units=units)
return c
def setExportMode(self, export, opts=None):
GraphicsWidget.setExportMode(self, export, opts)
self.updateButtons()
#if export:
#self.autoBtn.hide()
#else:
#self.autoBtn.show()
| mit |
aperigault/ansible | lib/ansible/modules/cloud/amazon/lambda_policy.py | 2 | 13967 | #!/usr/bin/python
# Copyright (c) 2016, Pierre Jodouin <[email protected]>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: lambda_policy
short_description: Creates, updates or deletes AWS Lambda policy statements.
description:
- This module allows the management of AWS Lambda policy statements.
It is idempotent and supports "Check" mode. Use module M(lambda) to manage the lambda
function itself, M(lambda_alias) to manage function aliases, M(lambda_event) to manage event source mappings
such as Kinesis streams, M(execute_lambda) to execute a lambda function and M(lambda_facts) to gather facts
relating to one or more lambda functions.
version_added: "2.4"
author:
- Pierre Jodouin (@pjodouin)
- Michael De La Rue (@mikedlr)
options:
function_name:
description:
- "Name of the Lambda function whose resource policy you are updating by adding a new permission."
- "You can specify a function name (for example, Thumbnail ) or you can specify Amazon Resource Name (ARN) of the"
- "function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail ). AWS Lambda also allows you to"
- "specify partial ARN (for example, account-id:Thumbnail ). Note that the length constraint applies only to the"
- "ARN. If you specify only the function name, it is limited to 64 character in length."
required: true
aliases: ['lambda_function_arn', 'function_arn']
state:
description:
- Describes the desired state.
default: "present"
choices: ["present", "absent"]
alias:
description:
- Name of the function alias. Mutually exclusive with C(version).
version:
description:
- Version of the Lambda function. Mutually exclusive with C(alias).
statement_id:
description:
- A unique statement identifier.
required: true
aliases: ['sid']
action:
description:
- "The AWS Lambda action you want to allow in this statement. Each Lambda action is a string starting with
lambda: followed by the API name (see Operations ). For example, lambda:CreateFunction . You can use wildcard
(lambda:* ) to grant permission for all AWS Lambda actions."
required: true
principal:
description:
- "The principal who is getting this permission. It can be Amazon S3 service Principal (s3.amazonaws.com ) if
you want Amazon S3 to invoke the function, an AWS account ID if you are granting cross-account permission, or
any valid AWS service principal such as sns.amazonaws.com . For example, you might want to allow a custom
application in another AWS account to push events to AWS Lambda by invoking your function."
required: true
source_arn:
description:
- This is optional; however, when granting Amazon S3 permission to invoke your function, you should specify this
field with the bucket Amazon Resource Name (ARN) as its value. This ensures that only events generated from
the specified bucket can invoke the function.
source_account:
description:
- The AWS account ID (without a hyphen) of the source owner. For example, if the SourceArn identifies a bucket,
then this is the bucket owner's account ID. You can use this additional condition to ensure the bucket you
specify is owned by a specific account (it is possible the bucket owner deleted the bucket and some other AWS
account created the bucket). You can also use this condition to specify all sources (that is, you don't
specify the SourceArn ) owned by a specific account.
event_source_token:
description:
- Token string representing source ARN or account. Mutually exclusive with C(source_arn) or C(source_account).
requirements:
- boto3
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
---
- hosts: localhost
gather_facts: no
vars:
state: present
tasks:
- name: Lambda S3 event notification
lambda_policy:
state: "{{ state | default('present') }}"
function_name: functionName
alias: Dev
statement_id: lambda-s3-myBucket-create-data-log
action: lambda:InvokeFunction
principal: s3.amazonaws.com
source_arn: arn:aws:s3:eu-central-1:123456789012:bucketName
source_account: 123456789012
- name: show results
debug: var=lambda_policy_action
'''
RETURN = '''
---
lambda_policy_action:
description: describes what action was taken
returned: success
type: str
'''
import json
import re
from ansible.module_utils._text import to_native
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn
try:
from botocore.exceptions import ClientError
except Exception:
pass # will be protected by AnsibleAWSModule
def pc(key):
"""
Changes python key into Pascal case equivalent. For example, 'this_function_name' becomes 'ThisFunctionName'.
:param key:
:return:
"""
return "".join([token.capitalize() for token in key.split('_')])
def policy_equal(module, current_statement):
for param in ('action', 'principal', 'source_arn', 'source_account', 'event_source_token'):
if module.params.get(param) != current_statement.get(param):
return False
return True
def set_api_params(module, module_params):
"""
Sets module parameters to those expected by the boto3 API.
:param module:
:param module_params:
:return:
"""
api_params = dict()
for param in module_params:
module_param = module.params.get(param)
if module_param is not None:
api_params[pc(param)] = module_param
return api_params
def validate_params(module):
"""
Performs parameter validation beyond the module framework's validation.
:param module:
:return:
"""
function_name = module.params['function_name']
# validate function name
if function_name.startswith('arn:'):
if not re.search(r'^[\w\-:]+$', function_name):
module.fail_json(
msg='ARN {0} is invalid. ARNs must contain only alphanumeric characters, hyphens and colons.'.format(function_name)
)
if len(function_name) > 140:
module.fail_json(msg='ARN name "{0}" exceeds 140 character limit'.format(function_name))
else:
if not re.search(r'^[\w\-]+$', function_name):
module.fail_json(
msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(
function_name)
)
if len(function_name) > 64:
module.fail_json(
msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
def get_qualifier(module):
"""
Returns the function qualifier as a version or alias or None.
:param module:
:return:
"""
if module.params.get('version') is not None:
return to_native(module.params['version'])
elif module.params['alias']:
return to_native(module.params['alias'])
return None
def extract_statement(policy, sid):
"""return flattened single policy statement from a policy
If a policy statement is present in the policy extract it and
return it in a flattened form. Otherwise return an empty
dictionary.
"""
if 'Statement' not in policy:
return {}
policy_statement = {}
# Now that we have the policy, check if required permission statement is present and flatten to
# simple dictionary if found.
for statement in policy['Statement']:
if statement['Sid'] == sid:
policy_statement['action'] = statement['Action']
try:
policy_statement['principal'] = statement['Principal']['Service']
except KeyError:
pass
try:
policy_statement['principal'] = statement['Principal']['AWS']
except KeyError:
pass
try:
policy_statement['source_arn'] = statement['Condition']['ArnLike']['AWS:SourceArn']
except KeyError:
pass
try:
policy_statement['source_account'] = statement['Condition']['StringEquals']['AWS:SourceAccount']
except KeyError:
pass
try:
policy_statement['event_source_token'] = statement['Condition']['StringEquals']['lambda:EventSourceToken']
except KeyError:
pass
break
return policy_statement
def get_policy_statement(module, client):
"""Checks that policy exists and if so, that statement ID is present or absent.
:param module:
:param client:
:return:
"""
sid = module.params['statement_id']
# set API parameters
api_params = set_api_params(module, ('function_name', ))
qualifier = get_qualifier(module)
if qualifier:
api_params.update(Qualifier=qualifier)
policy_results = None
# check if function policy exists
try:
policy_results = client.get_policy(**api_params)
except ClientError as e:
try:
if e.response['Error']['Code'] == 'ResourceNotFoundException':
return {}
except AttributeError: # catches ClientErrors without response, e.g. fail before connect
pass
module.fail_json_aws(e, msg="retrieving function policy")
except Exception as e:
module.fail_json_aws(e, msg="retrieving function policy")
# get_policy returns a JSON string so must convert to dict before reassigning to its key
policy = json.loads(policy_results.get('Policy', '{}'))
return extract_statement(policy, sid)
def add_policy_permission(module, client):
"""
Adds a permission statement to the policy.
:param module:
:param aws:
:return:
"""
changed = False
# set API parameters
params = (
'function_name',
'statement_id',
'action',
'principal',
'source_arn',
'source_account',
'event_source_token')
api_params = set_api_params(module, params)
qualifier = get_qualifier(module)
if qualifier:
api_params.update(Qualifier=qualifier)
if not module.check_mode:
try:
client.add_permission(**api_params)
except Exception as e:
module.fail_json_aws(e, msg="adding permission to policy")
changed = True
return changed
def remove_policy_permission(module, client):
"""
Removed a permission statement from the policy.
:param module:
:param aws:
:return:
"""
changed = False
# set API parameters
api_params = set_api_params(module, ('function_name', 'statement_id'))
qualifier = get_qualifier(module)
if qualifier:
api_params.update(Qualifier=qualifier)
try:
if not module.check_mode:
client.remove_permission(**api_params)
changed = True
except Exception as e:
module.fail_json_aws(e, msg="removing permission from policy")
return changed
def manage_state(module, lambda_client):
changed = False
current_state = 'absent'
state = module.params['state']
action_taken = 'none'
# check if the policy exists
current_policy_statement = get_policy_statement(module, lambda_client)
if current_policy_statement:
current_state = 'present'
if state == 'present':
if current_state == 'present' and not policy_equal(module, current_policy_statement):
remove_policy_permission(module, lambda_client)
changed = add_policy_permission(module, lambda_client)
action_taken = 'updated'
if not current_state == 'present':
changed = add_policy_permission(module, lambda_client)
action_taken = 'added'
elif current_state == 'present':
# remove the policy statement
changed = remove_policy_permission(module, lambda_client)
action_taken = 'deleted'
return dict(changed=changed, ansible_facts=dict(lambda_policy_action=action_taken))
def setup_client(module):
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
if region:
connection = boto3_conn(module, conn_type='client', resource='lambda', region=region, endpoint=ec2_url, **aws_connect_params)
else:
module.fail_json(msg="region must be specified")
return connection
def setup_module_object():
argument_spec = dict(
state=dict(default='present', choices=['present', 'absent']),
function_name=dict(required=True, aliases=['lambda_function_arn', 'function_arn']),
statement_id=dict(required=True, aliases=['sid']),
alias=dict(),
version=dict(type='int'),
action=dict(required=True, ),
principal=dict(required=True, ),
source_arn=dict(),
source_account=dict(),
event_source_token=dict(),
)
return AnsibleAWSModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[['alias', 'version'],
['event_source_token', 'source_arn'],
['event_source_token', 'source_account']],
)
def main():
"""
Main entry point.
:return dict: ansible facts
"""
module = setup_module_object()
client = setup_client(module)
validate_params(module)
results = manage_state(module, client)
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 |
nag10/reposiatary | UI/node/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/ninja_syntax.py | 2485 | 5536 | # This file comes from
# https://github.com/martine/ninja/blob/master/misc/ninja_syntax.py
# Do not edit! Edit the upstream one instead.
"""Python module for generating .ninja files.
Note that this is emphatically not a required piece of Ninja; it's
just a helpful utility for build-file-generation systems that already
use Python.
"""
import textwrap
import re
def escape_path(word):
return word.replace('$ ','$$ ').replace(' ','$ ').replace(':', '$:')
class Writer(object):
def __init__(self, output, width=78):
self.output = output
self.width = width
def newline(self):
self.output.write('\n')
def comment(self, text):
for line in textwrap.wrap(text, self.width - 2):
self.output.write('# ' + line + '\n')
def variable(self, key, value, indent=0):
if value is None:
return
if isinstance(value, list):
value = ' '.join(filter(None, value)) # Filter out empty strings.
self._line('%s = %s' % (key, value), indent)
def pool(self, name, depth):
self._line('pool %s' % name)
self.variable('depth', depth, indent=1)
def rule(self, name, command, description=None, depfile=None,
generator=False, pool=None, restat=False, rspfile=None,
rspfile_content=None, deps=None):
self._line('rule %s' % name)
self.variable('command', command, indent=1)
if description:
self.variable('description', description, indent=1)
if depfile:
self.variable('depfile', depfile, indent=1)
if generator:
self.variable('generator', '1', indent=1)
if pool:
self.variable('pool', pool, indent=1)
if restat:
self.variable('restat', '1', indent=1)
if rspfile:
self.variable('rspfile', rspfile, indent=1)
if rspfile_content:
self.variable('rspfile_content', rspfile_content, indent=1)
if deps:
self.variable('deps', deps, indent=1)
def build(self, outputs, rule, inputs=None, implicit=None, order_only=None,
variables=None):
outputs = self._as_list(outputs)
all_inputs = self._as_list(inputs)[:]
out_outputs = list(map(escape_path, outputs))
all_inputs = list(map(escape_path, all_inputs))
if implicit:
implicit = map(escape_path, self._as_list(implicit))
all_inputs.append('|')
all_inputs.extend(implicit)
if order_only:
order_only = map(escape_path, self._as_list(order_only))
all_inputs.append('||')
all_inputs.extend(order_only)
self._line('build %s: %s' % (' '.join(out_outputs),
' '.join([rule] + all_inputs)))
if variables:
if isinstance(variables, dict):
iterator = iter(variables.items())
else:
iterator = iter(variables)
for key, val in iterator:
self.variable(key, val, indent=1)
return outputs
def include(self, path):
self._line('include %s' % path)
def subninja(self, path):
self._line('subninja %s' % path)
def default(self, paths):
self._line('default %s' % ' '.join(self._as_list(paths)))
def _count_dollars_before_index(self, s, i):
"""Returns the number of '$' characters right in front of s[i]."""
dollar_count = 0
dollar_index = i - 1
while dollar_index > 0 and s[dollar_index] == '$':
dollar_count += 1
dollar_index -= 1
return dollar_count
def _line(self, text, indent=0):
"""Write 'text' word-wrapped at self.width characters."""
leading_space = ' ' * indent
while len(leading_space) + len(text) > self.width:
# The text is too wide; wrap if possible.
# Find the rightmost space that would obey our width constraint and
# that's not an escaped space.
available_space = self.width - len(leading_space) - len(' $')
space = available_space
while True:
space = text.rfind(' ', 0, space)
if space < 0 or \
self._count_dollars_before_index(text, space) % 2 == 0:
break
if space < 0:
# No such space; just use the first unescaped space we can find.
space = available_space - 1
while True:
space = text.find(' ', space + 1)
if space < 0 or \
self._count_dollars_before_index(text, space) % 2 == 0:
break
if space < 0:
# Give up on breaking.
break
self.output.write(leading_space + text[0:space] + ' $\n')
text = text[space+1:]
# Subsequent lines are continuations, so indent them.
leading_space = ' ' * (indent+2)
self.output.write(leading_space + text + '\n')
def _as_list(self, input):
if input is None:
return []
if isinstance(input, list):
return input
return [input]
def escape(string):
"""Escape a string such that it can be embedded into a Ninja file without
further interpretation."""
assert '\n' not in string, 'Ninja syntax does not allow newlines'
# We only have one special metacharacter: '$'.
return string.replace('$', '$$')
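# For example (hedged illustration): escape('cost is $5') == 'cost is $$5'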
| apache-2.0 |
naibaf7/PyGreentea | examples/3D_u_malis_euclid/test.py | 2 | 1619 | from __future__ import print_function
import sys, os, math
import numpy as np
import h5py
from numpy import float32, int32, uint8, dtype
from PIL import Image
import glob
# Load PyGreentea
# Relative path to where PyGreentea resides
pygt_path = '../..'
sys.path.append(pygt_path)
import pygreentea.pygreentea as pygt
from pygreentea.pygreentea import malis
# Load the datasets - individual tiff files in a directory
raw_dir = '../../../project_data/dataset_01/train/raw'
raw_path = sorted(glob.glob(raw_dir+'/*.tif'))
num_images = len(raw_path)
raw_ds = [np.expand_dims(pygt.normalize(np.array(Image.open(raw_path[i]).convert('L'), 'f')),0) for i in range(0, num_images)]
datasets = []
for i in range(0,len(raw_ds)):
dataset = {}
dataset['data'] = raw_ds[i]
datasets += [dataset]
test_net_file = 'net.prototxt'
test_device = 0
pygt.caffe.set_devices((test_device,))
caffemodels = pygt.get_caffe_models('net')
test_net = pygt.init_testnet(test_net_file, trained_model=caffemodels[-1][1], test_device=test_device)
def process_data_slice_callback(input_specs, batch_size, dataset_indexes, offsets, dataset_combined_sizes, data_arrays, slices):
# Nothing to process here
pass
output_arrays = []
pygt.process(test_net, datasets, ['aff_pred', 'smax_pred'], output_arrays, process_data_slice_callback)
for i in range(0, len(output_arrays)):
for key in output_arrays[i].keys():
outhdf5 = h5py.File('output/' + key + str(i) + '.h5', 'w')
outdset = outhdf5.create_dataset('main', np.shape(output_arrays[i][key]), np.float32, data=output_arrays[i][key])
outhdf5.close() | bsd-2-clause |
kkreis/espressopp | src/analysis/AdressDensity.py | 7 | 2909 | # Copyright (C) 2016
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*********************************
espressopp.analysis.AdressDensity
*********************************
Class to compute radial density profiles in adaptive resolution simulations based on distance to closest AdResS center. Works also for multiple overlapping AdResS regions.
Examples:
>>> densityprofile = espressopp.analysis.AdressDensity(system, verletlist)
>>> # creates the class
>>> densityprofile.addExclusions([1,2,3])
>>> # defines particle to be excluded from the calculation based on list of particle ids
>>> densityprofile.compute(100)
>>> # computes the densityprofile using 100 bins
.. function:: espressopp.analysis.AdressDensity(system, verletlist)
:param system: system object
:type system: shared_ptr<System>
:param verletlist: verletlist object
:type verletlist: shared_ptr<VerletListAdress>
.. function:: espressopp.analysis.AdressDensity.compute(bins)
:param bins: number of bins
:type bins: int
:rtype: list of reals
.. function:: espressopp.analysis.AdressDensity.addExclusions(pidlist)
:param pidlist: list of ids of particles to be excluded from the calculation
:type pidlist: list of ints
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.analysis.Observable import *
from _espressopp import analysis_AdressDensity
class AdressDensityLocal(ObservableLocal, analysis_AdressDensity):
def __init__(self, system, verletlist):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, analysis_AdressDensity, system, verletlist)
def addExclusions(self, pidlist):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
for pid in pidlist:
self.cxxclass.addExclpid(self, pid)
def compute(self, bins):
return self.cxxclass.compute(self, bins)
if pmi.isController :
class AdressDensity(Observable):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
pmicall = [ 'addExclusions', 'compute' ],
cls = 'espressopp.analysis.AdressDensityLocal'
)
| gpl-3.0 |
ab93/Depression-Identification | src/helpers/plot.py | 1 | 3688 | import os
import re
import sys
import csv
import numpy as np
import pandas as pd
from scipy import stats
import matplotlib.pyplot as plt
from src.main import config
def get_combined_data(file1, file2):
feature_df = pd.read_csv(file1)
feature_df = feature_df.append(pd.read_csv(file2))
train_split_df = pd.read_csv(config.TRAIN_SPLIT_FILE,
usecols=['Participant_ID', 'PHQ_Binary'])
feature_df = feature_df[feature_df['video'].isin(train_split_df['Participant_ID'])]
print "features: ", feature_df.shape
print "train_split: ", train_split_df.shape
train_split_dict = train_split_df.set_index('Participant_ID').T.to_dict()
del train_split_df
labels = np.zeros(feature_df.shape[0])
for i in xrange(feature_df.shape[0]):
video_id = feature_df.iat[i,0]
labels[i] = train_split_dict[video_id]['PHQ_Binary']
feature_df['label'] = pd.Series(labels, index=feature_df.index)
try:
feature_df.drop(['video','question','starttime','endtime'], inplace=True, axis=1)
except ValueError:
feature_df.drop(['video','question'], inplace=True, axis=1)
return feature_df
def plot_boxplot(df, dir_name):
columns = df.columns[:-1]
grouped_df = df.groupby(by='label')
for feature in columns:
data = []
for key, item in grouped_df:
temp_df = grouped_df.get_group(key)
print temp_df.loc[:,feature].describe()
#raw_input()
data.append(temp_df.loc[:,feature].values)
print len(data[0]), len(data[1])
plt.clf()
fig = plt.figure(1, figsize=(9, 6))
fig.clf()
ax = fig.add_subplot(111)
bp = ax.boxplot(data, notch=True, sym='+', vert=True, whis=1.5,
patch_artist=True)
ax.set_xticklabels(['Non-depressed','Depressed'])
ax.set_ylabel(feature)
ax.set_xlabel('Class Label')
plt.grid(axis='y',
linestyle='--',
which='major',
color='black',
alpha=0.25)
colors = ['green', 'red']
for box,color in zip(bp['boxes'],colors):
box.set(color='black', linewidth=0.5)
box.set_facecolor(color)
for whisker in bp['whiskers']:
whisker.set(color='grey', linewidth=1.5, linestyle='--')
for cap in bp['caps']:
cap.set(color='black', linewidth=2)
for median in bp['medians']:
median.set(color='black', linewidth=3)
for flier in bp['fliers']:
flier.set(marker='o', color='green', alpha=0.7)
#plt.show()
#sys.exit(1)
fig.savefig('plots/' + dir_name + '/' + feature + '.png')
def calculate_anova(df, filename):
filename += '.csv'
columns = df.columns[:-1]
grouped_df = df.groupby(by='label')
with open(os.path.join(config.ANOVA_DIR,filename), 'w') as csvfile:
csv_writer = csv.writer(csvfile, delimiter=',')
csv_writer.writerow(["Feature","P-value","F-value"])
for feature in columns:
data = []
for key, item in grouped_df:
temp_df = grouped_df.get_group(key)
data.append(temp_df.loc[:,feature].values)
f_val, p_val = stats.f_oneway(data[0], data[1])
csv_writer.writerow([feature, p_val, f_val])
def main():
filename = sys.argv[3]
features_df = get_combined_data(os.path.join(config.D_ND_DIR, sys.argv[1]),
os.path.join(config.D_ND_DIR, sys.argv[2]))
calculate_anova(features_df, filename)
plot_boxplot(features_df, filename)
if __name__ == '__main__':
main()
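# Hedged usage sketch (not part of the original script): it expects two feature
# CSV file names located under config.D_ND_DIR plus an output name, all passed
# on the command line. The file names below are placeholders.
#
#   python -m src.helpers.plot depressed_features.csv nondepressed_features.csv acoustic_run1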
| mit |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/theano/tensor/tests/test_gc.py | 3 | 4084 | import numpy
import six.moves.cPickle as pickle
from six.moves import xrange
import theano
from theano import tensor as T
import time
def test_no_reuse():
x = T.lvector()
y = T.lvector()
f = theano.function([x, y], x + y)
# provide both inputs in the first call
f(numpy.ones(10, dtype='int64'), numpy.ones(10, dtype='int64'))
try:
f(numpy.ones(10))
except TypeError:
return
assert not 'should not get here'
def test_gc_never_pickles_temporaries():
x = T.dvector()
    for i in xrange(2): # TODO: using 30 here causes a very LONG compilation due to MERGE
if i:
r = r + r/10
else:
r = x
optimizer = None
optimizer = 'fast_run'
for f_linker, g_linker in [
(theano.PerformLinker(allow_gc=True),
theano.PerformLinker(allow_gc=False)),
(theano.OpWiseCLinker(allow_gc=True),
theano.OpWiseCLinker(allow_gc=False))]:
# f_linker has garbage collection
# g_linker has no garbage collection
f = theano.function([x], r, mode=theano.Mode(optimizer=optimizer,
linker=f_linker))
g = theano.function([x], r, mode=theano.Mode(optimizer=optimizer,
linker=g_linker))
len_pre_f = len(pickle.dumps(f))
len_pre_g = len(pickle.dumps(g))
        # We can't compare the content or the length of the string
        # between f and g, for two reasons: we store some timing information
        # as floats, which won't be the same on each run, and different floats
        # can have different lengths when printed.
def a(fn):
return len(pickle.dumps(fn.maker))
assert a(f) == a(f) # some sanity checks on the pickling mechanism
assert a(g) == a(g) # some sanity checks on the pickling mechanism
def b(fn):
return len(
pickle.dumps(
theano.compile.function_module._pickle_Function(
fn)))
assert b(f) == b(f) # some sanity checks on the pickling mechanism
def c(fn):
return len(pickle.dumps(fn))
assert c(f) == c(f) # some sanity checks on the pickling mechanism
assert c(g) == c(g) # some sanity checks on the pickling mechanism
# now run the function once to create temporaries within the no-gc
# linker
f(numpy.ones(100, dtype='float64'))
g(numpy.ones(100, dtype='float64'))
# serialize the functions again
post_f = pickle.dumps(f)
post_g = pickle.dumps(g)
len_post_f = len(post_f)
len_post_g = len(post_g)
# assert that f() didn't cause the function to grow
        # allow_gc should leave the function unchanged by calling it
assert len_pre_f == len_post_f
# assert that g() didn't cause g to grow because temporaries
# that weren't collected shouldn't be pickled anyway
# Allow for a couple of bytes of difference, since timing info,
# for instance, can be represented as text of varying size.
assert abs(len_post_f - len_post_g) < 256, (
f_linker, len_post_f, len_post_g)
def test_merge_opt_runtime():
"""In the original merge optimization, the following graph took
like caused the MERGE optimizer to exhibit really bad performance
(quadratic? exponential?)
Ironically, there is actually no merging to do in this graph.
"""
x = T.dvector()
for i in xrange(50):
if i:
r = r + r/10
else:
r = x
t = time.time()
f = theano.function([x], r, mode='FAST_COMPILE')
    # FAST_RUN runs the in-place optimizer, which requires a lot of
# toposorting, which is actually pretty slow at the moment. This
# test was designed to test MergeOptimizer... so I'm leaving
# toposort optimizations for a later date.
dt = time.time() - t
# it should never take longer than 5 seconds to compile this graph
assert dt < 5.0, dt
| mit |
jkbradley/spark | examples/src/main/python/mllib/regression_metrics_example.py | 158 | 2104 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $example on$
from pyspark.mllib.regression import LabeledPoint, LinearRegressionWithSGD
from pyspark.mllib.evaluation import RegressionMetrics
from pyspark.mllib.linalg import DenseVector
# $example off$
from pyspark import SparkContext
if __name__ == "__main__":
sc = SparkContext(appName="Regression Metrics Example")
# $example on$
# Load and parse the data
def parsePoint(line):
values = line.split()
return LabeledPoint(float(values[0]),
DenseVector([float(x.split(':')[1]) for x in values[1:]]))
data = sc.textFile("data/mllib/sample_linear_regression_data.txt")
parsedData = data.map(parsePoint)
# Build the model
model = LinearRegressionWithSGD.train(parsedData)
# Get predictions
valuesAndPreds = parsedData.map(lambda p: (float(model.predict(p.features)), p.label))
# Instantiate metrics object
metrics = RegressionMetrics(valuesAndPreds)
# Squared Error
print("MSE = %s" % metrics.meanSquaredError)
print("RMSE = %s" % metrics.rootMeanSquaredError)
# R-squared
print("R-squared = %s" % metrics.r2)
# Mean absolute error
print("MAE = %s" % metrics.meanAbsoluteError)
# Explained variance
print("Explained variance = %s" % metrics.explainedVariance)
# $example off$
| apache-2.0 |
jamespacileo/django-france | django/views/decorators/vary.py | 2 | 1185 | from functools import wraps
from django.utils.cache import patch_vary_headers
from django.utils.decorators import available_attrs
def vary_on_headers(*headers):
"""
A view decorator that adds the specified headers to the Vary header of the
response. Usage:
@vary_on_headers('Cookie', 'Accept-language')
def index(request):
...
Note that the header names are not case-sensitive.
"""
def decorator(func):
def inner_func(*args, **kwargs):
response = func(*args, **kwargs)
patch_vary_headers(response, headers)
return response
return wraps(func, assigned=available_attrs(func))(inner_func)
return decorator
def vary_on_cookie(func):
"""
A view decorator that adds "Cookie" to the Vary header of a response. This
    indicates that a page's contents depend on cookies. Usage:
@vary_on_cookie
def index(request):
...
"""
def inner_func(*args, **kwargs):
response = func(*args, **kwargs)
patch_vary_headers(response, ('Cookie',))
return response
return wraps(func, assigned=available_attrs(func))(inner_func)
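# Illustrative sketch (not part of the module): the two decorators can be
# stacked on a single view, so the response varies on both the Cookie and
# Accept-Language headers.
#
#   @vary_on_cookie
#   @vary_on_headers('Accept-Language')
#   def index(request):
#       ...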
| bsd-3-clause |
lahwaacz/wiki-scripts | ws/diff.py | 1 | 4383 | #! /usr/bin/env python3
try:
import WikEdDiff
_has_wikeddiff = True
except ImportError:
_has_wikeddiff = False
import difflib
try:
import pygments
import pygments.lexers.text
import pygments.formatters
_has_pygments = True
except ImportError:
_has_pygments = False
def diff_highlighted(old, new, fromfile="", tofile="", fromfiledate="", tofiledate=""):
"""
Returns a diff between two texts formatted with ANSI color sequences
suitable for output in 256-color terminal.
When available, the :py:mod:`WikEdDiff` library and its
:py:class:`AnsiFormatter` is used. Otherwise the :py:mod:`difflib`
module from the standard library is used to generate the diff in unified
format and :py:mod:`pygments` is used (when available) as the highlighter.
:param old: text to compare (old revision)
:param new: text to compare (new revision)
:param fromfile: original file name (used as meta data to format diff header)
:param tofile: new file name (used as meta data to format diff header)
:param fromfiledate: original file timestamp (used as meta data to format diff header)
:param tofiledate: new file timestamp (used as meta data to format diff header)
:returns: diff formatted with ANSI color sequences
"""
# Wikicode -> str
new = str(new)
# normalize line breaks at the end
if not old.endswith("\n"):
old += "\n"
if not new.endswith("\n"):
new += "\n"
if _has_wikeddiff is True:
# get diff fragments
config = WikEdDiff.WikEdDiffConfig()
wd = WikEdDiff.WikEdDiff(config)
fragments = wd.diff(old, new)
# format with ANSI colors
formatter = WikEdDiff.AnsiFormatter()
diff_ansi = formatter.format( fragments, coloredBlocks=True )
# prepend metadata
header = formatter.pushColor(formatter.color_delete) + \
"--- {}\t{}".format(fromfile, fromfiledate) + \
formatter.popColor() + "\n" + \
formatter.pushColor(formatter.color_insert) + \
"+++ {}\t{}".format(tofile, tofiledate) + \
formatter.popColor() + "\n"
sep = formatter.pushColor(formatter.color_separator) + \
formatter.separator_symbol + \
formatter.popColor()
return header + sep + "\n" + diff_ansi + "\n" + sep
else:
# splitlines() omits the '\n' char from each line, so we need to
# explicitly set lineterm="", otherwise spacing would be inconsistent
diff = difflib.unified_diff(old.splitlines(), new.splitlines(), fromfile, tofile,
str(fromfiledate), str(tofiledate), lineterm="")
text = "\n".join(diff)
if _has_pygments is True:
lexer = pygments.lexers.text.DiffLexer()
formatter = pygments.formatters.Terminal256Formatter()
text = pygments.highlight(text, lexer, formatter)
return text
def diff_revisions(api, oldrevid, newrevid):
"""
Get a visual diff of two revisions obtained via a MediaWiki API.
Calls :py:func:`diff_highlighted` and includes basic meta data (title,
username, timestamp and comment) in the diff header.
:param api: a :py:class:`MediaWiki.api.API` instance to operate on
:param oldrevid: revision ID for old revision
:param newrevid: revision ID for new revision
"""
# query content + meta data for each revision
result = api.call_api(action="query", prop="revisions", rvprop="content|timestamp|user|comment", revids="%s|%s" % (oldrevid, newrevid))
page = list(result["pages"].values())[0] # returned structure is the same as for generators
title = page["title"]
if len(page["revisions"]) != 2:
raise Exception("API returned wrong number of revisions, are the revision IDs valid?")
rev_old = page["revisions"][0]
rev_new = page["revisions"][1]
# fields to show in header (extended, abusing original field titles)
fn_old = "%s\t(%s)" % (title, rev_old["user"])
fn_new = "%s\t(%s)" % (title, rev_new["user"])
ts_old = "%s\t%s" % (rev_old["timestamp"], rev_old["comment"])
ts_new = "%s\t%s" % (rev_new["timestamp"], rev_new["comment"])
return diff_highlighted(rev_old["*"], rev_new["*"], fn_old, fn_new, ts_old, ts_new)
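# Minimal demo (a hedged sketch, not part of the original module): print a
# highlighted diff of two short strings. The file names and timestamps are
# purely illustrative; WikEdDiff/pygments are picked up automatically if installed.
if __name__ == "__main__":
    _old = "Hello world\nSecond line\n"
    _new = "Hello there\nSecond line\n"
    print(diff_highlighted(_old, _new,
                           fromfile="page (old)", tofile="page (new)",
                           fromfiledate="2015-01-01", tofiledate="2015-01-02"))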
| gpl-3.0 |
SivilTaram/edx-platform | common/test/acceptance/tests/lms/test_lms_problems.py | 3 | 13281 | # -*- coding: utf-8 -*-
"""
Bok choy acceptance tests for problems in the LMS
See also old lettuce tests in lms/djangoapps/courseware/features/problems.feature
"""
from textwrap import dedent
from ..helpers import UniqueCourseTest
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.lms.courseware import CoursewarePage
from ...pages.lms.problem import ProblemPage
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
from ..helpers import EventsTestMixin
class ProblemsTest(UniqueCourseTest):
"""
Base class for tests of problems in the LMS.
"""
USERNAME = "joe_student"
EMAIL = "[email protected]"
def setUp(self):
super(ProblemsTest, self).setUp()
self.xqueue_grade_response = None
self.courseware_page = CoursewarePage(self.browser, self.course_id)
# Install a course with a hierarchy and problems
course_fixture = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
problem = self.get_problem()
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(problem)
)
).install()
# Auto-auth register for the course.
AutoAuthPage(self.browser, username=self.USERNAME, email=self.EMAIL,
course_id=self.course_id, staff=False).visit()
def get_problem(self):
""" Subclasses should override this to complete the fixture """
raise NotImplementedError()
class ProblemClarificationTest(ProblemsTest):
"""
Tests the <clarification> element that can be used in problem XML.
"""
def get_problem(self):
"""
Create a problem with a <clarification>
"""
xml = dedent("""
<problem markdown="null">
<text>
<p>
Given the data in Table 7 <clarification>Table 7: "Example PV Installation Costs",
Page 171 of Roberts textbook</clarification>, compute the ROI
<clarification>Return on Investment <strong>(per year)</strong></clarification> over 20 years.
</p>
<numericalresponse answer="6.5">
<textline label="Enter the annual ROI" trailing_text="%" />
</numericalresponse>
</text>
</problem>
""")
return XBlockFixtureDesc('problem', 'TOOLTIP TEST PROBLEM', data=xml)
def test_clarification(self):
"""
Test that we can see the <clarification> tooltips.
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
self.assertEqual(problem_page.problem_name, 'TOOLTIP TEST PROBLEM')
problem_page.click_clarification(0)
self.assertIn('"Example PV Installation Costs"', problem_page.visible_tooltip_text)
problem_page.click_clarification(1)
tooltip_text = problem_page.visible_tooltip_text
self.assertIn('Return on Investment', tooltip_text)
self.assertIn('per year', tooltip_text)
self.assertNotIn('strong', tooltip_text)
class ProblemExtendedHintTest(ProblemsTest, EventsTestMixin):
"""
Test that extended hint features plumb through to the page html and tracking log.
"""
def get_problem(self):
"""
Problem with extended hint features.
"""
xml = dedent("""
<problem>
<p>question text</p>
<stringresponse answer="A">
<stringequalhint answer="B">hint</stringequalhint>
<textline size="20"/>
</stringresponse>
<demandhint>
<hint>demand-hint1</hint>
<hint>demand-hint2</hint>
</demandhint>
</problem>
""")
return XBlockFixtureDesc('problem', 'TITLE', data=xml)
def test_check_hint(self):
"""
Test clicking Check shows the extended hint in the problem message.
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
self.assertEqual(problem_page.problem_text[0], u'question text')
problem_page.fill_answer('B')
problem_page.click_check()
self.assertEqual(problem_page.message_text, u'Incorrect: hint')
# Check for corresponding tracking event
actual_events = self.wait_for_events(
event_filter={'event_type': 'edx.problem.hint.feedback_displayed'},
number_of_matches=1
)
self.assert_events_match(
[{'event': {'hint_label': u'Incorrect',
'trigger_type': 'single',
'student_answer': [u'B'],
'correctness': False,
'question_type': 'stringresponse',
'hints': [{'text': 'hint'}]}}],
actual_events)
def test_demand_hint(self):
"""
Test clicking hint button shows the demand hint in its div.
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
# The hint button rotates through multiple hints
problem_page.click_hint()
self.assertEqual(problem_page.hint_text, u'Hint (1 of 2): demand-hint1')
problem_page.click_hint()
self.assertEqual(problem_page.hint_text, u'Hint (2 of 2): demand-hint2')
problem_page.click_hint()
self.assertEqual(problem_page.hint_text, u'Hint (1 of 2): demand-hint1')
# Check corresponding tracking events
actual_events = self.wait_for_events(
event_filter={'event_type': 'edx.problem.hint.demandhint_displayed'},
number_of_matches=3
)
self.assert_events_match(
[
{'event': {u'hint_index': 0, u'hint_len': 2, u'hint_text': u'demand-hint1'}},
{'event': {u'hint_index': 1, u'hint_len': 2, u'hint_text': u'demand-hint2'}},
{'event': {u'hint_index': 0, u'hint_len': 2, u'hint_text': u'demand-hint1'}}
],
actual_events)
class ProblemHintWithHtmlTest(ProblemsTest, EventsTestMixin):
"""
Tests that hints containing html get rendered properly
"""
def get_problem(self):
"""
Problem with extended hint features.
"""
xml = dedent("""
<problem>
<p>question text</p>
<stringresponse answer="A">
<stringequalhint answer="B">aa <a href="#">bb</a> cc</stringequalhint>
<stringequalhint answer="C"><a href="#">aa bb</a> cc</stringequalhint>
<textline size="20"/>
</stringresponse>
<demandhint>
<hint>aa <a href="#">bb</a> cc</hint>
<hint><a href="#">dd ee</a> ff</hint>
</demandhint>
</problem>
""")
return XBlockFixtureDesc('problem', 'PROBLEM HTML HINT TEST', data=xml)
def test_check_hint(self):
"""
Test clicking Check shows the extended hint in the problem message.
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
self.assertEqual(problem_page.problem_text[0], u'question text')
problem_page.fill_answer('B')
problem_page.click_check()
self.assertEqual(problem_page.message_text, u'Incorrect: aa bb cc')
problem_page.fill_answer('C')
problem_page.click_check()
self.assertEqual(problem_page.message_text, u'Incorrect: aa bb cc')
# Check for corresponding tracking event
actual_events = self.wait_for_events(
event_filter={'event_type': 'edx.problem.hint.feedback_displayed'},
number_of_matches=2
)
self.assert_events_match(
[{'event': {'hint_label': u'Incorrect',
'trigger_type': 'single',
'student_answer': [u'B'],
'correctness': False,
'question_type': 'stringresponse',
'hints': [{'text': 'aa <a href="#">bb</a> cc'}]}},
{'event': {'hint_label': u'Incorrect',
'trigger_type': 'single',
'student_answer': [u'C'],
'correctness': False,
'question_type': 'stringresponse',
'hints': [{'text': '<a href="#">aa bb</a> cc'}]}}],
actual_events)
def test_demand_hint(self):
"""
Test clicking hint button shows the demand hint in its div.
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
# The hint button rotates through multiple hints
problem_page.click_hint()
self.assertEqual(problem_page.hint_text, u'Hint (1 of 2): aa bb cc')
problem_page.click_hint()
self.assertEqual(problem_page.hint_text, u'Hint (2 of 2): dd ee ff')
problem_page.click_hint()
self.assertEqual(problem_page.hint_text, u'Hint (1 of 2): aa bb cc')
# Check corresponding tracking events
actual_events = self.wait_for_events(
event_filter={'event_type': 'edx.problem.hint.demandhint_displayed'},
number_of_matches=3
)
self.assert_events_match(
[
{'event': {u'hint_index': 0, u'hint_len': 2, u'hint_text': u'aa <a href="#">bb</a> cc'}},
{'event': {u'hint_index': 1, u'hint_len': 2, u'hint_text': u'<a href="#">dd ee</a> ff'}},
{'event': {u'hint_index': 0, u'hint_len': 2, u'hint_text': u'aa <a href="#">bb</a> cc'}}
],
actual_events)
class ProblemWithMathjax(ProblemsTest):
"""
Tests the <MathJax> used in problem
"""
def get_problem(self):
"""
Create a problem with a <MathJax> in body and hint
"""
xml = dedent(r"""
<problem>
<p>Check mathjax has rendered [mathjax]E=mc^2[/mathjax]</p>
<multiplechoiceresponse>
<choicegroup label="Answer this?" type="MultipleChoice">
<choice correct="true">Choice1 <choicehint>Correct choice message</choicehint></choice>
<choice correct="false">Choice2<choicehint>Wrong choice message</choicehint></choice>
</choicegroup>
</multiplechoiceresponse>
<demandhint>
<hint>mathjax should work1 \(E=mc^2\) </hint>
<hint>mathjax should work2 [mathjax]E=mc^2[/mathjax]</hint>
</demandhint>
</problem>
""")
return XBlockFixtureDesc('problem', 'MATHJAX TEST PROBLEM', data=xml)
def test_mathjax_in_hint(self):
"""
        Test that MathJax has been successfully rendered in the problem hint
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
self.assertEqual(problem_page.problem_name, "MATHJAX TEST PROBLEM")
        # Verify MathJax has been rendered
        self.assertTrue(problem_page.mathjax_rendered_in_problem, "MathJax did not render in the problem body")
# The hint button rotates through multiple hints
problem_page.click_hint()
self.assertIn("Hint (1 of 2): mathjax should work1", problem_page.hint_text)
        self.assertTrue(problem_page.mathjax_rendered_in_hint, "MathJax did not render in the problem hint")
# Rotate the hint and check the problem hint
problem_page.click_hint()
self.assertIn("Hint (2 of 2): mathjax should work2", problem_page.hint_text)
        self.assertTrue(problem_page.mathjax_rendered_in_hint, "MathJax did not render in the problem hint")
class ProblemPartialCredit(ProblemsTest):
"""
Makes sure that the partial credit is appearing properly.
"""
def get_problem(self):
"""
Create a problem with partial credit.
"""
xml = dedent("""
<problem>
<p>The answer is 1. Partial credit for -1.</p>
<numericalresponse answer="1" partial_credit="list">
<formulaequationinput label="How many miles away from Earth is the sun? Use scientific notation to answer." />
<responseparam type="tolerance" default="0.01" />
<responseparam partial_answers="-1" />
</numericalresponse>
</problem>
""")
return XBlockFixtureDesc('problem', 'PARTIAL CREDIT TEST PROBLEM', data=xml)
def test_partial_credit(self):
"""
Test that we can see the partial credit value and feedback.
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
self.assertEqual(problem_page.problem_name, 'PARTIAL CREDIT TEST PROBLEM')
problem_page.fill_answer_numerical('-1')
problem_page.click_check()
self.assertTrue(problem_page.simpleprob_is_partially_correct())
| agpl-3.0 |
sonaht/ansible | lib/ansible/modules/network/sros/sros_config.py | 43 | 10865 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {
'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = """
---
module: sros_config
version_added: "2.2"
author: "Peter Sprygada (@privateip)"
short_description: Manage Nokia SR OS device configuration
description:
- Nokia SR OS configurations use a simple block indent file syntax
for segmenting configuration into sections. This module provides
an implementation for working with SR OS configuration sections in
a deterministic way.
extends_documentation_fragment: sros
options:
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device running-config. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser.
required: false
default: null
aliases: ['commands']
parents:
description:
- The ordered set of parents that uniquely identify the section
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
required: false
default: null
src:
description:
- Specifies the source path to the file that contains the configuration
or configuration template to load. The path to the source file can
either be the full path on the Ansible control host or a relative
path from the playbook or role root directory. This argument is mutually
exclusive with I(lines).
required: false
default: null
version_added: "2.2"
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system.
required: false
default: null
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
required: false
default: null
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line. If
match is set to I(strict), command lines are matched with respect
to position. If match is set to I(exact), command lines
must be an equal match. Finally, if match is set to I(none), the
module will not attempt to compare the source configuration with
the running configuration on the remote device.
required: false
default: line
choices: ['line', 'strict', 'exact', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the replace argument is set to I(line) then
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
line is not correct.
required: false
default: line
choices: ['line', 'block']
force:
description:
- The force argument instructs the module to not consider the
current devices running-config. When set to true, this will
cause the module to push the contents of I(src) into the device
without first checking if already configured.
- Note this argument should be considered deprecated. To achieve
the equivalent, set the C(match=none) which is idempotent. This argument
will be removed in a future release.
required: false
default: false
choices: [ "true", "false" ]
version_added: "2.2"
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. The backup file is written to the C(backup)
folder in the playbook root directory. If the directory does not
exist, it is created.
required: false
default: no
choices: ['yes', 'no']
version_added: "2.2"
config:
description:
- The C(config) argument allows the playbook designer to supply
the base configuration to be used to validate configuration
changes necessary. If this argument is provided, the module
will not download the running-config from the remote node.
required: false
default: null
version_added: "2.2"
defaults:
description:
- This argument specifies whether or not to collect all defaults
when getting the remote device running config. When enabled,
the module will get the current config by issuing the command
C(show running-config all).
required: false
default: no
choices: ['yes', 'no']
aliases: ['detail']
version_added: "2.2"
save:
description:
- The C(save) argument instructs the module to save the running-
config to the startup-config at the conclusion of the module
running. If check mode is specified, this argument is ignored.
required: false
default: no
choices: ['yes', 'no']
version_added: "2.2"
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
---
vars:
cli:
host: "{{ inventory_hostname }}"
username: admin
password: admin
transport: cli
---
- name: enable rollback location
sros_config:
lines: configure system rollback rollback-location "cf3:/ansible"
provider: "{{ cli }}"
- name: set system name to {{ inventory_hostname }} using one line
sros_config:
lines:
- configure system name "{{ inventory_hostname }}"
provider: "{{ cli }}"
- name: set system name to {{ inventory_hostname }} using parents
sros_config:
lines:
- 'name "{{ inventory_hostname }}"'
parents:
- configure
- system
provider: "{{ cli }}"
backup: yes
- name: load config from file
sros_config:
src: "{{ inventory_hostname }}.cfg"
provider: "{{ cli }}"
save: yes
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['config system name "sros01"']
commands:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['config system name "sros01"']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: string
sample: /playbooks/ansible/backup/sros_config.2016-07-16@22:28:34
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import NetworkConfig, dumps
from ansible.module_utils.sros import sros_argument_spec, check_args
from ansible.module_utils.sros import load_config, run_commands, get_config
def sanitize_config(lines):
commands = list()
for line in lines:
for index, entry in enumerate(commands):
if line.startswith(entry):
del commands[index]
break
commands.append(line)
return commands
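# Illustrative note (not from the original module): sanitize_config() keeps only
# the most specific command lines -- a line is dropped as soon as a later line
# extends it. For example:
#
#   sanitize_config(['configure system', 'configure system name "sros01"'])
#   # -> ['configure system name "sros01"']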
def get_active_config(module):
contents = module.params['config']
if not contents:
flags = []
if module.params['defaults']:
flags = ['detail']
return get_config(module, flags)
return contents
def get_candidate(module):
candidate = NetworkConfig(indent=4)
if module.params['src']:
candidate.load(module.params['src'])
elif module.params['lines']:
parents = module.params['parents'] or list()
candidate.add(module.params['lines'], parents=parents)
return candidate
def run(module, result):
match = module.params['match']
candidate = get_candidate(module)
if match != 'none':
config_text = get_active_config(module)
config = NetworkConfig(indent=4, contents=config_text)
configobjs = candidate.difference(config)
else:
configobjs = candidate.items
if configobjs:
commands = dumps(configobjs, 'commands')
commands = sanitize_config(commands.split('\n'))
result['commands'] = commands
result['updates'] = commands
# send the configuration commands to the device and merge
# them with the current running config
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
def main():
""" main entry point for module execution
"""
argument_spec = dict(
src=dict(type='path'),
lines=dict(aliases=['commands'], type='list'),
parents=dict(type='list'),
match=dict(default='line', choices=['line', 'none']),
config=dict(),
defaults=dict(type='bool', default=False, aliases=['detail']),
backup=dict(type='bool', default=False),
save=dict(type='bool', default=False),
)
argument_spec.update(sros_argument_spec)
mutually_exclusive = [('lines', 'src')]
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
result = dict(changed=False, warnings=list())
warnings = list()
check_args(module, warnings)
if warnings:
result['warnings'] = warnings
if module.params['backup']:
result['__backup__'] = get_config(module)
run(module, result)
if module.params['save']:
if not module.check_mode:
run_commands(module, ['admin save'])
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
unoduetre/textclassify | results/resultgen.py | 1 | 3123 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import sys, os, math as m, random
os.chdir(os.path.dirname(sys.argv[0]))
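# This script reads the list of trials from trials.txt, then for each trial
# loads <name>2.csv (per-category TPR/PPV) and <name>1.csv (per-document
# classifications) and prints a LaTeX longtable -- with Polish labels --
# summarising the per-category results and the overall accuracy of each trial.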
print """{"""
print """
\\makeatletter
\\newcommand\\nobreakcline[1]{\\@nobreakcline#1\\@nil}%
\\def\\@nobreakcline#1-#2\\@nil{%
\\omit
\\@multicnt#1%
\\advance\\@multispan\\m@ne
\\ifnum\\@multicnt=\\@ne\\@firstofone{&\\omit}\\fi
\\@multicnt#2%
\\advance\@multicnt-#1%
\\advance\@multispan\@ne
\\leaders\\hrule\\@height\\arrayrulewidth\\hfill\\\\*
\\noalign{\\vskip-\\arrayrulewidth}}
\\makeatother
"""
print """\\begingroup
\\setlength{\LTleft}{-20cm plus -1fill}
\\setlength{\LTright}{\LTleft}
\\ltabela{Wyniki przeprowadzonych prób}{|c|c|c|c|c|c|c|c|}{ \\hline"""
print """\\textbf{\\#} & \\textbf{Zestaw} & \\textbf{Zb. tr./ucz.} & \\textbf{Podobieństwo} & $\mathbf{k}$ & \\textbf{Kategoria} & \\textbf{TPR} & \\textbf{PPV} \\\\
\\hline
\\endhead"""
T = {'countries': 'Kraje', 'metric': '$R^n$', 'c\_allwords': 'wszystko', 'euclidean': '$d_e$',
'jaccard': 'Jaccard', 'keywords': 'Wg. sł. kluczowych', 'chebyshev': '$d_\infty$',
'taxicab': '$d_1$', 'topics': 'Tematy', 'c\_fuzzy': 'zb. rozm.', 'c\_keywords': 'wybr. sł.', 'sb': 'Autorzy', 'ngram': 'N-gramy'}
def t(s):
return T[s] if s in T else s
h = open('trials.txt', 'rt')
i = 0
for l in h:
l = l.strip()
if not l or l.startswith('#'): continue
name, size, rand, overl, simil, contents, metric, k = l.split()
size = float(size)
rand = True if rand == 'yes' else False
overl = True if overl == 'yes' else False
d = name[:name.find('-')]
p = os.path.join(d, name + '2.csv')
data = []
g = open(p, 'rt')
for gl in g.readlines()[1:]:
gl = gl.strip()
if not gl: continue
cat, _, tpr, __, ppv = gl.split(',')
tpr = float(tpr[:-1])
ppv = float(ppv[:-1])
if m.isnan(tpr): tpr = 0.0
if m.isnan(ppv): ppv = 0.0
data.append([cat, tpr, ppv])
g.close()
mr = lambda s: '\multirow{%d}{*}{%s}' % (len(data) + 1, s)
ff = lambda f: ('%.2f' % f).replace('.', ',')
msg_sets = '$%.0f\\%%$/$%.0f\\%%$' % (40 * size, 60 * size)
if rand: msg_sets += ', losowanie'
if overl: msg_sets += ', zachodzenie'
msg_simil = t(simil)
if simil == 'metric':
msg_simil += ', ' + t('c\_' + contents) + ', ' + t(metric)
data.sort(key = lambda x: x[0])
print '%s & %s & %s & %s & %s & %s & $%s\\%%$ & $%s\\%%$ \\\\*' % (mr(str(i+1)), mr(t(d)), mr(msg_sets), mr(msg_simil), mr(str(k)), data[0][0], ff(data[0][1]), ff(data[0][2]))
for j in xrange(1, len(data)):
print ' & & & & & %s & $%s\\%%$ & $%s\\%%$ \\\\*' % (data[j][0], ff(data[j][1]), ff(data[j][2]))
print '\\nobreakcline{6-8}'
p = os.path.join(d, name + '1.csv')
g = open(p, 'rt')
count = 0
good = 0
for gl in g.readlines()[1:]:
gl = gl.strip()
if not gl: continue
_, lll, rrr = gl.split(',')
count += 1
good += (lll == rrr)
g.close()
succ = 0.0 if count == 0 else (100.0 * good) / count
print '& & & & & \\textbf{poprawne:} & \\multicolumn{2}{c|}{$\\mathbf{%s\\%%}$} \\\\' % (ff(succ))
print '\\hline'
i += 1
h.close()
print """}\\endgroup"""
print """}""" | gpl-3.0 |
nkcr/WebIndex | app/venv/lib/python3.5/site-packages/yaml/dumper.py | 277 | 2723 |
__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
from .emitter import *
from .serializer import *
from .representer import *
from .resolver import *
class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
Emitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
Serializer.__init__(self, encoding=encoding,
explicit_start=explicit_start, explicit_end=explicit_end,
version=version, tags=tags)
Representer.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
Resolver.__init__(self)
class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
Emitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
Serializer.__init__(self, encoding=encoding,
explicit_start=explicit_start, explicit_end=explicit_end,
version=version, tags=tags)
SafeRepresenter.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
Resolver.__init__(self)
class Dumper(Emitter, Serializer, Representer, Resolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
Emitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
Serializer.__init__(self, encoding=encoding,
explicit_start=explicit_start, explicit_end=explicit_end,
version=version, tags=tags)
Representer.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
Resolver.__init__(self)
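# Hedged usage sketch (not part of the module): these classes are normally
# selected through the top-level API, e.g.
#
#   import yaml
#   print(yaml.dump({"name": "example", "count": 3},
#                   Dumper=SafeDumper, default_flow_style=False))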
| mit |
abhattad4/Digi-Menu | digimenu2/django/utils/lru_cache.py | 270 | 7647 | try:
from functools import lru_cache
except ImportError:
# backport of Python's 3.3 lru_cache, written by Raymond Hettinger and
# licensed under MIT license, from:
# <http://code.activestate.com/recipes/578078-py26-and-py30-backport-of-python-33s-lru-cache/>
# Should be removed when Django only supports Python 3.2 and above.
from collections import namedtuple
from functools import update_wrapper
from threading import RLock
_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
class _HashedSeq(list):
__slots__ = 'hashvalue'
def __init__(self, tup, hash=hash):
self[:] = tup
self.hashvalue = hash(tup)
def __hash__(self):
return self.hashvalue
def _make_key(args, kwds, typed,
kwd_mark = (object(),),
fasttypes = {int, str, frozenset, type(None)},
sorted=sorted, tuple=tuple, type=type, len=len):
'Make a cache key from optionally typed positional and keyword arguments'
key = args
if kwds:
sorted_items = sorted(kwds.items())
key += kwd_mark
for item in sorted_items:
key += item
if typed:
key += tuple(type(v) for v in args)
if kwds:
key += tuple(type(v) for k, v in sorted_items)
elif len(key) == 1 and type(key[0]) in fasttypes:
return key[0]
return _HashedSeq(key)
def lru_cache(maxsize=100, typed=False):
"""Least-recently-used cache decorator.
If *maxsize* is set to None, the LRU features are disabled and the cache
can grow without bound.
If *typed* is True, arguments of different types will be cached separately.
For example, f(3.0) and f(3) will be treated as distinct calls with
distinct results.
Arguments to the cached function must be hashable.
View the cache statistics named tuple (hits, misses, maxsize, currsize) with
f.cache_info(). Clear the cache and statistics with f.cache_clear().
Access the underlying function with f.__wrapped__.
See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
"""
# Users should only access the lru_cache through its public API:
# cache_info, cache_clear, and f.__wrapped__
# The internals of the lru_cache are encapsulated for thread safety and
# to allow the implementation to change (including a possible C version).
def decorating_function(user_function):
cache = dict()
stats = [0, 0] # make statistics updateable non-locally
HITS, MISSES = 0, 1 # names for the stats fields
make_key = _make_key
cache_get = cache.get # bound method to lookup key or return None
_len = len # localize the global len() function
lock = RLock() # because linkedlist updates aren't threadsafe
root = [] # root of the circular doubly linked list
root[:] = [root, root, None, None] # initialize by pointing to self
nonlocal_root = [root] # make updateable non-locally
PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields
if maxsize == 0:
def wrapper(*args, **kwds):
# no caching, just do a statistics update after a successful call
result = user_function(*args, **kwds)
stats[MISSES] += 1
return result
elif maxsize is None:
def wrapper(*args, **kwds):
# simple caching without ordering or size limit
key = make_key(args, kwds, typed)
result = cache_get(key, root) # root used here as a unique not-found sentinel
if result is not root:
stats[HITS] += 1
return result
result = user_function(*args, **kwds)
cache[key] = result
stats[MISSES] += 1
return result
else:
def wrapper(*args, **kwds):
# size limited caching that tracks accesses by recency
key = make_key(args, kwds, typed) if kwds or typed else args
with lock:
link = cache_get(key)
if link is not None:
# record recent use of the key by moving it to the front of the list
root, = nonlocal_root
link_prev, link_next, key, result = link
link_prev[NEXT] = link_next
link_next[PREV] = link_prev
last = root[PREV]
last[NEXT] = root[PREV] = link
link[PREV] = last
link[NEXT] = root
stats[HITS] += 1
return result
result = user_function(*args, **kwds)
with lock:
root, = nonlocal_root
if key in cache:
# getting here means that this same key was added to the
# cache while the lock was released. since the link
# update is already done, we need only return the
# computed result and update the count of misses.
pass
elif _len(cache) >= maxsize:
# use the old root to store the new key and result
oldroot = root
oldroot[KEY] = key
oldroot[RESULT] = result
# empty the oldest link and make it the new root
root = nonlocal_root[0] = oldroot[NEXT]
oldkey = root[KEY]
oldvalue = root[RESULT]
root[KEY] = root[RESULT] = None
# now update the cache dictionary for the new links
del cache[oldkey]
cache[key] = oldroot
else:
# put result in a new link at the front of the list
last = root[PREV]
link = [last, root, key, result]
last[NEXT] = root[PREV] = cache[key] = link
stats[MISSES] += 1
return result
def cache_info():
"""Report cache statistics"""
with lock:
return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache))
def cache_clear():
"""Clear the cache and cache statistics"""
with lock:
cache.clear()
root = nonlocal_root[0]
root[:] = [root, root, None, None]
stats[:] = [0, 0]
wrapper.__wrapped__ = user_function
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return update_wrapper(wrapper, user_function)
return decorating_function
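# Minimal usage sketch (illustrative, not part of the backport): memoize a pure
# function and inspect the cache statistics afterwards.
#
#   @lru_cache(maxsize=32)
#   def fib(n):
#       return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#   fib(20)
#   print(fib.cache_info())  # CacheInfo(hits=..., misses=..., maxsize=32, currsize=...)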
| bsd-3-clause |
NunoEdgarGub1/scikit-learn | benchmarks/bench_plot_fastkmeans.py | 294 | 4676 | from __future__ import print_function
from collections import defaultdict
from time import time
import numpy as np
from numpy import random as nr
from sklearn.cluster.k_means_ import KMeans, MiniBatchKMeans
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
chunk = 100
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
data = nr.random_integers(-50, 50, (n_samples, n_features))
print('K-Means')
tstart = time()
kmeans = KMeans(init='k-means++', n_clusters=10).fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.5f" % kmeans.inertia_)
print()
results['kmeans_speed'].append(delta)
results['kmeans_quality'].append(kmeans.inertia_)
print('Fast K-Means')
# let's prepare the data in small chunks
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=10,
batch_size=chunk)
tstart = time()
mbkmeans.fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %f" % mbkmeans.inertia_)
print()
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
def compute_bench_2(chunks):
results = defaultdict(lambda: [])
n_features = 50000
means = np.array([[1, 1], [-1, -1], [1, -1], [-1, 1],
[0.5, 0.5], [0.75, -0.5], [-1, 0.75], [1, 0]])
X = np.empty((0, 2))
for i in range(8):
X = np.r_[X, means[i] + 0.8 * np.random.randn(n_features, 2)]
max_it = len(chunks)
it = 0
for chunk in chunks:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
print('Fast K-Means')
tstart = time()
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=8,
batch_size=chunk)
mbkmeans.fit(X)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.3fs" % mbkmeans.inertia_)
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 150, 5).astype(np.int)
features_range = np.linspace(150, 50000, 5).astype(np.int)
chunks = np.linspace(500, 10000, 15).astype(np.int)
results = compute_bench(samples_range, features_range)
results_2 = compute_bench_2(chunks)
max_time = max([max(i) for i in [t for (label, t) in results.iteritems()
if "speed" in label]])
max_inertia = max([max(i) for i in [
t for (label, t) in results.iteritems()
if "speed" not in label]])
fig = plt.figure('scikit-learn K-Means benchmark results')
for c, (label, timings) in zip('brcy',
sorted(results.iteritems())):
if 'speed' in label:
ax = fig.add_subplot(2, 2, 1, projection='3d')
ax.set_zlim3d(0.0, max_time * 1.1)
else:
ax = fig.add_subplot(2, 2, 2, projection='3d')
ax.set_zlim3d(0.0, max_inertia * 1.1)
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
i = 0
for c, (label, timings) in zip('br',
sorted(results_2.iteritems())):
i += 1
ax = fig.add_subplot(2, 2, i + 2)
y = np.asarray(timings)
ax.plot(chunks, y, color=c, alpha=0.8)
ax.set_xlabel('Chunks')
ax.set_ylabel(label)
plt.show()
| bsd-3-clause |
kurikuri99/xen_study | tools/python/xen/util/xmlrpclib2.py | 40 | 8653 | #============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2006 Anthony Liguori <[email protected]>
# Copyright (C) 2006 XenSource Inc.
#============================================================================
"""
An enhanced XML-RPC client/server interface for Python.
"""
import re
import fcntl
from types import *
from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
import SocketServer
import xmlrpclib, socket, os, stat
import mkdir
from xen.web import connection
from xen.xend.XendLogging import log
#
# Convert all integers to strings as described in the Xen API
#
def stringify(value):
if isinstance(value, long) or \
(isinstance(value, int) and not isinstance(value, bool)):
return str(value)
elif isinstance(value, dict):
new_value = {}
for k, v in value.items():
new_value[stringify(k)] = stringify(v)
return new_value
elif isinstance(value, (tuple, list)):
return [stringify(v) for v in value]
else:
return value
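# Illustrative example (not in the original file): stringify() recursively turns
# ints/longs into strings, since the Xen API transports all integers as strings.
#
#   stringify({'domid': 3, 'vcpus': [1, 2], 'paused': False})
#   # -> {'domid': '3', 'vcpus': ['1', '2'], 'paused': False}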
# We're forced to subclass the RequestHandler class so that we can work around
# some bugs in Keep-Alive handling and also enabled it by default
class XMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
protocol_version = "HTTP/1.1"
# xend crashes in python 2.7 unless disable_nagle_algorithm = False
# it isn't used in earlier versions so it is harmless to set it generally
disable_nagle_algorithm = False
def __init__(self, hosts_allowed, request, client_address, server):
self.hosts_allowed = hosts_allowed
SimpleXMLRPCRequestHandler.__init__(self, request, client_address,
server)
# this is inspired by SimpleXMLRPCRequestHandler's do_POST but differs
# in a few non-trivial ways
# 1) we never generate internal server errors. We let the exception
# propagate so that it shows up in the Xend debug logs
# 2) we don't bother checking for a _dispatch function since we don't
# use one
def do_POST(self):
addrport = self.client_address
if not connection.hostAllowed(addrport, self.hosts_allowed):
self.connection.shutdown(1)
return
data = self.rfile.read(int(self.headers["content-length"]))
rsp = self.server._marshaled_dispatch(data)
self.send_response(200)
self.send_header("Content-Type", "text/xml")
self.send_header("Content-Length", str(len(rsp)))
self.end_headers()
self.wfile.write(rsp)
self.wfile.flush()
if self.close_connection == 1:
self.connection.shutdown(1)
# This is a base XML-RPC server for TCP. It sets allow_reuse_address to
# true, and has an improved marshaller that logs and serializes exceptions.
class TCPXMLRPCServer(SocketServer.ThreadingMixIn, SimpleXMLRPCServer):
allow_reuse_address = True
def __init__(self, addr, allowed, xenapi, requestHandler=None,
logRequests = 1):
self.xenapi = xenapi
if requestHandler is None:
requestHandler = XMLRPCRequestHandler
SimpleXMLRPCServer.__init__(self, addr,
(lambda x, y, z:
requestHandler(allowed, x, y, z)),
logRequests)
flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD)
flags |= fcntl.FD_CLOEXEC
fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags)
def get_request(self):
(client, addr) = SimpleXMLRPCServer.get_request(self)
flags = fcntl.fcntl(client.fileno(), fcntl.F_GETFD)
flags |= fcntl.FD_CLOEXEC
fcntl.fcntl(client.fileno(), fcntl.F_SETFD, flags)
return (client, addr)
def _marshaled_dispatch(self, data, dispatch_method = None):
params, method = xmlrpclib.loads(data)
if False:
# Enable this block of code to exit immediately without sending
# a response. This allows you to test client-side crash handling.
import sys
sys.exit(1)
try:
if dispatch_method is not None:
response = dispatch_method(method, params)
else:
response = self._dispatch(method, params)
if self.xenapi and \
(response is None or
not isinstance(response, dict) or
'Status' not in response):
log.exception('Internal error handling %s: Invalid result %s',
method, response)
response = { "Status": "Failure",
"ErrorDescription":
['INTERNAL_ERROR',
'Invalid result %s handling %s' %
(response, method)]}
# With either Unicode or normal strings, we can only transmit
# \t, \n, \r, \u0020-\ud7ff, \ue000-\ufffd, and \u10000-\u10ffff
# in an XML document. xmlrpclib does not escape these values
# properly, and then breaks when it comes to parse the document.
# To hack around this problem, we use repr here and exec above
# to transmit the string using Python encoding.
# Thanks to David Mertz <[email protected]> for the trick (buried
# in xml_pickle.py).
if isinstance(response, StringTypes):
response = repr(response)[1:-1]
response = (response,)
response = xmlrpclib.dumps(response,
methodresponse=1,
allow_none=1)
except Exception, exn:
try:
if self.xenapi:
if _is_not_supported(exn):
errdesc = ['MESSAGE_METHOD_UNKNOWN', method]
else:
log.exception('Internal error handling %s', method)
errdesc = ['INTERNAL_ERROR', str(exn)]
response = xmlrpclib.dumps(
({ "Status": "Failure",
"ErrorDescription": errdesc },),
methodresponse = 1)
else:
import xen.xend.XendClient
if isinstance(exn, xmlrpclib.Fault):
response = xmlrpclib.dumps(exn)
else:
log.exception('Internal error handling %s', method)
response = xmlrpclib.dumps(
xmlrpclib.Fault(xen.xend.XendClient.ERROR_INTERNAL, str(exn)))
except:
log.exception('Internal error handling error')
return response
notSupportedRE = re.compile(r'method "(.*)" is not supported')
def _is_not_supported(exn):
try:
m = notSupportedRE.search(exn[0])
return m is not None
except:
return False
# This is a XML-RPC server that sits on a Unix domain socket.
# It implements proper support for allow_reuse_address by
# unlink()'ing an existing socket.
class UnixXMLRPCRequestHandler(XMLRPCRequestHandler):
def address_string(self):
try:
return XMLRPCRequestHandler.address_string(self)
except ValueError, e:
return self.client_address[:2]
class UnixXMLRPCServer(TCPXMLRPCServer):
address_family = socket.AF_UNIX
allow_address_reuse = True
def __init__(self, addr, allowed, xenapi, logRequests = 1):
mkdir.parents(os.path.dirname(addr), stat.S_IRWXU, True)
if self.allow_reuse_address and os.path.exists(addr):
os.unlink(addr)
TCPXMLRPCServer.__init__(self, addr, allowed, xenapi,
UnixXMLRPCRequestHandler, logRequests)
| gpl-2.0 |
zhukaixy/kbengine | kbe/src/lib/python/Lib/turtledemo/bytedesign.py | 145 | 4244 | #!/usr/bin/env python3
""" turtle-example-suite:
tdemo_bytedesign.py
An example adapted from the example-suite
of PythonCard's turtle graphics.
It's based on an article in BYTE magazine
Problem Solving with Logo: Using Turtle
Graphics to Redraw a Design
November 1982, p. 118 - 134
-------------------------------------------
Due to the statement
t.delay(0)
in line 152, which sets the animation delay
to 0, this animation runs in "line per line"
mode as fast as possible.
"""
import math
from turtle import Turtle, mainloop
from time import clock
# wrapper for any additional drawing routines
# that need to know about each other
class Designer(Turtle):
def design(self, homePos, scale):
self.up()
for i in range(5):
self.forward(64.65 * scale)
self.down()
self.wheel(self.position(), scale)
self.up()
self.backward(64.65 * scale)
self.right(72)
self.up()
self.goto(homePos)
self.right(36)
self.forward(24.5 * scale)
self.right(198)
self.down()
self.centerpiece(46 * scale, 143.4, scale)
self.getscreen().tracer(True)
def wheel(self, initpos, scale):
self.right(54)
for i in range(4):
self.pentpiece(initpos, scale)
self.down()
self.left(36)
for i in range(5):
self.tripiece(initpos, scale)
self.left(36)
for i in range(5):
self.down()
self.right(72)
self.forward(28 * scale)
self.up()
self.backward(28 * scale)
self.left(54)
self.getscreen().update()
def tripiece(self, initpos, scale):
oldh = self.heading()
self.down()
self.backward(2.5 * scale)
self.tripolyr(31.5 * scale, scale)
self.up()
self.goto(initpos)
self.setheading(oldh)
self.down()
self.backward(2.5 * scale)
self.tripolyl(31.5 * scale, scale)
self.up()
self.goto(initpos)
self.setheading(oldh)
self.left(72)
self.getscreen().update()
def pentpiece(self, initpos, scale):
oldh = self.heading()
self.up()
self.forward(29 * scale)
self.down()
for i in range(5):
self.forward(18 * scale)
self.right(72)
self.pentr(18 * scale, 75, scale)
self.up()
self.goto(initpos)
self.setheading(oldh)
self.forward(29 * scale)
self.down()
for i in range(5):
self.forward(18 * scale)
self.right(72)
self.pentl(18 * scale, 75, scale)
self.up()
self.goto(initpos)
self.setheading(oldh)
self.left(72)
self.getscreen().update()
def pentl(self, side, ang, scale):
if side < (2 * scale): return
self.forward(side)
self.left(ang)
self.pentl(side - (.38 * scale), ang, scale)
def pentr(self, side, ang, scale):
if side < (2 * scale): return
self.forward(side)
self.right(ang)
self.pentr(side - (.38 * scale), ang, scale)
def tripolyr(self, side, scale):
if side < (4 * scale): return
self.forward(side)
self.right(111)
self.forward(side / 1.78)
self.right(111)
self.forward(side / 1.3)
self.right(146)
self.tripolyr(side * .75, scale)
def tripolyl(self, side, scale):
if side < (4 * scale): return
self.forward(side)
self.left(111)
self.forward(side / 1.78)
self.left(111)
self.forward(side / 1.3)
self.left(146)
self.tripolyl(side * .75, scale)
def centerpiece(self, s, a, scale):
self.forward(s); self.left(a)
if s < (7.5 * scale):
return
self.centerpiece(s - (1.2 * scale), a, scale)
def main():
t = Designer()
t.speed(0)
t.hideturtle()
t.getscreen().delay(0)
t.getscreen().tracer(0)
at = clock()
t.design(t.position(), 2)
et = clock()
return "runtime: %.2f sec." % (et-at)
if __name__ == '__main__':
msg = main()
print(msg)
mainloop()
| lgpl-3.0 |
crosswalk-project/crosswalk-android-extensions | build/idl-generator/third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/coverage/execfile.py | 67 | 4782 | """Execute files of Python code."""
import imp, os, sys
from coverage.backward import exec_code_object, open_source
from coverage.misc import NoSource, ExceptionDuringRun
try:
# In Py 2.x, the builtins were in __builtin__
BUILTINS = sys.modules['__builtin__']
except KeyError:
# In Py 3.x, they're in builtins
BUILTINS = sys.modules['builtins']
def rsplit1(s, sep):
"""The same as s.rsplit(sep, 1), but works in 2.3"""
parts = s.split(sep)
return sep.join(parts[:-1]), parts[-1]
def run_python_module(modulename, args):
"""Run a python module, as though with ``python -m name args...``.
`modulename` is the name of the module, possibly a dot-separated name.
`args` is the argument array to present as sys.argv, including the first
element naming the module being executed.
"""
openfile = None
glo, loc = globals(), locals()
try:
try:
# Search for the module - inside its parent package, if any - using
# standard import mechanics.
if '.' in modulename:
packagename, name = rsplit1(modulename, '.')
package = __import__(packagename, glo, loc, ['__path__'])
searchpath = package.__path__
else:
packagename, name = None, modulename
searchpath = None # "top-level search" in imp.find_module()
openfile, pathname, _ = imp.find_module(name, searchpath)
# Complain if this is a magic non-file module.
if openfile is None and pathname is None:
raise NoSource(
"module does not live in a file: %r" % modulename
)
# If `modulename` is actually a package, not a mere module, then we
# pretend to be Python 2.7 and try running its __main__.py script.
if openfile is None:
packagename = modulename
name = '__main__'
package = __import__(packagename, glo, loc, ['__path__'])
searchpath = package.__path__
openfile, pathname, _ = imp.find_module(name, searchpath)
except ImportError:
_, err, _ = sys.exc_info()
raise NoSource(str(err))
finally:
if openfile:
openfile.close()
# Finally, hand the file off to run_python_file for execution.
run_python_file(pathname, args, package=packagename)
def run_python_file(filename, args, package=None):
"""Run a python file as if it were the main program on the command line.
`filename` is the path to the file to execute, it need not be a .py file.
`args` is the argument array to present as sys.argv, including the first
element naming the file being executed. `package` is the name of the
enclosing package, if any.
"""
# Create a module to serve as __main__
old_main_mod = sys.modules['__main__']
main_mod = imp.new_module('__main__')
sys.modules['__main__'] = main_mod
main_mod.__file__ = filename
main_mod.__package__ = package
main_mod.__builtins__ = BUILTINS
# Set sys.argv and the first path element properly.
old_argv = sys.argv
old_path0 = sys.path[0]
sys.argv = args
sys.path[0] = os.path.abspath(os.path.dirname(filename))
try:
# Open the source file.
try:
source_file = open_source(filename)
except IOError:
raise NoSource("No file to run: %r" % filename)
try:
source = source_file.read()
finally:
source_file.close()
# We have the source. `compile` still needs the last line to be clean,
# so make sure it is, then compile a code object from it.
if source[-1] != '\n':
source += '\n'
code = compile(source, filename, "exec")
# Execute the source file.
try:
exec_code_object(code, main_mod.__dict__)
except SystemExit:
# The user called sys.exit(). Just pass it along to the upper
# layers, where it will be handled.
raise
except:
# Something went wrong while executing the user code.
# Get the exc_info, and pack them into an exception that we can
# throw up to the outer loop. We peel two layers off the traceback
# so that the coverage.py code doesn't appear in the final printed
# traceback.
typ, err, tb = sys.exc_info()
raise ExceptionDuringRun(typ, err, tb.tb_next.tb_next)
finally:
# Restore the old __main__
sys.modules['__main__'] = old_main_mod
# Restore the old argv and path
sys.argv = old_argv
sys.path[0] = old_path0
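# Illustrative usage sketch; the script path, module name and arguments below
# are hypothetical:
#
#   run_python_file('/tmp/myscript.py', ['/tmp/myscript.py', '--verbose'])
#   run_python_module('mypkg.tool', ['mypkg.tool', 'input.txt'])
#
# Both helpers install a fresh __main__ module, point sys.argv and sys.path[0]
# at the target, and restore the previous values once execution finishes.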
| bsd-3-clause |
pioneers/topgear | ipython-in-depth/examples/Parallel Computing/interengine/bintree.py | 4 | 7171 | """
BinaryTree inter-engine communication class
use from bintree_script.py
Provides parallel [all]reduce functionality
"""
from __future__ import print_function
import cPickle as pickle
import re
import socket
import uuid
import zmq
from IPython.parallel.util import disambiguate_url
#----------------------------------------------------------------------------
# bintree-related construction/printing helpers
#----------------------------------------------------------------------------
def bintree(ids, parent=None):
"""construct {child:parent} dict representation of a binary tree
keys are the nodes in the tree, and values are the parent of each node.
The root node has parent `parent`, default: None.
>>> tree = bintree(range(7))
>>> tree
{0: None, 1: 0, 2: 1, 3: 1, 4: 0, 5: 4, 6: 4}
>>> print_bintree(tree)
0
1
2
3
4
5
6
"""
parents = {}
n = len(ids)
if n == 0:
return parents
root = ids[0]
parents[root] = parent
if len(ids) == 1:
return parents
else:
ids = ids[1:]
n = len(ids)
left = bintree(ids[:n/2], parent=root)
right = bintree(ids[n/2:], parent=root)
parents.update(left)
parents.update(right)
return parents
def reverse_bintree(parents):
"""construct {parent:[children]} dict from {child:parent}
keys are the nodes in the tree, and values are the lists of children
of that node in the tree.
reverse_tree[None] is the root node
>>> tree = bintree(range(7))
>>> reverse_bintree(tree)
{None: 0, 0: [1, 4], 4: [5, 6], 1: [2, 3]}
"""
children = {}
for child,parent in parents.iteritems():
if parent is None:
children[None] = child
continue
elif parent not in children:
children[parent] = []
children[parent].append(child)
return children
def depth(n, tree):
"""get depth of an element in the tree"""
d = 0
parent = tree[n]
while parent is not None:
d += 1
parent = tree[parent]
return d
def print_bintree(tree, indent=' '):
"""print a binary tree"""
for n in sorted(tree.keys()):
print("%s%s" % (indent * depth(n,tree), n))
#----------------------------------------------------------------------------
# Communicator class for a binary-tree map
#----------------------------------------------------------------------------
ip_pat = re.compile(r'^\d+\.\d+\.\d+\.\d+$')
def disambiguate_dns_url(url, location):
"""accept either IP address or dns name, and return IP"""
if not ip_pat.match(location):
location = socket.gethostbyname(location)
return disambiguate_url(url, location)
class BinaryTreeCommunicator(object):
id = None
pub = None
sub = None
downstream = None
upstream = None
pub_url = None
tree_url = None
def __init__(self, id, interface='tcp://*', root=False):
self.id = id
self.root = root
# create context and sockets
self._ctx = zmq.Context()
if root:
self.pub = self._ctx.socket(zmq.PUB)
else:
self.sub = self._ctx.socket(zmq.SUB)
self.sub.setsockopt(zmq.SUBSCRIBE, b'')
self.downstream = self._ctx.socket(zmq.PULL)
self.upstream = self._ctx.socket(zmq.PUSH)
# bind to ports
interface_f = interface + ":%i"
if self.root:
pub_port = self.pub.bind_to_random_port(interface)
self.pub_url = interface_f % pub_port
tree_port = self.downstream.bind_to_random_port(interface)
self.tree_url = interface_f % tree_port
self.downstream_poller = zmq.Poller()
self.downstream_poller.register(self.downstream, zmq.POLLIN)
# guess first public IP from socket
self.location = socket.gethostbyname_ex(socket.gethostname())[-1][0]
def __del__(self):
self.downstream.close()
self.upstream.close()
if self.root:
self.pub.close()
else:
self.sub.close()
self._ctx.term()
@property
def info(self):
"""return the connection info for this object's sockets."""
return (self.tree_url, self.location)
def connect(self, peers, btree, pub_url, root_id=0):
"""connect to peers. `peers` will be a dict of 4-tuples, keyed by name.
{peer : (ident, addr, pub_addr, location)}
        where peer is the name, ident is the XREP identity, addr, pub_addr are
        the tree and publish connection addresses, and location is the peer's
        IP address or hostname.
"""
# count the number of children we have
self.nchildren = btree.values().count(self.id)
if self.root:
return # root only binds
root_location = peers[root_id][-1]
self.sub.connect(disambiguate_dns_url(pub_url, root_location))
parent = btree[self.id]
tree_url, location = peers[parent]
self.upstream.connect(disambiguate_dns_url(tree_url, location))
def serialize(self, obj):
"""serialize objects.
Must return list of sendable buffers.
Can be extended for more efficient/noncopying serialization of numpy arrays, etc.
"""
return [pickle.dumps(obj)]
def unserialize(self, msg):
"""inverse of serialize"""
return pickle.loads(msg[0])
def publish(self, value):
assert self.root
self.pub.send_multipart(self.serialize(value))
def consume(self):
assert not self.root
return self.unserialize(self.sub.recv_multipart())
def send_upstream(self, value, flags=0):
assert not self.root
self.upstream.send_multipart(self.serialize(value), flags=flags|zmq.NOBLOCK)
def recv_downstream(self, flags=0, timeout=2000.):
# wait for a message, so we won't block if there was a bug
self.downstream_poller.poll(timeout)
msg = self.downstream.recv_multipart(zmq.NOBLOCK|flags)
return self.unserialize(msg)
def reduce(self, f, value, flat=True, all=False):
"""parallel reduce on binary tree
if flat:
value is an entry in the sequence
else:
value is a list of entries in the sequence
if all:
broadcast final result to all nodes
else:
only root gets final result
"""
if not flat:
value = reduce(f, value)
for i in range(self.nchildren):
value = f(value, self.recv_downstream())
if not self.root:
self.send_upstream(value)
if all:
if self.root:
self.publish(value)
else:
value = self.consume()
return value
def allreduce(self, f, value, flat=True):
"""parallel reduce followed by broadcast of the result"""
return self.reduce(f, value, flat=flat, all=True)
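# Illustrative sketch of a sum-allreduce once every engine has built and
# connected its communicator (the ids, values and operator are hypothetical):
#
#   com = BinaryTreeCommunicator(my_id, root=(my_id == root_id))
#   # ... exchange com.info / com.pub_url between engines, then com.connect(...)
#   total = com.allreduce(lambda a, b: a + b, local_value)
#
# Each engine folds the values received from its children into its own value,
# pushes the partial result to its parent, and the root publishes the final
# result so that every engine returns the same total.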
| apache-2.0 |
DreamLab/contrail-controller | src/nodemgr/common/event_manager.py | 7 | 20742 | #
# Copyright (c) 2015 Juniper Networks, Inc. All rights reserved.
#
import gevent
import json
import ConfigParser
from StringIO import StringIO
from ConfigParser import NoOptionError, NoSectionError
import sys
import os
import socket
import time
import subprocess
from subprocess import Popen, PIPE
import supervisor.xmlrpc
import xmlrpclib
from supervisor import childutils
from nodemgr.common.event_listener_protocol_nodemgr import \
EventListenerProtocolNodeMgr
from nodemgr.common.process_stat import ProcessStat
from sandesh_common.vns.constants import INSTANCE_ID_DEFAULT
import discoveryclient.client as client
class EventManager(object):
rules_data = []
group_names = []
process_state_db = {}
FAIL_STATUS_DUMMY = 0x1
FAIL_STATUS_DISK_SPACE = 0x2
FAIL_STATUS_SERVER_PORT = 0x4
FAIL_STATUS_NTP_SYNC = 0x8
FAIL_STATUS_DISK_SPACE_NA = 0x10
def __init__(self, rule_file, discovery_server,
discovery_port, collector_addr):
self.stdin = sys.stdin
self.stdout = sys.stdout
self.stderr = sys.stderr
self.rule_file = rule_file
self.rules_data = ''
self.max_cores = 4
self.max_old_cores = 3
self.max_new_cores = 1
self.all_core_file_list = []
self.core_dir_modified_time = 0
self.tick_count = 0
self.fail_status_bits = 0
self.prev_fail_status_bits = 1
self.instance_id = INSTANCE_ID_DEFAULT
self.discovery_server = discovery_server
self.discovery_port = discovery_port
self.collector_addr = collector_addr
self.listener_nodemgr = EventListenerProtocolNodeMgr()
self.sandesh_global = None
# Get all the current processes in the node
def get_current_process(self):
proxy = xmlrpclib.ServerProxy(
'http://127.0.0.1',
transport=supervisor.xmlrpc.SupervisorTransport(
None, None, serverurl=self.supervisor_serverurl))
# Add all current processes to make sure nothing misses the radar
process_state_db = {}
for proc_info in proxy.supervisor.getAllProcessInfo():
if (proc_info['name'] != proc_info['group']):
proc_name = proc_info['group'] + ":" + proc_info['name']
else:
proc_name = proc_info['name']
process_stat_ent = self.get_process_stat_object(proc_name)
process_stat_ent.process_state = "PROCESS_STATE_" + \
proc_info['statename']
if (process_stat_ent.process_state ==
'PROCESS_STATE_RUNNING'):
process_stat_ent.start_time = str(proc_info['start'] * 1000000)
process_stat_ent.start_count += 1
process_state_db[proc_name] = process_stat_ent
return process_state_db
# end get_current_process
# Add the current processes in the node to db
def add_current_process(self):
self.process_state_db = self.get_current_process()
# end add_current_process
# In case the processes in the Node can change, update current processes
def update_current_process(self):
process_state_db = self.get_current_process()
old_process_set = set(self.process_state_db.keys())
new_process_set = set(process_state_db.keys())
common_process_set = new_process_set.intersection(old_process_set)
added_process_set = new_process_set - common_process_set
deleted_process_set = old_process_set - common_process_set
for deleted_process in deleted_process_set:
self.delete_process_handler(deleted_process)
for added_process in added_process_set:
self.add_process_handler(
added_process, process_state_db[added_process])
# end update_current_process
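    # Illustrative sketch of the set arithmetic above (hypothetical names):
    #   old = {'contrail-api', 'contrail-schema'}
    #   new = {'contrail-api', 'contrail-svc-monitor'}
    #   added   -> {'contrail-svc-monitor'}  (passed to add_process_handler)
    #   deleted -> {'contrail-schema'}       (passed to delete_process_handler)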
# process is deleted, send state & remove it from db
def delete_process_handler(self, deleted_process):
self.process_state_db[deleted_process].deleted = True
group_val = self.process_state_db[deleted_process].group
self.send_process_state_db([group_val])
del self.process_state_db[deleted_process]
# end delete_process_handler
# new process added, update db & send state
def add_process_handler(self, added_process, process_info):
self.process_state_db[added_process] = process_info
group_val = self.process_state_db[added_process].group
self.send_process_state_db([group_val])
# end add_process_handler
def get_discovery_client(self):
_disc = client.DiscoveryClient(
self.discovery_server, self.discovery_port, self.module_id)
return _disc
def check_ntp_status(self):
ntp_status_cmd = 'ntpq -n -c pe | grep "^*"'
proc = Popen(ntp_status_cmd, shell=True, stdout=PIPE, stderr=PIPE)
(output, errout) = proc.communicate()
if proc.returncode != 0:
self.fail_status_bits |= self.FAIL_STATUS_NTP_SYNC
else:
self.fail_status_bits &= ~self.FAIL_STATUS_NTP_SYNC
self.send_nodemgr_process_status()
def send_process_state_db_base(self, group_names, ProcessInfo,
NodeStatus, NodeStatusUVE):
name = socket.gethostname()
for group in group_names:
process_infos = []
delete_status = True
for key in self.process_state_db:
pstat = self.process_state_db[key]
if (pstat.group != group):
continue
process_info = ProcessInfo()
process_info.process_name = key
process_info.process_state = pstat.process_state
process_info.start_count = pstat.start_count
process_info.stop_count = pstat.stop_count
process_info.exit_count = pstat.exit_count
process_info.last_start_time = pstat.start_time
process_info.last_stop_time = pstat.stop_time
process_info.last_exit_time = pstat.exit_time
process_info.core_file_list = pstat.core_file_list
process_infos.append(process_info)
name = pstat.name
if pstat.deleted == False:
delete_status = False
if not process_infos:
continue
# send node UVE
node_status = NodeStatus()
node_status.name = name
node_status.deleted = delete_status
node_status.process_info = process_infos
node_status.all_core_file_list = self.all_core_file_list
node_status_uve = NodeStatusUVE(data=node_status)
sys.stderr.write('Sending UVE:' + str(node_status_uve))
node_status_uve.send()
def send_all_core_file(self):
stat_command_option = "stat --printf=%Y /var/crashes"
modified_time = Popen(
stat_command_option.split(),
stdout=PIPE).communicate()
if modified_time[0] == self.core_dir_modified_time:
return
self.core_dir_modified_time = modified_time[0]
ls_command_option = "ls /var/crashes"
(corename, stderr) = Popen(
ls_command_option.split(),
stdout=PIPE).communicate()
self.all_core_file_list = corename.split('\n')[0:-1]
self.send_process_state_db(self.group_names)
def get_process_stat_object(self, pname):
return ProcessStat(pname)
def send_process_state(self, pname, pstate, pheaders):
# update process stats
if pname in self.process_state_db.keys():
proc_stat = self.process_state_db[pname]
else:
proc_stat = self.get_process_stat_object(pname)
if not proc_stat.group in self.group_names:
self.group_names.append(proc_stat.group)
proc_stat.process_state = pstate
send_uve = False
if (pstate == 'PROCESS_STATE_RUNNING'):
proc_stat.start_count += 1
proc_stat.start_time = str(int(time.time() * 1000000))
send_uve = True
if (pstate == 'PROCESS_STATE_STOPPED'):
proc_stat.stop_count += 1
send_uve = True
proc_stat.stop_time = str(int(time.time() * 1000000))
proc_stat.last_exit_unexpected = False
if (pstate == 'PROCESS_STATE_EXITED'):
proc_stat.exit_count += 1
send_uve = True
proc_stat.exit_time = str(int(time.time() * 1000000))
if not(int(pheaders['expected'])):
self.stderr.write(
pname + " with pid:" + pheaders['pid'] +
" exited abnormally\n")
proc_stat.last_exit_unexpected = True
# check for core file for this exit
find_command_option = \
"find /var/crashes -name core.[A-Za-z]*." + \
pheaders['pid'] + "*"
self.stderr.write(
"find command option for cores:" +
find_command_option + "\n")
(corename, stderr) = Popen(
find_command_option.split(),
stdout=PIPE).communicate()
self.stderr.write("core file: " + corename + "\n")
if ((corename is not None) and (len(corename.rstrip()) >= 1)):
# before adding to the core file list make
# sure that we do not have too many cores
sys.stderr.write(
'core_file_list:' + str(proc_stat.core_file_list) +
", self.max_cores:" + str(self.max_cores) + "\n")
if (len(proc_stat.core_file_list) == self.max_cores):
# get rid of old cores
sys.stderr.write(
'max # of cores reached:' +
str(self.max_cores) + "\n")
val = self.max_cores - self.max_new_cores + 1
core_files_to_be_deleted = \
proc_stat.core_file_list[self.max_old_cores:(val)]
sys.stderr.write(
'deleting core file list:' +
str(core_files_to_be_deleted) + "\n")
for core_file in core_files_to_be_deleted:
sys.stderr.write(
'deleting core file:' + core_file + "\n")
try:
os.remove(core_file)
except OSError as e:
sys.stderr.write('ERROR: ' + str(e) + '\n')
# now delete the list as well
val = self.max_cores - self.max_new_cores + 1
del proc_stat.core_file_list[self.max_old_cores:(val)]
# now add the new core to the core file list
proc_stat.core_file_list.append(corename.rstrip())
sys.stderr.write(
"# of cores for " + pname + ":" +
str(len(proc_stat.core_file_list)) + "\n")
# update process state database
self.process_state_db[pname] = proc_stat
        with open('/var/log/contrail/process_state' +
                  self.node_type + ".json", 'w') as f:
            f.write(json.dumps(
                self.process_state_db,
                default=lambda obj: obj.__dict__))
        if send_uve:
            self.send_process_state_db([proc_stat.group])
def send_nodemgr_process_status_base(self, ProcessStateNames,
ProcessState, ProcessStatus,
NodeStatus, NodeStatusUVE):
if (self.prev_fail_status_bits != self.fail_status_bits):
self.prev_fail_status_bits = self.fail_status_bits
fail_status_bits = self.fail_status_bits
state, description = self.get_process_state(fail_status_bits)
process_status = ProcessStatus(
module_id=self.module_id, instance_id=self.instance_id,
state=state, description=description)
process_status_list = []
process_status_list.append(process_status)
node_status = NodeStatus(
name=socket.gethostname(),
process_status=process_status_list)
node_status_uve = NodeStatusUVE(data=node_status)
sys.stderr.write('Sending UVE:' + str(node_status_uve))
node_status_uve.send()
def send_disk_usage_info_base(self, NodeStatusUVE, NodeStatus,
DiskPartitionUsageStats):
partition = subprocess.Popen(
"df -T -t ext2 -t ext3 -t ext4 -t xfs",
shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
disk_usage_infos = []
for line in partition.stdout:
if 'Filesystem' in line:
continue
partition_name = line.rsplit()[0]
partition_type = line.rsplit()[1]
partition_space_used_1k = line.rsplit()[3]
partition_space_available_1k = line.rsplit()[4]
disk_usage_stat = DiskPartitionUsageStats()
try:
disk_usage_stat.partition_type = str(partition_type)
disk_usage_stat.partition_name = str(partition_name)
disk_usage_stat.partition_space_used_1k = \
int(partition_space_used_1k)
disk_usage_stat.partition_space_available_1k = \
int(partition_space_available_1k)
except ValueError:
sys.stderr.write("Failed to get local disk space usage" + "\n")
else:
disk_usage_infos.append(disk_usage_stat)
# send node UVE
node_status = NodeStatus(
name=socket.gethostname(), disk_usage_info=disk_usage_infos)
node_status_uve = NodeStatusUVE(data=node_status)
sys.stderr.write('Sending UVE:' + str(node_status_uve))
node_status_uve.send()
# end send_disk_usage_info
def get_process_state_base(self, fail_status_bits,
ProcessStateNames, ProcessState):
if fail_status_bits:
state = ProcessStateNames[ProcessState.NON_FUNCTIONAL]
description = self.get_failbits_nodespecific_desc(fail_status_bits)
            if (description == ""):
if fail_status_bits & self.FAIL_STATUS_NTP_SYNC:
if description != "":
description += " "
description += "NTP state unsynchronized."
else:
state = ProcessStateNames[ProcessState.FUNCTIONAL]
description = ''
return state, description
def get_failbits_nodespecific_desc(self, fail_status_bits):
return ""
def event_process_state(self, pheaders, headers):
self.stderr.write("process:" + pheaders['processname'] + "," +
"groupname:" + pheaders['groupname'] + "," +
"eventname:" + headers['eventname'] + '\n')
pname = pheaders['processname']
if (pheaders['processname'] != pheaders['groupname']):
pname = pheaders['groupname'] + ":" + pheaders['processname']
self.send_process_state(pname, headers['eventname'], pheaders)
for rules in self.rules_data['Rules']:
if 'processname' in rules:
if ((rules['processname'] == pheaders['groupname']) and
(rules['process_state'] == headers['eventname'])):
self.stderr.write("got a hit with:" + str(rules) + '\n')
# do not make async calls
try:
ret_code = subprocess.call(
[rules['action']], shell=True,
stdout=self.stderr, stderr=self.stderr)
except Exception as e:
self.stderr.write(
'Failed to execute action: ' +
rules['action'] + ' with err ' + str(e) + '\n')
else:
if ret_code:
self.stderr.write(
'Execution of action ' +
rules['action'] + ' returned err ' +
str(ret_code) + '\n')
def event_process_communication(self, pdata):
flag_and_value = pdata.partition(":")
self.stderr.write("Flag:" + flag_and_value[0] +
" Value:" + flag_and_value[2] + "\n")
for rules in self.rules_data['Rules']:
if 'flag_name' in rules:
if ((rules['flag_name'] == flag_and_value[0]) and
(rules['flag_value'].strip() == flag_and_value[2].strip())):
self.stderr.write("got a hit with:" + str(rules) + '\n')
cmd_and_args = ['/usr/bin/bash', '-c', rules['action']]
subprocess.Popen(cmd_and_args)
def event_tick_60(self, prev_current_time):
self.tick_count += 1
# send other core file
self.send_all_core_file()
# send disk usage info periodically
self.send_disk_usage_info()
# typical ntp sync time is about 5 min - first time,
# we scan only after 10 min
if self.tick_count >= 10:
self.check_ntp_status()
current_time = int(time.time())
if ((abs(current_time - prev_current_time)) > 300):
# update all process start_times with the updated time
# Compute the elapsed time and subtract them from
# current time to get updated values
sys.stderr.write(
"Time lapse detected " +
str(abs(current_time - prev_current_time)) + "\n")
for key in self.process_state_db:
pstat = self.process_state_db[key]
                if pstat.start_time != '':
pstat.start_time = str(
(int(current_time - (prev_current_time -
((int)(pstat.start_time)) / 1000000))) * 1000000)
if (pstat.process_state == 'PROCESS_STATE_STOPPED'):
                    if pstat.stop_time != '':
pstat.stop_time = str(
int(current_time - (prev_current_time -
((int)(pstat.stop_time)) / 1000000)) *
1000000)
if (pstat.process_state == 'PROCESS_STATE_EXITED'):
                    if pstat.exit_time != '':
pstat.exit_time = str(
int(current_time - (prev_current_time -
((int)(pstat.exit_time)) / 1000000)) *
1000000)
# update process state database
self.process_state_db[key] = pstat
            try:
                json_file = '/var/log/contrail/process_state' + \
                    self.node_type + ".json"
                with open(json_file, 'w') as f:
                    f.write(
                        json.dumps(
                            self.process_state_db,
                            default=lambda obj: obj.__dict__))
            except Exception:
                sys.stderr.write("Unable to write json")
self.send_process_state_db(self.group_names)
prev_current_time = int(time.time())
return prev_current_time
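    # Worked example for the time-lapse correction above (hypothetical numbers):
    # a process started at t=100s (start_time '100000000'), the previous tick
    # ran at prev_current_time=160s, and the clock then jumps to
    # current_time=760s. The elapsed run time is 160 - 100 = 60s, so start_time
    # is rewritten to (760 - 60) * 1000000 = 700000000, preserving the
    # process's apparent age across the jump.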
def runforever(self, test=False):
prev_current_time = int(time.time())
while 1:
# we explicitly use self.stdin, self.stdout, and self.stderr
# instead of sys.* so we can unit test this code
headers, payload = self.listener_nodemgr.wait(
self.stdin, self.stdout)
pheaders, pdata = childutils.eventdata(payload + '\n')
# check for process state change events
if headers['eventname'].startswith("PROCESS_STATE"):
self.event_process_state(pheaders, headers)
# check for flag value change events
if headers['eventname'].startswith("PROCESS_COMMUNICATION"):
self.event_process_communication(pdata)
# do periodic events
if headers['eventname'].startswith("TICK_60"):
prev_current_time = self.event_tick_60(prev_current_time)
self.listener_nodemgr.ok(self.stdout)
| apache-2.0 |
stefanseefeld/numba | numba/cuda/tests/cudapy/test_nondet.py | 6 | 1467 | from __future__ import print_function, absolute_import
import numpy as np
from numba import cuda, float32
from numba.cuda.testing import unittest
def generate_input(n):
A = np.array(np.arange(n * n).reshape(n, n), dtype=np.float32)
B = np.array(np.arange(n) + 0, dtype=A.dtype)
return A, B
class TestCudaNonDet(unittest.TestCase):
def test_for_pre(self):
"""Test issue with loop not running due to bad sign-extension at the for loop
precondition.
"""
@cuda.jit(argtypes=[float32[:, :], float32[:, :], float32[:]])
def diagproduct(c, a, b):
startX, startY = cuda.grid(2)
gridX = cuda.gridDim.x * cuda.blockDim.x
gridY = cuda.gridDim.y * cuda.blockDim.y
height = c.shape[0]
width = c.shape[1]
for x in range(startX, width, (gridX)):
for y in range(startY, height, (gridY)):
c[y, x] = a[y, x] * b[x]
N = 8
A, B = generate_input(N)
E = np.zeros(A.shape, dtype=A.dtype)
F = np.empty(A.shape, dtype=A.dtype)
E = np.dot(A, np.diag(B))
blockdim = (32, 8)
griddim = (1, 1)
dA = cuda.to_device(A)
dB = cuda.to_device(B)
dF = cuda.to_device(F, copy=False)
diagproduct[griddim, blockdim](dF, dA, dB)
dF.to_host()
self.assertTrue(np.allclose(F, E))
if __name__ == '__main__':
unittest.main()
| bsd-2-clause |
GeoscienceAustralia/eo-datasets | tests/__init__.py | 2 | 5206 | # coding=utf-8
from __future__ import absolute_import
import atexit
import os
import pathlib
import shutil
import sys
import tempfile
from pathlib import Path
def assert_same(o1, o2, prefix=""):
"""
Assert the two are equal.
Compares property values one-by-one recursively to print friendly error messages.
(ie. the exact property that differs)
:type o1: object
:type o2: object
:raises: AssertionError
"""
__tracebackhide__ = True
def _compare(k, val1, val2):
assert_same(val1, val2, prefix=prefix + "." + str(k))
if isinstance(o1, list) and isinstance(o2, list):
assert len(o1) == len(o2), "Differing lengths: %s" % prefix
for i, val in enumerate(o1):
_compare(i, val, o2[i])
elif isinstance(o1, dict) and isinstance(o2, dict):
for k, val in o1.items():
assert k in o2, "%s[%r] is missing.\n\t%r\n\t%r" % (prefix, k, o1, o2)
for k, val in o2.items():
assert k in o1, "%s[%r] is missing.\n\t%r\n\t%r" % (prefix, k, o2, o1)
_compare(k, val, o1[k])
elif o1 != o2:
sys.stderr.write("%r\n" % o1)
sys.stderr.write("%r\n" % o2)
raise AssertionError("Mismatch for property %r: %r != %r" % (prefix, o1, o2))
def assert_file_structure(folder, expected_structure, root=""):
"""
Assert that the contents of a folder (filenames and subfolder names recursively)
match the given nested dictionary structure.
:type folder: pathlib.Path
:type expected_structure: dict[str,str|dict]
"""
__tracebackhide__ = True
required_filenames = set(
name for name, option in expected_structure.items() if option != "optional"
)
optional_filenames = set(
name for name, option in expected_structure.items() if option == "optional"
)
actual_filenames = {f.name for f in folder.iterdir()}
if required_filenames != (actual_filenames - optional_filenames):
missing_files = required_filenames - actual_filenames
missing_text = "Missing: %r" % (sorted(list(missing_files)))
extra_files = actual_filenames - required_filenames
added_text = "Extra : %r" % (sorted(list(extra_files)))
raise AssertionError(
"Folder mismatch of %r\n\t%s\n\t%s" % (root, missing_text, added_text)
)
for k, v in expected_structure.items():
id_ = "%s/%s" % (root, k) if root else k
is_optional = v == "optional"
f = folder.joinpath(k)
if not f.exists():
if is_optional:
continue
assert False, "%s is missing" % (id_,)
elif isinstance(v, dict):
assert f.is_dir(), "%s is not a dir" % (id_,)
assert_file_structure(f, v, id_)
elif isinstance(v, str):
assert f.is_file(), "%s is not a file" % (id_,)
else:
assert (
False
), "Only strings and dicts expected when defining a folder structure."
def write_files(file_dict):
"""
Convenience method for writing a bunch of files to a temporary directory.
Dict format is "filename": "text content"
If content is another dict, it is created recursively in the same manner.
writeFiles({'test.txt': 'contents of text file'})
:type file_dict: dict
:rtype: pathlib.Path
:return: Created temporary directory path
"""
containing_dir = tempfile.mkdtemp(suffix="neotestrun")
_write_files_to_dir(containing_dir, file_dict)
def remove_if_exists(path):
if os.path.exists(path):
shutil.rmtree(path)
atexit.register(remove_if_exists, containing_dir)
return pathlib.Path(containing_dir)
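# Illustrative usage sketch combining the helpers in this module; the function
# name, file names and contents below are hypothetical:
def _example_structure_roundtrip():
    tmp = write_files({
        "ga-metadata.yaml": "platform: LANDSAT_8\n",
        "scene01": {"report.txt": "ok"},
    })
    assert_file_structure(tmp, {
        "ga-metadata.yaml": "",
        "scene01": {"report.txt": ""},
        "thumbnail.jpg": "optional",
    })
    return as_file_list(tmp)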
def _write_files_to_dir(directory_path, file_dict):
"""
Convenience method for writing a bunch of files to a given directory.
:type directory_path: str
:type file_dict: dict
"""
for filename, contents in file_dict.items():
path = os.path.join(directory_path, filename)
if isinstance(contents, dict):
os.mkdir(path)
_write_files_to_dir(path, contents)
else:
with open(path, "w") as f:
if isinstance(contents, list):
f.writelines(contents)
elif isinstance(contents, str):
f.write(contents)
else:
raise Exception("Unexpected file contents: %s" % type(contents))
def temp_dir():
"""
Create and return a temporary directory that will be deleted automatically on exit.
:rtype: pathlib.Path
"""
return write_files({})
def file_of_size(path, size_mb):
"""
Create a blank file of the given size.
"""
with open(path, "wb") as f:
f.seek(size_mb * 1024 * 1024 - 1)
f.write(b"\0")
def as_file_list(path):
"""
Build a flat list of filenames relative to the given folder
(similar to the contents of package.sha1 files)
"""
output = []
for directory, _, files in os.walk(str(path)):
output.extend(
str(Path(directory).relative_to(path).joinpath(file_)) for file_ in files
)
return output
| apache-2.0 |
castroflavio/ryu | ryu/exception.py | 52 | 1824 | # Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2011 Isaku Yamahata <yamahata at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class RyuException(Exception):
message = 'An unknown exception'
def __init__(self, msg=None, **kwargs):
self.kwargs = kwargs
if msg is None:
msg = self.message
try:
msg = msg % kwargs
except Exception:
msg = self.message
super(RyuException, self).__init__(msg)
class OFPUnknownVersion(RyuException):
message = 'unknown version %(version)x'
class OFPMalformedMessage(RyuException):
message = 'malformed message'
class NetworkNotFound(RyuException):
message = 'no such network id %(network_id)s'
class NetworkAlreadyExist(RyuException):
message = 'network id %(network_id)s already exists'
class PortNotFound(RyuException):
message = 'no such port (%(dpid)s, %(port)s) in network %(network_id)s'
class PortAlreadyExist(RyuException):
message = 'port (%(dpid)s, %(port)s) in network %(network_id)s ' \
'already exists'
class PortUnknown(RyuException):
message = 'unknown network id for port (%(dpid)s %(port)s)'
class MacAddressDuplicated(RyuException):
message = 'MAC address %(mac)s is duplicated'
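# Illustrative sketch of the formatting behaviour above (hypothetical values):
#
#   NetworkNotFound(network_id='net-42')   # message: 'no such network id net-42'
#   OFPUnknownVersion(version=0x05)        # message: 'unknown version 5'
#
# If the keyword arguments do not match the template, the unformatted message
# string is used instead of raising a formatting error.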
| apache-2.0 |
Nexenta/cinder | cinder/interface/volume_management_driver.py | 6 | 3988 | # Copyright 2016 Dell Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Manage/unmanage existing volume driver interface.
"""
from cinder.interface import base
class VolumeManagementDriver(base.CinderInterface):
"""Interface for drivers that support managing existing volumes."""
def manage_existing(self, volume, existing_ref):
"""Brings an existing backend storage object under Cinder management.
existing_ref is passed straight through from the API request's
manage_existing_ref value, and it is up to the driver how this should
be interpreted. It should be sufficient to identify a storage object
that the driver should somehow associate with the newly-created cinder
volume structure.
There are two ways to do this:
        1. Rename the backend storage object so that it matches
           volume['name'], which is how drivers traditionally map between a
           cinder volume and the associated backend storage object.
2. Place some metadata on the volume, or somewhere in the backend, that
allows other driver requests (e.g. delete, clone, attach, detach...)
to locate the backend storage object when required.
If the existing_ref doesn't make sense, or doesn't refer to an existing
backend storage object, raise a ManageExistingInvalidReference
exception.
The volume may have a volume_type, and the driver can inspect that and
compare against the properties of the referenced backend storage
object. If they are incompatible, raise a
ManageExistingVolumeTypeMismatch, specifying a reason for the failure.
:param volume: Cinder volume to manage
:param existing_ref: Dictionary with keys 'source-id', 'source-name'
with driver-specific values to identify a backend
storage object.
:raises: ManageExistingInvalidReference If the existing_ref doesn't
make sense, or doesn't refer to an existing backend storage
object.
:raises: ManageExistingVolumeTypeMismatch If there is a mismatch
between the volume type and the properties of the existing
backend storage object.
"""
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing.
When calculating the size, round up to the next GB.
:param volume: Cinder volume to manage
:param existing_ref: Dictionary with keys 'source-id', 'source-name'
with driver-specific values to identify a backend
storage object.
:raises: ManageExistingInvalidReference If the existing_ref doesn't
make sense, or doesn't refer to an existing backend storage
object.
"""
def unmanage(self, volume):
"""Removes the specified volume from Cinder management.
Does not delete the underlying backend storage object.
For most drivers, this will not need to do anything. However, some
drivers might use this call as an opportunity to clean up any
Cinder-specific configuration that they have associated with the
backend storage object.
:param volume: Cinder volume to unmanage
"""
| apache-2.0 |
marckuz/django | django/forms/utils.py | 241 | 6131 | from __future__ import unicode_literals
import json
import sys
from django.conf import settings
from django.core.exceptions import ValidationError # backwards compatibility
from django.utils import six, timezone
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.html import escape, format_html, format_html_join, html_safe
from django.utils.translation import ugettext_lazy as _
try:
from collections import UserList
except ImportError: # Python 2
from UserList import UserList
def pretty_name(name):
"""Converts 'first_name' to 'First name'"""
if not name:
return ''
return name.replace('_', ' ').capitalize()
def flatatt(attrs):
"""
Convert a dictionary of attributes to a single string.
The returned string will contain a leading space followed by key="value",
XML-style pairs. In the case of a boolean value, the key will appear
without a value. It is assumed that the keys do not need to be
XML-escaped. If the passed dictionary is empty, then return an empty
string.
The result is passed through 'mark_safe' (by way of 'format_html_join').
"""
key_value_attrs = []
boolean_attrs = []
for attr, value in attrs.items():
if isinstance(value, bool):
if value:
boolean_attrs.append((attr,))
else:
key_value_attrs.append((attr, value))
return (
format_html_join('', ' {}="{}"', sorted(key_value_attrs)) +
format_html_join('', ' {}', sorted(boolean_attrs))
)
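# Illustrative sketch of the behaviour implemented above (hypothetical attrs):
#
#   flatatt({'id': 'header', 'class': 'news', 'required': True, 'hidden': False})
#   # -> ' class="news" id="header" required'
#
# Sorted key="value" pairs come first, True booleans render as bare attribute
# names, and False booleans are omitted entirely.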
@html_safe
@python_2_unicode_compatible
class ErrorDict(dict):
"""
A collection of errors that knows how to display itself in various formats.
The dictionary keys are the field names, and the values are the errors.
"""
def as_data(self):
return {f: e.as_data() for f, e in self.items()}
def as_json(self, escape_html=False):
return json.dumps({f: e.get_json_data(escape_html) for f, e in self.items()})
def as_ul(self):
if not self:
return ''
return format_html(
'<ul class="errorlist">{}</ul>',
format_html_join('', '<li>{}{}</li>', ((k, force_text(v)) for k, v in self.items()))
)
def as_text(self):
output = []
for field, errors in self.items():
output.append('* %s' % field)
output.append('\n'.join(' * %s' % e for e in errors))
return '\n'.join(output)
def __str__(self):
return self.as_ul()
@html_safe
@python_2_unicode_compatible
class ErrorList(UserList, list):
"""
A collection of errors that knows how to display itself in various formats.
"""
def __init__(self, initlist=None, error_class=None):
super(ErrorList, self).__init__(initlist)
if error_class is None:
self.error_class = 'errorlist'
else:
self.error_class = 'errorlist {}'.format(error_class)
def as_data(self):
return ValidationError(self.data).error_list
def get_json_data(self, escape_html=False):
errors = []
for error in self.as_data():
message = list(error)[0]
errors.append({
'message': escape(message) if escape_html else message,
'code': error.code or '',
})
return errors
def as_json(self, escape_html=False):
return json.dumps(self.get_json_data(escape_html))
def as_ul(self):
if not self.data:
return ''
return format_html(
'<ul class="{}">{}</ul>',
self.error_class,
format_html_join('', '<li>{}</li>', ((force_text(e),) for e in self))
)
def as_text(self):
return '\n'.join('* %s' % e for e in self)
def __str__(self):
return self.as_ul()
def __repr__(self):
return repr(list(self))
def __contains__(self, item):
return item in list(self)
def __eq__(self, other):
return list(self) == other
def __ne__(self, other):
return list(self) != other
def __getitem__(self, i):
error = self.data[i]
if isinstance(error, ValidationError):
return list(error)[0]
return force_text(error)
def __reduce_ex__(self, *args, **kwargs):
# The `list` reduce function returns an iterator as the fourth element
# that is normally used for repopulating. Since we only inherit from
# `list` for `isinstance` backward compatibility (Refs #17413) we
# nullify this iterator as it would otherwise result in duplicate
# entries. (Refs #23594)
info = super(UserList, self).__reduce_ex__(*args, **kwargs)
return info[:3] + (None, None)
# Utilities for time zone support in DateTimeField et al.
def from_current_timezone(value):
"""
When time zone support is enabled, convert naive datetimes
entered in the current time zone to aware datetimes.
"""
if settings.USE_TZ and value is not None and timezone.is_naive(value):
current_timezone = timezone.get_current_timezone()
try:
return timezone.make_aware(value, current_timezone)
except Exception:
message = _(
'%(datetime)s couldn\'t be interpreted '
'in time zone %(current_timezone)s; it '
'may be ambiguous or it may not exist.'
)
params = {'datetime': value, 'current_timezone': current_timezone}
six.reraise(ValidationError, ValidationError(
message,
code='ambiguous_timezone',
params=params,
), sys.exc_info()[2])
return value
def to_current_timezone(value):
"""
When time zone support is enabled, convert aware datetimes
to naive datetimes in the current time zone for display.
"""
if settings.USE_TZ and value is not None and timezone.is_aware(value):
current_timezone = timezone.get_current_timezone()
return timezone.make_naive(value, current_timezone)
return value
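# Illustrative sketch, assuming USE_TZ = True and TIME_ZONE = 'Europe/Paris':
#
#   naive = datetime.datetime(2015, 6, 1, 12, 0)
#   aware = from_current_timezone(naive)   # 2015-06-01 12:00:00+02:00
#   to_current_timezone(aware)             # back to the naive local datetime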
| bsd-3-clause |
Peerapps/PeerChat | bitcoin/core/__init__.py | 3 | 25353 | # Copyright (C) 2012-2014 The python-bitcoinlib developers
#
# This file is part of python-bitcoinlib.
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of python-bitcoinlib, including this file, may be copied, modified,
# propagated, or distributed except according to the terms contained in the
# LICENSE file.
from __future__ import absolute_import, division, print_function
import binascii
import hashlib
import socket
import struct
import sys
import time
from . import script
from .script import CScript
from .serialize import *
# Core definitions
COIN = 1000000
CENT = 10000
MAX_MONEY = 2000000000 * COIN
MAX_BLOCK_SIZE = 1000000
MAX_BLOCK_SIGOPS = MAX_BLOCK_SIZE//50
def MoneyRange(nValue):
return 0 <= nValue <= MAX_MONEY
def _py2_x(h):
"""Convert a hex string to bytes"""
return binascii.unhexlify(h)
def x(h):
"""Convert a hex string to bytes"""
return binascii.unhexlify(h.encode('utf8'))
def _py2_b2x(b):
"""Convert bytes to a hex string"""
return binascii.hexlify(b)
def b2x(b):
"""Convert bytes to a hex string"""
return binascii.hexlify(b).decode('utf8')
def _py2_lx(h):
"""Convert a little-endian hex string to bytes
Lets you write uint256's and uint160's the way the Satoshi codebase shows
them.
"""
return binascii.unhexlify(h)[::-1]
def lx(h):
"""Convert a little-endian hex string to bytes
Lets you write uint256's and uint160's the way the Satoshi codebase shows
them.
"""
return binascii.unhexlify(h.encode('utf8'))[::-1]
def _py2_b2lx(b):
"""Convert bytes to a little-endian hex string
Lets you show uint256's and uint160's the way the Satoshi codebase shows
them.
"""
return binascii.hexlify(b[::-1])
def b2lx(b):
"""Convert bytes to a little-endian hex string
Lets you show uint256's and uint160's the way the Satoshi codebase shows
them.
"""
return binascii.hexlify(b[::-1]).decode('utf8')
if not (sys.version > '3'):
x = _py2_x
b2x = _py2_b2x
lx = _py2_lx
b2lx = _py2_b2lx
del _py2_x
del _py2_b2x
del _py2_lx
del _py2_b2lx
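# Illustrative round trips for the hex helpers above:
#
#   x('1234')          # -> b'\x124'       (plain hex to bytes)
#   b2x(b'\x124')      # -> '1234'
#   lx('00ff')         # -> b'\xff\x00'    (little-endian, Satoshi-style)
#   b2lx(b'\xff\x00')  # -> '00ff'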
def str_money_value(value):
"""Convert an integer money value to a fixed point string"""
    r = '%i.%06i' % (value // COIN, value % COIN)
r = r.rstrip('0')
if r[-1] == '.':
r += '0'
return r
class ValidationError(Exception):
"""Base class for all blockchain validation errors
Everything that is related to validating the blockchain, blocks,
transactions, scripts, etc. is derived from this class.
"""
def __make_mutable(cls):
# For speed we use a class decorator that removes the immutable
# restrictions directly. In addition the modified behavior of GetHash() and
# hash() is undone.
cls.__setattr__ = object.__setattr__
cls.__delattr__ = object.__delattr__
cls.GetHash = Serializable.GetHash
cls.__hash__ = Serializable.__hash__
return cls
class COutPoint(ImmutableSerializable):
"""The combination of a transaction hash and an index n into its vout"""
__slots__ = ['hash', 'n']
def __init__(self, hash=b'\x00'*32, n=0xffffffff):
if not len(hash) == 32:
raise ValueError('COutPoint: hash must be exactly 32 bytes; got %d bytes' % len(hash))
object.__setattr__(self, 'hash', hash)
if not (0 <= n <= 0xffffffff):
raise ValueError('COutPoint: n must be in range 0x0 to 0xffffffff; got %x' % n)
object.__setattr__(self, 'n', n)
@classmethod
def stream_deserialize(cls, f):
hash = ser_read(f,32)
n = struct.unpack(b"<I", ser_read(f,4))[0]
return cls(hash, n)
def stream_serialize(self, f):
assert len(self.hash) == 32
f.write(self.hash)
f.write(struct.pack(b"<I", self.n))
def is_null(self):
return ((self.hash == b'\x00'*32) and (self.n == 0xffffffff))
def __repr__(self):
if self.is_null():
return 'COutPoint()'
else:
return 'COutPoint(lx(%r), %i)' % (b2lx(self.hash), self.n)
@classmethod
def from_outpoint(cls, outpoint):
"""Create an immutable copy of an existing OutPoint
        If outpoint is already immutable (outpoint.__class__ is COutPoint) it
        is returned directly.
        """
        if outpoint.__class__ is COutPoint:
            return outpoint
        else:
            return cls(outpoint.hash, outpoint.n)
@__make_mutable
class CMutableOutPoint(COutPoint):
"""A mutable COutPoint"""
__slots__ = []
@classmethod
def from_outpoint(cls, outpoint):
"""Create a mutable copy of an existing COutPoint"""
return cls(outpoint.hash, outpoint.n)
class CTxIn(ImmutableSerializable):
"""An input of a transaction
Contains the location of the previous transaction's output that it claims,
and a signature that matches the output's public key.
"""
__slots__ = ['prevout', 'scriptSig', 'nSequence']
def __init__(self, prevout=COutPoint(), scriptSig=CScript(), nSequence = 0xffffffff):
if not (0 <= nSequence <= 0xffffffff):
raise ValueError('CTxIn: nSequence must be an integer between 0x0 and 0xffffffff; got %x' % nSequence)
object.__setattr__(self, 'nSequence', nSequence)
object.__setattr__(self, 'prevout', prevout)
object.__setattr__(self, 'scriptSig', scriptSig)
@classmethod
def stream_deserialize(cls, f):
prevout = COutPoint.stream_deserialize(f)
scriptSig = script.CScript(BytesSerializer.stream_deserialize(f))
nSequence = struct.unpack(b"<I", ser_read(f,4))[0]
return cls(prevout, scriptSig, nSequence)
def stream_serialize(self, f):
COutPoint.stream_serialize(self.prevout, f)
BytesSerializer.stream_serialize(self.scriptSig, f)
f.write(struct.pack(b"<I", self.nSequence))
def is_final(self):
return (self.nSequence == 0xffffffff)
def __repr__(self):
return "CTxIn(%s, %s, 0x%x)" % (repr(self.prevout), repr(self.scriptSig), self.nSequence)
@classmethod
def from_txin(cls, txin):
"""Create an immutable copy of an existing TxIn
If txin is already immutable (txin.__class__ is CTxIn) it is returned
directly.
"""
if txin.__class__ is CTxIn:
return txin
else:
            return cls(COutPoint.from_outpoint(txin.prevout), txin.scriptSig, txin.nSequence)
@__make_mutable
class CMutableTxIn(CTxIn):
"""A mutable CTxIn"""
__slots__ = []
def __init__(self, prevout=None, scriptSig=CScript(), nSequence = 0xffffffff):
if not (0 <= nSequence <= 0xffffffff):
raise ValueError('CTxIn: nSequence must be an integer between 0x0 and 0xffffffff; got %x' % nSequence)
self.nSequence = nSequence
if prevout is None:
prevout = CMutableOutPoint()
self.prevout = prevout
self.scriptSig = scriptSig
@classmethod
def from_txin(cls, txin):
"""Create a fully mutable copy of an existing TxIn"""
prevout = CMutableOutPoint.from_outpoint(txin.prevout)
return cls(prevout, txin.scriptSig, txin.nSequence)
class CTxOut(ImmutableSerializable):
"""An output of a transaction
Contains the public key that the next input must be able to sign with to
claim it.
"""
__slots__ = ['nValue', 'scriptPubKey']
def __init__(self, nValue=-1, scriptPubKey=script.CScript()):
object.__setattr__(self, 'nValue', int(nValue))
object.__setattr__(self, 'scriptPubKey', scriptPubKey)
@classmethod
def stream_deserialize(cls, f):
nValue = struct.unpack(b"<q", ser_read(f,8))[0]
scriptPubKey = script.CScript(BytesSerializer.stream_deserialize(f))
return cls(nValue, scriptPubKey)
def stream_serialize(self, f):
f.write(struct.pack(b"<q", self.nValue))
BytesSerializer.stream_serialize(self.scriptPubKey, f)
def is_valid(self):
if not MoneyRange(self.nValue):
return False
if not self.scriptPubKey.is_valid():
return False
return True
def __repr__(self):
if self.nValue >= 0:
return "CTxOut(%s*COIN, %r)" % (str_money_value(self.nValue), self.scriptPubKey)
else:
return "CTxOut(%d, %r)" % (self.nValue, self.scriptPubKey)
@classmethod
def from_txout(cls, txout):
"""Create an immutable copy of an existing TxOut
If txout is already immutable (txout.__class__ is CTxOut) then it will
be returned directly.
"""
if txout.__class__ is CTxOut:
return txout
else:
return cls(txout.nValue, txout.scriptPubKey)
@__make_mutable
class CMutableTxOut(CTxOut):
"""A mutable CTxOut"""
__slots__ = []
@classmethod
def from_txout(cls, txout):
"""Create a fullly mutable copy of an existing TxOut"""
return cls(txout.nValue, txout.scriptPubKey)
class CTransaction(ImmutableSerializable):
"""A transaction"""
__slots__ = ['nVersion', 'vin', 'vout', 'nLockTime', 'nTime']
def __init__(self, vin=(), vout=(), nLockTime=0, nVersion=1, nTime=0):
"""Create a new transaction
vin and vout are iterables of transaction inputs and outputs
respectively. If their contents are not already immutable, immutable
copies will be made.
"""
if not (0 <= nLockTime <= 0xffffffff):
raise ValueError('CTransaction: nLockTime must be in range 0x0 to 0xffffffff; got %x' % nLockTime)
object.__setattr__(self, 'nLockTime', nLockTime)
        # A supplied (e.g. deserialized) nTime takes precedence; otherwise default to now.
        object.__setattr__(self, 'nTime', int(nTime) if nTime else int(time.time()))
object.__setattr__(self, 'nVersion', nVersion)
object.__setattr__(self, 'vin', tuple(CTxIn.from_txin(txin) for txin in vin))
object.__setattr__(self, 'vout', tuple(CTxOut.from_txout(txout) for txout in vout))
@classmethod
def stream_deserialize(cls, f):
nVersion = struct.unpack(b"<i", ser_read(f,4))[0]
nTime = struct.unpack(b"<I", ser_read(f,4))[0]
vin = VectorSerializer.stream_deserialize(CTxIn, f)
vout = VectorSerializer.stream_deserialize(CTxOut, f)
nLockTime = struct.unpack(b"<I", ser_read(f,4))[0]
        return cls(vin, vout, nLockTime, nVersion=nVersion, nTime=nTime)
def stream_serialize(self, f):
f.write(struct.pack(b"<i", self.nVersion))
f.write(struct.pack(b"<i", self.nTime))
VectorSerializer.stream_serialize(CTxIn, self.vin, f)
VectorSerializer.stream_serialize(CTxOut, self.vout, f)
f.write(struct.pack(b"<I", self.nLockTime))
def is_coinbase(self):
return len(self.vin) == 1 and self.vin[0].prevout.is_null()
def __repr__(self):
return "CTransaction(%r, %r, %i, %i, %i)" % (self.vin, self.vout, self.nTime, self.nLockTime, self.nVersion)
@classmethod
def from_tx(cls, tx):
"""Create an immutable copy of a pre-existing transaction
If tx is already immutable (tx.__class__ is CTransaction) then it will
be returned directly.
"""
if tx.__class__ is CTransaction:
return tx
else:
            return cls(tx.vin, tx.vout, tx.nLockTime, tx.nVersion, tx.nTime)
@__make_mutable
class CMutableTransaction(CTransaction):
"""A mutable transaction"""
__slots__ = []
    def __init__(self, vin=None, vout=None, nLockTime=0, nVersion=1, nTime=0):
if not (0 <= nLockTime <= 0xffffffff):
raise ValueError('CTransaction: nLockTime must be in range 0x0 to 0xffffffff; got %x' % nLockTime)
self.nLockTime = nLockTime
if vin is None:
vin = []
self.vin = vin
if vout is None:
vout = []
self.vout = vout
self.nVersion = nVersion
        self.nTime = int(nTime) if nTime else int(time.time())
@classmethod
def from_tx(cls, tx):
"""Create a fully mutable copy of a pre-existing transaction"""
vin = [CMutableTxIn.from_txin(txin) for txin in tx.vin]
vout = [CMutableTxOut.from_txout(txout) for txout in tx.vout]
        return cls(vin, vout, tx.nLockTime, tx.nVersion, tx.nTime)
class CBlockHeader(ImmutableSerializable):
"""A block header"""
__slots__ = ['nVersion', 'hashPrevBlock', 'hashMerkleRoot', 'nTime', 'nBits', 'nNonce']
def __init__(self, nVersion=2, hashPrevBlock=b'\x00'*32, hashMerkleRoot=b'\x00'*32, nTime=0, nBits=0, nNonce=0):
object.__setattr__(self, 'nVersion', nVersion)
assert len(hashPrevBlock) == 32
object.__setattr__(self, 'hashPrevBlock', hashPrevBlock)
assert len(hashMerkleRoot) == 32
object.__setattr__(self, 'hashMerkleRoot', hashMerkleRoot)
object.__setattr__(self, 'nTime', nTime)
object.__setattr__(self, 'nBits', nBits)
object.__setattr__(self, 'nNonce', nNonce)
@classmethod
def stream_deserialize(cls, f):
nVersion = struct.unpack(b"<i", ser_read(f,4))[0]
hashPrevBlock = ser_read(f,32)
hashMerkleRoot = ser_read(f,32)
nTime = struct.unpack(b"<I", ser_read(f,4))[0]
nBits = struct.unpack(b"<I", ser_read(f,4))[0]
nNonce = struct.unpack(b"<I", ser_read(f,4))[0]
return cls(nVersion, hashPrevBlock, hashMerkleRoot, nTime, nBits, nNonce)
def stream_serialize(self, f):
f.write(struct.pack(b"<i", self.nVersion))
assert len(self.hashPrevBlock) == 32
f.write(self.hashPrevBlock)
assert len(self.hashMerkleRoot) == 32
f.write(self.hashMerkleRoot)
f.write(struct.pack(b"<I", self.nTime))
f.write(struct.pack(b"<I", self.nBits))
f.write(struct.pack(b"<I", self.nNonce))
@staticmethod
def calc_difficulty(nBits):
"""Calculate difficulty from nBits target"""
nShift = (nBits >> 24) & 0xff
dDiff = float(0x0000ffff) / float(nBits & 0x00ffffff)
while nShift < 29:
dDiff *= 256.0
nShift += 1
while nShift > 29:
dDiff /= 256.0
nShift -= 1
return dDiff
difficulty = property(lambda self: CBlockHeader.calc_difficulty(self.nBits))
def __repr__(self):
return "%s(%i, lx(%s), lx(%s), %s, 0x%08x, 0x%08x)" % \
(self.__class__.__name__, self.nVersion, b2lx(self.hashPrevBlock), b2lx(self.hashMerkleRoot),
self.nTime, self.nBits, self.nNonce)
class CBlock(CBlockHeader):
"""A block including all transactions in it"""
__slots__ = ['vtx', 'vMerkleTree']
@staticmethod
def build_merkle_tree_from_txids(txids):
"""Build a full CBlock merkle tree from txids
txids - iterable of txids
Returns a new merkle tree in deepest first order. The last element is
the merkle root.
WARNING! If you're reading this because you're learning about crypto
and/or designing a new system that will use merkle trees, keep in mind
that the following merkle tree algorithm has a serious flaw related to
duplicate txids, resulting in a vulnerability. (CVE-2012-2459) Bitcoin
has since worked around the flaw, but for new applications you should
use something different; don't just copy-and-paste this code without
understanding the problem first.
"""
merkle_tree = list(txids)
size = len(txids)
j = 0
while size > 1:
for i in range(0, size, 2):
i2 = min(i+1, size-1)
merkle_tree.append(Hash(merkle_tree[j+i] + merkle_tree[j+i2]))
j += size
size = (size + 1) // 2
return merkle_tree
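    # Illustrative sketch for the smallest non-trivial case: for two txids the
    # tree built above is [txid_a, txid_b, Hash(txid_a + txid_b)], so the
    # merkle root of a two-transaction block is simply the hash of the two
    # concatenated txids (Hash() comes in via the star import from .serialize).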
@staticmethod
def build_merkle_tree_from_txs(txs):
"""Build a full merkle tree from transactions"""
txids = [tx.GetHash() for tx in txs]
return CBlock.build_merkle_tree_from_txids(txids)
def calc_merkle_root(self):
"""Calculate the merkle root
The calculated merkle root is not cached; every invocation
re-calculates it from scratch.
"""
if not len(self.vtx):
raise ValueError('Block contains no transactions')
return self.build_merkle_tree_from_txs(self.vtx)[-1]
def __init__(self, nVersion=2, hashPrevBlock=b'\x00'*32, hashMerkleRoot=b'\x00'*32, nTime=0, nBits=0, nNonce=0, vtx=()):
"""Create a new block"""
super(CBlock, self).__init__(nVersion, hashPrevBlock, hashMerkleRoot, nTime, nBits, nNonce)
vMerkleTree = tuple(CBlock.build_merkle_tree_from_txs(vtx))
object.__setattr__(self, 'vMerkleTree', vMerkleTree)
object.__setattr__(self, 'vtx', tuple(CTransaction.from_tx(tx) for tx in vtx))
@classmethod
def stream_deserialize(cls, f):
self = super(CBlock, cls).stream_deserialize(f)
vtx = VectorSerializer.stream_deserialize(CTransaction, f)
vMerkleTree = tuple(CBlock.build_merkle_tree_from_txs(vtx))
object.__setattr__(self, 'vMerkleTree', vMerkleTree)
object.__setattr__(self, 'vtx', tuple(vtx))
return self
def stream_serialize(self, f):
super(CBlock, self).stream_serialize(f)
VectorSerializer.stream_serialize(CTransaction, self.vtx, f)
def get_header(self):
"""Return the block header
Returned header is a new object.
"""
return CBlockHeader(nVersion=self.nVersion,
hashPrevBlock=self.hashPrevBlock,
hashMerkleRoot=self.hashMerkleRoot,
nTime=self.nTime,
nBits=self.nBits,
nNonce=self.nNonce)
def GetHash(self):
"""Return the block hash
Note that this is the hash of the header, not the entire serialized
block.
"""
try:
return self._cached_GetHash
except AttributeError:
_cached_GetHash = self.get_header().GetHash()
object.__setattr__(self, '_cached_GetHash', _cached_GetHash)
return _cached_GetHash
class CoreChainParams(object):
"""Define consensus-critical parameters of a given instance of the Bitcoin system"""
GENESIS_BLOCK = None
PROOF_OF_WORK_LIMIT = None
SUBSIDY_HALVING_INTERVAL = None
NAME = None
class CoreMainParams(CoreChainParams):
NAME = 'mainnet'
GENESIS_BLOCK = None
SUBSIDY_HALVING_INTERVAL = 210000
PROOF_OF_WORK_LIMIT = 2**256-1 >> 32
class CoreTestNetParams(CoreMainParams):
NAME = 'testnet'
GENESIS_BLOCK = None
class CoreRegTestParams(CoreTestNetParams):
NAME = 'regtest'
GENESIS_BLOCK = None
SUBSIDY_HALVING_INTERVAL = 150
PROOF_OF_WORK_LIMIT = 2**256-1 >> 1
"""Master global setting for what core chain params we're using"""
coreparams = CoreMainParams()
def _SelectCoreParams(name):
"""Select the core chain parameters to use
Don't use this directly, use bitcoin.SelectParams() instead so both
consensus-critical and general parameters are set properly.
"""
global coreparams
if name == 'mainnet':
coreparams = CoreMainParams()
elif name == 'testnet':
coreparams = CoreTestNetParams()
elif name == 'regtest':
coreparams = CoreRegTestParams()
else:
raise ValueError('Unknown chain %r' % name)
class CheckTransactionError(ValidationError):
pass
def CheckTransaction(tx):
"""Basic transaction checks that don't depend on any context.
Raises CheckTransactionError
"""
if not tx.vin:
raise CheckTransactionError("CheckTransaction() : vin empty")
if not tx.vout:
raise CheckTransactionError("CheckTransaction() : vout empty")
# Size limits
if len(tx.serialize()) > MAX_BLOCK_SIZE:
raise CheckTransactionError("CheckTransaction() : size limits failed")
# Check for negative or overflow output values
nValueOut = 0
for txout in tx.vout:
if txout.nValue < 0:
raise CheckTransactionError("CheckTransaction() : txout.nValue negative")
if txout.nValue > MAX_MONEY:
raise CheckTransactionError("CheckTransaction() : txout.nValue too high")
nValueOut += txout.nValue
if not MoneyRange(nValueOut):
raise CheckTransactionError("CheckTransaction() : txout total out of range")
# Check for duplicate inputs
vin_outpoints = set()
for txin in tx.vin:
if txin.prevout in vin_outpoints:
raise CheckTransactionError("CheckTransaction() : duplicate inputs")
vin_outpoints.add(txin.prevout)
if tx.is_coinbase():
if not (2 <= len(tx.vin[0].scriptSig) <= 100):
raise CheckTransactionError("CheckTransaction() : coinbase script size")
else:
for txin in tx.vin:
if txin.prevout.is_null():
raise CheckTransactionError("CheckTransaction() : prevout is null")
class CheckBlockHeaderError(ValidationError):
pass
class CheckProofOfWorkError(CheckBlockHeaderError):
pass
def CheckProofOfWork(hash, nBits):
"""Check a proof-of-work
Raises CheckProofOfWorkError
"""
target = uint256_from_compact(nBits)
# Check range
if not (0 < target <= coreparams.PROOF_OF_WORK_LIMIT):
raise CheckProofOfWorkError("CheckProofOfWork() : nBits below minimum work")
# Check proof of work matches claimed amount
hash = uint256_from_str(hash)
if hash > target:
raise CheckProofOfWorkError("CheckProofOfWork() : hash doesn't match nBits")
def CheckBlockHeader(block_header, fCheckPoW = True, cur_time=None):
"""Context independent CBlockHeader checks.
fCheckPoW - Check proof-of-work.
cur_time - Current time. Defaults to time.time()
Raises CheckBlockHeaderError if block header is invalid.
"""
if cur_time is None:
cur_time = time.time()
# Check proof-of-work matches claimed amount
if fCheckPoW:
CheckProofOfWork(block_header.GetHash(), block_header.nBits)
# Check timestamp
if block_header.nTime > cur_time + 2 * 60 * 60:
raise CheckBlockHeaderError("CheckBlockHeader() : block timestamp too far in the future")
class CheckBlockError(CheckBlockHeaderError):
pass
def GetLegacySigOpCount(tx):
nSigOps = 0
for txin in tx.vin:
nSigOps += txin.scriptSig.GetSigOpCount(False)
for txout in tx.vout:
nSigOps += txout.scriptPubKey.GetSigOpCount(False)
return nSigOps
def CheckBlock(block, fCheckPoW = True, fCheckMerkleRoot = True, cur_time=None):
"""Context independent CBlock checks.
CheckBlockHeader() is called first, which may raise a CheckBlockHeaderError
exception, followed by the block tests. CheckTransaction() is called for every
transaction.
fCheckPoW - Check proof-of-work.
fCheckMerkleRoot - Check merkle root matches transactions.
cur_time - Current time. Defaults to time.time()
"""
# Block header checks
CheckBlockHeader(block.get_header(), fCheckPoW=fCheckPoW, cur_time=cur_time)
# Size limits
if not block.vtx:
raise CheckBlockError("CheckBlock() : vtx empty")
if len(block.serialize()) > MAX_BLOCK_SIZE:
raise CheckBlockError("CheckBlock() : block larger than MAX_BLOCK_SIZE")
# First transaction must be coinbase
if not block.vtx[0].is_coinbase():
raise CheckBlockError("CheckBlock() : first tx is not coinbase")
# Check the rest of the transactions. Note that we do things "all at once",
# which could potentially be a consensus failure if there were some obscure bug.
# Set used for txid uniqueness testing. If the coinbase tx is included twice
# it'll be caught by the "more than one coinbase" test.
unique_txids = set()
nSigOps = 0
for tx in block.vtx[1:]:
if tx.is_coinbase():
raise CheckBlockError("CheckBlock() : more than one coinbase")
CheckTransaction(tx)
txid = tx.GetHash()
if txid in unique_txids:
raise CheckBlockError("CheckBlock() : duplicate transaction")
unique_txids.add(txid)
nSigOps += GetLegacySigOpCount(tx)
if nSigOps > MAX_BLOCK_SIGOPS:
raise CheckBlockError("CheckBlock() : out-of-bounds SigOpCount")
# Check merkle root
if fCheckMerkleRoot and block.hashMerkleRoot != block.calc_merkle_root():
raise CheckBlockError("CheckBlock() : hashMerkleRoot mismatch")
__all__ = (
'Hash',
'Hash160',
'COIN',
'MAX_MONEY',
'MAX_BLOCK_SIZE',
'MAX_BLOCK_SIGOPS',
'MoneyRange',
'x',
'b2x',
'lx',
'b2lx',
'str_money_value',
'ValidationError',
'COutPoint',
'CMutableOutPoint',
'CTxIn',
'CMutableTxIn',
'CTxOut',
'CMutableTxOut',
'CTransaction',
'CMutableTransaction',
'CBlockHeader',
'CBlock',
'CoreChainParams',
'CoreMainParams',
'CoreTestNetParams',
'CoreRegTestParams',
'CheckTransactionError',
'CheckTransaction',
'CheckBlockHeaderError',
'CheckProofOfWorkError',
'CheckProofOfWork',
'CheckBlockHeader',
'CheckBlockError',
'GetLegacySigOpCount',
'CheckBlock',
)
| mit |
kaiyuanl/gem5 | src/sim/ClockedObject.py | 49 | 2482 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Hansson
from m5.SimObject import SimObject
from m5.params import *
from m5.proxy import *
class ClockedObject(SimObject):
type = 'ClockedObject'
abstract = True
cxx_header = "sim/clocked_object.hh"
# The clock domain this clocked object belongs to, inheriting the
# parent's clock domain by default
clk_domain = Param.ClockDomain(Parent.clk_domain, "Clock domain")
| bsd-3-clause |
mitchrule/Miscellaneous | Django_Project/django/Lib/site-packages/pip/_vendor/distlib/_backport/shutil.py | 1002 | 25650 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Utility functions for copying and archiving files and directory trees.
XXX The functions here don't copy the resource fork or other metadata on Mac.
"""
import os
import sys
import stat
from os.path import abspath
import fnmatch
import collections
import errno
from . import tarfile
try:
import bz2
_BZ2_SUPPORTED = True
except ImportError:
_BZ2_SUPPORTED = False
try:
from pwd import getpwnam
except ImportError:
getpwnam = None
try:
from grp import getgrnam
except ImportError:
getgrnam = None
__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2",
"copytree", "move", "rmtree", "Error", "SpecialFileError",
"ExecError", "make_archive", "get_archive_formats",
"register_archive_format", "unregister_archive_format",
"get_unpack_formats", "register_unpack_format",
"unregister_unpack_format", "unpack_archive", "ignore_patterns"]
class Error(EnvironmentError):
pass
class SpecialFileError(EnvironmentError):
"""Raised when trying to do a kind of operation (e.g. copying) which is
not supported on a special file (e.g. a named pipe)"""
class ExecError(EnvironmentError):
"""Raised when a command could not be executed"""
class ReadError(EnvironmentError):
"""Raised when an archive cannot be read"""
class RegistryError(Exception):
"""Raised when a registery operation with the archiving
and unpacking registeries fails"""
try:
WindowsError
except NameError:
WindowsError = None
def copyfileobj(fsrc, fdst, length=16*1024):
"""copy data from file-like object fsrc to file-like object fdst"""
while 1:
buf = fsrc.read(length)
if not buf:
break
fdst.write(buf)
def _samefile(src, dst):
# Macintosh, Unix.
if hasattr(os.path, 'samefile'):
try:
return os.path.samefile(src, dst)
except OSError:
return False
# All other platforms: check for same pathname.
return (os.path.normcase(os.path.abspath(src)) ==
os.path.normcase(os.path.abspath(dst)))
def copyfile(src, dst):
"""Copy data from src to dst"""
if _samefile(src, dst):
raise Error("`%s` and `%s` are the same file" % (src, dst))
for fn in [src, dst]:
try:
st = os.stat(fn)
except OSError:
# File most likely does not exist
pass
else:
# XXX What about other special files? (sockets, devices...)
if stat.S_ISFIFO(st.st_mode):
raise SpecialFileError("`%s` is a named pipe" % fn)
with open(src, 'rb') as fsrc:
with open(dst, 'wb') as fdst:
copyfileobj(fsrc, fdst)
def copymode(src, dst):
"""Copy mode bits from src to dst"""
if hasattr(os, 'chmod'):
st = os.stat(src)
mode = stat.S_IMODE(st.st_mode)
os.chmod(dst, mode)
def copystat(src, dst):
"""Copy all stat info (mode bits, atime, mtime, flags) from src to dst"""
st = os.stat(src)
mode = stat.S_IMODE(st.st_mode)
if hasattr(os, 'utime'):
os.utime(dst, (st.st_atime, st.st_mtime))
if hasattr(os, 'chmod'):
os.chmod(dst, mode)
if hasattr(os, 'chflags') and hasattr(st, 'st_flags'):
try:
os.chflags(dst, st.st_flags)
except OSError as why:
if (not hasattr(errno, 'EOPNOTSUPP') or
why.errno != errno.EOPNOTSUPP):
raise
def copy(src, dst):
"""Copy data and mode bits ("cp src dst").
The destination may be a directory.
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst)
copymode(src, dst)
def copy2(src, dst):
"""Copy data and all stat info ("cp -p src dst").
The destination may be a directory.
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst)
copystat(src, dst)
def ignore_patterns(*patterns):
"""Function that can be used as copytree() ignore parameter.
Patterns is a sequence of glob-style patterns
that are used to exclude files"""
def _ignore_patterns(path, names):
ignored_names = []
for pattern in patterns:
ignored_names.extend(fnmatch.filter(names, pattern))
return set(ignored_names)
return _ignore_patterns
def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2,
ignore_dangling_symlinks=False):
"""Recursively copy a directory tree.
The destination directory must not already exist.
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied. If the file pointed to by the symlink doesn't
exist, an exception will be added to the list of errors raised in
an Error exception at the end of the copy process.
You can set the optional ignore_dangling_symlinks flag to true if you
want to silence this exception. Notice that this has no effect on
platforms that don't support os.symlink.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
The optional copy_function argument is a callable that will be used
to copy each file. It will be called with the source path and the
destination path as arguments. By default, copy2() is used, but any
function that supports the same signature (like copy()) can be used.
"""
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
os.makedirs(dst)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if os.path.islink(srcname):
linkto = os.readlink(srcname)
if symlinks:
os.symlink(linkto, dstname)
else:
# ignore dangling symlink if the flag is on
if not os.path.exists(linkto) and ignore_dangling_symlinks:
continue
# otherwise let the copy occur. copy2 will raise an error
copy_function(srcname, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, ignore, copy_function)
else:
# Will raise a SpecialFileError for unsupported file types
copy_function(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error as err:
errors.extend(err.args[0])
except EnvironmentError as why:
errors.append((srcname, dstname, str(why)))
try:
copystat(src, dst)
except OSError as why:
if WindowsError is not None and isinstance(why, WindowsError):
# Copying file access times may fail on Windows
pass
else:
errors.append((src, dst, str(why)))
if errors:
raise Error(errors)
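# Illustrative sketch (not part of the original module): copying a tree while
# skipping compiled files and VCS metadata, and copying only file data and
# mode bits (copy) rather than full stat info (copy2). Paths are placeholders.
#
#     copytree('project', '/tmp/project-copy',
#              ignore=ignore_patterns('*.pyc', '.git'),
#              copy_function=copy)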
def rmtree(path, ignore_errors=False, onerror=None):
"""Recursively delete a directory tree.
If ignore_errors is set, errors are ignored; otherwise, if onerror
is set, it is called to handle the error with arguments (func,
path, exc_info) where func is os.listdir, os.remove, or os.rmdir;
path is the argument to that function that caused it to fail; and
exc_info is a tuple returned by sys.exc_info(). If ignore_errors
is false and onerror is None, an exception is raised.
"""
if ignore_errors:
def onerror(*args):
pass
elif onerror is None:
def onerror(*args):
raise
try:
if os.path.islink(path):
# symlinks to directories are forbidden, see bug #1669
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(os.path.islink, path, sys.exc_info())
# can't continue even if onerror hook returns
return
names = []
try:
names = os.listdir(path)
except os.error:
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except os.error:
mode = 0
if stat.S_ISDIR(mode):
rmtree(fullname, ignore_errors, onerror)
else:
try:
os.remove(fullname)
except os.error:
onerror(os.remove, fullname, sys.exc_info())
try:
os.rmdir(path)
except os.error:
onerror(os.rmdir, path, sys.exc_info())
def _basename(path):
# A basename() variant which first strips the trailing slash, if present.
# Thus we always get the last component of the path, even for directories.
return os.path.basename(path.rstrip(os.path.sep))
def move(src, dst):
"""Recursively move a file or directory to another location. This is
similar to the Unix "mv" command.
If the destination is a directory or a symlink to a directory, the source
is moved inside the directory. The destination path must not already
exist.
If the destination already exists but is not a directory, it may be
overwritten depending on os.rename() semantics.
If the destination is on our current filesystem, then rename() is used.
Otherwise, src is copied to the destination and then removed.
A lot more could be done here... A look at mv.c shows a lot of
the issues this implementation glosses over.
"""
real_dst = dst
if os.path.isdir(dst):
if _samefile(src, dst):
# We might be on a case insensitive filesystem,
# perform the rename anyway.
os.rename(src, dst)
return
real_dst = os.path.join(dst, _basename(src))
if os.path.exists(real_dst):
raise Error("Destination path '%s' already exists" % real_dst)
try:
os.rename(src, real_dst)
except OSError:
if os.path.isdir(src):
if _destinsrc(src, dst):
raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst))
copytree(src, real_dst, symlinks=True)
rmtree(src)
else:
copy2(src, real_dst)
os.unlink(src)
def _destinsrc(src, dst):
src = abspath(src)
dst = abspath(dst)
if not src.endswith(os.path.sep):
src += os.path.sep
if not dst.endswith(os.path.sep):
dst += os.path.sep
return dst.startswith(src)
def _get_gid(name):
"""Returns a gid, given a group name."""
if getgrnam is None or name is None:
return None
try:
result = getgrnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _get_uid(name):
"""Returns an uid, given a user name."""
if getpwnam is None or name is None:
return None
try:
result = getpwnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0,
owner=None, group=None, logger=None):
"""Create a (possibly compressed) tar file from all the files under
'base_dir'.
'compress' must be "gzip" (the default), "bzip2", or None.
'owner' and 'group' can be used to define an owner and a group for the
archive that is being built. If not provided, the current owner and group
will be used.
The output tar file will be named 'base_name' + ".tar", possibly plus
the appropriate compression extension (".gz", or ".bz2").
Returns the output filename.
"""
tar_compression = {'gzip': 'gz', None: ''}
compress_ext = {'gzip': '.gz'}
if _BZ2_SUPPORTED:
tar_compression['bzip2'] = 'bz2'
compress_ext['bzip2'] = '.bz2'
# flags for compression program, each element of list will be an argument
if compress is not None and compress not in compress_ext:
raise ValueError("bad value for 'compress', or compression format not "
"supported : {0}".format(compress))
archive_name = base_name + '.tar' + compress_ext.get(compress, '')
archive_dir = os.path.dirname(archive_name)
if not os.path.exists(archive_dir):
if logger is not None:
logger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
# creating the tarball
if logger is not None:
logger.info('Creating tar archive')
uid = _get_uid(owner)
gid = _get_gid(group)
def _set_uid_gid(tarinfo):
if gid is not None:
tarinfo.gid = gid
tarinfo.gname = group
if uid is not None:
tarinfo.uid = uid
tarinfo.uname = owner
return tarinfo
if not dry_run:
tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress])
try:
tar.add(base_dir, filter=_set_uid_gid)
finally:
tar.close()
return archive_name
def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False):
# XXX see if we want to keep an external call here
if verbose:
zipoptions = "-r"
else:
zipoptions = "-rq"
from distutils.errors import DistutilsExecError
from distutils.spawn import spawn
try:
spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run)
except DistutilsExecError:
# XXX really should distinguish between "couldn't find
# external 'zip' command" and "zip failed".
raise ExecError("unable to create zip file '%s': "
"could neither import the 'zipfile' module nor "
"find a standalone zip utility") % zip_filename
def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):
"""Create a zip file from all the files under 'base_dir'.
The output zip file will be named 'base_name' + ".zip". Uses either the
"zipfile" Python module (if available) or the InfoZIP "zip" utility
(if installed and found on the default search path). If neither tool is
available, raises ExecError. Returns the name of the output zip
file.
"""
zip_filename = base_name + ".zip"
archive_dir = os.path.dirname(base_name)
if not os.path.exists(archive_dir):
if logger is not None:
logger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
# If zipfile module is not available, try spawning an external 'zip'
# command.
try:
import zipfile
except ImportError:
zipfile = None
if zipfile is None:
_call_external_zip(base_dir, zip_filename, verbose, dry_run)
else:
if logger is not None:
logger.info("creating '%s' and adding '%s' to it",
zip_filename, base_dir)
if not dry_run:
zip = zipfile.ZipFile(zip_filename, "w",
compression=zipfile.ZIP_DEFLATED)
for dirpath, dirnames, filenames in os.walk(base_dir):
for name in filenames:
path = os.path.normpath(os.path.join(dirpath, name))
if os.path.isfile(path):
zip.write(path, path)
if logger is not None:
logger.info("adding '%s'", path)
zip.close()
return zip_filename
_ARCHIVE_FORMATS = {
'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
'bztar': (_make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"),
'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"),
'zip': (_make_zipfile, [], "ZIP file"),
}
if _BZ2_SUPPORTED:
_ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')],
"bzip2'ed tar-file")
def get_archive_formats():
"""Returns a list of supported formats for archiving and unarchiving.
Each element of the returned sequence is a tuple (name, description)
"""
formats = [(name, registry[2]) for name, registry in
_ARCHIVE_FORMATS.items()]
formats.sort()
return formats
def register_archive_format(name, function, extra_args=None, description=''):
"""Registers an archive format.
name is the name of the format. function is the callable that will be
used to create archives. If provided, extra_args is a sequence of
(name, value) tuples that will be passed as arguments to the callable.
description can be provided to describe the format, and will be returned
by the get_archive_formats() function.
"""
if extra_args is None:
extra_args = []
if not isinstance(function, collections.Callable):
raise TypeError('The %s object is not callable' % function)
if not isinstance(extra_args, (tuple, list)):
raise TypeError('extra_args needs to be a sequence')
for element in extra_args:
if not isinstance(element, (tuple, list)) or len(element) != 2:
raise TypeError('extra_args elements are : (arg_name, value)')
_ARCHIVE_FORMATS[name] = (function, extra_args, description)
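# Illustrative sketch (not part of the original module): registering a custom
# archiver. _make_plain_copy is a made-up callable; any function accepting
# (base_name, base_dir, **kwargs) and returning the created filename fits the
# contract used by make_archive() below.
#
#     register_archive_format('plaincopy', _make_plain_copy,
#                             extra_args=[('compress', None)],
#                             description='verbatim directory copy')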
def unregister_archive_format(name):
del _ARCHIVE_FORMATS[name]
def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
dry_run=0, owner=None, group=None, logger=None):
"""Create an archive file (eg. zip or tar).
'base_name' is the name of the file to create, minus any format-specific
extension; 'format' is the archive format: one of "zip", "tar", "bztar"
or "gztar".
'root_dir' is a directory that will be the root directory of the
archive; ie. we typically chdir into 'root_dir' before creating the
archive. 'base_dir' is the directory where we start archiving from;
ie. 'base_dir' will be the common prefix of all files and
directories in the archive. 'root_dir' and 'base_dir' both default
to the current directory. Returns the name of the archive file.
'owner' and 'group' are used when creating a tar archive. By default,
uses the current owner and group.
"""
save_cwd = os.getcwd()
if root_dir is not None:
if logger is not None:
logger.debug("changing into '%s'", root_dir)
base_name = os.path.abspath(base_name)
if not dry_run:
os.chdir(root_dir)
if base_dir is None:
base_dir = os.curdir
kwargs = {'dry_run': dry_run, 'logger': logger}
try:
format_info = _ARCHIVE_FORMATS[format]
except KeyError:
raise ValueError("unknown archive format '%s'" % format)
func = format_info[0]
for arg, val in format_info[1]:
kwargs[arg] = val
if format != 'zip':
kwargs['owner'] = owner
kwargs['group'] = group
try:
filename = func(base_name, base_dir, **kwargs)
finally:
if root_dir is not None:
if logger is not None:
logger.debug("changing back to '%s'", save_cwd)
os.chdir(save_cwd)
return filename
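# Illustrative sketch (not part of the original module): building a gzip'ed
# tarball whose members all start with 'data/'. Paths are placeholders; the
# returned name is an absolute path ending in 'backup.tar.gz' because
# base_name is made absolute when root_dir is given.
#
#     archive = make_archive('backup', 'gztar', root_dir='/srv', base_dir='data')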
def get_unpack_formats():
"""Returns a list of supported formats for unpacking.
Each element of the returned sequence is a tuple
(name, extensions, description)
"""
formats = [(name, info[0], info[3]) for name, info in
_UNPACK_FORMATS.items()]
formats.sort()
return formats
def _check_unpack_options(extensions, function, extra_args):
"""Checks what gets registered as an unpacker."""
# first make sure no other unpacker is registered for this extension
existing_extensions = {}
for name, info in _UNPACK_FORMATS.items():
for ext in info[0]:
existing_extensions[ext] = name
for extension in extensions:
if extension in existing_extensions:
msg = '%s is already registered for "%s"'
raise RegistryError(msg % (extension,
existing_extensions[extension]))
if not isinstance(function, collections.Callable):
raise TypeError('The registered function must be a callable')
def register_unpack_format(name, extensions, function, extra_args=None,
description=''):
"""Registers an unpack format.
`name` is the name of the format. `extensions` is a list of extensions
corresponding to the format.
`function` is the callable that will be
used to unpack archives. The callable will receive archives to unpack.
If it's unable to handle an archive, it needs to raise a ReadError
exception.
If provided, `extra_args` is a sequence of
(name, value) tuples that will be passed as arguments to the callable.
description can be provided to describe the format, and will be returned
by the get_unpack_formats() function.
"""
if extra_args is None:
extra_args = []
_check_unpack_options(extensions, function, extra_args)
_UNPACK_FORMATS[name] = extensions, function, extra_args, description
def unregister_unpack_format(name):
"""Removes the pack format from the registery."""
del _UNPACK_FORMATS[name]
def _ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def _unpack_zipfile(filename, extract_dir):
"""Unpack zip `filename` to `extract_dir`
"""
try:
import zipfile
except ImportError:
raise ReadError('zlib not supported, cannot unpack this archive.')
if not zipfile.is_zipfile(filename):
raise ReadError("%s is not a zip file" % filename)
zip = zipfile.ZipFile(filename)
try:
for info in zip.infolist():
name = info.filename
# don't extract absolute paths or ones with .. in them
if name.startswith('/') or '..' in name:
continue
target = os.path.join(extract_dir, *name.split('/'))
if not target:
continue
_ensure_directory(target)
if not name.endswith('/'):
# file
data = zip.read(info.filename)
f = open(target, 'wb')
try:
f.write(data)
finally:
f.close()
del data
finally:
zip.close()
def _unpack_tarfile(filename, extract_dir):
"""Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`
"""
try:
tarobj = tarfile.open(filename)
except tarfile.TarError:
raise ReadError(
"%s is not a compressed or uncompressed tar file" % filename)
try:
tarobj.extractall(extract_dir)
finally:
tarobj.close()
_UNPACK_FORMATS = {
'gztar': (['.tar.gz', '.tgz'], _unpack_tarfile, [], "gzip'ed tar-file"),
'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"),
'zip': (['.zip'], _unpack_zipfile, [], "ZIP file")
}
if _BZ2_SUPPORTED:
_UNPACK_FORMATS['bztar'] = (['.bz2'], _unpack_tarfile, [],
"bzip2'ed tar-file")
def _find_unpack_format(filename):
for name, info in _UNPACK_FORMATS.items():
for extension in info[0]:
if filename.endswith(extension):
return name
return None
def unpack_archive(filename, extract_dir=None, format=None):
"""Unpack an archive.
`filename` is the name of the archive.
`extract_dir` is the name of the target directory, where the archive
is unpacked. If not provided, the current working directory is used.
`format` is the archive format: one of "zip", "tar", or "gztar". Or any
other registered format. If not provided, unpack_archive will use the
filename extension and see if an unpacker was registered for that
extension.
In case none is found, a ValueError is raised.
"""
if extract_dir is None:
extract_dir = os.getcwd()
if format is not None:
try:
format_info = _UNPACK_FORMATS[format]
except KeyError:
raise ValueError("Unknown unpack format '{0}'".format(format))
func = format_info[1]
func(filename, extract_dir, **dict(format_info[2]))
else:
# we need to look at the registered unpackers supported extensions
format = _find_unpack_format(filename)
if format is None:
raise ReadError("Unknown archive format '{0}'".format(filename))
func = _UNPACK_FORMATS[format][1]
kwargs = dict(_UNPACK_FORMATS[format][2])
func(filename, extract_dir, **kwargs)
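# Illustrative sketch (not part of the original module): unpacking with an
# explicit format and with extension-based detection. Paths are placeholders.
#
#     unpack_archive('backup.tar.gz', '/tmp/restore', format='gztar')
#     unpack_archive('backup.zip', '/tmp/restore')  # format inferred from '.zip'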
| mit |
AlexStarov/Shop | applications/discount/admin.py | 1 | 3002 | # -*- coding: utf-8 -*-
from django.contrib import admin
from .models import Action
__author__ = 'AlexStarov'
@admin.register(Action)
class ActionAdmin(admin.ModelAdmin, ):
list_display = ['pk', 'name', 'datetime_start', 'datetime_end', 'auto_start',
'auto_end', 'auto_del', 'auto_del_action_from_product', 'auto_del_action_price', 'deleted', ]
list_display_links = ['pk', 'name', 'datetime_start', 'datetime_end', ]
list_filter = ('name', )
search_fields = ['name', ]
fieldsets = [
(None, {'classes': ['wide'], 'fields': ['name', 'datetime_start', 'datetime_end', 'auto_start',
'auto_end', 'auto_del', 'auto_del_action_from_product',
'auto_del_action_price', ], }, ),
(u'Скрытые параметры', {'classes': ['collapse'], 'fields': ['deleted', ], }, ),
# (u'Информация о товаре для поисковых систем', {'classes': ['collapse'], 'fields': ['meta_title',
# 'meta_description',
# 'meta_keywords', ], }, ),
# (u'Дополнительные функции', {'classes': ['collapse'], 'fields': ['template', 'visibility', ], }, ),
# (u'Ссылка на пользователя создателя', {'classes': ['collapse'], 'fields': ['user_obj', ], }, ),
]
# readonly_fields = u'url'
# form = patch_admin_form(ProductAdminForm, )
# prepopulated_fields = {u'url': (u'title', ), }
# filter_horizontal = ('category', 'recommended', 'action', )
# inlines = [
# genericStacked_ItemID_InLine,
# genericStacked_IntermediateModelManufacturer_InLine,
# Tabular_Discount_InLine,
# admin_Additional_Information_InLine,
# genericStacked_Photo_InLine,
# Tabular_AdditionalInformationForPrice_InLine,
# Tabular_ExtendedPrice_InLine,
# ]
save_as = True
save_on_top = True
ordering = ['-created_at', ]
# # Here the magic begins: show everything to the superuser, and show a regular user only their own objects
# def queryset(self, request, ):
# if request.user.is_superuser:
# return super(ProductAdmin, self).queryset(request, )
# else:
# return super(ProductAdmin, self).queryset(request).filter(user_obj=request.user, )
# # This solves the second task: substitute request.user into the author field
# def save_model(self, request, obj, form, change, ):
# if getattr(obj, 'user_obj', None, ) is None:
# obj.user_obj = request.user
# obj.save()
# class Media:
# js = ('/media/js/admin/ruslug-urlify.js', )
| apache-2.0 |
dzbarsky/servo | tests/wpt/css-tests/tools/wptserve/wptserve/stash.py | 89 | 5279 | import base64
import json
import os
import uuid
from multiprocessing import Process
from multiprocessing.managers import BaseManager, DictProxy
class ServerDictManager(BaseManager):
shared_data = {}
def _get_shared():
return ServerDictManager.shared_data
ServerDictManager.register("get_dict",
callable=_get_shared,
proxytype=DictProxy)
class ClientDictManager(BaseManager):
pass
ClientDictManager.register("get_dict")
class StashServer(object):
def __init__(self, address=None, authkey=None):
self.address = address
self.authkey = authkey
self.manager = None
def __enter__(self):
self.manager, self.address, self.authkey = start_server(self.address, self.authkey)
store_env_config(self.address, self.authkey)
def __exit__(self, *args, **kwargs):
if self.manager is not None:
self.manager.shutdown()
def load_env_config():
address, authkey = json.loads(os.environ["WPT_STASH_CONFIG"])
if isinstance(address, list):
address = tuple(address)
else:
address = str(address)
authkey = base64.decodestring(authkey)
return address, authkey
def store_env_config(address, authkey):
authkey = base64.encodestring(authkey)
os.environ["WPT_STASH_CONFIG"] = json.dumps((address, authkey))
def start_server(address=None, authkey=None):
manager = ServerDictManager(address, authkey)
manager.start()
return (manager, manager._address, manager._authkey)
#TODO: Consider expiring values after some fixed time for long-running
#servers
class Stash(object):
"""Key-value store for persisting data across HTTP/S and WS/S requests.
This data store is specifically designed for persisting data across server
requests. The synchronization is achieved by using the BaseManager from
the multiprocessing module so different processes can access the same data.
Stash can be used interchangeably between HTTP, HTTPS, WS and WSS servers.
A thing to note about WS/S servers is that they require additional steps in
the handlers for accessing the same underlying shared data in the Stash.
This can usually be achieved by using load_env_config(). When using Stash
interchangeably between HTTP/S and WS/S request, the path part of the key
should be explicitly specified if accessing the same key/value subset.
The store has several unusual properties. Keys are of the form (path,
uuid), where path is, by default, the path in the HTTP request and
uuid is a unique id. In addition, the store is write-once, read-once,
i.e. the value associated with a particular key cannot be changed once
written and the read operation (called "take") is destructive. Taken together,
these properties make it difficult for data to accidentally leak
between different resources or different requests for the same
resource.
"""
_proxy = None
def __init__(self, default_path, address=None, authkey=None):
self.default_path = default_path
self.data = self._get_proxy(address, authkey)
def _get_proxy(self, address=None, authkey=None):
if address is None and authkey is None:
Stash._proxy = {}
if Stash._proxy is None:
manager = ClientDictManager(address, authkey)
manager.connect()
Stash._proxy = manager.get_dict()
return Stash._proxy
def _wrap_key(self, key, path):
if path is None:
path = self.default_path
# This key format is required to support using the path, since the data
# passed into the stash can be a DictProxy, which wouldn't detect changes
# when writing to a subdict.
return (str(path), str(uuid.UUID(key)))
def put(self, key, value, path=None):
"""Place a value in the shared stash.
:param key: A UUID to use as the data's key.
:param value: The data to store. This can be any python object.
:param path: The path that has access to read the data (by default
the current request path)"""
if value is None:
raise ValueError("SharedStash value may not be set to None")
internal_key = self._wrap_key(key, path)
if internal_key in self.data:
raise StashError("Tried to overwrite existing shared stash value "
"for key %s (old value was %s, new value is %s)" %
(internal_key, self[str(internal_key)], value))
else:
self.data[internal_key] = value
def take(self, key, path=None):
"""Remove a value from the shared stash and return it.
:param key: A UUID to use as the data's key.
:param path: The path that has access to read the data (by default
the current request path)"""
internal_key = self._wrap_key(key, path)
value = self.data.get(internal_key, None)
if value is not None:
try:
self.data.pop(internal_key)
except KeyError:
# Silently continue when pop error occurs.
pass
return value
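# Illustrative sketch (not part of the original module): the write-once /
# read-once contract described in the Stash docstring. The key must be a UUID
# string; the path defaults to the one given to the constructor.
#
#     stash = Stash('/example/path')
#     key = str(uuid.uuid4())
#     stash.put(key, {'token': 'abc'})
#     stash.take(key)   # -> {'token': 'abc'}
#     stash.take(key)   # -> None; the first take already removed the value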
class StashError(Exception):
pass
| mpl-2.0 |
cntnboys/410Lab5 | env-lab4/lib/python2.7/site-packages/werkzeug/_compat.py | 148 | 6190 | import sys
import operator
import functools
try:
import builtins
except ImportError:
import __builtin__ as builtins
PY2 = sys.version_info[0] == 2
_identity = lambda x: x
if PY2:
unichr = unichr
text_type = unicode
string_types = (str, unicode)
integer_types = (int, long)
int_to_byte = chr
iterkeys = lambda d, *args, **kwargs: d.iterkeys(*args, **kwargs)
itervalues = lambda d, *args, **kwargs: d.itervalues(*args, **kwargs)
iteritems = lambda d, *args, **kwargs: d.iteritems(*args, **kwargs)
iterlists = lambda d, *args, **kwargs: d.iterlists(*args, **kwargs)
iterlistvalues = lambda d, *args, **kwargs: d.iterlistvalues(*args, **kwargs)
iter_bytes = lambda x: iter(x)
exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
def fix_tuple_repr(obj):
def __repr__(self):
cls = self.__class__
return '%s(%s)' % (cls.__name__, ', '.join(
'%s=%r' % (field, self[index])
for index, field in enumerate(cls._fields)
))
obj.__repr__ = __repr__
return obj
def implements_iterator(cls):
cls.next = cls.__next__
del cls.__next__
return cls
def implements_to_string(cls):
cls.__unicode__ = cls.__str__
cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
return cls
def native_string_result(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs).encode('utf-8')
return functools.update_wrapper(wrapper, func)
def implements_bool(cls):
cls.__nonzero__ = cls.__bool__
del cls.__bool__
return cls
from itertools import imap, izip, ifilter
range_type = xrange
from StringIO import StringIO
from cStringIO import StringIO as BytesIO
NativeStringIO = BytesIO
def make_literal_wrapper(reference):
return lambda x: x
def normalize_string_tuple(tup):
"""Normalizes a string tuple to a common type. Following Python 2
rules, upgrades to unicode are implicit.
"""
if any(isinstance(x, text_type) for x in tup):
return tuple(to_unicode(x) for x in tup)
return tup
def try_coerce_native(s):
"""Try to coerce a unicode string to native if possible. Otherwise,
leave it as unicode.
"""
try:
return to_native(s)
except UnicodeError:
return s
wsgi_get_bytes = _identity
def wsgi_decoding_dance(s, charset='utf-8', errors='replace'):
return s.decode(charset, errors)
def wsgi_encoding_dance(s, charset='utf-8', errors='replace'):
if isinstance(s, bytes):
return s
return s.encode(charset, errors)
def to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
if x is None:
return None
if isinstance(x, (bytes, bytearray, buffer)):
return bytes(x)
if isinstance(x, unicode):
return x.encode(charset, errors)
raise TypeError('Expected bytes')
def to_native(x, charset=sys.getdefaultencoding(), errors='strict'):
if x is None or isinstance(x, str):
return x
return x.encode(charset, errors)
else:
unichr = chr
text_type = str
string_types = (str, )
integer_types = (int, )
iterkeys = lambda d, *args, **kwargs: iter(d.keys(*args, **kwargs))
itervalues = lambda d, *args, **kwargs: iter(d.values(*args, **kwargs))
iteritems = lambda d, *args, **kwargs: iter(d.items(*args, **kwargs))
iterlists = lambda d, *args, **kwargs: iter(d.lists(*args, **kwargs))
iterlistvalues = lambda d, *args, **kwargs: iter(d.listvalues(*args, **kwargs))
int_to_byte = operator.methodcaller('to_bytes', 1, 'big')
def iter_bytes(b):
return map(int_to_byte, b)
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
fix_tuple_repr = _identity
implements_iterator = _identity
implements_to_string = _identity
implements_bool = _identity
native_string_result = _identity
imap = map
izip = zip
ifilter = filter
range_type = range
from io import StringIO, BytesIO
NativeStringIO = StringIO
def make_literal_wrapper(reference):
if isinstance(reference, text_type):
return lambda x: x
return lambda x: x.encode('latin1')
def normalize_string_tuple(tup):
"""Ensures that all types in the tuple are either strings
or bytes.
"""
tupiter = iter(tup)
is_text = isinstance(next(tupiter, None), text_type)
for arg in tupiter:
if isinstance(arg, text_type) != is_text:
raise TypeError('Cannot mix str and bytes arguments (got %s)'
% repr(tup))
return tup
try_coerce_native = _identity
def wsgi_get_bytes(s):
return s.encode('latin1')
def wsgi_decoding_dance(s, charset='utf-8', errors='replace'):
return s.encode('latin1').decode(charset, errors)
def wsgi_encoding_dance(s, charset='utf-8', errors='replace'):
if isinstance(s, bytes):
return s.decode('latin1', errors)
return s.encode(charset).decode('latin1', errors)
def to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
if x is None:
return None
if isinstance(x, (bytes, bytearray, memoryview)):
return bytes(x)
if isinstance(x, str):
return x.encode(charset, errors)
raise TypeError('Expected bytes')
def to_native(x, charset=sys.getdefaultencoding(), errors='strict'):
if x is None or isinstance(x, str):
return x
return x.decode(charset, errors)
def to_unicode(x, charset=sys.getdefaultencoding(), errors='strict',
allow_none_charset=False):
if x is None:
return None
if not isinstance(x, bytes):
return text_type(x)
if charset is None and allow_none_charset:
return x
return x.decode(charset, errors)
| apache-2.0 |
jdorvi/CSHORE | lake_ontario/TRANSECTS_aecom.py | 1 | 8744 | TRANSECTS = {"123":["23102", "23101", "23194"],
"124":["23286", "23193", "23192"],
"125":["23383", "23284", "23382"],
"126":["23381", "23482", "23483"],
"127":["23675", "23676", "23578"],
"128":["23765", "23857", "23858"],
"129":["24208", "24134", "24207"],
"130":["24593", "24518", "24517"],
"131":["25336", "25236", "25235"],
"132":["25335", "25235", "25334"],
"133":["24823", "24744", "24743"],
"134":["26710", "26861", "26862"],
"135":["26169", "26167", "26300"],
"136":["27445", "27667", "27668"],
"137":["28109", "27878", "27871"],
"138":["27202", "27020", "27201"],
"139":["30218", "30211", "30533"],
"140":["30541", "30879", "30880"],
"141":["28940", "29257", "29258"],
"142":["29588", "29257", "29256"],
"143":["29247", "29577", "29578"],
"144":["30848", "30504", "30847"],
"145":["30498", "30841", "30499"],
"146":["30175", "30495", "30496"],
"147":["29863", "30168", "30169"],
"148":["32310", "32309", "32717"],
"149":["33843", "33507", "33842"],
"150":["35778", "35779", "35501"],
"151":["36884", "36885", "36610"],
"152":["38175", "38176", "37820"],
"153":["40381", "39890", "39889"],
"154":["42628", "41995", "42627"],
"155":["47508", "47509", "46788"],
"156":["51576", "52507", "51577"],
"157":["53392", "53393", "52509"],
"158":["57093", "56525", "56524"],
"159":["58648", "58649", "58112"],
"160":["59231", "59831", "59232"],
"161":["62174", "61571", "61570"],
"162":["64925", "64192", "64924"],
"163":["64923", "64924", "64190"],
"164":["61564", "61563", "62164"],
"165":["62156", "62157", "61556"],
"166":["62150", "62151", "61552"],
"167":["60377", "60959", "60378"],
"168":["59809", "59810", "59211"],
"169":["59801", "59802", "59205"],
"170":["60943", "60944", "60361"],
"171":["57075", "57583", "57584"],
"172":["55043", "55857", "55858"],
"173":["55040", "55039", "55852"],
"174":["54226", "55034", "55035"],
"175":["57061", "57577", "57578"],
"176":["58614", "58615", "58079"],
"177":["60346", "59784", "59783"],
"178":["58607", "58608", "58073"],
"179":["55019", "55830", "55831"],
"180":["53361", "52477", "52476"],
"181":["49856", "50687", "49857"],
"182":["46071", "46070", "46776"],
"183":["49850", "49849", "50680"],
"184":["53358", "52472", "52471"],
"185":["55013", "55012", "55824"],
"186":["55822", "56466", "55823"],
"187":["56464", "56463", "57037"],
"188":["55819", "55007", "55818"],
"189":["54200", "54199", "55005"],
"190":["52461", "53347", "52462"],
"191":["50670", "50669", "51531"],
"192":["49839", "49027", "49026"],
"193":["47480", "47479", "48235"],
"194":["46763", "46057", "46762"],
"195":["44642", "45354", "45355"],
"196":["43950", "43276", "43275"],
"197":["41423", "41984", "41985"],
"198":["40373", "40890", "40374"],
"199":["39882", "39402", "39401"],
"200":["38550", "38172", "38549"],
"201":["37815", "37816", "37490"],
"202":["37485", "37171", "37484"],
"203":["37805", "37806", "37479"],
"204":["37802", "37803", "37478"],
"205":["38533", "38933", "38934"],
"206":["39390", "38932", "39389"],
"207":["38151", "38532", "38152"],
"208":["37471", "37795", "37472"],
"209":["38915", "39370", "38916"],
"210":["40357", "39860", "40356"],
"211":["40874", "40355", "40873"],
"212":["42606", "41979", "42605"],
"213":["44637", "43947", "43946"],
"214":["45349", "46051", "45350"],
"215":["47472", "47473", "46758"],
"216":["48228", "49016", "49017"],
"217":["51527", "50663", "51526"],
"218":["55811", "54998", "55810"],
"219":["56452", "57028", "57029"],
"220":["55807", "56448", "56449"],
"221":["56443", "56442", "56444"],
"222":["57018", "57531", "57532"],
"223":["57015", "57014", "57526"],
"224":["54181", "54180", "54987"],
"225":["50654", "50655", "49827"],
"226":["48223", "47466", "48222"],
"227":["46753", "46047", "46046"],
"228":["44632", "45342", "45343"],
"229":["43264", "42601", "43263"],
"230":["41975", "41976", "41409"],
"231":["41975", "41976", "41409"],
"232":["40352", "40351", "40868"],
"233":["39365", "39364", "39853"],
"234":["38514", "38513", "38906"],
"235":["40339", "40856", "40340"],
"236":["41389", "40854", "40853"],
"237":["42581", "41958", "41957"],
"238":["41383", "41954", "41955"],
"239":["43234", "42573", "42572"],
"240":["41955", "42579", "41956"],
"241":["45321", "44610", "45320"],
"242":["45319", "44608", "45318"],
"243":["44606", "45316", "45317"],
"244":["43900", "44604", "43901"],
"245":["43225", "43224", "43898"],
"246":["42564", "43223", "43224"],
"247":["42561", "42562", "41940"],
"248":["41367", "40833", "41366"],
"249":["41365", "41364", "41933"],
"250":["41362", "41930", "41931"],
"251":["41360", "41927", "41928"],
"252":["41358", "41925", "41926"],
"253":["41923", "41357", "41356"],
"254":["41922", "41921", "42537"],
"255":["42536", "42537", "41920"],
"256":["42534", "41919", "41918"],
"257":["42532", "43213", "42533"],
"258":["43213", "43212", "43891"],
"259":["44595", "44594", "45307"],
"260":["49000", "49822", "49002"],
"261":["44591", "44590", "45302"],
"262":["44588", "45300", "44589"],
"263":["45999", "46722", "46000"],
"264":["44583", "44584", "43884"],
"265":["41346", "40816", "41345"],
"266":["39813", "40305", "40306"],
"267":["38876", "38482", "38875"],
"268":["37137", "37136", "37442"],
"269":["35748", "35474", "35747"],
"270":["35466", "35467", "35196"],
"271":["35191", "34908", "34907"],
"272":["34635", "34634", "34904"],
"273":["34629", "34628", "34895"],
"274":["31526", "31150", "31525"],
"275":["28599", "28323", "28598"],
"276":["28885", "28585", "28584"],
"277":["30779", "30778", "31123"],
"278":["33769", "34052", "33770"],
"279":["33808", "34088", "33810"],
"280":["34871", "34872", "34611"],
"281":["34891", "35177", "35178"],
"282":["36283", "36008", "36007"],
"283":["36007", "36282", "36283"],
"284":["37425", "37747", "37426"],
"285":["38094", "38470", "38095"],
"286":["39310", "39311", "38861"],
"287":["39797", "39796", "40286"],
"288":["43190", "42497", "42496"],
"289":["41317", "41316", "41876"],
"290":["40793", "40794", "40284"],
"291":["41318", "40790", "41317"],
"292":["41318", "40790", "41317"],
"293":["41314", "40787", "41313"],
"294":["40783", "40275", "40274"],
"295":["40782", "40273", "40781"],
"296":["40271", "40780", "40272"],
"297":["40778", "40779", "40270"],
"298":["41302", "40777", "40776"],
"299":["41852", "41851", "42462"],
"300":["43820", "43821", "43145"],
"301":["42454", "42453", "43140"],
"302":["42452", "43138", "43139"],
"303":["43137", "43812", "43813"],
"304":["108146", "107191", "108145"]}
| apache-2.0 |
mammique/django | django/utils/version.py | 228 | 1785 | from __future__ import unicode_literals
import datetime
import os
import subprocess
def get_version(version=None):
"Returns a PEP 386-compliant version number from VERSION."
if version is None:
from django import VERSION as version
else:
assert len(version) == 5
assert version[3] in ('alpha', 'beta', 'rc', 'final')
# Now build the two parts of the version number:
# main = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|c}N - for alpha, beta and rc releases
parts = 2 if version[2] == 0 else 3
main = '.'.join(str(x) for x in version[:parts])
sub = ''
if version[3] == 'alpha' and version[4] == 0:
git_changeset = get_git_changeset()
if git_changeset:
sub = '.dev%s' % git_changeset
elif version[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
sub = mapping[version[3]] + str(version[4])
return str(main + sub)
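# Illustrative sketch (not part of the original module): how VERSION tuples map
# to strings. The tuples are made-up examples, not actual Django releases.
#
#     get_version((1, 5, 0, 'final', 0))  # -> '1.5'
#     get_version((1, 5, 1, 'final', 0))  # -> '1.5.1'
#     get_version((1, 6, 0, 'beta', 2))   # -> '1.6b2'
#     get_version((1, 6, 0, 'alpha', 0))  # -> '1.6.devYYYYMMDDHHMMSS' (from git)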
def get_git_changeset():
"""Returns a numeric identifier of the latest git changeset.
The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
This value isn't guaranteed to be unique, but collisions are very unlikely,
so it's sufficient for generating the development version numbers.
"""
repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
git_log = subprocess.Popen('git log --pretty=format:%ct --quiet -1 HEAD',
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True, cwd=repo_dir, universal_newlines=True)
timestamp = git_log.communicate()[0]
try:
timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))
except ValueError:
return None
return timestamp.strftime('%Y%m%d%H%M%S')
| bsd-3-clause |
frankiecjunle/yunblog | venv/lib/python2.7/site-packages/sqlalchemy/exc.py | 32 | 12030 | # sqlalchemy/exc.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Exceptions used with SQLAlchemy.
The base exception class is :exc:`.SQLAlchemyError`. Exceptions which are
raised as a result of DBAPI exceptions are all subclasses of
:exc:`.DBAPIError`.
"""
class SQLAlchemyError(Exception):
"""Generic error class."""
class ArgumentError(SQLAlchemyError):
"""Raised when an invalid or conflicting function argument is supplied.
This error generally corresponds to construction time state errors.
"""
class ObjectNotExecutableError(ArgumentError):
"""Raised when an object is passed to .execute() that can't be
executed as SQL.
.. versionadded:: 1.1
"""
def __init__(self, target):
super(ObjectNotExecutableError, self).__init__(
"Not an executable object: %r" % target
)
class NoSuchModuleError(ArgumentError):
"""Raised when a dynamically-loaded module (usually a database dialect)
of a particular name cannot be located."""
class NoForeignKeysError(ArgumentError):
"""Raised when no foreign keys can be located between two selectables
during a join."""
class AmbiguousForeignKeysError(ArgumentError):
"""Raised when more than one foreign key matching can be located
between two selectables during a join."""
class CircularDependencyError(SQLAlchemyError):
"""Raised by topological sorts when a circular dependency is detected.
There are two scenarios where this error occurs:
* In a Session flush operation, if two objects are mutually dependent
on each other, they can not be inserted or deleted via INSERT or
DELETE statements alone; an UPDATE will be needed to post-associate
or pre-deassociate one of the foreign key constrained values.
The ``post_update`` flag described at :ref:`post_update` can resolve
this cycle.
* In a :attr:`.MetaData.sorted_tables` operation, two :class:`.ForeignKey`
or :class:`.ForeignKeyConstraint` objects mutually refer to each
other. Apply the ``use_alter=True`` flag to one or both,
see :ref:`use_alter`.
"""
def __init__(self, message, cycles, edges, msg=None):
if msg is None:
message += " (%s)" % ", ".join(repr(s) for s in cycles)
else:
message = msg
SQLAlchemyError.__init__(self, message)
self.cycles = cycles
self.edges = edges
def __reduce__(self):
return self.__class__, (None, self.cycles,
self.edges, self.args[0])
class CompileError(SQLAlchemyError):
"""Raised when an error occurs during SQL compilation"""
class UnsupportedCompilationError(CompileError):
"""Raised when an operation is not supported by the given compiler.
.. versionadded:: 0.8.3
"""
def __init__(self, compiler, element_type):
super(UnsupportedCompilationError, self).__init__(
"Compiler %r can't render element of type %s" %
(compiler, element_type))
class IdentifierError(SQLAlchemyError):
"""Raised when a schema name is beyond the max character limit"""
class DisconnectionError(SQLAlchemyError):
"""A disconnect is detected on a raw DB-API connection.
This error is raised and consumed internally by a connection pool. It can
be raised by the :meth:`.PoolEvents.checkout` event so that the host pool
forces a retry; the exception will be caught three times in a row before
the pool gives up and raises :class:`~sqlalchemy.exc.InvalidRequestError`
regarding the connection attempt.
"""
class TimeoutError(SQLAlchemyError):
"""Raised when a connection pool times out on getting a connection."""
class InvalidRequestError(SQLAlchemyError):
"""SQLAlchemy was asked to do something it can't do.
This error generally corresponds to runtime state errors.
"""
class NoInspectionAvailable(InvalidRequestError):
"""A subject passed to :func:`sqlalchemy.inspection.inspect` produced
no context for inspection."""
class ResourceClosedError(InvalidRequestError):
"""An operation was requested from a connection, cursor, or other
object that's in a closed state."""
class NoSuchColumnError(KeyError, InvalidRequestError):
"""A nonexistent column is requested from a ``RowProxy``."""
class NoReferenceError(InvalidRequestError):
"""Raised by ``ForeignKey`` to indicate a reference cannot be resolved."""
class NoReferencedTableError(NoReferenceError):
"""Raised by ``ForeignKey`` when the referred ``Table`` cannot be
located.
"""
def __init__(self, message, tname):
NoReferenceError.__init__(self, message)
self.table_name = tname
def __reduce__(self):
return self.__class__, (self.args[0], self.table_name)
class NoReferencedColumnError(NoReferenceError):
"""Raised by ``ForeignKey`` when the referred ``Column`` cannot be
located.
"""
def __init__(self, message, tname, cname):
NoReferenceError.__init__(self, message)
self.table_name = tname
self.column_name = cname
def __reduce__(self):
return self.__class__, (self.args[0], self.table_name,
self.column_name)
class NoSuchTableError(InvalidRequestError):
"""Table does not exist or is not visible to a connection."""
class UnboundExecutionError(InvalidRequestError):
"""SQL was attempted without a database connection to execute it on."""
class DontWrapMixin(object):
"""A mixin class which, when applied to a user-defined Exception class,
will not be wrapped inside of :exc:`.StatementError` if the error is
emitted within the process of executing a statement.
E.g.::
from sqlalchemy.exc import DontWrapMixin
class MyCustomException(Exception, DontWrapMixin):
pass
class MySpecialType(TypeDecorator):
impl = String
def process_bind_param(self, value, dialect):
if value == 'invalid':
raise MyCustomException("invalid!")
"""
# Moved to orm.exc; compatibility definition installed by orm import until 0.6
UnmappedColumnError = None
class StatementError(SQLAlchemyError):
"""An error occurred during execution of a SQL statement.
:class:`StatementError` wraps the exception raised
during execution, and features :attr:`.statement`
and :attr:`.params` attributes which supply context regarding
the specifics of the statement which had an issue.
The wrapped exception object is available in
the :attr:`.orig` attribute.
"""
statement = None
"""The string SQL statement being invoked when this exception occurred."""
params = None
"""The parameter list being used when this exception occurred."""
orig = None
"""The DBAPI exception object."""
def __init__(self, message, statement, params, orig):
SQLAlchemyError.__init__(self, message)
self.statement = statement
self.params = params
self.orig = orig
self.detail = []
def add_detail(self, msg):
self.detail.append(msg)
def __reduce__(self):
return self.__class__, (self.args[0], self.statement,
self.params, self.orig)
def __str__(self):
from sqlalchemy.sql import util
details = [SQLAlchemyError.__str__(self)]
if self.statement:
details.append("[SQL: %r]" % self.statement)
if self.params:
params_repr = util._repr_params(self.params, 10)
details.append("[parameters: %r]" % params_repr)
return ' '.join([
"(%s)" % det for det in self.detail
] + details)
def __unicode__(self):
return self.__str__()
class DBAPIError(StatementError):
"""Raised when the execution of a database operation fails.
Wraps exceptions raised by the DB-API underlying the
database operation. Driver-specific implementations of the standard
DB-API exception types are wrapped by matching sub-types of SQLAlchemy's
:class:`DBAPIError` when possible. DB-API's ``Error`` type maps to
:class:`DBAPIError` in SQLAlchemy, otherwise the names are identical. Note
that there is no guarantee that different DB-API implementations will
raise the same exception type for any given error condition.
:class:`DBAPIError` features :attr:`~.StatementError.statement`
and :attr:`~.StatementError.params` attributes which supply context
regarding the specifics of the statement which had an issue, for the
typical case when the error was raised within the context of
emitting a SQL statement.
The wrapped exception object is available in the
:attr:`~.StatementError.orig` attribute. Its type and properties are
DB-API implementation specific.
"""
@classmethod
def instance(cls, statement, params,
orig, dbapi_base_err,
connection_invalidated=False,
dialect=None):
# Don't ever wrap these, just return them directly as if
# DBAPIError didn't exist.
if (isinstance(orig, BaseException) and
not isinstance(orig, Exception)) or \
isinstance(orig, DontWrapMixin):
return orig
if orig is not None:
# not a DBAPI error, statement is present.
# raise a StatementError
if not isinstance(orig, dbapi_base_err) and statement:
return StatementError(
"(%s.%s) %s" %
(orig.__class__.__module__, orig.__class__.__name__,
orig),
statement, params, orig
)
glob = globals()
for super_ in orig.__class__.__mro__:
name = super_.__name__
if dialect:
name = dialect.dbapi_exception_translation_map.get(
name, name)
if name in glob and issubclass(glob[name], DBAPIError):
cls = glob[name]
break
return cls(statement, params, orig, connection_invalidated)
def __reduce__(self):
return self.__class__, (self.statement, self.params,
self.orig, self.connection_invalidated)
def __init__(self, statement, params, orig, connection_invalidated=False):
try:
text = str(orig)
except Exception as e:
text = 'Error in str() of DB-API-generated exception: ' + str(e)
StatementError.__init__(
self,
'(%s.%s) %s' % (
orig.__class__.__module__, orig.__class__.__name__, text, ),
statement,
params,
orig
)
self.connection_invalidated = connection_invalidated
class InterfaceError(DBAPIError):
"""Wraps a DB-API InterfaceError."""
class DatabaseError(DBAPIError):
"""Wraps a DB-API DatabaseError."""
class DataError(DatabaseError):
"""Wraps a DB-API DataError."""
class OperationalError(DatabaseError):
"""Wraps a DB-API OperationalError."""
class IntegrityError(DatabaseError):
"""Wraps a DB-API IntegrityError."""
class InternalError(DatabaseError):
"""Wraps a DB-API InternalError."""
class ProgrammingError(DatabaseError):
"""Wraps a DB-API ProgrammingError."""
class NotSupportedError(DatabaseError):
"""Wraps a DB-API NotSupportedError."""
# Warnings
class SADeprecationWarning(DeprecationWarning):
"""Issued once per usage of a deprecated API."""
class SAPendingDeprecationWarning(PendingDeprecationWarning):
"""Issued once per usage of a deprecated API."""
class SAWarning(RuntimeWarning):
"""Issued at runtime."""
| mit |
konsP/synnefo | snf-pithos-backend/pithos/backends/lib/sqlalchemy/alembic/env.py | 10 | 2382 | from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
try:
# pithos-app case
from synnefo.settings import PITHOS_BACKEND_DB_CONNECTION
except ImportError:
try:
# plankton case
from synnefo.settings import BACKEND_DB_CONNECTION as \
PITHOS_BACKEND_DB_CONNECTION
except ImportError:
PITHOS_BACKEND_DB_CONNECTION = None
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = None
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
db = config.get_main_option("sqlalchemy.url", PITHOS_BACKEND_DB_CONNECTION)
config.set_main_option("sqlalchemy.url", db)
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
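# Typical invocations (illustrative sketch; the config file name below is an
# assumption, not part of this repository):
#
#   alembic -c alembic.ini upgrade head         # online mode, uses the Engine above
#   alembic -c alembic.ini upgrade head --sql   # offline mode, emits SQL to stdout
#   alembic -c alembic.ini revision -m "add new table"
#
# Note that a sqlalchemy.url set in the .ini file takes precedence here;
# PITHOS_BACKEND_DB_CONNECTION is only used as the fallback default.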
| gpl-3.0 |
wilsonfreitas/easycsv | build/lib/easycsv.py | 2 | 16011 | #!/usr/bin/env python
# encoding: utf-8
#
"""
easycsv.py
The easycsv module executes csv statements (a kind of csv DSL - domain
specific language) to insert/update/delete data into a database.
Easycsv was developed to be used with the Storm ORM framework, but it
could be easily adapted for others ORM frameworks (SQLAlchemy, SQLObject, ...).
>>> from storm.locals import *
>>>
>>> class Category(object):
... __storm_table__ = 'category'
... name = Unicode(primary=True)
... parent_name = Unicode()
... parent = Reference(parent_name, name)
...
>>>
>>> database = create_database('sqlite:')
>>> store = Store(database)
>>> store.execute('CREATE TABLE category (name TEXT PRIMARY KEY, parent_name TEXT)')
<storm.databases.sqlite.SQLiteResult object at 0xa8a790>
>>>
>>> from easycsv import StormORM
>>>
>>> statements = '''\
... Category, Name, Parent
... +, Expenses,
... +, Internet, Expenses
... '''
>>>
>>> orm = StormORM(store=store)
>>> orm.execute(statements, modName='__main__')
(2, 0, 0, 2)
This session creates the table Category using the storm framework and inserts
two rows into the database.
The tuple returned from orm.execute says that two statements were submitted and
two rows were inserted.
The `statements` variable defines one csv statement block. A csv statement block has a

header that starts with the name of the class followed by some of its attributes.
The lines starting with '+' represent the csv statements, in particular, csv
insert statements.
There are three types of csv statements:
- '+' insert
- '-' delete
- '~' update
Lines starting with '#', lines whose first column is empty, and empty lines are ignored.
Copyright (c) 2008. All rights reserved.
"""
import csv
import re
from datetime import date
from itertools import count
from operator import attrgetter, and_, eq
from types import MethodType
__all__ = ['INSERT', 'DELETE', 'UPDATE', 'AttributeParser', 'StormAttributeParser',
'simple', 'camelCase', 'CSV', 'CSVType', 'CSVStatement', 'StormORM', 'ORM']
INSERT = '+'
DELETE = '-'
UPDATE = '~'
class AttributeParser(object):
"""
Generic parser applied to column fields of a statements block.
The methods used to parse column fields start with parse and receives two parameters:
text to be parsed and match object of re module.
"""
def __init__(self):
self.regexes = self.__createMethodAnalyzers()
def __createMethodAnalyzers(self):
pairs = []
for methodName in dir(self):
method = getattr(self, methodName)
if methodName.startswith('parse') and type(method) is MethodType and method.__doc__:
pairs.append( (re.compile(method.__doc__), method) )
return pairs
def parse(self, text):
'''
Parse text elements according to its own parserXXX methods or
those created in child classes.
@param text: text to be parsed
@return: parsed value of text
'''
result = None
for regex, func in self.regexes:
match = regex.match(text)
if match:
result = func(text, match)
break
if result is None:
result = self.parseAny(text)
return result
def parseNumber(self, text, match):
r'^-?\s*\d+([\.,]\d+)?$'
return eval(text)
def parseBoolean(self, text, match):
r'^[Tt][Rr][Uu][eE]|[Ff][Aa][Ll][Ss][Ee]$'
return eval(text.lower().capitalize())
def parseText(self, text, match):
r'^\''
return text[1:]
def parseAny(self, text):
return text
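# Illustrative behaviour of the generic parser (doctest-style sketch; the
# values follow the regexes documented in the parse* methods above):
#
#   >>> p = AttributeParser()
#   >>> p.parse('42')
#   42
#   >>> p.parse('true')
#   True
#   >>> p.parse("'42")     # a leading quote forces plain text
#   '42'
#   >>> p.parse('hello')   # nothing matched, parseAny returns the text as-is
#   'hello'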
class StormAttributeParser(AttributeParser):
"""
Implementation of parser for storm ORM. It generates unicode strings and
parses dd-mm-yyyy to datetime.date objects.
"""
def __init__(self):
super(StormAttributeParser, self).__init__()
def parseText(self, text, match):
r'^\''
s = text[1:]
        s = s.decode('utf-8')
return unicode(s)
def parseDate(self, text, match):
r'^\d?\d[/.-]\d\d[/.-]\d\d\d\d$'
# dsr -- date separator regex
dsr = re.compile(r'[/.-]')
# dp -- date parts
dp = dsr.split(text)
return date( int(dp[2]), int(dp[1]), int(dp[0]) )
def parseAny(self, text):
return unicode(text.decode('utf-8'))
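# Illustrative behaviour of the Storm-specific parser (doctest-style sketch):
#
#   >>> p = StormAttributeParser()
#   >>> p.parse('25/12/2008')    # dd/mm/yyyy becomes a datetime.date
#   datetime.date(2008, 12, 25)
#   >>> p.parse('Expenses')      # plain text becomes a unicode string
#   u'Expenses'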
def simple(attrName):
'''
    Convert human readable header names to property names by lowering the case
    and replacing spaces with underscores.
    Examples:
    >>> simple("Category")
    'category'
    >>> simple("Bank Account")
    'bank_account'
'''
attrName = str(attrName).strip()
attrName = attrName.lower()
attrName = re.sub('\s+', '_', attrName)
return attrName
def camelCase(attrName):
'''
Convert human readable header names to camel case property names.
Examples:
>>> camelCase("Category")
'category'
>>> camelCase("Bank Account")
'bankAccount'
'''
attrParts = attrName.lower().split()
s = []
for i,part in enumerate(attrParts):
if i == 0:
s.append(part)
else:
s.append(part.capitalize())
return ''.join(s)
class CSV(object):
"""CSV class that handles the csv files
content is any iterable where the content of each row is data delimited text.
"""
def __init__(self, content, attrParser=AttributeParser(), modName=None, module=None, nameResolution=simple):
'''
@param content: The csv content in one of following types: str, file or any iterable that iterate over csv lines.
@param attrParser: Any class that inherits AttributeParser.
@param modName: The name of the module where classes declared in the header of a statement block.
@param module: the module where classes declared in the header of a statement block.
@param nameResolution: The function used to resolve the column's names in the header of a statement block.
'''
if type(content) is str:
import os
content = content.split(os.linesep)
self.types = []
for i, csvRow in enumerate(csv.reader(content)):
csvRow = [f.strip() for f in csvRow]
            if len(csvRow) == 0 or csvRow[0] in ['#', '']:
continue
elif csvRow[0] in '+-~':
statement = CSVStatement(csvRow, attrParser)
statement.lineNumber = i+1
statement.lineContent = ','.join(csvRow)
csvType.addStatement( statement )
elif csvRow[0][0].isalpha():
csvType = CSVType(csvRow, nameResolution=nameResolution, modName=modName, module=module)
csvType.lineNumber = i+1
csvType.lineContent = ','.join(csvRow)
self.types.append(csvType)
class CSVType(object):
"""
The CSVType declared at the header of a csv statement block.
"""
def __init__(self, fields, nameResolution=simple, modName=None, module=None):
'''
@param fields: A list with the fields of a row in a csv file.
@param modName: The name of the module where classes declared in the header of a statement block.
@param module: the module where classes declared in the header of a statement block.
@param nameResolution: The function used to resolve the column's names in the header of a statement block.
'''
self.typeName = fields[0]
self.type = importClass(self.typeName, modName=modName, module=module)
self.keys = {}
self.attributes = {}
self.statements = []
self.hasPrimaryKey = False
self.primaryKey = None
for i, field in zip(count(1), fields[1:]):
field = nameResolution(field)
if re.match(r'^\{[^\{\}]+\}$', field):
field = field.strip('{}')
self.keys[i] = field
else:
self.attributes[i] = field
if isPrimaryKey(self.type, field):
self.primaryKey = (i, field)
if i in self.keys:
self.hasPrimaryKey = True
        if len(self.keys) == 0 and self.primaryKey:
# if self.primaryKey is None:
# raise Exception("No key given")
# else:
self.keys[ self.primaryKey[0] ] = self.primaryKey[1]
self.hasPrimaryKey = True
if self.primaryKey[0] in self.attributes:
del self.attributes[ self.primaryKey[0] ]
def addStatement(self, statement):
self.statements.append(statement)
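# Illustrative statement block (an assumption for this example, not taken from
# the module's own data): columns wrapped in braces are lookup keys, the
# remaining columns are attributes to set.
#
#   Category, {Name}, Parent
#   ~, Internet, Utilities
#   -, Groceries,
#
# The '~' row updates the Category whose Name is 'Internet', setting Parent to
# 'Utilities'; the '-' row deletes the Category whose Name is 'Groceries'.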
class CSVStatement(object):
"""
CSVStatement represents the csv statement to be executed by a ORM.
"""
def __init__(self, csvRow, attrParser):
'''
@param csvRow: A list with the splited content of a text csv row.
@param attrParser: Any class that inherits AttributeParser.
'''
self.action = csvRow[0]
self.csvRow = csvRow
self.attributes = {}
for i, field in zip(count(1), csvRow[1:]):
self.attributes[i] = attrParser.parse(field)
class ORM(object):
"""The ORM engine super class."""
def execute(self, csv, attrParser=None, modName=None, module=None, nameResolution=simple):
"""
Creates the CSV object with csv types and csv statements and sends the CSV to be executed
by the proper ORM.
@param attrParser: Any class that inherits AttributeParser.
@param modName: The name of the module where classes declared in the header of a statement block.
@param module: the module where classes declared in the header of a statement block.
@param nameResolution: The function used to resolve the column's names in the header of a statement block.
@return: Return a 4-tuple that indicates:
- total rows inserted
- total rows updated
- total rows deleted
- total statements sent
following this order.
"""
if not attrParser:
attrParser = self.attrParser
if type(csv) is not CSV:
csv = CSV(csv, attrParser=attrParser, modName=modName, module=module, nameResolution=nameResolution)
return self._execute(csv)
def _execute(self, csv):
"""Executes all statements of a CSV object.
@param csv: CSV object.
"""
i, d, u, t = 0, 0, 0, 0
for typo in csv.types:
for statement in typo.statements:
try:
n = self.executeStatement(typo, statement)
t += n
if statement.action is INSERT:
i += n
elif statement.action is UPDATE:
u += n
elif statement.action is DELETE:
d += n
except ValueError, ex:
print ex
return i, u, d, t
class StormORM(ORM):
"""
Storm implementation of ORM super class.
"""
def __init__(self, uri=None, store=None):
'''
@param uri: Database URI following storm rules.
@param store: Storm store.
If uri is given a new store is instanciated and it is used
to execute the statements.
If both parameters are given the early created store overrides
the store given.
'''
from storm.locals import create_database, Store
self.uri = uri
self.store = store
if self.uri:
database = create_database(self.uri)
self.store = Store(database)
if not self.store:
raise Exception('None storm store')
self.attrParser = StormAttributeParser()
def _getObject(self, csvType, csvStatement):
"""
Retrieves the object to be used at statement execution.
@param csvType: The CSVType
@param csvStatement: The CSVStatement
@return: The object early instanciated (for insert statement) or
retrieved from database (for update or delete statements).
"""
typo = csvType.type
keys = csvType.keys
attributes = csvStatement.attributes
if csvStatement.action in [DELETE, UPDATE]:
if csvType.hasPrimaryKey:
return self.store.get(typo, attributes[ csvType.primaryKey[0] ])
else:
pred = And([Eq(typo, key, attributes[i]) for i,key in keys.iteritems()])
result = self.store.find(typo, pred)
if result.count() == 0:
return None
elif result.count() == 1:
return result.one()
else:
return [r for r in result]
elif csvStatement.action is INSERT:
return typo()
def executeStatement(self, csvType, csvStatement):
"""
Executes csv statements matched by the pair csvType, csvStatement.
@param csvType: The CSVType
@param csvStatement: The CSVStatement
@return: Total statements executed or raises a ValueError if the object retrieved with
the pair csvType, csvStatement is None.
"""
obj = self._getObject(csvType, csvStatement)
if not obj:
msg = 'Statement return None in line %d: %s' % (csvStatement.lineNumber, csvStatement.lineContent)
raise ValueError(msg)
objs = []
if type(obj) is list:
objs += obj
else:
objs.append(obj)
i = 0
for _obj in objs:
self._executeStatement(_obj, csvType, csvStatement)
i += 1
return i
def _executeStatement(self, obj, csvType, csvStatement):
"""
Executes a single csv statement
@param csvType: The CSVType
@param csvStatement: The CSVStatement
"""
keys = csvType.keys
attributes = csvType.attributes
values = csvStatement.attributes
if csvStatement.action is INSERT:
pairs = [(key, values[i]) for i,key in keys.iteritems()]
pairs += [(key, values[i]) for i,key in attributes.iteritems()]
for key, value in pairs:
setattr(obj, key, value)
self.store.add(obj)
elif csvStatement.action is UPDATE:
pairs = [(key, values[i]) for i,key in attributes.iteritems()]
for key, value in pairs:
setattr(obj, key, value)
elif csvStatement.action is DELETE:
self.store.remove(obj)
self.store.commit()
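# Illustrative alternative construction (sketch only; the sqlite URI and module
# name are assumptions): passing a database URI instead of a ready-made store
# lets StormORM create its own store internally.
#
#   orm = StormORM(uri='sqlite:')
#   inserted, updated, deleted, total = orm.execute(statements,
#                                                   modName='__main__')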
# class SQLObjectORM(ORM):
# """TODO: implement SQLObject Adaptor"""
# def __init__(self, arg):
# super(SQLObjectORM, self).__init__()
# self.arg = arg
#
#
#
# class SQLAlchemyORM(ORM):
# """TODO: implement SQLAlchemy Adaptor"""
# def __init__(self, arg):
# super(SQLAlchemyORM, self).__init__()
# self.arg = arg
def importClass(className, modName=None, module=None):
if not module:
if not modName:
fields = className.split('.')
modName = '.'.join(fields[:-1])
className = fields[-1]
# module = __import__(modName) # doesnt work
module = __import__(modName, globals(), locals(), [className], -1)
return getattr(module, className)
def isPrimaryKey(cls, attrName):
attr = getattr(cls, attrName)
if hasattr(attr, 'primary') and attr.primary:
return True
else:
return False
def Eq(cls, name, value):
f = attrgetter(name)
return eq(f(cls), value)
def And(preds):
return reduce(and_, preds)
| mit |
da1z/intellij-community | python/lib/Lib/asyncore.py | 70 | 16725 | # -*- Mode: Python -*-
# Id: asyncore.py,v 2.51 2000/09/07 22:29:26 rushing Exp
# Author: Sam Rushing <[email protected]>
# ======================================================================
# Copyright 1996 by Sam Rushing
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Sam
# Rushing not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================
"""Basic infrastructure for asynchronous socket service clients and servers.
There are only two ways to have a program on a single processor do "more
than one thing at a time". Multi-threaded programming is the simplest and
most popular way to do it, but there is another very different technique,
that lets you have nearly all the advantages of multi-threading, without
actually using multiple threads. It's really only practical if your program
is largely I/O bound. If your program is CPU bound, then pre-emptive
scheduled threads are probably what you really need. Network servers are
rarely CPU-bound, however.
If your operating system supports the select() system call in its I/O
library (and nearly all do), then you can use it to juggle multiple
communication channels at once; doing other work while your I/O is taking
place in the "background." Although this strategy can seem strange and
complex, especially at first, it is in many ways easier to understand and
control than multi-threaded programming. The module documented here solves
many of the difficult problems for you, making the task of building
sophisticated high-performance network servers and clients a snap.
"""
import select
import socket
import sys
import time
import os
from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, \
ENOTCONN, ESHUTDOWN, EINTR, EISCONN, errorcode
try:
socket_map
except NameError:
socket_map = {}
class ExitNow(Exception):
pass
def read(obj):
try:
obj.handle_read_event()
except ExitNow:
raise
except:
obj.handle_error()
def write(obj):
try:
obj.handle_write_event()
except ExitNow:
raise
except:
obj.handle_error()
def _exception (obj):
try:
obj.handle_expt_event()
except ExitNow:
raise
except:
obj.handle_error()
def readwrite(obj, flags):
try:
if flags & (select.POLLIN | select.POLLPRI):
obj.handle_read_event()
if flags & select.POLLOUT:
obj.handle_write_event()
if flags & (select.POLLERR | select.POLLHUP | select.POLLNVAL):
obj.handle_expt_event()
except ExitNow:
raise
except:
obj.handle_error()
def poll(timeout=0.0, map=None):
if map is None:
map = socket_map
if map:
r = []; w = []; e = []
for fd, obj in map.items():
is_r = obj.readable()
is_w = obj.writable()
if is_r:
r.append(fd)
if is_w:
w.append(fd)
if is_r or is_w:
e.append(fd)
if [] == r == w == e:
time.sleep(timeout)
else:
try:
r, w, e = select.select(r, w, e, timeout)
except select.error, err:
if err[0] != EINTR:
raise
else:
return
for fd in r:
obj = map.get(fd)
if obj is None:
continue
read(obj)
for fd in w:
obj = map.get(fd)
if obj is None:
continue
write(obj)
for fd in e:
obj = map.get(fd)
if obj is None:
continue
_exception(obj)
def poll2(timeout=0.0, map=None):
# Use the poll() support added to the select module in Python 2.0
if map is None:
map = socket_map
if timeout is not None:
# timeout is in milliseconds
timeout = int(timeout*1000)
pollster = select.poll()
if map:
for fd, obj in map.items():
flags = 0
if obj.readable():
flags |= select.POLLIN | select.POLLPRI
if obj.writable():
flags |= select.POLLOUT
if flags:
# Only check for exceptions if object was either readable
# or writable.
flags |= select.POLLERR | select.POLLHUP | select.POLLNVAL
pollster.register(fd, flags)
try:
r = pollster.poll(timeout)
except select.error, err:
if err[0] != EINTR:
raise
r = []
for fd, flags in r:
obj = map.get(fd)
if obj is None:
continue
readwrite(obj, flags)
poll3 = poll2 # Alias for backward compatibility
def loop(timeout=30.0, use_poll=True, map=None, count=None):
if map is None:
map = socket_map
if use_poll and hasattr(select, 'poll'):
poll_fun = poll2
else:
poll_fun = poll
if count is None:
while map:
poll_fun(timeout, map)
else:
while map and count > 0:
poll_fun(timeout, map)
count = count - 1
class dispatcher:
debug = False
connected = False
accepting = False
closing = False
addr = None
def __init__(self, sock=None, map=None):
if map is None:
self._map = socket_map
else:
self._map = map
if sock:
self.set_socket(sock, map)
# I think it should inherit this anyway
self.socket.setblocking(0)
self.connected = True
# XXX Does the constructor require that the socket passed
# be connected?
try:
self.addr = sock.getpeername()
except socket.error:
# The addr isn't crucial
pass
else:
self.socket = None
def __repr__(self):
status = [self.__class__.__module__+"."+self.__class__.__name__]
if self.accepting and self.addr:
status.append('listening')
elif self.connected:
status.append('connected')
if self.addr is not None:
try:
status.append('%s:%d' % self.addr)
except TypeError:
status.append(repr(self.addr))
return '<%s at %#x>' % (' '.join(status), id(self))
def add_channel(self, map=None):
#self.log_info('adding channel %s' % self)
if map is None:
map = self._map
map[self._fileno] = self
def del_channel(self, map=None):
fd = self._fileno
if map is None:
map = self._map
if map.has_key(fd):
#self.log_info('closing channel %d:%s' % (fd, self))
del map[fd]
self._fileno = None
def create_socket(self, family, type):
self.family_and_type = family, type
self.socket = socket.socket(family, type)
self.socket.setblocking(0)
self._fileno = self.socket
self.add_channel()
def set_socket(self, sock, map=None):
self.socket = sock
## self.__dict__['socket'] = sock
self._fileno = sock
self.add_channel(map)
def set_reuse_addr(self):
# try to re-use a server port if possible
try:
self.socket.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR,
self.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR) | 1
)
except socket.error:
pass
# ==================================================
# predicates for select()
# these are used as filters for the lists of sockets
# to pass to select().
# ==================================================
def readable(self):
return True
def writable(self):
return True
# ==================================================
# socket object methods.
# ==================================================
def listen(self, num):
self.accepting = True
if os.name == 'nt' and num > 5:
num = 1
return self.socket.listen(num)
def bind(self, addr):
self.addr = addr
return self.socket.bind(addr)
def connect(self, address):
self.connected = False
err = self.socket.connect_ex(address)
# XXX Should interpret Winsock return values
if err in (EINPROGRESS, EALREADY, EWOULDBLOCK):
return
if err in (0, EISCONN):
self.addr = address
self.connected = True
self.handle_connect()
else:
raise socket.error, (err, errorcode[err])
def accept(self):
# XXX can return either an address pair or None
try:
conn, addr = self.socket.accept()
return conn, addr
except socket.error, why:
if why[0] == EWOULDBLOCK:
pass
else:
raise
def send(self, data):
try:
result = self.socket.send(data)
return result
except socket.error, why:
if why[0] == EWOULDBLOCK:
return 0
else:
raise
return 0
def recv(self, buffer_size):
try:
data = self.socket.recv(buffer_size)
if not data:
# a closed connection is indicated by signaling
# a read condition, and having recv() return 0.
self.handle_close()
return ''
else:
return data
except socket.error, why:
# winsock sometimes throws ENOTCONN
if why[0] in [ECONNRESET, ENOTCONN, ESHUTDOWN]:
self.handle_close()
return ''
else:
raise
def close(self):
self.del_channel()
self.socket.close()
# cheap inheritance, used to pass all other attribute
# references to the underlying socket object.
def __getattr__(self, attr):
return getattr(self.socket, attr)
# log and log_info may be overridden to provide more sophisticated
# logging and warning methods. In general, log is for 'hit' logging
# and 'log_info' is for informational, warning and error logging.
def log(self, message):
sys.stderr.write('log: %s\n' % str(message))
def log_info(self, message, type='info'):
if __debug__ or type != 'info':
print '%s: %s' % (type, message)
def handle_read_event(self):
if self.accepting:
# for an accepting socket, getting a read implies
# that we are connected
if not self.connected:
self.connected = True
self.handle_accept()
elif not self.connected:
self.handle_connect()
self.connected = True
self.handle_read()
else:
self.handle_read()
def handle_write_event(self):
# getting a write implies that we are connected
if not self.connected:
self.handle_connect()
self.connected = True
self.handle_write()
def handle_expt_event(self):
self.handle_expt()
def handle_error(self):
nil, t, v, tbinfo = compact_traceback()
# sometimes a user repr method will crash.
try:
self_repr = repr(self)
except:
self_repr = '<__repr__(self) failed for object at %0x>' % id(self)
self.log_info(
'uncaptured python exception, closing channel %s (%s:%s %s)' % (
self_repr,
t,
v,
tbinfo
),
'error'
)
self.close()
def handle_expt(self):
self.log_info('unhandled exception', 'warning')
def handle_read(self):
self.log_info('unhandled read event', 'warning')
def handle_write(self):
self.log_info('unhandled write event', 'warning')
def handle_connect(self):
self.log_info('unhandled connect event', 'warning')
def handle_accept(self):
self.log_info('unhandled accept event', 'warning')
def handle_close(self):
self.log_info('unhandled close event', 'warning')
self.close()
# ---------------------------------------------------------------------------
# adds simple buffered output capability, useful for simple clients.
# [for more sophisticated usage use asynchat.async_chat]
# ---------------------------------------------------------------------------
class dispatcher_with_send(dispatcher):
def __init__(self, sock=None, map=None):
dispatcher.__init__(self, sock, map)
self.out_buffer = ''
def initiate_send(self):
num_sent = 0
num_sent = dispatcher.send(self, self.out_buffer[:512])
self.out_buffer = self.out_buffer[num_sent:]
def handle_write(self):
self.initiate_send()
def writable(self):
return (not self.connected) or len(self.out_buffer)
def send(self, data):
if self.debug:
self.log_info('sending %s' % repr(data))
self.out_buffer = self.out_buffer + data
self.initiate_send()
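# Illustrative sketch (not part of the module): the classic dispatcher-based
# HTTP client pattern.  The host, port and request below are assumptions made
# only for this example; instantiate it and call loop() to drive the request.
class _example_http_client(dispatcher):

    def __init__(self, host, path='/'):
        dispatcher.__init__(self)
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.connect((host, 80))
        self.buffer = 'GET %s HTTP/1.0\r\nHost: %s\r\n\r\n' % (path, host)

    def handle_connect(self):
        pass

    def handle_close(self):
        self.close()

    def handle_read(self):
        sys.stdout.write(self.recv(8192))

    def writable(self):
        return len(self.buffer) > 0

    def handle_write(self):
        sent = self.send(self.buffer)
        self.buffer = self.buffer[sent:]

# Usage sketch:  _example_http_client('www.python.org'); loop()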
# ---------------------------------------------------------------------------
# used for debugging.
# ---------------------------------------------------------------------------
def compact_traceback():
t, v, tb = sys.exc_info()
tbinfo = []
assert tb # Must have a traceback
while tb:
tbinfo.append((
tb.tb_frame.f_code.co_filename,
tb.tb_frame.f_code.co_name,
str(tb.tb_lineno)
))
tb = tb.tb_next
# just to be safe
del tb
file, function, line = tbinfo[-1]
info = ' '.join(['[%s|%s|%s]' % x for x in tbinfo])
return (file, function, line), t, v, info
def close_all(map=None):
if map is None:
map = socket_map
for x in map.values():
x.socket.close()
map.clear()
# Asynchronous File I/O:
#
# After a little research (reading man pages on various unixen, and
# digging through the linux kernel), I've determined that select()
# isn't meant for doing asynchronous file i/o.
# Heartening, though - reading linux/mm/filemap.c shows that linux
# supports asynchronous read-ahead. So _MOST_ of the time, the data
# will be sitting in memory for us already when we go to read it.
#
# What other OS's (besides NT) support async file i/o? [VMS?]
#
# Regardless, this is useful for pipes, and stdin/stdout...
if os.name == 'posix':
import fcntl
class file_wrapper:
# here we override just enough to make a file
# look like a socket for the purposes of asyncore.
def __init__(self, fd):
self.fd = fd
def recv(self, *args):
return os.read(self.fd, *args)
def send(self, *args):
return os.write(self.fd, *args)
read = recv
write = send
def close(self):
os.close(self.fd)
def fileno(self):
return self.fd
class file_dispatcher(dispatcher):
def __init__(self, fd, map=None):
dispatcher.__init__(self, None, map)
self.connected = True
self.set_file(fd)
# set it to non-blocking mode
flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
flags = flags | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
def set_file(self, fd):
self._fileno = fd
self.socket = file_wrapper(fd)
self.add_channel()
| apache-2.0 |
peterjoel/servo | tests/wpt/web-platform-tests/webdriver/tests/new_session/support/create.py | 40 | 3019 | # Note that we can only test things here that all implementations must support
valid_data = [
("acceptInsecureCerts", [
False, None,
]),
("browserName", [
None,
]),
("browserVersion", [
None,
]),
("platformName", [
None,
]),
("pageLoadStrategy", [
None,
"none",
"eager",
"normal",
]),
("proxy", [
None,
]),
("timeouts", [
None, {},
{"script": 0, "pageLoad": 2.0, "implicit": 2**53 - 1},
{"script": 50, "pageLoad": 25},
{"script": 500},
]),
("strictFileInteractability", [
True, False, None,
]),
("unhandledPromptBehavior", [
"dismiss",
"accept",
None,
]),
("test:extension", [
None, False, "abc", 123, [],
{"key": "value"},
]),
]
invalid_data = [
("acceptInsecureCerts", [
1, [], {}, "false",
]),
("browserName", [
1, [], {}, False,
]),
("browserVersion", [
1, [], {}, False,
]),
("platformName", [
1, [], {}, False,
]),
("pageLoadStrategy", [
1, [], {}, False,
"invalid",
"NONE",
"Eager",
"eagerblah",
"interactive",
" eager",
"eager "]),
("proxy", [
1, [], "{}",
{"proxyType": "SYSTEM"},
{"proxyType": "systemSomething"},
{"proxy type": "pac"},
{"proxy-Type": "system"},
{"proxy_type": "system"},
{"proxytype": "system"},
{"PROXYTYPE": "system"},
{"proxyType": None},
{"proxyType": 1},
{"proxyType": []},
{"proxyType": {"value": "system"}},
{" proxyType": "system"},
{"proxyType ": "system"},
{"proxyType ": " system"},
{"proxyType": "system "},
]),
("timeouts", [
1, [], "{}", False,
{"invalid": 10},
{"PAGELOAD": 10},
{"page load": 10},
{" pageLoad": 10},
{"pageLoad ": 10},
{"pageLoad": None},
{"pageLoad": False},
{"pageLoad": []},
{"pageLoad": "10"},
{"pageLoad": 2.5},
{"pageLoad": -1},
{"pageLoad": 2**53},
{"pageLoad": {"value": 10}},
{"pageLoad": 10, "invalid": 10},
]),
("strictFileInteractability", [
1, [], {}, "false",
]),
("unhandledPromptBehavior", [
1, [], {}, False,
"DISMISS",
"dismissABC",
"Accept",
" dismiss",
"dismiss ",
])
]
invalid_extensions = [
"automaticInspection",
"automaticProfiling",
"browser",
"chromeOptions",
"ensureCleanSession",
"firefox",
"firefox_binary",
"firefoxOptions",
"initialBrowserUrl",
"javascriptEnabled",
"logFile",
"logLevel",
"nativeEvents",
"platform",
"platformVersion",
"profile",
"requireWindowFocus",
"safari.options",
"seleniumProtocol",
"trustAllSSLCertificates",
"version",
]
| mpl-2.0 |
Santinell/ansible-modules-core | cloud/digital_ocean/digital_ocean_domain.py | 15 | 7224 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: digital_ocean_domain
short_description: Create/delete a DNS record in DigitalOcean
description:
- Create/delete a DNS record in DigitalOcean.
version_added: "1.6"
author: "Michael Gregson (@mgregson)"
options:
state:
description:
- Indicate desired state of the target.
default: present
choices: ['present', 'absent']
api_token:
description:
- DigitalOcean api token.
version_added: "1.9.5"
id:
description:
- Numeric, the droplet id you want to operate on.
name:
description:
- String, this is the name of the droplet - must be formatted by hostname rules, or the name of a SSH key, or the name of a domain.
ip:
description:
- The IP address to point a domain at.
notes:
- Two environment variables can be used, DO_API_KEY and DO_API_TOKEN. They both refer to the v2 token.
- As of Ansible 1.9.5 and 2.0, Version 2 of the DigitalOcean API is used, this removes C(client_id) and C(api_key) options in favor of C(api_token).
- If you are running Ansible 1.9.4 or earlier you might not be able to use the included version of this module as the API version used has been retired.
requirements:
- "python >= 2.6"
- dopy
'''
EXAMPLES = '''
# Create a domain record
- digital_ocean_domain: >
state=present
name=my.digitalocean.domain
ip=127.0.0.1
# Create a droplet and a corresponding domain record
- digital_ocean: >
state=present
name=test_droplet
size_id=1gb
region_id=sgp1
image_id=ubuntu-14-04-x64
register: test_droplet
- digital_ocean_domain: >
state=present
name={{ test_droplet.droplet.name }}.my.domain
ip={{ test_droplet.droplet.ip_address }}
'''
import os
import time
try:
from dopy.manager import DoError, DoManager
HAS_DOPY = True
except ImportError as e:
HAS_DOPY = False
class TimeoutError(DoError):
def __init__(self, msg, id):
super(TimeoutError, self).__init__(msg)
self.id = id
class JsonfyMixIn(object):
def to_json(self):
return self.__dict__
class DomainRecord(JsonfyMixIn):
manager = None
def __init__(self, json):
self.__dict__.update(json)
update_attr = __init__
def update(self, data = None, record_type = None):
json = self.manager.edit_domain_record(self.domain_id,
self.id,
record_type if record_type is not None else self.record_type,
data if data is not None else self.data)
self.__dict__.update(json)
return self
def destroy(self):
json = self.manager.destroy_domain_record(self.domain_id, self.id)
return json
class Domain(JsonfyMixIn):
manager = None
def __init__(self, domain_json):
self.__dict__.update(domain_json)
def destroy(self):
self.manager.destroy_domain(self.id)
def records(self):
json = self.manager.all_domain_records(self.id)
return map(DomainRecord, json)
@classmethod
def add(cls, name, ip):
json = cls.manager.new_domain(name, ip)
return cls(json)
@classmethod
def setup(cls, api_token):
cls.manager = DoManager(None, api_token, api_version=2)
DomainRecord.manager = cls.manager
@classmethod
def list_all(cls):
domains = cls.manager.all_domains()
return map(cls, domains)
@classmethod
def find(cls, name=None, id=None):
if name is None and id is None:
return False
domains = Domain.list_all()
if id is not None:
for domain in domains:
if domain.id == id:
return domain
if name is not None:
for domain in domains:
if domain.name == name:
return domain
return False
def core(module):
def getkeyordie(k):
v = module.params[k]
if v is None:
module.fail_json(msg='Unable to load %s' % k)
return v
try:
api_token = module.params['api_token'] or os.environ['DO_API_TOKEN'] or os.environ['DO_API_KEY']
except KeyError, e:
module.fail_json(msg='Unable to load %s' % e.message)
changed = True
state = module.params['state']
Domain.setup(api_token)
if state in ('present'):
domain = Domain.find(id=module.params["id"])
if not domain:
domain = Domain.find(name=getkeyordie("name"))
if not domain:
domain = Domain.add(getkeyordie("name"),
getkeyordie("ip"))
module.exit_json(changed=True, domain=domain.to_json())
else:
records = domain.records()
at_record = None
for record in records:
if record.name == "@" and record.record_type == 'A':
at_record = record
if not at_record.data == getkeyordie("ip"):
record.update(data=getkeyordie("ip"), record_type='A')
module.exit_json(changed=True, domain=Domain.find(id=record.domain_id).to_json())
module.exit_json(changed=False, domain=domain.to_json())
elif state in ('absent'):
domain = None
if "id" in module.params:
domain = Domain.find(id=module.params["id"])
if not domain and "name" in module.params:
domain = Domain.find(name=module.params["name"])
if not domain:
module.exit_json(changed=False, msg="Domain not found.")
event_json = domain.destroy()
module.exit_json(changed=True, event=event_json)
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(choices=['present', 'absent'], default='present'),
api_token = dict(aliases=['API_TOKEN'], no_log=True),
name = dict(type='str'),
id = dict(aliases=['droplet_id'], type='int'),
ip = dict(type='str'),
),
required_one_of = (
['id', 'name'],
),
)
if not HAS_DOPY:
module.fail_json(msg='dopy required for this module')
try:
core(module)
except TimeoutError as e:
module.fail_json(msg=str(e), id=e.id)
except (DoError, Exception) as e:
module.fail_json(msg=str(e))
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
simomarsili/scikit-msa | skmsa/filters.py | 1 | 10109 | # -*- coding: utf-8 -*-
# Copyright (C) 2017, Simone Marsili <[email protected]>
# License: BSD 3 clause
"""Filtering functions."""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import ( # pylint: disable=redefined-builtin, unused-import
bytes, dict, int, list, object, range, str,
ascii, chr, hex, input, next, oct, open,
pow, round, super,
filter, map, zip)
import logging
import skmsa.utils
from skmsa.utils import timeit
__all__ = [
'flt',
'redundant',
'redundant_cdhit',
'redundant_sorted',
'gaps',
'gaps_greedy',
]
logger = logging.getLogger(__name__)
@timeit
def redundant_cdhit(seqs, thr=0.9):
"""Indices of non-redundant records from cd-hit output.
cdhit: http://weizhongli-lab.org/cd-hit/
    cd-hit clusters sequences that meet a similarity threshold; the returned
    record indices pick one representative record per cluster.
Parameters
----------
seqs : iterable
Sequence strings.
thr : float, optional (0.9)
Sequence identity threshold (cd-hit '-c <thr>' option).
Returns
-------
(records, positions) : tuple
records: indices of non-redundant records.
"""
import subprocess
import skmsa.parser
if not thr:
# loop over sequences
nrec, npos = _iterable_shape(seqs)
return list(range(nrec)), list(range(npos))
# check for cd-hit on path
cdhit_exe = skmsa.utils.is_command(['cd-hit', 'cdhit'])
if cdhit_exe is None:
        logging.error("cd-hit not found. Redundant records won't be filtered.")
nrec, npos = _iterable_shape(seqs)
return list(range(nrec)), list(range(npos))
if not 0.7 <= thr <= 1.0:
raise ValueError(
'Identity threshold should be in the [0.7,1.0] interval.')
# open tmp files
openf = skmsa.utils.open_tempfile
with openf() as fpi, openf() as fpo:
# write records to a FASTA formatted file using counter value as header
# remove gaps (cdhit takes unaligned sequences as input)
nrec = 0
for seq in seqs:
if nrec == 0:
npos = len(seq)
seq = ''.join(seq)
print('>%s\n%s' % ('id_'+str(nrec), seq.replace('-', '')),
file=fpi)
nrec += 1
fpi.flush()
# run cd-hit
subprocess.call('%s -i %s -o %s -c %s > cdhit.log' %
(cdhit_exe, fpi.name, fpo.name, thr),
shell=True)
fpo.flush()
# indices of non-redundant records from cdhit output headers
cdhit_recs, _ = skmsa.parser.parse(fpo)
records = [int(rec.name.split('_')[1]) for rec in cdhit_recs]
positions = list(range(npos)) # return all positions
nrec1 = len(records)
redundant_cdhit.msg = (
'Removed %s redundant records (%s/%s left)'
% (nrec - nrec1, nrec1, nrec))
return (records, positions)
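# Illustrative usage (sketch): given an iterable of aligned sequence strings,
#
#   records, positions = redundant_cdhit(seqs, thr=0.9)
#
# `records` holds the indices of one representative record per cd-hit cluster;
# `positions` is always the full range of columns for this filter.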
@timeit
def redundant_sorted(seqs, thr=0.9):
"""Indices of non-redundant records via sorting and local comparison.
Sequences are sorted alphabetically and redundant ones are detected by
searching for similar pairs in in sorting order.
Parameters
----------
seqs : iterable
Sequence strings.
thr : float, optional (0.9)
Sequence identity threshold.
Returns
-------
(records, positions) : tuple
records: indices of non-redundant records.
"""
from collections import deque
from random import shuffle
def ncommon(x, y): # pylint: disable=invalid-name
"""Count common elements between x and y of equal length."""
return sum(1 for a, b in zip(x, y) if a == b)
def insorted(indices, data, order=None):
"""Return a sorted copy of data `iterable`."""
if order:
# reorder each entry according to `order`
data = ([entry[k] for k in order] for entry in data)
stack = sorted(zip(indices, data), key=lambda x: x[1])
return zip(*stack)
def filter_sorted(indices, stack, thr):
"""Return a filtered copy of a sorted stack."""
np = len(stack[0])
naverage = 2
finds = [0]
seq0 = stack[0]
sims = deque([np] * naverage, naverage)
left = []
for k, seq in enumerate(stack):
if k == 0:
continue
similarity = ncommon(seq0, seq)
sims.append(similarity)
if sum(1 for s in sims if s < np*thr) == naverage:
left = [x for x in left
if ncommon(seq, x[1]) < np*thr]
finds = finds + [x[0] for x in left]
finds.append(k)
seq0 = seq
sims = deque([np] * naverage, naverage)
left = []
else:
if similarity < np*thr:
left.append((k, seq))
return ([indices[k] for k in finds],
[stack[k] for k in finds])
if not thr:
# loop over sequences
nrec, npos = _iterable_shape(seqs)
return list(range(nrec)), list(range(npos))
stack = list(seqs)
nrec = len(stack)
npos = len(stack[0])
records = list(range(nrec))
positions = list(range(npos))
ix = list(range(npos))
records, stack = insorted(records, stack)
c = 1
# filter
records, filtered = filter_sorted(records, stack, thr)
print('left: ', c, len(records))
niter = 5
while c < niter:
c += 1
shuffle(ix)
records, stack = insorted(records, filtered, order=ix)
# filter again
records, filtered = filter_sorted(records, stack, thr)
print('left: ', c, len(records))
nrec1 = len(records)
redundant_sorted.msg = (
'Removed %s redundant records (%s/%s left)'
% (nrec - nrec1, nrec1, nrec))
return sorted(records), positions
@skmsa.utils.timeit
def gaps_greedy(seqs, thr=0.1, step=0.05):
"""Indices of records/positions s.t. the gap grequency for each
record/position in the sub-alignment is always <= `thr`.
Parameters
----------
seqs : iterable
Sequence strings.
thr : {float, tuple}, optional (0.1)
Max gap frequency for records/positions.
    step : float, optional (0.05)
Step for the iterative reduction of threshold.
Returns
-------
(records, positions) : tuple
Indices of non-gappy records/positions.
"""
import collections
import numpy
if not thr:
nrec, npos = _iterable_shape(seqs)
return list(range(nrec)), list(range(npos))
try:
# check for separate thr values
thr_record, thr_position = thr
except (TypeError, ValueError):
thr_record = thr_position = thr
if not all(0.01 <= thr <= 1.0 for thr in [thr_record, thr_position]):
raise ValueError(
'Max. gap fraction should be in the [0.01,1] interval.')
# sets gap indices for records/positions
# nrec: total number of records
# npos: sequence length
rgaps = collections.defaultdict(set)
pgaps = collections.defaultdict(set)
nrec = 0
for seq in seqs:
if nrec == 0:
npos = len(seq)
pgs = [k for k, aa in enumerate(seq) if aa == '-']
rgaps[nrec].update(pgs)
for p in pgs:
pgaps[p].add(nrec)
nrec += 1
if nrec == 0:
# return empty list of indices
return ([], [])
# initialize set of indices of active recs/positions
records = set(range(nrec))
positions = set(range(npos))
nar = nrec
thr = 1.0
while True:
thr -= step
if thr < thr_record - step*0.1:
break
# indices of gappy positions
pthr = nar * thr
gappy_positions = {p for p in positions
if len(pgaps[p]) > pthr}
# indices of gappy records
rthr = (npos - len(gappy_positions)) * thr
gappy_records = {r for r in records
if len(rgaps[r] - gappy_positions) > rthr}
# remove gappy records
records -= gappy_records
nar = len(records)
for p in pgaps:
pgaps[p] -= gappy_records
# remove gappy positions
gappy_positions = set([p for p in positions
if len(pgaps[p]) >
nar * thr_position])
positions -= gappy_positions
nar = len(records)
nap = len(positions)
gaps_greedy.msg = (
'Removed %s gappy records (%s/%s left) and '
'%s gappy positions (%s/%s left)'
% (nrec - nar, nar, nrec,
npos - nap, nap, npos))
return (sorted(list(records)), sorted(list(positions)))
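# Illustrative usage (sketch): `thr` may be a single value or a
# (record_threshold, position_threshold) pair, e.g.
#
#   records, positions = gaps_greedy(seqs, thr=(0.5, 0.1))
#
# keeps records with at most 50% gaps and positions with at most 10% gaps,
# evaluated jointly on the surviving sub-alignment.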
# set default filters
# redundant records filter
if skmsa.utils.is_command(['cd-hit', 'cdhit']):
redundant = redundant_cdhit
else:
logging.warning("Can't find cdhit executable, filter using "
"filters.redundant_sorted.")
redundant = redundant_sorted
# gappy records/positions filter
gaps = gaps_greedy
def flt(name, seqs, *args, **kwargs):
"""Interface to generic filter function with name `name`.
Parameters
----------
name : str
Name of the filtering function to which args and kwargs will be passed.
func(seqs, *args, **kwargs)
seqs : iterable
Sequence strings.
Returns
-------
(records, positions) : tuple
Indices of records/positions.
"""
try:
func = globals()[name]
result = func(seqs, *args, **kwargs)
flt.msg = func.msg
except KeyError:
logging.exception('Invalid filter name: %s.', name)
# return full list of indices
n, m = _iterable_shape(seqs)
result = list(range(n)), list(range(m))
flt.msg = 'Invalid filter name. No filtering.'
return result
def _iterable_shape(iterable):
iterator = iter(iterable)
m = len(next(iterator))
n = sum(1 for s in iterator) + 1
return n, m
| bsd-3-clause |
MarkWh1te/xueqiu_predict | p3_env/lib/python3.5/site-packages/pygments/lexers/parasail.py | 23 | 2737 | # -*- coding: utf-8 -*-
"""
pygments.lexers.parasail
~~~~~~~~~~~~~~~~~~~~~~~~
Lexer for ParaSail.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Literal
__all__ = ['ParaSailLexer']
class ParaSailLexer(RegexLexer):
"""
For `ParaSail <http://www.parasail-lang.org>`_ source code.
.. versionadded:: 2.1
"""
name = 'ParaSail'
aliases = ['parasail']
filenames = ['*.psi', '*.psl']
mimetypes = ['text/x-parasail']
flags = re.MULTILINE
tokens = {
'root': [
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'\b(and|or|xor)=', Operator.Word),
(r'\b(and(\s+then)?|or(\s+else)?|xor|rem|mod|'
r'(is|not)\s+null)\b',
Operator.Word),
# Keywords
(r'\b(abs|abstract|all|block|class|concurrent|const|continue|'
r'each|end|exit|extends|exports|forward|func|global|implements|'
r'import|in|interface|is|lambda|locked|new|not|null|of|op|'
r'optional|private|queued|ref|return|reverse|separate|some|'
r'type|until|var|with|'
# Control flow
r'if|then|else|elsif|case|for|while|loop)\b',
Keyword.Reserved),
(r'(abstract\s+)?(interface|class|op|func|type)',
Keyword.Declaration),
# Literals
(r'"[^"]*"', String),
(r'\\[\'ntrf"0]', String.Escape),
(r'#[a-zA-Z]\w*', Literal), # Enumeration
include('numbers'),
(r"'[^']'", String.Char),
(r'[a-zA-Z]\w*', Name),
# Operators and Punctuation
(r'(<==|==>|<=>|\*\*=|<\|=|<<=|>>=|==|!=|=\?|<=|>=|'
r'\*\*|<<|>>|=>|:=|\+=|-=|\*=|\||\|=|/=|\+|-|\*|/|'
r'\.\.|<\.\.|\.\.<|<\.\.<)',
Operator),
(r'(<|>|\[|\]|\(|\)|\||:|;|,|.|\{|\}|->)',
Punctuation),
(r'\n+', Text),
],
'numbers': [
(r'\d[0-9_]*#[0-9a-fA-F][0-9a-fA-F_]*#', Number.Hex), # any base
(r'0[xX][0-9a-fA-F][0-9a-fA-F_]*', Number.Hex), # C-like hex
(r'0[bB][01][01_]*', Number.Bin), # C-like bin
(r'\d[0-9_]*\.\d[0-9_]*[eE][+-]\d[0-9_]*', # float exp
Number.Float),
(r'\d[0-9_]*\.\d[0-9_]*', Number.Float), # float
(r'\d[0-9_]*', Number.Integer), # integer
],
}
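# Illustrative usage (sketch; the ParaSail snippet is an assumption made only
# for this example):
#
#   from pygments import highlight
#   from pygments.formatters import HtmlFormatter
#   code = 'func Double(X : Integer) -> Integer is\n   return 2 * X\nend func Double\n'
#   print(highlight(code, ParaSailLexer(), HtmlFormatter()))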
| mit |
gram526/VTK | Web/Core/Testing/Python/TestDataEncoder.py | 5 | 2334 | import sys
import vtk
import array
from vtk.test import Testing
class TestDataEncoder(Testing.vtkTest):
def testEncodings(self):
# Render something
cylinder = vtk.vtkCylinderSource()
cylinder.SetResolution(8)
cylinderMapper = vtk.vtkPolyDataMapper()
cylinderMapper.SetInputConnection(cylinder.GetOutputPort())
cylinderActor = vtk.vtkActor()
cylinderActor.SetMapper(cylinderMapper)
cylinderActor.RotateX(30.0)
cylinderActor.RotateY(-45.0)
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
ren.AddActor(cylinderActor)
renWin.SetSize(200, 200)
ren.ResetCamera()
ren.GetActiveCamera().Zoom(1.5)
renWin.Render()
# Get a vtkImageData with the rendered output
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(renWin)
w2if.SetShouldRerender(1)
w2if.SetReadFrontBuffer(0)
w2if.Update()
imgData = w2if.GetOutput()
# Use vtkDataEncoder to convert the image to PNG format and Base64 encode it
encoder = vtk.vtkDataEncoder()
base64String = encoder.EncodeAsBase64Png(imgData).encode('ascii')
# Now Base64 decode the string back to PNG image data bytes
outputBuffer = bytearray(120000)
inputArray = array.array('B', base64String)
utils = vtk.vtkIOCore.vtkBase64Utilities()
actualLength = utils.Decode(inputArray, 120000, outputBuffer)
outputArray = bytearray(actualLength)
outputArray[:] = outputBuffer[0:actualLength]
# And write those bytes to the disk as an actual PNG image file
with open('TestDataEncoder.png', 'wb') as fd:
fd.write(outputArray)
# Create a vtkTesting object and specify a baseline image
rtTester = vtk.vtkTesting()
for arg in sys.argv[1:]:
rtTester.AddArgument(arg)
rtTester.AddArgument("-V")
rtTester.AddArgument("TestDataEncoder.png")
# Perform the image comparison test and print out the result.
result = rtTester.RegressionTest("TestDataEncoder.png", 0.0)
if result == 0:
raise Exception("TestDataEncoder failed.")
if __name__ == "__main__":
Testing.main([(TestDataEncoder, 'test')])
| bsd-3-clause |
pku9104038/edx-platform | common/djangoapps/edxmako/shortcuts.py | 1 | 4531 | # Copyright (c) 2008 Mikeal Rogers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.template import Context
from django.http import HttpResponse
import logging
from microsite_configuration.middleware import MicrositeConfiguration
from edxmako import lookup_template
import edxmako.middleware
from django.conf import settings
from django.core.urlresolvers import reverse
log = logging.getLogger(__name__)
def marketing_link(name):
"""Returns the correct URL for a link to the marketing site
depending on if the marketing site is enabled
Since the marketing site is enabled by a setting, we have two
possible URLs for certain links. This function is to decides
which URL should be provided.
"""
# link_map maps URLs from the marketing site to the old equivalent on
# the Django site
link_map = settings.MKTG_URL_LINK_MAP
enable_mktg_site = MicrositeConfiguration.get_microsite_configuration_value(
'ENABLE_MKTG_SITE',
settings.FEATURES.get('ENABLE_MKTG_SITE', False)
)
if enable_mktg_site and name in settings.MKTG_URLS:
# special case for when we only want the root marketing URL
if name == 'ROOT':
return settings.MKTG_URLS.get('ROOT')
return settings.MKTG_URLS.get('ROOT') + settings.MKTG_URLS.get(name)
# only link to the old pages when the marketing site isn't on
elif not enable_mktg_site and name in link_map:
# don't try to reverse disabled marketing links
if link_map[name] is not None:
return reverse(link_map[name])
else:
log.warning("Cannot find corresponding link for name: {name}".format(name=name))
return '#'
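# Illustrative settings shapes (assumptions for this example, not actual edX
# defaults):
#
#   MKTG_URLS = {'ROOT': 'https://www.example.org', 'ABOUT': '/about'}
#   MKTG_URL_LINK_MAP = {'ABOUT': 'about_edx'}   # marketing name -> Django URL name
#
# With ENABLE_MKTG_SITE on, marketing_link('ABOUT') returns
# 'https://www.example.org/about'; with it off, it reverses 'about_edx' instead.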
def marketing_link_context_processor(request):
"""
A django context processor to give templates access to marketing URLs
Returns a dict whose keys are the marketing link names usable with the
marketing_link method (e.g. 'ROOT', 'CONTACT', etc.) prefixed with
'MKTG_URL_' and whose values are the corresponding URLs as computed by the
marketing_link method.
"""
return dict(
[
("MKTG_URL_" + k, marketing_link(k))
for k in (
settings.MKTG_URL_LINK_MAP.viewkeys() |
settings.MKTG_URLS.viewkeys()
)
]
)
def render_to_string(template_name, dictionary, context=None, namespace='main'):
# see if there is an override template defined in the microsite
template_name = MicrositeConfiguration.get_microsite_template_path(template_name)
context_instance = Context(dictionary)
# add dictionary to context_instance
context_instance.update(dictionary or {})
# collapse context_instance to a single dictionary for mako
context_dictionary = {}
context_instance['settings'] = settings
context_instance['EDX_ROOT_URL'] = settings.EDX_ROOT_URL
context_instance['marketing_link'] = marketing_link
# In various testing contexts, there might not be a current request context.
if edxmako.middleware.requestcontext is not None:
for d in edxmako.middleware.requestcontext:
context_dictionary.update(d)
for d in context_instance:
context_dictionary.update(d)
if context:
context_dictionary.update(context)
# fetch and render template
template = lookup_template(namespace, template_name)
return template.render_unicode(**context_dictionary)
def render_to_response(template_name, dictionary=None, context_instance=None, namespace='main', **kwargs):
"""
Returns a HttpResponse whose content is filled with the result of calling
lookup.get_template(args[0]).render with the passed arguments.
"""
# see if there is an override template defined in the microsite
template_name = MicrositeConfiguration.get_microsite_template_path(template_name)
dictionary = dictionary or {}
return HttpResponse(render_to_string(template_name, dictionary, context_instance, namespace), **kwargs)
| agpl-3.0 |
karlito40/servo | tests/wpt/css-tests/tools/pywebsocket/src/test/test_mock.py | 496 | 5168 | #!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for mock module."""
import Queue
import threading
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from test import mock
class MockConnTest(unittest.TestCase):
"""A unittest for MockConn class."""
def setUp(self):
self._conn = mock.MockConn('ABC\r\nDEFG\r\n\r\nHIJK')
def test_readline(self):
self.assertEqual('ABC\r\n', self._conn.readline())
self.assertEqual('DEFG\r\n', self._conn.readline())
self.assertEqual('\r\n', self._conn.readline())
self.assertEqual('HIJK', self._conn.readline())
self.assertEqual('', self._conn.readline())
def test_read(self):
self.assertEqual('ABC\r\nD', self._conn.read(6))
self.assertEqual('EFG\r\n\r\nHI', self._conn.read(9))
self.assertEqual('JK', self._conn.read(10))
self.assertEqual('', self._conn.read(10))
def test_read_and_readline(self):
self.assertEqual('ABC\r\nD', self._conn.read(6))
self.assertEqual('EFG\r\n', self._conn.readline())
self.assertEqual('\r\nHIJK', self._conn.read(9))
self.assertEqual('', self._conn.readline())
def test_write(self):
self._conn.write('Hello\r\n')
self._conn.write('World\r\n')
self.assertEqual('Hello\r\nWorld\r\n', self._conn.written_data())
class MockBlockingConnTest(unittest.TestCase):
"""A unittest for MockBlockingConn class."""
def test_read(self):
"""Tests that data put to MockBlockingConn by put_bytes method can be
read from it.
"""
class LineReader(threading.Thread):
"""A test class that launches a thread, calls readline on the
specified conn repeatedly and puts the read data to the specified
queue.
"""
def __init__(self, conn, queue):
threading.Thread.__init__(self)
self._queue = queue
self._conn = conn
self.setDaemon(True)
self.start()
def run(self):
while True:
data = self._conn.readline()
self._queue.put(data)
conn = mock.MockBlockingConn()
queue = Queue.Queue()
reader = LineReader(conn, queue)
self.failUnless(queue.empty())
conn.put_bytes('Foo bar\r\n')
read = queue.get()
self.assertEqual('Foo bar\r\n', read)
class MockTableTest(unittest.TestCase):
"""A unittest for MockTable class."""
def test_create_from_dict(self):
table = mock.MockTable({'Key': 'Value'})
self.assertEqual('Value', table.get('KEY'))
self.assertEqual('Value', table['key'])
def test_create_from_list(self):
table = mock.MockTable([('Key', 'Value')])
self.assertEqual('Value', table.get('KEY'))
self.assertEqual('Value', table['key'])
def test_create_from_tuple(self):
table = mock.MockTable((('Key', 'Value'),))
self.assertEqual('Value', table.get('KEY'))
self.assertEqual('Value', table['key'])
def test_set_and_get(self):
table = mock.MockTable()
self.assertEqual(None, table.get('Key'))
table['Key'] = 'Value'
self.assertEqual('Value', table.get('Key'))
self.assertEqual('Value', table.get('key'))
self.assertEqual('Value', table.get('KEY'))
self.assertEqual('Value', table['Key'])
self.assertEqual('Value', table['key'])
self.assertEqual('Value', table['KEY'])
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
| mpl-2.0 |