| code (stringlengths 2-1.05M) | repo_name (stringlengths 5-104) | path (stringlengths 4-251) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int32, 2-1.05M) |
---|---|---|---|---|---|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""
The pyflink version will be consistent with the flink version and follow PEP 440.
.. seealso:: https://www.python.org/dev/peps/pep-0440
"""
__version__ = "1.13.dev0"
| aljoscha/flink | flink-python/pyflink/version.py | Python | apache-2.0 | 1,132 |
from __future__ import absolute_import
from collections import namedtuple
# Other useful structs
TopicPartition = namedtuple("TopicPartition",
["topic", "partition"])
BrokerMetadata = namedtuple("BrokerMetadata",
["nodeId", "host", "port", "rack"])
PartitionMetadata = namedtuple("PartitionMetadata",
["topic", "partition", "leader", "replicas", "isr", "error"])
OffsetAndMetadata = namedtuple("OffsetAndMetadata",
# TODO add leaderEpoch: OffsetAndMetadata(offset, leaderEpoch, metadata)
["offset", "metadata"])
OffsetAndTimestamp = namedtuple("OffsetAndTimestamp",
["offset", "timestamp"])
# Define retry policy for async producer
# Limit value: int >= 0, 0 means no retries
RetryOptions = namedtuple("RetryOptions",
["limit", "backoff_ms", "retry_on_timeouts"])
| mumrah/kafka-python | kafka/structs.py | Python | apache-2.0 | 801 |
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cloudferrylib.base.action import action
from cloudferrylib.os.actions import snap_transfer
from cloudferrylib.os.actions import task_transfer
from cloudferrylib.utils.drivers import ssh_ceph_to_ceph
from cloudferrylib.utils import rbd_util
from cloudferrylib.utils import utils as utl
import copy
OLD_ID = 'old_id'
class DeployVolSnapshots(action.Action):
def run(self, storage_info=None, identity_info=None, **kwargs):
storage_info = copy.deepcopy(storage_info)
deploy_info = copy.deepcopy(storage_info)
deploy_info.update(identity_info)
storage_info.update(identity_info)
volume_resource = self.cloud.resources[utl.STORAGE_RESOURCE]
for vol_id, vol in deploy_info[utl.VOLUMES_TYPE].iteritems():
if vol['snapshots']:
vol_info = vol[utl.VOLUME_BODY]
snapshots_list = \
[snap_info for snap_info in vol['snapshots'].values()]
snapshots_list.sort(key=lambda x: x['created_at'])
for snap in snapshots_list:
if snapshots_list.index(snap) == 0:
act_snap_transfer = \
snap_transfer.SnapTransfer(
self.init,
ssh_ceph_to_ceph.SSHCephToCeph,
1)
else:
snap_num = snapshots_list.index(snap)
snap['prev_snapname'] = \
snapshots_list[snap_num - 1]['name']
act_snap_transfer = \
snap_transfer.SnapTransfer(
self.init,
ssh_ceph_to_ceph.SSHCephToCeph,
2)
act_snap_transfer.run(volume=vol_info, snapshot_info=snap)
volume_resource.create_snapshot(
volume_id=vol_id,
display_name=snap['display_name'],
display_description=snap['display_description'])
act_snap_transfer = snap_transfer.SnapTransfer(
self.init,
ssh_ceph_to_ceph.SSHCephToCeph,
3)
act_snap_transfer.run(volume=vol_info,
snapshot_info=snapshots_list[-1])
for snap in snapshots_list:
if volume_resource.config.storage.host:
act_delete_redundant_snap = \
rbd_util.RbdUtil(cloud=self.cloud,
config_migrate=self.cfg.migrate,
host=vol_info[utl.HOST_DST])
act_delete_redundant_snap.snap_rm(
vol_info[utl.PATH_DST],
snap['name'])
else:
act_delete_redundant_snap = \
rbd_util.RbdUtil(cloud=self.cloud,
config_migrate=self.cfg.migrate)
act_delete_redundant_snap.snap_rm(
vol_info[utl.PATH_DST],
snap['name'], vol_info[utl.HOST_DST])
else:
one_volume_info = {
'one_volume_info': {
utl.VOLUMES_TYPE: {
vol_id: vol
}
}
}
act_transport_vol_data = \
task_transfer.TaskTransfer(self.init,
'SSHCephToCeph',
input_info='one_volume_info')
act_transport_vol_data.run(**one_volume_info)
return {}
| japaniel/CloudFerry | cloudferrylib/os/actions/deploy_snapshots.py | Python | apache-2.0 | 4,490 |
"""
mbed SDK
Copyright (c) 2011-2017 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Title: GNU ARM Eclipse (http://gnuarmeclipse.github.io) exporter.
Description: Creates a managed build project that can be imported by
the GNU ARM Eclipse plug-ins.
Author: Liviu Ionescu <[email protected]>
"""
import os
import copy
import tempfile
import shutil
from subprocess import call, Popen, PIPE
from os.path import splitext, basename, relpath, dirname, exists, join
from random import randint
from json import load
from tools.export.exporters import Exporter
from tools.options import list_profiles
from tools.targets import TARGET_MAP
from tools.utils import NotSupportedException
from tools.build_api import prepare_toolchain
# =============================================================================
class UID:
"""
Helper class, used to generate unique ids required by .cproject symbols.
"""
@property
def id(self):
return "%0.9u" % randint(0, 999999999)
# Global UID generator instance.
# Passed to the template engine, and referred as {{u.id}}.
# Each invocation generates a new number.
u = UID()
# =============================================================================
class GNUARMEclipse(Exporter):
NAME = 'GNU ARM Eclipse'
TOOLCHAIN = 'GCC_ARM'
# Indirectly support all GCC_ARM targets.
TARGETS = [target for target, obj in TARGET_MAP.iteritems()
if 'GCC_ARM' in obj.supported_toolchains]
# override
@property
def flags(self):
"""Returns a dictionary of toolchain flags.
Keys of the dictionary are:
cxx_flags - c++ flags
c_flags - c flags
ld_flags - linker flags
asm_flags - assembler flags
common_flags - common options
The difference from the parent function is that it does not
add macro definitions, since they are passed separately.
"""
config_header = self.toolchain.get_config_header()
flags = {key + "_flags": copy.deepcopy(value) for key, value
in self.toolchain.flags.iteritems()}
if config_header:
config_header = relpath(config_header,
self.resources.file_basepath[config_header])
flags['c_flags'] += self.toolchain.get_config_option(config_header)
flags['cxx_flags'] += self.toolchain.get_config_option(
config_header)
return flags
def toolchain_flags(self, toolchain):
"""Returns a dictionary of toolchain flags.
Keys of the dictionary are:
cxx_flags - c++ flags
c_flags - c flags
ld_flags - linker flags
asm_flags - assembler flags
common_flags - common options
The difference from the above is that it takes a parameter.
"""
# Note: use the config options from the currently selected toolchain.
config_header = self.toolchain.get_config_header()
flags = {key + "_flags": copy.deepcopy(value) for key, value
in toolchain.flags.iteritems()}
if config_header:
config_header = relpath(config_header,
self.resources.file_basepath[config_header])
header_options = self.toolchain.get_config_option(config_header)
flags['c_flags'] += header_options
flags['cxx_flags'] += header_options
return flags
# override
def generate(self):
"""
Generate the .project and .cproject files.
"""
if not self.resources.linker_script:
raise NotSupportedException("No linker script found.")
print
print 'Create a GNU ARM Eclipse C++ managed project'
print 'Project name: {0}'.format(self.project_name)
print 'Target: {0}'.format(self.toolchain.target.name)
print 'Toolchain: {0}'.format(self.TOOLCHAIN)
self.resources.win_to_unix()
# TODO: use some logger to display additional info if verbose
libraries = []
# print 'libraries'
# print self.resources.libraries
for lib in self.resources.libraries:
l, _ = splitext(basename(lib))
libraries.append(l[3:])
self.system_libraries = [
'stdc++', 'supc++', 'm', 'c', 'gcc', 'nosys'
]
# Read in all profiles, we'll extract compiler options.
profiles = self.get_all_profiles()
profile_ids = [s.lower() for s in profiles]
profile_ids.sort()
# TODO: get the list from existing .cproject
build_folders = [s.capitalize() for s in profile_ids]
build_folders.append('BUILD')
# print build_folders
objects = [self.filter_dot(s) for s in self.resources.objects]
for bf in build_folders:
objects = [o for o in objects if not o.startswith(bf + '/')]
# print 'objects'
# print objects
self.compute_exclusions()
self.include_path = [
self.filter_dot(s) for s in self.resources.inc_dirs]
print 'Include folders: {0}'.format(len(self.include_path))
self.as_defines = self.toolchain.get_symbols(True)
self.c_defines = self.toolchain.get_symbols()
self.cpp_defines = self.c_defines
print 'Symbols: {0}'.format(len(self.c_defines))
self.ld_script = self.filter_dot(
self.resources.linker_script)
print 'Linker script: {0}'.format(self.ld_script)
self.options = {}
for id in profile_ids:
            # There are 4 categories of options, a category common to
# all tools and a specific category for each of the tools.
opts = {}
opts['common'] = {}
opts['as'] = {}
opts['c'] = {}
opts['cpp'] = {}
opts['ld'] = {}
opts['id'] = id
opts['name'] = opts['id'].capitalize()
print
print 'Build configuration: {0}'.format(opts['name'])
profile = profiles[id]
profile_toolchain = profile[self.TOOLCHAIN]
# A small hack, do not bother with src_path again,
# pass an empty string to avoid crashing.
src_paths = ['']
target_name = self.toolchain.target.name
toolchain = prepare_toolchain(
src_paths, target_name, self.TOOLCHAIN, build_profile=profile_toolchain)
# Hack to fill in build_dir
toolchain.build_dir = self.toolchain.build_dir
flags = self.toolchain_flags(toolchain)
print 'Common flags:', ' '.join(flags['common_flags'])
print 'C++ flags:', ' '.join(flags['cxx_flags'])
print 'C flags:', ' '.join(flags['c_flags'])
print 'ASM flags:', ' '.join(flags['asm_flags'])
print 'Linker flags:', ' '.join(flags['ld_flags'])
# Most GNU ARM Eclipse options have a parent,
# either debug or release.
if '-O0' in flags['common_flags'] or '-Og' in flags['common_flags']:
opts['parent_id'] = 'debug'
else:
opts['parent_id'] = 'release'
self.process_options(opts, flags)
opts['as']['defines'] = self.as_defines
opts['c']['defines'] = self.c_defines
opts['cpp']['defines'] = self.cpp_defines
opts['common']['include_paths'] = self.include_path
opts['common']['excluded_folders'] = '|'.join(
self.excluded_folders)
opts['ld']['library_paths'] = [
self.filter_dot(s) for s in self.resources.lib_dirs]
opts['ld']['object_files'] = objects
opts['ld']['user_libraries'] = libraries
opts['ld']['system_libraries'] = self.system_libraries
opts['ld']['script'] = self.ld_script
# Unique IDs used in multiple places.
# Those used only once are implemented with {{u.id}}.
uid = {}
uid['config'] = u.id
uid['tool_c_compiler'] = u.id
uid['tool_c_compiler_input'] = u.id
uid['tool_cpp_compiler'] = u.id
uid['tool_cpp_compiler_input'] = u.id
opts['uid'] = uid
self.options[id] = opts
jinja_ctx = {
'name': self.project_name,
# Compiler & linker command line options
'options': self.options,
# Must be an object with an `id` property, which
# will be called repeatedly, to generate multiple UIDs.
'u': u,
}
# TODO: it would be good to have jinja stop if one of the
# expected context values is not defined.
self.gen_file('gnuarmeclipse/.project.tmpl', jinja_ctx,
'.project', trim_blocks=True, lstrip_blocks=True)
self.gen_file('gnuarmeclipse/.cproject.tmpl', jinja_ctx,
'.cproject', trim_blocks=True, lstrip_blocks=True)
self.gen_file('gnuarmeclipse/makefile.targets.tmpl', jinja_ctx,
'makefile.targets', trim_blocks=True, lstrip_blocks=True)
if not exists('.mbedignore'):
print
print 'Create .mbedignore'
with open('.mbedignore', 'w') as f:
for bf in build_folders:
print bf + '/'
f.write(bf + '/\n')
print
print 'Done. Import the \'{0}\' project in Eclipse.'.format(self.project_name)
# override
@staticmethod
def build(project_name, log_name="build_log.txt", cleanup=True):
"""
        Headless build of an Eclipse project.
The following steps are performed:
- a temporary workspace is created,
- the project is imported,
- a clean build of all configurations is performed and
- the temporary workspace is removed.
The build results are in the Debug & Release folders.
All executables (eclipse & toolchain) must be in the PATH.
The general method to start a headless Eclipse build is:
$ eclipse \
--launcher.suppressErrors \
-nosplash \
-application org.eclipse.cdt.managedbuilder.core.headlessbuild \
-data /path/to/workspace \
-import /path/to/project \
-cleanBuild "project[/configuration] | all"
"""
# TODO: possibly use the log file.
# Create a temporary folder for the workspace.
tmp_folder = tempfile.mkdtemp()
cmd = [
'eclipse',
'--launcher.suppressErrors',
'-nosplash',
'-application org.eclipse.cdt.managedbuilder.core.headlessbuild',
'-data', tmp_folder,
'-import', os.getcwd(),
'-cleanBuild', project_name
]
p = Popen(' '.join(cmd), shell=True, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
ret_code = p.returncode
stdout_string = "=" * 10 + "STDOUT" + "=" * 10 + "\n"
err_string = "=" * 10 + "STDERR" + "=" * 10 + "\n"
err_string += err
ret_string = "SUCCESS\n"
if ret_code != 0:
ret_string += "FAILURE\n"
print "%s\n%s\n%s\n%s" % (stdout_string, out, err_string, ret_string)
if log_name:
# Write the output to the log file
with open(log_name, 'w+') as f:
f.write(stdout_string)
f.write(out)
f.write(err_string)
f.write(ret_string)
# Cleanup the exported and built files
if cleanup:
if exists(log_name):
os.remove(log_name)
os.remove('.project')
os.remove('.cproject')
if exists('Debug'):
shutil.rmtree('Debug')
if exists('Release'):
shutil.rmtree('Release')
if exists('makefile.targets'):
os.remove('makefile.targets')
# Always remove the temporary folder.
if exists(tmp_folder):
shutil.rmtree(tmp_folder)
if ret_code == 0:
# Return Success
return 0
# Seems like something went wrong.
return -1
# -------------------------------------------------------------------------
@staticmethod
def get_all_profiles():
tools_path = dirname(dirname(dirname(__file__)))
file_names = [join(tools_path, "profiles", fn) for fn in os.listdir(
join(tools_path, "profiles")) if fn.endswith(".json")]
# print file_names
profile_names = [basename(fn).replace(".json", "")
for fn in file_names]
# print profile_names
profiles = {}
for fn in file_names:
content = load(open(fn))
profile_name = basename(fn).replace(".json", "")
profiles[profile_name] = content
return profiles
# -------------------------------------------------------------------------
# Process source files/folders exclusions.
def compute_exclusions(self):
"""
With the project root as the only source folder known to CDT,
based on the list of source files, compute the folders to not
be included in the build.
The steps are:
- get the list of source folders, as dirname(source_file)
- compute the top folders (subfolders of the project folder)
- iterate all subfolders and add them to a tree, with all
        nodes marked as 'not used'
- iterate the source folders and mark them as 'used' in the
tree, including all intermediate nodes
- recurse the tree and collect all unused folders; descend
the hierarchy only for used nodes
"""
source_folders = [self.filter_dot(s) for s in set(dirname(
src) for src in self.resources.c_sources + self.resources.cpp_sources + self.resources.s_sources)]
if '.' in source_folders:
source_folders.remove('.')
# print 'source folders'
# print source_folders
# Source folders were converted before and are guaranteed to
# use the POSIX separator.
top_folders = [f for f in set(s.split('/')[0]
for s in source_folders)]
# print 'top folders'
# print top_folders
self.source_tree = {}
for top_folder in top_folders:
for root, dirs, files in os.walk(top_folder, topdown=True):
# print root, dirs, files
                # Paths returned by os.walk() must be split with os.sep
                # to accommodate Windows weirdness.
parts = root.split(os.sep)
# Ignore paths that include parts starting with dot.
skip = False
for part in parts:
if part.startswith('.'):
skip = True
break
if skip:
continue
                # Further process only leaf paths (that do not have
# sub-folders).
if len(dirs) == 0:
# The path is reconstructed using POSIX separators.
self.add_source_folder_to_tree('/'.join(parts))
for folder in source_folders:
self.add_source_folder_to_tree(folder, True)
# print
# print self.source_tree
# self.dump_paths(self.source_tree)
# self.dump_tree(self.source_tree)
# print 'excludings'
self.excluded_folders = ['BUILD']
self.recurse_excludings(self.source_tree)
print 'Source folders: {0}, with {1} exclusions'.format(len(source_folders), len(self.excluded_folders))
def add_source_folder_to_tree(self, path, is_used=False):
"""
Decompose a path in an array of folder names and create the tree.
On the second pass the nodes should be already there; mark them
as used.
"""
# print path, is_used
# All paths arriving here are guaranteed to use the POSIX
# separators, os.walk() paths were also explicitly converted.
parts = path.split('/')
# print parts
node = self.source_tree
prev = None
for part in parts:
if part not in node.keys():
new_node = {}
new_node['name'] = part
new_node['children'] = {}
if prev != None:
new_node['parent'] = prev
node[part] = new_node
node[part]['is_used'] = is_used
prev = node[part]
node = node[part]['children']
def recurse_excludings(self, nodes):
"""
Recurse the tree and collect all unused folders; descend
the hierarchy only for used nodes.
"""
for k in nodes.keys():
node = nodes[k]
if node['is_used'] == False:
parts = []
cnode = node
while True:
parts.insert(0, cnode['name'])
if 'parent' not in cnode:
break
cnode = cnode['parent']
# Compose a POSIX path.
path = '/'.join(parts)
# print path
self.excluded_folders.append(path)
else:
self.recurse_excludings(node['children'])
# -------------------------------------------------------------------------
@staticmethod
def filter_dot(str):
"""
Remove the './' prefix, if present.
This function assumes that resources.win_to_unix()
replaced all windows backslashes with slashes.
"""
if str == None:
return None
if str[:2] == './':
return str[2:]
return str
# -------------------------------------------------------------------------
def dump_tree(self, nodes, depth=0):
for k in nodes.keys():
node = nodes[k]
parent_name = node['parent'][
'name'] if 'parent' in node.keys() else ''
print ' ' * depth, node['name'], node['is_used'], parent_name
if len(node['children'].keys()) != 0:
self.dump_tree(node['children'], depth + 1)
def dump_paths(self, nodes, depth=0):
for k in nodes.keys():
node = nodes[k]
parts = []
while True:
parts.insert(0, node['name'])
if 'parent' not in node:
break
node = node['parent']
path = '/'.join(parts)
print path, nodes[k]['is_used']
self.dump_paths(nodes[k]['children'], depth + 1)
# -------------------------------------------------------------------------
def process_options(self, opts, flags_in):
"""
CDT managed projects store lots of build options in separate
variables, with separate IDs in the .cproject file.
When the CDT build is started, all these options are brought
together to compose the compiler and linker command lines.
Here the process is reversed, from the compiler and linker
command lines, the options are identified and various flags are
set to control the template generation process.
Once identified, the options are removed from the command lines.
The options that were not identified are options that do not
have CDT equivalents and will be passed in the 'Other options'
categories.
Although this process does not have a very complicated logic,
given the large number of explicit configuration options
used by the GNU ARM Eclipse managed build plug-in, it is tedious...
"""
# Make a copy of the flags, to be one by one removed after processing.
flags = copy.deepcopy(flags_in)
if False:
print
print 'common_flags', flags['common_flags']
print 'asm_flags', flags['asm_flags']
print 'c_flags', flags['c_flags']
print 'cxx_flags', flags['cxx_flags']
print 'ld_flags', flags['ld_flags']
# Initialise the 'last resort' options where all unrecognised
# options will be collected.
opts['as']['other'] = ''
opts['c']['other'] = ''
opts['cpp']['other'] = ''
opts['ld']['other'] = ''
MCPUS = {
'Cortex-M0': {'mcpu': 'cortex-m0', 'fpu_unit': None},
'Cortex-M0+': {'mcpu': 'cortex-m0plus', 'fpu_unit': None},
'Cortex-M1': {'mcpu': 'cortex-m1', 'fpu_unit': None},
'Cortex-M3': {'mcpu': 'cortex-m3', 'fpu_unit': None},
'Cortex-M4': {'mcpu': 'cortex-m4', 'fpu_unit': None},
'Cortex-M4F': {'mcpu': 'cortex-m4', 'fpu_unit': 'fpv4spd16'},
'Cortex-M7': {'mcpu': 'cortex-m7', 'fpu_unit': None},
'Cortex-M7F': {'mcpu': 'cortex-m7', 'fpu_unit': 'fpv4spd16'},
'Cortex-M7FD': {'mcpu': 'cortex-m7', 'fpu_unit': 'fpv5d16'},
'Cortex-A9': {'mcpu': 'cortex-a9', 'fpu_unit': 'vfpv3'}
}
# Remove options that are supplied by CDT
self.remove_option(flags['common_flags'], '-c')
self.remove_option(flags['common_flags'], '-MMD')
# As 'plan B', get the CPU from the target definition.
core = self.toolchain.target.core
opts['common']['arm.target.family'] = None
# cortex-m0, cortex-m0-small-multiply, cortex-m0plus,
# cortex-m0plus-small-multiply, cortex-m1, cortex-m1-small-multiply,
# cortex-m3, cortex-m4, cortex-m7.
str = self.find_options(flags['common_flags'], '-mcpu=')
if str != None:
opts['common']['arm.target.family'] = str[len('-mcpu='):]
self.remove_option(flags['common_flags'], str)
self.remove_option(flags['ld_flags'], str)
else:
if core not in MCPUS:
raise NotSupportedException(
'Target core {0} not supported.'.format(core))
opts['common']['arm.target.family'] = MCPUS[core]['mcpu']
opts['common']['arm.target.arch'] = 'none'
str = self.find_options(flags['common_flags'], '-march=')
arch = str[len('-march='):]
archs = {'armv6-m': 'armv6-m', 'armv7-m': 'armv7-m', 'armv7-a': 'armv7-a'}
if arch in archs:
opts['common']['arm.target.arch'] = archs[arch]
self.remove_option(flags['common_flags'], str)
opts['common']['arm.target.instructionset'] = 'thumb'
if '-mthumb' in flags['common_flags']:
self.remove_option(flags['common_flags'], '-mthumb')
self.remove_option(flags['ld_flags'], '-mthumb')
elif '-marm' in flags['common_flags']:
opts['common']['arm.target.instructionset'] = 'arm'
self.remove_option(flags['common_flags'], '-marm')
self.remove_option(flags['ld_flags'], '-marm')
opts['common']['arm.target.thumbinterwork'] = False
if '-mthumb-interwork' in flags['common_flags']:
opts['common']['arm.target.thumbinterwork'] = True
self.remove_option(flags['common_flags'], '-mthumb-interwork')
opts['common']['arm.target.endianness'] = None
if '-mlittle-endian' in flags['common_flags']:
opts['common']['arm.target.endianness'] = 'little'
self.remove_option(flags['common_flags'], '-mlittle-endian')
elif '-mbig-endian' in flags['common_flags']:
opts['common']['arm.target.endianness'] = 'big'
self.remove_option(flags['common_flags'], '-mbig-endian')
opts['common']['arm.target.fpu.unit'] = None
# default, fpv4spd16, fpv5d16, fpv5spd16
str = self.find_options(flags['common_flags'], '-mfpu=')
if str != None:
fpu = str[len('-mfpu='):]
fpus = {
'fpv4-sp-d16': 'fpv4spd16',
'fpv5-d16': 'fpv5d16',
'fpv5-sp-d16': 'fpv5spd16'
}
if fpu in fpus:
opts['common']['arm.target.fpu.unit'] = fpus[fpu]
self.remove_option(flags['common_flags'], str)
self.remove_option(flags['ld_flags'], str)
if opts['common']['arm.target.fpu.unit'] == None:
if core not in MCPUS:
raise NotSupportedException(
'Target core {0} not supported.'.format(core))
if MCPUS[core]['fpu_unit']:
opts['common'][
'arm.target.fpu.unit'] = MCPUS[core]['fpu_unit']
# soft, softfp, hard.
str = self.find_options(flags['common_flags'], '-mfloat-abi=')
if str != None:
opts['common']['arm.target.fpu.abi'] = str[
len('-mfloat-abi='):]
self.remove_option(flags['common_flags'], str)
self.remove_option(flags['ld_flags'], str)
opts['common']['arm.target.unalignedaccess'] = None
if '-munaligned-access' in flags['common_flags']:
opts['common']['arm.target.unalignedaccess'] = 'enabled'
self.remove_option(flags['common_flags'], '-munaligned-access')
elif '-mno-unaligned-access' in flags['common_flags']:
opts['common']['arm.target.unalignedaccess'] = 'disabled'
self.remove_option(flags['common_flags'], '-mno-unaligned-access')
# Default optimisation level for Release.
opts['common']['optimization.level'] = '-Os'
# If the project defines an optimisation level, it is used
        # only for the Release configuration; the Debug one uses '-Og'.
str = self.find_options(flags['common_flags'], '-O')
if str != None:
levels = {
'-O0': 'none', '-O1': 'optimize', '-O2': 'more',
'-O3': 'most', '-Os': 'size', '-Og': 'debug'
}
if str in levels:
opts['common']['optimization.level'] = levels[str]
self.remove_option(flags['common_flags'], str)
include_files = []
for all_flags in [flags['common_flags'], flags['c_flags'], flags['cxx_flags']]:
while '-include' in all_flags:
ix = all_flags.index('-include')
str = all_flags[ix + 1]
if str not in include_files:
include_files.append(str)
self.remove_option(all_flags, '-include')
self.remove_option(all_flags, str)
opts['common']['include_files'] = include_files
if '-ansi' in flags['c_flags']:
opts['c']['compiler.std'] = '-ansi'
            self.remove_option(flags['c_flags'], '-ansi')
else:
str = self.find_options(flags['c_flags'], '-std')
std = str[len('-std='):]
c_std = {
'c90': 'c90', 'c89': 'c90', 'gnu90': 'gnu90', 'gnu89': 'gnu90',
                'c99': 'c99', 'c9x': 'c99', 'gnu99': 'gnu99', 'gnu9x': 'gnu99',
'c11': 'c11', 'c1x': 'c11', 'gnu11': 'gnu11', 'gnu1x': 'gnu11'
}
if std in c_std:
opts['c']['compiler.std'] = c_std[std]
self.remove_option(flags['c_flags'], str)
if '-ansi' in flags['cxx_flags']:
opts['cpp']['compiler.std'] = '-ansi'
            self.remove_option(flags['cxx_flags'], '-ansi')
else:
str = self.find_options(flags['cxx_flags'], '-std')
std = str[len('-std='):]
cpp_std = {
'c++98': 'cpp98', 'c++03': 'cpp98',
'gnu++98': 'gnucpp98', 'gnu++03': 'gnucpp98',
'c++0x': 'cpp0x', 'gnu++0x': 'gnucpp0x',
'c++11': 'cpp11', 'gnu++11': 'gnucpp11',
'c++1y': 'cpp1y', 'gnu++1y': 'gnucpp1y',
'c++14': 'cpp14', 'gnu++14': 'gnucpp14',
'c++1z': 'cpp1z', 'gnu++1z': 'gnucpp1z',
}
if std in cpp_std:
opts['cpp']['compiler.std'] = cpp_std[std]
self.remove_option(flags['cxx_flags'], str)
# Common optimisation options.
optimization_options = {
'-fmessage-length=0': 'optimization.messagelength',
'-fsigned-char': 'optimization.signedchar',
'-ffunction-sections': 'optimization.functionsections',
'-fdata-sections': 'optimization.datasections',
'-fno-common': 'optimization.nocommon',
'-fno-inline-functions': 'optimization.noinlinefunctions',
'-ffreestanding': 'optimization.freestanding',
'-fno-builtin': 'optimization.nobuiltin',
'-fsingle-precision-constant': 'optimization.spconstant',
'-fPIC': 'optimization.PIC',
'-fno-move-loop-invariants': 'optimization.nomoveloopinvariants',
}
for option in optimization_options:
opts['common'][optimization_options[option]] = False
if option in flags['common_flags']:
opts['common'][optimization_options[option]] = True
self.remove_option(flags['common_flags'], option)
# Common warning options.
warning_options = {
'-fsyntax-only': 'warnings.syntaxonly',
'-pedantic': 'warnings.pedantic',
'-pedantic-errors': 'warnings.pedanticerrors',
'-w': 'warnings.nowarn',
'-Wunused': 'warnings.unused',
'-Wuninitialized': 'warnings.uninitialized',
'-Wall': 'warnings.allwarn',
'-Wextra': 'warnings.extrawarn',
'-Wmissing-declarations': 'warnings.missingdeclaration',
'-Wconversion': 'warnings.conversion',
'-Wpointer-arith': 'warnings.pointerarith',
'-Wpadded': 'warnings.padded',
'-Wshadow': 'warnings.shadow',
'-Wlogical-op': 'warnings.logicalop',
'-Waggregate-return': 'warnings.agreggatereturn',
'-Wfloat-equal': 'warnings.floatequal',
'-Werror': 'warnings.toerrors',
}
for option in warning_options:
opts['common'][warning_options[option]] = False
if option in flags['common_flags']:
opts['common'][warning_options[option]] = True
self.remove_option(flags['common_flags'], option)
# Common debug options.
debug_levels = {
'-g': 'default',
'-g1': 'minimal',
'-g3': 'max',
}
opts['common']['debugging.level'] = 'none'
for option in debug_levels:
if option in flags['common_flags']:
opts['common'][
'debugging.level'] = debug_levels[option]
self.remove_option(flags['common_flags'], option)
debug_formats = {
'-ggdb': 'gdb',
'-gstabs': 'stabs',
'-gstabs+': 'stabsplus',
'-gdwarf-2': 'dwarf2',
'-gdwarf-3': 'dwarf3',
'-gdwarf-4': 'dwarf4',
'-gdwarf-5': 'dwarf5',
}
opts['common']['debugging.format'] = ''
        for option in debug_formats:
if option in flags['common_flags']:
opts['common'][
'debugging.format'] = debug_formats[option]
self.remove_option(flags['common_flags'], option)
opts['common']['debugging.prof'] = False
if '-p' in flags['common_flags']:
opts['common']['debugging.prof'] = True
self.remove_option(flags['common_flags'], '-p')
opts['common']['debugging.gprof'] = False
if '-pg' in flags['common_flags']:
opts['common']['debugging.gprof'] = True
            self.remove_option(flags['common_flags'], '-pg')
# Assembler options.
opts['as']['usepreprocessor'] = False
while '-x' in flags['asm_flags']:
ix = flags['asm_flags'].index('-x')
str = flags['asm_flags'][ix + 1]
if str == 'assembler-with-cpp':
opts['as']['usepreprocessor'] = True
else:
# Collect all other assembler options.
opts['as']['other'] += ' -x ' + str
self.remove_option(flags['asm_flags'], '-x')
self.remove_option(flags['asm_flags'], 'assembler-with-cpp')
opts['as']['nostdinc'] = False
if '-nostdinc' in flags['asm_flags']:
opts['as']['nostdinc'] = True
self.remove_option(flags['asm_flags'], '-nostdinc')
opts['as']['verbose'] = False
if '-v' in flags['asm_flags']:
opts['as']['verbose'] = True
self.remove_option(flags['asm_flags'], '-v')
# C options.
opts['c']['nostdinc'] = False
if '-nostdinc' in flags['c_flags']:
opts['c']['nostdinc'] = True
self.remove_option(flags['c_flags'], '-nostdinc')
opts['c']['verbose'] = False
if '-v' in flags['c_flags']:
opts['c']['verbose'] = True
self.remove_option(flags['c_flags'], '-v')
warning_options = {
'-Wmissing-prototypes': 'warnings.missingprototypes',
'-Wstrict-prototypes': 'warnings.strictprototypes',
'-Wbad-function-cast': 'warnings.badfunctioncast',
}
for option in warning_options:
opts['c'][warning_options[option]] = False
if option in flags['common_flags']:
opts['c'][warning_options[option]] = True
self.remove_option(flags['common_flags'], option)
# C++ options.
opts['cpp']['nostdinc'] = False
if '-nostdinc' in flags['cxx_flags']:
opts['cpp']['nostdinc'] = True
self.remove_option(flags['cxx_flags'], '-nostdinc')
opts['cpp']['nostdincpp'] = False
if '-nostdinc++' in flags['cxx_flags']:
opts['cpp']['nostdincpp'] = True
self.remove_option(flags['cxx_flags'], '-nostdinc++')
optimization_options = {
'-fno-exceptions': 'optimization.noexceptions',
'-fno-rtti': 'optimization.nortti',
'-fno-use-cxa-atexit': 'optimization.nousecxaatexit',
'-fno-threadsafe-statics': 'optimization.nothreadsafestatics',
}
for option in optimization_options:
opts['cpp'][optimization_options[option]] = False
if option in flags['cxx_flags']:
opts['cpp'][optimization_options[option]] = True
self.remove_option(flags['cxx_flags'], option)
if option in flags['common_flags']:
opts['cpp'][optimization_options[option]] = True
self.remove_option(flags['common_flags'], option)
warning_options = {
'-Wabi': 'warnabi',
'-Wctor-dtor-privacy': 'warnings.ctordtorprivacy',
'-Wnoexcept': 'warnings.noexcept',
'-Wnon-virtual-dtor': 'warnings.nonvirtualdtor',
'-Wstrict-null-sentinel': 'warnings.strictnullsentinel',
'-Wsign-promo': 'warnings.signpromo',
'-Weffc++': 'warneffc',
}
for option in warning_options:
opts['cpp'][warning_options[option]] = False
if option in flags['cxx_flags']:
opts['cpp'][warning_options[option]] = True
self.remove_option(flags['cxx_flags'], option)
if option in flags['common_flags']:
opts['cpp'][warning_options[option]] = True
self.remove_option(flags['common_flags'], option)
opts['cpp']['verbose'] = False
if '-v' in flags['cxx_flags']:
opts['cpp']['verbose'] = True
self.remove_option(flags['cxx_flags'], '-v')
# Linker options.
linker_options = {
'-nostartfiles': 'nostart',
'-nodefaultlibs': 'nodeflibs',
'-nostdlib': 'nostdlibs',
}
for option in linker_options:
opts['ld'][linker_options[option]] = False
if option in flags['ld_flags']:
opts['ld'][linker_options[option]] = True
self.remove_option(flags['ld_flags'], option)
opts['ld']['gcsections'] = False
if '-Wl,--gc-sections' in flags['ld_flags']:
opts['ld']['gcsections'] = True
self.remove_option(flags['ld_flags'], '-Wl,--gc-sections')
opts['ld']['flags'] = []
to_remove = []
for opt in flags['ld_flags']:
if opt.startswith('-Wl,--wrap,'):
opts['ld']['flags'].append(
'--wrap=' + opt[len('-Wl,--wrap,'):])
to_remove.append(opt)
for opt in to_remove:
self.remove_option(flags['ld_flags'], opt)
# Other tool remaining options are separated by category.
opts['as']['otherwarnings'] = self.find_options(
flags['asm_flags'], '-W')
opts['c']['otherwarnings'] = self.find_options(
flags['c_flags'], '-W')
opts['c']['otheroptimizations'] = self.find_options(flags[
'c_flags'], '-f')
opts['cpp']['otherwarnings'] = self.find_options(
flags['cxx_flags'], '-W')
opts['cpp']['otheroptimizations'] = self.find_options(
flags['cxx_flags'], '-f')
# Other common remaining options are separated by category.
opts['common']['optimization.other'] = self.find_options(
flags['common_flags'], '-f')
opts['common']['warnings.other'] = self.find_options(
flags['common_flags'], '-W')
# Remaining common flags are added to each tool.
opts['as']['other'] += ' ' + \
' '.join(flags['common_flags']) + ' ' + \
' '.join(flags['asm_flags'])
opts['c']['other'] += ' ' + \
' '.join(flags['common_flags']) + ' ' + ' '.join(flags['c_flags'])
opts['cpp']['other'] += ' ' + \
' '.join(flags['common_flags']) + ' ' + \
' '.join(flags['cxx_flags'])
opts['ld']['other'] += ' ' + \
' '.join(flags['common_flags']) + ' ' + ' '.join(flags['ld_flags'])
if len(self.system_libraries) > 0:
opts['ld']['other'] += ' -Wl,--start-group '
opts['ld'][
'other'] += ' '.join('-l' + s for s in self.system_libraries)
opts['ld']['other'] += ' -Wl,--end-group '
# Strip all 'other' flags, since they might have leading spaces.
opts['as']['other'] = opts['as']['other'].strip()
opts['c']['other'] = opts['c']['other'].strip()
opts['cpp']['other'] = opts['cpp']['other'].strip()
opts['ld']['other'] = opts['ld']['other'].strip()
if False:
print
print opts
print
print 'common_flags', flags['common_flags']
print 'asm_flags', flags['asm_flags']
print 'c_flags', flags['c_flags']
print 'cxx_flags', flags['cxx_flags']
print 'ld_flags', flags['ld_flags']
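    # Note (not in the original source): the two find_options() definitions
    # below share a name, so only the second binding survives at runtime; it
    # collects, removes and joins every flag starting with the given prefix.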
@staticmethod
def find_options(lst, option):
tmp = [str for str in lst if str.startswith(option)]
if len(tmp) > 0:
return tmp[0]
else:
return None
@staticmethod
def find_options(lst, prefix):
other = ''
opts = [str for str in lst if str.startswith(prefix)]
if len(opts) > 0:
for opt in opts:
other += ' ' + opt
GNUARMEclipse.remove_option(lst, opt)
return other.strip()
@staticmethod
def remove_option(lst, option):
if option in lst:
lst.remove(option)
# =============================================================================
| adustm/mbed | tools/export/gnuarmeclipse/__init__.py | Python | apache-2.0 | 40,515 |
class Post(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __iter__(self):
return iter(self.__dict__)
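# Illustrative usage (not in the original file): Post(title="Hi", body="...")
# stores each keyword argument as an instance attribute, and iterating the
# instance yields the attribute names (the keys of its __dict__).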
| emitrom/integra-openstack-ui | workflows/post/post.py | Python | apache-2.0 | 151 |
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-05-10 23:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0011_auto_20190430_1254'),
]
operations = [
migrations.AddField(
model_name='backupoperation_decl',
name='uuid',
field=models.CharField(blank=True, help_text=b'unique identifer of this request', max_length=80, null=True),
),
]
| opencord/xos | xos/core/migrations/0012_backupoperation_decl_uuid.py | Python | apache-2.0 | 1,124 |
#!/usr/bin/env python
"""
conference.py -- Udacity conference server-side Python App Engine API;
uses Google Cloud Endpoints
$Id: conference.py,v 1.25 2014/05/24 23:42:19 wesc Exp wesc $
created by wesc on 2014 apr 21
"""
__author__ = '[email protected] (Wesley Chun)'
from datetime import datetime
import json
import os
import time
from utils import getUserId
from utils import validate_websafe_key
from utils import ndb_to_message
from utils import message_to_ndb
import endpoints
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from models import Profile
from models import ProfileMiniForm
from models import ProfileForm
from models import TeeShirtSize
from models import Conference
from models import ConferenceForm
from models import ConferenceForms
from models import ConferenceQueryForm
from models import ConferenceQueryForms
from models import BooleanMessage
from models import ConflictException
from models import StringMessage
from models import Session
from models import SessionForm
from models import SessionCreateForm
from models import SessionForms
from models import SessionType
from models import Speaker
from models import SpeakerForm
from models import SpeakerCreateForm
from models import SpeakerForms
from models import WishList
from models import WishListForm
from settings import WEB_CLIENT_ID
EMAIL_SCOPE = endpoints.EMAIL_SCOPE
API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID
# - - - Conference Defaults - - - - - - - - - - - - - - - - -
DEFAULTS = {
"city": "Default City",
"maxAttendees": 0,
"seatsAvailable": 0,
"topics": [
"Default",
"Topic"
],
}
OPERATORS = {
'EQ': '=',
'GT': '>',
'GTEQ': '>=',
'LT': '<',
'LTEQ': '<=',
'NE': '!='
}
FIELDS = {
'CITY': 'city',
'TOPIC': 'topics',
'MONTH': 'month',
'MAX_ATTENDEES': 'maxAttendees',
}
MEMCACHE_ANNOUNCEMENTS_KEY = "RECENT ANNOUNCEMENTS"
CONF_POST_REQUEST = endpoints.ResourceContainer(message_types.VoidMessage,
websafeConferenceKey=messages.StringField(1),
register=messages.BooleanField(2))
CONF_GET_REQUEST = endpoints.ResourceContainer(message_types.VoidMessage,
websafeConferenceKey=messages.StringField(1))
SESSION_POST_REQUEST = endpoints.ResourceContainer(SessionCreateForm,
websafeConferenceKey=messages.StringField(1))
SESSIONS_GET_REQUEST = endpoints.ResourceContainer(message_types.VoidMessage,
websafeConferenceKey=messages.StringField(1),
sessionType=messages.StringField(2))
SPEAKER_GET_REQUEST = endpoints.ResourceContainer(message_types.VoidMessage,
websafeSpeakerKey=messages.StringField(1))
WISHLIST_PUT_REQUEST = endpoints.ResourceContainer(message_types.VoidMessage,
add=messages.StringField(1), remove=messages.StringField(2))
SPEAKER_QUERY_BY_NAME = endpoints.ResourceContainer(message_types.VoidMessage,
firstName=messages.StringField(1), lastName=messages.StringField(2))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@endpoints.api(name='conference', version='v1',
allowed_client_ids=[WEB_CLIENT_ID, API_EXPLORER_CLIENT_ID],
scopes=[EMAIL_SCOPE])
class ConferenceApi(remote.Service):
"""Conference API v0.1"""
# - - - Profile objects - - - - - - - - - - - - - - - - - - -
def _copyProfileToForm(self, prof):
"""Copy relevant fields from Profile to ProfileForm."""
# copy relevant fields from Profile to ProfileForm
pf = ProfileForm()
for field in pf.all_fields():
if hasattr(prof, field.name):
# convert t-shirt string to Enum; just copy others
if field.name == 'teeShirtSize':
setattr(pf, field.name,
getattr(TeeShirtSize, getattr(prof, field.name)))
else:
setattr(pf, field.name, getattr(prof, field.name))
pf.check_initialized()
return pf
def _getProfileFromUser(self):
"""Return user Profile from datastore, creating new one if
non-existent."""
## TODO 2
## step 1: make sure user is authed
## uncomment the following lines:
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
p_key = ndb.Key(Profile, user_id)
profile = p_key.get()
## step 2: create a new Profile from logged in user data
## you can use user.nickname() to get displayName
## and user.email() to get mainEmail
if not profile:
profile = Profile(userId=None, key=p_key,
displayName=user.nickname(), mainEmail=user.email(),
teeShirtSize=str(TeeShirtSize.NOT_SPECIFIED), )
profile.put()
return profile # return Profile
def _doProfile(self, save_request=None):
"""Get user Profile and return to user, possibly updating it first."""
# get user Profile
prof = self._getProfileFromUser()
        # if saveProfile(), process user-modifiable fields
if save_request:
for field in ('displayName', 'teeShirtSize'):
if hasattr(save_request, field):
val = getattr(save_request, field)
if val:
setattr(prof, field, str(val))
prof.put()
# return ProfileForm
return self._copyProfileToForm(prof)
@endpoints.method(message_types.VoidMessage, ProfileForm, path='profile',
http_method='GET', name='getProfile')
def getProfile(self, request):
"""Return user profile."""
return self._doProfile()
# TODO 1
# 1. change request class
# 2. pass request to _doProfile function
@endpoints.method(ProfileMiniForm, ProfileForm, path='profile',
http_method='POST', name='saveProfile')
def saveProfile(self, request):
"""Update & return user profile."""
return self._doProfile(request)
# - - - Conference objects - - - - - - - - - - - - - - - - -
def _copyConferenceToForm(self, conf, displayName):
"""Copy relevant fields from Conference to ConferenceForm."""
cf = ConferenceForm()
for field in cf.all_fields():
if hasattr(conf, field.name):
# convert Date to date string; just copy others
if field.name.endswith('Date'):
setattr(cf, field.name, str(getattr(conf, field.name)))
else:
setattr(cf, field.name, getattr(conf, field.name))
elif field.name == "websafeKey":
setattr(cf, field.name, conf.key.urlsafe())
if displayName:
setattr(cf, 'organizerDisplayName', displayName)
cf.check_initialized()
return cf
def _createConferenceObject(self, request):
"""Create or update Conference object, returning
ConferenceForm/request."""
# preload necessary data items
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
if not request.name:
raise endpoints.BadRequestException(
"Conference 'name' field required")
# copy ConferenceForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) for field in
request.all_fields()}
del data['websafeKey']
del data['organizerDisplayName']
# add default values for those missing (both data model & outbound
# Message)
for df in DEFAULTS:
if data[df] in (None, []):
data[df] = DEFAULTS[df]
setattr(request, df, DEFAULTS[df])
# convert dates from strings to Date objects; set month based on
# start_date
if data['startDate']:
data['startDate'] = datetime.strptime(data['startDate'][:10],
"%Y-%m-%d").date()
data['month'] = data['startDate'].month
else:
data['month'] = 0
if data['endDate']:
data['endDate'] = datetime.strptime(data['endDate'][:10],
"%Y-%m-%d").date()
# set seatsAvailable to be same as maxAttendees on creation
# both for data model & outbound Message
if data["maxAttendees"] > 0:
data["seatsAvailable"] = data["maxAttendees"]
setattr(request, "seatsAvailable", data["maxAttendees"])
# make Profile Key from user ID
p_key = ndb.Key(Profile, user_id)
# allocate new Conference ID with Profile key as parent
c_id = Conference.allocate_ids(size=1, parent=p_key)[0]
# make Conference key from ID
c_key = ndb.Key(Conference, c_id, parent=p_key)
data['key'] = c_key
data['organizerUserId'] = request.organizerUserId = user_id
# Look for TODO 2
# create Conference, send email to organizer confirming
# creation of Conference & return (modified) ConferenceForm
Conference(**data).put()
taskqueue.add(
params={'email': user.email(), 'conferenceInfo': repr(request)},
url='/tasks/send_confirmation_email')
return request
@endpoints.method(ConferenceForm, ConferenceForm, path='conference',
http_method='POST', name='createConference')
def createConference(self, request):
"""Create new conference."""
return self._createConferenceObject(request)
@endpoints.method(CONF_GET_REQUEST, ConferenceForm,
path='conference/{websafeConferenceKey}',
http_method='GET', name='getConference')
def getConference(self, request):
""" Gets details about the specified conference """
conf_key = ndb.Key(urlsafe=request.websafeConferenceKey)
conference = conf_key.get()
return self._copyConferenceToForm(conference, "")
def _getQuery(self, request):
"""Return formatted query from the submitted filters."""
q = Conference.query()
inequality_filter, filters = self._formatFilters(request.filters)
# If exists, sort on inequality filter first
if not inequality_filter:
q = q.order(Conference.name)
else:
q = q.order(ndb.GenericProperty(inequality_filter))
q = q.order(Conference.name)
for filtr in filters:
if filtr["field"] in ["month", "maxAttendees"]:
filtr["value"] = int(filtr["value"])
formatted_query = ndb.query.FilterNode(filtr["field"],
filtr["operator"],
filtr["value"])
q = q.filter(formatted_query)
return q
def _formatFilters(self, filters):
"""Parse, check validity and format user supplied filters."""
formatted_filters = []
inequality_field = None
for f in filters:
filtr = {field.name: getattr(f, field.name) for field in
f.all_fields()}
try:
filtr["field"] = FIELDS[filtr["field"]]
filtr["operator"] = OPERATORS[filtr["operator"]]
except KeyError:
raise endpoints.BadRequestException(
"Filter contains invalid field or operator.")
# Every operation except "=" is an inequality
if filtr["operator"] != "=":
# check if inequality operation has been used in previous
# filters
# disallow the filter if inequality was performed on a
# different field before
# track the field on which the inequality operation is
# performed
if inequality_field and inequality_field != filtr["field"]:
raise endpoints.BadRequestException(
"Inequality filter is allowed on only one field.")
else:
inequality_field = filtr["field"]
formatted_filters.append(filtr)
return (inequality_field, formatted_filters)
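    # Illustrative example (not in the original source): filters such as
    # CITY EQ "London" plus MAX_ATTENDEES GT 10 are accepted, while combining
    # MAX_ATTENDEES GT 10 with MONTH LT 6 raises BadRequestException, because
    # Datastore allows inequality filters on only one property per query.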
@endpoints.method(ConferenceQueryForms, ConferenceForms,
path='queryConferences', http_method='POST',
name='queryConferences')
def queryConferences(self, request):
"""Query for conferences."""
conferences = self._getQuery(request)
# return individual ConferenceForm object per Conference
return ConferenceForms(
items=[self._copyConferenceToForm(conf, "") for conf in
conferences])
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='getConferencesCreated', http_method='GET',
name='getConferencesCreated')
def getConferencesCreated(self, request):
""" Get a list of all the conferences created by the logged in user """
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
profile_key = ndb.Key(Profile, user_id)
conferences = Conference.query(ancestor=profile_key)
profile = profile_key.get()
display_name = getattr(profile, 'displayName')
return ConferenceForms(
items=[self._copyConferenceToForm(conf, display_name) for conf in
conferences])
# - - - Registration - - - - - - - - - - - - - - - - - - - -
@ndb.transactional(xg=True)
def _conferenceRegistration(self, request, reg=True):
"""Register or unregister user for selected conference."""
retval = None
prof = self._getProfileFromUser() # get user Profile
# check if conf exists given websafeConfKey
# get conference; check that it exists
wsck = request.websafeConferenceKey
conf = ndb.Key(urlsafe=wsck).get()
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % wsck)
# register
if reg:
# check if user already registered otherwise add
if wsck in prof.conferenceKeysToAttend:
raise ConflictException(
"You have already registered for this conference")
# check if seats avail
if conf.seatsAvailable <= 0:
raise ConflictException("There are no seats available.")
# register user, take away one seat
prof.conferenceKeysToAttend.append(wsck)
conf.seatsAvailable -= 1
retval = True
# unregister
else:
# check if user already registered
if wsck in prof.conferenceKeysToAttend:
# unregister user, add back one seat
prof.conferenceKeysToAttend.remove(wsck)
conf.seatsAvailable += 1
retval = True
else:
retval = False
# write things back to the datastore & return
prof.put()
conf.put()
return BooleanMessage(data=retval)
@endpoints.method(CONF_POST_REQUEST, BooleanMessage,
path='conference/{websafeConferenceKey}',
http_method='POST', name='registerForConference')
def registerForConference(self, request):
"""Register user for selected conference."""
if request.register == False:
return self._conferenceRegistration(request, False)
else:
return self._conferenceRegistration(request)
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='conferences/attending', http_method='GET',
name='getConferencesToAttend')
def getConferencesToAttend(self, request):
"""Get list of conferences that user has registered for."""
# TODO:
# step 1: get user profile
user = endpoints.get_current_user()
if not user:
            raise endpoints.UnauthorizedException('Authorization required.')
profile_key = ndb.Key(Profile, getUserId(user))
profile = profile_key.get()
# step 2: get conferenceKeysToAttend from profile.
# to make a ndb key from websafe key you can use:
# ndb.Key(urlsafe=my_websafe_key_string)
conferenceWsKeysToAttend = profile.conferenceKeysToAttend
conferenceKeysToAttend = []
for wsKey in conferenceWsKeysToAttend:
key = ndb.Key(urlsafe=wsKey)
conferenceKeysToAttend.append(key)
# step 3: fetch conferences from datastore.
# Use get_multi(array_of_keys) to fetch all keys at once.
# Do not fetch them one by one!
conferences = ndb.get_multi(conferenceKeysToAttend)
# return set of ConferenceForm objects per Conference
return ConferenceForms(
items=[self._copyConferenceToForm(conf, "") for conf in
conferences])
# - - - Announcements - - - - - - - - - - - - - - - - - - - -
@staticmethod
def _cacheAnnouncement():
"""Create Announcement & assign to memcache; used by
memcache cron job & putAnnouncement().
"""
confs = Conference.query(ndb.AND(Conference.seatsAvailable <= 5,
Conference.seatsAvailable > 0)).fetch(
projection=[Conference.name])
if confs:
# If there are almost sold out conferences,
# format announcement and set it in memcache
announcement = '%s %s' % (
'Last chance to attend! The following conferences '
'are nearly sold out:', ', '.join(conf.name for conf in confs))
memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement)
else:
# If there are no sold out conferences,
# delete the memcache announcements entry
announcement = ""
memcache.delete(MEMCACHE_ANNOUNCEMENTS_KEY)
return announcement
@endpoints.method(message_types.VoidMessage, StringMessage,
path='conference/announcement/get', http_method='GET',
name='getAnnouncement')
def getAnnouncement(self, request):
"""Return Announcement from memcache."""
# TODO 1
# return an existing announcement from Memcache or an empty string.
announcement = memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY)
if announcement is None:
announcement = ""
return StringMessage(data=announcement)
# - - - Sessions - - - - - - - - - - - - - - - - - - - - - - - - -
def _getSessionFormsFromWsKeys(self, ws_session_keys):
"""
Returns a list of sessions as a session_forms message given an array of
websafe session keys
:param ws_session_keys: String, websafe session keys
:return: session_forms
"""
session_keys = []
for ws_session_key in ws_session_keys:
session_key = ndb.Key(urlsafe=ws_session_key)
session_keys.append(session_key)
sessions = ndb.get_multi(session_keys)
session_forms = SessionForms(
items=[self._copySessionToForm(session) for session in sessions])
return session_forms
def _copySessionToForm(self, session):
"""
Converts a session object into a session_form message
:param session: A session object
:return: session_form
"""
session_form = SessionForm()
exclusions = ['typeOfSession', 'speaker']
session_form = ndb_to_message(session, session_form, exclusions)
if session.typeOfSession:
session_form.typeOfSession = SessionType(session.typeOfSession)
if session.speaker:
session_form.speaker = self._getSpeakerFormFromWsKey(
session.speaker)
session_form.check_initialized()
return session_form
def _createSessionObject(self, request):
"""
:param request: the endpoint request
:return: session_form, message of the newly created session
"""
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required.')
user_id = getUserId(user)
# make sure we're given a websafe conference key
conference_key = validate_websafe_key(request.websafeConferenceKey,
'Conference')
# if we're given a websafe speaker key, make sure it's valid
if request.speaker:
validate_websafe_key(request.speaker, 'Speaker')
# get the conference
conference = conference_key.get()
# make sure the user can edit this conference
if conference.organizerUserId != user_id:
raise endpoints.BadRequestException(
'You cannot edit this conference.')
# create a session object
session = Session()
# list the fields we want to exclude
exclusions = ['websafeConferenceKey', 'typeOfSession']
# use our handy copy function to copy the other fields
session = message_to_ndb(request, session, exclusions)
# deal with typeOfSession and get the enum value
if request.typeOfSession:
session.typeOfSession = str(SessionType(request.typeOfSession))
else:
session.typeOfSession = str(SessionType.NOT_SPECIFIED)
# allocate an id and create the key
session_id = Session.allocate_ids(size=1, parent=conference_key)[0]
session.key = ndb.Key(Session, session_id, parent=conference_key)
# save the session to ndb
session.put()
# kick off the featured speaker task
taskqueue.add(url='/tasks/set_featured_speaker',
params={'conference_key': conference_key.urlsafe()})
# return the newly created session
return self._copySessionToForm(session)
@endpoints.method(SESSION_POST_REQUEST, SessionForm,
path='conference/{websafeConferenceKey}/session',
http_method='POST', name='createConferenceSession')
def createConferenceSession(self, request):
""" Create a session for a conference. """
return self._createSessionObject(request)
@endpoints.method(SESSIONS_GET_REQUEST, SessionForms,
path='conference/{websafeConferenceKey}/session',
http_method='GET', name='getConferenceSessions')
def getConferenceSessions(self, request):
""" Get the list of sessions for a conference. """
conference_key = validate_websafe_key(request.websafeConferenceKey,
'Conference')
# Get all the sessions where the provided Conference is the ancestor
q = Session.query(ancestor=conference_key)
# If sessionType is provided as a query string, apply as a filter
if request.sessionType:
session_type = request.sessionType.upper()
if hasattr(SessionType, session_type):
q = q.filter(Session.typeOfSession == session_type)
# return the list of sessions
q = q.order(Session.startTime)
return SessionForms(
items=[self._copySessionToForm(session) for session in q])
# - - - - SPEAKERS - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def _copySpeakerToForm(self, speaker):
""" Copies the fields from a Speaker object to a Speaker message """
speaker_form = SpeakerForm()
speaker_form = ndb_to_message(speaker, speaker_form)
speaker_form.check_initialized()
return speaker_form
def _getSpeakerFormFromWsKey(self, ws_speaker_key):
"""
Returns a Speaker message given a websafe Speaker key.
:param ws_speaker_key: String, websafe Speaker key
:return: speaker_form
"""
# if ndb.Key(urlsafe=ws_speaker_key).kind() != 'Speaker':
# raise endpoints.BadRequestException('Invalid speaker provided.')
speaker_key = ndb.Key(urlsafe=ws_speaker_key)
speaker = speaker_key.get()
speaker_form = self._copySpeakerToForm(speaker)
return speaker_form
@endpoints.method(SpeakerCreateForm, SpeakerForm, path='speaker',
http_method='POST', name='createSpeaker')
def createSpeaker(self, request):
""" Create a speaker """
# Make sure the user is logged in.
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required.')
user_id = getUserId(user)
user_key = ndb.Key(Profile, user_id)
# Create the Speaker object
speaker = Speaker()
# Copy the fields from the request to the Speaker
speaker = message_to_ndb(request, speaker)
# Allocate the Speaker id and set the key with the User as parent
speaker_id = speaker.allocate_ids(size=1, parent=user_key)[0]
speaker.key = ndb.Key(Speaker, speaker_id, parent=user_key)
# Write the speaker to the db
speaker.put()
# Create a SpeakerForm and copy the fields from the request
speaker_form = SpeakerForm()
speaker_form = ndb_to_message(speaker, speaker_form)
# Send back the SpeakerForm including the websafe key
return speaker_form
@endpoints.method(SPEAKER_GET_REQUEST, SpeakerForm,
path='speaker/{websafeSpeakerKey}', http_method='GET',
name='getSpeaker')
def getSpeaker(self, request):
""" Get all the information about a speaker. """
# validate the websafe speaker key and retrieve the entity key
speaker_key = validate_websafe_key(request.websafeSpeakerKey,
'Speaker')
# get the speaker from the db
speaker = speaker_key.get()
# return a message object with the speaker info
return self._copySpeakerToForm(speaker)
@endpoints.method(message_types.VoidMessage, SpeakerForms, path='speaker',
http_method='GET', name='getSpeakerList')
def getSpeakerList(self, request):
""" List all of the speakers. """
q = Speaker.query()
# Order the speakers by last name then first name
q = q.order(Speaker.lastName)
q = q.order(Speaker.firstName)
speakers = q.fetch()
# return the list of speakers
return SpeakerForms(
items=[self._copySpeakerToForm(speaker) for speaker in speakers])
@endpoints.method(SPEAKER_GET_REQUEST, SessionForms,
path='speakers/{websafeSpeakerKey}/sessions',
http_method='GET', name='getSessionsBySpeaker')
def getSessionsBySpeaker(self, request):
""" Get the sessions at which a speaker is speaker across all
Conferences.
"""
# Validate the websafe speaker key and retrieve the entity key
speaker_key = validate_websafe_key(request.websafeSpeakerKey,
'Speaker')
# query sessions where the speaker is the requested speaker
q = Session.query()
q = q.filter(Session.speaker == speaker_key.urlsafe())
sessions = q.fetch()
# return the list of sessions
return SessionForms(
items=[self._copySessionToForm(session) for session in sessions])
# - - - - Wish List - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def _createWishlist(self, user_key):
''' Creates a wishlist for a user '''
wishlist_id = WishList.allocate_ids(size=1, parent=user_key)[0]
wishlist_key = ndb.Key(WishList, wishlist_id, parent=user_key)
wishlist = WishList()
wishlist.key = wishlist_key
return wishlist
def _copyWishListToForm(self, wishlist):
''' Creates a message from a wishlist '''
wishlist_form = WishListForm()
wishlist_form.sessions = self._getSessionFormsFromWsKeys(
wishlist.sessions)
return wishlist_form
@endpoints.method(CONF_GET_REQUEST, WishListForm,
path='user/wishlist/{websafeConferenceKey}',
http_method='GET',
name='getSessionsInWishlistByConference')
def getSessionsInWishlistByConference(self, request):
""" List the wishlist items for the specified conference. """
# validate the websafe conference key
conference_key = validate_websafe_key(request.websafeConferenceKey,
'Conference')
# confirm the user
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required.')
user_id = getUserId(user)
user_key = ndb.Key(Profile, user_id)
        # get the user's wishlist
        q_wishlist = WishList.query(ancestor=user_key)
        wishlist = q_wishlist.fetch(1)
        # guard against users who have no wishlist or an empty wishlist
        if not wishlist or not wishlist[0].sessions:
            return self._copyWishListToForm(WishList())
        wishlist = wishlist[0]
        wishlist_session_keys = []
        for session in wishlist.sessions:
            wishlist_session_keys.append(ndb.Key(urlsafe=session))
# query Sessions where the specified Conference is the ancestor
session_q = Session.query(ancestor=conference_key)
# filter the Sessions to include only the sessions in the wishlist
session_q = session_q.filter(Session.key.IN(wishlist_session_keys))
# get the keys of those sessions, which are the ones we're looking for
conf_session_keys = session_q.fetch(keys_only=True)
# create a wishlist
short_wishlist = WishList()
# copy the found Session keys into the wishlist as websafe keys
for key in conf_session_keys:
short_wishlist.sessions.append(key.urlsafe())
# return the reduced wishlist as a message
return self._copyWishListToForm(short_wishlist)
@endpoints.method(WISHLIST_PUT_REQUEST, WishListForm, path='user/wishlist',
http_method='PUT', name='updateWishlist')
def updateWishlist(self, request):
""" Add or remove sessions to the logged in user's wishlist """
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required.')
# Validate the websafe session key to add
ws_add_key = None
ws_remove_key = None
if request.add:
ws_add_key = validate_websafe_key(request.add, 'Session', False)
# Validate the websafe session key to remove
if request.remove:
ws_remove_key = validate_websafe_key(request.remove, 'Session',
False)
# Get the user wishlist
user_id = getUserId(user)
user_key = ndb.Key(Profile, user_id)
wishlist = WishList.query(ancestor=user_key).fetch(1)
# If there wasn't previously a wishlist, create it
if not wishlist:
wishlist = self._createWishlist(user_key)
else:
wishlist = wishlist[0]
# If there is a session to add, add it
if ws_add_key:
if ws_add_key not in wishlist.sessions:
wishlist.sessions.append(ws_add_key)
# If there is a session to remove, remove it
if ws_remove_key:
if ws_remove_key in wishlist.sessions:
wishlist.sessions.remove(ws_remove_key)
# Save the wishlist to db
wishlist.put()
# Create a message of the newly created wishlist
wishlist_form = self._copyWishListToForm(wishlist)
return wishlist_form
@endpoints.method(message_types.VoidMessage, WishListForm,
path='user/wishlist', http_method='GET',
name='getSessionsInWishlist')
def getSessionsInWishlist(self, request):
""" List all of the sessions in the logged in user's wishlist """
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required.')
user_id = getUserId(user)
user_key = ndb.Key(Profile, user_id)
# Get the user's wishlist
wishlist = WishList.query(ancestor=user_key).fetch(1)
if wishlist:
wishlist = wishlist[0]
# Return the wishlist
return self._copyWishListToForm(wishlist)
# - - - - FEATURED SPEAKER - - - - - - - - - - - - - - - - - - - - -
@staticmethod
def _get_conf_featured_speaker(conference_key):
# Get all the sessions for a Conference ordered by created datetime
q = Session.query(ancestor=conference_key)
q = q.order(-Session.created_at)
# Just need the speaker websafe key
        sessions = q.fetch(projection=[Session.speaker])
# Count the sessions for each speaker
speaker_counter = {}
for session in sessions:
if session.speaker:
if session.speaker not in speaker_counter:
speaker_counter[session.speaker] = 1
else:
speaker_counter[session.speaker] += 1
# Find the maximum number of times a speaker is speaking
if not speaker_counter:
return None
max_speaker_count = max(speaker_counter.values())
# Get all the speakers who are speaking the max number of times
max_speakers = [key for key in speaker_counter.keys() if
speaker_counter[key] == max_speaker_count]
# featured_speaker_ws_key = ''
featured_speaker_ws_key = None
# If there is only one speaker, that's our featured speaker
if len(max_speakers) == 1:
featured_speaker_ws_key = max_speakers[0]
# Else, cycle through the sessions and get the first speaker who
# is in the set of speakers speaking the max number of times
else:
for session in sessions:
if session.speaker in max_speakers:
featured_speaker_ws_key = session.speaker
break
if featured_speaker_ws_key:
return featured_speaker_ws_key
else:
return None
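    # Tie-break note: the sessions are ordered newest-first, so when several
    # speakers share the maximum session count, the speaker of the most
    # recently created session among them is chosen as the featured speaker.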
@staticmethod
def _featured_speaker_memcache_key(conference_key):
# Create and return a memcache key for the featured speaker
memcache_key = '-'.join(("feature-speaker", str(conference_key)))
return memcache_key
@classmethod
def _cache_featured_speaker(cls, ws_conference_key):
# Get the conference key from the websafe key
conference_key = ndb.Key(urlsafe=ws_conference_key)
# Get the featured speaker
featured_speaker = cls._get_conf_featured_speaker(conference_key)
# Get the memcache key
memcache_key = cls._featured_speaker_memcache_key(ws_conference_key)
# If there is a featured speaker, set the message and save it to cache
if featured_speaker:
speaker_key = ndb.Key(urlsafe=featured_speaker)
speaker = speaker_key.get()
speaker_name = ' '.join((speaker.firstName, speaker.lastName))
message = '%s %s' % (speaker_name, 'is the featured speaker.')
memcache.set(memcache_key, message)
else:
message = ""
memcache.delete(memcache_key)
return message
@classmethod
def _cache_featured_speakers(cls):
# Get the keys for all conferences
conferences = Conference.query().fetch(keys_only=True)
# For each Conference websafe key, cache the featured speaker
for conference in conferences:
if conference:
cls._cache_featured_speaker(conference.urlsafe())
return
@endpoints.method(CONF_GET_REQUEST, StringMessage,
path='conference/{websafeConferenceKey}/featuredSpeaker',
http_method='GET', name='getConferenceFeaturedSpeaker')
def getConferenceFeaturedSpeaker(self, request):
""" Gets the featured speaker for a conference """
ws_conference_key = validate_websafe_key(request.websafeConferenceKey,
'Conference', False)
# Get the memcache key we're looking for
memcache_key = self._featured_speaker_memcache_key(ws_conference_key)
# retrieve the message from memcache
message = memcache.get(memcache_key)
        # If there is no cached message, fall back to an empty string
if message is None:
message = ""
return StringMessage(data=message)
# - - - - Query Problem - - - - - - - - - - - - - - - - - - - - - - - - -
@endpoints.method(CONF_GET_REQUEST, SessionForms,
path='conference/{websafeConferenceKey}/queryProblem',
http_method='GET', name='conferenceQueryProblem')
def conferenceQueryProblem(self, request):
""" Returns sessions before 7pm that are not workshops """
# Validate the websafe conference key and retrieve the entity key
conference_key = validate_websafe_key(request.websafeConferenceKey,
'Conference')
# Query for all sessions which are children of the conference
q = Session.query(ancestor=conference_key)
# Filter for startTime less than 7pm (19:00)
startTimeFilter = datetime.strptime('19:00:00', '%H:%M:%S').time()
q = q.filter(Session.startTime < startTimeFilter)
q = q.filter(Session.startTime != None)
q = q.order(Session.startTime)
# Get the result with a projection of typeOfSession
earlySessions = q.fetch(projection=[Session.typeOfSession])
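        # NOTE: Datastore allows inequality filters on only one property per
        # query (startTime here), so the "not a workshop" condition cannot be
        # added as a second query filter and is applied in Python below.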
# Iterate through the results and keep only non-workshop results
keys = [s.key for s in earlySessions if s.typeOfSession != 'WORKSHOP']
# Get the db results for the reduced set of keys
sessions = ndb.get_multi(keys)
# Return the result as a list of sessions
return SessionForms(
items=[self._copySessionToForm(session) for session in sessions])
# - - - - Additional Queries - - - - - - - - - - - - - - - - - - - - - -
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='conference/query/seatsAvailable',
http_method='GET',
name='getConferencesWithSpace')
def getConferencesWithSpace(self, request):
''' Get a list of conferences with seats available. '''
q = Conference.query()
q = q.filter(Conference.seatsAvailable > 0)
q = q.order(Conference.seatsAvailable)
return ConferenceForms(
items=[self._copyConferenceToForm(conf, "") for conf in q])
@endpoints.method(SPEAKER_QUERY_BY_NAME, SessionForms,
path='session/query/byName',
http_method='POST',
name='getSessionsBySpeakerName')
def getSessionsBySpeakerName(self, request):
""" Get a list of sessions by speaker first and/or last name. """
first_name = request.firstName
last_name = request.lastName
if not first_name and not last_name:
return SessionForms(items=[])
# query speakers for first name and last name, if provided
speaker_q = Speaker.query()
if first_name:
speaker_q = speaker_q.filter(Speaker.firstName == first_name)
if last_name:
speaker_q = speaker_q.filter(Speaker.lastName == last_name)
speaker_keys = speaker_q.fetch(keys_only=True)
# convert the speaker keys to websafe speaker keys
ws_speaker_keys = []
for key in speaker_keys:
ws_speaker_keys.append(key.urlsafe())
# query the sessions that have one of these websafe speaker keys
session_q = Session.query()
session_q = session_q.filter(Session.speaker.IN(ws_speaker_keys))
# return the sessions
return SessionForms(items=[self._copySessionToForm(session) for
session in session_q])
# registers API
api = endpoints.api_server([ConferenceApi])
| kirklink/udacity-fullstack-p4 | conference.py | Python | apache-2.0 | 40,994 |
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from oslo_config import cfg
from neutron.agent.common import config
from neutron.agent.common import ovs_lib
from neutron.common import constants
from networking_vsphere.drivers import ovs_firewall as ovs_fw
from networking_vsphere.tests import base
fake_port = {'security_group_source_groups': 'abc',
'mac_address': '00:11:22:33:44:55',
'network_id': "netid",
'id': "123",
'security_groups': "abc",
'lvid': "100",
'sg_provider_rules': [],
'security_group_rules': [
{"direction": "ingress",
"protocol": "tcp",
"port_range_min": 2001,
"port_range_max": 2009,
"source_port_range_min": 67,
"source_port_range_max": 68,
"ethertype": "IPv4",
"source_ip_prefix": "150.1.1.0/22",
"dest_ip_prefix": "170.1.1.0/22"}]}
fake_res_port = {'security_group_source_groups': 'abc',
'mac_address': '00:11:22:33:44:55',
'network_id': "netid",
'id': "123",
'security_groups': "abc",
'lvid': "100",
'device': "123"}
cookie = ("0x%x" % (hash("123") & 0xffffffffffffffff))
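# NOTE: this mirrors how the driver under test is assumed to derive a
# per-port flow cookie (the Python hash of the port id masked to 64 bits);
# the tests below assert that _add_flows() is called with this value.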
class TestOVSFirewallDriver(base.TestCase):
@mock.patch('networking_vsphere.drivers.ovs_firewall.OVSFirewallDriver.'
'check_ovs_firewall_restart')
@mock.patch('networking_vsphere.drivers.ovs_firewall.OVSFirewallDriver.'
'setup_base_flows')
@mock.patch('neutron.agent.common.ovs_lib.OVSBridge.create')
@mock.patch('neutron.agent.common.ovs_lib.OVSBridge.set_secure_mode')
@mock.patch('neutron.agent.common.ovs_lib.OVSBridge.get_port_ofport')
@mock.patch('neutron.agent.ovsdb.api.'
'API.get')
def setUp(self, mock_ovsdb_api, mock_get_port_ofport, mock_set_secure_mode,
mock_create_ovs_bridge, mock_setup_base_flows,
mock_check_ovs_firewall_restart,):
super(TestOVSFirewallDriver, self).setUp()
config.register_root_helper(cfg.CONF)
cfg.CONF.set_override('security_bridge_mapping',
"fake_sec_br:fake_if", 'SECURITYGROUP')
mock_get_port_ofport.return_value = 5
self.ovs_firewall = ovs_fw.OVSFirewallDriver()
self.ovs_firewall.sg_br = mock.Mock()
self.mock_br = ovs_lib.DeferredOVSBridge(self.ovs_firewall.sg_br)
self.LOG = ovs_fw.LOG
def test_get_compact_port(self):
compact_port = {'security_group_source_groups': 'abc',
'mac_address': '00:11:22:33:44:55',
'network_id': "netid",
'id': "123",
'device': "123",
'security_groups': "abc",
'lvid': "100"}
res = self.ovs_firewall._get_compact_port(fake_port)
self.assertEqual(compact_port, res)
def test_remove_ports_from_provider_cache(self):
self.ovs_firewall.provider_port_cache = set(['123', '124', '125'])
self.ovs_firewall.remove_ports_from_provider_cache(['123', '125'])
self.assertEqual(set(['124']), self.ovs_firewall.provider_port_cache)
self.ovs_firewall.provider_port_cache = set(['123', '124', '125'])
self.ovs_firewall.remove_ports_from_provider_cache(['121', '125'])
self.assertEqual(set(['123', '124']),
self.ovs_firewall.provider_port_cache)
def test_add_ovs_flow(self):
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
self.ovs_firewall._add_ovs_flow(self.mock_br, 0, 1, "normal")
mock_add_flow.assert_called_with(priority=0, actions='normal',
table=1)
def test_add_ovs_flow_with_protocol(self):
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
# rule with protocol
self.ovs_firewall._add_ovs_flow(self.mock_br, 0, 1, "normal",
protocol="arp")
mock_add_flow.assert_called_with(table=1, priority=0,
proto="arp", actions="normal")
def test_add_ovs_flow_with_dest_mac(self):
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
# rule with dl_dest
dest_mac = "01:00:00:00:00:00"
self.ovs_firewall._add_ovs_flow(self.mock_br, 0, 1, "normal",
dl_dest=dest_mac)
mock_add_flow.assert_called_with(table=1, priority=0,
dl_dst=dest_mac,
actions="normal")
def test_add_ovs_flow_with_tcpflag(self):
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
# rule with tcp_flags
t_flag = "+rst"
self.ovs_firewall._add_ovs_flow(self.mock_br, 0, 1, "normal",
tcp_flag=t_flag)
mock_add_flow.assert_called_with(table=1, priority=0,
proto=constants.PROTO_NAME_TCP,
tcp_flags=t_flag,
actions="normal")
def test_add_ovs_flow_with_icmptype(self):
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
# rule with icmp_req_type
self.ovs_firewall._add_ovs_flow(self.mock_br, 0, 1, "normal",
icmp_req_type=11)
mock_add_flow.assert_called_with(table=1, priority=0,
proto=constants.PROTO_NAME_ICMP,
icmp_type=11, actions="normal")
def test_add_ports_to_filter(self):
self.ovs_firewall.filtered_ports = {}
self.ovs_firewall.add_ports_to_filter([fake_port])
self.assertIsNotNone(self.ovs_firewall.filtered_ports)
ret_port = self.ovs_firewall.filtered_ports["123"]
self.assertEqual(fake_res_port, ret_port)
def test_setup_aap_flows(self):
port_with_app = copy.deepcopy(fake_port)
key = "allowed_address_pairs"
port_with_app[key] = [{'ip_address': '10.0.0.2',
'mac_address': 'aa:bb:cc:dd:ee:aa'},
{'ip_address': '10.0.0.3',
'mac_address': 'aa:bb:cc:dd:ee:ab'}]
with mock.patch.object(self.ovs_firewall, '_get_port_vlan',
return_value=100), \
mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
self.ovs_firewall._setup_aap_flows(self.mock_br, port_with_app)
self.assertEqual(2, mock_add_flow.call_count)
def test_setup_aap_flows_invalid_call(self):
port_with_app = copy.deepcopy(fake_port)
with mock.patch.object(self.ovs_firewall, '_get_port_vlan',
return_value=100), \
mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
self.ovs_firewall._setup_aap_flows(self.mock_br, port_with_app)
self.assertFalse(mock_add_flow.called)
def test_get_net_prefix_len(self):
ip_addr = "150.1.1.0/22"
length = self.ovs_firewall._get_net_prefix_len(ip_addr)
self.assertNotEqual(0, length)
ip_addr = None
length = self.ovs_firewall._get_net_prefix_len(ip_addr)
self.assertEqual(0, length)
def test_get_protocol(self):
proto = self.ovs_firewall._get_protocol("IPv4", None)
self.assertEqual(['ip'], proto)
proto = self.ovs_firewall._get_protocol("IPv6", None)
self.assertEqual(['ipv6'], proto)
proto = self.ovs_firewall._get_protocol("IPv6", 'icmp')
self.assertEqual(['icmp6'], proto)
proto = self.ovs_firewall._get_protocol("IPv4", 'icmp')
self.assertEqual(['icmp'], proto)
proto = self.ovs_firewall._get_protocol("IPv4", 'udp')
self.assertEqual(['udp'], proto)
proto = self.ovs_firewall._get_protocol("IPv6", 'tcp')
self.assertEqual(['tcp'], proto)
proto = self.ovs_firewall._get_protocol("IPv6", 'unknown')
self.assertEqual(['ipv6', 'unknown'], proto)
def test_add_flow_with_range(self):
flow = {"priority": 1}
res_flow = {"priority": 1,
"tp_dst": 1,
"tp_src": 1}
port = fake_port
direction = "fake_direction"
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.ovs_firewall, '_add_flows_to_sec_br'
) as mock_add_flows_sec_br:
self.ovs_firewall._add_flow_with_range(self.mock_br, port, flow,
direction, 1, 2, 1, 2)
mock_add_flows_sec_br.called_with(res_flow)
self.assertEqual(4, mock_add_flows_sec_br.call_count)
def test_add_flow_with_multiple_range(self):
flow = {"priority": 1}
port = fake_port
direction = "fake_direction"
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.ovs_firewall, '_add_flows_to_sec_br'
) as mock_add_flows_sec_br:
self.ovs_firewall._add_flow_with_range(self.mock_br, port, flow,
direction, 1, 3, 1, 2)
self.assertEqual(6, mock_add_flows_sec_br.call_count)
def test_add_flow_with_range_all_ports(self):
flow = {"priority": 1}
port = fake_port
direction = "fake_direction"
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.ovs_firewall, '_add_flows_to_sec_br'
) as mock_add_flows_sec_br:
self.ovs_firewall._add_flow_with_range(self.mock_br, port,
flow, direction, 1, 65535)
self.assertEqual(1, mock_add_flows_sec_br.call_count)
def test_add_flow_with_range_some_ports(self):
flow = {"priority": 1}
port = fake_port
direction = "fake_direction"
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.ovs_firewall, '_add_flows_to_sec_br'
) as mock_add_flows_sec_br:
self.ovs_firewall._add_flow_with_range(self.mock_br, port,
flow, direction, 1, 100)
self.assertEqual(100, mock_add_flows_sec_br.call_count)
def test_add_flows_to_sec_br_ingress_direction(self):
flows = {}
port = fake_port
direction = "ingress"
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
self.ovs_firewall._add_flows_to_sec_br(self.mock_br, port,
flows, direction)
self.assertTrue(mock_add_flow.called)
self.assertEqual(1, mock_add_flow.call_count)
def test_add_flows_to_sec_br_egress_direction(self):
flows = {}
port = fake_port
flows['dl_src'] = '01:02:03:04:05:06'
flows['proto'] = 'ip'
flows['dl_vlan'] = 25
port['fixed_ips'] = [u'70.0.0.5']
direction = "egress"
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
self.ovs_firewall._add_flows_to_sec_br(self.mock_br, port,
flows, direction)
self.assertTrue(mock_add_flow.called)
self.assertEqual(2, mock_add_flow.call_count)
def test_add_flows_to_sec_br_egress_direction_multiple_fixed_ips(self):
flows = {}
port = fake_port
flows['dl_src'] = '01:02:03:04:05:06'
flows['proto'] = 'ip'
flows['dl_vlan'] = 25
port['fixed_ips'] = [u'70.0.0.5', u'80.0.0.6']
direction = "egress"
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
self.ovs_firewall._add_flows_to_sec_br(self.mock_br, port,
flows, direction)
self.assertTrue(mock_add_flow.called)
self.assertEqual(4, mock_add_flow.call_count)
def test_add_flows_call_no_vlan(self):
port_with_app = copy.deepcopy(fake_port)
with mock.patch.object(self.ovs_firewall, '_get_port_vlan',
return_value=None), \
mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow,\
mock.patch.object(self.LOG, 'error') as mock_error_log:
self.ovs_firewall._add_flows(self.mock_br, port_with_app, cookie)
self.assertFalse(mock_add_flow.called)
self.assertTrue(mock_error_log.called)
def test_add_flows_call_tcp(self):
port = copy.deepcopy(fake_port)
with mock.patch.object(self.ovs_firewall, '_get_port_vlan',
return_value=100) as mock_get_vlan, \
mock.patch.object(self.ovs_firewall, '_get_protocol',
return_value=['tcp']) as mock_get_proto, \
mock.patch.object(self.ovs_firewall, '_add_flow_with_range'
) as mock_add_range_flows, \
mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'add_flow'):
self.ovs_firewall._add_flows(self.mock_br, port, cookie)
self.assertTrue(mock_get_vlan.called)
self.assertTrue(mock_get_proto.called)
self.assertTrue(mock_add_range_flows.called)
def test_add_flows_call_normal(self):
port = copy.deepcopy(fake_port)
with mock.patch.object(self.ovs_firewall, '_get_port_vlan',
return_value=100) as mock_get_vlan, \
mock.patch.object(self.ovs_firewall, '_get_protocol',
return_value=['ip']) as mock_get_proto, \
mock.patch.object(self.ovs_firewall, '_add_flow_with_range'
) as mock_add_range_flows, \
mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'add_flow') as mock_add_flow:
self.ovs_firewall._add_flows(self.mock_br, port, cookie)
self.assertTrue(mock_get_vlan.called)
self.assertTrue(mock_get_proto.called)
self.assertFalse(mock_add_range_flows.called)
self.assertTrue(mock_add_flow.called)
def test_prepare_port_filter(self):
self.ovs_firewall.provider_port_cache = set()
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.ovs_firewall, '_setup_aap_flows'
) as mock_aap_flow_fn, \
mock.patch.object(self.ovs_firewall, '_add_flows'
) as mock_add_flow_fn, \
mock.patch.object(self.mock_br, 'add_flow'):
self.ovs_firewall.prepare_port_filter(fake_port)
mock_aap_flow_fn.assert_called_with(self.mock_br, fake_port)
mock_add_flow_fn.assert_called_with(self.mock_br, fake_port,
cookie)
self.assertEqual(2, mock_add_flow_fn.call_count)
ret_port = self.ovs_firewall.filtered_ports['123']
self.assertEqual(fake_res_port, ret_port)
self.assertEqual(set(['123']),
self.ovs_firewall.provider_port_cache)
def test_prepare_port_filter_exception(self):
self.ovs_firewall.provider_port_cache = set()
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.ovs_firewall, '_setup_aap_flows',
side_effect=Exception()
) as mock_aap_flow_fn, \
mock.patch.object(self.ovs_firewall, '_add_flows'
) as mock_add_flow_fn, \
mock.patch.object(self.LOG, 'exception'
) as mock_exception_log:
self.ovs_firewall.prepare_port_filter(fake_port)
mock_aap_flow_fn.assert_called_with(self.mock_br, fake_port)
self.assertFalse(mock_add_flow_fn.called)
self.assertTrue(mock_exception_log.called)
self.assertEqual(set(), self.ovs_firewall.provider_port_cache)
def test_remove_only_tenant_flows(self):
self.ovs_firewall.filtered_ports["123"] = fake_res_port
with mock.patch.object(self.ovs_firewall, '_get_port_vlan',
return_value=100) as mock_get_vlan, \
mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'delete_flows'
) as mock_del_flows:
self.ovs_firewall._remove_flows(self.mock_br, "123")
self.assertTrue(mock_get_vlan.called)
self.assertEqual(4, mock_del_flows.call_count)
def test_remove_all_flows(self):
self.ovs_firewall.filtered_ports["123"] = fake_res_port
with mock.patch.object(self.ovs_firewall, '_get_port_vlan',
return_value=100) as mock_get_vlan, \
mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'delete_flows'
) as mock_del_flows:
self.ovs_firewall._remove_flows(self.mock_br, "123", True)
self.assertTrue(mock_get_vlan.called)
self.assertEqual(7, mock_del_flows.call_count)
def test_remove_flows_invalid_port(self):
res_port = copy.deepcopy(fake_res_port)
res_port.pop('mac_address')
self.ovs_firewall.filtered_ports["123"] = res_port
with mock.patch.object(self.ovs_firewall, '_get_port_vlan',
return_value=100) as mock_get_vlan, \
mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.mock_br, 'delete_flows'
) as mock_del_flows, \
mock.patch.object(self.LOG, 'debug') as mock_debug_log:
self.ovs_firewall._remove_flows(self.mock_br, "123")
self.assertTrue(mock_get_vlan.called)
self.assertEqual(1, mock_del_flows.call_count)
self.assertEqual(2, mock_debug_log.call_count)
def test_clean_port_filters(self):
self.ovs_firewall.filtered_ports["123"] = fake_res_port
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.ovs_firewall, '_remove_flows'
) as mock_rem_flow:
self.ovs_firewall.clean_port_filters(["123"])
mock_rem_flow.assert_called_with(self.mock_br, "123")
self.assertIn("123", self.ovs_firewall.filtered_ports)
def test_clean_port_filters_remove_port(self):
self.ovs_firewall.filtered_ports["123"] = fake_res_port
self.ovs_firewall.provider_port_cache = set(['123'])
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.ovs_firewall, '_remove_flows'
) as mock_rem_flow:
self.ovs_firewall.clean_port_filters(["123"], True)
mock_rem_flow.assert_called_with(self.mock_br, "123", True)
self.assertNotIn("123", self.ovs_firewall.filtered_ports)
self.assertNotIn("123", self.ovs_firewall.provider_port_cache)
def test_clean_port_filters_exception(self):
self.ovs_firewall.filtered_ports["123"] = fake_res_port
self.ovs_firewall.provider_port_cache = set(['123'])
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.ovs_firewall, '_remove_flows',
side_effect=Exception()
) as mock_rem_flow, \
mock.patch.object(self.LOG, 'exception'
) as mock_exception_log:
self.ovs_firewall.clean_port_filters(["123"], True)
mock_rem_flow.assert_called_with(self.mock_br, "123", True)
self.assertTrue(mock_exception_log.called)
self.assertIn("123", self.ovs_firewall.provider_port_cache)
self.assertIn("123", self.ovs_firewall.filtered_ports)
def test_normal_update_port_filters(self):
self.ovs_firewall.filtered_ports["123"] = fake_res_port
self.ovs_firewall.provider_port_cache = set(['123'])
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.ovs_firewall, '_remove_flows'
) as mock_rem_flow, \
mock.patch.object(self.ovs_firewall, '_setup_aap_flows'
) as mock_aap_flow_fn, \
mock.patch.object(self.ovs_firewall, '_add_flows'
) as mock_add_flow_fn:
self.ovs_firewall.update_port_filter(fake_port)
mock_rem_flow.assert_called_with(self.mock_br, "123")
mock_aap_flow_fn.assert_called_with(self.mock_br, fake_port)
mock_add_flow_fn.assert_called_with(self.mock_br, fake_port,
cookie)
self.assertEqual(1, mock_add_flow_fn.call_count)
self.assertIn("123", self.ovs_firewall.filtered_ports)
def test_update_port_filters_for_provider_update(self):
self.ovs_firewall.filtered_ports["123"] = fake_res_port
self.ovs_firewall.provider_port_cache = set()
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.ovs_firewall, '_remove_flows'
) as mock_rem_flow, \
mock.patch.object(self.ovs_firewall, '_setup_aap_flows'
) as mock_aap_flow_fn, \
mock.patch.object(self.ovs_firewall, '_add_flows'
) as mock_add_flow_fn:
self.ovs_firewall.update_port_filter(fake_port)
mock_rem_flow.assert_called_with(self.mock_br, "123", True)
mock_aap_flow_fn.assert_called_with(self.mock_br, fake_port)
mock_add_flow_fn.assert_called_with(self.mock_br, fake_port,
cookie)
self.assertEqual(2, mock_add_flow_fn.call_count)
self.assertIn("123", self.ovs_firewall.filtered_ports)
self.assertIn("123", self.ovs_firewall.provider_port_cache)
def test_update_port_filters_exception(self):
self.ovs_firewall.filtered_ports["123"] = fake_res_port
self.ovs_firewall.provider_port_cache = set(['123'])
with mock.patch.object(self.ovs_firewall.sg_br, 'deferred',
return_value=self.mock_br), \
mock.patch.object(self.ovs_firewall, '_remove_flows',
side_effect=Exception()) as mock_rem_flow, \
mock.patch.object(self.ovs_firewall, '_add_flows'
) as mock_add_flow_fn, \
mock.patch.object(self.LOG, 'exception'
) as mock_exception_log:
self.ovs_firewall.update_port_filter(fake_port)
mock_rem_flow.assert_called_with(self.mock_br, "123")
self.assertFalse(mock_add_flow_fn.called)
self.assertIn("123", self.ovs_firewall.filtered_ports)
self.assertTrue(mock_exception_log.called)
def test_ovs_firewall_restart_with_canary_flow(self):
flow = "cookie=0x0, duration=4633.482s, table=23, n_packets=0" + \
"n_bytes=0, idle_age=4633, priority=0 actions=drop"
with mock.patch.object(self.ovs_firewall.sg_br,
"dump_flows_for_table",
return_value=flow) as mock_dump_flow:
canary_flow = self.ovs_firewall.check_ovs_firewall_restart()
self.assertTrue(mock_dump_flow.called)
self.assertTrue(canary_flow)
def test_ovs_firewall_restart_without_canary_flow(self):
flow = ""
with mock.patch.object(self.ovs_firewall.sg_br,
"dump_flows_for_table",
return_value=flow) as mock_dump_flow:
canary_flow = self.ovs_firewall.check_ovs_firewall_restart()
self.assertTrue(mock_dump_flow.called)
self.assertFalse(canary_flow)
| ekosareva/vmware-dvs | networking_vsphere/tests/unit/drivers/test_ovs_firewall.py | Python | apache-2.0 | 28,260 |
# Copyright (c) 2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from swift.common.utils import json
from swift3.response import InvalidArgument, MalformedACLError, \
S3NotImplemented, InvalidRequest, AccessDenied
from swift3.etree import Element, SubElement
from swift3.utils import LOGGER, sysmeta_header
from swift3.cfg import CONF
from swift3.exception import InvalidSubresource
XMLNS_XSI = 'http://www.w3.org/2001/XMLSchema-instance'
PERMISSIONS = ['FULL_CONTROL', 'READ', 'WRITE', 'READ_ACP', 'WRITE_ACP']
LOG_DELIVERY_USER = '.log_delivery'
"""
This module is the entry point of the S3 ACL support. To implement real
S3 ACLs, we first need to understand S3's ACL model:
AccessControlPolicy:
    Owner:
    AccessControlList:
        Grant[n]:
            (Grantee, Permission)
Each bucket or object has its own ACL, which consists of an Owner and an
AccessControlList. An AccessControlList can contain several Grants. By
default, the AccessControlList has only one Grant, which allows FULL_CONTROL
to the owner. Each Grant is a single (Grantee, Permission) pair, where the
Grantee is the user (or user group) that is allowed the given permission.
For more detailed information about S3's ACL model, see the official
documentation:
http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
"""
def encode_acl(resource, acl):
"""
Encode an ACL instance to Swift metadata.
Given a resource type and an ACL instance, this method returns HTTP
headers, which can be used for Swift metadata.
"""
header_value = {"Owner": acl.owner.id}
grants = []
for grant in acl.grants:
grant = {"Permission": grant.permission,
"Grantee": str(grant.grantee)}
grants.append(grant)
header_value.update({"Grant": grants})
headers = {}
key = sysmeta_header(resource, 'acl')
headers[key] = json.dumps(header_value, separators=(',', ':'))
return headers
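# Illustrative sketch of what encode_acl() produces (the header name comes
# from sysmeta_header(resource, 'acl'); the owner id below is made up):
#   {"Owner": "test:tester",
#    "Grant": [{"Permission": "FULL_CONTROL", "Grantee": "test:tester"}]}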
def decode_acl(resource, headers):
"""
Decode Swift metadata to an ACL instance.
Given a resource type and HTTP headers, this method returns an ACL
instance.
"""
value = ''
key = sysmeta_header(resource, 'acl')
if key in headers:
value = headers[key]
if value == '':
        # FIXME: If the value is empty or not a dict, we would like the
        # Owner itself to be None. However, later code that references the
        # Owner's instance variables would then fail, so we return an ACL
        # with an empty Owner(None, None) instead.
return ACL(Owner(None, None), [])
try:
encode_value = json.loads(value)
if not isinstance(encode_value, dict):
return ACL(Owner(None, None), [])
id = None
name = None
grants = []
if 'Owner' in encode_value:
id = encode_value['Owner']
name = encode_value['Owner']
if 'Grant' in encode_value:
for grant in encode_value['Grant']:
grantee = None
# pylint: disable-msg=E1101
for group in Group.__subclasses__():
if group.__name__ == grant['Grantee']:
grantee = group()
if not grantee:
grantee = User(grant['Grantee'])
permission = grant['Permission']
grants.append(Grant(grantee, permission))
return ACL(Owner(id, name), grants)
except Exception as e:
LOGGER.debug(e)
pass
raise InvalidSubresource((resource, 'acl', value))
class Grantee(object):
"""
Base class for grantee.
:Definition (methods):
init -> create a Grantee instance
elem -> create an ElementTree from itself
:Definition (static methods):
from_header -> convert a grantee string in the HTTP header
to an Grantee instance.
from_elem -> convert a ElementTree to an Grantee instance.
TODO (not yet):
NOTE: Needs confirmation whether we really need these methods or not.
encode (method) -> create a JSON which includes whole own elements
encode_from_elem (static method) -> convert from an ElementTree to a JSON
elem_from_json (static method) -> convert from a JSON to an ElementTree
from_json (static method) -> convert a Json string to an Grantee instance.
"""
def __contains__(self, key):
"""
The key argument is a S3 user id. This method checks that the user id
belongs to this class.
"""
raise S3NotImplemented()
def elem(self):
"""
Get an etree element of this instance.
"""
raise S3NotImplemented()
@staticmethod
def from_elem(elem):
type = elem.get('{%s}type' % XMLNS_XSI)
if type == 'CanonicalUser':
value = elem.find('./ID').text
return User(value)
elif type == 'Group':
value = elem.find('./URI').text
subclass = get_group_subclass_from_uri(value)
return subclass()
elif type == 'AmazonCustomerByEmail':
raise S3NotImplemented()
else:
raise MalformedACLError()
@staticmethod
def from_header(grantee):
"""
Convert a grantee string in the HTTP header to an Grantee instance.
"""
type, value = grantee.split('=', 1)
value = value.strip('"\'')
if type == 'id':
return User(value)
elif type == 'emailAddress':
raise S3NotImplemented()
elif type == 'uri':
            # return a subclass instance of the Group class
subclass = get_group_subclass_from_uri(value)
return subclass()
else:
raise InvalidArgument(type, value,
'Argument format not recognized')
class User(Grantee):
"""
Canonical user class for S3 accounts.
"""
type = 'CanonicalUser'
def __init__(self, name):
self.id = name
self.display_name = name
def __contains__(self, key):
return key == self.id
def elem(self):
elem = Element('Grantee', nsmap={'xsi': XMLNS_XSI})
elem.set('{%s}type' % XMLNS_XSI, self.type)
SubElement(elem, 'ID').text = self.id
SubElement(elem, 'DisplayName').text = self.display_name
return elem
def __str__(self):
return self.display_name
class Owner(object):
"""
Owner class for S3 accounts
"""
def __init__(self, id, name):
self.id = id
self.name = name
def get_group_subclass_from_uri(uri):
"""
Convert a URI to one of the predefined groups.
"""
for group in Group.__subclasses__(): # pylint: disable-msg=E1101
if group.uri == uri:
return group
raise InvalidArgument('uri', uri, 'Invalid group uri')
class Group(Grantee):
"""
Base class for Amazon S3 Predefined Groups
"""
type = 'Group'
uri = ''
def __init__(self):
# Initialize method to clarify this has nothing to do
pass
def elem(self):
elem = Element('Grantee', nsmap={'xsi': XMLNS_XSI})
elem.set('{%s}type' % XMLNS_XSI, self.type)
SubElement(elem, 'URI').text = self.uri
return elem
def __str__(self):
return self.__class__.__name__
def canned_acl_grantees(bucket_owner, object_owner=None):
"""
A set of predefined grants supported by AWS S3.
"""
owner = object_owner or bucket_owner
return {
'private': [
('FULL_CONTROL', User(owner.name)),
],
'public-read': [
('READ', AllUsers()),
('FULL_CONTROL', User(owner.name)),
],
'public-read-write': [
('READ', AllUsers()),
('WRITE', AllUsers()),
('FULL_CONTROL', User(owner.name)),
],
'authenticated-read': [
('READ', AuthenticatedUsers()),
('FULL_CONTROL', User(owner.name)),
],
'bucket-owner-read': [
('READ', User(bucket_owner.name)),
('FULL_CONTROL', User(owner.name)),
],
'bucket-owner-full-control': [
('FULL_CONTROL', User(owner.name)),
('FULL_CONTROL', User(bucket_owner.name)),
],
'log-delivery-write': [
('WRITE', LogDelivery()),
('READ_ACP', LogDelivery()),
('FULL_CONTROL', User(owner.name)),
],
}
class AuthenticatedUsers(Group):
"""
This group represents all AWS accounts. Access permission to this group
allows any AWS account to access the resource. However, all requests must
be signed (authenticated).
"""
uri = 'http://acs.amazonaws.com/groups/global/AuthenticatedUsers'
def __contains__(self, key):
# Swift3 handles only signed requests.
return True
class AllUsers(Group):
"""
Access permission to this group allows anyone to access the resource. The
requests can be signed (authenticated) or unsigned (anonymous). Unsigned
requests omit the Authentication header in the request.
    Note: Swift3 regards unsigned requests as Swift API accesses and passes
    them through to Swift. As a result, AllUsers behaves exactly the same as
    AuthenticatedUsers.
"""
uri = 'http://acs.amazonaws.com/groups/global/AllUsers'
def __contains__(self, key):
return True
class LogDelivery(Group):
"""
WRITE and READ_ACP permissions on a bucket enables this group to write
server access logs to the bucket.
"""
uri = 'http://acs.amazonaws.com/groups/s3/LogDelivery'
def __contains__(self, key):
if ':' in key:
tenant, user = key.split(':', 1)
else:
user = key
return user == LOG_DELIVERY_USER
class Grant(object):
"""
Grant Class which includes both Grantee and Permission
"""
def __init__(self, grantee, permission):
"""
:param grantee: a grantee class or its subclass
:param permission: string
"""
if permission.upper() not in PERMISSIONS:
raise S3NotImplemented()
if not isinstance(grantee, Grantee):
            raise TypeError('grantee must be an instance of Grantee')
self.grantee = grantee
self.permission = permission
@classmethod
def from_elem(cls, elem):
"""
Convert an ElementTree to an ACL instance
"""
grantee = Grantee.from_elem(elem.find('./Grantee'))
permission = elem.find('./Permission').text
return cls(grantee, permission)
def elem(self):
"""
Create an etree element.
"""
elem = Element('Grant')
elem.append(self.grantee.elem())
SubElement(elem, 'Permission').text = self.permission
return elem
def allow(self, grantee, permission):
return permission == self.permission and grantee in self.grantee
class ACL(object):
"""
S3 ACL class.
Refs (S3 API - acl-overview:
http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html):
The sample ACL includes an Owner element identifying the owner via the
AWS account's canonical user ID. The Grant element identifies the grantee
(either an AWS account or a predefined group), and the permission granted.
This default ACL has one Grant element for the owner. You grant permissions
by adding Grant elements, each grant identifying the grantee and the
permission.
"""
metadata_name = 'acl'
root_tag = 'AccessControlPolicy'
max_xml_length = 200 * 1024
def __init__(self, owner, grants=[]):
"""
:param owner: Owner Class for ACL instance
"""
self.owner = owner
self.grants = grants
@classmethod
def from_elem(cls, elem):
"""
Convert an ElementTree to an ACL instance
"""
id = elem.find('./Owner/ID').text
try:
name = elem.find('./Owner/DisplayName').text
except AttributeError:
name = id
grants = [Grant.from_elem(e)
for e in elem.findall('./AccessControlList/Grant')]
return cls(Owner(id, name), grants)
def elem(self):
"""
Decode the value to an ACL instance.
"""
elem = Element(self.root_tag)
owner = SubElement(elem, 'Owner')
SubElement(owner, 'ID').text = self.owner.id
SubElement(owner, 'DisplayName').text = self.owner.name
SubElement(elem, 'AccessControlList').extend(
g.elem() for g in self.grants
)
return elem
def check_owner(self, user_id):
"""
Check that the user is an owner.
"""
if not CONF.s3_acl:
# Ignore Swift3 ACL.
return
if not self.owner.id:
if CONF.allow_no_owner:
# No owner means public.
return
raise AccessDenied()
if user_id != self.owner.id:
raise AccessDenied()
def check_permission(self, user_id, permission):
"""
Check that the user has a permission.
"""
if not CONF.s3_acl:
# Ignore Swift3 ACL.
return
try:
# owners have full control permission
self.check_owner(user_id)
return
except AccessDenied:
pass
if permission in PERMISSIONS:
for g in self.grants:
if g.allow(user_id, 'FULL_CONTROL') or \
g.allow(user_id, permission):
return
raise AccessDenied()
@classmethod
def from_headers(cls, headers, bucket_owner, object_owner=None,
as_private=True):
"""
Convert HTTP headers to an ACL instance.
"""
grants = []
try:
for key, value in headers.items():
if key.lower().startswith('x-amz-grant-'):
permission = key[len('x-amz-grant-'):]
permission = permission.upper().replace('-', '_')
if permission not in PERMISSIONS:
continue
for grantee in value.split(','):
grants.append(
Grant(Grantee.from_header(grantee), permission))
if 'x-amz-acl' in headers:
try:
acl = headers['x-amz-acl']
if len(grants) > 0:
err_msg = 'Specifying both Canned ACLs and Header ' \
'Grants is not allowed'
raise InvalidRequest(err_msg)
grantees = canned_acl_grantees(
bucket_owner, object_owner)[acl]
for permission, grantee in grantees:
grants.append(Grant(grantee, permission))
except KeyError:
# expects canned_acl_grantees()[] raises KeyError
raise InvalidArgument('x-amz-acl', headers['x-amz-acl'])
except (KeyError, ValueError):
            # TODO: reconsider whether this is really the right set of
            # exceptions to catch here
raise InvalidRequest()
if len(grants) == 0:
# No ACL headers
if as_private:
return ACLPrivate(bucket_owner, object_owner)
else:
return None
return cls(object_owner or bucket_owner, grants)
class CannedACL(object):
"""
A dict-like object that returns canned ACL.
"""
def __getitem__(self, key):
def acl(key, bucket_owner, object_owner=None):
grants = []
grantees = canned_acl_grantees(bucket_owner, object_owner)[key]
for permission, grantee in grantees:
grants.append(Grant(grantee, permission))
return ACL(object_owner or bucket_owner, grants)
return partial(acl, key)
canned_acl = CannedACL()
ACLPrivate = canned_acl['private']
ACLPublicRead = canned_acl['public-read']
ACLPublicReadWrite = canned_acl['public-read-write']
ACLAuthenticatedRead = canned_acl['authenticated-read']
ACLBucketOwnerRead = canned_acl['bucket-owner-read']
ACLBucketOwnerFullControl = canned_acl['bucket-owner-full-control']
ACLLogDeliveryWrite = canned_acl['log-delivery-write']
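# Minimal usage sketch (not part of the original module; the account name
# "test:tester" is made up): build a canned private ACL, encode it to Swift
# sysmeta headers and decode it back.
def _example_acl_round_trip():
    owner = Owner('test:tester', 'test:tester')
    acl = ACLPrivate(owner)
    headers = encode_acl('container', acl)
    decoded = decode_acl('container', headers)
    # The decoded ACL carries the same owner id and a FULL_CONTROL grant.
    return decoded.owner.id, [g.permission for g in decoded.grants]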
| KoreaCloudObjectStorage/swift3 | swift3/subresource.py | Python | apache-2.0 | 16,885 |
import gdb
# This is not quite right, as local vars may override symname
def read_global_var (symname):
return gdb.selected_frame().read_var(symname)
def g_quark_to_string (quark):
if quark == None:
return None
quark = long(quark)
if quark == 0:
return None
try:
val = read_global_var ("quarks")
max_q = long(read_global_var ("quark_seq_id"))
except:
try:
val = read_global_var ("g_quarks")
max_q = long(read_global_var ("g_quark_seq_id"))
except:
            return None
if quark < max_q:
return val[quark].string()
return None
# We override the node printers too, so that node->next is not expanded
class GListNodePrinter:
"Prints a GList node"
def __init__ (self, val):
self.val = val
def to_string (self):
return "{data=%s, next=0x%x, prev=0x%x}" % (str(self.val["data"]), long(self.val["next"]), long(self.val["prev"]))
class GSListNodePrinter:
"Prints a GSList node"
def __init__ (self, val):
self.val = val
def to_string (self):
return "{data=%s, next=0x%x}" % (str(self.val["data"]), long(self.val["next"]))
class GListPrinter:
"Prints a GList"
class _iterator:
def __init__(self, head, listtype):
self.link = head
self.listtype = listtype
self.count = 0
def __iter__(self):
return self
def next(self):
if self.link == 0:
raise StopIteration
data = self.link['data']
self.link = self.link['next']
count = self.count
self.count = self.count + 1
return ('[%d]' % count, data)
def __init__ (self, val, listtype):
self.val = val
self.listtype = listtype
def children(self):
return self._iterator(self.val, self.listtype)
def to_string (self):
return "0x%x" % (long(self.val))
def display_hint (self):
return "array"
class GHashPrinter:
"Prints a GHashTable"
class _iterator:
def __init__(self, ht, keys_are_strings):
self.ht = ht
if ht != 0:
self.keys = ht["keys"]
self.values = ht["values"]
self.hashes = ht["hashes"]
self.size = ht["size"]
self.pos = 0
self.keys_are_strings = keys_are_strings
self.value = None
def __iter__(self):
return self
def next(self):
if self.ht == 0:
raise StopIteration
if self.value != None:
v = self.value
self.value = None
return v
while long(self.pos) < long(self.size):
self.pos = self.pos + 1
if long (self.hashes[self.pos]) >= 2:
key = self.keys[self.pos]
val = self.values[self.pos]
if self.keys_are_strings:
key = key.cast (gdb.lookup_type("char").pointer())
# Queue value for next result
self.value = ('[%dv]'% (self.pos), val)
# Return key
return ('[%dk]'% (self.pos), key)
raise StopIteration
def __init__ (self, val):
self.val = val
self.keys_are_strings = False
try:
string_hash = read_global_var ("g_str_hash")
except:
string_hash = None
if self.val != 0 and string_hash != None and self.val["hash_func"] == string_hash:
self.keys_are_strings = True
def children(self):
return self._iterator(self.val, self.keys_are_strings)
def to_string (self):
return "0x%x" % (long(self.val))
def display_hint (self):
return "map"
def pretty_printer_lookup (val):
if is_g_type_instance (val):
return GTypePrettyPrinter (val)
def pretty_printer_lookup (val):
# None yet, want things like hash table and list
type = val.type.unqualified()
# If it points to a reference, get the reference.
if type.code == gdb.TYPE_CODE_REF:
type = type.target ()
if type.code == gdb.TYPE_CODE_PTR:
type = type.target().unqualified()
t = str(type)
if t == "GList":
return GListPrinter(val, "GList")
if t == "GSList":
return GListPrinter(val, "GSList")
if t == "GHashTable":
return GHashPrinter(val)
else:
t = str(type)
if t == "GList":
return GListNodePrinter(val)
if t == "GSList *":
return GListPrinter(val, "GSList")
return None
def register (obj):
if obj == None:
obj = gdb
obj.pretty_printers.append(pretty_printer_lookup)
class ForeachCommand (gdb.Command):
"""Foreach on list"""
def __init__ (self):
super (ForeachCommand, self).__init__ ("gforeach",
gdb.COMMAND_DATA,
gdb.COMPLETE_SYMBOL)
def valid_name (self, name):
if not name[0].isalpha():
return False
return True
def parse_args (self, arg):
i = arg.find(" ")
if i <= 0:
raise Exception ("No var specified")
var = arg[:i]
if not self.valid_name(var):
raise Exception ("Invalid variable name")
while i < len (arg) and arg[i].isspace():
i = i + 1
if arg[i:i+2] != "in":
raise Exception ("Invalid syntax, missing in")
i = i + 2
while i < len (arg) and arg[i].isspace():
i = i + 1
colon = arg.find (":", i)
if colon == -1:
raise Exception ("Invalid syntax, missing colon")
val = arg[i:colon]
colon = colon + 1
while colon < len (arg) and arg[colon].isspace():
colon = colon + 1
command = arg[colon:]
return (var, val, command)
def do_iter(self, arg, item, command):
item = item.cast (gdb.lookup_type("void").pointer())
item = long(item)
to_eval = "set $%s = (void *)0x%x\n"%(arg, item)
gdb.execute(to_eval)
gdb.execute(command)
def slist_iterator (self, arg, container, command):
l = container.cast (gdb.lookup_type("GSList").pointer())
while long(l) != 0:
self.do_iter (arg, l["data"], command)
l = l["next"]
def list_iterator (self, arg, container, command):
l = container.cast (gdb.lookup_type("GList").pointer())
while long(l) != 0:
self.do_iter (arg, l["data"], command)
l = l["next"]
def pick_iterator (self, container):
t = container.type.unqualified()
if t.code == gdb.TYPE_CODE_PTR:
t = t.target().unqualified()
t = str(t)
if t == "GSList":
return self.slist_iterator
if t == "GList":
return self.list_iterator
raise Exception("Invalid container type %s"%(str(container.type)))
def invoke (self, arg, from_tty):
(var, container, command) = self.parse_args(arg)
container = gdb.parse_and_eval (container)
func = self.pick_iterator(container)
func(var, container, command)
ForeachCommand ()
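# Usage sketch from a gdb session (the variable names are illustrative):
#   (gdb) gforeach item in my_glist: print (char *) $item
# Each iteration sets the convenience variable $item to the node's data
# pointer (a void *), so it usually needs a cast before being dereferenced.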
| jonnyniv/boost_converter | host/gui/GTK+/share/glib-2.0/gdb/glib.py | Python | apache-2.0 | 7,426 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Functions for working with arbitrarily nested sequences of elements.
NOTE(mrry): This fork of the `tensorflow.python.util.nest` module
makes two changes:
1. It removes support for lists as a level of nesting in nested structures.
2. It adds support for `SparseTensorValue` as an atomic element.
The motivation for this change is twofold:
1. It seems more natural for lists to be treated (e.g. in Dataset constructors)
as tensors, rather than lists of (lists of...) tensors.
2. This is needed because `SparseTensorValue` is implemented as a `namedtuple`
that would normally be flattened and we want to be able to create sparse
tensor from `SparseTensorValue's similarly to creating tensors from numpy
arrays.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six as _six
from tensorflow.python.framework import sparse_tensor as _sparse_tensor
from tensorflow.python.util import _pywrap_utils
from tensorflow.python.util import nest
from tensorflow.python.util.compat import collections_abc as _collections_abc
def _sorted(dict_):
"""Returns a sorted list of the dict keys, with error if keys not sortable."""
try:
return sorted(list(dict_))
except TypeError:
raise TypeError("nest only supports dicts with sortable keys.")
def _yield_value(iterable):
if isinstance(iterable, _collections_abc.Mapping):
# Iterate through dictionaries in a deterministic order by sorting the
# keys. Notice this means that we ignore the original order of `OrderedDict`
# instances. This is intentional, to avoid potential bugs caused by mixing
# ordered and plain dicts (e.g., flattening a dict but using a
# corresponding `OrderedDict` to pack it back).
for key in _sorted(iterable):
yield iterable[key]
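  # A SparseTensorValue is a namedtuple, but it is yielded whole below so
  # that it is treated as a single atomic element instead of being
  # flattened into its fields.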
elif isinstance(iterable, _sparse_tensor.SparseTensorValue):
yield iterable
else:
for value in iterable:
yield value
# See the swig file (../../util/util.i) for documentation.
is_sequence = _pywrap_utils.IsSequenceForData
# See the swig file (../../util/util.i) for documentation.
flatten = _pywrap_utils.FlattenForData
def assert_same_structure(nest1, nest2, check_types=True):
"""Asserts that two structures are nested in the same way.
Args:
nest1: an arbitrarily nested structure.
nest2: an arbitrarily nested structure.
    check_types: if `True` (default) the types of sequences must match as
      well. For dictionaries, the "type" is considered to include their
      keys. In other words, two dictionaries with different keys are considered
      to have a different "type". If set to `False`, two iterables are
      considered the same as long as they yield elements with the same
      structures.
Raises:
ValueError: If the two structures do not have the same number of elements or
if the two structures are not nested in the same way.
TypeError: If the two structures differ in the type of sequence in any of
their substructures. Only possible if `check_types` is `True`.
"""
_pywrap_utils.AssertSameStructureForData(nest1, nest2, check_types)
def _packed_nest_with_indices(structure, flat, index):
"""Helper function for pack_nest_as.
Args:
structure: Substructure (tuple of elements and/or tuples) to mimic
flat: Flattened values to output substructure for.
index: Index at which to start reading from flat.
Returns:
The tuple (new_index, child), where:
* new_index - the updated index into `flat` having processed `structure`.
* packed - the subset of `flat` corresponding to `structure`,
having started at `index`, and packed into the same nested
format.
Raises:
ValueError: if `structure` contains more elements than `flat`
(assuming indexing starts from `index`).
"""
packed = []
for s in _yield_value(structure):
if is_sequence(s):
new_index, child = _packed_nest_with_indices(s, flat, index)
packed.append(nest._sequence_like(s, child)) # pylint: disable=protected-access
index = new_index
else:
packed.append(flat[index])
index += 1
return index, packed
def pack_sequence_as(structure, flat_sequence):
"""Returns a given flattened sequence packed into a nest.
If `structure` is a scalar, `flat_sequence` must be a single-element list;
in this case the return value is `flat_sequence[0]`.
Args:
structure: tuple or list constructed of scalars and/or other tuples/lists,
or a scalar. Note: numpy arrays are considered scalars.
flat_sequence: flat sequence to pack.
Returns:
packed: `flat_sequence` converted to have the same recursive structure as
`structure`.
Raises:
ValueError: If nest and structure have different element counts.
"""
if not (is_sequence(flat_sequence) or isinstance(flat_sequence, list)):
raise TypeError("flat_sequence must be a sequence")
if not is_sequence(structure):
if len(flat_sequence) != 1:
raise ValueError("Structure is a scalar but len(flat_sequence) == %d > 1"
% len(flat_sequence))
return flat_sequence[0]
flat_structure = flatten(structure)
if len(flat_structure) != len(flat_sequence):
raise ValueError(
"Could not pack sequence. Structure had %d elements, but flat_sequence "
"had %d elements. Structure: %s, flat_sequence: %s."
% (len(flat_structure), len(flat_sequence), structure, flat_sequence))
_, packed = _packed_nest_with_indices(structure, flat_sequence, 0)
return nest._sequence_like(structure, packed) # pylint: disable=protected-access
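# Illustrative sketch (not part of the original module): a small round-trip
# showing how `flatten` and `pack_sequence_as` relate. The structure and the
# values are invented for illustration; dict leaves are emitted in sorted-key
# order, which is why the repacked result equals the original.
def _example_flatten_pack_round_trip():
  structure = (1, {"a": 2, "b": (3, 4)})
  flat = flatten(structure)  # expected: [1, 2, 3, 4]
  repacked = pack_sequence_as(structure, flat)
  return flat, repacked  # repacked == structure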
def map_structure(func, *structure, **check_types_dict):
"""Applies `func` to each entry in `structure` and returns a new structure.
Applies `func(x[0], x[1], ...)` where x[i] is an entry in
`structure[i]`. All structures in `structure` must have the same arity,
and the return value will contain the results in the same structure.
Args:
    func: A callable that accepts as many arguments as there are structures.
    *structure: scalar, or tuple or list constructed of scalars and/or other
      tuples/lists. Note: numpy arrays are considered scalars.
**check_types_dict: only valid keyword argument is `check_types`. If set to
`True` (default) the types of iterables within the structures have to be
same (e.g. `map_structure(func, [1], (1,))` raises a `TypeError`
exception). To allow this set this argument to `False`.
Returns:
A new structure with the same arity as `structure`, whose values correspond
to `func(x[0], x[1], ...)` where `x[i]` is a value in the corresponding
location in `structure[i]`. If there are different sequence types and
`check_types` is `False` the sequence types of the first structure will be
used.
Raises:
TypeError: If `func` is not callable or if the structures do not match
each other by depth tree.
ValueError: If no structure is provided or if the structures do not match
each other by type.
ValueError: If wrong keyword arguments are provided.
"""
if not callable(func):
raise TypeError("func must be callable, got: %s" % func)
if not structure:
raise ValueError("Must provide at least one structure")
if check_types_dict:
if "check_types" not in check_types_dict or len(check_types_dict) > 1:
raise ValueError("Only valid keyword argument is check_types")
check_types = check_types_dict["check_types"]
else:
check_types = True
for other in structure[1:]:
assert_same_structure(structure[0], other, check_types=check_types)
flat_structure = (flatten(s) for s in structure)
entries = zip(*flat_structure)
return pack_sequence_as(
structure[0], [func(*x) for x in entries])
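# Illustrative sketch (not part of the original module): `map_structure`
# applies a function leaf-wise across structures of identical shape. The
# values below are invented for illustration.
def _example_map_structure():
  left = {"a": 1, "b": (2, 3)}
  right = {"a": 10, "b": (20, 30)}
  return map_structure(lambda x, y: x + y, left, right)
  # expected: {"a": 11, "b": (22, 33)}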
def _yield_flat_up_to(shallow_tree, input_tree):
"""Yields elements `input_tree` partially flattened up to `shallow_tree`."""
if is_sequence(shallow_tree):
for shallow_branch, input_branch in zip(_yield_value(shallow_tree),
_yield_value(input_tree)):
for input_leaf in _yield_flat_up_to(shallow_branch, input_branch):
yield input_leaf
else:
yield input_tree
def assert_shallow_structure(shallow_tree, input_tree, check_types=True):
"""Asserts that `shallow_tree` is a shallow structure of `input_tree`.
That is, this function tests if the `input_tree` structure can be created from
the `shallow_tree` structure by replacing its leaf nodes with deeper
tree structures.
Examples:
The following code will raise an exception:
```python
shallow_tree = ["a", "b"]
input_tree = ["c", ["d", "e"], "f"]
assert_shallow_structure(shallow_tree, input_tree)
```
The following code will not raise an exception:
```python
shallow_tree = ["a", "b"]
input_tree = ["c", ["d", "e"]]
assert_shallow_structure(shallow_tree, input_tree)
```
Args:
shallow_tree: an arbitrarily nested structure.
input_tree: an arbitrarily nested structure.
check_types: if `True` (default) the sequence types of `shallow_tree` and
`input_tree` have to be the same.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`. Only raised if `check_types` is `True`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
"""
if is_sequence(shallow_tree):
if not is_sequence(input_tree):
raise TypeError(
"If shallow structure is a sequence, input must also be a sequence. "
"Input has type: %s." % type(input_tree))
if check_types and not isinstance(input_tree, type(shallow_tree)):
raise TypeError(
"The two structures don't have the same sequence type. Input "
"structure has type %s, while shallow structure has type %s."
% (type(input_tree), type(shallow_tree)))
if len(input_tree) != len(shallow_tree):
raise ValueError(
"The two structures don't have the same sequence length. Input "
"structure has length %s, while shallow structure has length %s."
% (len(input_tree), len(shallow_tree)))
if check_types and isinstance(shallow_tree, _collections_abc.Mapping):
if set(input_tree) != set(shallow_tree):
raise ValueError(
"The two structures don't have the same keys. Input "
"structure has keys %s, while shallow structure has keys %s." %
(list(input_tree), list(shallow_tree)))
input_tree = sorted(_six.iteritems(input_tree))
shallow_tree = sorted(_six.iteritems(shallow_tree))
for shallow_branch, input_branch in zip(shallow_tree, input_tree):
assert_shallow_structure(shallow_branch, input_branch,
check_types=check_types)
def flatten_up_to(shallow_tree, input_tree):
"""Flattens `input_tree` up to `shallow_tree`.
Any further depth in structure in `input_tree` is retained as elements in the
  partially flattened output.
If `shallow_tree` and `input_tree` are not sequences, this returns a
single-element list: `[input_tree]`.
Use Case:
Sometimes we may wish to partially flatten a nested sequence, retaining some
of the nested structure. We achieve this by specifying a shallow structure,
`shallow_tree`, we wish to flatten up to.
The input, `input_tree`, can be thought of as having the same structure as
`shallow_tree`, but with leaf nodes that are themselves tree structures.
Examples:
```python
input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
shallow_tree = [[True, True], [False, True]]
flattened_input_tree = flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = flatten_up_to(shallow_tree, shallow_tree)
# Output is:
# [[2, 2], [3, 3], [4, 9], [5, 5]]
# [True, True, False, True]
```
```python
input_tree = [[('a', 1), [('b', 2), [('c', 3), [('d', 4)]]]]]
shallow_tree = [['level_1', ['level_2', ['level_3', ['level_4']]]]]
input_tree_flattened_as_shallow_tree = flatten_up_to(shallow_tree, input_tree)
input_tree_flattened = flatten(input_tree)
# Output is:
# [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
# ['a', 1, 'b', 2, 'c', 3, 'd', 4]
```
Non-Sequence Edge Cases:
```python
flatten_up_to(0, 0) # Output: [0]
flatten_up_to(0, [0, 1, 2]) # Output: [[0, 1, 2]]
flatten_up_to([0, 1, 2], 0) # Output: TypeError
flatten_up_to([0, 1, 2], [0, 1, 2]) # Output: [0, 1, 2]
```
Args:
shallow_tree: a possibly pruned structure of input_tree.
input_tree: an arbitrarily nested structure or a scalar object.
Note, numpy arrays are considered scalars.
Returns:
A Python list, the partially flattened version of `input_tree` according to
the structure of `shallow_tree`.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
"""
assert_shallow_structure(shallow_tree, input_tree)
return list(_yield_flat_up_to(shallow_tree, input_tree))
def map_structure_up_to(shallow_tree, func, *inputs):
"""Applies a function or op to a number of partially flattened inputs.
The `inputs` are flattened up to `shallow_tree` before being mapped.
Use Case:
Sometimes we wish to apply a function to a partially flattened
sequence (for example when the function itself takes sequence inputs). We
achieve this by specifying a shallow structure, `shallow_tree` we wish to
flatten up to.
The `inputs`, can be thought of as having the same structure as
`shallow_tree`, but with leaf nodes that are themselves tree structures.
This function, therefore, will return something with the same base structure
as `shallow_tree`.
Examples:
```python
ab_tuple = collections.namedtuple("ab_tuple", "a, b")
op_tuple = collections.namedtuple("op_tuple", "add, mul")
inp_val = ab_tuple(a=2, b=3)
inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))
out = map_structure_up_to(inp_val, lambda val, ops: (val + ops.add) * ops.mul,
inp_val, inp_ops)
# Output is: ab_tuple(a=6, b=15)
```
```python
data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]]
name_list = ['evens', ['odds', 'primes']]
out = map_structure_up_to(
name_list,
lambda name, sec: "first_{}_{}".format(len(sec), name),
name_list, data_list)
# Output is: ['first_4_evens', ['first_5_odds', 'first_3_primes']]
```
Args:
shallow_tree: a shallow tree, common to all the inputs.
func: callable which will be applied to each input individually.
*inputs: arbitrarily nested combination of objects that are compatible with
shallow_tree. The function `func` is applied to corresponding
partially flattened elements of each input, so the function must support
arity of `len(inputs)`.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
Returns:
result of repeatedly applying `func`, with same structure as
`shallow_tree`.
"""
if not inputs:
raise ValueError("Cannot map over no sequences")
for input_tree in inputs:
assert_shallow_structure(shallow_tree, input_tree)
# Flatten each input separately, apply the function to corresponding elements,
# then repack based on the structure of the first input.
all_flattened_up_to = (
flatten_up_to(shallow_tree, input_tree) for input_tree in inputs)
results = [func(*tensors) for tensors in zip(*all_flattened_up_to)]
return pack_sequence_as(structure=shallow_tree, flat_sequence=results)
| annarev/tensorflow | tensorflow/python/data/util/nest.py | Python | apache-2.0 | 16,774 |
from django.test import TestCase
from threadlocals.threadlocals import set_current_user
from django.contrib.auth import get_user_model
from powerdns.models import (
Domain,
DomainRequest,
Record,
RecordRequest,
)
from .utils import (
ServiceFactory,
assert_does_exist,
assert_not_exists,
)
class TestRequests(TestCase):
"""Tests for domain/record requests"""
def setUp(self):
self.user1 = get_user_model().objects.create_user(
'user1', '[email protected]', 'password'
)
self.user2 = get_user_model().objects.create_user(
'user2', '[email protected]', 'password'
)
self.domain = Domain.objects.create(
name='example.com',
type='NATIVE',
owner=self.user1
)
self.record = Record.objects.create(
domain=self.domain,
name='forum.example.com',
type='CNAME',
content='phpbb.example.com',
owner=self.user1,
)
def test_subdomain_creation(self):
set_current_user(self.user1)
request = DomainRequest.objects.create(
parent_domain=self.domain,
target_name='subdomain.example.com',
target_owner=self.user1,
target_service=ServiceFactory(),
)
request.accept()
assert_does_exist(
Domain, name='subdomain.example.com', owner=self.user1
)
def test_domain_change(self):
request = DomainRequest.objects.create(
domain=self.domain,
target_name='example.com',
target_type='MASTER',
owner=self.user2,
target_owner=self.user1,
target_service=ServiceFactory(),
)
request.accept()
assert_does_exist(
Domain,
name='example.com',
type='MASTER',
owner=self.user1
)
assert_not_exists(Domain, name='example.com', type='NATIVE')
def test_record_creation(self):
request = RecordRequest.objects.create(
domain=self.domain,
target_type='CNAME',
target_name='site.example.com',
target_content='www.example.com',
owner=self.user1,
target_owner=self.user2,
)
request.accept()
assert_does_exist(
Record,
content='www.example.com',
owner=self.user2,
)
def test_record_change(self):
request = RecordRequest.objects.create(
domain=self.domain,
record=self.record,
target_type='CNAME',
target_name='forum.example.com',
target_content='djangobb.example.com',
target_owner=self.user2,
owner=self.user1,
)
request.accept()
assert_does_exist(Record, content='djangobb.example.com')
assert_not_exists(Record, content='phpbb.example.com')
| dominikkowalski/django-powerdns-dnssec | powerdns/tests/test_requests.py | Python | bsd-2-clause | 2,984 |
import os
import csv
import pickle
from indra.literature import id_lookup
from indra.sources import trips, reach, index_cards
from assembly_eval import have_file, run_assembly
if __name__ == '__main__':
pmc_ids = [s.strip() for s in open('pmcids.txt', 'rt').readlines()]
# Load the REACH reading output
with open('reach/reach_stmts_batch_4_eval.pkl') as f:
reach_stmts = pickle.load(f)
# Load the PMID to PMCID map
pmcid_to_pmid = {}
with open('pmc_batch_4_id_map.txt') as f:
csvreader = csv.reader(f, delimiter='\t')
for row in csvreader:
pmcid_to_pmid[row[0]] = row[1]
for pmcid in pmc_ids:
print 'Processing %s...' % pmcid
# Process TRIPS
trips_fname = 'trips/' + pmcid + '.ekb'
tp = trips.process_xml(open(trips_fname).read())
# Get REACH statements
reach_stmts_for_pmcid = reach_stmts.get(pmcid_to_pmid[pmcid], [])
if not reach_stmts_for_pmcid:
print "No REACH statements for %s" % pmcid
# Get NACTEM/ISI statements
fname = 'nactem/' + pmcid + '.cards'
if not os.path.exists(fname):
nactem_stmts = []
else:
icp = index_cards.process_json_file(fname, 'nactem')
nactem_stmts = icp.statements
# Combine all statements
all_statements = tp.statements + reach_stmts_for_pmcid + nactem_stmts
# Run assembly
run_assembly(all_statements, 'combined', pmcid)
| pvtodorov/indra | indra/benchmarks/assembly_eval/combine4/run_combined.py | Python | bsd-2-clause | 1,490 |
from django import template
from django.template.defaultfilters import stringfilter
register = template.Library()
STATUS_COLORS = {
'default': 'blue',
'queued': 'blue',
'undetermined': 'blue',
'infected': 'red',
'uninfected': 'green',
'deposited': 'blue',
'rejected': 'red',
'accepted': 'green',
'valid': 'green',
'invalid': 'red',
'undefined': 'blue'
}
BOX_COLORS = {
'blue': 'primary',
'red': 'danger',
'green': 'success',
'grey': 'default'
}
@register.filter
@stringfilter
def status_color(status):
"""
    This filter will return grey for an unknown status.
"""
return STATUS_COLORS.get(status, 'grey')
@register.filter
def box_color(status):
"""
    This filter will return 'default' for an unknown status.
"""
return BOX_COLORS.get(STATUS_COLORS.get(status, 'grey'), 'default')
@register.filter
def status_sps(status):
"""
This method will return valid, invalid or undefined for a given result of
models.PackageMember.sps_validation_status().
status: Tuple(None, {})
status: Tuple(True, {'is_valid': True, 'sps_errors': [], 'dtd_errors': []})
status: Tuple(False, {'is_valid': True, 'sps_errors': [], 'dtd_errors': []})
"""
if status[0] is True:
return 'valid'
if status[0] is False:
return 'invalid'
return 'undefined'
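# Illustrative sketch (not part of the original module): in a template these
# filters are typically combined with Bootstrap-style class names (markup and
# variable names invented for illustration):
#
#   {% load frontdesk %}
#   <div class="panel panel-{{ member.status|box_color }}"> ... </div>
#   <span data-color="{{ member.status|status_color }}">
#     {{ member.sps_validation_status|status_sps }}
#   </span>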
@register.filter
def widget_scielops_colors_weight(xmls):
"""
    This filter will return a color for the SciELO PS widget. The color is
    matched according to the error level of any of the members of the package.
    xmls: Dict with XMLs returned by models.Package.xmls().
"""
if len(xmls['invalid']) > 0:
return STATUS_COLORS['invalid']
if len(xmls['undefined']) > 0:
return STATUS_COLORS['undefined']
if len(xmls['valid']) == 0:
return STATUS_COLORS['undefined']
    return STATUS_COLORS['valid']
 | gustavofonseca/penne-core | frontdesk/templatetags/frontdesk.py | Python | bsd-2-clause | 1,931 |
default_app_config = 'comet.apps.CometIndicatorConfig'
 | LegoStormtroopr/comet-indicator-registry | comet/__init__.py | Python | bsd-2-clause | 54 |
# Copyright 2012 Energid Technologies
from energid_nlp import logic
GENERATION_PROPOSITION = '$generate'
class Error(Exception):
pass
class Generator:
def __init__(self, kb):
self.kb = kb
def generate_prim(self, concept):
if isinstance(concept, logic.Description):
return self.generate_prim(concept.base)
elif isinstance(concept, logic.Expr):
return self.generate_prim(concept.op)
else:
result = '%s' % (concept,)
return str(result)
def generate(self, concept):
if (isinstance(concept, str) or
isinstance(concept, logic.Description) or
isinstance(concept, logic.Expr)):
template = self.kb.slot_value(concept, GENERATION_PROPOSITION)
if template is None:
return self.generate_prim(concept)
else:
return self.generate_template(concept, template.op)
else:
return self.generate_prim(concept)
def generate_template(self, concept, template):
result_string = ''
start = 0
while start < len(template):
slot_start = template.find('{', start)
if slot_start == -1:
# No more slot refs
result_string = result_string + template[start:]
break
result_string = result_string + template[start:slot_start]
slot_end = template.find('}', slot_start + 1)
if slot_end == -1:
raise Error("Generation template %r for %s has an unclosed '{'" % (
template, concept))
slot_name = template[slot_start + 1:slot_end]
slot_value = self.kb.slot_value(concept, slot_name)
if slot_value is not None:
result_string = result_string + self.generate(slot_value)
start = slot_end + 1
    # Strip whitespace out of the result, in case slots in the template
# couldn't be filled.
return result_string.strip()
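# Illustrative sketch (not part of the original module): a stub knowledge base
# showing how '$generate' templates get expanded. The concept, slots and
# template text are invented; only the slot_value() interface used above is
# assumed.
class _StubTemplate(object):
  """Minimal stand-in for the expression object a real KB would return."""
  def __init__(self, op):
    self.op = op
class _StubKB(object):
  def __init__(self, slots):
    self._slots = slots
  def slot_value(self, concept, slot_name):
    return self._slots.get((concept, slot_name))
def _example_generate():
  kb = _StubKB({
      ('dog', GENERATION_PROPOSITION): _StubTemplate('the {color} dog'),
      ('dog', 'color'): 'brown',
  })
  return Generator(kb).generate('dog')  # expected: 'the brown dog'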
| wiseman/energid_nlp | energid_nlp/generation.py | Python | bsd-2-clause | 1,810 |
"""Magic functions for running cells in various scripts."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import errno
import os
import sys
import signal
import time
from subprocess import Popen, PIPE
import atexit
from IPython.core import magic_arguments
from IPython.core.magic import (
Magics, magics_class, line_magic, cell_magic
)
from IPython.lib.backgroundjobs import BackgroundJobManager
from IPython.utils import py3compat
from IPython.utils.process import arg_split
from traitlets import List, Dict, default
#-----------------------------------------------------------------------------
# Magic implementation classes
#-----------------------------------------------------------------------------
def script_args(f):
"""single decorator for adding script args"""
args = [
magic_arguments.argument(
'--out', type=str,
help="""The variable in which to store stdout from the script.
If the script is backgrounded, this will be the stdout *pipe*,
            instead of the stdout text itself.
"""
),
magic_arguments.argument(
'--err', type=str,
help="""The variable in which to store stderr from the script.
If the script is backgrounded, this will be the stderr *pipe*,
instead of the stderr text itself.
"""
),
magic_arguments.argument(
'--bg', action="store_true",
help="""Whether to run the script in the background.
If given, the only way to see the output of the command is
with --out/err.
"""
),
magic_arguments.argument(
'--proc', type=str,
help="""The variable in which to store Popen instance.
This is used only when --bg option is given.
"""
),
]
for arg in args:
f = arg(f)
return f
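# Illustrative sketch (not part of the original module): the flags declared
# above are typically combined like this in an interactive session (cell
# contents invented for illustration):
#
#   %%script bash --bg --out ping_out --proc ping_proc
#   ping -c 4 example.com
#
# With --bg the cell returns immediately; `ping_out` then holds the stdout
# *pipe* of the subprocess and `ping_proc` the Popen instance.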
@magics_class
class ScriptMagics(Magics):
"""Magics for talking to scripts
This defines a base `%%script` cell magic for running a cell
with a program in a subprocess, and registers a few top-level
magics that call %%script with common interpreters.
"""
script_magics = List(
help="""Extra script cell magics to define
This generates simple wrappers of `%%script foo` as `%%foo`.
If you want to add script magics that aren't on your path,
specify them in script_paths
""",
).tag(config=True)
@default('script_magics')
def _script_magics_default(self):
"""default to a common list of programs"""
defaults = [
'sh',
'bash',
'perl',
'ruby',
'python',
'python2',
'python3',
'pypy',
]
if os.name == 'nt':
defaults.extend([
'cmd',
])
return defaults
script_paths = Dict(
help="""Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
Only necessary for items in script_magics where the default path will not
find the right interpreter.
"""
).tag(config=True)
def __init__(self, shell=None):
super(ScriptMagics, self).__init__(shell=shell)
self._generate_script_magics()
self.job_manager = BackgroundJobManager()
self.bg_processes = []
atexit.register(self.kill_bg_processes)
def __del__(self):
self.kill_bg_processes()
def _generate_script_magics(self):
cell_magics = self.magics['cell']
for name in self.script_magics:
cell_magics[name] = self._make_script_magic(name)
def _make_script_magic(self, name):
"""make a named magic, that calls %%script with a particular program"""
# expand to explicit path if necessary:
script = self.script_paths.get(name, name)
@magic_arguments.magic_arguments()
@script_args
def named_script_magic(line, cell):
# if line, add it as cl-flags
if line:
line = "%s %s" % (script, line)
else:
line = script
return self.shebang(line, cell)
# write a basic docstring:
named_script_magic.__doc__ = \
"""%%{name} script magic
Run cells with {script} in a subprocess.
This is a shortcut for `%%script {script}`
""".format(**locals())
return named_script_magic
@magic_arguments.magic_arguments()
@script_args
@cell_magic("script")
def shebang(self, line, cell):
"""Run a cell via a shell command
        The `%%script` line is like the #! line of a script,
specifying a program (bash, perl, ruby, etc.) with which to run.
The rest of the cell is run by that program.
Examples
--------
::
In [1]: %%script bash
...: for i in 1 2 3; do
...: echo $i
...: done
1
2
3
"""
argv = arg_split(line, posix = not sys.platform.startswith('win'))
args, cmd = self.shebang.parser.parse_known_args(argv)
try:
p = Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE)
except OSError as e:
if e.errno == errno.ENOENT:
print("Couldn't find program: %r" % cmd[0])
return
else:
raise
if not cell.endswith('\n'):
cell += '\n'
cell = cell.encode('utf8', 'replace')
if args.bg:
self.bg_processes.append(p)
self._gc_bg_processes()
if args.out:
self.shell.user_ns[args.out] = p.stdout
if args.err:
self.shell.user_ns[args.err] = p.stderr
self.job_manager.new(self._run_script, p, cell, daemon=True)
if args.proc:
self.shell.user_ns[args.proc] = p
return
try:
out, err = p.communicate(cell)
except KeyboardInterrupt:
try:
p.send_signal(signal.SIGINT)
time.sleep(0.1)
if p.poll() is not None:
print("Process is interrupted.")
return
p.terminate()
time.sleep(0.1)
if p.poll() is not None:
print("Process is terminated.")
return
p.kill()
print("Process is killed.")
except OSError:
pass
except Exception as e:
print("Error while terminating subprocess (pid=%i): %s" \
% (p.pid, e))
return
out = py3compat.bytes_to_str(out)
err = py3compat.bytes_to_str(err)
if args.out:
self.shell.user_ns[args.out] = out
else:
sys.stdout.write(out)
sys.stdout.flush()
if args.err:
self.shell.user_ns[args.err] = err
else:
sys.stderr.write(err)
sys.stderr.flush()
def _run_script(self, p, cell):
"""callback for running the script in the background"""
p.stdin.write(cell)
p.stdin.close()
p.wait()
@line_magic("killbgscripts")
def killbgscripts(self, _nouse_=''):
"""Kill all BG processes started by %%script and its family."""
self.kill_bg_processes()
print("All background processes were killed.")
def kill_bg_processes(self):
"""Kill all BG processes which are still running."""
if not self.bg_processes:
return
for p in self.bg_processes:
if p.poll() is None:
try:
p.send_signal(signal.SIGINT)
except:
pass
time.sleep(0.1)
self._gc_bg_processes()
if not self.bg_processes:
return
for p in self.bg_processes:
if p.poll() is None:
try:
p.terminate()
except:
pass
time.sleep(0.1)
self._gc_bg_processes()
if not self.bg_processes:
return
for p in self.bg_processes:
if p.poll() is None:
try:
p.kill()
except:
pass
self._gc_bg_processes()
def _gc_bg_processes(self):
self.bg_processes = [p for p in self.bg_processes if p.poll() is None]
| unnikrishnankgs/va | venv/lib/python3.5/site-packages/IPython/core/magics/script.py | Python | bsd-2-clause | 8,835 |
# flake8: NOQA
from . import datasets
from . import evaluations
from . import extensions
from . import models
from . import utils
| start-jsk/jsk_apc | demos/grasp_fusion/grasp_fusion_lib/contrib/grasp_fusion/__init__.py | Python | bsd-3-clause | 131 |
# -*- coding: utf-8 -*-
import collections
import itertools
import json
import os
import posixpath
import re
import time
from operator import attrgetter
from datetime import datetime
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.core.files.storage import default_storage as storage
from django.db import models, transaction
from django.dispatch import receiver
from django.db.models import Max, Q, signals as dbsignals
from django.utils.translation import trans_real, ugettext_lazy as _
import caching.base as caching
import commonware.log
from django_extensions.db.fields.json import JSONField
from django_statsd.clients import statsd
from jinja2.filters import do_dictsort
from olympia import amo
from olympia.amo.models import (
SlugField, OnChangeMixin, ModelBase, ManagerBase, manual_order)
from olympia.access import acl
from olympia.addons.utils import (
get_creatured_ids, get_featured_ids, generate_addon_guid)
from olympia.amo import helpers
from olympia.amo.decorators import use_master, write
from olympia.amo.utils import (
attach_trans_dict, cache_ns_key, chunked,
no_translation, send_mail, slugify, sorted_groupby, timer, to_language,
urlparams, find_language, AMOJSONEncoder)
from olympia.amo.urlresolvers import get_outgoing_url, reverse
from olympia.constants.categories import CATEGORIES, CATEGORIES_BY_ID
from olympia.files.models import File
from olympia.files.utils import (
extract_translations, resolve_i18n_message, parse_addon)
from olympia.reviews.models import Review
from olympia.tags.models import Tag
from olympia.translations.fields import (
LinkifiedField, PurifiedField, save_signal, TranslatedField, Translation)
from olympia.users.models import UserForeignKey, UserProfile
from olympia.versions.compare import version_int
from olympia.versions.models import inherit_nomination, Version
from . import signals
log = commonware.log.getLogger('z.addons')
def clean_slug(instance, slug_field='slug'):
"""Cleans a model instance slug.
This strives to be as generic as possible as it's used by Addons
and Collections, and maybe more in the future.
"""
slug = getattr(instance, slug_field, None) or instance.name
if not slug:
# Initialize the slug with what we have available: a name translation,
# or the id of the instance, or in last resort the model name.
translations = Translation.objects.filter(id=instance.name_id)
if translations.exists():
slug = translations[0]
elif instance.id:
slug = str(instance.id)
else:
slug = instance.__class__.__name__
max_length = instance._meta.get_field_by_name(slug_field)[0].max_length
slug = slugify(slug)[:max_length]
if DeniedSlug.blocked(slug):
slug = slug[:max_length - 1] + '~'
# The following trick makes sure we are using a manager that returns
# all the objects, as otherwise we could have a slug clash on our hands.
# Eg with the "Addon.objects" manager, which doesn't list deleted addons,
# we could have a "clean" slug which is in fact already assigned to an
# already existing (deleted) addon. Also, make sure we use the base class.
manager = models.Manager()
manager.model = instance._meta.proxy_for_model or instance.__class__
qs = manager.values_list(slug_field, flat=True) # Get list of all slugs.
if instance.id:
qs = qs.exclude(pk=instance.id) # Can't clash with itself.
# We first need to make sure there's a clash, before trying to find a
# suffix that is available. Eg, if there's a "foo-bar" slug, "foo" is still
# available.
clash = qs.filter(**{slug_field: slug})
if clash.exists():
# Leave space for 99 clashes.
slug = slugify(slug)[:max_length - 2]
# There is a clash, so find a suffix that will make this slug unique.
lookup = {'%s__startswith' % slug_field: slug}
clashes = qs.filter(**lookup)
# Try numbers between 1 and the number of clashes + 1 (+ 1 because we
# start the range at 1, not 0):
# if we have two clashes "foo1" and "foo2", we need to try "foox"
# for x between 1 and 3 to be absolutely sure to find an available one.
for idx in range(1, len(clashes) + 2):
new = ('%s%s' % (slug, idx))[:max_length]
if new not in clashes:
slug = new
break
else:
            # This could happen. The current implementation (using
            # ``[:max_length - 2]``) only works for the first 99 clashes in the
            # worst case (if the slug is equal to or longer than
            # ``max_length - 2`` chars).
# After that, {verylongslug}-100 will be trimmed down to
# {verylongslug}-10, which is already assigned, but it's the last
# solution tested.
raise RuntimeError
setattr(instance, slug_field, slug)
return instance
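# Illustrative sketch (not part of the original module): assuming the slugs
# 'my-addon' and 'my-addon1' are already taken, the suffix search above would
# behave roughly like this (names invented for illustration):
#
#   addon = Addon(name='My Addon')
#   clean_slug(addon)   # sets addon.slug to 'my-addon2'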
class AddonQuerySet(caching.CachingQuerySet):
def id_or_slug(self, val):
"""Get add-ons by id or slug."""
if isinstance(val, basestring) and not val.isdigit():
return self.filter(slug=val)
return self.filter(id=val)
def enabled(self):
"""Get add-ons that haven't been disabled by their developer(s)."""
return self.filter(disabled_by_user=False)
def public(self):
"""Get public add-ons only"""
return self.filter(self.valid_q([amo.STATUS_PUBLIC]))
def valid(self):
"""Get valid, enabled add-ons only"""
return self.filter(self.valid_q(amo.VALID_ADDON_STATUSES))
def valid_and_disabled_and_pending(self):
"""
Get valid, pending, enabled and disabled add-ons.
Used to allow pending theme pages to still be viewed.
"""
statuses = (list(amo.VALID_ADDON_STATUSES) +
[amo.STATUS_DISABLED, amo.STATUS_PENDING])
return (self.filter(Q(status__in=statuses) | Q(disabled_by_user=True))
.exclude(type=amo.ADDON_EXTENSION,
_current_version__isnull=True))
def featured(self, app, lang=None, type=None):
"""
Filter for all featured add-ons for an application in all locales.
"""
ids = get_featured_ids(app, lang, type)
return manual_order(self.listed(app), ids, 'addons.id')
def listed(self, app, *status):
"""
Return add-ons that support a given ``app``, have a version with a file
matching ``status`` and are not disabled.
"""
if len(status) == 0:
status = [amo.STATUS_PUBLIC]
return self.filter(self.valid_q(status), appsupport__app=app.id)
def valid_q(self, status=None, prefix=''):
"""
Return a Q object that selects a valid Addon with the given statuses.
An add-on is valid if not disabled and has a current version.
``prefix`` can be used if you're not working with Addon directly and
need to hop across a join, e.g. ``prefix='addon__'`` in
CollectionAddon.
"""
if not status:
status = [amo.STATUS_PUBLIC]
def q(*args, **kw):
if prefix:
kw = dict((prefix + k, v) for k, v in kw.items())
return Q(*args, **kw)
return q(q(_current_version__isnull=False),
disabled_by_user=False, status__in=status)
class AddonManager(ManagerBase):
def __init__(self, include_deleted=False):
# DO NOT change the default value of include_deleted unless you've read
# through the comment just above the Addon managers
# declaration/instantiation and understand the consequences.
ManagerBase.__init__(self)
self.include_deleted = include_deleted
def get_queryset(self):
qs = super(AddonManager, self).get_queryset()
qs = qs._clone(klass=AddonQuerySet)
if not self.include_deleted:
qs = qs.exclude(status=amo.STATUS_DELETED)
return qs.transform(Addon.transformer)
def id_or_slug(self, val):
"""Get add-ons by id or slug."""
return self.get_queryset().id_or_slug(val)
def enabled(self):
"""Get add-ons that haven't been disabled by their developer(s)."""
return self.get_queryset().enabled()
def public(self):
"""Get public add-ons only"""
return self.get_queryset().public()
def valid(self):
"""Get valid, enabled add-ons only"""
return self.get_queryset().valid()
def valid_and_disabled_and_pending(self):
"""
Get valid, pending, enabled and disabled add-ons.
Used to allow pending theme pages to still be viewed.
"""
return self.get_queryset().valid_and_disabled_and_pending()
def featured(self, app, lang=None, type=None):
"""
Filter for all featured add-ons for an application in all locales.
"""
return self.get_queryset().featured(app, lang=lang, type=type)
def listed(self, app, *status):
"""
Return add-ons that support a given ``app``, have a version with a file
matching ``status`` and are not disabled.
"""
return self.get_queryset().listed(app, *status)
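# Illustrative sketch (not part of the original module): the manager/queryset
# helpers above are typically chained like this (values invented for
# illustration):
#
#   Addon.objects.public().filter(type=amo.ADDON_EXTENSION)
#   Addon.objects.listed(amo.FIREFOX, amo.STATUS_PUBLIC)
#   Addon.unfiltered.id_or_slug('my-addon')   # includes deleted add-ons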
class Addon(OnChangeMixin, ModelBase):
STATUS_CHOICES = amo.STATUS_CHOICES_ADDON
guid = models.CharField(max_length=255, unique=True, null=True)
slug = models.CharField(max_length=30, unique=True, null=True)
name = TranslatedField(default=None)
default_locale = models.CharField(max_length=10,
default=settings.LANGUAGE_CODE,
db_column='defaultlocale')
type = models.PositiveIntegerField(
choices=amo.ADDON_TYPE.items(), db_column='addontype_id', default=0)
status = models.PositiveIntegerField(
choices=STATUS_CHOICES.items(), db_index=True, default=0)
icon_type = models.CharField(max_length=25, blank=True,
db_column='icontype')
homepage = TranslatedField()
support_email = TranslatedField(db_column='supportemail')
support_url = TranslatedField(db_column='supporturl')
description = PurifiedField(short=False)
summary = LinkifiedField()
developer_comments = PurifiedField(db_column='developercomments')
eula = PurifiedField()
privacy_policy = PurifiedField(db_column='privacypolicy')
the_reason = PurifiedField()
the_future = PurifiedField()
average_rating = models.FloatField(max_length=255, default=0, null=True,
db_column='averagerating')
bayesian_rating = models.FloatField(default=0, db_index=True,
db_column='bayesianrating')
total_reviews = models.PositiveIntegerField(default=0,
db_column='totalreviews')
weekly_downloads = models.PositiveIntegerField(
default=0, db_column='weeklydownloads', db_index=True)
total_downloads = models.PositiveIntegerField(
default=0, db_column='totaldownloads')
hotness = models.FloatField(default=0, db_index=True)
average_daily_downloads = models.PositiveIntegerField(default=0)
average_daily_users = models.PositiveIntegerField(default=0)
last_updated = models.DateTimeField(
db_index=True, null=True,
help_text='Last time this add-on had a file/version update')
disabled_by_user = models.BooleanField(default=False, db_index=True,
db_column='inactive')
view_source = models.BooleanField(default=True, db_column='viewsource')
public_stats = models.BooleanField(default=False, db_column='publicstats')
prerelease = models.BooleanField(default=False)
admin_review = models.BooleanField(default=False, db_column='adminreview')
external_software = models.BooleanField(default=False,
db_column='externalsoftware')
dev_agreement = models.BooleanField(
default=False, help_text="Has the dev agreement been signed?")
auto_repackage = models.BooleanField(
default=True, help_text='Automatically upgrade jetpack add-on to a '
'new sdk version?')
target_locale = models.CharField(
max_length=255, db_index=True, blank=True, null=True,
help_text="For dictionaries and language packs")
locale_disambiguation = models.CharField(
max_length=255, blank=True, null=True,
help_text="For dictionaries and language packs")
wants_contributions = models.BooleanField(default=False)
paypal_id = models.CharField(max_length=255, blank=True)
charity = models.ForeignKey('Charity', null=True)
suggested_amount = models.DecimalField(
max_digits=9, decimal_places=2, blank=True,
null=True, help_text=_(u'Users have the option of contributing more '
'or less than this amount.'))
total_contributions = models.DecimalField(max_digits=9, decimal_places=2,
blank=True, null=True)
annoying = models.PositiveIntegerField(
choices=amo.CONTRIB_CHOICES, default=0,
help_text=_(u'Users will always be asked in the Add-ons'
u' Manager (Firefox 4 and above).'
u' Only applies to desktop.'))
enable_thankyou = models.BooleanField(
default=False, help_text='Should the thank you note be sent to '
'contributors?')
thankyou_note = TranslatedField()
authors = models.ManyToManyField('users.UserProfile', through='AddonUser',
related_name='addons')
categories = models.ManyToManyField('Category', through='AddonCategory')
dependencies = models.ManyToManyField('self', symmetrical=False,
through='AddonDependency',
related_name='addons')
_current_version = models.ForeignKey(Version, db_column='current_version',
related_name='+', null=True,
on_delete=models.SET_NULL)
whiteboard = models.TextField(blank=True)
is_experimental = models.BooleanField(default=False,
db_column='experimental')
# The order of those managers is very important:
# The first one discovered, if it has "use_for_related_fields = True"
# (which it has if it's inheriting from caching.base.CachingManager), will
# be used for relations like `version.addon`. We thus want one that is NOT
# filtered in any case, we don't want a 500 if the addon is not found
# (because it has the status amo.STATUS_DELETED for example).
# The CLASS of the first one discovered will also be used for "many to many
# relations" like `collection.addons`. In that case, we do want the
# filtered version by default, to make sure we're not displaying stuff by
# mistake. You thus want the CLASS of the first one to be filtered by
# default.
# We don't control the instantiation, but AddonManager sets include_deleted
# to False by default, so filtering is enabled by default. This is also why
# it's not repeated for 'objects' below.
unfiltered = AddonManager(include_deleted=True)
objects = AddonManager()
class Meta:
db_table = 'addons'
@staticmethod
def __new__(cls, *args, **kw):
try:
type_idx = Addon._meta._type_idx
except AttributeError:
type_idx = (idx for idx, f in enumerate(Addon._meta.fields)
if f.attname == 'type').next()
Addon._meta._type_idx = type_idx
return object.__new__(cls)
def __unicode__(self):
return u'%s: %s' % (self.id, self.name)
def __init__(self, *args, **kw):
super(Addon, self).__init__(*args, **kw)
if self.type == amo.ADDON_PERSONA:
self.STATUS_CHOICES = Persona.STATUS_CHOICES
def save(self, **kw):
self.clean_slug()
super(Addon, self).save(**kw)
@classmethod
def search_public(cls):
"""Legacy search method for public add-ons.
Note that typically, code using this method do a search in ES but then
will fetch the relevant objects from the database using Addon.objects,
so deleted addons won't be returned no matter what ES returns. See
amo.search.ES and amo.search.ObjectSearchResults for more details.
In new code, use elasticsearch-dsl instead.
"""
return cls.search().filter(
is_disabled=False,
status__in=amo.REVIEWED_STATUSES,
current_version__exists=True)
@use_master
def clean_slug(self, slug_field='slug'):
if self.status == amo.STATUS_DELETED:
return
clean_slug(self, slug_field)
def is_soft_deleteable(self):
return self.status or Version.unfiltered.filter(addon=self).exists()
@transaction.atomic
def delete(self, msg='', reason=''):
# To avoid a circular import.
from . import tasks
# Check for soft deletion path. Happens only if the addon status isn't
# 0 (STATUS_INCOMPLETE) with no versions.
soft_deletion = self.is_soft_deleteable()
if soft_deletion and self.status == amo.STATUS_DELETED:
# We're already done.
return
id = self.id
# Fetch previews before deleting the addon instance, so that we can
# pass the list of files to delete to the delete_preview_files task
# after the addon is deleted.
previews = list(Preview.objects.filter(addon__id=id)
.values_list('id', flat=True))
if soft_deletion:
# /!\ If we ever stop using soft deletion, and remove this code, we
# need to make sure that the logs created below aren't cascade
# deleted!
log.debug('Deleting add-on: %s' % self.id)
to = [settings.FLIGTAR]
user = amo.get_user()
# Don't localize email to admins, use 'en-US' always.
with no_translation():
# The types are lazy translated in apps/constants/base.py.
atype = amo.ADDON_TYPE.get(self.type).upper()
context = {
'atype': atype,
'authors': [u.email for u in self.authors.all()],
'adu': self.average_daily_users,
'guid': self.guid,
'id': self.id,
'msg': msg,
'reason': reason,
'name': self.name,
'slug': self.slug,
'total_downloads': self.total_downloads,
'url': helpers.absolutify(self.get_url_path()),
'user_str': ("%s, %s (%s)" % (user.display_name or
user.username, user.email,
user.id) if user else "Unknown"),
}
email_msg = u"""
The following %(atype)s was deleted.
%(atype)s: %(name)s
URL: %(url)s
DELETED BY: %(user_str)s
ID: %(id)s
GUID: %(guid)s
AUTHORS: %(authors)s
TOTAL DOWNLOADS: %(total_downloads)s
AVERAGE DAILY USERS: %(adu)s
NOTES: %(msg)s
REASON GIVEN BY USER FOR DELETION: %(reason)s
""" % context
log.debug('Sending delete email for %(atype)s %(id)s' % context)
subject = 'Deleting %(atype)s %(slug)s (%(id)d)' % context
# Update or NULL out various fields.
models.signals.pre_delete.send(sender=Addon, instance=self)
self._reviews.all().delete()
# The last parameter is needed to automagically create an AddonLog.
amo.log(amo.LOG.DELETE_ADDON, self.pk, unicode(self.guid), self)
self.update(status=amo.STATUS_DELETED, slug=None,
_current_version=None, modified=datetime.now())
models.signals.post_delete.send(sender=Addon, instance=self)
send_mail(subject, email_msg, recipient_list=to)
else:
# Real deletion path.
super(Addon, self).delete()
for preview in previews:
tasks.delete_preview_files.delay(preview)
return True
@classmethod
def initialize_addon_from_upload(cls, data, upload, channel):
fields = cls._meta.get_all_field_names()
guid = data.get('guid')
old_guid_addon = None
if guid: # It's an extension.
# Reclaim GUID from deleted add-on.
try:
old_guid_addon = Addon.unfiltered.get(guid=guid)
old_guid_addon.update(guid=None)
except ObjectDoesNotExist:
pass
generate_guid = (
not data.get('guid', None) and
data.get('is_webextension', False)
)
if generate_guid:
data['guid'] = guid = generate_addon_guid()
data = cls.resolve_webext_translations(data, upload)
addon = Addon(**dict((k, v) for k, v in data.items() if k in fields))
addon.status = amo.STATUS_NULL
locale_is_set = (addon.default_locale and
addon.default_locale in (
settings.AMO_LANGUAGES +
settings.HIDDEN_LANGUAGES) and
data.get('default_locale') == addon.default_locale)
if not locale_is_set:
addon.default_locale = to_language(trans_real.get_language())
addon.save()
if old_guid_addon:
old_guid_addon.update(guid='guid-reused-by-pk-{}'.format(addon.pk))
old_guid_addon.save()
return addon
@classmethod
def create_addon_from_upload_data(cls, data, upload, channel, user=None,
**kwargs):
addon = cls.initialize_addon_from_upload(data, upload, channel,
**kwargs)
AddonUser(addon=addon, user=user).save()
return addon
@classmethod
def from_upload(cls, upload, platforms, source=None,
channel=amo.RELEASE_CHANNEL_LISTED, parsed_data=None):
if not parsed_data:
parsed_data = parse_addon(upload)
addon = cls.initialize_addon_from_upload(parsed_data, upload, channel)
if upload.validation_timeout:
addon.update(admin_review=True)
Version.from_upload(upload, addon, platforms, source=source,
channel=channel)
amo.log(amo.LOG.CREATE_ADDON, addon)
log.debug('New addon %r from %r' % (addon, upload))
return addon
@classmethod
def resolve_webext_translations(cls, data, upload):
"""Resolve all possible translations from an add-on.
This returns a modified `data` dictionary accordingly with proper
translations filled in.
"""
default_locale = find_language(data.get('default_locale'))
if not data.get('is_webextension') or not default_locale:
# Don't change anything if we don't meet the requirements
return data
fields = ('name', 'homepage', 'summary')
messages = extract_translations(upload)
for field in fields:
data[field] = {
locale: resolve_i18n_message(
data[field],
locale=locale,
default_locale=default_locale,
messages=messages)
for locale in messages
}
return data
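    # Illustrative sketch (not part of the original module): for a
    # WebExtension whose manifest uses '__MSG_extensionName__' and ships
    # messages for 'en-US' and 'fr', the resolved data['name'] would look
    # roughly like {'en-US': 'My Addon', 'fr': 'Mon module'} (values invented
    # for illustration).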
def get_url_path(self, more=False, add_prefix=True):
if not self.current_version:
return ''
# If more=True you get the link to the ajax'd middle chunk of the
# detail page.
view = 'addons.detail_more' if more else 'addons.detail'
return reverse(view, args=[self.slug], add_prefix=add_prefix)
def get_dev_url(self, action='edit', args=None, prefix_only=False):
args = args or []
prefix = 'devhub'
type_ = 'themes' if self.type == amo.ADDON_PERSONA else 'addons'
if not prefix_only:
prefix += '.%s' % type_
view_name = '{prefix}.{action}'.format(prefix=prefix,
action=action)
return reverse(view_name, args=[self.slug] + args)
def get_detail_url(self, action='detail', args=None):
if args is None:
args = []
return reverse('addons.%s' % action, args=[self.slug] + args)
def meet_the_dev_url(self):
return reverse('addons.meet', args=[self.slug])
@property
def reviews_url(self):
return helpers.url('addons.reviews.list', self.slug)
def get_ratings_url(self, action='list', args=None, add_prefix=True):
return reverse('ratings.themes.%s' % action,
args=[self.slug] + (args or []),
add_prefix=add_prefix)
@classmethod
def get_type_url(cls, type):
try:
type = amo.ADDON_SLUGS[type]
except KeyError:
return None
return reverse('browse.%s' % type)
def type_url(self):
"""The url for this add-on's type."""
return Addon.get_type_url(self.type)
def share_url(self):
return reverse('addons.share', args=[self.slug])
@amo.cached_property(writable=True)
def listed_authors(self):
return UserProfile.objects.filter(
addons=self,
addonuser__listed=True).order_by('addonuser__position')
@classmethod
def get_fallback(cls):
return cls._meta.get_field('default_locale')
@property
def reviews(self):
return Review.objects.filter(addon=self, reply_to=None)
def get_category(self, app_id):
categories = self.app_categories.get(amo.APP_IDS.get(app_id))
return categories[0] if categories else None
def language_ascii(self):
lang = trans_real.to_language(self.default_locale)
return settings.LANGUAGES.get(lang)
@property
def valid_file_statuses(self):
if self.status == amo.STATUS_PUBLIC:
return [amo.STATUS_PUBLIC]
return amo.VALID_FILE_STATUSES
def find_latest_public_listed_version(self):
"""Retrieve the latest public listed version of an addon.
If the add-on is not public, it can return a listed version awaiting
review (since non-public add-ons should not have public versions)."""
if self.type == amo.ADDON_PERSONA:
return
try:
statuses = self.valid_file_statuses
status_list = ','.join(map(str, statuses))
fltr = {
'channel': amo.RELEASE_CHANNEL_LISTED,
'files__status__in': statuses
}
return self.versions.no_cache().filter(**fltr).extra(
where=["""
NOT EXISTS (
SELECT 1 FROM files AS f2
WHERE f2.version_id = versions.id AND
f2.status NOT IN (%s))
""" % status_list])[0]
except (IndexError, Version.DoesNotExist):
return None
def find_latest_version(self, channel, ignore=None):
"""Retrieve the latest non-disabled version of an add-on for the
        specified channel. If channel is None, versions from either channel are considered.
Accepts an optional ignore argument to ignore a specific version."""
# If the add-on is deleted or hasn't been saved yet, it should not
# have a latest version.
if not self.id or self.status == amo.STATUS_DELETED:
return None
# We can't use .exclude(files__status=STATUS_DISABLED) because this
        # excludes a version if any of the files are disabled and there may
# be files we do want to include. Having a single beta file /does/
# mean we want the whole version disqualified though.
statuses_without_disabled = (
set(amo.STATUS_CHOICES_FILE.keys()) -
{amo.STATUS_DISABLED, amo.STATUS_BETA})
try:
latest_qs = (
Version.objects.filter(addon=self)
.exclude(files__status=amo.STATUS_BETA)
.filter(files__status__in=statuses_without_disabled))
if ignore is not None:
latest_qs = latest_qs.exclude(pk=ignore.pk)
if channel is not None:
latest_qs = latest_qs.filter(channel=channel)
latest = latest_qs.latest()
latest.addon = self
except Version.DoesNotExist:
latest = None
return latest
def find_latest_version_including_rejected(self, channel):
"""Similar to latest_version but includes rejected versions. Used so
we correctly attach review content to the last version reviewed. If
channel is None either channel is returned."""
try:
latest_qs = self.versions.exclude(files__status=amo.STATUS_BETA)
if channel is not None:
latest_qs = latest_qs.filter(channel=channel)
latest = latest_qs.latest()
except Version.DoesNotExist:
latest = None
return latest
@write
def update_version(self, ignore=None, _signal=True):
"""
Update the current_version field on this add-on if necessary.
Returns True if we updated the current_version field.
        The optional ``ignore`` parameter, if present, is a version
to not consider as part of the update, since it may be in the
process of being deleted.
        Pass ``_signal=False`` if you want no signals fired at all.
"""
if self.is_persona():
            # Themes should only have a single version. So, if there is no
# current version set, we just need to copy over the latest version
# to current_version and we should never have to set it again.
if not self._current_version:
latest_version = self.find_latest_version(None)
if latest_version:
self.update(_current_version=latest_version, _signal=False)
return True
return False
new_current_version = self.find_latest_public_listed_version()
updated = {}
send_signal = False
if self._current_version != new_current_version:
updated['_current_version'] = new_current_version
send_signal = True
# update_version can be called by a post_delete signal (such
# as File's) when deleting a version. If so, we should avoid putting
# that version-being-deleted in any fields.
if ignore is not None:
updated = {k: v for k, v in updated.iteritems() if v != ignore}
if updated:
diff = [self._current_version, new_current_version]
# Pass along _signal to the .update() to prevent it from firing
# signals if we don't want them.
updated['_signal'] = _signal
try:
self.update(**updated)
if send_signal and _signal:
signals.version_changed.send(sender=self)
log.info(u'Version changed from current: %s to %s '
u'for addon %s'
% tuple(diff + [self]))
except Exception, e:
log.error(u'Could not save version changes current: %s to %s '
u'for addon %s (%s)' %
tuple(diff + [self, e]))
return bool(updated)
def increment_theme_version_number(self):
"""Increment theme version number by 1."""
latest_version = self.find_latest_version(None)
version = latest_version or self.current_version
version.version = str(float(version.version) + 1)
# Set the current version.
self.update(_current_version=version.save())
def invalidate_d2c_versions(self):
"""Invalidates the cache of compatible versions.
Call this when there is an event that may change what compatible
versions are returned so they are recalculated.
"""
key = cache_ns_key('d2c-versions:%s' % self.id, increment=True)
log.info('Incrementing d2c-versions namespace for add-on [%s]: %s' % (
self.id, key))
@property
def current_version(self):
"""Return the latest public listed version of an addon
If the add-on is not public, it can return a listed version awaiting
review (since non-public add-ons should not have public versions).
If the add-on has not been created yet or is deleted, it returns None.
"""
if not self.id or self.status == amo.STATUS_DELETED:
return None
try:
return self._current_version
except ObjectDoesNotExist:
pass
return None
@amo.cached_property(writable=True)
def latest_unlisted_version(self):
"""Shortcut property for Addon.find_latest_version(
channel=RELEASE_CHANNEL_UNLISTED)."""
return self.find_latest_version(channel=amo.RELEASE_CHANNEL_UNLISTED)
@amo.cached_property
def binary(self):
"""Returns if the current version has binary files."""
version = self.current_version
if version:
return version.files.filter(binary=True).exists()
return False
@amo.cached_property
def binary_components(self):
"""Returns if the current version has files with binary_components."""
version = self.current_version
if version:
return version.files.filter(binary_components=True).exists()
return False
def get_icon_dir(self):
return os.path.join(helpers.user_media_path('addon_icons'),
'%s' % (self.id / 1000))
def get_icon_url(self, size, use_default=True):
"""
Returns the addon's icon url according to icon_type.
If it's a persona, it will return the icon_url of the associated
Persona instance.
If it's a theme and there is no icon set, it will return the default
theme icon.
If it's something else, it will return the default add-on icon, unless
use_default is False, in which case it will return None.
"""
icon_type_split = []
if self.icon_type:
icon_type_split = self.icon_type.split('/')
# Get the closest allowed size without going over
if (size not in amo.ADDON_ICON_SIZES and
size >= amo.ADDON_ICON_SIZES[0]):
size = [s for s in amo.ADDON_ICON_SIZES if s < size][-1]
elif size < amo.ADDON_ICON_SIZES[0]:
size = amo.ADDON_ICON_SIZES[0]
# Figure out what to return for an image URL
if self.type == amo.ADDON_PERSONA:
return self.persona.icon_url
if not self.icon_type:
if self.type == amo.ADDON_THEME:
icon = amo.ADDON_ICONS[amo.ADDON_THEME]
return "%simg/icons/%s" % (settings.STATIC_URL, icon)
else:
if not use_default:
return None
return self.get_default_icon_url(size)
elif icon_type_split[0] == 'icon':
return '{0}img/addon-icons/{1}-{2}.png'.format(
settings.STATIC_URL,
icon_type_split[1],
size
)
else:
# [1] is the whole ID, [2] is the directory
split_id = re.match(r'((\d*?)\d{1,3})$', str(self.id))
modified = int(time.mktime(self.modified.timetuple()))
path = '/'.join([
split_id.group(2) or '0',
'{0}-{1}.png?modified={2}'.format(self.id, size, modified),
])
return helpers.user_media_url('addon_icons') + path
def get_default_icon_url(self, size):
return '{0}img/addon-icons/{1}-{2}.png'.format(
settings.STATIC_URL, 'default', size
)
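    # Illustrative sketch (not part of the original module): for an add-on
    # with id 12345 and an uploaded icon, get_icon_url(64) resolves to
    # something like
    # '<addon_icons media url>/12/12345-64.png?modified=<timestamp>', while
    # an add-on with no icon_type falls back to get_default_icon_url(64).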
@write
def update_status(self, ignore_version=None):
self.reload()
if (self.status in [amo.STATUS_NULL, amo.STATUS_DELETED] or
self.is_disabled or self.is_persona()):
self.update_version(ignore=ignore_version)
return
versions = self.versions.filter(channel=amo.RELEASE_CHANNEL_LISTED)
status = None
if not versions.exists():
status = amo.STATUS_NULL
reason = 'no listed versions'
elif not versions.filter(
files__status__in=amo.VALID_FILE_STATUSES).exists():
status = amo.STATUS_NULL
reason = 'no listed version with valid file'
elif (self.status == amo.STATUS_PUBLIC and
not versions.filter(files__status=amo.STATUS_PUBLIC).exists()):
if versions.filter(
files__status=amo.STATUS_AWAITING_REVIEW).exists():
status = amo.STATUS_NOMINATED
reason = 'only an unreviewed file'
else:
status = amo.STATUS_NULL
reason = 'no reviewed files'
elif self.status == amo.STATUS_PUBLIC:
latest_version = self.find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED)
if (latest_version and latest_version.has_files and
(latest_version.all_files[0].status ==
amo.STATUS_AWAITING_REVIEW)):
# Addon is public, but its latest file is not (it's the case on
# a new file upload). So, call update, to trigger watch_status,
# which takes care of setting nomination time when needed.
status = self.status
reason = 'triggering watch_status'
if status is not None:
log.info('Changing add-on status [%s]: %s => %s (%s).'
% (self.id, self.status, status, reason))
self.update(status=status)
amo.log(amo.LOG.CHANGE_STATUS, self.get_status_display(), self)
self.update_version(ignore=ignore_version)
@staticmethod
def attach_related_versions(addons, addon_dict=None):
if addon_dict is None:
addon_dict = dict((a.id, a) for a in addons)
all_ids = set(filter(None, (a._current_version_id for a in addons)))
versions = list(Version.objects.filter(id__in=all_ids).order_by())
for version in versions:
try:
addon = addon_dict[version.addon_id]
except KeyError:
log.debug('Version %s has an invalid add-on id.' % version.id)
continue
if addon._current_version_id == version.id:
addon._current_version = version
version.addon = addon
@staticmethod
def attach_listed_authors(addons, addon_dict=None):
if addon_dict is None:
addon_dict = dict((a.id, a) for a in addons)
qs = (UserProfile.objects.no_cache()
.filter(addons__in=addons, addonuser__listed=True)
.extra(select={'addon_id': 'addons_users.addon_id',
'position': 'addons_users.position'}))
qs = sorted(qs, key=lambda u: (u.addon_id, u.position))
for addon_id, users in itertools.groupby(qs, key=lambda u: u.addon_id):
addon_dict[addon_id].listed_authors = list(users)
# FIXME: set listed_authors to empty list on addons without listed
# authors.
@staticmethod
def attach_previews(addons, addon_dict=None, no_transforms=False):
if addon_dict is None:
addon_dict = dict((a.id, a) for a in addons)
qs = Preview.objects.filter(addon__in=addons,
position__gte=0).order_by()
if no_transforms:
qs = qs.no_transforms()
qs = sorted(qs, key=lambda x: (x.addon_id, x.position, x.created))
for addon, previews in itertools.groupby(qs, lambda x: x.addon_id):
addon_dict[addon].all_previews = list(previews)
# FIXME: set all_previews to empty list on addons without previews.
@staticmethod
def attach_static_categories(addons, addon_dict=None):
if addon_dict is None:
addon_dict = dict((a.id, a) for a in addons)
qs = AddonCategory.objects.values_list(
'addon', 'category').filter(addon__in=addon_dict)
qs = sorted(qs, key=lambda x: (x[0], x[1]))
for addon_id, cats_iter in itertools.groupby(qs, key=lambda x: x[0]):
            # The second value of each tuple in cats_iter is the category id
            # we want.
addon_dict[addon_id].category_ids = [c[1] for c in cats_iter]
addon_dict[addon_id].all_categories = [
CATEGORIES_BY_ID[cat_id] for cat_id
in addon_dict[addon_id].category_ids
if cat_id in CATEGORIES_BY_ID]
@staticmethod
@timer
def transformer(addons):
if not addons:
return
addon_dict = {a.id: a for a in addons}
# Attach categories. This needs to be done before separating addons
# from personas, because Personas need categories for the theme_data
# JSON dump, rest of the add-ons need the first category to be
# displayed in detail page / API.
Addon.attach_static_categories(addons, addon_dict=addon_dict)
personas = [a for a in addons if a.type == amo.ADDON_PERSONA]
addons = [a for a in addons if a.type != amo.ADDON_PERSONA]
# Set _current_version.
Addon.attach_related_versions(addons, addon_dict=addon_dict)
# Attach listed authors.
Addon.attach_listed_authors(addons, addon_dict=addon_dict)
# Persona-specific stuff
for persona in Persona.objects.no_cache().filter(addon__in=personas):
addon = addon_dict[persona.addon_id]
addon.persona = persona
addon.weekly_downloads = persona.popularity
# Attach previews.
Addon.attach_previews(addons, addon_dict=addon_dict)
return addon_dict
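    # Hypothetical usage sketch (not in the original source): the transformer
    # is applied to a batch of Addon instances so versions, authors, previews
    # and categories are attached with a handful of queries instead of one
    # query per addon. The names below are illustrative only.
    #
    #   addons = list(Addon.objects.filter(id__in=some_ids))
    #   Addon.transformer(addons)   # attaches _current_version, listed_authors, ...
    #   addons[0].all_previews      # now populated without extra queries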
@property
def show_beta(self):
return self.status == amo.STATUS_PUBLIC and self.current_beta_version
def show_adu(self):
return self.type != amo.ADDON_SEARCH
@amo.cached_property(writable=True)
def current_beta_version(self):
"""Retrieves the latest version of an addon, in the beta channel."""
versions = self.versions.filter(files__status=amo.STATUS_BETA)[:1]
if versions:
return versions[0]
@property
def icon_url(self):
return self.get_icon_url(32)
def authors_other_addons(self, app=None):
"""
Return other addons by the author(s) of this addon,
optionally takes an app.
"""
if app:
qs = Addon.objects.listed(app)
else:
qs = Addon.objects.valid()
return (qs.exclude(id=self.id)
.filter(addonuser__listed=True,
authors__in=self.listed_authors)
.distinct())
@property
def contribution_url(self, lang=settings.LANGUAGE_CODE,
app=settings.DEFAULT_APP):
return reverse('addons.contribute', args=[self.slug])
@property
def thumbnail_url(self):
"""
Returns the addon's thumbnail url or a default.
"""
try:
preview = self.all_previews[0]
return preview.thumbnail_url
except IndexError:
return settings.STATIC_URL + '/img/icons/no-preview.png'
def can_request_review(self):
"""Return whether an add-on can request a review or not."""
if (self.is_disabled or
self.status in (amo.STATUS_PUBLIC,
amo.STATUS_NOMINATED,
amo.STATUS_DELETED)):
return False
latest_version = self.find_latest_version_including_rejected(
channel=amo.RELEASE_CHANNEL_LISTED)
return latest_version is not None and latest_version.files.exists()
def is_persona(self):
return self.type == amo.ADDON_PERSONA
@property
def is_disabled(self):
"""True if this Addon is disabled.
It could be disabled by an admin or disabled by the developer
"""
return self.status == amo.STATUS_DISABLED or self.disabled_by_user
@property
def is_deleted(self):
return self.status == amo.STATUS_DELETED
def is_unreviewed(self):
return self.status in amo.UNREVIEWED_ADDON_STATUSES
def is_public(self):
return self.status == amo.STATUS_PUBLIC and not self.disabled_by_user
def has_complete_metadata(self, has_listed_versions=None):
"""See get_required_metadata for has_listed_versions details."""
return all(self.get_required_metadata(
has_listed_versions=has_listed_versions))
def get_required_metadata(self, has_listed_versions=None):
"""If has_listed_versions is not specified this method will return the
current (required) metadata (truthy values if present) for this Addon.
If has_listed_versions is specified then the method will act as if
Addon.has_listed_versions() returns that value. Used to predict if the
addon will require extra metadata before a version is created."""
if has_listed_versions is None:
has_listed_versions = self.has_listed_versions()
if not has_listed_versions:
# Add-ons with only unlisted versions have no required metadata.
return []
latest_version = self.find_latest_version_including_rejected(
channel=amo.RELEASE_CHANNEL_LISTED)
return [
self.all_categories,
self.summary,
(latest_version and latest_version.license),
]
def should_redirect_to_submit_flow(self):
return (
self.status == amo.STATUS_NULL and
not self.has_complete_metadata() and
self.find_latest_version(channel=amo.RELEASE_CHANNEL_LISTED))
def is_pending(self):
return self.status == amo.STATUS_PENDING
def is_rejected(self):
return self.status == amo.STATUS_REJECTED
def can_be_deleted(self):
return not self.is_deleted
def has_listed_versions(self):
return self.versions.filter(
channel=amo.RELEASE_CHANNEL_LISTED).exists()
def has_unlisted_versions(self):
return self.versions.filter(
channel=amo.RELEASE_CHANNEL_UNLISTED).exists()
@classmethod
def featured_random(cls, app, lang):
return get_featured_ids(app, lang)
@property
def requires_restart(self):
"""Whether the add-on current version requires a browser restart to
work."""
return self.current_version and self.current_version.requires_restart
def is_featured(self, app, lang=None):
"""Is add-on globally featured for this app and language?"""
if app:
return self.id in get_featured_ids(app, lang)
def has_full_profile(self):
"""Is developer profile public (completed)?"""
return self.the_reason and self.the_future
def has_profile(self):
"""Is developer profile (partially or entirely) completed?"""
return self.the_reason or self.the_future
@amo.cached_property
def tags_partitioned_by_developer(self):
"""Returns a tuple of developer tags and user tags for this addon."""
tags = self.tags.not_denied()
        if self.is_persona():
return [], tags
user_tags = tags.exclude(addon_tags__user__in=self.listed_authors)
dev_tags = tags.exclude(id__in=[t.id for t in user_tags])
return dev_tags, user_tags
@amo.cached_property(writable=True)
def compatible_apps(self):
"""Shortcut to get compatible apps for the current version."""
# Search providers and personas don't list their supported apps.
if self.type in amo.NO_COMPAT:
return dict((app, None) for app in
amo.APP_TYPE_SUPPORT[self.type])
if self.current_version:
return self.current_version.compatible_apps
else:
return {}
def accepts_compatible_apps(self):
"""True if this add-on lists compatible apps."""
return self.type not in amo.NO_COMPAT
def incompatible_latest_apps(self):
"""Returns a list of applications with which this add-on is
incompatible (based on the latest version of each app).
"""
return [app for app, ver in self.compatible_apps.items() if ver and
version_int(ver.max.version) < version_int(app.latest_version)]
def has_author(self, user, roles=None):
"""True if ``user`` is an author with any of the specified ``roles``.
``roles`` should be a list of valid roles (see amo.AUTHOR_ROLE_*). If
not specified, has_author will return true if the user has any role.
"""
if user is None or user.is_anonymous():
return False
if roles is None:
roles = dict(amo.AUTHOR_CHOICES).keys()
return AddonUser.objects.filter(addon=self, user=user,
role__in=roles).exists()
@property
def takes_contributions(self):
return (self.status == amo.STATUS_PUBLIC and
self.wants_contributions and
(self.paypal_id or self.charity_id))
@classmethod
def _last_updated_queries(cls):
"""
Get the queries used to calculate addon.last_updated.
"""
status_change = Max('versions__files__datestatuschanged')
public = (
Addon.objects.no_cache().filter(
status=amo.STATUS_PUBLIC,
versions__files__status=amo.STATUS_PUBLIC)
.exclude(type=amo.ADDON_PERSONA)
.values('id').annotate(last_updated=status_change))
stati = amo.VALID_ADDON_STATUSES
exp = (Addon.objects.no_cache().exclude(status__in=stati)
.filter(versions__files__status__in=amo.VALID_FILE_STATUSES)
.values('id')
.annotate(last_updated=Max('versions__files__created')))
personas = (Addon.objects.no_cache().filter(type=amo.ADDON_PERSONA)
.extra(select={'last_updated': 'created'}))
return dict(public=public, exp=exp, personas=personas)
@amo.cached_property(writable=True)
def all_categories(self):
return filter(
None, [cat.to_static_category() for cat in self.categories.all()])
@amo.cached_property(writable=True)
def all_previews(self):
return list(self.get_previews())
def get_previews(self):
"""Exclude promo graphics."""
return self.previews.exclude(position=-1)
@property
def app_categories(self):
app_cats = {}
categories = sorted_groupby(
sorted(self.all_categories, key=attrgetter('weight', 'name')),
key=lambda x: amo.APP_IDS.get(x.application))
for app, cats in categories:
app_cats[app] = list(cats)
return app_cats
def remove_locale(self, locale):
"""NULLify strings in this locale for the add-on and versions."""
for o in itertools.chain([self], self.versions.all()):
Translation.objects.remove_for(o, locale)
def get_localepicker(self):
"""For language packs, gets the contents of localepicker."""
if (self.type == amo.ADDON_LPAPP and
self.status == amo.STATUS_PUBLIC and
self.current_version):
files = (self.current_version.files
.filter(platform=amo.PLATFORM_ANDROID.id))
try:
return unicode(files[0].get_localepicker(), 'utf-8')
except IndexError:
pass
return ''
def can_review(self, user):
return not(user and self.has_author(user))
@property
def all_dependencies(self):
"""Return all the (valid) add-ons this add-on depends on."""
return list(self.dependencies.valid().all()[:3])
def has_installed(self, user):
if not user or not isinstance(user, UserProfile):
return False
return self.installed.filter(user=user).exists()
def get_latest_file(self):
"""Get the latest file from the current version."""
cur = self.current_version
if cur:
res = cur.files.order_by('-created')
if res:
return res[0]
def in_escalation_queue(self):
return self.escalationqueue_set.exists()
def check_ownership(self, request, require_owner, require_author,
ignore_disabled, admin):
"""
Used by acl.check_ownership to see if request.user has permissions for
the addon.
"""
if require_author:
require_owner = False
ignore_disabled = True
admin = False
return acl.check_addon_ownership(request, self, admin=admin,
viewer=(not require_owner),
ignore_disabled=ignore_disabled)
@property
def feature_compatibility(self):
try:
feature_compatibility = self.addonfeaturecompatibility
except AddonFeatureCompatibility.DoesNotExist:
            # If it does not exist, return a blank one, no need to create. It's
            # the caller's responsibility to create one when needed, to avoid
            # unexpected database writes.
feature_compatibility = AddonFeatureCompatibility()
return feature_compatibility
dbsignals.pre_save.connect(save_signal, sender=Addon,
dispatch_uid='addon_translations')
@receiver(signals.version_changed, dispatch_uid='version_changed')
def version_changed(sender, **kw):
from . import tasks
tasks.version_changed.delay(sender.id)
@receiver(dbsignals.post_save, sender=Addon,
dispatch_uid='addons.search.index')
def update_search_index(sender, instance, **kw):
from . import tasks
if not kw.get('raw'):
tasks.index_addons.delay([instance.id])
@Addon.on_change
def watch_status(old_attr=None, new_attr=None, instance=None,
sender=None, **kwargs):
"""
Set nomination date if the addon is new in queue or updating.
The nomination date cannot be reset, say, when a developer cancels
their request for review and re-requests review.
If a version is rejected after nomination, the developer has
to upload a new version.
"""
if old_attr is None:
old_attr = {}
if new_attr is None:
new_attr = {}
new_status = new_attr.get('status')
old_status = old_attr.get('status')
latest_version = instance.find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED)
if (new_status not in amo.VALID_ADDON_STATUSES or
not new_status or not latest_version):
return
if old_status not in amo.UNREVIEWED_ADDON_STATUSES:
# New: will (re)set nomination only if it's None.
latest_version.reset_nomination_time()
elif latest_version.has_files:
# Updating: inherit nomination from last nominated version.
# Calls `inherit_nomination` manually given that signals are
# deactivated to avoid circular calls.
inherit_nomination(None, latest_version)
@Addon.on_change
def watch_disabled(old_attr=None, new_attr=None, instance=None, sender=None,
**kwargs):
if old_attr is None:
old_attr = {}
if new_attr is None:
new_attr = {}
attrs = dict((k, v) for k, v in old_attr.items()
if k in ('disabled_by_user', 'status'))
if Addon(**attrs).is_disabled and not instance.is_disabled:
for f in File.objects.filter(version__addon=instance.id):
f.unhide_disabled_file()
if instance.is_disabled and not Addon(**attrs).is_disabled:
for f in File.objects.filter(version__addon=instance.id):
f.hide_disabled_file()
@Addon.on_change
def watch_developer_notes(old_attr=None, new_attr=None, instance=None,
sender=None, **kwargs):
if old_attr is None:
old_attr = {}
if new_attr is None:
new_attr = {}
whiteboard_changed = (
new_attr.get('whiteboard') and
old_attr.get('whiteboard') != new_attr.get('whiteboard'))
developer_comments_changed = (new_attr.get('_developer_comments_cache') and
old_attr.get('_developer_comments_cache') !=
new_attr.get('_developer_comments_cache'))
if whiteboard_changed or developer_comments_changed:
instance.versions.update(has_info_request=False)
def attach_translations(addons):
"""Put all translations into a translations dict."""
attach_trans_dict(Addon, addons)
def attach_tags(addons):
addon_dict = dict((a.id, a) for a in addons)
qs = (Tag.objects.not_denied().filter(addons__in=addon_dict)
.values_list('addons__id', 'tag_text'))
for addon, tags in sorted_groupby(qs, lambda x: x[0]):
addon_dict[addon].tag_list = [t[1] for t in tags]
class Persona(caching.CachingMixin, models.Model):
"""Personas-specific additions to the add-on model."""
STATUS_CHOICES = amo.STATUS_CHOICES_PERSONA
addon = models.OneToOneField(Addon, null=True)
persona_id = models.PositiveIntegerField(db_index=True)
# name: deprecated in favor of Addon model's name field
# description: deprecated, ditto
header = models.CharField(max_length=64, null=True)
footer = models.CharField(max_length=64, null=True)
accentcolor = models.CharField(max_length=10, null=True)
textcolor = models.CharField(max_length=10, null=True)
author = models.CharField(max_length=255, null=True)
display_username = models.CharField(max_length=255, null=True)
submit = models.DateTimeField(null=True)
approve = models.DateTimeField(null=True)
movers = models.FloatField(null=True, db_index=True)
popularity = models.IntegerField(null=False, default=0, db_index=True)
license = models.PositiveIntegerField(
choices=amo.PERSONA_LICENSES_CHOICES, null=True, blank=True)
# To spot duplicate submissions.
checksum = models.CharField(max_length=64, blank=True, default='')
dupe_persona = models.ForeignKey('self', null=True)
objects = caching.CachingManager()
class Meta:
db_table = 'personas'
def __unicode__(self):
return unicode(self.addon.name)
def is_new(self):
return self.persona_id == 0
def _image_url(self, filename):
host = helpers.user_media_url('addons')
image_url = posixpath.join(host, str(self.addon.id), filename or '')
# TODO: Bust the cache on the hash of the image contents or something.
if self.addon.modified is not None:
modified = int(time.mktime(self.addon.modified.timetuple()))
else:
modified = 0
return '%s?%s' % (image_url, modified)
def _image_path(self, filename):
return os.path.join(helpers.user_media_path('addons'),
str(self.addon.id), filename)
@amo.cached_property
def thumb_url(self):
"""
Handles deprecated GetPersonas URL.
In days of yore, preview.jpg used to be a separate image.
In modern days, we use the same image for big preview + thumb.
"""
if self.is_new():
return self._image_url('preview.png')
else:
return self._image_url('preview.jpg')
@amo.cached_property
def thumb_path(self):
"""
Handles deprecated GetPersonas path.
In days of yore, preview.jpg used to be a separate image.
In modern days, we use the same image for big preview + thumb.
"""
if self.is_new():
return self._image_path('preview.png')
else:
return self._image_path('preview.jpg')
@amo.cached_property
def icon_url(self):
"""URL to personas square preview."""
if self.is_new():
return self._image_url('icon.png')
else:
return self._image_url('preview_small.jpg')
@amo.cached_property
def icon_path(self):
"""Path to personas square preview."""
if self.is_new():
return self._image_path('icon.png')
else:
return self._image_path('preview_small.jpg')
@amo.cached_property
def preview_url(self):
"""URL to Persona's big, 680px, preview."""
if self.is_new():
return self._image_url('preview.png')
else:
return self._image_url('preview_large.jpg')
@amo.cached_property
def preview_path(self):
"""Path to Persona's big, 680px, preview."""
if self.is_new():
return self._image_path('preview.png')
else:
return self._image_path('preview_large.jpg')
@amo.cached_property
def header_url(self):
return self._image_url(self.header)
@amo.cached_property
def footer_url(self):
return self.footer and self._image_url(self.footer) or ''
@amo.cached_property
def header_path(self):
return self._image_path(self.header)
@amo.cached_property
def footer_path(self):
return self.footer and self._image_path(self.footer) or ''
@amo.cached_property
def update_url(self):
locale = settings.LANGUAGE_URL_MAP.get(trans_real.get_language())
return settings.NEW_PERSONAS_UPDATE_URL % {
'locale': locale or settings.LANGUAGE_CODE,
'id': self.addon.id
}
@amo.cached_property
def theme_data(self):
"""Theme JSON Data for Browser/extension preview."""
def hexcolor(color):
return '#%s' % color
addon = self.addon
return {
'id': unicode(self.addon.id), # Personas dislikes ints
'name': unicode(addon.name),
'accentcolor': hexcolor(self.accentcolor),
'textcolor': hexcolor(self.textcolor),
'category': (unicode(addon.all_categories[0].name) if
addon.all_categories else ''),
# TODO: Change this to be `addons_users.user.display_name`.
'author': self.display_username,
'description': unicode(addon.description),
'header': self.header_url,
'footer': self.footer_url or '',
'headerURL': self.header_url,
'footerURL': self.footer_url or '',
'previewURL': self.preview_url,
'iconURL': self.icon_url,
'updateURL': self.update_url,
'detailURL': helpers.absolutify(self.addon.get_url_path()),
'version': '1.0'
}
@property
def json_data(self):
"""Persona JSON Data for Browser/extension preview."""
return json.dumps(self.theme_data,
separators=(',', ':'), cls=AMOJSONEncoder)
def authors_other_addons(self, app=None):
"""
Return other addons by the author(s) of this addon,
optionally takes an app.
"""
qs = (Addon.objects.valid()
.exclude(id=self.addon.id)
.filter(type=amo.ADDON_PERSONA))
return (qs.filter(addonuser__listed=True,
authors__in=self.addon.listed_authors)
.distinct())
@amo.cached_property(writable=True)
def listed_authors(self):
return self.addon.listed_authors
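# Illustrative sketch (not part of the original module): Persona.theme_data is
# the dict serialized for the browser preview, and json_data is simply its
# compact JSON form. The identifiers below are hypothetical.
#
#   persona = Persona.objects.get(addon_id=some_addon_id)
#   payload = persona.theme_data   # dict with 'headerURL', 'updateURL', ...
#   persona.json_data              # the same data as a compact JSON string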
class AddonCategory(caching.CachingMixin, models.Model):
addon = models.ForeignKey(Addon, on_delete=models.CASCADE)
category = models.ForeignKey('Category')
feature = models.BooleanField(default=False)
feature_locales = models.CharField(max_length=255, default='', null=True)
objects = caching.CachingManager()
class Meta:
db_table = 'addons_categories'
unique_together = ('addon', 'category')
@classmethod
def creatured_random(cls, category, lang):
return get_creatured_ids(category, lang)
class AddonUser(caching.CachingMixin, models.Model):
addon = models.ForeignKey(Addon, on_delete=models.CASCADE)
user = UserForeignKey()
role = models.SmallIntegerField(default=amo.AUTHOR_ROLE_OWNER,
choices=amo.AUTHOR_CHOICES)
listed = models.BooleanField(_(u'Listed'), default=True)
position = models.IntegerField(default=0)
objects = caching.CachingManager()
def __init__(self, *args, **kwargs):
super(AddonUser, self).__init__(*args, **kwargs)
self._original_role = self.role
self._original_user_id = self.user_id
class Meta:
db_table = 'addons_users'
class AddonDependency(models.Model):
addon = models.ForeignKey(Addon, related_name='addons_dependencies')
dependent_addon = models.ForeignKey(Addon, related_name='dependent_on')
class Meta:
db_table = 'addons_dependencies'
unique_together = ('addon', 'dependent_addon')
class AddonFeatureCompatibility(ModelBase):
addon = models.OneToOneField(
Addon, primary_key=True, on_delete=models.CASCADE)
e10s = models.PositiveSmallIntegerField(
choices=amo.E10S_COMPATIBILITY_CHOICES, default=amo.E10S_UNKNOWN)
def __unicode__(self):
return unicode(self.addon) if self.pk else u""
def get_e10s_classname(self):
return amo.E10S_COMPATIBILITY_CHOICES_API[self.e10s]
class DeniedGuid(ModelBase):
guid = models.CharField(max_length=255, unique=True)
comments = models.TextField(default='', blank=True)
class Meta:
db_table = 'denied_guids'
def __unicode__(self):
return self.guid
class Category(OnChangeMixin, ModelBase):
# Old name translations, we now have constants translated via gettext, but
# this is for backwards-compatibility, for categories which have a weird
# type/application/slug combo that is not in the constants.
db_name = TranslatedField(db_column='name')
slug = SlugField(max_length=50,
help_text='Used in Category URLs.')
type = models.PositiveIntegerField(db_column='addontype_id',
choices=do_dictsort(amo.ADDON_TYPE))
application = models.PositiveIntegerField(choices=amo.APPS_CHOICES,
null=True, blank=True,
db_column='application_id')
count = models.IntegerField('Addon count', default=0)
weight = models.IntegerField(
default=0, help_text='Category weight used in sort ordering')
misc = models.BooleanField(default=False)
addons = models.ManyToManyField(Addon, through='AddonCategory')
class Meta:
db_table = 'categories'
verbose_name_plural = 'Categories'
@property
def name(self):
try:
value = CATEGORIES[self.application][self.type][self.slug].name
except KeyError:
# If we can't find the category in the constants dict, fall back
# to the db field.
value = self.db_name
return unicode(value)
def __unicode__(self):
return unicode(self.name)
def get_url_path(self):
try:
type = amo.ADDON_SLUGS[self.type]
except KeyError:
type = amo.ADDON_SLUGS[amo.ADDON_EXTENSION]
return reverse('browse.%s' % type, args=[self.slug])
def to_static_category(self):
"""Return the corresponding StaticCategory instance from a Category."""
try:
staticcategory = CATEGORIES[self.application][self.type][self.slug]
except KeyError:
staticcategory = None
return staticcategory
@classmethod
def from_static_category(cls, static_category):
"""Return a Category instance created from a StaticCategory.
Does not save it into the database. Useful in tests."""
return cls(**static_category.__dict__)
dbsignals.pre_save.connect(save_signal, sender=Category,
dispatch_uid='category_translations')
class Preview(ModelBase):
addon = models.ForeignKey(Addon, related_name='previews')
caption = TranslatedField()
position = models.IntegerField(default=0)
sizes = JSONField(max_length=25, default={})
class Meta:
db_table = 'previews'
ordering = ('position', 'created')
def _image_url(self, url_template):
if self.modified is not None:
modified = int(time.mktime(self.modified.timetuple()))
else:
modified = 0
args = [self.id / 1000, self.id, modified]
return url_template % tuple(args)
def _image_path(self, url_template):
args = [self.id / 1000, self.id]
return url_template % tuple(args)
def as_dict(self, src=None):
d = {'full': urlparams(self.image_url, src=src),
'thumbnail': urlparams(self.thumbnail_url, src=src),
'caption': unicode(self.caption)}
return d
@property
def is_landscape(self):
size = self.image_size
if not size:
return False
return size[0] > size[1]
@property
def thumbnail_url(self):
template = (
helpers.user_media_url('previews') +
'thumbs/%s/%d.png?modified=%s')
return self._image_url(template)
@property
def image_url(self):
template = (
helpers.user_media_url('previews') +
'full/%s/%d.png?modified=%s')
return self._image_url(template)
@property
def thumbnail_path(self):
template = os.path.join(
helpers.user_media_path('previews'),
'thumbs',
'%s',
'%d.png'
)
return self._image_path(template)
@property
def image_path(self):
template = os.path.join(
helpers.user_media_path('previews'),
'full',
'%s',
'%d.png'
)
return self._image_path(template)
@property
def thumbnail_size(self):
return self.sizes.get('thumbnail', []) if self.sizes else []
@property
def image_size(self):
return self.sizes.get('image', []) if self.sizes else []
dbsignals.pre_save.connect(save_signal, sender=Preview,
dispatch_uid='preview_translations')
def delete_preview_files(sender, instance, **kw):
"""On delete of the Preview object from the database, unlink the image
and thumb on the file system """
for filename in [instance.image_path, instance.thumbnail_path]:
if storage.exists(filename):
log.info('Removing filename: %s for preview: %s'
% (filename, instance.pk))
storage.delete(filename)
models.signals.post_delete.connect(delete_preview_files,
sender=Preview,
dispatch_uid='delete_preview_files')
class AppSupport(ModelBase):
"""Cache to tell us if an add-on's current version supports an app."""
addon = models.ForeignKey(Addon, on_delete=models.CASCADE)
app = models.PositiveIntegerField(choices=amo.APPS_CHOICES,
db_column='app_id')
min = models.BigIntegerField("Minimum app version", null=True)
max = models.BigIntegerField("Maximum app version", null=True)
class Meta:
db_table = 'appsupport'
unique_together = ('addon', 'app')
class Charity(ModelBase):
name = models.CharField(max_length=255)
url = models.URLField()
paypal = models.CharField(max_length=255)
class Meta:
db_table = 'charities'
@property
def outgoing_url(self):
if self.pk == amo.FOUNDATION_ORG:
return self.url
return get_outgoing_url(unicode(self.url))
class DeniedSlug(ModelBase):
name = models.CharField(max_length=255, unique=True, default='')
class Meta:
db_table = 'addons_denied_slug'
def __unicode__(self):
return self.name
@classmethod
def blocked(cls, slug):
return slug.isdigit() or cls.objects.filter(name=slug).exists()
class FrozenAddon(models.Model):
"""Add-ons in this table never get a hotness score."""
addon = models.ForeignKey(Addon)
class Meta:
db_table = 'frozen_addons'
def __unicode__(self):
return 'Frozen: %s' % self.addon_id
@receiver(dbsignals.post_save, sender=FrozenAddon)
def freezer(sender, instance, **kw):
# Adjust the hotness of the FrozenAddon.
if instance.addon_id:
Addon.objects.get(id=instance.addon_id).update(hotness=0)
class CompatOverride(ModelBase):
"""Helps manage compat info for add-ons not hosted on AMO."""
name = models.CharField(max_length=255, blank=True, null=True)
guid = models.CharField(max_length=255, unique=True)
addon = models.ForeignKey(Addon, blank=True, null=True,
help_text='Fill this out to link an override '
'to a hosted add-on')
class Meta:
db_table = 'compat_override'
unique_together = ('addon', 'guid')
def save(self, *args, **kw):
if not self.addon:
qs = Addon.objects.filter(guid=self.guid)
if qs:
self.addon = qs[0]
return super(CompatOverride, self).save(*args, **kw)
def __unicode__(self):
if self.addon:
return unicode(self.addon)
elif self.name:
return '%s (%s)' % (self.name, self.guid)
else:
return self.guid
def is_hosted(self):
"""Am I talking about an add-on on AMO?"""
return bool(self.addon_id)
@staticmethod
def transformer(overrides):
if not overrides:
return
id_map = dict((o.id, o) for o in overrides)
qs = CompatOverrideRange.objects.filter(compat__in=id_map)
for compat_id, ranges in sorted_groupby(qs, 'compat_id'):
id_map[compat_id].compat_ranges = list(ranges)
# May be filled in by a transformer for performance.
@amo.cached_property(writable=True)
def compat_ranges(self):
return list(self._compat_ranges.all())
def collapsed_ranges(self):
"""Collapse identical version ranges into one entity."""
Range = collections.namedtuple('Range', 'type min max apps')
AppRange = collections.namedtuple('AppRange', 'app min max')
rv = []
def sort_key(x):
return (x.min_version, x.max_version, x.type)
for key, compats in sorted_groupby(self.compat_ranges, key=sort_key):
compats = list(compats)
first = compats[0]
item = Range(first.override_type(), first.min_version,
first.max_version, [])
for compat in compats:
app = AppRange(amo.APPS_ALL[compat.app],
compat.min_app_version, compat.max_app_version)
item.apps.append(app)
rv.append(item)
return rv
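    # Illustrative sketch (not in the original source): collapsed_ranges()
    # groups CompatOverrideRange rows sharing the same (min, max, type) and
    # lists the per-app ranges under a single entry. Hypothetical result:
    #
    #   ranges = override.collapsed_ranges()
    #   ranges[0].type   # 'incompatible'
    #   ranges[0].min, ranges[0].max
    #   ranges[0].apps   # [AppRange(app=<Firefox>, min='3.0', max='4.*'), ...]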
OVERRIDE_TYPES = (
(0, 'Compatible (not supported)'),
(1, 'Incompatible'),
)
class CompatOverrideRange(ModelBase):
"""App compatibility for a certain version range of a RemoteAddon."""
compat = models.ForeignKey(CompatOverride, related_name='_compat_ranges')
type = models.SmallIntegerField(choices=OVERRIDE_TYPES, default=1)
min_version = models.CharField(
max_length=255, default='0',
help_text=u'If not "0", version is required to exist for the override'
u' to take effect.')
max_version = models.CharField(
max_length=255, default='*',
help_text=u'If not "*", version is required to exist for the override'
u' to take effect.')
app = models.PositiveIntegerField(choices=amo.APPS_CHOICES,
db_column='app_id')
min_app_version = models.CharField(max_length=255, default='0')
max_app_version = models.CharField(max_length=255, default='*')
class Meta:
db_table = 'compat_override_range'
def override_type(self):
"""This is what Firefox wants to see in the XML output."""
return {0: 'compatible', 1: 'incompatible'}[self.type]
class IncompatibleVersions(ModelBase):
"""
Denormalized table to join against for fast compat override filtering.
This was created to be able to join against a specific version record since
the CompatOverrideRange can be wildcarded (e.g. 0 to *, or 1.0 to 1.*), and
addon versioning isn't as consistent as Firefox versioning to trust
`version_int` in all cases. So extra logic needed to be provided for when
a particular version falls within the range of a compatibility override.
"""
version = models.ForeignKey(Version, related_name='+')
app = models.PositiveIntegerField(choices=amo.APPS_CHOICES,
db_column='app_id')
min_app_version = models.CharField(max_length=255, blank=True, default='0')
max_app_version = models.CharField(max_length=255, blank=True, default='*')
min_app_version_int = models.BigIntegerField(blank=True, null=True,
editable=False, db_index=True)
max_app_version_int = models.BigIntegerField(blank=True, null=True,
editable=False, db_index=True)
class Meta:
db_table = 'incompatible_versions'
def __unicode__(self):
return u'<IncompatibleVersion V:%s A:%s %s-%s>' % (
            self.version.id, self.app, self.min_app_version,
self.max_app_version)
def save(self, *args, **kw):
self.min_app_version_int = version_int(self.min_app_version)
self.max_app_version_int = version_int(self.max_app_version)
return super(IncompatibleVersions, self).save(*args, **kw)
def update_incompatible_versions(sender, instance, **kw):
if not instance.compat.addon_id:
return
if not instance.compat.addon.type == amo.ADDON_EXTENSION:
return
from . import tasks
versions = instance.compat.addon.versions.values_list('id', flat=True)
for chunk in chunked(versions, 50):
tasks.update_incompatible_appversions.delay(chunk)
models.signals.post_save.connect(update_incompatible_versions,
sender=CompatOverrideRange,
dispatch_uid='cor_update_incompatible')
models.signals.post_delete.connect(update_incompatible_versions,
sender=CompatOverrideRange,
dispatch_uid='cor_update_incompatible')
def track_new_status(sender, instance, *args, **kw):
if kw.get('raw'):
        # The addon is being loaded from a fixture.
return
if kw.get('created'):
track_addon_status_change(instance)
models.signals.post_save.connect(track_new_status,
sender=Addon,
dispatch_uid='track_new_addon_status')
@Addon.on_change
def track_status_change(old_attr=None, new_attr=None, **kw):
if old_attr is None:
old_attr = {}
if new_attr is None:
new_attr = {}
new_status = new_attr.get('status')
old_status = old_attr.get('status')
if new_status != old_status:
track_addon_status_change(kw['instance'])
def track_addon_status_change(addon):
statsd.incr('addon_status_change.all.status_{}'
.format(addon.status))
| mstriemer/addons-server | src/olympia/addons/models.py | Python | bsd-3-clause | 80,686 |
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from os.path import basename, exists
from json import loads, dumps
from tempfile import NamedTemporaryFile
from tornado.web import authenticated, HTTPError
from qiita_core.qiita_settings import r_client
from qiita_pet.handlers.base_handlers import BaseHandler
from qiita_db.util import get_files_from_uploads_folders
from qiita_db.study import Study
from qiita_db.metadata_template.sample_template import SampleTemplate
from qiita_db.metadata_template.util import looks_like_qiime_mapping_file
from qiita_db.software import Software, Parameters
from qiita_db.processing_job import ProcessingJob
from qiita_db.exceptions import QiitaDBUnknownIDError
from qiita_pet.handlers.api_proxy import (
data_types_get_req, sample_template_samples_get_req,
prep_template_samples_get_req, study_prep_get_req,
sample_template_meta_cats_get_req, sample_template_category_get_req,
get_sample_template_processing_status,
check_fp)
SAMPLE_TEMPLATE_KEY_FORMAT = 'sample_template_%s'
def sample_template_checks(study_id, user, check_exists=False):
"""Performs different checks and raises errors if any of the checks fail
Parameters
----------
study_id : int
The study id
user : qiita_db.user.User
The user trying to access the study
check_exists : bool, optional
If true, check if the sample template exists
Raises
------
HTTPError
404 if the study does not exist
403 if the user does not have access to the study
404 if check_exists == True and the sample template doesn't exist
"""
try:
study = Study(int(study_id))
except QiitaDBUnknownIDError:
raise HTTPError(404, reason='Study does not exist')
if not study.has_access(user):
raise HTTPError(403, reason='User does not have access to study')
# Check if the sample template exists
if check_exists and not SampleTemplate.exists(study_id):
raise HTTPError(404, reason="Study %s doesn't have sample information"
% study_id)
def sample_template_handler_post_request(study_id, user, filepath,
data_type=None, direct_upload=False):
"""Creates a new sample template
Parameters
----------
study_id: int
The study to add the sample information
    user: qiita_db.user.User
The user performing the request
filepath: str
The path to the sample template file
data_type: str, optional
If filepath is a QIIME mapping file, the data type of the prep
information file
direct_upload: boolean, optional
If filepath is a direct upload; if False we need to process the
filepath as part of the study upload folder
Returns
-------
dict of {'job': str}
job: the id of the job adding the sample information to the study
Raises
------
HTTPError
404 if the filepath doesn't exist
"""
# Check if the current user has access to the study
sample_template_checks(study_id, user)
# Check if the file exists
if not direct_upload:
fp_rsp = check_fp(study_id, filepath)
if fp_rsp['status'] != 'success':
raise HTTPError(404, reason='Filepath not found')
filepath = fp_rsp['file']
is_mapping_file = looks_like_qiime_mapping_file(filepath)
if is_mapping_file and not data_type:
raise HTTPError(400, reason='Please, choose a data type if uploading '
'a QIIME mapping file')
qiita_plugin = Software.from_name_and_version('Qiita', 'alpha')
cmd = qiita_plugin.get_command('create_sample_template')
params = Parameters.load(
cmd, values_dict={'fp': filepath, 'study_id': study_id,
'is_mapping_file': is_mapping_file,
'data_type': data_type})
job = ProcessingJob.create(user, params, True)
r_client.set(SAMPLE_TEMPLATE_KEY_FORMAT % study_id,
dumps({'job_id': job.id}))
job.submit()
return {'job': job.id}
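# Hedged usage sketch (not part of the original handlers): the POST helper is
# normally invoked from SampleTemplateHandler.post with a filepath coming from
# either the study upload folder or a direct upload. Values are made up.
#
#   result = sample_template_handler_post_request(
#       1, user, 'sample_information.txt', data_type='16S')
#   result   # {'job': '<processing-job-id>'}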
def sample_template_handler_patch_request(user, req_op, req_path,
req_value=None, req_from=None,
direct_upload=False):
"""Patches the sample template
Parameters
----------
user: qiita_db.user.User
The user performing the request
req_op : str
The operation to perform on the sample template
req_path : str
The path to the attribute to patch
req_value : str, optional
The new value
req_from : str, optional
The original path of the element
direct_upload : boolean, optional
If the file being uploaded comes from a direct upload (True)
Returns
    -------
    dict of {'job': str}
        job: the id of the job performing the patch operation
Raises
------
HTTPError
        400 If the path parameter doesn't follow the expected format
400 If the given operation is not supported
"""
req_path = [v for v in req_path.split('/') if v]
# At this point we know the path should be at least length 2
if len(req_path) < 2:
raise HTTPError(400, reason='Incorrect path parameter')
study_id = int(req_path[0])
# Check if the current user has access to the study and if the sample
# template exists
sample_template_checks(study_id, user, check_exists=True)
if req_op == 'remove':
# Path format
# column: study_id/columns/column_name
# sample: study_id/samples/sample_id
if len(req_path) != 3:
raise HTTPError(400, reason='Incorrect path parameter')
attribute = req_path[1]
attr_id = req_path[2]
qiita_plugin = Software.from_name_and_version('Qiita', 'alpha')
cmd = qiita_plugin.get_command('delete_sample_or_column')
params = Parameters.load(
cmd, values_dict={'obj_class': 'SampleTemplate',
'obj_id': study_id,
'sample_or_col': attribute,
'name': attr_id})
job = ProcessingJob.create(user, params, True)
# Store the job id attaching it to the sample template id
r_client.set(SAMPLE_TEMPLATE_KEY_FORMAT % study_id,
dumps({'job_id': job.id}))
job.submit()
return {'job': job.id}
elif req_op == 'replace':
        # WARNING: Although the patch operation is a replace, it is not a full
        # true replace. A replace is in theory equivalent to a remove + add.
        # In this case, the replace operation doesn't necessarily remove
        # anything (e.g. when only new columns/samples are being added to the
        # sample information).
# Path format: study_id/data
# Forcing to specify data for extensibility. In the future we may want
# to use this function to replace other elements of the sample
# information
if len(req_path) != 2:
raise HTTPError(400, reason='Incorrect path parameter')
attribute = req_path[1]
if attribute == 'data':
# Update the sample information
if req_value is None:
raise HTTPError(400, reason="Value is required when updating "
"sample information")
if direct_upload:
# We can assume that the file exist as it was generated by
# the system
filepath = req_value
if not exists(filepath):
reason = ('Upload file not found (%s), please report to '
'[email protected]' % filepath)
raise HTTPError(404, reason=reason)
else:
# Check if the file exists
fp_rsp = check_fp(study_id, req_value)
if fp_rsp['status'] != 'success':
raise HTTPError(404, reason='Filepath not found')
filepath = fp_rsp['file']
qiita_plugin = Software.from_name_and_version('Qiita', 'alpha')
cmd = qiita_plugin.get_command('update_sample_template')
params = Parameters.load(
cmd, values_dict={'study': study_id,
'template_fp': filepath})
job = ProcessingJob.create(user, params, True)
# Store the job id attaching it to the sample template id
r_client.set(SAMPLE_TEMPLATE_KEY_FORMAT % study_id,
dumps({'job_id': job.id}))
job.submit()
return {'job': job.id}
else:
raise HTTPError(404, reason='Attribute %s not found' % attribute)
else:
raise HTTPError(400, reason='Operation %s not supported. Current '
'supported operations: remove, replace' % req_op)
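# Illustrative path formats accepted by sample_template_handler_patch_request,
# derived from the branches above (study and sample ids are hypothetical):
#
#   op='remove',  path='1/columns/season'  -> delete column 'season' of study 1
#   op='remove',  path='1/samples/1.SKB1'  -> delete sample '1.SKB1' of study 1
#   op='replace', path='1/data', value='new_template.txt'
#                                          -> update study 1 from uploaded file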
def sample_template_handler_delete_request(study_id, user):
"""Deletes the sample template
Parameters
----------
study_id: int
The study to delete the sample information
    user: qiita_db.user.User
The user performing the request
Returns
-------
dict of {'job': str}
        job: the id of the job deleting the sample information from the study
Raises
------
HTTPError
404 If the sample template doesn't exist
"""
# Check if the current user has access to the study and if the sample
# template exists
sample_template_checks(study_id, user, check_exists=True)
qiita_plugin = Software.from_name_and_version('Qiita', 'alpha')
cmd = qiita_plugin.get_command('delete_sample_template')
params = Parameters.load(cmd, values_dict={'study': int(study_id)})
job = ProcessingJob.create(user, params, True)
    # Store the job id while deleting the sample template
r_client.set(SAMPLE_TEMPLATE_KEY_FORMAT % study_id,
dumps({'job_id': job.id}))
job.submit()
return {'job': job.id}
class SampleTemplateHandler(BaseHandler):
@authenticated
def get(self):
study_id = self.get_argument('study_id')
# Check if the current user has access to the study
sample_template_checks(study_id, self.current_user)
self.render('study_ajax/sample_summary.html', study_id=study_id)
@authenticated
def post(self):
study_id = int(self.get_argument('study_id'))
filepath = self.get_argument('filepath')
data_type = self.get_argument('data_type')
direct_upload = self.get_argument('direct_upload', False)
if direct_upload and direct_upload == 'true':
direct_upload = True
with NamedTemporaryFile(suffix='.txt', delete=False) as fp:
fp.write(self.request.files['theFile'][0]['body'])
filepath = fp.name
self.write(sample_template_handler_post_request(
study_id, self.current_user, filepath, data_type=data_type,
direct_upload=direct_upload))
@authenticated
def patch(self):
req_op = self.get_argument('op')
req_path = self.get_argument('path')
req_value = self.get_argument('value', None)
req_from = self.get_argument('from', None)
direct_upload = self.get_argument('direct_upload', False)
if direct_upload and direct_upload == 'true':
direct_upload = True
with NamedTemporaryFile(suffix='.txt', delete=False) as fp:
fp.write(self.request.files['value'][0]['body'])
req_value = fp.name
self.write(sample_template_handler_patch_request(
self.current_user, req_op, req_path, req_value, req_from,
direct_upload))
@authenticated
def delete(self):
study_id = int(self.get_argument('study_id'))
self.write(sample_template_handler_delete_request(
study_id, self.current_user))
def sample_template_overview_handler_get_request(study_id, user):
# Check if the current user has access to the sample template
sample_template_checks(study_id, user)
# Check if the sample template exists
exists = SampleTemplate.exists(study_id)
# The following information should always be provided:
# The files that have been uploaded to the system and can be a
# sample template file
files = [f for _, f, _ in get_files_from_uploads_folders(study_id)
if f.endswith(('txt', 'tsv', 'xlsx'))]
# If there is a job associated with the sample information, the job id
job = None
job_info = r_client.get(SAMPLE_TEMPLATE_KEY_FORMAT % study_id)
if job_info:
job = loads(job_info)['job_id']
# Specific information if it exists or not:
data_types = []
st_fp_id = None
old_files = []
num_samples = 0
num_cols = 0
columns = []
specimen_id_column = None
sample_restrictions = ''
if exists:
# If it exists we need to provide:
# The id of the sample template file so the user can download it and
# the list of old filepaths
st = SampleTemplate(study_id)
all_st_files = st.get_filepaths()
# The current sample template file is the first one in the list
# (pop(0)) and we are interested only in the id ([0])
st_fp_id = all_st_files.pop(0)[0]
# For the old filepaths we are only interested in their basename
old_files = [basename(fp) for _, fp in all_st_files]
# The number of samples - this is a space efficient way of counting
# the number of samples. Doing len(list(st.keys())) creates a list
# that we are not using
num_samples = sum(1 for _ in st.keys())
columns = st.categories()
# The number of columns
num_cols = len(columns)
specimen_id_column = Study(study_id).specimen_id_column
_, sample_restrictions = st.validate_restrictions()
else:
# It doesn't exist, we also need to provide the data_types in case
# the user uploads a QIIME mapping file
data_types = sorted(data_types_get_req()['data_types'])
return {'exists': exists,
'uploaded_files': files,
'data_types': data_types,
'user_can_edit': Study(study_id).can_edit(user),
'job': job,
'download_id': st_fp_id,
'old_files': old_files,
'num_samples': num_samples,
'num_columns': num_cols,
'columns': columns,
'sample_restrictions': sample_restrictions,
'specimen_id_column': specimen_id_column}
class SampleTemplateOverviewHandler(BaseHandler):
@authenticated
def get(self):
study_id = int(self.get_argument('study_id'))
self.write(
sample_template_overview_handler_get_request(
study_id, self.current_user))
def sample_template_columns_get_req(study_id, column, user):
"""Returns the columns of the sample template
Parameters
----------
study_id: int
The study to retrieve the sample information summary
column: str
The column of interest, if None send all columns
    user: qiita_db.user.User
The user performing the request
Returns
-------
list of str
The result of the search
Raises
------
HTTPError
404 If the sample template doesn't exist
"""
# Check if the current user has access to the study and if the sample
# template exists
sample_template_checks(study_id, user, check_exists=True)
if column is None:
reply = SampleTemplate(study_id).categories()
else:
reply = list(SampleTemplate(study_id).get_category(column).values())
return reply
class SampleTemplateColumnsHandler(BaseHandler):
@authenticated
def get(self):
"""Send formatted summary page of sample template"""
sid = int(self.get_argument('study_id'))
column = self.get_argument('column', None)
reply = sample_template_columns_get_req(sid, column, self.current_user)
        # we reply with {'values': reply} because tornado expects a dict
self.write({'values': reply})
def _build_sample_summary(study_id, user_id):
"""Builds the row object for SlickGrid
Parameters
----------
study_id : int
Study to get samples from
user_id : str
User requesting the information
Returns
-------
    columns : dict
        keys are the SlickGrid field names, values are the column display names
    rows : dict of dicts
        {sample_id: {field_1: 'value', ...}, ...}
"""
# Load all samples available into dictionary and set
rows = {s: {'sample': s} for s in sample_template_samples_get_req(
study_id, user_id)['samples']}
samples = rows.keys()
# Add one column per prep template highlighting what samples exist
preps = study_prep_get_req(study_id, user_id)["info"]
columns = {}
for preptype in preps:
for prep in preps[preptype]:
field = "prep%d" % prep["id"]
name = "%s (%d)" % (prep["name"], prep["id"])
columns[field] = name
prep_samples = prep_template_samples_get_req(
prep['id'], user_id)['samples']
for s in samples:
rows[s][field] = 'X' if s in prep_samples else ''
return columns, rows
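# Sketch of the structures returned by _build_sample_summary (illustrative
# values only): 'columns' maps a SlickGrid field name to its display label and
# 'rows' marks with 'X' which samples appear in each prep template.
#
#   columns == {'prep10': 'My prep (10)'}
#   rows == {'1.SKB1': {'sample': '1.SKB1', 'prep10': 'X'},
#            '1.SKB2': {'sample': '1.SKB2', 'prep10': ''}}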
class SampleAJAX(BaseHandler):
@authenticated
def get(self):
"""Show the sample summary page"""
study_id = int(self.get_argument('study_id'))
email = self.current_user.id
res = sample_template_meta_cats_get_req(study_id, email)
if res['status'] == 'error':
if 'does not exist' in res['message']:
raise HTTPError(404, reason=res['message'])
elif 'User does not have access to study' in res['message']:
raise HTTPError(403, reason=res['message'])
else:
raise HTTPError(500, reason=res['message'])
categories = res['categories']
columns, rows = _build_sample_summary(study_id, email)
_, alert_type, alert_msg = get_sample_template_processing_status(
study_id)
self.render('study_ajax/sample_prep_summary.html',
rows=rows, columns=columns, categories=categories,
study_id=study_id, alert_type=alert_type,
alert_message=alert_msg,
user_can_edit=Study(study_id).can_edit(self.current_user))
@authenticated
def post(self):
study_id = int(self.get_argument('study_id'))
meta_col = self.get_argument('meta_col')
values = sample_template_category_get_req(meta_col, study_id,
self.current_user.id)
if values['status'] != 'success':
self.write(values)
else:
self.write({'status': 'success',
'message': '',
'values': values['values']
})
| biocore/qiita | qiita_pet/handlers/study_handlers/sample_template.py | Python | bsd-3-clause | 19,340 |
''' Provide special versions of list and dict, that can automatically notify
about changes when used for property values.
Mutations to these values are detected, and the property owning the
collection is notified of the changes. Consider the following model
definition:
.. code-block:: python
class SomeModel(Model):
options = List(String)
If we have an instance of this model, ``m``, then we can set the entire
value of the ``options`` property at once:
.. code-block:: python
m.options = ["foo", "bar"]
When we do this in the context of a Bokeh server application that is being
viewed in a browser, this change is automatically noticed, and the
corresponding BokehJS property in the browser is synchronized, possibly
causing some change in the visual state of the application in the browser.
But it is also desirable that changes *inside* the ``options`` list also
be detected. That is, the following kinds of operations should also be
automatically synchronized between BokehJS and a Bokeh server:
.. code-block:: python
m.options.append("baz")
m.options[2] = "quux"
m.options.insert(0, "bar")
The classes in this module provide this functionality.
.. note::
These classes form part of the very low-level machinery that implements
the Bokeh model and property system. It is unlikely that any of these
classes or their methods will be applicable to any standard usage or to
anyone who is not directly developing on Bokeh's own infrastructure.
'''
from __future__ import absolute_import, print_function
from ...util.dependencies import import_optional
pd = import_optional('pandas')
def notify_owner(func):
''' A decorator for mutating methods of property container classes
that notifies owners of the property container about mutating changes.
Args:
func (callable) : the container method to wrap in a notification
Returns:
wrapped method
Examples:
A ``__setitem__`` could be wrapped like this:
.. code-block:: python
# x[i] = y
@notify_owner
def __setitem__(self, i, y):
return super(PropertyValueDict, self).__setitem__(i, y)
The returned wrapped method will have a docstring indicating what
original method it is wrapping.
'''
def wrapper(self, *args, **kwargs):
old = self._saved_copy()
result = func(self, *args, **kwargs)
self._notify_owners(old)
return result
wrapper.__doc__ = "Container method ``%s`` instrumented to notify property owners" % func.__name__
return wrapper
class PropertyValueContainer(object):
''' A base class for property container classes that support change
notifications on mutating operations.
This class maintains an internal list of property owners, and also
provides a private mechanism for methods wrapped with
:func:`~bokeh.core.property_containers.notify_owners` to update
those owners when mutating changes occur.
'''
def __init__(self, *args, **kwargs):
self._owners = set()
super(PropertyValueContainer, self).__init__(*args, **kwargs)
def _register_owner(self, owner, descriptor):
self._owners.add((owner, descriptor))
def _unregister_owner(self, owner, descriptor):
self._owners.discard((owner, descriptor))
def _notify_owners(self, old, hint=None):
for (owner, descriptor) in self._owners:
descriptor._notify_mutated(owner, old, hint=hint)
def _saved_copy(self):
raise RuntimeError("Subtypes must implement this to make a backup copy")
class PropertyValueList(PropertyValueContainer, list):
''' A list property value container that supports change notifications on
mutating operations.
When a Bokeh model has a ``List`` property, the ``PropertyValueLists`` are
transparently created to wrap those values. These ``PropertyValueList``
values are subject to normal property validation. If the property type
``foo = List(Str)`` then attempting to set ``x.foo[0] = 10`` will raise
an error.
Instances of ``PropertyValueList`` can be explicitly created by passing
any object that the standard list initializer accepts, for example:
.. code-block:: python
>>> PropertyValueList([10, 20])
[10, 20]
>>> PropertyValueList((10, 20))
[10, 20]
The following mutating operations on lists automatically trigger
notifications:
.. code-block:: python
del x[y]
del x[i:j]
x += y
x *= y
x[i] = y
x[i:j] = y
x.append
x.extend
x.insert
x.pop
x.remove
x.reverse
x.sort
'''
def __init__(self, *args, **kwargs):
return super(PropertyValueList, self).__init__(*args, **kwargs)
def _saved_copy(self):
return list(self)
# delete x[y]
@notify_owner
def __delitem__(self, y):
return super(PropertyValueList, self).__delitem__(y)
# delete x[i:j]
@notify_owner
def __delslice__(self, i, j):
# Note: this is different py2 vs py3, py3 calls __delitem__ with a
# slice index, and does not have this method at all
return super(PropertyValueList, self).__delslice__(i, j)
# x += y
@notify_owner
def __iadd__(self, y):
return super(PropertyValueList, self).__iadd__(y)
# x *= y
@notify_owner
def __imul__(self, y):
return super(PropertyValueList, self).__imul__(y)
# x[i] = y
@notify_owner
def __setitem__(self, i, y):
return super(PropertyValueList, self).__setitem__(i, y)
# x[i:j] = y
@notify_owner
def __setslice__(self, i, j, y):
# Note: this is different py2 vs py3, py3 calls __setitem__ with a
# slice index, and does not have this method at all
return super(PropertyValueList, self).__setslice__(i, j, y)
@notify_owner
def append(self, obj):
return super(PropertyValueList, self).append(obj)
@notify_owner
def extend(self, iterable):
return super(PropertyValueList, self).extend(iterable)
@notify_owner
def insert(self, index, obj):
return super(PropertyValueList, self).insert(index, obj)
@notify_owner
def pop(self, index=-1):
return super(PropertyValueList, self).pop(index)
@notify_owner
def remove(self, obj):
return super(PropertyValueList, self).remove(obj)
@notify_owner
def reverse(self):
return super(PropertyValueList, self).reverse()
@notify_owner
def sort(self, **kwargs):
return super(PropertyValueList, self).sort(**kwargs)
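# Hedged sketch (not part of the original module) of how the notification hook
# fires for a PropertyValueList. FakeDescriptor is a stand-in for Bokeh's real
# property descriptor; only the _notify_mutated signature used here matters,
# and 'model' is a hypothetical owner object.
#
#   class FakeDescriptor(object):
#       def _notify_mutated(self, owner, old, hint=None):
#           print("owner", owner, "old value was", old)
#
#   values = PropertyValueList([10, 20])
#   values._register_owner(model, FakeDescriptor())
#   values.append(30)   # notify_owner wrapper calls _notify_owners([10, 20])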
class PropertyValueDict(PropertyValueContainer, dict):
''' A dict property value container that supports change notifications on
mutating operations.
    When a Bokeh model has a ``Dict`` property, ``PropertyValueDicts`` are
    transparently created to wrap those values. These ``PropertyValueDict``
    values are subject to normal property validation. If the property type
``foo = Dict(Str, Str)`` then attempting to set ``x.foo['bar'] = 10`` will
raise an error.
    Instances of ``PropertyValueDict`` can be explicitly created by passing
any object that the standard dict initializer accepts, for example:
.. code-block:: python
>>> PropertyValueDict(dict(a=10, b=20))
{'a': 10, 'b': 20}
>>> PropertyValueDict(a=10, b=20)
{'a': 10, 'b': 20}
>>> PropertyValueDict([('a', 10), ['b', 20]])
{'a': 10, 'b': 20}
The following mutating operations on dicts automatically trigger
notifications:
.. code-block:: python
del x[y]
x[i] = y
x.clear
x.pop
x.popitem
x.setdefault
x.update
'''
def __init__(self, *args, **kwargs):
return super(PropertyValueDict, self).__init__(*args, **kwargs)
def _saved_copy(self):
return dict(self)
# delete x[y]
@notify_owner
def __delitem__(self, y):
return super(PropertyValueDict, self).__delitem__(y)
# x[i] = y
@notify_owner
def __setitem__(self, i, y):
return super(PropertyValueDict, self).__setitem__(i, y)
@notify_owner
def clear(self):
return super(PropertyValueDict, self).clear()
@notify_owner
def pop(self, *args):
return super(PropertyValueDict, self).pop(*args)
@notify_owner
def popitem(self):
return super(PropertyValueDict, self).popitem()
@notify_owner
def setdefault(self, *args):
return super(PropertyValueDict, self).setdefault(*args)
@notify_owner
def update(self, *args, **kwargs):
return super(PropertyValueDict, self).update(*args, **kwargs)
class PropertyValueColumnData(PropertyValueDict):
''' A property value container for ColumnData that supports change
notifications on mutating operations.
This property value container affords specialized code paths for
updating the .data dictionary for ColumnDataSource. When possible,
more efficient ColumnDataChangedEvent hints are generated to perform
the updates:
.. code-block:: python
x[i] = y
x.update
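    For example (a minimal sketch; ``ColumnDataSource`` is assumed to be
    importable from ``bokeh.models``), replacing a single column in place
    only sends that column to the clients:
    .. code-block:: python
        from bokeh.models import ColumnDataSource
        source = ColumnDataSource(data=dict(x=[1, 2, 3], y=[4, 5, 6]))
        # source.data is wrapped in a PropertyValueColumnData, so this
        # assignment is routed through ``update`` below and emits a
        # ColumnDataChangedEvent hint restricted to the "y" column.
        source.data['y'] = [7, 8, 9]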
'''
# x[i] = y
# don't wrap with notify_owner --- notifies owners explicitly
def __setitem__(self, i, y):
return self.update([(i, y)])
# don't wrap with notify_owner --- notifies owners explicitly
def update(self, *args, **kwargs):
old = self._saved_copy()
result = super(PropertyValueDict, self).update(*args, **kwargs)
from ...document.events import ColumnDataChangedEvent
# Grab keys to update according to Python docstring for update([E, ]**F)
#
# If E is present and has a .keys() method, then does: for k in E: D[k] = E[k]
# If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v
# In either case, this is followed by: for k in F: D[k] = F[k]
cols = set(kwargs.keys())
if len(args) == 1:
E = args[0]
if hasattr(E, 'keys'):
cols |= set(E.keys())
else:
cols |= { x[0] for x in E }
# we must loop ourselves here instead of calling _notify_owners
# because the hint is customized for each owner separately
for (owner, descriptor) in self._owners:
hint = ColumnDataChangedEvent(owner.document, owner, cols=list(cols))
descriptor._notify_mutated(owner, old, hint=hint)
return result
# don't wrap with notify_owner --- notifies owners explicitly
def _stream(self, doc, source, new_data, rollover=None, setter=None):
''' Internal implementation to handle special-casing stream events
on ``ColumnDataSource`` columns.
Normally any changes to the ``.data`` dict attribute on a
``ColumnDataSource`` triggers a notification, causing all of the data
to be synchronized between server and clients.
The ``.stream`` method on column data sources exists to provide a
more efficient way to perform streaming (i.e. append-only) updates
to a data source, without having to perform a full synchronization,
which would needlessly re-send all the data.
To accomplish this, this function bypasses the wrapped methods on
``PropertyValueDict`` and uses the unwrapped versions on the dict
superclass directly. It then explicitly makes a notification, adding
a special ``ColumnsStreamedEvent`` hint to the message containing
only the small streamed data that BokehJS needs in order to
efficiently synchronize.
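        From user code this path is normally reached through the public
        ``stream`` method on ``ColumnDataSource`` (a minimal sketch; the
        column names and rollover value are illustrative):
        .. code-block:: python
            from bokeh.models import ColumnDataSource
            source = ColumnDataSource(data=dict(x=[1, 2], y=[3, 4]))
            # Appends the new rows to each column and sends only those rows,
            # keeping at most the last 100 rows on the client.
            source.stream(dict(x=[3], y=[5]), rollover=100)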
.. warning::
This function assumes the integrity of ``new_data`` has already
been verified.
'''
old = self._saved_copy()
import numpy as np
# TODO (bev) Currently this reports old differently for array vs list
        # For arrays it reports the actual old value. For lists, the reported
        # old value is actually the already-updated value, because the method
        # self._saved_copy() only makes a shallow copy.
for k, v in new_data.items():
if isinstance(self[k], np.ndarray) or isinstance(new_data[k], np.ndarray):
data = np.append(self[k], new_data[k])
if rollover and len(data) > rollover:
data = data[-rollover:]
super(PropertyValueDict, self).__setitem__(k, data)
else:
L = self[k]
L.extend(new_data[k])
if rollover is not None:
del L[:-rollover]
from ...document.events import ColumnsStreamedEvent
self._notify_owners(old,
hint=ColumnsStreamedEvent(doc, source, new_data, rollover, setter))
# don't wrap with notify_owner --- notifies owners explicitly
def _patch(self, doc, source, patches, setter=None):
''' Internal implementation to handle special-casing patch events
on ``ColumnDataSource`` columns.
Normally any changes to the ``.data`` dict attribute on a
``ColumnDataSource`` triggers a notification, causing all of the data
to be synchronized between server and clients.
The ``.patch`` method on column data sources exists to provide a
more efficient way to perform patching (i.e. random access) updates
to a data source, without having to perform a full synchronization,
which would needlessly re-send all the data.
To accomplish this, this function bypasses the wrapped methods on
``PropertyValueDict`` and uses the unwrapped versions on the dict
superclass directly. It then explicitly makes a notification, adding
a special ``ColumnsPatchedEvent`` hint to the message containing
only the small patched data that BokehJS needs in order to efficiently
synchronize.
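        From user code this path is normally reached through the public
        ``patch`` method on ``ColumnDataSource`` (a minimal sketch; the
        indices and values are illustrative):
        .. code-block:: python
            from bokeh.models import ColumnDataSource
            source = ColumnDataSource(data=dict(x=[10, 20, 30]))
            # Replaces x[0] with 11 and x[2] with 33, sending only the two
            # patched entries instead of the whole column.
            source.patch(dict(x=[(0, 11), (2, 33)]))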
.. warning::
This function assumes the integrity of ``patches`` has already
been verified.
'''
import numpy as np
old = self._saved_copy()
for name, patch in patches.items():
for ind, value in patch:
if isinstance(ind, (int, slice)):
self[name][ind] = value
else:
shape = self[name][ind[0]][ind[1:]].shape
self[name][ind[0]][ind[1:]] = np.array(value, copy=False).reshape(shape)
from ...document.events import ColumnsPatchedEvent
self._notify_owners(old,
hint=ColumnsPatchedEvent(doc, source, patches, setter))
| Karel-van-de-Plassche/bokeh | bokeh/core/property/containers.py | Python | bsd-3-clause | 14,792 |
# Copyright (c) 2009-2010 Six Apart Ltd.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Six Apart Ltd. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
`DataObject` is a class of object that provides coding between object
attributes and dictionaries.
`DataObject` supplies the mechanism for converting between dictionaries and
objects. These conversions are performed with the aid of `Field` instances
declared on `DataObject` subclasses. `Field` classes reside in the
`remoteobjects.fields` module.
"""
from copy import deepcopy
import logging
import remoteobjects.fields
classes_by_name = {}
classes_by_constant_field = {}
def find_by_name(name):
"""Finds and returns the DataObject subclass with the given name.
Parameter `name` should be a bare class name with no module. If there is
no class by that name, raises `KeyError`.
"""
return classes_by_name[name]
class DataObjectMetaclass(type):
"""Metaclass for `DataObject` classes.
This metaclass installs all `remoteobjects.fields.Property` instances
declared as attributes of the new class, including all `Field` and `Link`
instances.
This metaclass also makes the new class findable through the
`dataobject.find_by_name()` function.
"""
def __new__(cls, name, bases, attrs):
"""Creates and returns a new `DataObject` class with its declared
fields and name."""
fields = {}
new_fields = {}
new_properties = {}
# Inherit all the parent DataObject classes' fields.
for base in bases:
if isinstance(base, DataObjectMetaclass):
fields.update(base.fields)
# Move all the class's attributes that are Fields to the fields set.
for attrname, field in attrs.items():
if isinstance(field, remoteobjects.fields.Property):
new_properties[attrname] = field
if isinstance(field, remoteobjects.fields.Field):
new_fields[attrname] = field
elif attrname in fields:
# Throw out any parent fields that the subclass defined as
# something other than a Field.
del fields[attrname]
fields.update(new_fields)
attrs['fields'] = fields
obj_cls = super(DataObjectMetaclass, cls).__new__(cls, name, bases, attrs)
for field, value in new_properties.items():
obj_cls.add_to_class(field, value)
        # Register the new class so Object fields can forward-reference it by name.
classes_by_name[name] = obj_cls
# Tell this class's fields what this class is, so they can find their
# forward references later.
for field in new_properties.values():
field.of_cls = obj_cls
return obj_cls
def add_to_class(cls, name, value):
try:
value.install(name, cls)
except (NotImplementedError, AttributeError):
setattr(cls, name, value)
class DataObject(object):
"""An object that can be decoded from or encoded as a dictionary.
DataObject subclasses should be declared with their different data
attributes defined as instances of fields from the `remoteobjects.fields`
module. For example:
>>> from remoteobjects import dataobject, fields
>>> class Asset(dataobject.DataObject):
... name = fields.Field()
... updated = fields.Datetime()
... author = fields.Object('Author')
...
A DataObject's fields then provide the coding between live DataObject
instances and dictionaries.
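    For example, an ``Asset`` as declared above can be decoded from a
    dictionary and encoded back to one (a minimal sketch; the dictionary
    contents are illustrative):
    >>> asset = Asset.from_dict({'name': 'a photo'})
    >>> asset.name
    'a photo'
    >>> asset.to_dict()
    {'name': 'a photo'}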
"""
__metaclass__ = DataObjectMetaclass
def __init__(self, **kwargs):
"""Initializes a new `DataObject` with the given field values."""
self.api_data = {}
self.__dict__.update(kwargs)
def __eq__(self, other):
"""Returns whether two `DataObject` instances are equivalent.
If the `DataObject` instances are of the same type and contain the
same data in all their fields, the objects are equivalent.
"""
if type(self) != type(other):
return False
for k, v in self.fields.iteritems():
if isinstance(v, remoteobjects.fields.Field):
if getattr(self, k) != getattr(other, k):
return False
return True
def __ne__(self, other):
"""Returns whether two `DataObject` instances are different.
`DataObject` instances are different if they are not equivalent as
determined through `__eq__()`.
"""
return not self == other
@classmethod
def statefields(cls):
return cls.fields.keys() + ['api_data']
def __getstate__(self):
return dict((k, self.__dict__[k]) for k in self.statefields()
if k in self.__dict__)
def get(self, attr, *args):
return getattr(self, attr, *args)
def __iter__(self):
for key in self.fields.keys():
yield key
def to_dict(self):
"""Encodes the DataObject to a dictionary."""
# Start with the last set of data we got from the API
data = deepcopy(self.api_data)
# Now replace the data with what's actually in our object
for field_name, field in self.fields.iteritems():
value = getattr(self, field.attrname, None)
if value is not None:
data[field.api_name] = field.encode(value)
else:
data[field.api_name] = None
# Now delete any fields that ended up being None
# since we should exclude them in the resulting dict.
for k in data.keys():
if data[k] is None:
del data[k]
return data
@classmethod
def from_dict(cls, data):
"""Decodes a dictionary into a new `DataObject` instance."""
self = cls()
self.update_from_dict(data)
return self
def update_from_dict(self, data):
"""Adds the content of a dictionary to this DataObject.
Parameter `data` is the dictionary from which to update the object.
Use this only when receiving newly updated or partial content for a
DataObject; that is, when the data is from the outside data source and
        needs to be decoded through the object's fields. Data from "inside" your
application should be added to an object manually by setting the
object's attributes. Data that constitutes a new object should be
turned into another object with `from_dict()`.
"""
if not isinstance(data, dict):
raise TypeError
# Clear any local instance field data
for k in self.fields.iterkeys():
if k in self.__dict__:
del self.__dict__[k]
self.api_data = data
@classmethod
def subclass_with_constant_field(cls, fieldname, value):
"""Returns the closest subclass of this class that has a `Constant`
field with the given value.
Use this method in combination with the `fields.Constant` field class
to find the most appropriate subclass of `cls` based on a content
field. For example, if you have an ``Asset`` class, but want to
declare subclasses with special behavior based on the ``kind`` field
of the ``Asset`` instances, declare ``kind`` as a `Constant` field on
each subclass. Then when you want to create a new ``Asset`` instance
(as in ``Asset.from_dict()``), you can use this method to select a
more appropriate class to instantiate.
Parameters `fieldname` and `value` are the name and value of the
`Constant` field for which to search respectively.
If a subclass of `cls` has been declared with a `Constant` field of
the given name and value, it will be returned. If multiple subclasses
of `cls` declare a matching `Constant` field, one of the matching
subclasses will be returned, but which subclass is not defined.
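        A minimal sketch of the pattern (the ``Photo`` subclass is
        hypothetical, and `fields.Constant` is assumed to take the constant
        value as its first argument):
        >>> class Photo(Asset):
        ...     kind = fields.Constant('photo')
        ...
        >>> cls = Asset.subclass_with_constant_field('kind', 'photo')
        >>> cls is Photo
        True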
"""
try:
clsname = classes_by_constant_field[fieldname][tuple(value)]
except KeyError:
# No matching classes, then.
pass
else:
return find_by_name(clsname)
raise ValueError('No such subclass of %s with field %r equivalent to %r'
% (cls.__name__, fieldname, value))
| mozilla/remoteobjects | remoteobjects/dataobject.py | Python | bsd-3-clause | 9,792 |
# -*- coding: utf-8 -*-
from django import forms
from cms_content.settings import EDITOR
from cms_content.models import CMSArticle
from cms_content import widgets
WIDGET = getattr(widgets, EDITOR)
class CMSArticleAdminForm(forms.ModelForm):
content = forms.CharField(widget=WIDGET)
class Meta:
model = CMSArticle
class CMSArticleFrontendForm(forms.ModelForm):
error_css_class = 'error'
required_css_class = 'required'
content = forms.CharField(widget=WIDGET)
class Meta:
model = CMSArticle
fields = ('title', 'slug', 'content', 'category',)
| indexofire/django-cms-content | cms_content_patch/forms.py | Python | bsd-3-clause | 604 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 SWIPO Project
#
# Authors (this file):
# Stefan Schinkel <[email protected]>
"""
Provides sanity checks for basic parallel and serial circuits.
"""
import numpy as np
import networkx as nx
from pyunicorn import ResNetwork
from .ResistiveNetwork_utils import *
debug = 0
""" Test for basic sanity, parallel and serial circiuts
"""
def testParallelTrivial():
r""" Trivial parallel case:
a) 0 --- 1 --- 2
       /---- 3 ---\
    b) 0 --- 1 --- 2
       /---- 3 ---\
    c) 0 --- 1 --- 2
       \____ 4 ___/
ER(a) = 2*ER(b) = 3*ER(c)
"""
nws = []
# construct nw1
idI, idJ = [0, 1], [1, 2]
nws.append(makeNW(idI, idJ, [.1]))
# construct nw2
idI += [0, 3]
idJ += [3, 2]
nws.append(makeNW(idI, idJ, [.1]))
# nw3
idI += [0, 4]
idJ += [4, 2]
nws.append(makeNW(idI, idJ, [.1]))
ER = []
for nw in nws:
rnw = ResNetwork(nw)
ER.append(rnw.effective_resistance(0, 2))
assert abs(ER[0]/2-ER[1]) < .1E-6
assert abs(ER[0]/3-ER[2]) < .1E-6
def testParallelLessTrivial():
""" Less Trivial Parallel Case:
|--- 1 --- 0
a) 2 |
|--- 3 ----4
|--- 1 --- 0 --- 5 --- |
b) 2 | | 7
|--- 3 ----4 --- 6 --- |
|---- 8 ----------- |
| | |
| |----------| |
| | |
|--- 1 --- 0 --- 5 --- | | |
c) 2 | | 7 | 9
|--- 3 ----4 --- 6 --- | | |
| | |
| ----------| |
| | |
|---- 10 -----------|
"""
nws = []
idI = [0, 1, 1, 2, 3]
idJ = [1, 2, 3, 3, 4]
nws.append(makeNW(idI, idJ, [1]*len(idI)))
idI.extend([0, 5, 5, 6, 6])
idJ.extend([5, 6, 7, 7, 4])
nws.append(makeNW(idI, idJ, [1]*len(idI)))
idI.extend([0, 8, 8, 9, 10])
idJ.extend([8, 9, 10, 10, 4])
nws.append(makeNW(idI, idJ, [1]*len(idI)))
ER = []
Gs = []
for nw in nws:
rnw = ResNetwork(nw)
ER.append(rnw.effective_resistance(0, 4))
# Gs.append(nx.DiGraph(nw))
# # showGraphs(Gs)
# # s = ''
# # for i,e in enumerate(ER):
# # s = s + "NW{:d} {:.3f}\t".format(i,e)
# # print "Effective resistances (0,2)\n %s" % (s)
assert abs(ER[0]/2-ER[1]) < .1E-6
assert abs(ER[0]/3-ER[2]) < .1E-6
# """ Less Trivial Parallel Case:
# /--- 1 --- 0
# a) 2 |
# \--- 3 ----4
# /--- 1 --- 0 --- 5 --- \
# b) 2 | | 7
# \--- 3 ----4 --- 6 --- /
# / --- 8 ----------- \
# | \
# /--- 1 --- 0 --- 5 --- \ \
# c) 2 7 9
# \--- 3 ----4 --- 6 --- / /
# | /
# \ --- 10 -----------/
# """
# nws =[]
# #construct nw1
# idI = [0,1,1,2,3]
# idJ = [1,2,3,3,4]
# val = [.1] * 5
# nws.append(makeNW(idI,idJ,[.1]*len(idI))[0])
# idI.extend([0,5,6,7])
# idJ.extend([5,6,7,4])
# val.extend( val * 6)
# nws.append(makeNW(idI,idJ,[.1]*len(idI))[0])
# idI.extend([0,8,9,10])
# idJ.extend([8,9,10,4])
# val.extend( val * 4)
# nws.append(makeNW(idI,idJ,val)[0])
# ER = []
# for nw in nws:
# rnw = ResNetwork(nw)
# ER.append( rnw.effective_resistance(0,4))
# s = ''
# for i,e in enumerate(ER):
# s = s + "NW{:d} {:.3f}\t".format(i,e)
# print "Effective resistances (0,2)\n %s" % (s)
# assert abs(ER[0]/2-ER[1]) < .1E-6
# assert abs(ER[0]/3-ER[2]) < .1E-6
def testParallelRandom():
""" 50 random parallel cases
"""
N = 10
p = .7
runs = 0
while runs < 50:
G = nx.fast_gnp_random_graph(N, p)
a = 0
b = G.number_of_nodes()-1
try:
nx.shortest_path(G, source=a, target=b)
        except nx.NetworkXNoPath:
continue
i, j = [], []
for xx in G.edges():
i.append(xx[0])
j.append(xx[1])
# %.1f values for resistance
val = np.round(np.random.ranf(len(i))*100)/10
# and test
nw1 = makeNW(i, j, val)
nw2 = parallelCopy(nw1, a, b)
ER1 = ResNetwork(nw1).effective_resistance(a, b)
ER2 = ResNetwork(nw2).effective_resistance(a, b)
# assertion
assert (ER1/2-ER2) < 1E-6
# increment runs
runs += 1
def testSerialTrivial():
"""Trivial serial test case
a) 0 --- 1 --- 2
b) 0 --- 1 --- 2 --- 3 --- 4
    2*ER(a) = ER(b)
"""
# construct nw1
idI = [0, 1]
idJ = [1, 2]
val = [1, 1]
nw1 = np.zeros((3, 3))
G1 = nx.DiGraph()
for i, j, v in zip(idI, idJ, val):
nw1[i, j] = v
nw1[j, i] = v
# construct nw2
idI = idI + [2, 3]
idJ = idJ + [3, 4]
val = val + [1, 1]
nw2 = np.zeros((5, 5))
for i, j, v in zip(idI, idJ, val):
nw2[i, j] = v
nw2[j, i] = v
# init ResNetworks
rnw1 = ResNetwork(nw1)
rnw2 = ResNetwork(nw2)
ER1 = rnw1.effective_resistance(0, 2)
ER2 = rnw2.effective_resistance(0, 4)
print "Effective resistances (0,2)"
print "NW1 %.3f\tNW2 %.3f\t 2*NW1 = %.3f" % (ER1, ER2, 2*ER1)
assert (ER1*2-ER2) < 1E-6
def testSerialRandom():
""" 50 Random serial test cases
"""
N = 10
p = .7
runs = 0
while runs < 50:
# a random graph
G = nx.fast_gnp_random_graph(N, p)
try:
nx.shortest_path(G, source=0, target=N-1)
        except nx.NetworkXNoPath:
continue
# convert to plain ndarray
nw1 = nx2nw(G)
# copy and join network
nw2 = serialCopy(nw1)
# compute effective resistance
ER1 = ResNetwork(
nw1, silence_level=3).effective_resistance(0, len(nw1)-1)
ER2 = ResNetwork(
nw2, silence_level=3).effective_resistance(0, len(nw2)-1)
# increment runs
runs += 1
# assertion
print ER1*2-ER2
assert (ER1*2-ER2) < 1E-6
| leftaroundabout/pyunicorn | tests/test_core/TestResitiveNetwork-circuits.py | Python | bsd-3-clause | 6,379 |
"""
====================================================
Imputing missing values before building an estimator
====================================================
Missing values can be replaced by the mean, the median or the most frequent
value using the basic :class:`sklearn.impute.SimpleImputer`.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Another option is the :class:`sklearn.impute.IterativeImputer`. This uses
round-robin linear regression, treating every variable as an output in
turn. The version implemented assumes Gaussian (output) variables. If your
features are obviously non-Normal, consider transforming them to look more
Normal so as to potentially improve performance.
In addition to using an imputing method, we can also record which values were
missing using :func:`sklearn.impute.MissingIndicator`, since the missingness
itself might carry some information.
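A minimal sketch of the two imputers on a toy array (the values are purely
illustrative)::
    import numpy as np
    from sklearn.experimental import enable_iterative_imputer  # noqa
    from sklearn.impute import IterativeImputer, SimpleImputer
    X = np.array([[1.0, 2.0], [np.nan, 3.0], [7.0, 6.0]])
    SimpleImputer(strategy="mean").fit_transform(X)    # NaN -> column mean
    IterativeImputer(random_state=0).fit_transform(X)  # NaN -> regression estimate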
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
# To use the experimental IterativeImputer, we need to explicitly ask for it:
from sklearn.experimental import enable_iterative_imputer # noqa
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import make_pipeline, make_union
from sklearn.impute import SimpleImputer, IterativeImputer, MissingIndicator
from sklearn.model_selection import cross_val_score
rng = np.random.RandomState(0)
N_SPLITS = 5
REGRESSOR = RandomForestRegressor(random_state=0)
def get_scores_for_imputer(imputer, X_missing, y_missing):
estimator = make_pipeline(
make_union(imputer, MissingIndicator(missing_values=0)),
REGRESSOR)
impute_scores = cross_val_score(estimator, X_missing, y_missing,
scoring='neg_mean_squared_error',
cv=N_SPLITS)
return impute_scores
def get_results(dataset):
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
full_scores = cross_val_score(REGRESSOR, X_full, y_full,
scoring='neg_mean_squared_error',
cv=N_SPLITS)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = int(np.floor(n_samples * missing_rate))
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
# Estimate the score after replacing missing values by 0
imputer = SimpleImputer(missing_values=0,
strategy='constant',
fill_value=0)
zero_impute_scores = get_scores_for_imputer(imputer, X_missing, y_missing)
# Estimate the score after imputation (mean strategy) of the missing values
imputer = SimpleImputer(missing_values=0, strategy="mean")
mean_impute_scores = get_scores_for_imputer(imputer, X_missing, y_missing)
# Estimate the score after iterative imputation of the missing values
imputer = IterativeImputer(missing_values=0,
random_state=0,
n_nearest_features=5,
sample_posterior=True)
iterative_impute_scores = get_scores_for_imputer(imputer,
X_missing,
y_missing)
return ((full_scores.mean(), full_scores.std()),
(zero_impute_scores.mean(), zero_impute_scores.std()),
(mean_impute_scores.mean(), mean_impute_scores.std()),
(iterative_impute_scores.mean(), iterative_impute_scores.std()))
results_diabetes = np.array(get_results(load_diabetes()))
mses_diabetes = results_diabetes[:, 0] * -1
stds_diabetes = results_diabetes[:, 1]
results_boston = np.array(get_results(load_boston()))
mses_boston = results_boston[:, 0] * -1
stds_boston = results_boston[:, 1]
n_bars = len(mses_diabetes)
xval = np.arange(n_bars)
x_labels = ['Full data',
'Zero imputation',
'Mean Imputation',
'Multivariate Imputation']
colors = ['r', 'g', 'b', 'orange']
# plot diabetes results
plt.figure(figsize=(12, 6))
ax1 = plt.subplot(121)
for j in xval:
ax1.barh(j, mses_diabetes[j], xerr=stds_diabetes[j],
color=colors[j], alpha=0.6, align='center')
ax1.set_title('Imputation Techniques with Diabetes Data')
ax1.set_xlim(left=np.min(mses_diabetes) * 0.9,
right=np.max(mses_diabetes) * 1.1)
ax1.set_yticks(xval)
ax1.set_xlabel('MSE')
ax1.invert_yaxis()
ax1.set_yticklabels(x_labels)
# plot boston results
ax2 = plt.subplot(122)
for j in xval:
ax2.barh(j, mses_boston[j], xerr=stds_boston[j],
color=colors[j], alpha=0.6, align='center')
ax2.set_title('Imputation Techniques with Boston Data')
ax2.set_yticks(xval)
ax2.set_xlabel('MSE')
ax2.invert_yaxis()
ax2.set_yticklabels([''] * n_bars)
plt.show()
| chrsrds/scikit-learn | examples/impute/plot_missing_values.py | Python | bsd-3-clause | 5,503 |
import django.db.models as models
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.db.models.signals import post_save
class Profile(models.Model):
"""
parameters we can get from gigya:
birthMonth,isLoggedIn,city,UID,zip,birthYear,state,provider,email,
UIDSig,photoURL,timestamp,loginProviderUID,signature,isSiteUID,proxiedEmail
,thumbnailURL,nickname,firstName,loginProvider,gender,lastName,profileURL
birthDay,country,isSiteUser
    One unique user can have several UIDs.
"""
user = models.ForeignKey(User, unique=True, null=True)
uid = models.CharField(max_length=255)
login_provider = models.CharField(max_length=150)
timestamp = models.DateTimeField(null=True,blank=True)
isLoggedIn = models.BooleanField(default=False)
birthday = models.DateField(null=True,blank=True)
city = models.CharField(max_length=150, null=True,blank=True)
state = models.CharField(max_length=150, null=True,blank=True)
zip = models.CharField(max_length=30, null=True,blank=True)
country = models.CharField(max_length=30, null=True,blank=True)
photourl = models.CharField(max_length=255, null=True,blank=True)
first_name = models.CharField(max_length=80, null=True,blank=True)
last_name = models.CharField(max_length=80, null=True,blank=True)
gender = models.CharField(max_length=2, null=True,blank=True)
profileUrl = models.CharField(max_length=2, null=True, blank=True)
def create_profile(sender, instance=None, **kwargs):
if instance is None:
return
profile, created = Profile.objects.get_or_create(user=instance)
post_save.connect(create_profile, sender=User) | MediaSapiens/wavesf | apps/externals/gigyauth/models.py | Python | bsd-3-clause | 1,844 |
import os
import numpy as np
import pytest
from pandas import (
Categorical,
DatetimeIndex,
Interval,
IntervalIndex,
NaT,
Series,
TimedeltaIndex,
Timestamp,
cut,
date_range,
isna,
qcut,
timedelta_range,
)
from pandas.api.types import CategoricalDtype as CDT
from pandas.core.algorithms import quantile
import pandas.util.testing as tm
from pandas.tseries.offsets import Day, Nano
def test_qcut():
arr = np.random.randn(1000)
    # We store the bins as an Index that has been
    # rounded, so comparisons are a bit tricky.
labels, bins = qcut(arr, 4, retbins=True)
ex_bins = quantile(arr, [0, 0.25, 0.5, 0.75, 1.0])
result = labels.categories.left.values
assert np.allclose(result, ex_bins[:-1], atol=1e-2)
result = labels.categories.right.values
assert np.allclose(result, ex_bins[1:], atol=1e-2)
ex_levels = cut(arr, ex_bins, include_lowest=True)
tm.assert_categorical_equal(labels, ex_levels)
def test_qcut_bounds():
arr = np.random.randn(1000)
factor = qcut(arr, 10, labels=False)
assert len(np.unique(factor)) == 10
def test_qcut_specify_quantiles():
arr = np.random.randn(100)
factor = qcut(arr, [0, 0.25, 0.5, 0.75, 1.0])
expected = qcut(arr, 4)
tm.assert_categorical_equal(factor, expected)
def test_qcut_all_bins_same():
with pytest.raises(ValueError, match="edges.*unique"):
qcut([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 3)
def test_qcut_include_lowest():
values = np.arange(10)
ii = qcut(values, 4)
ex_levels = IntervalIndex(
[
Interval(-0.001, 2.25),
Interval(2.25, 4.5),
Interval(4.5, 6.75),
Interval(6.75, 9),
]
)
tm.assert_index_equal(ii.categories, ex_levels)
def test_qcut_nas():
arr = np.random.randn(100)
arr[:20] = np.nan
result = qcut(arr, 4)
assert isna(result[:20]).all()
def test_qcut_index():
result = qcut([0, 2], 2)
intervals = [Interval(-0.001, 1), Interval(1, 2)]
expected = Categorical(intervals, ordered=True)
tm.assert_categorical_equal(result, expected)
def test_qcut_binning_issues(datapath):
# see gh-1978, gh-1979
cut_file = datapath(os.path.join("reshape", "data", "cut_data.csv"))
arr = np.loadtxt(cut_file)
result = qcut(arr, 20)
starts = []
ends = []
for lev in np.unique(result):
s = lev.left
e = lev.right
assert s != e
starts.append(float(s))
ends.append(float(e))
for (sp, sn), (ep, en) in zip(
zip(starts[:-1], starts[1:]), zip(ends[:-1], ends[1:])
):
assert sp < sn
assert ep < en
assert ep <= sn
def test_qcut_return_intervals():
ser = Series([0, 1, 2, 3, 4, 5, 6, 7, 8])
res = qcut(ser, [0, 0.333, 0.666, 1])
exp_levels = np.array(
[Interval(-0.001, 2.664), Interval(2.664, 5.328), Interval(5.328, 8)]
)
exp = Series(exp_levels.take([0, 0, 0, 1, 1, 1, 2, 2, 2])).astype(CDT(ordered=True))
tm.assert_series_equal(res, exp)
@pytest.mark.parametrize(
"kwargs,msg",
[
(dict(duplicates="drop"), None),
(dict(), "Bin edges must be unique"),
(dict(duplicates="raise"), "Bin edges must be unique"),
(dict(duplicates="foo"), "invalid value for 'duplicates' parameter"),
],
)
def test_qcut_duplicates_bin(kwargs, msg):
# see gh-7751
values = [0, 0, 0, 0, 1, 2, 3]
if msg is not None:
with pytest.raises(ValueError, match=msg):
qcut(values, 3, **kwargs)
else:
result = qcut(values, 3, **kwargs)
expected = IntervalIndex([Interval(-0.001, 1), Interval(1, 3)])
tm.assert_index_equal(result.categories, expected)
@pytest.mark.parametrize(
"data,start,end", [(9.0, 8.999, 9.0), (0.0, -0.001, 0.0), (-9.0, -9.001, -9.0)]
)
@pytest.mark.parametrize("length", [1, 2])
@pytest.mark.parametrize("labels", [None, False])
def test_single_quantile(data, start, end, length, labels):
# see gh-15431
ser = Series([data] * length)
result = qcut(ser, 1, labels=labels)
if labels is None:
intervals = IntervalIndex([Interval(start, end)] * length, closed="right")
expected = Series(intervals).astype(CDT(ordered=True))
else:
expected = Series([0] * length)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"ser",
[
Series(DatetimeIndex(["20180101", NaT, "20180103"])),
Series(TimedeltaIndex(["0 days", NaT, "2 days"])),
],
ids=lambda x: str(x.dtype),
)
def test_qcut_nat(ser):
# see gh-19768
intervals = IntervalIndex.from_tuples(
[(ser[0] - Nano(), ser[2] - Day()), np.nan, (ser[2] - Day(), ser[2])]
)
expected = Series(Categorical(intervals, ordered=True))
result = qcut(ser, 2)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("bins", [3, np.linspace(0, 1, 4)])
def test_datetime_tz_qcut(bins):
# see gh-19872
tz = "US/Eastern"
ser = Series(date_range("20130101", periods=3, tz=tz))
result = qcut(ser, bins)
expected = Series(
IntervalIndex(
[
Interval(
Timestamp("2012-12-31 23:59:59.999999999", tz=tz),
Timestamp("2013-01-01 16:00:00", tz=tz),
),
Interval(
Timestamp("2013-01-01 16:00:00", tz=tz),
Timestamp("2013-01-02 08:00:00", tz=tz),
),
Interval(
Timestamp("2013-01-02 08:00:00", tz=tz),
Timestamp("2013-01-03 00:00:00", tz=tz),
),
]
)
).astype(CDT(ordered=True))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"arg,expected_bins",
[
[
timedelta_range("1day", periods=3),
TimedeltaIndex(["1 days", "2 days", "3 days"]),
],
[
date_range("20180101", periods=3),
DatetimeIndex(["2018-01-01", "2018-01-02", "2018-01-03"]),
],
],
)
def test_date_like_qcut_bins(arg, expected_bins):
# see gh-19891
ser = Series(arg)
result, result_bins = qcut(ser, 2, retbins=True)
tm.assert_index_equal(result_bins, expected_bins)
| toobaz/pandas | pandas/tests/reshape/test_qcut.py | Python | bsd-3-clause | 6,328 |
# -*- coding: utf-8 -*-
import csv
import json
from cStringIO import StringIO
from datetime import datetime
from django.conf import settings
from django.core import mail
from django.core.cache import cache
import mock
from pyquery import PyQuery as pq
from olympia import amo
from olympia.amo.tests import TestCase
from olympia.amo.tests import formset, initial
from olympia.access.models import Group, GroupUser
from olympia.addons.models import Addon, CompatOverride, CompatOverrideRange
from olympia.amo.urlresolvers import reverse
from olympia.amo.tests.test_helpers import get_image_path
from olympia.amo.utils import urlparams
from olympia.applications.models import AppVersion
from olympia.bandwagon.models import FeaturedCollection, MonthlyPick
from olympia.compat.cron import compatibility_report
from olympia.compat.models import CompatReport
from olympia.constants.base import VALIDATOR_SKELETON_RESULTS
from olympia.devhub.models import ActivityLog
from olympia.files.models import File, FileUpload
from olympia.stats.models import UpdateCount
from olympia.users.models import UserProfile
from olympia.users.utils import get_task_user
from olympia.versions.models import ApplicationsVersions, Version
from olympia.zadmin import forms, tasks
from olympia.zadmin.forms import DevMailerForm
from olympia.zadmin.models import (
EmailPreviewTopic, ValidationJob, ValidationResult)
from olympia.zadmin.tasks import updated_versions
from olympia.zadmin.views import find_files
class TestSiteEvents(TestCase):
fixtures = ['base/users', 'zadmin/tests/siteevents']
def setUp(self):
super(TestSiteEvents, self).setUp()
self.client.login(username='[email protected]', password='password')
def test_get(self):
url = reverse('zadmin.site_events')
response = self.client.get(url)
assert response.status_code == 200
events = response.context['events']
assert len(events) == 1
def test_add(self):
url = reverse('zadmin.site_events')
new_event = {
'event_type': 2,
'start': '2012-01-01',
'description': 'foo',
}
response = self.client.post(url, new_event, follow=True)
assert response.status_code == 200
events = response.context['events']
assert len(events) == 2
def test_edit(self):
url = reverse('zadmin.site_events', args=[1])
modified_event = {
'event_type': 2,
'start': '2012-01-01',
'description': 'bar',
}
response = self.client.post(url, modified_event, follow=True)
assert response.status_code == 200
events = response.context['events']
assert events[0].description == 'bar'
def test_delete(self):
url = reverse('zadmin.site_events.delete', args=[1])
response = self.client.get(url, follow=True)
assert response.status_code == 200
events = response.context['events']
assert len(events) == 0
class BulkValidationTest(TestCase):
fixtures = ['base/addon_3615', 'base/appversion', 'base/users']
def setUp(self):
super(BulkValidationTest, self).setUp()
assert self.client.login(username='[email protected]',
password='password')
self.addon = Addon.objects.get(pk=3615)
self.creator = UserProfile.objects.get(username='editor')
self.version = self.addon.get_version()
ApplicationsVersions.objects.filter(
application=amo.FIREFOX.id, version=self.version).update(
max=AppVersion.objects.get(application=1, version='3.7a1pre'))
self.application_version = self.version.apps.all()[0]
self.application = self.application_version.application
self.min = self.application_version.min
self.max = self.application_version.max
self.curr_max = self.appversion('3.7a1pre')
self.counter = 0
self.old_task_user = settings.TASK_USER_ID
settings.TASK_USER_ID = self.creator.id
def tearDown(self):
settings.TASK_USER_ID = self.old_task_user
super(BulkValidationTest, self).tearDown()
def appversion(self, version, application=amo.FIREFOX.id):
return AppVersion.objects.get(application=application,
version=version)
def create_job(self, **kwargs):
kw = dict(application=amo.FIREFOX.id,
curr_max_version=kwargs.pop('current', self.curr_max),
target_version=kwargs.pop('target',
self.appversion('3.7a3')),
creator=self.creator)
kw.update(kwargs)
return ValidationJob.objects.create(**kw)
def create_file(self, version=None, platform=amo.PLATFORM_ALL.id):
if not version:
version = self.version
return File.objects.create(version=version,
filename='file-%s' % self.counter,
platform=platform,
status=amo.STATUS_PUBLIC)
def create_result(self, job, f, **kwargs):
self.counter += 1
kw = dict(file=f,
validation='{}',
errors=0,
warnings=0,
notices=0,
validation_job=job,
task_error=None,
valid=0,
completed=datetime.now())
kw.update(kwargs)
return ValidationResult.objects.create(**kw)
def start_validation(self, new_max='3.7a3'):
self.new_max = self.appversion(new_max)
r = self.client.post(reverse('zadmin.start_validation'),
{'application': amo.FIREFOX.id,
'curr_max_version': self.curr_max.id,
'target_version': self.new_max.id,
'finish_email': '[email protected]'},
follow=True)
assert r.status_code == 200
class TestBulkValidation(BulkValidationTest):
@mock.patch('olympia.zadmin.tasks.bulk_validate_file')
def test_start(self, bulk_validate_file):
new_max = self.appversion('3.7a3')
r = self.client.post(reverse('zadmin.start_validation'),
{'application': amo.FIREFOX.id,
'curr_max_version': self.curr_max.id,
'target_version': new_max.id,
'finish_email': '[email protected]'},
follow=True)
self.assertNoFormErrors(r)
self.assert3xx(r, reverse('zadmin.validation'))
job = ValidationJob.objects.get()
assert job.application == amo.FIREFOX.id
assert job.curr_max_version.version == self.curr_max.version
assert job.target_version.version == new_max.version
assert job.finish_email == '[email protected]'
assert job.completed is None
assert job.result_set.all().count() == len(self.version.all_files)
assert bulk_validate_file.delay.called
@mock.patch('olympia.zadmin.tasks.bulk_validate_file')
def test_ignore_user_disabled_addons(self, bulk_validate_file):
self.addon.update(disabled_by_user=True)
r = self.client.post(reverse('zadmin.start_validation'),
{'application': amo.FIREFOX.id,
'curr_max_version': self.curr_max.id,
'target_version': self.appversion('3.7a3').id,
'finish_email': '[email protected]'},
follow=True)
self.assertNoFormErrors(r)
self.assert3xx(r, reverse('zadmin.validation'))
assert not bulk_validate_file.delay.called
@mock.patch('olympia.zadmin.tasks.bulk_validate_file')
def test_ignore_non_public_addons(self, bulk_validate_file):
target_ver = self.appversion('3.7a3').id
for status in (amo.STATUS_DISABLED, amo.STATUS_NULL,
amo.STATUS_DELETED):
self.addon.update(status=status)
r = self.client.post(reverse('zadmin.start_validation'),
{'application': amo.FIREFOX.id,
'curr_max_version': self.curr_max.id,
'target_version': target_ver,
'finish_email': '[email protected]'},
follow=True)
self.assertNoFormErrors(r)
self.assert3xx(r, reverse('zadmin.validation'))
assert not bulk_validate_file.delay.called, (
'Addon with status %s should be ignored' % status)
@mock.patch('olympia.zadmin.tasks.bulk_validate_file')
def test_ignore_lang_packs(self, bulk_validate_file):
target_ver = self.appversion('3.7a3').id
self.addon.update(type=amo.ADDON_LPAPP)
r = self.client.post(reverse('zadmin.start_validation'),
{'application': amo.FIREFOX.id,
'curr_max_version': self.curr_max.id,
'target_version': target_ver,
'finish_email': '[email protected]'},
follow=True)
self.assertNoFormErrors(r)
self.assert3xx(r, reverse('zadmin.validation'))
assert not bulk_validate_file.delay.called, (
'Lang pack addons should be ignored')
@mock.patch('olympia.zadmin.tasks.bulk_validate_file')
def test_ignore_themes(self, bulk_validate_file):
target_ver = self.appversion('3.7a3').id
self.addon.update(type=amo.ADDON_THEME)
self.client.post(reverse('zadmin.start_validation'),
{'application': amo.FIREFOX.id,
'curr_max_version': self.curr_max.id,
'target_version': target_ver,
'finish_email': '[email protected]'})
assert not bulk_validate_file.delay.called, (
'Theme addons should be ignored')
@mock.patch('olympia.zadmin.tasks.bulk_validate_file')
def test_validate_all_non_disabled_addons(self, bulk_validate_file):
target_ver = self.appversion('3.7a3').id
bulk_validate_file.delay.called = False
self.addon.update(status=amo.STATUS_PUBLIC)
r = self.client.post(reverse('zadmin.start_validation'),
{'application': amo.FIREFOX.id,
'curr_max_version': self.curr_max.id,
'target_version': target_ver,
'finish_email': '[email protected]'},
follow=True)
self.assertNoFormErrors(r)
self.assert3xx(r, reverse('zadmin.validation'))
assert bulk_validate_file.delay.called, (
'Addon with status %s should be validated' % self.addon.status)
def test_grid(self):
job = self.create_job()
for res in (dict(errors=0), dict(errors=1)):
self.create_result(job, self.create_file(), **res)
r = self.client.get(reverse('zadmin.validation'))
assert r.status_code == 200
doc = pq(r.content)
assert doc('table tr td').eq(0).text() == str(job.pk) # ID
assert doc('table tr td').eq(3).text() == 'Firefox' # Application
assert doc('table tr td').eq(4).text() == self.curr_max.version
assert doc('table tr td').eq(5).text() == '3.7a3'
assert doc('table tr td').eq(6).text() == '2' # tested
assert doc('table tr td').eq(7).text() == '1' # failing
assert doc('table tr td').eq(8).text()[0] == '1' # passing
assert doc('table tr td').eq(9).text() == '0' # exceptions
def test_application_versions_json(self):
r = self.client.post(reverse('zadmin.application_versions_json'),
{'application': amo.FIREFOX.id})
assert r.status_code == 200
data = json.loads(r.content)
empty = True
for id, ver in data['choices']:
empty = False
assert AppVersion.objects.get(pk=id).version == ver
assert not empty, "Unexpected: %r" % data
def test_job_status(self):
job = self.create_job()
def get_data():
self.create_result(job, self.create_file(), **{})
r = self.client.post(reverse('zadmin.job_status'),
{'job_ids': json.dumps([job.pk])})
assert r.status_code == 200
data = json.loads(r.content)[str(job.pk)]
return data
data = get_data()
assert data['completed'] == 1
assert data['total'] == 1
assert data['percent_complete'] == '100'
assert data['job_id'] == job.pk
assert data['completed_timestamp'] == ''
job.update(completed=datetime.now())
data = get_data()
assert data['completed_timestamp'] != '', (
'Unexpected: %s' % data['completed_timestamp'])
class TestBulkUpdate(BulkValidationTest):
def setUp(self):
super(TestBulkUpdate, self).setUp()
self.job = self.create_job(completed=datetime.now())
self.update_url = reverse('zadmin.notify', args=[self.job.pk])
self.list_url = reverse('zadmin.validation')
self.data = {'text': '{{ APPLICATION }} {{ VERSION }}',
'subject': '..'}
self.version_one = Version.objects.create(addon=self.addon)
self.version_two = Version.objects.create(addon=self.addon)
appver = AppVersion.objects.get(application=1, version='3.7a1pre')
for v in self.version_one, self.version_two:
ApplicationsVersions.objects.create(
application=amo.FIREFOX.id, version=v,
min=appver, max=appver)
def test_no_update_link(self):
self.create_result(self.job, self.create_file(), **{})
r = self.client.get(self.list_url)
doc = pq(r.content)
assert doc('table tr td a.set-max-version').text() == (
'Notify and set max versions')
def test_update_link(self):
self.create_result(self.job, self.create_file(), **{'valid': 1})
r = self.client.get(self.list_url)
doc = pq(r.content)
assert doc('table tr td a.set-max-version').text() == (
'Notify and set max versions')
def test_update_url(self):
self.create_result(self.job, self.create_file(), **{'valid': 1})
r = self.client.get(self.list_url)
doc = pq(r.content)
assert doc('table tr td a.set-max-version').attr('data-job-url') == (
self.update_url)
def test_update_anonymous(self):
self.client.logout()
r = self.client.post(self.update_url)
assert r.status_code == 302
def test_version_pks(self):
for version in [self.version_one, self.version_two]:
for x in range(0, 3):
self.create_result(self.job, self.create_file(version))
assert sorted(updated_versions(self.job)) == (
[self.version_one.pk, self.version_two.pk])
def test_update_passing_only(self):
self.create_result(self.job, self.create_file(self.version_one))
self.create_result(self.job, self.create_file(self.version_two),
errors=1)
assert sorted(updated_versions(self.job)) == (
[self.version_one.pk])
def test_update_pks(self):
self.create_result(self.job, self.create_file(self.version))
r = self.client.post(self.update_url, self.data)
assert r.status_code == 302
assert self.version.apps.all()[0].max == self.job.target_version
def test_update_unclean_pks(self):
self.create_result(self.job, self.create_file(self.version))
self.create_result(self.job, self.create_file(self.version),
errors=1)
r = self.client.post(self.update_url, self.data)
assert r.status_code == 302
assert self.version.apps.all()[0].max == self.job.curr_max_version
def test_update_pks_logs(self):
self.create_result(self.job, self.create_file(self.version))
assert ActivityLog.objects.for_addons(self.addon).count() == 0
self.client.post(self.update_url, self.data)
upd = amo.LOG.MAX_APPVERSION_UPDATED.id
logs = ActivityLog.objects.for_addons(self.addon).filter(action=upd)
assert logs.count() == 1
assert logs[0].user == get_task_user()
def test_update_wrong_version(self):
self.create_result(self.job, self.create_file(self.version))
av = self.version.apps.all()[0]
av.max = self.appversion('3.6')
av.save()
self.client.post(self.update_url, self.data)
assert self.version.apps.all()[0].max == self.appversion('3.6')
def test_update_all_within_range(self):
self.create_result(self.job, self.create_file(self.version))
# Create an appversion in between current and target.
av = self.version.apps.all()[0]
av.max = self.appversion('3.7a2')
av.save()
self.client.post(self.update_url, self.data)
assert self.version.apps.all()[0].max == self.appversion('3.7a3')
def test_version_comparison(self):
# regression test for bug 691984
job = self.create_job(completed=datetime.now(),
current=self.appversion('3.0.9'),
target=self.appversion('3.5'))
# .* was not sorting right
self.version.apps.all().update(max=self.appversion('3.0.*'))
self.create_result(job, self.create_file(self.version))
self.client.post(reverse('zadmin.notify', args=[job.pk]),
self.data)
assert self.version.apps.all()[0].max == self.appversion('3.5')
def test_update_different_app(self):
self.create_result(self.job, self.create_file(self.version))
target = self.version.apps.all()[0]
target.application = amo.FIREFOX.id
target.save()
assert self.version.apps.all()[0].max == self.curr_max
def test_update_twice(self):
self.create_result(self.job, self.create_file(self.version))
self.client.post(self.update_url, self.data)
assert self.version.apps.all()[0].max == self.job.target_version
now = self.version.modified
self.client.post(self.update_url, self.data)
assert self.version.modified == now
def test_update_notify(self):
self.create_result(self.job, self.create_file(self.version))
self.client.post(self.update_url, self.data)
assert len(mail.outbox) == 1
def test_update_subject(self):
data = self.data.copy()
data['subject'] = '{{ PASSING_ADDONS.0.name }}'
f = self.create_file(self.version)
self.create_result(self.job, f)
self.client.post(self.update_url, data)
assert mail.outbox[0].subject == (
'%s' % self.addon.name)
@mock.patch('olympia.zadmin.tasks.log')
def test_bulk_email_logs_stats(self, log):
log.info = mock.Mock()
self.create_result(self.job, self.create_file(self.version))
self.client.post(self.update_url, self.data)
assert log.info.call_args_list[-8][0][0] == (
'[1@None] bulk update stats for job %s: '
'{bumped: 1, is_dry_run: 0, processed: 1}'
% self.job.pk)
assert log.info.call_args_list[-2][0][0] == (
'[1@None] bulk email stats for job %s: '
'{author_emailed: 1, is_dry_run: 0, processed: 1}'
% self.job.pk)
def test_application_version(self):
self.create_result(self.job, self.create_file(self.version))
self.client.post(self.update_url, self.data)
assert mail.outbox[0].body == 'Firefox 3.7a3'
def test_multiple_result_links(self):
# Creates validation results for two files of the same addon:
results = [
self.create_result(self.job, self.create_file(self.version)),
self.create_result(self.job, self.create_file(self.version))]
self.client.post(self.update_url,
{'text': '{{ PASSING_ADDONS.0.links }}',
'subject': '..'})
body = mail.outbox[0].body
assert all((reverse('devhub.bulk_compat_result',
args=(self.addon.slug, result.pk))
in body)
for result in results)
def test_notify_mail_preview(self):
self.create_result(self.job, self.create_file(self.version))
self.client.post(self.update_url,
{'text': 'the message', 'subject': 'the subject',
'preview_only': 'on'})
assert len(mail.outbox) == 0
rs = self.job.get_notify_preview_emails()
assert [e.subject for e in rs] == ['the subject']
# version should not be bumped since it's in preview mode:
assert self.version.apps.all()[0].max == self.max
upd = amo.LOG.MAX_APPVERSION_UPDATED.id
logs = ActivityLog.objects.for_addons(self.addon).filter(action=upd)
assert logs.count() == 0
class TestBulkNotify(BulkValidationTest):
def setUp(self):
super(TestBulkNotify, self).setUp()
self.job = self.create_job(completed=datetime.now())
self.update_url = reverse('zadmin.notify', args=[self.job.pk])
self.syntax_url = reverse('zadmin.notify.syntax')
self.list_url = reverse('zadmin.validation')
self.version_one = Version.objects.create(addon=self.addon)
self.version_two = Version.objects.create(addon=self.addon)
def test_no_notify_link(self):
self.create_result(self.job, self.create_file(), **{})
r = self.client.get(self.list_url)
doc = pq(r.content)
assert len(doc('table tr td a.notify')) == 0
def test_notify_link(self):
self.create_result(self.job, self.create_file(), **{'errors': 1})
r = self.client.get(self.list_url)
doc = pq(r.content)
assert doc('table tr td a.set-max-version').text() == (
'Notify and set max versions')
def test_notify_url(self):
self.create_result(self.job, self.create_file(), **{'errors': 1})
r = self.client.get(self.list_url)
doc = pq(r.content)
assert doc('table tr td a.set-max-version').attr('data-job-url') == (
self.update_url)
def test_notify_anonymous(self):
self.client.logout()
r = self.client.post(self.update_url)
assert r.status_code == 302
def test_notify_log(self):
self.create_result(self.job, self.create_file(self.version),
**{'errors': 1})
assert ActivityLog.objects.for_addons(self.addon).count() == 0
self.client.post(self.update_url, {'text': '..', 'subject': '..'})
upd = amo.LOG.BULK_VALIDATION_USER_EMAILED.id
logs = (ActivityLog.objects.for_user(self.creator)
.filter(action=upd))
assert logs.count() == 1
assert logs[0].user == self.creator
def test_compat_bump_log(self):
self.create_result(self.job, self.create_file(self.version),
**{'errors': 0})
assert ActivityLog.objects.for_addons(self.addon).count() == 0
self.client.post(self.update_url, {'text': '..', 'subject': '..'})
upd = amo.LOG.MAX_APPVERSION_UPDATED.id
logs = ActivityLog.objects.for_addons(self.addon).filter(action=upd)
assert logs.count() == 1
assert logs[0].user == self.creator
def test_notify_mail(self):
self.create_result(self.job, self.create_file(self.version),
**{'errors': 1})
r = self.client.post(self.update_url,
{'text': '..',
'subject': '{{ FAILING_ADDONS.0.name }}'})
assert r.status_code == 302
assert len(mail.outbox) == 1
assert mail.outbox[0].body == '..'
assert mail.outbox[0].subject == self.addon.name
assert mail.outbox[0].to == [u'[email protected]']
def test_result_links(self):
result = self.create_result(self.job, self.create_file(self.version),
**{'errors': 1})
r = self.client.post(self.update_url,
{'text': '{{ FAILING_ADDONS.0.links }}',
'subject': '...'})
assert r.status_code == 302
assert len(mail.outbox) == 1
res = reverse('devhub.bulk_compat_result',
args=(self.addon.slug, result.pk))
email = mail.outbox[0].body
assert res in email, ('Unexpected message: %s' % email)
def test_notify_mail_partial(self):
self.create_result(self.job, self.create_file(self.version),
**{'errors': 1})
self.create_result(self.job, self.create_file(self.version))
r = self.client.post(self.update_url, {'text': '..', 'subject': '..'})
assert r.status_code == 302
assert len(mail.outbox) == 1
def test_notify_mail_multiple(self):
self.create_result(self.job, self.create_file(self.version),
**{'errors': 1})
self.create_result(self.job, self.create_file(self.version),
**{'errors': 1})
r = self.client.post(self.update_url, {'text': '..', 'subject': '..'})
assert r.status_code == 302
assert len(mail.outbox) == 1
def test_notify_mail_preview(self):
for i in range(2):
self.create_result(self.job, self.create_file(self.version),
**{'errors': 1})
r = self.client.post(self.update_url,
{'text': 'the message', 'subject': 'the subject',
'preview_only': 'on'})
assert r.status_code == 302
assert len(mail.outbox) == 0
rs = self.job.get_notify_preview_emails()
assert [e.subject for e in rs] == ['the subject']
def test_notify_rendering(self):
self.create_result(self.job, self.create_file(self.version),
**{'errors': 1})
r = self.client.post(self.update_url,
{'text': '{{ FAILING_ADDONS.0.name }}'
'{{ FAILING_ADDONS.0.compat_link }}',
'subject': '{{ FAILING_ADDONS.0.name }} blah'})
assert r.status_code == 302
assert len(mail.outbox) == 1
url = reverse('devhub.versions.edit', args=[self.addon.pk,
self.version.pk])
assert str(self.addon.name) in mail.outbox[0].body
assert url in mail.outbox[0].body
assert str(self.addon.name) in mail.outbox[0].subject
def test_notify_unicode(self):
self.addon.name = u'འབྲུག་ཡུལ།'
self.addon.save()
self.create_result(self.job, self.create_file(self.version),
**{'errors': 1})
r = self.client.post(self.update_url,
{'text': '{{ FAILING_ADDONS.0.name }}',
'subject': '{{ FAILING_ADDONS.0.name }} blah'})
assert r.status_code == 302
assert len(mail.outbox) == 1
assert mail.outbox[0].body == self.addon.name
def test_notify_template(self):
for text, res in (['some sample text', True],
['{{ FAILING_ADDONS.0.name }}{% if %}', False]):
assert forms.NotifyForm(
{'text': text, 'subject': '...'}).is_valid() == res
def test_notify_syntax(self):
for text, res in (['some sample text', True],
['{{ FAILING_ADDONS.0.name }}{% if %}', False]):
r = self.client.post(self.syntax_url, {'text': text,
'subject': '..'})
assert r.status_code == 200
assert json.loads(r.content)['valid'] == res
class TestBulkValidationTask(BulkValidationTest):
def test_validate(self):
self.start_validation()
res = ValidationResult.objects.get()
self.assertCloseToNow(res.completed)
assert not res.task_error
validation = json.loads(res.validation)
assert res.errors == 1
assert validation['messages'][0]['id'] == ['main', 'prepare_package',
'not_found']
assert res.valid is False
assert res.warnings == 0, [mess['message']
for mess in validation['messages']]
assert res.notices == 0
assert validation['errors'] == 1
self.assertCloseToNow(res.validation_job.completed)
assert res.validation_job.stats['total'] == 1
assert res.validation_job.stats['completed'] == 1
assert res.validation_job.stats['passing'] == 0
assert res.validation_job.stats['failing'] == 1
assert res.validation_job.stats['errors'] == 0
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == (
'Behold! Validation results for Firefox %s->%s'
% (self.curr_max.version, self.new_max.version))
assert mail.outbox[0].to == ['[email protected]']
@mock.patch('validator.validate.validate')
def test_validator_bulk_compat_flag(self, validate):
try:
self.start_validation()
except Exception:
# We only care about the call to `validate()`, not the result.
pass
assert validate.call_args[1].get('compat_test')
@mock.patch('olympia.zadmin.tasks.run_validator')
def test_task_error(self, run_validator):
run_validator.side_effect = RuntimeError('validation error')
try:
self.start_validation()
except:
# the real test is how it's handled, below...
pass
res = ValidationResult.objects.get()
err = res.task_error.strip()
assert err.endswith('RuntimeError: validation error'), (
'Unexpected: %s' % err)
self.assertCloseToNow(res.completed)
assert res.validation_job.stats['total'] == 1
assert res.validation_job.stats['errors'] == 1
assert res.validation_job.stats['passing'] == 0
assert res.validation_job.stats['failing'] == 0
@mock.patch('olympia.zadmin.tasks.run_validator')
def test_validate_for_appversions(self, run_validator):
data = {
"errors": 1,
"warnings": 50,
"notices": 1,
"messages": [],
"compatibility_summary": {
"errors": 0,
"warnings": 0,
"notices": 0
},
"metadata": {}
}
run_validator.return_value = json.dumps(data)
self.start_validation()
assert run_validator.called
assert run_validator.call_args[1]['for_appversions'] == (
{amo.FIREFOX.guid: [self.new_max.version]})
@mock.patch('olympia.zadmin.tasks.run_validator')
def test_validate_all_tiers(self, run_validator):
run_validator.return_value = json.dumps(VALIDATOR_SKELETON_RESULTS)
res = self.create_result(self.create_job(), self.create_file(), **{})
tasks.bulk_validate_file(res.id)
assert run_validator.called
assert run_validator.call_args[1]['test_all_tiers']
@mock.patch('olympia.zadmin.tasks.run_validator')
def test_merge_with_compat_summary(self, run_validator):
data = {
"errors": 1,
"detected_type": "extension",
"success": False,
"warnings": 50,
"notices": 1,
"ending_tier": 5,
"messages": [
{"description": "A global function was called ...",
"tier": 3,
"message": "Global called in dangerous manner",
"uid": "de93a48831454e0b9d965642f6d6bf8f",
"id": [],
"compatibility_type": None,
"for_appversions": None,
"type": "warning"},
{"description": ("...no longer indicate the language "
"of Firefox's UI..."),
"tier": 5,
"message": "navigator.language may not behave as expected",
"uid": "f44c1930887c4d9e8bd2403d4fe0253a",
"id": [],
"compatibility_type": "error",
"for_appversions": {
"{ec8030f7-c20a-464f-9b0e-13a3a9e97384}": ["4.2a1pre",
"5.0a2",
"6.0a1"]},
"type": "warning"}],
"compatibility_summary": {
"notices": 1,
"errors": 6,
"warnings": 0},
"metadata": {
"version": "1.0",
"name": "FastestFox",
"id": "<id>"}}
run_validator.return_value = json.dumps(data)
        res = self.create_result(self.create_job(), self.create_file())
tasks.bulk_validate_file(res.id)
assert run_validator.called
res = ValidationResult.objects.get(pk=res.pk)
assert res.errors == (
data['errors'] + data['compatibility_summary']['errors'])
assert res.warnings == (
data['warnings'] + data['compatibility_summary']['warnings'])
assert res.notices == (
data['notices'] + data['compatibility_summary']['notices'])
@mock.patch('validator.validate.validate')
def test_app_version_overrides(self, validate):
validate.return_value = json.dumps(VALIDATOR_SKELETON_RESULTS)
self.start_validation(new_max='3.7a4')
assert validate.called
overrides = validate.call_args[1]['overrides']
assert overrides['targetapp_minVersion'] == {amo.FIREFOX.guid: '3.7a4'}
assert overrides['targetapp_maxVersion'] == {amo.FIREFOX.guid: '3.7a4'}
def create_version(self, addon, statuses, version_str=None):
max = self.max
if version_str:
max = AppVersion.objects.filter(version=version_str)[0]
version = Version.objects.create(addon=addon)
ApplicationsVersions.objects.create(application=self.application,
min=self.min, max=max,
version=version)
for status in statuses:
File.objects.create(status=status, version=version)
return version
def find_files(self, job_kwargs=None):
if not job_kwargs:
job_kwargs = {}
job = self.create_job(**job_kwargs)
find_files(job)
return list(job.result_set.values_list('file_id', flat=True))
def test_getting_disabled(self):
self.addon.update(status=amo.STATUS_DISABLED)
assert len(self.find_files()) == 0
def test_getting_deleted(self):
self.addon.update(status=amo.STATUS_DELETED)
assert len(self.find_files()) == 0
def test_getting_status(self):
self.create_version(self.addon, [amo.STATUS_PUBLIC,
amo.STATUS_NOMINATED])
ids = self.find_files()
assert len(ids) == 2
def test_getting_latest_public(self):
old_version = self.create_version(self.addon, [amo.STATUS_PUBLIC])
self.create_version(self.addon, [amo.STATUS_NULL])
ids = self.find_files()
assert len(ids) == 1
assert old_version.files.all()[0].pk == ids[0]
def test_getting_latest_public_order(self):
self.create_version(self.addon, [amo.STATUS_PURGATORY])
new_version = self.create_version(self.addon, [amo.STATUS_PUBLIC])
ids = self.find_files()
assert len(ids) == 1
assert new_version.files.all()[0].pk == ids[0]
def delete_orig_version(self, fixup=True):
# Because deleting versions resets the status...
self.version.delete()
# Don't really care what status this is, as long
# as it gets past the first SQL query.
self.addon.update(status=amo.STATUS_PUBLIC)
def test_no_versions(self):
self.delete_orig_version()
assert len(self.find_files()) == 0
def test_no_files(self):
self.version.files.all().delete()
self.addon.update(status=amo.STATUS_PUBLIC)
assert len(self.find_files()) == 0
def test_not_public(self):
version = self.create_version(self.addon, [amo.STATUS_LITE])
self.delete_orig_version()
ids = self.find_files()
assert len(ids) == 1
assert version.files.all()[0].pk == ids[0]
def test_not_public_and_newer(self):
self.create_version(self.addon, [amo.STATUS_LITE])
new_version = self.create_version(self.addon, [amo.STATUS_LITE])
self.delete_orig_version()
ids = self.find_files()
assert len(ids) == 1
assert new_version.files.all()[0].pk == ids[0]
def test_not_public_w_beta(self):
self.create_version(self.addon, [amo.STATUS_LITE])
self.create_version(self.addon, [amo.STATUS_BETA])
self.delete_orig_version()
ids = self.find_files()
assert len(ids) == 2
def test_not_public_w_multiple_files(self):
self.create_version(self.addon, [amo.STATUS_BETA])
new_version = self.create_version(self.addon, [amo.STATUS_LITE,
amo.STATUS_BETA])
self.delete_orig_version()
ids = self.find_files()
assert len(ids) == 2
assert sorted([v.id for v in new_version.files.all()]) == sorted(ids)
def test_not_prelim_w_multiple_files(self):
self.create_version(self.addon, [amo.STATUS_BETA])
self.create_version(self.addon, [amo.STATUS_BETA,
amo.STATUS_NOMINATED])
self.delete_orig_version()
ids = self.find_files()
assert len(ids) == 3
def test_public_partial(self):
self.create_version(self.addon, [amo.STATUS_PUBLIC])
new_version = self.create_version(self.addon, [amo.STATUS_BETA,
amo.STATUS_DISABLED])
ids = self.find_files()
assert len(ids) == 2
assert new_version.files.all()[1].pk not in ids
def test_getting_w_unreviewed(self):
old_version = self.create_version(self.addon, [amo.STATUS_PUBLIC])
new_version = self.create_version(self.addon, [amo.STATUS_UNREVIEWED])
ids = self.find_files()
assert len(ids) == 2
old_version_pk = old_version.files.all()[0].pk
new_version_pk = new_version.files.all()[0].pk
assert sorted([old_version_pk, new_version_pk]) == sorted(ids)
def test_multiple_files(self):
self.create_version(self.addon, [amo.STATUS_PUBLIC, amo.STATUS_PUBLIC,
amo.STATUS_PUBLIC])
ids = self.find_files()
assert len(ids) == 3
def test_multiple_public(self):
self.create_version(self.addon, [amo.STATUS_PUBLIC])
new_version = self.create_version(self.addon, [amo.STATUS_PUBLIC])
ids = self.find_files()
assert len(ids) == 1
assert new_version.files.all()[0].pk == ids[0]
def test_multiple_addons(self):
addon = Addon.objects.create(type=amo.ADDON_EXTENSION)
self.create_version(addon, [amo.STATUS_PURGATORY])
ids = self.find_files()
assert len(ids) == 1
assert self.version.files.all()[0].pk == ids[0]
def test_no_app(self):
version = self.create_version(self.addon, [amo.STATUS_LITE])
self.delete_orig_version()
version.apps.all().delete()
ids = self.find_files()
assert len(ids) == 0
def test_wrong_version(self):
self.create_version(self.addon, [amo.STATUS_LITE],
version_str='4.0b2pre')
self.delete_orig_version()
ids = self.find_files()
assert len(ids) == 0
def test_version_slightly_newer_than_current(self):
# addon matching current app/version but with a newer public version
# that is within range of the target app/version.
# See bug 658739.
self.create_version(self.addon, [amo.STATUS_PUBLIC],
version_str='3.7a2')
newer = self.create_version(self.addon, [amo.STATUS_PUBLIC],
version_str='3.7a3')
kw = dict(curr_max_version=self.appversion('3.7a2'),
target_version=self.appversion('3.7a4'))
ids = self.find_files(job_kwargs=kw)
assert newer.files.all()[0].pk == ids[0]
def test_version_compatible_with_newer_app(self):
# addon with a newer public version that is already compatible with
# an app/version higher than the target.
# See bug 658739.
self.create_version(self.addon, [amo.STATUS_PUBLIC],
version_str='3.7a2')
# A version that supports a newer Firefox than what we're targeting
self.create_version(self.addon, [amo.STATUS_PUBLIC],
version_str='3.7a4')
kw = dict(curr_max_version=self.appversion('3.7a2'),
target_version=self.appversion('3.7a3'))
ids = self.find_files(job_kwargs=kw)
assert len(ids) == 0
def test_version_compatible_with_target_app(self):
self.create_version(self.addon, [amo.STATUS_PUBLIC],
version_str='3.7a2')
# Already has a version that supports target:
self.create_version(self.addon, [amo.STATUS_PUBLIC],
version_str='3.7a3')
kw = dict(curr_max_version=self.appversion('3.7a2'),
target_version=self.appversion('3.7a3'))
ids = self.find_files(job_kwargs=kw)
assert len(ids) == 0
def test_version_webextension(self):
self.version.files.update(is_webextension=True)
assert not self.find_files()
class TestTallyValidationErrors(BulkValidationTest):
def setUp(self):
super(TestTallyValidationErrors, self).setUp()
self.data = {
"errors": 1,
"warnings": 1,
"notices": 0,
"messages": [
{"message": "message one",
"description": ["message one long"],
"id": ["path", "to", "test_one"],
"uid": "de93a48831454e0b9d965642f6d6bf8f",
"type": "error"},
{"message": "message two",
"description": "message two long",
"id": ["path", "to", "test_two"],
"uid": "f44c1930887c4d9e8bd2403d4fe0253a",
"compatibility_type": "error",
"type": "warning"}],
"metadata": {},
"compatibility_summary": {
"errors": 1,
"warnings": 1,
"notices": 0}}
def csv(self, job_id):
r = self.client.get(reverse('zadmin.validation_tally_csv',
args=[job_id]))
assert r.status_code == 200
rdr = csv.reader(StringIO(r.content))
header = rdr.next()
rows = sorted((r for r in rdr), key=lambda r: r[0])
return header, rows
@mock.patch('olympia.zadmin.tasks.run_validator')
def test_csv(self, run_validator):
run_validator.return_value = json.dumps(self.data)
self.start_validation()
res = ValidationResult.objects.get()
assert res.task_error is None
header, rows = self.csv(res.validation_job.pk)
assert header == ['message_id', 'message', 'long_message',
'type', 'addons_affected']
assert rows.pop(0) == ['path.to.test_one',
'message one', 'message one long', 'error', '1']
assert rows.pop(0) == ['path.to.test_two',
'message two', 'message two long', 'error', '1']
def test_count_per_addon(self):
job = self.create_job()
data_str = json.dumps(self.data)
for i in range(3):
tasks.tally_validation_results(job.pk, data_str)
header, rows = self.csv(job.pk)
assert rows.pop(0) == ['path.to.test_one',
'message one', 'message one long', 'error', '3']
assert rows.pop(0) == ['path.to.test_two',
'message two', 'message two long', 'error', '3']
def test_nested_list_messages(self):
job = self.create_job()
self.data['messages'] = [{
"message": "message one",
"description": ["message one long", ["something nested"]],
"id": ["path", "to", "test_one"],
"uid": "de93a48831454e0b9d965642f6d6bf8f",
"type": "error",
}]
data_str = json.dumps(self.data)
        # This was raising an exception; see bug 733845.
tasks.tally_validation_results(job.pk, data_str)
class TestEmailPreview(TestCase):
fixtures = ['base/addon_3615', 'base/users']
def setUp(self):
super(TestEmailPreview, self).setUp()
assert self.client.login(username='[email protected]',
password='password')
addon = Addon.objects.get(pk=3615)
self.topic = EmailPreviewTopic(addon)
def test_csv(self):
self.topic.send_mail('the subject', u'Hello Ivan Krsti\u0107',
from_email='[email protected]',
recipient_list=['[email protected]'])
r = self.client.get(reverse('zadmin.email_preview_csv',
args=[self.topic.topic]))
assert r.status_code == 200
rdr = csv.reader(StringIO(r.content))
assert rdr.next() == ['from_email', 'recipient_list', 'subject',
'body']
assert rdr.next() == ['[email protected]', '[email protected]',
'the subject', 'Hello Ivan Krsti\xc4\x87']
class TestMonthlyPick(TestCase):
fixtures = ['base/addon_3615', 'base/users']
def setUp(self):
super(TestMonthlyPick, self).setUp()
assert self.client.login(username='[email protected]',
password='password')
self.url = reverse('zadmin.monthly_pick')
addon = Addon.objects.get(pk=3615)
MonthlyPick.objects.create(addon=addon,
locale='zh-CN',
blurb="test data",
image="http://www.google.com")
self.f = self.client.get(self.url).context['form'].initial_forms[0]
self.initial = self.f.initial
def test_form_initial(self):
assert self.initial['addon'] == 3615
assert self.initial['locale'] == 'zh-CN'
assert self.initial['blurb'] == 'test data'
assert self.initial['image'] == 'http://www.google.com'
def test_success_insert(self):
dupe = initial(self.f)
del dupe['id']
dupe.update(locale='fr')
data = formset(initial(self.f), dupe, initial_count=1)
self.client.post(self.url, data)
assert MonthlyPick.objects.count() == 2
assert MonthlyPick.objects.all()[1].locale == 'fr'
def test_insert_no_image(self):
dupe = initial(self.f)
dupe.update(id='', image='', locale='en-US')
data = formset(initial(self.f), dupe, initial_count=1)
self.client.post(self.url, data)
assert MonthlyPick.objects.count() == 2
assert MonthlyPick.objects.all()[1].image == ''
def test_success_insert_no_locale(self):
dupe = initial(self.f)
del dupe['id']
del dupe['locale']
data = formset(initial(self.f), dupe, initial_count=1)
self.client.post(self.url, data)
assert MonthlyPick.objects.count() == 2
assert MonthlyPick.objects.all()[1].locale == ''
def test_insert_long_blurb(self):
dupe = initial(self.f)
dupe.update(id='', blurb='x' * 201, locale='en-US')
data = formset(initial(self.f), dupe, initial_count=1)
r = self.client.post(self.url, data)
assert r.context['form'].errors[1]['blurb'][0] == (
'Ensure this value has at most 200 characters (it has 201).')
def test_success_update(self):
d = initial(self.f)
d.update(locale='fr')
r = self.client.post(self.url, formset(d, initial_count=1))
assert r.status_code == 302
assert MonthlyPick.objects.all()[0].locale == 'fr'
def test_success_delete(self):
d = initial(self.f)
d.update(DELETE=True)
self.client.post(self.url, formset(d, initial_count=1))
assert MonthlyPick.objects.count() == 0
def test_require_login(self):
self.client.logout()
r = self.client.get(self.url)
assert r.status_code == 302
class TestFeatures(TestCase):
fixtures = ['base/users', 'base/collections', 'base/addon_3615.json']
def setUp(self):
super(TestFeatures, self).setUp()
assert self.client.login(username='[email protected]',
password='password')
self.url = reverse('zadmin.features')
FeaturedCollection.objects.create(application=amo.FIREFOX.id,
locale='zh-CN', collection_id=80)
self.f = self.client.get(self.url).context['form'].initial_forms[0]
self.initial = self.f.initial
def test_form_initial(self):
assert self.initial['application'] == amo.FIREFOX.id
assert self.initial['locale'] == 'zh-CN'
assert self.initial['collection'] == 80
def test_form_attrs(self):
r = self.client.get(self.url)
assert r.status_code == 200
doc = pq(r.content)
assert doc('#features tr').attr('data-app') == str(amo.FIREFOX.id)
assert doc('#features td.app').hasClass(amo.FIREFOX.short)
assert doc('#features td.collection.loading').attr(
'data-collection') == '80'
assert doc('#features .collection-ac.js-hidden')
assert not doc('#features .collection-ac[disabled]')
def test_disabled_autocomplete_errors(self):
"""If any collection errors, autocomplete field should be enabled."""
d = dict(application=amo.FIREFOX.id, collection=999)
data = formset(self.initial, d, initial_count=1)
r = self.client.post(self.url, data)
doc = pq(r.content)
assert not doc('#features .collection-ac[disabled]')
def test_required_app(self):
d = dict(locale='zh-CN', collection=80)
data = formset(self.initial, d, initial_count=1)
r = self.client.post(self.url, data)
assert r.status_code == 200
assert r.context['form'].errors[0]['application'] == (
['This field is required.'])
assert r.context['form'].errors[0]['collection'] == (
['Invalid collection for this application.'])
def test_bad_app(self):
d = dict(application=999, collection=80)
data = formset(self.initial, d, initial_count=1)
r = self.client.post(self.url, data)
assert r.context['form'].errors[0]['application'] == [
'Select a valid choice. 999 is not one of the available choices.']
def test_bad_collection_for_app(self):
d = dict(application=amo.THUNDERBIRD.id, collection=80)
data = formset(self.initial, d, initial_count=1)
r = self.client.post(self.url, data)
assert r.context['form'].errors[0]['collection'] == (
['Invalid collection for this application.'])
def test_optional_locale(self):
d = dict(application=amo.FIREFOX.id, collection=80)
data = formset(self.initial, d, initial_count=1)
r = self.client.post(self.url, data)
assert r.context['form'].errors == [{}]
def test_bad_locale(self):
d = dict(application=amo.FIREFOX.id, locale='klingon', collection=80)
data = formset(self.initial, d, initial_count=1)
r = self.client.post(self.url, data)
assert r.context['form'].errors[0]['locale'] == (
['Select a valid choice. klingon is not one of the available '
'choices.'])
def test_required_collection(self):
d = dict(application=amo.FIREFOX.id)
data = formset(self.initial, d, initial_count=1)
r = self.client.post(self.url, data)
assert r.context['form'].errors[0]['collection'] == (
['This field is required.'])
def test_bad_collection(self):
d = dict(application=amo.FIREFOX.id, collection=999)
data = formset(self.initial, d, initial_count=1)
r = self.client.post(self.url, data)
assert r.context['form'].errors[0]['collection'] == (
['Invalid collection for this application.'])
def test_success_insert(self):
dupe = initial(self.f)
del dupe['id']
dupe.update(locale='fr')
data = formset(initial(self.f), dupe, initial_count=1)
self.client.post(self.url, data)
assert FeaturedCollection.objects.count() == 2
assert FeaturedCollection.objects.all()[1].locale == 'fr'
def test_success_update(self):
d = initial(self.f)
d.update(locale='fr')
r = self.client.post(self.url, formset(d, initial_count=1))
assert r.status_code == 302
assert FeaturedCollection.objects.all()[0].locale == 'fr'
def test_success_delete(self):
d = initial(self.f)
d.update(DELETE=True)
self.client.post(self.url, formset(d, initial_count=1))
assert FeaturedCollection.objects.count() == 0
class TestLookup(TestCase):
fixtures = ['base/users']
def setUp(self):
super(TestLookup, self).setUp()
assert self.client.login(username='[email protected]',
password='password')
self.user = UserProfile.objects.get(pk=999)
self.url = reverse('zadmin.search', args=['users', 'userprofile'])
def test_logged_out(self):
self.client.logout()
assert self.client.get('%s?q=admin' % self.url).status_code == 403
def check_results(self, q, expected):
res = self.client.get(urlparams(self.url, q=q))
assert res.status_code == 200
content = json.loads(res.content)
assert len(content) == len(expected)
ids = [int(c['value']) for c in content]
emails = [u'%s' % c['label'] for c in content]
for d in expected:
id = d['value']
email = u'%s' % d['label']
assert id in ids, (
'Expected user ID "%s" not found' % id)
assert email in emails, (
'Expected username "%s" not found' % email)
def test_lookup_wrong_model(self):
self.url = reverse('zadmin.search', args=['doesnt', 'exist'])
res = self.client.get(urlparams(self.url, q=''))
assert res.status_code == 404
def test_lookup_empty(self):
users = UserProfile.objects.values('id', 'email')
self.check_results('', [dict(
value=u['id'], label=u['email']) for u in users])
def test_lookup_by_id(self):
self.check_results(self.user.id, [dict(value=self.user.id,
label=self.user.email)])
def test_lookup_by_email(self):
self.check_results(self.user.email, [dict(value=self.user.id,
label=self.user.email)])
def test_lookup_by_username(self):
self.check_results(self.user.username, [dict(value=self.user.id,
label=self.user.email)])
class TestAddonSearch(amo.tests.ESTestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestAddonSearch, self).setUp()
self.reindex(Addon)
assert self.client.login(username='[email protected]',
password='password')
self.url = reverse('zadmin.addon-search')
def test_lookup_addon(self):
res = self.client.get(urlparams(self.url, q='delicious'))
# There's only one result, so it should just forward us to that page.
assert res.status_code == 302
class TestAddonAdmin(TestCase):
fixtures = ['base/users', 'base/addon_3615']
def setUp(self):
super(TestAddonAdmin, self).setUp()
assert self.client.login(username='[email protected]',
password='password')
self.url = reverse('admin:addons_addon_changelist')
def test_basic(self):
res = self.client.get(self.url)
doc = pq(res.content)
rows = doc('#result_list tbody tr')
assert rows.length == 1
assert rows.find('a').attr('href') == (
'/en-US/admin/models/addons/addon/3615/')
class TestAddonManagement(TestCase):
fixtures = ['base/addon_3615', 'base/users']
def setUp(self):
super(TestAddonManagement, self).setUp()
self.addon = Addon.objects.get(pk=3615)
self.url = reverse('zadmin.addon_manage', args=[self.addon.slug])
self.client.login(username='[email protected]', password='password')
def test_can_manage_unlisted_addons(self):
"""Unlisted addons can be managed too."""
self.addon.update(is_listed=False)
assert self.client.get(self.url).status_code == 200
def _form_data(self, data=None):
initial_data = {
'status': '4',
'form-0-status': '4',
'form-0-id': '67442',
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '1',
}
if data:
initial_data.update(data)
return initial_data
def test_addon_status_change(self):
data = self._form_data({'status': '3'})
r = self.client.post(self.url, data, follow=True)
assert r.status_code == 200
addon = Addon.objects.get(pk=3615)
assert addon.status == 3
def test_addon_file_status_change(self):
data = self._form_data({'form-0-status': '1'})
r = self.client.post(self.url, data, follow=True)
assert r.status_code == 200
file = File.objects.get(pk=67442)
assert file.status == 1
def test_addon_deleted_file_status_change(self):
file = File.objects.get(pk=67442)
file.version.update(deleted=True)
data = self._form_data({'form-0-status': '1'})
r = self.client.post(self.url, data, follow=True)
# Form errors are silently suppressed.
assert r.status_code == 200
# But no change.
assert file.status == 4
@mock.patch.object(File, 'file_path',
amo.tests.AMOPaths().file_fixture_path(
'delicious_bookmarks-2.1.106-fx.xpi'))
def test_regenerate_hash(self):
version = Version.objects.create(addon_id=3615)
file = File.objects.create(
filename='delicious_bookmarks-2.1.106-fx.xpi', version=version)
r = self.client.post(reverse('zadmin.recalc_hash', args=[file.id]))
assert json.loads(r.content)[u'success'] == 1
file = File.objects.get(pk=file.id)
assert file.size, 'File size should not be zero'
assert file.hash, 'File hash should not be empty'
@mock.patch.object(File, 'file_path',
amo.tests.AMOPaths().file_fixture_path(
'delicious_bookmarks-2.1.106-fx.xpi'))
def test_regenerate_hash_get(self):
""" Don't allow GET """
version = Version.objects.create(addon_id=3615)
file = File.objects.create(
filename='delicious_bookmarks-2.1.106-fx.xpi', version=version)
r = self.client.get(reverse('zadmin.recalc_hash', args=[file.id]))
assert r.status_code == 405 # GET out of here
class TestCompat(amo.tests.ESTestCase):
fixtures = ['base/users']
def setUp(self):
super(TestCompat, self).setUp()
self.url = reverse('zadmin.compat')
self.client.login(username='[email protected]', password='password')
self.app = amo.FIREFOX
self.app_version = amo.COMPAT[0]['main']
self.addon = self.populate(guid='xxx')
self.generate_reports(self.addon, good=0, bad=0, app=self.app,
app_version=self.app_version)
def update(self):
compatibility_report()
self.refresh()
def populate(self, **kw):
now = datetime.now()
name = 'Addon %s' % now
kw.update(guid=name)
addon = amo.tests.addon_factory(name=name, **kw)
UpdateCount.objects.create(addon=addon, count=10, date=now)
return addon
def generate_reports(self, addon, good, bad, app, app_version):
defaults = dict(guid=addon.guid, app_guid=app.guid,
app_version=app_version)
for x in xrange(good):
CompatReport.objects.create(works_properly=True, **defaults)
for x in xrange(bad):
CompatReport.objects.create(works_properly=False, **defaults)
self.update()
def get_pq(self, **kw):
r = self.client.get(self.url, kw)
assert r.status_code == 200
return pq(r.content)('#compat-results')
def test_defaults(self):
r = self.client.get(self.url)
assert r.status_code == 200
assert r.context['app'] == self.app
assert r.context['version'] == self.app_version
table = pq(r.content)('#compat-results')
assert table.length == 1
assert table.find('.no-results').length == 1
def check_row(self, tr, addon, good, bad, percentage, app, app_version):
assert tr.length == 1
version = addon.current_version.version
name = tr.find('.name')
assert name.find('.version').text() == 'v' + version
assert name.remove('.version').text() == unicode(addon.name)
assert name.find('a').attr('href') == addon.get_url_path()
assert tr.find('.maxver').text() == (
addon.compatible_apps[app].max.version)
incompat = tr.find('.incompat')
assert incompat.find('.bad').text() == str(bad)
assert incompat.find('.total').text() == str(good + bad)
percentage += '%'
assert percentage in incompat.text(), (
'Expected incompatibility to be %r' % percentage)
assert tr.find('.version a').attr('href') == (
reverse('devhub.versions.edit',
args=[addon.slug, addon.current_version.id]))
assert tr.find('.reports a').attr('href') == (
reverse('compat.reporter_detail', args=[addon.guid]))
form = tr.find('.overrides form')
assert form.attr('action') == reverse(
'admin:addons_compatoverride_add')
self.check_field(form, '_compat_ranges-TOTAL_FORMS', '1')
self.check_field(form, '_compat_ranges-INITIAL_FORMS', '0')
self.check_field(form, '_continue', '1')
self.check_field(form, '_confirm', '1')
self.check_field(form, 'addon', str(addon.id))
self.check_field(form, 'guid', addon.guid)
compat_field = '_compat_ranges-0-%s'
self.check_field(form, compat_field % 'min_version', '0')
self.check_field(form, compat_field % 'max_version', version)
self.check_field(form, compat_field % 'app', str(app.id))
self.check_field(form, compat_field % 'min_app_version',
app_version + 'a1')
self.check_field(form, compat_field % 'max_app_version',
app_version + '*')
def check_field(self, form, name, val):
assert form.find('input[name="%s"]' % name).val() == val
def test_firefox_hosted(self):
addon = self.populate()
self.generate_reports(addon, good=0, bad=11, app=self.app,
app_version=self.app_version)
tr = self.get_pq().find('tr[data-guid="%s"]' % addon.guid)
self.check_row(tr, addon, good=0, bad=11, percentage='100.0',
app=self.app, app_version=self.app_version)
# Add an override for this current app version.
compat = CompatOverride.objects.create(addon=addon, guid=addon.guid)
CompatOverrideRange.objects.create(
compat=compat,
app=amo.FIREFOX.id, min_app_version=self.app_version + 'a1',
max_app_version=self.app_version + '*')
# Check that there is an override for this current app version.
tr = self.get_pq().find('tr[data-guid="%s"]' % addon.guid)
assert tr.find('.overrides a').attr('href') == (
reverse('admin:addons_compatoverride_change', args=[compat.id]))
def test_non_default_version(self):
app_version = amo.COMPAT[2]['main']
addon = self.populate()
self.generate_reports(addon, good=0, bad=11, app=self.app,
app_version=app_version)
pq = self.get_pq()
assert pq.find('tr[data-guid="%s"]' % addon.guid).length == 0
appver = '%s-%s' % (self.app.id, app_version)
tr = self.get_pq(appver=appver)('tr[data-guid="%s"]' % addon.guid)
self.check_row(tr, addon, good=0, bad=11, percentage='100.0',
app=self.app, app_version=app_version)
def test_minor_versions(self):
addon = self.populate()
self.generate_reports(addon, good=0, bad=1, app=self.app,
app_version=self.app_version)
self.generate_reports(addon, good=1, bad=2, app=self.app,
app_version=self.app_version + 'a2')
tr = self.get_pq(ratio=0.0, minimum=0).find('tr[data-guid="%s"]' %
addon.guid)
self.check_row(tr, addon, good=1, bad=3, percentage='75.0',
app=self.app, app_version=self.app_version)
def test_ratio(self):
addon = self.populate()
self.generate_reports(addon, good=11, bad=11, app=self.app,
app_version=self.app_version)
# Should not show up for > 80%.
pq = self.get_pq()
assert pq.find('tr[data-guid="%s"]' % addon.guid).length == 0
# Should not show up for > 50%.
tr = self.get_pq(ratio=.5).find('tr[data-guid="%s"]' % addon.guid)
assert tr.length == 0
# Should show up for > 40%.
tr = self.get_pq(ratio=.4).find('tr[data-guid="%s"]' % addon.guid)
assert tr.length == 1
def test_min_incompatible(self):
addon = self.populate()
self.generate_reports(addon, good=0, bad=11, app=self.app,
app_version=self.app_version)
# Should show up for >= 10.
pq = self.get_pq()
assert pq.find('tr[data-guid="%s"]' % addon.guid).length == 1
# Should show up for >= 0.
tr = self.get_pq(minimum=0).find('tr[data-guid="%s"]' % addon.guid)
assert tr.length == 1
# Should not show up for >= 20.
tr = self.get_pq(minimum=20).find('tr[data-guid="%s"]' % addon.guid)
assert tr.length == 0
class TestMemcache(TestCase):
fixtures = ['base/addon_3615', 'base/users']
def setUp(self):
super(TestMemcache, self).setUp()
self.url = reverse('zadmin.memcache')
cache.set('foo', 'bar')
self.client.login(username='[email protected]', password='password')
def test_login(self):
self.client.logout()
assert self.client.get(self.url).status_code == 302
def test_can_clear(self):
self.client.post(self.url, {'yes': 'True'})
assert cache.get('foo') is None
def test_cant_clear(self):
self.client.post(self.url, {'yes': 'False'})
assert cache.get('foo') == 'bar'
class TestElastic(amo.tests.ESTestCase):
fixtures = ['base/addon_3615', 'base/users']
def setUp(self):
super(TestElastic, self).setUp()
self.url = reverse('zadmin.elastic')
self.client.login(username='[email protected]', password='password')
def test_login(self):
self.client.logout()
self.assert3xx(
self.client.get(self.url),
reverse('users.login') + '?to=/en-US/admin/elastic')
class TestEmailDevs(TestCase):
fixtures = ['base/addon_3615', 'base/users']
def setUp(self):
super(TestEmailDevs, self).setUp()
self.login('admin')
self.addon = Addon.objects.get(pk=3615)
def post(self, recipients='eula', subject='subject', message='msg',
preview_only=False):
return self.client.post(reverse('zadmin.email_devs'),
dict(recipients=recipients, subject=subject,
message=message,
preview_only=preview_only))
def test_preview(self):
res = self.post(preview_only=True)
self.assertNoFormErrors(res)
preview = EmailPreviewTopic(topic='email-devs')
assert [e.recipient_list for e in preview.filter()] == ['[email protected]']
assert len(mail.outbox) == 0
def test_actual(self):
subject = 'about eulas'
message = 'message about eulas'
res = self.post(subject=subject, message=message)
self.assertNoFormErrors(res)
self.assert3xx(res, reverse('zadmin.email_devs'))
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == subject
assert mail.outbox[0].body == message
assert mail.outbox[0].to == ['[email protected]']
assert mail.outbox[0].from_email == settings.DEFAULT_FROM_EMAIL
def test_only_eulas(self):
self.addon.update(eula=None)
res = self.post()
self.assertNoFormErrors(res)
assert len(mail.outbox) == 0
def test_sdk_devs(self):
(File.objects.filter(version__addon=self.addon)
.update(jetpack_version='1.5'))
res = self.post(recipients='sdk')
self.assertNoFormErrors(res)
assert len(mail.outbox) == 1
assert mail.outbox[0].to == ['[email protected]']
def test_only_sdk_devs(self):
res = self.post(recipients='sdk')
self.assertNoFormErrors(res)
assert len(mail.outbox) == 0
def test_only_extensions(self):
self.addon.update(type=amo.ADDON_EXTENSION)
res = self.post(recipients='all_extensions')
self.assertNoFormErrors(res)
assert len(mail.outbox) == 1
def test_ignore_deleted_always(self):
self.addon.update(status=amo.STATUS_DELETED)
for name, label in DevMailerForm._choices:
res = self.post(recipients=name)
self.assertNoFormErrors(res)
assert len(mail.outbox) == 0
def test_exclude_pending_for_addons(self):
self.addon.update(status=amo.STATUS_PENDING)
for name, label in DevMailerForm._choices:
if name in ('payments', 'desktop_apps'):
continue
res = self.post(recipients=name)
self.assertNoFormErrors(res)
assert len(mail.outbox) == 0
def test_exclude_fxa_migrated(self):
user = self.addon.authors.get()
user.update(fxa_id='yup')
res = self.post(recipients='fxa')
self.assertNoFormErrors(res)
assert len(mail.outbox) == 0
def test_include_fxa_not_migrated(self):
res = self.post(recipients='fxa')
user = self.addon.authors.get()
self.assertNoFormErrors(res)
assert len(mail.outbox) == 1
user = self.addon.authors.get()
user.update(fxa_id='')
res = self.post(recipients='fxa')
self.assertNoFormErrors(res)
assert len(mail.outbox) == 2
class TestFileDownload(TestCase):
fixtures = ['base/users']
def setUp(self):
super(TestFileDownload, self).setUp()
assert self.client.login(username='[email protected]',
password='password')
self.file = open(get_image_path('animated.png'), 'rb')
resp = self.client.post(reverse('devhub.upload'),
{'upload': self.file})
assert resp.status_code == 302
self.upload = FileUpload.objects.get()
self.url = reverse('zadmin.download_file', args=[self.upload.uuid])
def test_download(self):
"""Test that downloading file_upload objects works."""
resp = self.client.get(self.url)
assert resp.status_code == 200
assert resp.content == self.file.read()
class TestPerms(TestCase):
fixtures = ['base/users']
FILE_ID = '1234567890abcdef1234567890abcdef'
def assert_status(self, view, status, **kw):
"""Check that requesting the named view returns the expected status."""
assert self.client.get(reverse(view, kwargs=kw)).status_code == status
def test_admin_user(self):
# Admin should see views with Django's perm decorator and our own.
assert self.client.login(username='[email protected]',
password='password')
self.assert_status('zadmin.index', 200)
self.assert_status('zadmin.settings', 200)
self.assert_status('zadmin.langpacks', 200)
self.assert_status('zadmin.download_file', 404, uuid=self.FILE_ID)
self.assert_status('zadmin.addon-search', 200)
self.assert_status('zadmin.monthly_pick', 200)
self.assert_status('zadmin.features', 200)
self.assert_status('discovery.module_admin', 200)
def test_staff_user(self):
# Staff users have some privileges.
user = UserProfile.objects.get(email='[email protected]')
group = Group.objects.create(name='Staff', rules='AdminTools:View')
GroupUser.objects.create(group=group, user=user)
assert self.client.login(username='[email protected]',
password='password')
self.assert_status('zadmin.index', 200)
self.assert_status('zadmin.settings', 200)
self.assert_status('zadmin.langpacks', 200)
self.assert_status('zadmin.download_file', 404, uuid=self.FILE_ID)
self.assert_status('zadmin.addon-search', 200)
self.assert_status('zadmin.monthly_pick', 200)
self.assert_status('zadmin.features', 200)
self.assert_status('discovery.module_admin', 200)
def test_sr_reviewers_user(self):
# Sr Reviewers users have only a few privileges.
user = UserProfile.objects.get(email='[email protected]')
group = Group.objects.create(name='Sr Reviewer',
rules='ReviewerAdminTools:View')
GroupUser.objects.create(group=group, user=user)
assert self.client.login(username='[email protected]',
password='password')
self.assert_status('zadmin.index', 200)
self.assert_status('zadmin.langpacks', 200)
self.assert_status('zadmin.download_file', 404, uuid=self.FILE_ID)
self.assert_status('zadmin.addon-search', 200)
self.assert_status('zadmin.settings', 403)
def test_bulk_compat_user(self):
# Bulk Compatibility Updaters only have access to /admin/validation/*.
user = UserProfile.objects.get(email='[email protected]')
group = Group.objects.create(name='Bulk Compatibility Updaters',
rules='BulkValidationAdminTools:View')
GroupUser.objects.create(group=group, user=user)
assert self.client.login(username='[email protected]',
password='password')
self.assert_status('zadmin.index', 200)
self.assert_status('zadmin.validation', 200)
self.assert_status('zadmin.langpacks', 403)
self.assert_status('zadmin.download_file', 403, uuid=self.FILE_ID)
self.assert_status('zadmin.addon-search', 403)
self.assert_status('zadmin.settings', 403)
def test_unprivileged_user(self):
# Unprivileged user.
assert self.client.login(username='[email protected]',
password='password')
self.assert_status('zadmin.index', 403)
self.assert_status('zadmin.settings', 403)
self.assert_status('zadmin.langpacks', 403)
self.assert_status('zadmin.download_file', 403, uuid=self.FILE_ID)
self.assert_status('zadmin.addon-search', 403)
self.assert_status('zadmin.monthly_pick', 403)
self.assert_status('zadmin.features', 403)
self.assert_status('discovery.module_admin', 403)
        # Anonymous users should be redirected to the login page.
self.client.logout()
self.assert3xx(self.client.get(reverse('zadmin.index')),
reverse('users.login') + '?to=/en-US/admin/')
| andymckay/addons-server | src/olympia/zadmin/tests/test_views.py | Python | bsd-3-clause | 77,618 |
from dasdocc.conf import base | JohnRandom/django-aggregator | dasdocc/conf/staging.py | Python | bsd-3-clause | 29 |
import keyedcache
import logging
log = logging.getLogger(__name__)
class CachedObjectMixin(object):
"""Provides basic object keyedcache for any objects using this as a mixin.
The class name of the object should be unambiguous.
"""
def cache_delete(self, *args, **kwargs):
key = self.cache_key(*args, **kwargs)
log.debug("clearing cache for %s", key)
keyedcache.cache_delete(key, children=True)
def cache_get(self, *args, **kwargs):
key = self.cache_key(*args, **kwargs)
return keyedcache.cache_get(key)
def cache_key(self, *args, **kwargs):
keys = [self.__class__.__name__, self]
keys.extend(args)
return keyedcache.cache_key(keys, **kwargs)
def cache_reset(self):
self.cache_delete()
self.cache_set()
def cache_set(self, *args, **kwargs):
val = kwargs.pop('value', self)
key = self.cache_key(*args, **kwargs)
keyedcache.cache_set(key, value=val)
def is_cached(self, *args, **kwargs):
return keyedcache.is_cached(self.cache_key(*args, **kwargs))
# The helper functions find_by_id, find_by_key and find_by_slug below come from
# Satchmo, but are currently unused there as well.
def find_by_id(cls, groupkey, objectid, raises=False):
"""A helper function to look up an object by id"""
ob = None
try:
ob = keyedcache.cache_get(groupkey, objectid)
except keyedcache.NotCachedError as e:
try:
ob = cls.objects.get(pk=objectid)
keyedcache.cache_set(e.key, value=ob)
except cls.DoesNotExist:
log.debug("No such %s: %s", groupkey, objectid)
if raises:
raise cls.DoesNotExist
return ob
def find_by_key(cls, groupkey, key, raises=False):
"""A helper function to look up an object by key"""
ob = None
try:
ob = keyedcache.cache_get(groupkey, key)
except keyedcache.NotCachedError as e:
try:
ob = cls.objects.get(key__exact=key)
keyedcache.cache_set(e.key, value=ob)
except cls.DoesNotExist:
log.debug("No such %s: %s", groupkey, key)
if raises:
raise
return ob
def find_by_slug(cls, groupkey, slug, raises=False):
"""A helper function to look up an object by slug"""
ob = None
try:
ob = keyedcache.cache_get(groupkey, slug)
except keyedcache.NotCachedError as e:
try:
ob = cls.objects.get(slug__exact=slug)
keyedcache.cache_set(e.key, value=ob)
except cls.DoesNotExist:
log.debug("No such %s: %s", groupkey, slug)
if raises:
raise
return ob
| aronysidoro/django-livesettings | live/keyedcache/models.py | Python | bsd-3-clause | 2,736 |
# -*- coding: utf-8 -*-
"""
pygments.styles.manni
~~~~~~~~~~~~~~~~~~~~~
A colorful style, inspired by the terminal highlighting style.
This is a port of the style used in the `php port`_ of pygments
by Manni. The style is called 'default' there.
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class ManniStyle(Style):
"""
A colorful style, inspired by the terminal highlighting style.
"""
background_color = '#f0f3f3'
styles = {
Whitespace: '#bbbbbb',
Comment: 'italic #0099FF',
Comment.Preproc: 'noitalic #009999',
Comment.Special: 'bold',
Keyword: 'bold #006699',
Keyword.Pseudo: 'nobold',
Keyword.Type: '#007788',
Operator: '#555555',
Operator.Word: 'bold #000000',
Name.Builtin: '#336666',
Name.Function: '#CC00FF',
Name.Class: 'bold #00AA88',
Name.Namespace: 'bold #00CCFF',
Name.Exception: 'bold #CC0000',
Name.Variable: '#003333',
Name.Constant: '#336600',
Name.Label: '#9999FF',
Name.Entity: 'bold #999999',
Name.Attribute: '#330099',
Name.Tag: 'bold #330099',
Name.Decorator: '#9999FF',
String: '#CC3300',
String.Doc: 'italic',
String.Interpol: '#AA0000',
String.Escape: 'bold #CC3300',
String.Regex: '#33AAAA',
String.Symbol: '#FFCC33',
String.Other: '#CC3300',
Number: '#FF6600',
Generic.Heading: 'bold #003300',
Generic.Subheading: 'bold #003300',
Generic.Deleted: 'border:#CC0000 bg:#FFCCCC',
Generic.Inserted: 'border:#00CC00 bg:#CCFFCC',
Generic.Error: '#FF0000',
Generic.Emph: 'italic',
Generic.Strong: 'bold',
Generic.Prompt: 'bold #000099',
Generic.Output: '#AAAAAA',
Generic.Traceback: '#99CC66',
Error: 'bg:#FFAAAA #AA0000'
}
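# Example usage (illustrative; not part of the original module): render some
# Python source as HTML using this style.
#
#     from pygments import highlight
#     from pygments.lexers import PythonLexer
#     from pygments.formatters import HtmlFormatter
#
#     html = highlight('print("hello")', PythonLexer(),
#                      HtmlFormatter(style=ManniStyle))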
| wakatime/wakatime | wakatime/packages/py27/pygments/styles/manni.py | Python | bsd-3-clause | 2,374 |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from future.builtins import zip
from six import StringIO
import unittest
import warnings
from functools import partial
from skbio import (read, write, Sequence, DNA, RNA, Protein,
SequenceCollection, Alignment)
from skbio.io import FASTQFormatError
from skbio.io.fastq import (
_fastq_sniffer, _fastq_to_generator, _fastq_to_sequence_collection,
_fastq_to_alignment, _generator_to_fastq, _sequence_collection_to_fastq,
_alignment_to_fastq)
from skbio.util import get_data_path
import numpy as np
# Note: the example FASTQ files with file extension .fastq are taken from the
# following open-access publication's supplementary data:
#
# P.J.A. Cock, C.J. Fields, N. Goto, M.L. Heuer and P.M. Rice (2009). The
# Sanger FASTQ file format for sequences with quality scores, and the
# Solexa/Illumina FASTQ variants.
#
# See licenses/fastq-example-files-readme.txt for the original README that
# accompanied these files, which includes the terms of use and detailed
# description of the files.
#
# The example files bearing the original filenames have not been modified from
# their original form.
def _drop_kwargs(kwargs, *args):
for arg in args:
if arg in kwargs:
kwargs.pop(arg)
class TestSniffer(unittest.TestCase):
def setUp(self):
self.positives = [get_data_path(e) for e in [
'fastq_multi_seq_sanger',
'fastq_multi_blank_between_records',
'fastq_multi_ws_lines_between_records',
'fastq_multi_blank_end_of_file',
'fastq_multi_ws_lines_end_of_file',
'fastq_multi_whitespace_stripping',
'fastq_blank_lines',
'fastq_whitespace_only_lines',
'fastq_single_seq_illumina1.3',
'fastq_wrapping_as_illumina_no_description',
'fastq_wrapping_as_sanger_no_description',
'fastq_wrapping_original_sanger_no_description',
'fastq_writer_illumina1.3_defaults',
'fastq_writer_sanger_defaults',
'fastq_writer_sanger_non_defaults',
'fastq_5_blanks_start_of_file',
'fastq_5_ws_lines_start_of_file',
'illumina_full_range_as_illumina.fastq',
'illumina_full_range_as_sanger.fastq',
'illumina_full_range_original_illumina.fastq',
'longreads_as_illumina.fastq',
'longreads_as_sanger.fastq',
'longreads_original_sanger.fastq',
'misc_dna_as_illumina.fastq',
'misc_dna_as_sanger.fastq',
'misc_dna_original_sanger.fastq',
'misc_rna_as_illumina.fastq',
'misc_rna_as_sanger.fastq',
'misc_rna_original_sanger.fastq',
'sanger_full_range_as_illumina.fastq',
'sanger_full_range_as_sanger.fastq',
'sanger_full_range_original_sanger.fastq',
'solexa_full_range_original_solexa.fastq',
'wrapping_as_illumina.fastq',
'wrapping_as_sanger.fastq',
'wrapping_original_sanger.fastq'
]]
self.negatives = [get_data_path(e) for e in [
'empty',
'whitespace_only',
'fastq_multi_blank_start_of_file',
'fastq_multi_ws_lines_start_of_file',
'fastq_invalid_blank_after_header',
'fastq_invalid_blank_after_seq',
'fastq_invalid_blank_after_plus',
'fastq_invalid_blank_within_seq',
'fastq_invalid_blank_within_qual',
'fastq_invalid_ws_line_after_header',
'fastq_invalid_ws_line_after_seq',
'fastq_invalid_ws_line_after_plus',
'fastq_invalid_ws_line_within_seq',
'fastq_invalid_ws_line_within_qual',
'fastq_invalid_missing_header',
'fastq_invalid_missing_seq_data',
'error_diff_ids.fastq',
'error_double_qual.fastq',
'error_double_seq.fastq',
'error_long_qual.fastq',
'error_no_qual.fastq',
'error_qual_del.fastq',
'error_qual_escape.fastq',
'error_qual_null.fastq',
'error_qual_space.fastq',
'error_qual_tab.fastq',
'error_qual_unit_sep.fastq',
'error_qual_vtab.fastq',
'error_short_qual.fastq',
'error_spaces.fastq',
'error_tabs.fastq',
'error_trunc_at_seq.fastq',
'error_trunc_at_plus.fastq',
'error_trunc_at_qual.fastq',
'error_trunc_in_title.fastq',
'error_trunc_in_seq.fastq',
'error_trunc_in_plus.fastq',
'error_trunc_in_qual.fastq',
]]
def test_positives(self):
for fp in self.positives:
self.assertEqual(_fastq_sniffer(fp), (True, {}))
def test_negatives(self):
for fp in self.negatives:
self.assertEqual(_fastq_sniffer(fp), (False, {}))
class TestReaders(unittest.TestCase):
def setUp(self):
self.valid_configurations = [
([get_data_path('empty'),
get_data_path('whitespace_only')],
[{},
{'variant': 'illumina1.8'},
{'phred_offset': 33,
'constructor': DNA}],
[]),
([get_data_path('fastq_single_seq_illumina1.3')], [
{'variant': 'illumina1.3'},
{'phred_offset': 64},
{'variant': 'illumina1.3',
'constructor': Protein},
], [
('', 'bar\t baz', 'aCGT', [33, 34, 35, 36])
]),
([get_data_path('fastq_multi_seq_sanger'),
get_data_path('fastq_whitespace_only_lines'),
get_data_path('fastq_blank_lines'),
get_data_path('fastq_multi_blank_between_records'),
get_data_path('fastq_multi_ws_lines_between_records'),
get_data_path('fastq_multi_blank_end_of_file'),
get_data_path('fastq_multi_ws_lines_end_of_file'),
get_data_path('fastq_multi_blank_start_of_file'),
get_data_path('fastq_multi_ws_lines_start_of_file'),
get_data_path('fastq_multi_whitespace_stripping')], [
{'variant': 'sanger'},
{'phred_offset': 33, 'seq_num': 2},
{'variant': 'sanger',
'constructor': partial(RNA, validate=False),
'seq_num': 3},
], [
('foo', 'bar baz', 'AACCGG',
[16, 17, 18, 19, 20, 21]),
('bar', 'baz foo', 'TTGGCC',
[23, 22, 21, 20, 19, 18]),
('baz', 'foo bar', 'GATTTC',
[20, 21, 22, 23, 24, 18])
]),
]
self.invalid_files = [(get_data_path(e[0]), e[1], e[2]) for e in [
('fastq_invalid_blank_after_header', FASTQFormatError,
'blank or whitespace-only line.*after header.*in FASTQ'),
('fastq_invalid_blank_after_seq', FASTQFormatError,
"blank or whitespace-only line.*before '\+' in FASTQ"),
('fastq_invalid_blank_after_plus', FASTQFormatError,
"blank or whitespace-only line.*after '\+'.*in FASTQ"),
('fastq_invalid_blank_within_seq', FASTQFormatError,
'blank or whitespace-only line.*within sequence.*FASTQ'),
('fastq_invalid_blank_within_qual', FASTQFormatError,
"blank or whitespace-only line.*within quality scores.*in FASTQ"),
('fastq_invalid_ws_line_after_header', FASTQFormatError,
'blank or whitespace-only line.*after header.*in FASTQ'),
('fastq_invalid_ws_line_after_seq', FASTQFormatError,
"blank or whitespace-only line.*before '\+' in FASTQ"),
('fastq_invalid_ws_line_after_plus', FASTQFormatError,
"blank or whitespace-only line.*after '\+'.*in FASTQ"),
('fastq_invalid_ws_line_within_seq', FASTQFormatError,
'blank or whitespace-only line.*within sequence.*FASTQ'),
('fastq_invalid_ws_line_within_qual', FASTQFormatError,
"blank or whitespace-only line.*within quality scores.*in FASTQ"),
('fastq_invalid_missing_header', FASTQFormatError,
"sequence.*header.*start of file: 'seq1 desc1'"),
('fastq_invalid_missing_seq_data', FASTQFormatError,
'without sequence data'),
('error_diff_ids.fastq', FASTQFormatError,
"header lines do not match: "
"'SLXA-B3_649_FC8437_R1_1_1_850_123' != "
"'SLXA-B3_649_FC8437_R1_1_1_850_124'"),
('error_double_qual.fastq', FASTQFormatError,
"Extra quality.*'\+SLXA-B3_649_FC8437_R1_1_1_850_123'"),
('error_double_seq.fastq', FASTQFormatError,
'FASTQ record that is missing a quality \(\+\) header line'),
('error_long_qual.fastq', FASTQFormatError, "Extra quality.*'Y'"),
('error_no_qual.fastq', FASTQFormatError,
"blank or whitespace-only line.*after '\+'.*in FASTQ"),
('error_qual_del.fastq', ValueError,
'Decoded Phred score.*out of range'),
('error_qual_escape.fastq', ValueError,
'Decoded Phred score.*out of range'),
('error_qual_null.fastq', ValueError,
'Decoded Phred score.*out of range'),
('error_qual_space.fastq', ValueError,
'Decoded Phred score.*out of range'),
('error_qual_tab.fastq', ValueError,
'Decoded Phred score.*out of range'),
('error_qual_unit_sep.fastq', ValueError,
'Decoded Phred score.*out of range'),
('error_qual_vtab.fastq', ValueError,
'Decoded Phred score.*out of range'),
('error_short_qual.fastq', FASTQFormatError,
"Extra quality.*'SLXA-B3_649_FC8437_R1_1_1_362_549'"),
('error_spaces.fastq', FASTQFormatError,
"whitespace.*sequence data: 'GATGTGCAA TACCTTTGTA GAGGAA'"),
('error_tabs.fastq', FASTQFormatError,
r"whitespace.*sequence data: 'GATGTGCAA\\tTACCTTTGTA\\tGAGGAA'"),
('error_trunc_at_seq.fastq', FASTQFormatError,
'incomplete/truncated.*FASTQ'),
('error_trunc_at_plus.fastq', FASTQFormatError,
'incomplete/truncated.*FASTQ'),
('error_trunc_at_qual.fastq', FASTQFormatError,
'incomplete/truncated.*end of file'),
('error_trunc_in_title.fastq', FASTQFormatError,
'incomplete/truncated.*end of file'),
('error_trunc_in_seq.fastq', FASTQFormatError,
'incomplete/truncated.*end of file'),
('error_trunc_in_plus.fastq', FASTQFormatError,
"header lines do not match: "
"'SLXA-B3_649_FC8437_R1_1_1_183_714' != 'SLXA-B3_649_FC'"),
('error_trunc_in_qual.fastq', FASTQFormatError,
'incomplete/truncated.*end of file')
]]
def test_fastq_to_generator_valid_files(self):
for valid_files, kwargs, components in self.valid_configurations:
for valid in valid_files:
for observed_kwargs in kwargs:
_drop_kwargs(observed_kwargs, 'seq_num')
constructor = observed_kwargs.get('constructor', Sequence)
# Can't use partials for this because the read
# function below can't operate on partials
expected_kwargs = {}
if hasattr(constructor, 'lowercase'):
expected_kwargs['lowercase'] = 'introns'
observed_kwargs['lowercase'] = 'introns'
expected = [constructor(c[2],
metadata={'id': c[0],
'description': c[1]},
positional_metadata={'quality': np.array(c[3],
dtype=np.uint8)},
**expected_kwargs)
for c in components]
observed = list(_fastq_to_generator(valid,
**observed_kwargs))
self.assertEqual(len(expected), len(observed))
for o, e in zip(observed, expected):
self.assertEqual(o, e)
def test_fastq_to_generator_invalid_files_all_variants(self):
# files that should be invalid for all variants, as well as custom
# phred offsets
for fp, error_type, error_msg_regex in self.invalid_files:
for variant in 'sanger', 'illumina1.3', 'illumina1.8':
with self.assertRaisesRegexp(error_type, error_msg_regex):
list(_fastq_to_generator(fp, variant=variant))
for offset in 33, 64, 40, 77:
with self.assertRaisesRegexp(error_type, error_msg_regex):
list(_fastq_to_generator(fp, phred_offset=offset))
def test_fastq_to_generator_invalid_files_illumina(self):
# files that should be invalid for illumina1.3 and illumina1.8 variants
fps = [get_data_path(fp) for fp in
['sanger_full_range_original_sanger.fastq',
'solexa_full_range_original_solexa.fastq']]
for fp in fps:
with self.assertRaisesRegexp(ValueError, 'out of range \[0, 62\]'):
list(_fastq_to_generator(fp, variant='illumina1.3'))
with self.assertRaisesRegexp(ValueError, 'out of range \[0, 62\]'):
list(_fastq_to_generator(fp, variant='illumina1.8'))
def test_fastq_to_generator_solexa(self):
# solexa support isn't implemented yet. should raise error even with
# valid solexa file
with self.assertRaises(NotImplementedError):
list(_fastq_to_generator(
get_data_path('solexa_full_range_original_solexa.fastq'),
variant='solexa'))
def test_fastq_to_sequence(self):
for constructor in [Sequence, DNA, RNA, Protein]:
for valid_files, kwargs, components in self.valid_configurations:
for valid in valid_files:
                    # Skip the empty file case since we cannot read a specific
                    # sequence from an empty file.
if len(components) == 0:
continue
for observed_kwargs in kwargs:
expected_kwargs = {}
# TODO:
# some of the test files contain characters which are
# invalid for RNA, so don't validate for now. Need to
# fix this
if constructor is RNA:
observed_kwargs['validate'] = False
expected_kwargs['validate'] = False
_drop_kwargs(observed_kwargs, 'constructor')
# Can't use partials for this because the read
# function below can't operate on partials
if hasattr(constructor, 'lowercase'):
expected_kwargs['lowercase'] = 'introns'
observed_kwargs['lowercase'] = 'introns'
seq_num = observed_kwargs.get('seq_num', 1)
c = components[seq_num - 1]
expected = \
constructor(
c[2], metadata={'id': c[0],
'description': c[1]},
positional_metadata={'quality': np.array(c[3],
dtype=np.uint8)},
**expected_kwargs)
observed = read(valid, into=constructor,
format='fastq', verify=False,
**observed_kwargs)
self.assertEqual(observed, expected)
def test_fastq_to_sequence_collection(self):
for valid_files, kwargs, components in self.valid_configurations:
for valid in valid_files:
for observed_kwargs in kwargs:
_drop_kwargs(observed_kwargs, 'seq_num')
constructor = observed_kwargs.get('constructor', Sequence)
# Can't use partials for this because the read
# function below can't operate on partials
expected_kwargs = {}
if hasattr(constructor, 'lowercase'):
expected_kwargs['lowercase'] = 'introns'
observed_kwargs['lowercase'] = 'introns'
expected = SequenceCollection(
[constructor(
c[2], metadata={'id': c[0], 'description': c[1]},
positional_metadata={'quality': np.array(c[3],
np.uint8)},
**expected_kwargs)
for c in components])
observed = _fastq_to_sequence_collection(valid,
**observed_kwargs)
self.assertEqual(observed, expected)
def test_fastq_to_alignment(self):
for valid_files, kwargs, components in self.valid_configurations:
for valid in valid_files:
for observed_kwargs in kwargs:
_drop_kwargs(observed_kwargs, 'seq_num')
constructor = observed_kwargs.get('constructor', Sequence)
# Can't use partials for this because the read
# function below can't operate on partials
expected_kwargs = {}
if hasattr(constructor, 'lowercase'):
expected_kwargs['lowercase'] = 'introns'
observed_kwargs['lowercase'] = 'introns'
expected = Alignment(
[constructor(
c[2], metadata={'id': c[0],
'description': c[1]},
positional_metadata={'quality': np.array(c[3],
dtype=np.uint8)},
**expected_kwargs)
for c in components])
observed = _fastq_to_alignment(valid, **observed_kwargs)
self.assertEqual(observed, expected)
class TestWriters(unittest.TestCase):
def setUp(self):
self.valid_files = [
([
('f o o', 'bar\n\nbaz', 'AaCcGg',
[16, 17, 18, 19, 20, 21]),
('bar', 'baz foo', 'TtGgCc',
[23, 22, 21, 20, 19, 18]),
('ba\n\t\tz', 'foo bar', 'gAtTtC',
[20, 21, 22, 23, 24, 18])
], [
({'variant': 'sanger'},
get_data_path('fastq_writer_sanger_defaults')),
({'phred_offset': 33},
get_data_path('fastq_writer_sanger_defaults')),
({'variant': 'illumina1.8'},
get_data_path('fastq_writer_sanger_defaults')),
({'variant': 'illumina1.3'},
get_data_path('fastq_writer_illumina1.3_defaults')),
({'variant': 'sanger', 'id_whitespace_replacement': '%',
'description_newline_replacement': '^'},
get_data_path('fastq_writer_sanger_non_defaults'))
]),
]
def test_generator_to_fastq_kwargs_passed(self):
for components, kwargs_expected_fp in self.valid_files:
for kwargs, expected_fp in kwargs_expected_fp:
def gen():
for c in components:
yield Sequence(
c[2], metadata={'id': c[0], 'description': c[1]},
positional_metadata={'quality': c[3]})
fh = StringIO()
_generator_to_fastq(gen(), fh, **kwargs)
observed = fh.getvalue()
fh.close()
with open(expected_fp, 'U') as f:
expected = f.read()
self.assertEqual(observed, expected)
def test_sequence_to_fastq_kwargs_passed(self):
for constructor in [Sequence, DNA, RNA, Protein]:
for components, kwargs_expected_fp in self.valid_files:
for expected_kwargs, expected_fp in kwargs_expected_fp:
observed_kwargs = {}
# TODO:
# some of the test files contain characters which are
# invalid for RNA, so don't validate for now. Need to
# fix this
if constructor is RNA:
observed_kwargs['validate'] = False
# Can't use partials for this because the read
# function below can't operate on partials
if hasattr(constructor, 'lowercase'):
expected_kwargs['lowercase'] = 'introns'
observed_kwargs['lowercase'] = 'introns'
fh = StringIO()
for c in components:
obj = constructor(
c[2],
metadata={'id': c[0], 'description': c[1]},
positional_metadata={'quality': c[3]},
**observed_kwargs)
write(obj, into=fh, format='fastq', **expected_kwargs)
observed = fh.getvalue()
fh.close()
with open(expected_fp, 'U') as f:
expected = f.read()
self.assertEqual(observed, expected)
def test_sequence_collection_to_fastq_kwargs_passed(self):
for components, kwargs_expected_fp in self.valid_files:
for kwargs, expected_fp in kwargs_expected_fp:
obj = SequenceCollection([
DNA(c[2], metadata={'id': c[0], 'description': c[1]},
positional_metadata={'quality': c[3]},
lowercase='introns')
for c in components])
fh = StringIO()
kwargs['lowercase'] = 'introns'
_sequence_collection_to_fastq(obj, fh, **kwargs)
observed = fh.getvalue()
fh.close()
with open(expected_fp, 'U') as f:
expected = f.read()
self.assertEqual(observed, expected)
def test_alignment_to_fastq_kwargs_passed(self):
for components, kwargs_expected_fp in self.valid_files:
for kwargs, expected_fp in kwargs_expected_fp:
obj = Alignment([
Protein(c[2], metadata={'id': c[0], 'description': c[1]},
positional_metadata={'quality': c[3]},
lowercase='introns')
for c in components])
fh = StringIO()
kwargs['lowercase'] = 'introns'
_alignment_to_fastq(obj, fh, **kwargs)
observed = fh.getvalue()
fh.close()
with open(expected_fp, 'U') as f:
expected = f.read()
self.assertEqual(observed, expected)
def test_generator_to_fastq_no_qual(self):
def gen():
yield Sequence('ACGT',
metadata={'id': 'foo', 'description': 'bar'},
positional_metadata={'quality': range(4)})
yield Sequence('ACG', metadata={'id': 'foo', 'description': 'bar'})
with self.assertRaisesRegexp(ValueError, '2nd.*quality scores'):
_generator_to_fastq(gen(), StringIO(), variant='illumina1.8')
class TestConversions(unittest.TestCase):
def setUp(self):
self.conversions = [
(get_data_path('empty'),
get_data_path('empty'), [
({'variant': 'sanger'}, {'phred_offset': 42}),
]),
(get_data_path('longreads_original_sanger.fastq'),
get_data_path('longreads_as_sanger.fastq'), [
({'variant': 'sanger'}, {'variant': 'sanger'}),
({'phred_offset': 33}, {'variant': 'sanger'}),
({'variant': 'sanger'}, {'phred_offset': 33})
]),
(get_data_path('longreads_original_sanger.fastq'),
get_data_path('longreads_as_illumina.fastq'), [
({'variant': 'sanger'}, {'variant': 'illumina1.3'}),
({'phred_offset': 33}, {'variant': 'illumina1.3'}),
({'variant': 'sanger'}, {'phred_offset': 64})
]),
(get_data_path('wrapping_original_sanger.fastq'),
get_data_path('wrapping_as_sanger.fastq'), [
({'variant': 'sanger'}, {'variant': 'sanger'}),
({'phred_offset': 33}, {'variant': 'sanger'}),
({'variant': 'sanger'}, {'phred_offset': 33})
]),
(get_data_path('wrapping_original_sanger.fastq'),
get_data_path('wrapping_as_illumina.fastq'), [
({'variant': 'sanger'}, {'variant': 'illumina1.3'}),
({'phred_offset': 33}, {'variant': 'illumina1.3'}),
({'variant': 'sanger'}, {'phred_offset': 64})
]),
(get_data_path('sanger_full_range_original_sanger.fastq'),
get_data_path('sanger_full_range_as_sanger.fastq'), [
({'variant': 'sanger'}, {'variant': 'sanger'}),
({'phred_offset': 33}, {'variant': 'sanger'}),
({'variant': 'sanger'}, {'phred_offset': 33})
]),
(get_data_path('sanger_full_range_original_sanger.fastq'),
get_data_path('sanger_full_range_as_illumina.fastq'), [
({'variant': 'sanger'}, {'variant': 'illumina1.3'}),
({'phred_offset': 33}, {'variant': 'illumina1.3'}),
({'variant': 'sanger'}, {'phred_offset': 64})
]),
(get_data_path('illumina_full_range_original_illumina.fastq'),
get_data_path('illumina_full_range_as_illumina.fastq'), [
({'variant': 'illumina1.3'}, {'variant': 'illumina1.3'}),
({'phred_offset': 64}, {'variant': 'illumina1.3'}),
({'variant': 'illumina1.3'}, {'phred_offset': 64})
]),
(get_data_path('illumina_full_range_original_illumina.fastq'),
get_data_path('illumina_full_range_as_sanger.fastq'), [
({'variant': 'illumina1.3'}, {'variant': 'sanger'}),
({'phred_offset': 64}, {'variant': 'sanger'}),
({'variant': 'illumina1.3'}, {'phred_offset': 33})
]),
(get_data_path('misc_dna_original_sanger.fastq'),
get_data_path('misc_dna_as_sanger.fastq'), [
({'variant': 'sanger'}, {'variant': 'sanger'}),
({'phred_offset': 33}, {'variant': 'sanger'}),
({'variant': 'sanger'}, {'phred_offset': 33})
]),
(get_data_path('misc_dna_original_sanger.fastq'),
get_data_path('misc_dna_as_illumina.fastq'), [
({'variant': 'sanger'}, {'variant': 'illumina1.3'}),
({'phred_offset': 33}, {'variant': 'illumina1.3'}),
({'variant': 'sanger'}, {'phred_offset': 64})
]),
(get_data_path('misc_rna_original_sanger.fastq'),
get_data_path('misc_rna_as_sanger.fastq'), [
({'variant': 'sanger'}, {'variant': 'sanger'}),
({'phred_offset': 33}, {'variant': 'sanger'}),
({'variant': 'sanger'}, {'phred_offset': 33})
]),
(get_data_path('misc_rna_original_sanger.fastq'),
get_data_path('misc_rna_as_illumina.fastq'), [
({'variant': 'sanger'}, {'variant': 'illumina1.3'}),
({'phred_offset': 33}, {'variant': 'illumina1.3'}),
({'variant': 'sanger'}, {'phred_offset': 64})
]),
(get_data_path('fastq_wrapping_original_sanger_no_description'),
get_data_path('fastq_wrapping_as_sanger_no_description'), [
({'variant': 'sanger'}, {'variant': 'sanger'}),
({'phred_offset': 33}, {'variant': 'sanger'}),
({'variant': 'sanger'}, {'phred_offset': 33})
]),
(get_data_path('fastq_wrapping_original_sanger_no_description'),
get_data_path('fastq_wrapping_as_illumina_no_description'), [
({'variant': 'sanger'}, {'variant': 'illumina1.3'}),
({'phred_offset': 33}, {'variant': 'illumina1.3'}),
({'variant': 'sanger'}, {'phred_offset': 64})
]),
]
def test_conversion(self):
for from_fp, to_fp, kwargs in self.conversions:
for from_kwargs, to_kwargs in kwargs:
read_gen = _fastq_to_generator(from_fp, **from_kwargs)
fh = StringIO()
# will issue warning when truncating quality scores
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore")
_generator_to_fastq(read_gen, fh, **to_kwargs)
obs = fh.getvalue()
fh.close()
with open(to_fp, 'U') as fh:
exp = fh.read()
self.assertEqual(obs, exp)
if __name__ == '__main__':
unittest.main()
| Achuth17/scikit-bio | skbio/io/tests/test_fastq.py | Python | bsd-3-clause | 30,554 |
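# A standalone sketch (not part of the scikit-bio test suite above) of the
# Phred-offset arithmetic exercised by the writer/conversion tests: a quality
# string encoded with the Sanger offset (33) is decoded to integer scores and
# re-encoded with the Illumina 1.3+ offset (64). The helper names are
# illustrative, not scikit-bio API.
def decode_qual(qual_str, offset):
    """Decode an ASCII quality string into integer Phred scores."""
    return [ord(ch) - offset for ch in qual_str]
def encode_qual(scores, offset):
    """Encode integer Phred scores as an ASCII quality string."""
    return ''.join(chr(score + offset) for score in scores)
sanger_qual = 'II?5+'  # offset-33 encoding of [40, 40, 30, 20, 10]
scores = decode_qual(sanger_qual, 33)
illumina13_qual = encode_qual(scores, 64)  # 'hh^TJ'
assert decode_qual(illumina13_qual, 64) == scores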
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'UserInfo.avatar'
db.add_column('canvas_userinfo', 'avatar', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['canvas.Content'], null=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'UserInfo.avatar'
db.delete_column('canvas_userinfo', 'avatar_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'canvas.apiapp': {
'Meta': {'object_name': 'APIApp'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'canvas.apiauthtoken': {
'Meta': {'unique_together': "(('user', 'app'),)", 'object_name': 'APIAuthToken'},
'app': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.APIApp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'canvas.bestof': {
'Meta': {'object_name': 'BestOf'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'best_of'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Category']"}),
'chosen_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'best_of'", 'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {})
},
'canvas.category': {
'Meta': {'object_name': 'Category'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'founded': ('django.db.models.fields.FloatField', [], {'default': '1298956320'}),
'founder': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'founded_groups'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderators': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'moderated_categories'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.comment': {
'Meta': {'object_name': 'Comment'},
'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'judged': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ot_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'replies'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Comment']"}),
'parent_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'replied_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
'reply_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'used_in_comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'reply_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_index': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.commentflag': {
'Meta': {'object_name': 'CommentFlag'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'type_id': ('django.db.models.fields.IntegerField', [], {}),
'undone': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': "orm['auth.User']"})
},
'canvas.commentmoderationlog': {
'Meta': {'object_name': 'CommentModerationLog'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'note': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderated_comments_log'", 'to': "orm['auth.User']"}),
'visibility': ('django.db.models.fields.IntegerField', [], {})
},
'canvas.commentpin': {
'Meta': {'object_name': 'CommentPin'},
'auto': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'canvas.commentsticker': {
'Meta': {'object_name': 'CommentSticker'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stickers'", 'to': "orm['canvas.Comment']"}),
'epic_message': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '140', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'type_id': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'canvas.commentstickerlog': {
'Meta': {'object_name': 'CommentStickerLog'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'canvas.content': {
'Meta': {'object_name': 'Content'},
'alpha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'animated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'remix_of': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'remixes'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'remix_text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'source_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4000', 'blank': 'True'}),
'stamps_used': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'used_as_stamp'", 'blank': 'True', 'to': "orm['canvas.Content']"}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'url_mapping': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.ContentUrlMapping']", 'null': 'True', 'blank': 'True'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.contenturlmapping': {
'Meta': {'object_name': 'ContentUrlMapping'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'canvas.emailunsubscribe': {
'Meta': {'object_name': 'EmailUnsubscribe'},
'email': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'canvas.externalcontent': {
'Meta': {'object_name': 'ExternalContent'},
'_data': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'content_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'external_content'", 'to': "orm['canvas.Comment']"}),
'source_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4000', 'null': 'True', 'blank': 'True'})
},
'canvas.facebookinvite': {
'Meta': {'object_name': 'FacebookInvite'},
'fb_message_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invited_fbid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'invitee': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'facebook_invited_from'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'inviter': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'facebook_sent_invites'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"})
},
'canvas.facebookuser': {
'Meta': {'object_name': 'FacebookUser'},
'email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'fb_uid': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'gender': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_invited': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'canvas.followcategory': {
'Meta': {'unique_together': "(('user', 'category'),)", 'object_name': 'FollowCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'followers'", 'to': "orm['canvas.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'following'", 'to': "orm['auth.User']"})
},
'canvas.invitecode': {
'Meta': {'object_name': 'InviteCode'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invitee': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'invited_from'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'inviter': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'sent_invites'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"})
},
'canvas.remixplugin': {
'Meta': {'object_name': 'RemixPlugin'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
's3md5': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'})
},
'canvas.stashcontent': {
'Meta': {'object_name': 'StashContent'},
'content': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Content']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'canvas.userinfo': {
'Meta': {'object_name': 'UserInfo'},
'avatar': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Content']", 'null': 'True'}),
'bio_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'enable_timeline': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'enable_timeline_posts': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'facebook_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'free_invites': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invite_bypass': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'is_qa': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'post_anonymously': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'profile_image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']", 'null': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'canvas.usermoderationlog': {
'Meta': {'object_name': 'UserModerationLog'},
'action': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'note': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderation_log'", 'to': "orm['auth.User']"})
},
'canvas.userwarning': {
'Meta': {'object_name': 'UserWarning'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
'confirmed': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'custom_message': ('django.db.models.fields.TextField', [], {}),
'disable_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issued': ('canvas.util.UnixTimestampField', [], {}),
'stock_message': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_warnings'", 'to': "orm['auth.User']"}),
'viewed': ('canvas.util.UnixTimestampField', [], {'default': '0'})
},
'canvas.welcomeemailrecipient': {
'Meta': {'object_name': 'WelcomeEmailRecipient'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'canvas_auth.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'", '_ormbases': ['auth.User'], 'proxy': 'True'}
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['canvas']
| canvasnetworks/canvas | website/canvas/migrations/0163_auto__add_field_userinfo_avatar.py | Python | bsd-3-clause | 21,325 |
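# A hypothetical companion sketch (not from the canvas codebase) of the same
# South pattern used in the migration above. The field name 'banner' is made up
# purely for illustration; only APIs already used above (db.add_column,
# db.delete_column, self.gf) appear here. Note the asymmetry: forwards() adds
# the model field, while backwards() must drop the underlying database column,
# which for a ForeignKey is suffixed with '_id'.
from south.db import db
from south.v2 import SchemaMigration
class ExampleMigration(SchemaMigration):
    def forwards(self, orm):
        # Adding a nullable FK field 'UserInfo.banner' (hypothetical)
        db.add_column('canvas_userinfo', 'banner', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['canvas.Content'], null=True), keep_default=False)
    def backwards(self, orm):
        # The FK is stored in the 'banner_id' column, so that is what is dropped
        db.delete_column('canvas_userinfo', 'banner_id')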
from statsmodels.compat.python import lzip
import numpy as np
from scipy import stats
from statsmodels.distributions import ECDF
from statsmodels.regression.linear_model import OLS
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.tools import add_constant
from . import utils
__all__ = ["qqplot", "qqplot_2samples", "qqline", "ProbPlot"]
class ProbPlot(object):
"""
Q-Q and P-P Probability Plots
Can take arguments specifying the parameters for dist or fit them
automatically. (See fit under kwargs.)
Parameters
----------
data : array_like
A 1d data array
dist : callable
Compare x against dist. A scipy.stats or statsmodels distribution. The
default is scipy.stats.distributions.norm (a standard normal). Can be
a SciPy frozen distribution.
fit : bool
If fit is false, loc, scale, and distargs are passed to the
distribution. If fit is True then the parameters for dist are fit
automatically using dist.fit. The quantiles are formed from the
standardized data, after subtracting the fitted loc and dividing by
the fitted scale. fit cannot be used if dist is a SciPy frozen
distribution.
distargs : tuple
A tuple of arguments passed to dist to specify it fully so dist.ppf
may be called. distargs must not contain loc or scale. These values
must be passed using the loc or scale inputs. distargs cannot be used
if dist is a SciPy frozen distribution.
a : float
Offset for the plotting position of an expected order statistic, for
example. The plotting positions are given by
        (i - a)/(nobs - 2*a + 1) for i in range(1, nobs+1)
loc : float
Location parameter for dist. Cannot be used if dist is a SciPy frozen
distribution.
scale : float
Scale parameter for dist. Cannot be used if dist is a SciPy frozen
distribution.
See Also
--------
scipy.stats.probplot
Notes
-----
1) Depends on matplotlib.
2) If `fit` is True then the parameters are fit using the
distribution's `fit()` method.
3) The call signatures for the `qqplot`, `ppplot`, and `probplot`
methods are similar, so examples 1 through 4 apply to all
three methods.
4) The three plotting methods are summarized below:
ppplot : Probability-Probability plot
Compares the sample and theoretical probabilities (percentiles).
qqplot : Quantile-Quantile plot
Compares the sample and theoretical quantiles
probplot : Probability plot
Same as a Q-Q plot, however probabilities are shown in the scale of
the theoretical distribution (x-axis) and the y-axis contains
unscaled quantiles of the sample data.
Examples
--------
The first example shows a Q-Q plot for regression residuals
>>> # example 1
>>> import statsmodels.api as sm
>>> from matplotlib import pyplot as plt
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> model = sm.OLS(data.endog, data.exog)
>>> mod_fit = model.fit()
>>> res = mod_fit.resid # residuals
>>> pplot = sm.ProbPlot(res)
>>> fig = pplot.qqplot()
>>> h = plt.title("Ex. 1 - qqplot - residuals of OLS fit")
>>> plt.show()
qqplot of the residuals against quantiles of t-distribution with 4
degrees of freedom:
>>> # example 2
>>> import scipy.stats as stats
>>> pplot = sm.ProbPlot(res, stats.t, distargs=(4,))
>>> fig = pplot.qqplot()
>>> h = plt.title("Ex. 2 - qqplot - residuals against quantiles of t-dist")
>>> plt.show()
qqplot against same as above, but with mean 3 and std 10:
>>> # example 3
>>> pplot = sm.ProbPlot(res, stats.t, distargs=(4,), loc=3, scale=10)
>>> fig = pplot.qqplot()
>>> h = plt.title("Ex. 3 - qqplot - resids vs quantiles of t-dist")
>>> plt.show()
Automatically determine parameters for t distribution including the
loc and scale:
>>> # example 4
>>> pplot = sm.ProbPlot(res, stats.t, fit=True)
>>> fig = pplot.qqplot(line="45")
>>> h = plt.title("Ex. 4 - qqplot - resids vs. quantiles of fitted t-dist")
>>> plt.show()
A second `ProbPlot` object can be used to compare two separate sample
sets by using the `other` kwarg in the `qqplot` and `ppplot` methods.
>>> # example 5
>>> import numpy as np
>>> x = np.random.normal(loc=8.25, scale=2.75, size=37)
>>> y = np.random.normal(loc=8.75, scale=3.25, size=37)
>>> pp_x = sm.ProbPlot(x, fit=True)
>>> pp_y = sm.ProbPlot(y, fit=True)
>>> fig = pp_x.qqplot(line="45", other=pp_y)
>>> h = plt.title("Ex. 5 - qqplot - compare two sample sets")
>>> plt.show()
In qqplot, sample size of `other` can be equal or larger than the first.
In case of larger, size of `other` samples will be reduced to match the
size of the first by interpolation
>>> # example 6
>>> x = np.random.normal(loc=8.25, scale=2.75, size=37)
>>> y = np.random.normal(loc=8.75, scale=3.25, size=57)
>>> pp_x = sm.ProbPlot(x, fit=True)
>>> pp_y = sm.ProbPlot(y, fit=True)
>>> fig = pp_x.qqplot(line="45", other=pp_y)
>>> title = "Ex. 6 - qqplot - compare different sample sizes"
>>> h = plt.title(title)
>>> plt.show()
In ppplot, sample size of `other` and the first can be different. `other`
will be used to estimate an empirical cumulative distribution function
(ECDF). ECDF(x) will be plotted against p(x)=0.5/n, 1.5/n, ..., (n-0.5)/n
where x are sorted samples from the first.
>>> # example 7
>>> x = np.random.normal(loc=8.25, scale=2.75, size=37)
>>> y = np.random.normal(loc=8.75, scale=3.25, size=57)
>>> pp_x = sm.ProbPlot(x, fit=True)
>>> pp_y = sm.ProbPlot(y, fit=True)
>>> pp_y.ppplot(line="45", other=pp_x)
>>> plt.title("Ex. 7A- ppplot - compare two sample sets, other=pp_x")
>>> pp_x.ppplot(line="45", other=pp_y)
>>> plt.title("Ex. 7B- ppplot - compare two sample sets, other=pp_y")
>>> plt.show()
The following plot displays some options, follow the link to see the
code.
.. plot:: plots/graphics_gofplots_qqplot.py
"""
def __init__(
self,
data,
dist=stats.norm,
fit=False,
distargs=(),
a=0,
loc=0,
scale=1,
):
self.data = data
self.a = a
self.nobs = data.shape[0]
self.distargs = distargs
self.fit = fit
self._is_frozen = isinstance(dist, stats.distributions.rv_frozen)
if self._is_frozen and (
fit or loc != 0 or scale != 1 or distargs != ()
):
raise ValueError(
"Frozen distributions cannot be combined with fit, loc, scale"
" or distargs."
)
        # properties
self._cache = {}
if self._is_frozen:
self.dist = dist
dist_gen = dist.dist
shapes = dist_gen.shapes
if shapes is not None:
shape_args = tuple(map(str.strip, shapes.split(",")))
else:
shape_args = ()
numargs = len(shape_args)
args = dist.args
if len(args) >= numargs + 1:
self.loc = args[numargs]
else:
self.loc = dist.kwds.get("loc", loc)
if len(args) >= numargs + 2:
self.scale = args[numargs + 1]
else:
self.scale = dist.kwds.get("scale", scale)
fit_params = []
for i, arg in enumerate(shape_args):
if arg in dist.kwds:
value = dist.kwds[arg]
else:
value = dist.args[i]
fit_params.append(value)
self.fit_params = np.r_[fit_params, self.loc, self.scale]
elif fit:
self.fit_params = dist.fit(data)
self.loc = self.fit_params[-2]
self.scale = self.fit_params[-1]
if len(self.fit_params) > 2:
self.dist = dist(*self.fit_params[:-2], **dict(loc=0, scale=1))
else:
self.dist = dist(loc=0, scale=1)
elif distargs or loc != 0 or scale != 1:
try:
self.dist = dist(*distargs, **dict(loc=loc, scale=scale))
except Exception:
distargs = ", ".join([str(da) for da in distargs])
cmd = "dist({distargs}, loc={loc}, scale={scale})"
cmd = cmd.format(distargs=distargs, loc=loc, scale=scale)
raise TypeError(
"Initializing the distribution failed. This "
"can occur if distargs contains loc or scale. "
"The distribution initialization command "
"is:\n{cmd}".format(cmd=cmd)
)
self.loc = loc
self.scale = scale
self.fit_params = np.r_[distargs, loc, scale]
else:
self.dist = dist
self.loc = loc
self.scale = scale
self.fit_params = np.r_[loc, scale]
@cache_readonly
def theoretical_percentiles(self):
"""Theoretical percentiles"""
return plotting_pos(self.nobs, self.a)
@cache_readonly
def theoretical_quantiles(self):
"""Theoretical quantiles"""
try:
return self.dist.ppf(self.theoretical_percentiles)
except TypeError:
msg = "%s requires more parameters to compute ppf".format(
self.dist.name,
)
raise TypeError(msg)
except Exception as exc:
msg = "failed to compute the ppf of {0}".format(self.dist.name)
raise type(exc)(msg)
@cache_readonly
def sorted_data(self):
"""sorted data"""
sorted_data = np.array(self.data, copy=True)
sorted_data.sort()
return sorted_data
@cache_readonly
def sample_quantiles(self):
"""sample quantiles"""
if self.fit and self.loc != 0 and self.scale != 1:
return (self.sorted_data - self.loc) / self.scale
else:
return self.sorted_data
@cache_readonly
def sample_percentiles(self):
"""Sample percentiles"""
_check_for(self.dist, "cdf")
if self._is_frozen:
return self.dist.cdf(self.sorted_data)
quantiles = (self.sorted_data - self.fit_params[-2]) / self.fit_params[
-1
]
return self.dist.cdf(quantiles)
def ppplot(
self,
xlabel=None,
ylabel=None,
line=None,
other=None,
ax=None,
**plotkwargs,
):
"""
Plot of the percentiles of x versus the percentiles of a distribution.
Parameters
----------
xlabel : str or None, optional
User-provided labels for the x-axis. If None (default),
other values are used depending on the status of the kwarg `other`.
ylabel : str or None, optional
User-provided labels for the y-axis. If None (default),
other values are used depending on the status of the kwarg `other`.
        line : {None, "45", "s", "r", "q"}, optional
Options for the reference line to which the data is compared:
- "45": 45-degree line
- "s": standardized line, the expected order statistics are
scaled by the standard deviation of the given sample and have
the mean added to them
- "r": A regression line is fit
- "q": A line is fit through the quartiles.
- None: by default no reference line is added to the plot.
other : ProbPlot, array_like, or None, optional
If provided, ECDF(x) will be plotted against p(x) where x are
sorted samples from `self`. ECDF is an empirical cumulative
distribution function estimated from `other` and
p(x) = 0.5/n, 1.5/n, ..., (n-0.5)/n where n is the number of
            samples in `self`. If an array-like object is provided, it will be
            turned into a `ProbPlot` instance using default parameters. If not
            provided (default), `self.dist(x)` is plotted against p(x).
ax : AxesSubplot, optional
If given, this subplot is used to plot in instead of a new figure
being created.
**plotkwargs
Additional arguments to be passed to the `plot` command.
Returns
-------
Figure
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
"""
if other is not None:
check_other = isinstance(other, ProbPlot)
if not check_other:
other = ProbPlot(other)
p_x = self.theoretical_percentiles
ecdf_x = ECDF(other.sample_quantiles)(self.sample_quantiles)
fig, ax = _do_plot(
p_x, ecdf_x, self.dist, ax=ax, line=line, **plotkwargs
)
if xlabel is None:
xlabel = "Probabilities of 2nd Sample"
if ylabel is None:
ylabel = "Probabilities of 1st Sample"
else:
fig, ax = _do_plot(
self.theoretical_percentiles,
self.sample_percentiles,
self.dist,
ax=ax,
line=line,
**plotkwargs,
)
if xlabel is None:
xlabel = "Theoretical Probabilities"
if ylabel is None:
ylabel = "Sample Probabilities"
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.0])
return fig
def qqplot(
self,
xlabel=None,
ylabel=None,
line=None,
other=None,
ax=None,
swap: bool = False,
**plotkwargs,
):
"""
Plot of the quantiles of x versus the quantiles/ppf of a distribution.
Can also be used to plot against the quantiles of another `ProbPlot`
instance.
Parameters
----------
xlabel : {None, str}
User-provided labels for the x-axis. If None (default),
other values are used depending on the status of the kwarg `other`.
ylabel : {None, str}
User-provided labels for the y-axis. If None (default),
other values are used depending on the status of the kwarg `other`.
        line : {None, "45", "s", "r", "q"}, optional
Options for the reference line to which the data is compared:
- "45" - 45-degree line
- "s" - standardized line, the expected order statistics are scaled
by the standard deviation of the given sample and have the mean
added to them
- "r" - A regression line is fit
- "q" - A line is fit through the quartiles.
- None - by default no reference line is added to the plot.
other : {ProbPlot, array_like, None}, optional
If provided, the sample quantiles of this `ProbPlot` instance are
plotted against the sample quantiles of the `other` `ProbPlot`
instance. Sample size of `other` must be equal or larger than
this `ProbPlot` instance. If the sample size is larger, sample
quantiles of `other` will be interpolated to match the sample size
of this `ProbPlot` instance. If an array-like object is provided,
it will be turned into a `ProbPlot` instance using default
parameters. If not provided (default), the theoretical quantiles
are used.
ax : AxesSubplot, optional
If given, this subplot is used to plot in instead of a new figure
being created.
swap : bool, optional
Flag indicating to swap the x and y labels.
**plotkwargs
Additional arguments to be passed to the `plot` command.
Returns
-------
Figure
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
"""
if other is not None:
check_other = isinstance(other, ProbPlot)
if not check_other:
other = ProbPlot(other)
s_self = self.sample_quantiles
s_other = other.sample_quantiles
if len(s_self) > len(s_other):
raise ValueError(
"Sample size of `other` must be equal or "
+ "larger than this `ProbPlot` instance"
)
elif len(s_self) < len(s_other):
# Use quantiles of the smaller set and interpolate quantiles of
# the larger data set
p = plotting_pos(self.nobs, self.a)
s_other = stats.mstats.mquantiles(s_other, p)
fig, ax = _do_plot(
s_other, s_self, self.dist, ax=ax, line=line, **plotkwargs
)
if xlabel is None:
xlabel = "Quantiles of 2nd Sample"
if ylabel is None:
ylabel = "Quantiles of 1st Sample"
if swap:
xlabel, ylabel = ylabel, xlabel
else:
fig, ax = _do_plot(
self.theoretical_quantiles,
self.sample_quantiles,
self.dist,
ax=ax,
line=line,
**plotkwargs,
)
if xlabel is None:
xlabel = "Theoretical Quantiles"
if ylabel is None:
ylabel = "Sample Quantiles"
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return fig
def probplot(
self,
xlabel=None,
ylabel=None,
line=None,
exceed=False,
ax=None,
**plotkwargs,
):
"""
Plot of unscaled quantiles of x against the prob of a distribution.
The x-axis is scaled linearly with the quantiles, but the probabilities
are used to label the axis.
Parameters
----------
xlabel : {None, str}, optional
User-provided labels for the x-axis. If None (default),
other values are used depending on the status of the kwarg `other`.
ylabel : {None, str}, optional
User-provided labels for the y-axis. If None (default),
other values are used depending on the status of the kwarg `other`.
        line : {None, "45", "s", "r", "q"}, optional
Options for the reference line to which the data is compared:
- "45" - 45-degree line
- "s" - standardized line, the expected order statistics are scaled
by the standard deviation of the given sample and have the mean
added to them
- "r" - A regression line is fit
- "q" - A line is fit through the quartiles.
- None - by default no reference line is added to the plot.
exceed : bool, optional
If False (default) the raw sample quantiles are plotted against
            the theoretical quantiles, showing the probability that a sample will
not exceed a given value. If True, the theoretical quantiles are
flipped such that the figure displays the probability that a
sample will exceed a given value.
ax : AxesSubplot, optional
If given, this subplot is used to plot in instead of a new figure
being created.
**plotkwargs
Additional arguments to be passed to the `plot` command.
Returns
-------
Figure
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
"""
if exceed:
fig, ax = _do_plot(
self.theoretical_quantiles[::-1],
self.sorted_data,
self.dist,
ax=ax,
line=line,
**plotkwargs,
)
if xlabel is None:
xlabel = "Probability of Exceedance (%)"
else:
fig, ax = _do_plot(
self.theoretical_quantiles,
self.sorted_data,
self.dist,
ax=ax,
line=line,
**plotkwargs,
)
if xlabel is None:
xlabel = "Non-exceedance Probability (%)"
if ylabel is None:
ylabel = "Sample Quantiles"
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
_fmt_probplot_axis(ax, self.dist, self.nobs)
return fig
def qqplot(
data,
dist=stats.norm,
distargs=(),
a=0,
loc=0,
scale=1,
fit=False,
line=None,
ax=None,
**plotkwargs,
):
"""
Q-Q plot of the quantiles of x versus the quantiles/ppf of a distribution.
Can take arguments specifying the parameters for dist or fit them
automatically. (See fit under Parameters.)
Parameters
----------
data : array_like
A 1d data array.
dist : callable
Comparison distribution. The default is
scipy.stats.distributions.norm (a standard normal).
distargs : tuple
A tuple of arguments passed to dist to specify it fully
so dist.ppf may be called.
a : float
Offset for the plotting position of an expected order statistic, for
example. The plotting positions are given by (i - a)/(nobs - 2*a + 1)
        for i in range(1, nobs+1)
loc : float
Location parameter for dist
scale : float
Scale parameter for dist
fit : bool
If fit is false, loc, scale, and distargs are passed to the
distribution. If fit is True then the parameters for dist
are fit automatically using dist.fit. The quantiles are formed
from the standardized data, after subtracting the fitted loc
and dividing by the fitted scale.
line : {None, "45", "s", "r", "q"}
Options for the reference line to which the data is compared:
- "45" - 45-degree line
- "s" - standardized line, the expected order statistics are scaled
by the standard deviation of the given sample and have the mean
added to them
- "r" - A regression line is fit
- "q" - A line is fit through the quartiles.
- None - by default no reference line is added to the plot.
ax : AxesSubplot, optional
If given, this subplot is used to plot in instead of a new figure being
created.
**plotkwargs
Additional matplotlib arguments to be passed to the `plot` command.
Returns
-------
Figure
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
See Also
--------
scipy.stats.probplot
Notes
-----
Depends on matplotlib. If `fit` is True then the parameters are fit using
the distribution's fit() method.
Examples
--------
>>> import statsmodels.api as sm
>>> from matplotlib import pyplot as plt
>>> data = sm.datasets.longley.load()
>>> exog = sm.add_constant(data.exog)
>>> mod_fit = sm.OLS(data.endog, exog).fit()
>>> res = mod_fit.resid # residuals
>>> fig = sm.qqplot(res)
>>> plt.show()
qqplot of the residuals against quantiles of t-distribution with 4 degrees
of freedom:
>>> import scipy.stats as stats
>>> fig = sm.qqplot(res, stats.t, distargs=(4,))
>>> plt.show()
qqplot against same as above, but with mean 3 and std 10:
>>> fig = sm.qqplot(res, stats.t, distargs=(4,), loc=3, scale=10)
>>> plt.show()
Automatically determine parameters for t distribution including the
loc and scale:
>>> fig = sm.qqplot(res, stats.t, fit=True, line="45")
>>> plt.show()
The following plot displays some options, follow the link to see the code.
.. plot:: plots/graphics_gofplots_qqplot.py
"""
probplot = ProbPlot(
data, dist=dist, distargs=distargs, fit=fit, a=a, loc=loc, scale=scale
)
fig = probplot.qqplot(ax=ax, line=line, **plotkwargs)
return fig
def qqplot_2samples(
data1, data2, xlabel=None, ylabel=None, line=None, ax=None
):
"""
Q-Q Plot of two samples' quantiles.
Can take either two `ProbPlot` instances or two array-like objects. In the
case of the latter, both inputs will be converted to `ProbPlot` instances
using only the default values - so use `ProbPlot` instances if
finer-grained control of the quantile computations is required.
Parameters
----------
data1 : {array_like, ProbPlot}
Data to plot along x axis. If the sample sizes are unequal, the longer
series is always plotted along the x-axis.
data2 : {array_like, ProbPlot}
Data to plot along y axis. Does not need to have the same number of
observations as data 1. If the sample sizes are unequal, the longer
series is always plotted along the x-axis.
xlabel : {None, str}
User-provided labels for the x-axis. If None (default),
other values are used.
ylabel : {None, str}
User-provided labels for the y-axis. If None (default),
other values are used.
    line : {None, "45", "s", "r", "q"}
Options for the reference line to which the data is compared:
- "45" - 45-degree line
- "s" - standardized line, the expected order statistics are scaled
by the standard deviation of the given sample and have the mean
added to them
- "r" - A regression line is fit
- "q" - A line is fit through the quartiles.
- None - by default no reference line is added to the plot.
ax : AxesSubplot, optional
If given, this subplot is used to plot in instead of a new figure being
created.
Returns
-------
Figure
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
See Also
--------
scipy.stats.probplot
Notes
-----
1) Depends on matplotlib.
2) If `data1` and `data2` are not `ProbPlot` instances, instances will be
created using the default parameters. Therefore, it is recommended to use
`ProbPlot` instance if fine-grained control is needed in the computation
of the quantiles.
Examples
--------
>>> import statsmodels.api as sm
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from statsmodels.graphics.gofplots import qqplot_2samples
>>> x = np.random.normal(loc=8.5, scale=2.5, size=37)
>>> y = np.random.normal(loc=8.0, scale=3.0, size=37)
>>> pp_x = sm.ProbPlot(x)
>>> pp_y = sm.ProbPlot(y)
>>> qqplot_2samples(pp_x, pp_y)
>>> plt.show()
.. plot:: plots/graphics_gofplots_qqplot_2samples.py
>>> fig = qqplot_2samples(pp_x, pp_y, xlabel=None, ylabel=None,
... line=None, ax=None)
"""
if not isinstance(data1, ProbPlot):
data1 = ProbPlot(data1)
if not isinstance(data2, ProbPlot):
data2 = ProbPlot(data2)
if data2.data.shape[0] > data1.data.shape[0]:
fig = data1.qqplot(
xlabel=ylabel, ylabel=xlabel, line=line, other=data2, ax=ax
)
else:
fig = data2.qqplot(
xlabel=ylabel,
ylabel=xlabel,
line=line,
other=data1,
ax=ax,
swap=True,
)
return fig
def qqline(ax, line, x=None, y=None, dist=None, fmt="r-", **lineoptions):
"""
Plot a reference line for a qqplot.
Parameters
----------
ax : matplotlib axes instance
The axes on which to plot the line
line : str {"45","r","s","q"}
        Options for the reference line to which the data is compared:
- "45" - 45-degree line
- "s" - standardized line, the expected order statistics are scaled by
the standard deviation of the given sample and have the mean
added to them
- "r" - A regression line is fit
- "q" - A line is fit through the quartiles.
- None - By default no reference line is added to the plot.
x : ndarray
X data for plot. Not needed if line is "45".
y : ndarray
Y data for plot. Not needed if line is "45".
dist : scipy.stats.distribution
A scipy.stats distribution, needed if line is "q".
fmt : str, optional
Line format string passed to `plot`.
**lineoptions
Additional arguments to be passed to the `plot` command.
Notes
-----
There is no return value. The line is plotted on the given `ax`.
Examples
--------
    Import the food expenditure dataset. Plot annual household income on the
    x-axis and food expenditure on the y-axis. Use qqline to add a regression
    line to the plot.
>>> import statsmodels.api as sm
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from statsmodels.graphics.gofplots import qqline
>>> foodexp = sm.datasets.engel.load()
>>> x = foodexp.exog
>>> y = foodexp.endog
>>> ax = plt.subplot(111)
>>> plt.scatter(x, y)
>>> ax.set_xlabel(foodexp.exog_name[0])
>>> ax.set_ylabel(foodexp.endog_name)
>>> qqline(ax, "r", x, y)
>>> plt.show()
.. plot:: plots/graphics_gofplots_qqplot_qqline.py
"""
lineoptions = lineoptions.copy()
for ls in ("-", "--", "-.", ":"):
if ls in fmt:
lineoptions.setdefault("linestyle", ls)
fmt = fmt.replace(ls, "")
break
for marker in (
".",
",",
"o",
"v",
"^",
"<",
">",
"1",
"2",
"3",
"4",
"8",
"s",
"p",
"P",
"*",
"h",
"H",
"+",
"x",
"X",
"D",
"d",
"|",
"_",
):
if marker in fmt:
lineoptions.setdefault("marker", marker)
fmt = fmt.replace(marker, "")
break
if fmt:
lineoptions.setdefault("color", fmt)
if line == "45":
end_pts = lzip(ax.get_xlim(), ax.get_ylim())
end_pts[0] = min(end_pts[0])
end_pts[1] = max(end_pts[1])
ax.plot(end_pts, end_pts, **lineoptions)
ax.set_xlim(end_pts)
ax.set_ylim(end_pts)
return # does this have any side effects?
if x is None or y is None:
raise ValueError("If line is not 45, x and y cannot be None.")
x = np.array(x)
y = np.array(y)
if line == "r":
# could use ax.lines[0].get_xdata(), get_ydata(),
# but don't know axes are "clean"
y = OLS(y, add_constant(x)).fit().fittedvalues
ax.plot(x, y, **lineoptions)
elif line == "s":
m, b = np.std(y), np.mean(y)
ref_line = x * m + b
ax.plot(x, ref_line, **lineoptions)
elif line == "q":
_check_for(dist, "ppf")
q25 = stats.scoreatpercentile(y, 25)
q75 = stats.scoreatpercentile(y, 75)
theoretical_quartiles = dist.ppf([0.25, 0.75])
m = (q75 - q25) / np.diff(theoretical_quartiles)
b = q25 - m * theoretical_quartiles[0]
ax.plot(x, m * x + b, **lineoptions)
# about 10x faster than plotting_position in sandbox and mstats
def plotting_pos(nobs, a=0.0, b=None):
"""
Generates sequence of plotting positions
Parameters
----------
nobs : int
Number of probability points to plot
a : float, default 0.0
alpha parameter for the plotting position of an expected order
statistic
b : float, default None
beta parameter for the plotting position of an expected order
statistic. If None, then b is set to a.
Returns
-------
ndarray
The plotting positions
Notes
-----
The plotting positions are given by (i - a)/(nobs + 1 - a - b) for i in
range(1, nobs+1)
See Also
--------
scipy.stats.mstats.plotting_positions
Additional information on alpha and beta
"""
b = a if b is None else b
return (np.arange(1.0, nobs + 1) - a) / (nobs + 1 - a - b)
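# Worked example of the formula above, shown as a comment so that nothing runs
# at import time (not part of the original module): with the defaults a=0, b=0
# and nobs=4 the positions are (i - 0) / (4 + 1 - 0 - 0) for i = 1..4, i.e.
#
#     >>> plotting_pos(4)
#     array([0.2, 0.4, 0.6, 0.8])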
def _fmt_probplot_axis(ax, dist, nobs):
"""
Formats a theoretical quantile axis to display the corresponding
probabilities on the quantiles' scale.
Parameters
----------
ax : AxesSubplot, optional
The axis to be formatted
nobs : scalar
Number of observations in the sample
dist : scipy.stats.distribution
A scipy.stats distribution sufficiently specified to implement its
ppf() method.
Returns
-------
There is no return value. This operates on `ax` in place
"""
_check_for(dist, "ppf")
axis_probs = np.linspace(10, 90, 9, dtype=float)
small = np.array([1.0, 2, 5])
axis_probs = np.r_[small, axis_probs, 100 - small[::-1]]
if nobs >= 50:
axis_probs = np.r_[small / 10, axis_probs, 100 - small[::-1] / 10]
if nobs >= 500:
axis_probs = np.r_[small / 100, axis_probs, 100 - small[::-1] / 100]
axis_probs /= 100.0
axis_qntls = dist.ppf(axis_probs)
ax.set_xticks(axis_qntls)
ax.set_xticklabels(
axis_probs * 100,
rotation=45,
rotation_mode="anchor",
horizontalalignment="right",
verticalalignment="center",
)
ax.set_xlim([axis_qntls.min(), axis_qntls.max()])
def _do_plot(
x, y, dist=None, line=None, ax=None, fmt="b", step=False, **kwargs
):
"""
Boiler plate plotting function for the `ppplot`, `qqplot`, and
`probplot` methods of the `ProbPlot` class
Parameters
----------
x : array_like
X-axis data to be plotted
y : array_like
Y-axis data to be plotted
dist : scipy.stats.distribution
A scipy.stats distribution, needed if `line` is "q".
line : {"45", "s", "r", "q", None}, default None
Options for the reference line to which the data is compared.
ax : AxesSubplot, optional
If given, this subplot is used to plot in instead of a new figure being
created.
fmt : str, optional
matplotlib-compatible formatting string for the data markers
kwargs : keywords
These are passed to matplotlib.plot
Returns
-------
fig : Figure
The figure containing `ax`.
ax : AxesSubplot
The original axes if provided. Otherwise a new instance.
"""
plot_style = {
"marker": "o",
"markerfacecolor": "C0",
"markeredgecolor": "C0",
"linestyle": "none",
}
plot_style.update(**kwargs)
where = plot_style.pop("where", "pre")
fig, ax = utils.create_mpl_ax(ax)
ax.set_xmargin(0.02)
if step:
ax.step(x, y, fmt, where=where, **plot_style)
else:
ax.plot(x, y, fmt, **plot_style)
if line:
if line not in ["r", "q", "45", "s"]:
msg = "%s option for line not understood" % line
raise ValueError(msg)
qqline(ax, line, x=x, y=y, dist=dist)
return fig, ax
def _check_for(dist, attr="ppf"):
if not hasattr(dist, attr):
raise AttributeError(f"distribution must have a {attr} method")
| statsmodels/statsmodels | statsmodels/graphics/gofplots.py | Python | bsd-3-clause | 35,607 |
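# A minimal usage sketch for the module above, kept separate from the library
# source. It relies only on names documented in the docstrings above
# (sm.ProbPlot, qqplot_2samples, ProbPlot.qqplot); the data is synthetic.
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.graphics.gofplots import qqplot_2samples
rng = np.random.RandomState(0)
x = rng.normal(loc=8.25, scale=2.75, size=200)
y = rng.normal(loc=8.75, scale=3.25, size=200)
pp_x = sm.ProbPlot(x, fit=True)
pp_y = sm.ProbPlot(y, fit=True)
fig1 = qqplot_2samples(pp_x, pp_y, line="45")  # sample vs. sample quantiles
fig2 = pp_x.qqplot(line="s")                   # sample vs. fitted normal
plt.show()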
import numpy as np
import pandas as pd
from bokeh.plotting import *
from bokeh.palettes import brewer
# Define some categories
categories = [
'ousia', 'poson', 'poion', 'pros ti', 'pou',
'pote', 'keisthai', 'echein', 'poiein', 'paschein',
]
# Create data
N = 10
data = { cat : np.random.randint(10, 100, size=N) for cat in categories }
# Define a little function to stack series together to make polygons. Soon
# this will be built into Bokeh.
def stacked(data, categories):
ys = []
    last = np.zeros(len(list(data.values())[0]))
for cat in categories:
next = last + data[cat]
ys.append(np.hstack((last[::-1], next)))
last = next
return ys
# Get the y coordinates of the stacked data
ys = stacked(data, categories)
# The x coordinates for each polygon are simply the series concatenated
# with its reverse.
xs = [np.hstack((categories[::-1], categories))] * len(ys)
# Pick out a color palette
colors = brewer["Spectral"][len(ys)]
# EXERCISE: output static HTML file
# EXERCISE: play around with parameters like:
# - line_color
# - line_alpha
# - line_width
# - line_dash (e.g., [2,4])
# - fill_color
# - fill_alpha
# - background_fill
patches(xs, ys, x_range=categories, y_range=[0, 800],
color=colors, alpha=0.8, line_color=None, background_fill="lightgrey",
title="Categories of Brewering")
# EXERCISE: configure all of the following plot properties
ygrid().grid_line_color = # color, or None, to suppress the line
ygrid().grid_line_width = # line width for grid lines
axis().major_label_text_font_size = # "12pt", "1.5em", "10px", etc
axis().major_label_text_font_style = # "bold", "normal", "italic"
axis().major_label_standoff = # distance of tick labels from ticks
axis().axis_line_color = # color, or None, to suppress the line
xaxis().major_label_orientation = # radians, "horizontal", "vertical", "normal"
xaxis().major_tick_in = # distance ticks extends into the plot
xaxis().major_tick_out = # and distance they extend out
xaxis().major_tick_line_color = # color, or None, to suppress the line
show()
| jakevdp/bokeh | sphinx/source/tutorial/exercises/style.py | Python | bsd-3-clause | 2,134 |
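# A bokeh-free numpy sketch of what the stacked() helper above computes: each
# polygon's y-coordinates are the previous cumulative total walked backwards,
# concatenated with the new cumulative total walked forwards, so consecutive
# patches share an edge. Purely illustrative, no plotting.
import numpy as np
data = {'a': np.array([1, 2, 3]), 'b': np.array([4, 5, 6])}
categories = ['a', 'b']
ys = []
last = np.zeros(3)
for cat in categories:
    nxt = last + data[cat]
    ys.append(np.hstack((last[::-1], nxt)))
    last = nxt
# First polygon: bottom edge 0,0,0 (reversed), then top edge 1,2,3
assert ys[0].tolist() == [0.0, 0.0, 0.0, 1.0, 2.0, 3.0]
# Second polygon starts where the first ends: 3,2,1 then 5,7,9
assert ys[1].tolist() == [3.0, 2.0, 1.0, 5.0, 7.0, 9.0]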
"""A module which implements the time-frequency estimation.
Morlet code inspired by Matlab code from Sheraz Khan & Brainstorm & SPM
"""
# Authors : Alexandre Gramfort <[email protected]>
# Hari Bharadwaj <[email protected]>
# Clement Moutard <[email protected]>
# Jean-Remi King <[email protected]>
#
# License : BSD (3-clause)
from copy import deepcopy
from functools import partial
from math import sqrt
import numpy as np
from scipy import linalg
from scipy.fftpack import fft, ifft
from ..baseline import rescale
from ..parallel import parallel_func
from ..utils import logger, verbose, _time_mask, check_fname, sizeof_fmt
from ..channels.channels import ContainsMixin, UpdateChannelsMixin
from ..channels.layout import _pair_grad_sensors
from ..io.pick import pick_info, pick_types
from ..io.meas_info import Info
from ..utils import SizeMixin
from .multitaper import dpss_windows
from ..viz.utils import figure_nobar, plt_show, _setup_cmap
from ..externals.h5io import write_hdf5, read_hdf5
from ..externals.six import string_types
# Make wavelet
def morlet(sfreq, freqs, n_cycles=7.0, sigma=None, zero_mean=False):
"""Compute Morlet wavelets for the given frequency range.
Parameters
----------
sfreq : float
        The sampling frequency.
    freqs : array
        Frequency range of interest (1 x Frequencies).
    n_cycles : float | array of float, defaults to 7.0
Number of cycles. Fixed number or one per frequency.
sigma : float, defaults to None
It controls the width of the wavelet ie its temporal
resolution. If sigma is None the temporal resolution
is adapted with the frequency like for all wavelet transform.
The higher the frequency the shorter is the wavelet.
If sigma is fixed the temporal resolution is fixed
like for the short time Fourier transform and the number
of oscillations increases with the frequency.
zero_mean : bool, defaults to False
Make sure the wavelet has a mean of zero.
Returns
-------
Ws : list of array
The wavelets time series.
"""
Ws = list()
n_cycles = np.atleast_1d(n_cycles)
if (n_cycles.size != 1) and (n_cycles.size != len(freqs)):
raise ValueError("n_cycles should be fixed or defined for "
"each frequency.")
for k, f in enumerate(freqs):
if len(n_cycles) != 1:
this_n_cycles = n_cycles[k]
else:
this_n_cycles = n_cycles[0]
# fixed or scale-dependent window
if sigma is None:
sigma_t = this_n_cycles / (2.0 * np.pi * f)
else:
sigma_t = this_n_cycles / (2.0 * np.pi * sigma)
# this scaling factor is proportional to (Tallon-Baudry 98):
# (sigma_t*sqrt(pi))^(-1/2);
t = np.arange(0., 5. * sigma_t, 1.0 / sfreq)
t = np.r_[-t[::-1], t[1:]]
oscillation = np.exp(2.0 * 1j * np.pi * f * t)
        gaussian_envelope = np.exp(-t ** 2 / (2.0 * sigma_t ** 2))
        if zero_mean:  # to make it zero mean
            real_offset = np.exp(- 2 * (np.pi * f * sigma_t) ** 2)
            oscillation -= real_offset
        W = oscillation * gaussian_envelope
W /= sqrt(0.5) * linalg.norm(W.ravel())
Ws.append(W)
return Ws
def _make_dpss(sfreq, freqs, n_cycles=7., time_bandwidth=4.0, zero_mean=False):
"""Compute DPSS tapers for the given frequency range.
Parameters
----------
sfreq : float
The sampling frequency.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
n_cycles : float | ndarray, shape (n_freqs,), defaults to 7.
The number of cycles globally or for each frequency.
time_bandwidth : float, defaults to 4.0
Time x Bandwidth product.
The number of good tapers (low-bias) is chosen automatically based on
this to equal floor(time_bandwidth - 1).
Default is 4.0, giving 3 good tapers.
    zero_mean : bool | None, defaults to False
Make sure the wavelet has a mean of zero.
Returns
-------
Ws : list of array
The wavelets time series.
"""
Ws = list()
if time_bandwidth < 2.0:
raise ValueError("time_bandwidth should be >= 2.0 for good tapers")
n_taps = int(np.floor(time_bandwidth - 1))
n_cycles = np.atleast_1d(n_cycles)
if n_cycles.size != 1 and n_cycles.size != len(freqs):
raise ValueError("n_cycles should be fixed or defined for "
"each frequency.")
for m in range(n_taps):
Wm = list()
for k, f in enumerate(freqs):
if len(n_cycles) != 1:
this_n_cycles = n_cycles[k]
else:
this_n_cycles = n_cycles[0]
t_win = this_n_cycles / float(f)
t = np.arange(0., t_win, 1.0 / sfreq)
# Making sure wavelets are centered before tapering
oscillation = np.exp(2.0 * 1j * np.pi * f * (t - t_win / 2.))
# Get dpss tapers
tapers, conc = dpss_windows(t.shape[0], time_bandwidth / 2.,
n_taps)
Wk = oscillation * tapers[m]
if zero_mean: # to make it zero mean
real_offset = Wk.mean()
Wk -= real_offset
Wk /= sqrt(0.5) * linalg.norm(Wk.ravel())
Wm.append(Wk)
Ws.append(Wm)
return Ws
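# Usage sketch: with the default time_bandwidth=4.0 this helper returns
# floor(4 - 1) = 3 taper families, each holding one wavelet per requested
# frequency. The sampling rate and frequencies are illustrative values only.
#
#     >>> Ws = _make_dpss(sfreq=1000., freqs=[15., 30.], n_cycles=7.,
#     ...                 time_bandwidth=4.0)
#     >>> (len(Ws), len(Ws[0]))        # (n_tapers, n_freqs)
#     (3, 2)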
# Low level convolution
def _cwt(X, Ws, mode="same", decim=1, use_fft=True):
"""Compute cwt with fft based convolutions or temporal convolutions.
Parameters
----------
X : array of shape (n_signals, n_times)
The data.
Ws : list of array
Wavelets time series.
mode : {'full', 'valid', 'same'}
See numpy.convolve.
decim : int | slice, defaults to 1
To reduce memory usage, decimation factor after time-frequency
decomposition.
If `int`, returns tfr[..., ::decim].
If `slice`, returns tfr[..., decim].
.. note:: Decimation may create aliasing artifacts.
use_fft : bool, defaults to True
Use the FFT for convolutions or not.
Returns
-------
out : array, shape (n_signals, n_freqs, n_time_decim)
The time-frequency transform of the signals.
"""
if mode not in ['same', 'valid', 'full']:
raise ValueError("`mode` must be 'same', 'valid' or 'full', "
"got %s instead." % mode)
if mode == 'full' and (not use_fft):
# XXX JRK: full wavelet decomposition needs to be implemented
raise ValueError('`full` decomposition with convolution is currently' +
' not supported.')
decim = _check_decim(decim)
X = np.asarray(X)
# Precompute wavelets for given frequency range to save time
n_signals, n_times = X.shape
n_times_out = X[:, decim].shape[1]
n_freqs = len(Ws)
Ws_max_size = max(W.size for W in Ws)
size = n_times + Ws_max_size - 1
# Always use 2**n-sized FFT
fsize = 2 ** int(np.ceil(np.log2(size)))
# precompute FFTs of Ws
if use_fft:
fft_Ws = np.empty((n_freqs, fsize), dtype=np.complex128)
for i, W in enumerate(Ws):
if len(W) > n_times:
raise ValueError('At least one of the wavelets is longer than the '
'signal. Use a longer signal or shorter '
'wavelets.')
if use_fft:
fft_Ws[i] = fft(W, fsize)
# Make generator looping across signals
tfr = np.zeros((n_freqs, n_times_out), dtype=np.complex128)
for x in X:
if use_fft:
fft_x = fft(x, fsize)
# Loop across wavelets
for ii, W in enumerate(Ws):
if use_fft:
ret = ifft(fft_x * fft_Ws[ii])[:n_times + W.size - 1]
else:
ret = np.convolve(x, W, mode=mode)
# Center and decimate decomposition
if mode == "valid":
sz = int(abs(W.size - n_times)) + 1
offset = (n_times - sz) // 2
this_slice = slice(offset // decim.step,
(offset + sz) // decim.step)
if use_fft:
ret = _centered(ret, sz)
tfr[ii, this_slice] = ret[decim]
else:
if use_fft:
ret = _centered(ret, n_times)
tfr[ii, :] = ret[decim]
yield tfr
# Loop of convolution: single trial
def _compute_tfr(epoch_data, frequencies, sfreq=1.0, method='morlet',
n_cycles=7.0, zero_mean=None, time_bandwidth=None,
use_fft=True, decim=1, output='complex', n_jobs=1,
verbose=None):
"""Compute time-frequency transforms.
Parameters
----------
epoch_data : array of shape (n_epochs, n_channels, n_times)
The epochs.
frequencies : array-like of floats, shape (n_freqs)
The frequencies.
sfreq : float | int, defaults to 1.0
Sampling frequency of the data.
method : 'multitaper' | 'morlet', defaults to 'morlet'
The time-frequency method. 'morlet' convolves a Morlet wavelet.
'multitaper' uses Morlet wavelets windowed with multiple DPSS
multitapers.
n_cycles : float | array of float, defaults to 7.0
Number of cycles in the Morlet wavelet. Fixed number
or one per frequency.
zero_mean : bool | None, defaults to None
None means True for method='multitaper' and False for method='morlet'.
If True, make sure the wavelets have a mean of zero.
time_bandwidth : float, defaults to None
If None and method=multitaper, will be set to 4.0 (3 tapers).
Time x (Full) Bandwidth product. Only applies if
method == 'multitaper'. The number of good tapers (low-bias) is
chosen automatically based on this to equal floor(time_bandwidth - 1).
use_fft : bool, defaults to True
Use the FFT for convolutions or not.
decim : int | slice, defaults to 1
To reduce memory usage, decimation factor after time-frequency
decomposition.
If `int`, returns tfr[..., ::decim].
If `slice`, returns tfr[..., decim].
.. note::
Decimation may create aliasing artifacts, yet decimation
is done after the convolutions.
output : str, defaults to 'complex'
* 'complex' : single trial complex.
* 'power' : single trial power.
* 'phase' : single trial phase.
* 'avg_power' : average of single trial power.
* 'itc' : inter-trial coherence.
* 'avg_power_itc' : average of single trial power and inter-trial
coherence across trials.
n_jobs : int, defaults to 1
The number of epochs to process at the same time. The parallelization
is implemented across channels.
verbose : bool, str, int, or None, defaults to None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
out : array
Time frequency transform of epoch_data. If output is in ['complex',
'phase', 'power'], then shape of out is (n_epochs, n_chans, n_freqs,
n_times), else it is (n_chans, n_freqs, n_times). If output is
'avg_power_itc', the real values code for 'avg_power' and the
imaginary values code for the 'itc': out = avg_power + i * itc
"""
# Check data
epoch_data = np.asarray(epoch_data)
if epoch_data.ndim != 3:
raise ValueError('epoch_data must be of shape '
'(n_epochs, n_chans, n_times)')
# Check params
frequencies, sfreq, zero_mean, n_cycles, time_bandwidth, decim = \
_check_tfr_param(frequencies, sfreq, method, zero_mean, n_cycles,
time_bandwidth, use_fft, decim, output)
# Setup wavelet
if method == 'morlet':
W = morlet(sfreq, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
Ws = [W] # to have same dimensionality as the 'multitaper' case
elif method == 'multitaper':
Ws = _make_dpss(sfreq, frequencies, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, zero_mean=zero_mean)
# Check wavelets
if len(Ws[0][0]) > epoch_data.shape[2]:
raise ValueError('At least one of the wavelets is longer than the '
'signal. Use a longer signal or shorter wavelets.')
# Initialize output
decim = _check_decim(decim)
n_freqs = len(frequencies)
n_epochs, n_chans, n_times = epoch_data[:, :, decim].shape
if output in ('power', 'phase', 'avg_power', 'itc'):
dtype = np.float
elif output in ('complex', 'avg_power_itc'):
# avg_power_itc is stored as power + 1i * itc to keep a
# simple dimensionality
dtype = np.complex
if ('avg_' in output) or ('itc' in output):
out = np.empty((n_chans, n_freqs, n_times), dtype)
else:
out = np.empty((n_chans, n_epochs, n_freqs, n_times), dtype)
# Parallel computation
parallel, my_cwt, _ = parallel_func(_time_frequency_loop, n_jobs)
# Parallelization is applied across channels.
tfrs = parallel(
my_cwt(channel, Ws, output, use_fft, 'same', decim)
for channel in epoch_data.transpose(1, 0, 2))
# FIXME: to avoid overheads we should use np.array_split()
for channel_idx, tfr in enumerate(tfrs):
out[channel_idx] = tfr
if ('avg_' not in output) and ('itc' not in output):
# This is to enforce that the first dimension is for epochs
out = out.transpose(1, 0, 2, 3)
return out
def _check_tfr_param(frequencies, sfreq, method, zero_mean, n_cycles,
time_bandwidth, use_fft, decim, output):
"""Aux. function to _compute_tfr to check the params validity."""
# Check frequencies
if not isinstance(frequencies, (list, np.ndarray)):
raise ValueError('frequencies must be an array-like, got %s '
'instead.' % type(frequencies))
frequencies = np.asarray(frequencies, dtype=float)
if frequencies.ndim != 1:
raise ValueError('frequencies must be of shape (n_freqs,), got %s '
'instead.' % np.array(frequencies.shape))
# Check sfreq
if not isinstance(sfreq, (float, int)):
raise ValueError('sfreq must be a float or an int, got %s '
'instead.' % type(sfreq))
sfreq = float(sfreq)
# Default zero_mean = True if multitaper else False
zero_mean = method == 'multitaper' if zero_mean is None else zero_mean
if not isinstance(zero_mean, bool):
raise ValueError('zero_mean should be of type bool, got %s. instead'
% type(zero_mean))
frequencies = np.asarray(frequencies)
if (method == 'multitaper') and (output == 'phase'):
raise NotImplementedError(
'This function is not optimized to compute the phase using the '
'multitaper method. Use np.angle of the complex output instead.')
# Check n_cycles
if isinstance(n_cycles, (int, float)):
n_cycles = float(n_cycles)
elif isinstance(n_cycles, (list, np.ndarray)):
n_cycles = np.array(n_cycles)
if len(n_cycles) != len(frequencies):
raise ValueError('n_cycles must be a float or an array of length '
'%i frequencies, got %i cycles instead.' %
(len(frequencies), len(n_cycles)))
else:
raise ValueError('n_cycles must be a float or an array, got %s '
'instead.' % type(n_cycles))
# Check time_bandwidth
if (method == 'morlet') and (time_bandwidth is not None):
raise ValueError('time_bandwidth only applies to "multitaper" method.')
elif method == 'multitaper':
time_bandwidth = (4.0 if time_bandwidth is None
else float(time_bandwidth))
# Check use_fft
if not isinstance(use_fft, bool):
raise ValueError('use_fft must be a boolean, got %s '
'instead.' % type(use_fft))
# Check decim
if isinstance(decim, int):
decim = slice(None, None, decim)
if not isinstance(decim, slice):
raise ValueError('decim must be an integer or a slice, '
'got %s instead.' % type(decim))
# Check output
allowed_ouput = ('complex', 'power', 'phase',
'avg_power_itc', 'avg_power', 'itc')
if output not in allowed_ouput:
raise ValueError("Unknown output type. Allowed are %s but "
"got %s." % (allowed_ouput, output))
if method not in ('multitaper', 'morlet'):
raise ValueError('method must be "morlet" or "multitaper", got %s '
'instead.' % type(method))
return frequencies, sfreq, zero_mean, n_cycles, time_bandwidth, decim
def _time_frequency_loop(X, Ws, output, use_fft, mode, decim):
"""Aux. function to _compute_tfr.
Loops time-frequency transform across wavelets and epochs.
Parameters
----------
X : array, shape (n_epochs, n_times)
The epochs data of a single channel.
Ws : list, shape (n_tapers, n_wavelets, n_times)
The wavelets.
output : str
* 'complex' : single trial complex.
* 'power' : single trial power.
* 'phase' : single trial phase.
* 'avg_power' : average of single trial power.
* 'itc' : inter-trial coherence.
* 'avg_power_itc' : average of single trial power and inter-trial
coherence across trials.
use_fft : bool
Use the FFT for convolutions or not.
mode : {'full', 'valid', 'same'}
See numpy.convolve.
decim : slice
The decimation slice: e.g. power[:, decim]
"""
# Set output type
dtype = np.float
if output in ['complex', 'avg_power_itc']:
dtype = np.complex
# Init outputs
decim = _check_decim(decim)
n_epochs, n_times = X[:, decim].shape
n_freqs = len(Ws[0])
if ('avg_' in output) or ('itc' in output):
tfrs = np.zeros((n_freqs, n_times), dtype=dtype)
else:
tfrs = np.zeros((n_epochs, n_freqs, n_times), dtype=dtype)
# Loops across tapers.
for W in Ws:
coefs = _cwt(X, W, mode, decim=decim, use_fft=use_fft)
# Inter-trial phase locking is apparently computed per taper...
if 'itc' in output:
plf = np.zeros((n_freqs, n_times), dtype=np.complex)
# Loop across epochs
for epoch_idx, tfr in enumerate(coefs):
# Transform complex values
if output in ['power', 'avg_power']:
tfr = (tfr * tfr.conj()).real # power
elif output == 'phase':
tfr = np.angle(tfr)
elif output == 'avg_power_itc':
tfr_abs = np.abs(tfr)
plf += tfr / tfr_abs # phase
tfr = tfr_abs ** 2 # power
elif output == 'itc':
plf += tfr / np.abs(tfr) # phase
                continue  # no need to stack anything other than plf
# Stack or add
if ('avg_' in output) or ('itc' in output):
tfrs += tfr
else:
tfrs[epoch_idx] += tfr
# Compute inter trial coherence
if output == 'avg_power_itc':
tfrs += 1j * np.abs(plf)
elif output == 'itc':
tfrs += np.abs(plf)
# Normalization of average metrics
if ('avg_' in output) or ('itc' in output):
tfrs /= n_epochs
# Normalization by number of taper
tfrs /= len(Ws)
return tfrs
def cwt(X, Ws, use_fft=True, mode='same', decim=1):
"""Compute time freq decomposition with continuous wavelet transform.
Parameters
----------
X : array, shape (n_signals, n_times)
The signals.
Ws : list of array
Wavelets time series.
use_fft : bool
Use FFT for convolutions. Defaults to True.
mode : 'same' | 'valid' | 'full'
Convention for convolution. 'full' is currently not implemented with
`use_fft=False`. Defaults to 'same'.
decim : int | slice
To reduce memory usage, decimation factor after time-frequency
decomposition.
If `int`, returns tfr[..., ::decim].
If `slice`, returns tfr[..., decim].
.. note:: Decimation may create aliasing artifacts.
Defaults to 1.
Returns
-------
tfr : array, shape (n_signals, n_frequencies, n_times)
The time-frequency decompositions.
See Also
--------
mne.time_frequency.tfr_morlet : Compute time-frequency decomposition
with Morlet wavelets
"""
decim = _check_decim(decim)
n_signals, n_times = X[:, decim].shape
coefs = _cwt(X, Ws, mode, decim=decim, use_fft=use_fft)
tfrs = np.empty((n_signals, len(Ws), n_times), dtype=np.complex)
for k, tfr in enumerate(coefs):
tfrs[k] = tfr
return tfrs
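# Usage sketch: convolve synthetic signals with Morlet wavelets built by
# `morlet` above. The data, sampling rate and frequencies are illustrative.
#
#     >>> import numpy as np
#     >>> rng = np.random.RandomState(0)
#     >>> X = rng.randn(2, 2000)                      # 2 signals, 2000 samples
#     >>> Ws = morlet(sfreq=1000., freqs=[10., 20.], n_cycles=7.0)
#     >>> tfr = cwt(X, Ws, use_fft=True, decim=2)
#     >>> tfr.shape                                   # (n_signals, n_freqs, n_times // 2)
#     (2, 2, 1000)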
def _tfr_aux(method, inst, freqs, decim, return_itc, picks, average,
**tfr_params):
"""Help reduce redundancy between tfr_morlet and tfr_multitaper."""
decim = _check_decim(decim)
data = _get_data(inst, return_itc)
info = inst.info
info, data, picks = _prepare_picks(info, data, picks)
data = data[:, picks, :]
if average:
if return_itc:
output = 'avg_power_itc'
else:
output = 'avg_power'
else:
output = 'power'
if return_itc:
raise ValueError('Inter-trial coherence is not supported'
' with average=False')
out = _compute_tfr(data, freqs, info['sfreq'], method=method,
output=output, decim=decim, **tfr_params)
times = inst.times[decim].copy()
if average:
if return_itc:
power, itc = out.real, out.imag
else:
power = out
nave = len(data)
out = AverageTFR(info, power, times, freqs, nave,
method='%s-power' % method)
if return_itc:
out = (out, AverageTFR(info, itc, times, freqs, nave,
method='%s-itc' % method))
else:
power = out
out = EpochsTFR(info, power, times, freqs, method='%s-power' % method)
return out
@verbose
def tfr_morlet(inst, freqs, n_cycles, use_fft=False, return_itc=True, decim=1,
n_jobs=1, picks=None, zero_mean=True, average=True,
verbose=None):
"""Compute Time-Frequency Representation (TFR) using Morlet wavelets.
Parameters
----------
inst : Epochs | Evoked
The epochs or evoked object.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
n_cycles : float | ndarray, shape (n_freqs,)
The number of cycles globally or for each frequency.
use_fft : bool, defaults to False
        Whether to use FFT-based convolution.
return_itc : bool, defaults to True
Return inter-trial coherence (ITC) as well as averaged power.
Must be ``False`` for evoked data.
decim : int | slice, defaults to 1
To reduce memory usage, decimation factor after time-frequency
decomposition.
If `int`, returns tfr[..., ::decim].
If `slice`, returns tfr[..., decim].
.. note:: Decimation may create aliasing artifacts.
n_jobs : int, defaults to 1
The number of jobs to run in parallel.
picks : array-like of int | None, defaults to None
The indices of the channels to decompose. If None, all available
channels are decomposed.
zero_mean : bool, defaults to True
Make sure the wavelet has a mean of zero.
.. versionadded:: 0.13.0
average : bool, defaults to True
If True average across Epochs.
.. versionadded:: 0.13.0
verbose : bool, str, int, or None, defaults to None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
power : AverageTFR | EpochsTFR
The averaged or single-trial power.
itc : AverageTFR | EpochsTFR
The inter-trial coherence (ITC). Only returned if return_itc
is True.
See Also
--------
mne.time_frequency.tfr_array_morlet
mne.time_frequency.tfr_multitaper
mne.time_frequency.tfr_array_multitaper
mne.time_frequency.tfr_stockwell
mne.time_frequency.tfr_array_stockwell
"""
tfr_params = dict(n_cycles=n_cycles, n_jobs=n_jobs, use_fft=use_fft,
zero_mean=zero_mean)
return _tfr_aux('morlet', inst, freqs, decim, return_itc, picks,
average, **tfr_params)
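# Usage sketch: `epochs` stands for an existing mne.Epochs instance (not
# defined here); frequencies and cycle counts are illustrative.
#
#     >>> import numpy as np
#     >>> freqs = np.arange(6., 30., 3.)
#     >>> power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=freqs / 2.,
#     ...                         return_itc=True, decim=3)     # doctest: +SKIP
#     >>> power.plot([0], baseline=(None, 0), mode='logratio')  # doctest: +SKIP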
@verbose
def tfr_array_morlet(epoch_data, sfreq, frequencies, n_cycles=7.0,
zero_mean=False, use_fft=True, decim=1, output='complex',
n_jobs=1, verbose=None):
"""Compute time-frequency transform using Morlet wavelets.
Convolves epoch data with selected Morlet wavelets.
Parameters
----------
epoch_data : array of shape (n_epochs, n_channels, n_times)
The epochs.
sfreq : float | int
Sampling frequency of the data.
frequencies : array-like of floats, shape (n_freqs)
The frequencies.
n_cycles : float | array of float, defaults to 7.0
Number of cycles in the Morlet wavelet. Fixed number or one per
frequency.
    zero_mean : bool
If True, make sure the wavelets have a mean of zero. Defaults to False.
use_fft : bool
Use the FFT for convolutions or not. Defaults to True.
decim : int | slice
To reduce memory usage, decimation factor after time-frequency
decomposition. Defaults to 1
If `int`, returns tfr[..., ::decim].
If `slice`, returns tfr[..., decim].
.. note::
Decimation may create aliasing artifacts, yet decimation
is done after the convolutions.
output : str, defaults to 'complex'
* 'complex' : single trial complex.
* 'power' : single trial power.
* 'phase' : single trial phase.
* 'avg_power' : average of single trial power.
* 'itc' : inter-trial coherence.
* 'avg_power_itc' : average of single trial power and inter-trial
coherence across trials.
n_jobs : int
The number of epochs to process at the same time. The parallelization
is implemented across channels. Defaults to 1
verbose : bool, str, int, or None, defaults to None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
out : array
Time frequency transform of epoch_data. If output is in ['complex',
'phase', 'power'], then shape of out is (n_epochs, n_chans, n_freqs,
n_times), else it is (n_chans, n_freqs, n_times). If output is
'avg_power_itc', the real values code for 'avg_power' and the
imaginary values code for the 'itc': out = avg_power + i * itc
See Also
--------
mne.time_frequency.tfr_morlet
mne.time_frequency.tfr_multitaper
mne.time_frequency.tfr_array_multitaper
mne.time_frequency.tfr_stockwell
mne.time_frequency.tfr_array_stockwell
Notes
-----
.. versionadded:: 0.14.0
"""
return _compute_tfr(epoch_data=epoch_data, frequencies=frequencies,
sfreq=sfreq, method='morlet', n_cycles=n_cycles,
zero_mean=zero_mean, time_bandwidth=None,
use_fft=use_fft, decim=decim, output=output,
n_jobs=n_jobs, verbose=verbose)
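# Usage sketch on synthetic data: check the output shape for the 'power'
# output. The array sizes, sampling rate and frequencies are illustrative.
#
#     >>> import numpy as np
#     >>> rng = np.random.RandomState(42)
#     >>> data = rng.randn(5, 3, 1000)               # epochs x channels x times
#     >>> freqs = np.array([8., 10., 12.])
#     >>> power = tfr_array_morlet(data, sfreq=250., frequencies=freqs,
#     ...                          n_cycles=3., output='power')
#     >>> power.shape                                # epochs x channels x freqs x times
#     (5, 3, 3, 1000)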
@verbose
def tfr_multitaper(inst, freqs, n_cycles, time_bandwidth=4.0,
use_fft=True, return_itc=True, decim=1,
n_jobs=1, picks=None, average=True, verbose=None):
"""Compute Time-Frequency Representation (TFR) using DPSS tapers.
Parameters
----------
inst : Epochs | Evoked
The epochs or evoked object.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
n_cycles : float | ndarray, shape (n_freqs,)
The number of cycles globally or for each frequency.
The time-window length is thus T = n_cycles / freq.
time_bandwidth : float, (optional), defaults to 4.0 (3 good tapers).
Time x (Full) Bandwidth product. Should be >= 2.0.
Choose this along with n_cycles to get desired frequency resolution.
The number of good tapers (least leakage from far away frequencies)
is chosen automatically based on this to floor(time_bandwidth - 1).
E.g., With freq = 20 Hz and n_cycles = 10, we get time = 0.5 s.
If time_bandwidth = 4., then frequency smoothing is (4 / time) = 8 Hz.
use_fft : bool, defaults to True
        Whether to use FFT-based convolution.
return_itc : bool, defaults to True
Return inter-trial coherence (ITC) as well as averaged (or
single-trial) power.
decim : int | slice, defaults to 1
To reduce memory usage, decimation factor after time-frequency
decomposition.
If `int`, returns tfr[..., ::decim].
If `slice`, returns tfr[..., decim].
.. note:: Decimation may create aliasing artifacts.
n_jobs : int, defaults to 1
The number of jobs to run in parallel.
picks : array-like of int | None, defaults to None
The indices of the channels to decompose. If None, all available
channels are decomposed.
average : bool, defaults to True
If True average across Epochs.
.. versionadded:: 0.13.0
verbose : bool, str, int, or None, defaults to None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
power : AverageTFR | EpochsTFR
The averaged or single-trial power.
itc : AverageTFR | EpochsTFR
The inter-trial coherence (ITC). Only returned if return_itc
is True.
See Also
--------
mne.time_frequency.tfr_array_multitaper
mne.time_frequency.tfr_stockwell
mne.time_frequency.tfr_array_stockwell
mne.time_frequency.tfr_morlet
mne.time_frequency.tfr_array_morlet
Notes
-----
.. versionadded:: 0.9.0
"""
tfr_params = dict(n_cycles=n_cycles, n_jobs=n_jobs, use_fft=use_fft,
zero_mean=True, time_bandwidth=time_bandwidth)
return _tfr_aux('multitaper', inst, freqs, decim, return_itc, picks,
average, **tfr_params)
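# Usage sketch: same calling convention as tfr_morlet; `epochs` stands for an
# existing mne.Epochs instance and the parameters are illustrative.
#
#     >>> import numpy as np
#     >>> freqs = np.arange(10., 40., 2.)
#     >>> power, itc = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs / 2.,
#     ...                             time_bandwidth=4.0)       # doctest: +SKIP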
# TFR(s) class
class _BaseTFR(ContainsMixin, UpdateChannelsMixin, SizeMixin):
"""Base TFR class."""
@property
def data(self):
return self._data
@data.setter
def data(self, data):
self._data = data
@property
def ch_names(self):
"""Channel names."""
return self.info['ch_names']
def crop(self, tmin=None, tmax=None):
"""Crop data to a given time interval in place.
Parameters
----------
tmin : float | None
Start time of selection in seconds.
tmax : float | None
End time of selection in seconds.
Returns
-------
inst : instance of AverageTFR
The modified instance.
"""
mask = _time_mask(self.times, tmin, tmax, sfreq=self.info['sfreq'])
self.times = self.times[mask]
self.data = self.data[..., mask]
return self
def copy(self):
"""Return a copy of the instance."""
return deepcopy(self)
@verbose
def apply_baseline(self, baseline, mode='mean', verbose=None):
"""Baseline correct the data.
Parameters
----------
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : None | 'ratio' | 'zscore' | 'mean' | 'percent' | 'logratio' | 'zlogratio'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline)),
mean simply subtracts the mean power, percent is the same as
applying ratio then mean, logratio is the same as mean but then
rendered in log-scale, zlogratio is the same as zscore but data
is rendered in log-scale first.
If None no baseline correction is applied.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`).
Returns
-------
inst : instance of AverageTFR
The modified instance.
""" # noqa: E501
self.data = rescale(self.data, self.times, baseline, mode,
copy=False)
return self
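# Usage sketch: both methods operate in place and return the instance, so
# they can be chained; `power` stands for an existing AverageTFR object and
# the time limits are illustrative.
#
#     >>> power.apply_baseline(baseline=(-0.5, 0.), mode='logratio').crop(
#     ...     tmin=0., tmax=0.5)                                 # doctest: +SKIP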
class AverageTFR(_BaseTFR):
"""Container for Time-Frequency data.
Can for example store induced power at sensor level or inter-trial
coherence.
Parameters
----------
info : Info
The measurement info.
data : ndarray, shape (n_channels, n_freqs, n_times)
The data.
times : ndarray, shape (n_times,)
The time values in seconds.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
nave : int
The number of averaged TFRs.
comment : str | None, defaults to None
Comment on the data, e.g., the experimental condition.
method : str | None, defaults to None
Comment on the method used to compute the data, e.g., morlet wavelet.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Attributes
----------
ch_names : list
The names of the channels.
"""
@verbose
def __init__(self, info, data, times, freqs, nave, comment=None,
method=None, verbose=None): # noqa: D102
self.info = info
if data.ndim != 3:
raise ValueError('data should be 3d. Got %d.' % data.ndim)
n_channels, n_freqs, n_times = data.shape
if n_channels != len(info['chs']):
raise ValueError("Number of channels and data size don't match"
" (%d != %d)." % (n_channels, len(info['chs'])))
if n_freqs != len(freqs):
raise ValueError("Number of frequencies and data size don't match"
" (%d != %d)." % (n_freqs, len(freqs)))
if n_times != len(times):
raise ValueError("Number of times and data size don't match"
" (%d != %d)." % (n_times, len(times)))
self.data = data
self.times = np.array(times, dtype=float)
self.freqs = np.array(freqs, dtype=float)
self.nave = nave
self.comment = comment
self.method = method
self.preload = True
@verbose
def plot(self, picks, baseline=None, mode='mean', tmin=None, tmax=None,
fmin=None, fmax=None, vmin=None, vmax=None, cmap='RdBu_r',
dB=False, colorbar=True, show=True, title=None, axes=None,
layout=None, yscale='auto', verbose=None):
"""Plot TFRs as a two-dimensional image(s).
Parameters
----------
picks : array-like of int
The indices of the channels to plot, one figure per channel.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None) all the time
interval is used.
mode : None | 'ratio' | 'zscore' | 'mean' | 'percent' | 'logratio' | 'zlogratio'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline)),
mean simply subtracts the mean power, percent is the same as
applying ratio then mean, logratio is the same as mean but then
rendered in log-scale, zlogratio is the same as zscore but data
is rendered in log-scale first.
If None no baseline correction is applied.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
vmin : float | None
            The minimum value of the color scale. If vmin is None, the data
minimum value is used.
vmax : float | None
            The maximum value of the color scale. If vmax is None, the data
maximum value is used.
cmap : matplotlib colormap | 'interactive' | (colormap, bool)
The colormap to use. If tuple, the first value indicates the
colormap to use and the second value is a boolean defining
interactivity. In interactive mode the colors are adjustable by
clicking and dragging the colorbar with left and right mouse
button. Left mouse button moves the scale up and down and right
mouse button adjusts the range. Hitting space bar resets the range.
Up and down arrows can be used to change the colormap. If
'interactive', translates to ('RdBu_r', True). Defaults to
'RdBu_r'.
.. warning:: Interactive mode works smoothly only for a small
amount of images.
dB : bool
If True, 20*log10 is applied to the data to get dB.
colorbar : bool
If true, colorbar will be added to the plot. For user defined axes,
the colorbar cannot be drawn. Defaults to True.
show : bool
Call pyplot.show() at the end.
title : str | None
String for title. Defaults to None (blank/no title).
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channels. If instance of Axes,
there must be only one channel plotted.
layout : Layout | None
Layout instance specifying sensor positions. Used for interactive
plotting of topographies on rectangle selection. If possible, the
correct layout is inferred from the data.
yscale : 'auto' (default) | 'linear' | 'log'
The scale of y (frequency) axis. 'linear' gives linear y axis,
'log' leads to log-spaced y axis and 'auto' detects if frequencies
are log-spaced and only then sets the y axis to 'log'.
.. versionadded:: 0.14.0
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`).
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
""" # noqa: E501
from ..viz.topo import _imshow_tfr
import matplotlib.pyplot as plt
times, freqs = self.times.copy(), self.freqs.copy()
info = self.info
data = self.data
n_picks = len(picks)
info, data, picks = _prepare_picks(info, data, picks)
data = data[picks]
data, times, freqs, vmin, vmax = \
_preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
baseline, vmin, vmax, dB, info['sfreq'])
tmin, tmax = times[0], times[-1]
if isinstance(axes, plt.Axes):
axes = [axes]
if isinstance(axes, list) or isinstance(axes, np.ndarray):
if len(axes) != n_picks:
raise RuntimeError('There must be an axes for each picked '
'channel.')
cmap = _setup_cmap(cmap)
for idx in range(len(data)):
if axes is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
ax = axes[idx]
fig = ax.get_figure()
onselect_callback = partial(self._onselect, baseline=baseline,
mode=mode, layout=layout)
_imshow_tfr(ax, 0, tmin, tmax, vmin, vmax, onselect_callback,
ylim=None, tfr=data[idx: idx + 1], freq=freqs,
x_label='Time (ms)', y_label='Frequency (Hz)',
colorbar=colorbar, cmap=cmap, yscale=yscale)
if title:
fig.suptitle(title)
plt_show(show)
return fig
def _onselect(self, eclick, erelease, baseline, mode, layout):
"""Handle rubber band selector in channel tfr."""
import matplotlib.pyplot as plt
from ..viz import plot_tfr_topomap
if abs(eclick.x - erelease.x) < .1 or abs(eclick.y - erelease.y) < .1:
return
plt.ion() # turn interactive mode on
tmin = round(min(eclick.xdata, erelease.xdata) / 1000., 5) # ms to s
tmax = round(max(eclick.xdata, erelease.xdata) / 1000., 5)
fmin = round(min(eclick.ydata, erelease.ydata), 5) # Hz
fmax = round(max(eclick.ydata, erelease.ydata), 5)
tmin = min(self.times, key=lambda x: abs(x - tmin)) # find closest
tmax = min(self.times, key=lambda x: abs(x - tmax))
fmin = min(self.freqs, key=lambda x: abs(x - fmin))
fmax = min(self.freqs, key=lambda x: abs(x - fmax))
if tmin == tmax or fmin == fmax:
logger.info('The selected area is too small. '
'Select a larger time-frequency window.')
return
types = list()
if 'eeg' in self:
types.append('eeg')
if 'mag' in self:
types.append('mag')
if 'grad' in self:
if len(_pair_grad_sensors(self.info, topomap_coords=False,
raise_error=False)) >= 2:
types.append('grad')
elif len(types) == 0:
return # Don't draw a figure for nothing.
fig = figure_nobar()
fig.suptitle('{0:.2f} s - {1:.2f} s, {2:.2f} Hz - {3:.2f} Hz'.format(
tmin, tmax, fmin, fmax), y=0.04)
for idx, ch_type in enumerate(types):
ax = plt.subplot(1, len(types), idx + 1)
plot_tfr_topomap(self, ch_type=ch_type, tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax, layout=layout,
baseline=baseline, mode=mode, cmap=None,
title=ch_type, vmin=None, vmax=None,
axes=ax)
def plot_topo(self, picks=None, baseline=None, mode='mean', tmin=None,
tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
layout=None, cmap='RdBu_r', title=None, dB=False,
colorbar=True, layout_scale=0.945, show=True,
border='none', fig_facecolor='k', fig_background=None,
font_color='w', yscale='auto'):
"""Plot TFRs in a topography with images.
Parameters
----------
picks : array-like of int | None
The indices of the channels to plot. If None, all available
channels are displayed.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None) all the time
interval is used.
mode : None | 'ratio' | 'zscore' | 'mean' | 'percent' | 'logratio' | 'zlogratio'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline)),
mean simply subtracts the mean power, percent is the same as
applying ratio then mean, logratio is the same as mean but then
rendered in log-scale, zlogratio is the same as zscore but data
is rendered in log-scale first.
If None no baseline correction is applied.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
vmin : float | None
            The minimum value of the color scale. If vmin is None, the data
minimum value is used.
vmax : float | None
            The maximum value of the color scale. If vmax is None, the data
maximum value is used.
layout : Layout | None
Layout instance specifying sensor positions. If possible, the
correct layout is inferred from the data.
cmap : matplotlib colormap | str
The colormap to use. Defaults to 'RdBu_r'.
title : str
Title of the figure.
dB : bool
If True, 20*log10 is applied to the data to get dB.
colorbar : bool
If true, colorbar will be added to the plot
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas.
show : bool
Call pyplot.show() at the end.
border : str
matplotlib borders style to be used for each sensor plot.
fig_facecolor : str | obj
The figure face color. Defaults to black.
fig_background : None | array
A background image for the figure. This must be a valid input to
`matplotlib.pyplot.imshow`. Defaults to None.
        font_color : str | obj
The color of tick labels in the colorbar. Defaults to white.
yscale : 'auto' (default) | 'linear' | 'log'
The scale of y (frequency) axis. 'linear' gives linear y axis,
'log' leads to log-spaced y axis and 'auto' detects if frequencies
are log-spaced and only then sets the y axis to 'log'.
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
""" # noqa: E501
from ..viz.topo import _imshow_tfr, _plot_topo, _imshow_tfr_unified
from ..viz import add_background_image
times = self.times.copy()
freqs = self.freqs
data = self.data
info = self.info
info, data, picks = _prepare_picks(info, data, picks)
data = data[picks]
data, times, freqs, vmin, vmax = \
_preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax,
mode, baseline, vmin, vmax, dB, info['sfreq'])
if layout is None:
from mne import find_layout
layout = find_layout(self.info)
onselect_callback = partial(self._onselect, baseline=baseline,
mode=mode, layout=layout)
click_fun = partial(_imshow_tfr, tfr=data, freq=freqs, yscale=yscale,
cmap=(cmap, True), onselect=onselect_callback)
imshow = partial(_imshow_tfr_unified, tfr=data, freq=freqs, cmap=cmap,
onselect=onselect_callback)
fig = _plot_topo(info=info, times=times, show_func=imshow,
click_func=click_fun, layout=layout,
colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
layout_scale=layout_scale, title=title, border=border,
x_label='Time (ms)', y_label='Frequency (Hz)',
fig_facecolor=fig_facecolor, font_color=font_color,
unified=True, img=True)
add_background_image(fig, fig_background)
plt_show(show)
return fig
def plot_topomap(self, tmin=None, tmax=None, fmin=None, fmax=None,
ch_type=None, baseline=None, mode='mean',
layout=None, vmin=None, vmax=None, cmap=None,
sensors=True, colorbar=True, unit=None, res=64, size=2,
cbar_fmt='%1.1e', show_names=False, title=None,
axes=None, show=True, outlines='head', head_pos=None):
"""Plot topographic maps of time-frequency intervals of TFR data.
Parameters
----------
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then first available channel type from order given
above is used. Defaults to None.
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : None | 'ratio' | 'zscore' | 'mean' | 'percent' | 'logratio' | 'zlogratio'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline)),
mean simply subtracts the mean power, percent is the same as
applying ratio then mean, logratio is the same as mean but then
rendered in log-scale, zlogratio is the same as zscore but data
is rendered in log-scale first.
If None no baseline correction is applied.
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout
file is inferred from the data; if no appropriate layout file was
found, the layout is automatically generated from the sensor
locations.
vmin : float | callable | None
The value specifying the lower bound of the color range. If None,
and vmax is None, -vmax is used. Else np.min(data) or in case
data contains only positive values 0. If callable, the output
equals vmin(data). Defaults to None.
vmax : float | callable | None
The value specifying the upper bound of the color range. If None,
the maximum value is used. If callable, the output equals
vmax(data). Defaults to None.
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap
to use and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging
the colorbar with left and right mouse button. Left mouse button
moves the scale up and down and right mouse button adjusts the
range. Hitting space bar resets the range. Up and down arrows can
be used to change the colormap. If None (default), 'Reds' is used
for all positive data, otherwise defaults to 'RdBu_r'. If
'interactive', translates to (None, True).
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True, a circle
will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If `mask` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
axes : instance of Axes | None
The axes to plot to. If None the axes is defined automatically.
show : bool
Call pyplot.show() at the end.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head should be
relative to the electrode locations.
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
""" # noqa: E501
from ..viz import plot_tfr_topomap
return plot_tfr_topomap(self, tmin=tmin, tmax=tmax, fmin=fmin,
fmax=fmax, ch_type=ch_type, baseline=baseline,
mode=mode, layout=layout, vmin=vmin, vmax=vmax,
cmap=cmap, sensors=sensors, colorbar=colorbar,
unit=unit, res=res, size=size,
cbar_fmt=cbar_fmt, show_names=show_names,
title=title, axes=axes, show=show,
outlines=outlines, head_pos=head_pos)
def _check_compat(self, tfr):
"""Check that self and tfr have the same time-frequency ranges."""
assert np.all(tfr.times == self.times)
assert np.all(tfr.freqs == self.freqs)
def __add__(self, tfr): # noqa: D105
"""Add instances."""
self._check_compat(tfr)
out = self.copy()
out.data += tfr.data
return out
def __iadd__(self, tfr): # noqa: D105
self._check_compat(tfr)
self.data += tfr.data
return self
def __sub__(self, tfr): # noqa: D105
"""Subtract instances."""
self._check_compat(tfr)
out = self.copy()
out.data -= tfr.data
return out
def __isub__(self, tfr): # noqa: D105
self._check_compat(tfr)
self.data -= tfr.data
return self
def __repr__(self): # noqa: D105
s = "time : [%f, %f]" % (self.times[0], self.times[-1])
s += ", freq : [%f, %f]" % (self.freqs[0], self.freqs[-1])
s += ", nave : %d" % self.nave
s += ', channels : %d' % self.data.shape[0]
s += ', ~%s' % (sizeof_fmt(self._size),)
return "<AverageTFR | %s>" % s
def save(self, fname, overwrite=False):
"""Save TFR object to hdf5 file.
Parameters
----------
fname : str
The file name, which should end with -tfr.h5 .
overwrite : bool
If True, overwrite file (if it exists). Defaults to false
"""
write_tfrs(fname, self, overwrite=overwrite)
class EpochsTFR(_BaseTFR):
"""Container for Time-Frequency data on epochs.
Can for example store induced power at sensor level.
Parameters
----------
info : Info
The measurement info.
data : ndarray, shape (n_epochs, n_channels, n_freqs, n_times)
The data.
times : ndarray, shape (n_times,)
The time values in seconds.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
comment : str | None, defaults to None
Comment on the data, e.g., the experimental condition.
method : str | None, defaults to None
Comment on the method used to compute the data, e.g., morlet wavelet.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Attributes
----------
ch_names : list
The names of the channels.
Notes
-----
.. versionadded:: 0.13.0
"""
@verbose
def __init__(self, info, data, times, freqs, comment=None,
method=None, verbose=None): # noqa: D102
self.info = info
if data.ndim != 4:
raise ValueError('data should be 4d. Got %d.' % data.ndim)
n_epochs, n_channels, n_freqs, n_times = data.shape
if n_channels != len(info['chs']):
raise ValueError("Number of channels and data size don't match"
" (%d != %d)." % (n_channels, len(info['chs'])))
if n_freqs != len(freqs):
raise ValueError("Number of frequencies and data size don't match"
" (%d != %d)." % (n_freqs, len(freqs)))
if n_times != len(times):
raise ValueError("Number of times and data size don't match"
" (%d != %d)." % (n_times, len(times)))
self.data = data
self.times = np.array(times, dtype=float)
self.freqs = np.array(freqs, dtype=float)
self.comment = comment
self.method = method
self.preload = True
def __repr__(self): # noqa: D105
s = "time : [%f, %f]" % (self.times[0], self.times[-1])
s += ", freq : [%f, %f]" % (self.freqs[0], self.freqs[-1])
s += ", epochs : %d" % self.data.shape[0]
s += ', channels : %d' % self.data.shape[1]
s += ', ~%s' % (sizeof_fmt(self._size),)
return "<EpochsTFR | %s>" % s
def average(self):
"""Average the data across epochs.
Returns
-------
ave : instance of AverageTFR
The averaged data.
"""
data = np.mean(self.data, axis=0)
return AverageTFR(info=self.info.copy(), data=data,
times=self.times.copy(), freqs=self.freqs.copy(),
nave=self.data.shape[0],
method=self.method)
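# Usage sketch: average single-trial power into an AverageTFR; `epochs` and
# `freqs` stand for an existing mne.Epochs instance and a frequency array.
#
#     >>> epo_power = tfr_morlet(epochs, freqs=freqs, n_cycles=2.,
#     ...                        average=False, return_itc=False)  # doctest: +SKIP
#     >>> ave_power = epo_power.average()                          # doctest: +SKIP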
def combine_tfr(all_tfr, weights='nave'):
"""Merge AverageTFR data by weighted addition.
Create a new AverageTFR instance, using a combination of the supplied
instances as its data. By default, the mean (weighted by trials) is used.
Subtraction can be performed by passing negative weights (e.g., [1, -1]).
Data must have the same channels and the same time instants.
Parameters
----------
all_tfr : list of AverageTFR
The tfr datasets.
weights : list of float | str
The weights to apply to the data of each AverageTFR instance.
Can also be ``'nave'`` to weight according to tfr.nave,
or ``'equal'`` to use equal weighting (each weighted as ``1/N``).
Returns
-------
tfr : AverageTFR
The new TFR data.
Notes
-----
.. versionadded:: 0.11.0
"""
tfr = all_tfr[0].copy()
if isinstance(weights, string_types):
if weights not in ('nave', 'equal'):
raise ValueError('Weights must be a list of float, or "nave" or '
'"equal"')
if weights == 'nave':
weights = np.array([e.nave for e in all_tfr], float)
weights /= weights.sum()
else: # == 'equal'
weights = [1. / len(all_tfr)] * len(all_tfr)
weights = np.array(weights, float)
if weights.ndim != 1 or weights.size != len(all_tfr):
raise ValueError('Weights must be the same size as all_tfr')
ch_names = tfr.ch_names
for t_ in all_tfr[1:]:
assert t_.ch_names == ch_names, ValueError("%s and %s do not contain "
"the same channels"
% (tfr, t_))
assert np.max(np.abs(t_.times - tfr.times)) < 1e-7, \
ValueError("%s and %s do not contain the same time instants"
% (tfr, t_))
# use union of bad channels
bads = list(set(tfr.info['bads']).union(*(t_.info['bads']
for t_ in all_tfr[1:])))
tfr.info['bads'] = bads
# XXX : should be refactored with combined_evoked function
tfr.data = sum(w * t_.data for w, t_ in zip(weights, all_tfr))
tfr.nave = max(int(1. / sum(w ** 2 / e.nave
for w, e in zip(weights, all_tfr))), 1)
return tfr
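# Usage sketch: subtract two conditions with opposite weights, or build a
# grand average weighted by nave; `tfr_a` and `tfr_b` stand for existing
# AverageTFR instances with matching channels and times.
#
#     >>> diff = combine_tfr([tfr_a, tfr_b], weights=[1, -1])      # doctest: +SKIP
#     >>> grand_ave = combine_tfr([tfr_a, tfr_b], weights='nave')  # doctest: +SKIP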
# Utils
def _get_data(inst, return_itc):
"""Get data from Epochs or Evoked instance as epochs x ch x time."""
from ..epochs import BaseEpochs
from ..evoked import Evoked
if not isinstance(inst, (BaseEpochs, Evoked)):
raise TypeError('inst must be Epochs or Evoked')
if isinstance(inst, BaseEpochs):
data = inst.get_data()
else:
if return_itc:
raise ValueError('return_itc must be False for evoked data')
data = inst.data[np.newaxis].copy()
return data
def _prepare_picks(info, data, picks):
"""Prepare the picks."""
if picks is None:
picks = pick_types(info, meg=True, eeg=True, ref_meg=False,
exclude='bads')
if np.array_equal(picks, np.arange(len(data))):
picks = slice(None)
else:
info = pick_info(info, picks)
return info, data, picks
def _centered(arr, newsize):
"""Aux Function to center data."""
# Return the center newsize portion of the array.
newsize = np.asarray(newsize)
currsize = np.array(arr.shape)
startind = (currsize - newsize) // 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
def _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
baseline, vmin, vmax, dB, sfreq):
"""Aux Function to prepare tfr computation."""
from ..viz.utils import _setup_vmin_vmax
copy = baseline is not None
data = rescale(data, times, baseline, mode, copy=copy)
# crop time
itmin, itmax = None, None
idx = np.where(_time_mask(times, tmin, tmax, sfreq=sfreq))[0]
if tmin is not None:
itmin = idx[0]
if tmax is not None:
itmax = idx[-1] + 1
times = times[itmin:itmax]
# crop freqs
ifmin, ifmax = None, None
idx = np.where(_time_mask(freqs, fmin, fmax, sfreq=sfreq))[0]
if fmin is not None:
ifmin = idx[0]
if fmax is not None:
ifmax = idx[-1] + 1
freqs = freqs[ifmin:ifmax]
# crop data
data = data[:, ifmin:ifmax, itmin:itmax]
times *= 1e3
if dB:
data = 10 * np.log10((data * data.conj()).real)
vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
return data, times, freqs, vmin, vmax
def _check_decim(decim):
"""Aux function checking the decim parameter."""
if isinstance(decim, int):
decim = slice(None, None, decim)
elif not isinstance(decim, slice):
        raise TypeError('`decim` must be int or slice, got %s instead'
                        % type(decim))
return decim
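# Illustration of the decim normalization: an int becomes a step slice, an
# existing slice passes through unchanged.
#
#     >>> _check_decim(4)
#     slice(None, None, 4)
#     >>> _check_decim(slice(0, None, 2))
#     slice(0, None, 2)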
# i/o
def write_tfrs(fname, tfr, overwrite=False):
"""Write a TFR dataset to hdf5.
Parameters
----------
fname : string
The file name, which should end with -tfr.h5
tfr : AverageTFR instance, or list of AverageTFR instances
The TFR dataset, or list of TFR datasets, to save in one file.
        Note. If .comment is None, a name will be generated on the fly,
        based on the order in which the TFR objects are passed.
overwrite : bool
If True, overwrite file (if it exists). Defaults to False.
See Also
--------
read_tfrs
Notes
-----
.. versionadded:: 0.9.0
"""
out = []
if not isinstance(tfr, (list, tuple)):
tfr = [tfr]
for ii, tfr_ in enumerate(tfr):
comment = ii if tfr_.comment is None else tfr_.comment
out.append(_prepare_write_tfr(tfr_, condition=comment))
write_hdf5(fname, out, overwrite=overwrite, title='mnepython')
def _prepare_write_tfr(tfr, condition):
"""Aux function."""
return (condition, dict(times=tfr.times, freqs=tfr.freqs,
data=tfr.data, info=tfr.info,
nave=tfr.nave, comment=tfr.comment,
method=tfr.method))
def read_tfrs(fname, condition=None):
"""Read TFR datasets from hdf5 file.
Parameters
----------
fname : string
The file name, which should end with -tfr.h5 .
condition : int or str | list of int or str | None
The condition to load. If None, all conditions will be returned.
Defaults to None.
See Also
--------
write_tfrs
Returns
-------
tfrs : list of instances of AverageTFR | instance of AverageTFR
Depending on `condition` either the TFR object or a list of multiple
TFR objects.
Notes
-----
.. versionadded:: 0.9.0
"""
check_fname(fname, 'tfr', ('-tfr.h5',))
logger.info('Reading %s ...' % fname)
tfr_data = read_hdf5(fname, title='mnepython')
for k, tfr in tfr_data:
tfr['info'] = Info(tfr['info'])
if condition is not None:
tfr_dict = dict(tfr_data)
if condition not in tfr_dict:
keys = ['%s' % k for k in tfr_dict]
raise ValueError('Cannot find condition ("{0}") in this file. '
                             'The file contains "{1}"'
.format(condition, " or ".join(keys)))
out = AverageTFR(**tfr_dict[condition])
else:
out = [AverageTFR(**d) for d in list(zip(*tfr_data))[1]]
return out
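# Usage sketch: round-trip a TFR through HDF5. The file name is illustrative
# and `power` stands for an existing AverageTFR instance.
#
#     >>> write_tfrs('example-tfr.h5', power, overwrite=True)      # doctest: +SKIP
#     >>> power2 = read_tfrs('example-tfr.h5')[0]                  # doctest: +SKIP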
| nicproulx/mne-python | mne/time_frequency/tfr.py | Python | bsd-3-clause | 69,456 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cms', '0014_auto_20160404_1908'),
]
operations = [
migrations.AlterField(
model_name='cmsplugin',
name='position',
field=models.PositiveSmallIntegerField(default=0, verbose_name='position', editable=False),
),
]
| rsalmaso/django-cms | cms/migrations/0015_auto_20160421_0000.py | Python | bsd-3-clause | 391 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def generate_initial_block_types(apps, schema_editor):
User = apps.get_model("auth", "User")
root = User.objects.filter(username="root").first()
if not root:
root = User.objects.filter(username="root2").first()
if not root:
root = User.objects.create(username="root2")
DashBlockType = apps.get_model("dashblocks", "DashBlockType")
DashBlockType.objects.get_or_create(name="U-Reporters",
slug="ureporters",
description="U-Reporters Page",
has_title=True,
has_image=True,
has_rich_text=False,
has_summary=False,
has_link=False,
has_gallery=False,
has_color=False,
has_video=False,
has_tags=False,
created_by=root,
modified_by=root)
class Migration(migrations.Migration):
dependencies = [
('dashblocks', '0002_auto_20140802_2112'),
]
operations = [
migrations.RunPython(generate_initial_block_types),
]
| peterayeni/dash | dash/dashblocks/migrations/0003_auto_20140804_0236.py | Python | bsd-3-clause | 1,506 |
import numpy as np
import tensorflow as tf
import dists
from misc import *
| davmre/bayesflow | elbow/util/__init__.py | Python | bsd-3-clause | 77 |
import ipcalc
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.core.cache import cache
from django.db.models.signals import post_save, post_delete
class BlockIP(models.Model):
network = models.CharField(_('IP address or mask'), max_length=18)
reason_for_block = models.TextField(blank=True, null=True, help_text=_("Optional reason for block"))
def __unicode__(self):
return 'BlockIP: %s' % self.network
def get_network(self):
return ipcalc.Network(self.network)
class Meta:
verbose_name = _('IPs & masks to ban')
verbose_name_plural = _('IPs & masks to ban')
def _clear_cache(sender, instance, **kwargs):
cache.set('blockip:list', BlockIP.objects.all())
post_save.connect(_clear_cache, sender=BlockIP)
post_delete.connect(_clear_cache, sender=BlockIP)
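# Usage sketch: how the cached entries might be checked against a client IP,
# e.g. from middleware. The cache-key fallback and the sample address are
# illustrative; ipcalc.Network supports `in` membership tests.
#
#     >>> entries = cache.get('blockip:list') or BlockIP.objects.all()    # doctest: +SKIP
#     >>> any('192.168.1.7' in e.get_network() for e in entries)          # doctest: +SKIP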
| zhendilin/django-block-ip | block_ip/models.py | Python | bsd-3-clause | 866 |
from datetime import timedelta
import numpy as np
import pytest
import pandas as pd
from pandas import (
Float64Index, Index, Int64Index, NaT, Timedelta, TimedeltaIndex,
timedelta_range)
import pandas.util.testing as tm
class TestTimedeltaIndex(object):
def test_astype_object(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
result = idx.astype(object)
expected = Index(expected_list, dtype=object, name='idx')
tm.assert_index_equal(result, expected)
assert idx.tolist() == expected_list
def test_astype_object_with_nat(self):
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), NaT,
Timedelta('4 days')]
result = idx.astype(object)
expected = Index(expected_list, dtype=object, name='idx')
tm.assert_index_equal(result, expected)
assert idx.tolist() == expected_list
def test_astype(self):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, 'NaT', NaT, np.NaN])
result = idx.astype(object)
expected = Index([Timedelta('1 days 03:46:40')] + [NaT] * 3,
dtype=object)
tm.assert_index_equal(result, expected)
result = idx.astype(int)
expected = Int64Index([100000000000000] + [-9223372036854775808] * 3,
dtype=np.int64)
tm.assert_index_equal(result, expected)
result = idx.astype(str)
expected = Index(str(x) for x in idx)
tm.assert_index_equal(result, expected)
rng = timedelta_range('1 days', periods=10)
result = rng.astype('i8')
tm.assert_index_equal(result, Index(rng.asi8))
tm.assert_numpy_array_equal(rng.asi8, result.values)
def test_astype_uint(self):
arr = timedelta_range('1H', periods=2)
expected = pd.UInt64Index(
np.array([3600000000000, 90000000000000], dtype="uint64")
)
tm.assert_index_equal(arr.astype("uint64"), expected)
tm.assert_index_equal(arr.astype("uint32"), expected)
def test_astype_timedelta64(self):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, 'NaT', NaT, np.NaN])
result = idx.astype('timedelta64')
expected = Float64Index([1e+14] + [np.NaN] * 3, dtype='float64')
tm.assert_index_equal(result, expected)
result = idx.astype('timedelta64[ns]')
tm.assert_index_equal(result, idx)
assert result is not idx
result = idx.astype('timedelta64[ns]', copy=False)
tm.assert_index_equal(result, idx)
assert result is idx
@pytest.mark.parametrize('dtype', [
float, 'datetime64', 'datetime64[ns]'])
def test_astype_raises(self, dtype):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, 'NaT', NaT, np.NaN])
msg = 'Cannot cast TimedeltaArray to dtype'
with pytest.raises(TypeError, match=msg):
idx.astype(dtype)
def test_astype_category(self):
obj = pd.timedelta_range("1H", periods=2, freq='H')
result = obj.astype('category')
expected = pd.CategoricalIndex([pd.Timedelta('1H'),
pd.Timedelta('2H')])
tm.assert_index_equal(result, expected)
result = obj._data.astype('category')
expected = expected.values
tm.assert_categorical_equal(result, expected)
def test_astype_array_fallback(self):
obj = pd.timedelta_range("1H", periods=2)
result = obj.astype(bool)
expected = pd.Index(np.array([True, True]))
tm.assert_index_equal(result, expected)
result = obj._data.astype(bool)
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
| GuessWhoSamFoo/pandas | pandas/tests/indexes/timedeltas/test_astype.py | Python | bsd-3-clause | 4,066 |
'''
Created on Oct 21, 2011
@author: bolme
'''
import time
from collections import defaultdict
import cProfile
import traceback
import shelve
class EmptyData(object):
def __str__(self):
return "<MissingData>"
class DefaultData(object):
def __init__(self,default):
self.default = default
def __str__(self):
tmp = str(self.default)
tmp = " ".join(tmp.split()) # Flatten to one line an collapse white space to single spaces
if len(tmp) > 40:
tmp = tmp[:37] + "..."
return "<DefaultData:%s>"%(tmp,)
EMPTY_DATA = EmptyData()
#############################################################################
# Video tasks are operations to be run on a frame.
#############################################################################
class VideoTask(object):
'''
This provides an interface and support functions for a video processing
    task. Typically a subclass will override the constructor, which will
be used as a task factory and will create the task and specify the
arguments.
'''
    # TODO: optional args should also be added which are included if available but will not delay execution if they are not available.
def __init__(self,frame_id,args=[]):
'''
@param frame_id: the frame_id associated with this task.
@param args: specification of the data that is required to execute the task.
'''
self.frame_id = frame_id
self.args = args
self.task_id = None
self.label = self.__class__.__name__
if not hasattr(self,'subgraph'):
self.subgraph = None
if not hasattr(self,'color'):
self.color = None
self._arg_map = {}
self._added_args = 0 # keep track of how many arguments have been found.
self._default_args = 0 # keep track of how many arguments are currently default.
for i in range(len(args)):
each = args[i]
dtype = each[0]
fid = each[1]
key = (dtype,fid)
#if self._arg_map.has_key(key):
# continue
if len(each) == 2:
self._arg_map[key] = EMPTY_DATA
elif len(each) == 3:
self._arg_map[key] = DefaultData(each[2])
self._default_args += 1
else:
raise ValueError("Argument should have 2 or 3 values: %s"%each)
self.collected_args = [False for each in self.args]
self.processed_args = [each for each in self.args]
self.distributable = False
self.is_ready = False
def addData(self, data_item):
'''
Check to see if the data item is needed for this task. If it is then keep a reference.
'''
# Compute the key
key = (data_item.getType(),data_item.getFrameId())
# Check if this task needs the data
if self._arg_map.has_key(key):
curr_val = self._arg_map[key]
# If no default save the data and update counts
if curr_val == EMPTY_DATA:
self._arg_map[key] = data_item.getData()
self._added_args += 1
return True
# If there is a default replace and update counts
elif isinstance(curr_val,DefaultData):
self._arg_map[key] = data_item.getData()
self._added_args += 1
self._default_args -= 1
assert self._default_args >= 0 # This should only fail if there is an error in counting.
return True
return False
def ready(self):
'''
Returns True if this task is ready to run.
'''
return self._added_args == len(self._arg_map)
def couldRun(self):
'''
Returns True if this task could run with the default arguments.
'''
return self._added_args + self._default_args == len(self._arg_map)
def run(self):
args = []
for i in range(len(self.args)):
each = self.args[i]
key = (each[0],each[1])
if isinstance(self._arg_map[key],DefaultData):
args.append(self._arg_map[key].default)
else:
args.append(self._arg_map[key])
return self.execute(*args)
def getFrameId(self):
'''
@returns: the frame_id associated with this task.
'''
return self.frame_id
def required(self):
'''
@returns: the list of required data.
'''
return self.args
def execute(self, *args, **kwargs):
'''
This is an abstract method that needs to be implemented in subclasses.
        One argument is supplied for each item in the required arguments. This
method should return a list of new data items. If no data is
generated by this method an empty list should be returned.
'''
raise NotImplementedError("Abstract Method")
def printInfo(self):
print "VideoTask {%s:%d}"%(self.__class__.__name__,self.getFrameId())
for key in self._arg_map.keys():
dtype,frame_id = key
if self._arg_map[key] is EMPTY_DATA or isinstance(self._arg_map[key],DefaultData):
print " Argument <%s,%d> -> %s"%(dtype,frame_id,str(self._arg_map[key]))
else:
tmp = str(self._arg_map[key])
tmp = " ".join(tmp.split()) # Flatten to one line an collapse white space to single spaces
if len(tmp) > 40:
tmp = tmp[:37] + "..."
print " Argument <%s,%d> -> %s"%(dtype,frame_id,tmp)
class _VideoDataItem(object):
'''
This class keeps track of data items and when they are used.
'''
def __init__(self,data_tuple):
self._data_type = data_tuple[0]
self._frame_id = data_tuple[1]
self._data = data_tuple[2]
self._touched = 0
def getType(self):
''' Get the item type. '''
return self._data_type
def getFrameId(self):
''' Get the frame id. '''
return self._frame_id
def getData(self):
''' Get the actual data. '''
return self._data
def getKey(self):
''' Get the key. '''
return (self._data_type,self._frame_id)
def touch(self):
''' Count the number of times this data was touched. '''
self._touched += 1
def getTouched(self):
''' Return the number of times the data was touched. '''
return self._touched
def __repr__(self):
return "_VideoDataItem((%s,%s,%s)"%(self._data_type,self._frame_id,self._data)
def vtmProcessor(task_queue,results_queue,options):
'''
Each task_queue item should have three items (task_id,frame_id,command/task).
the command "quit" is used to stop the process.
The vtmProcessor will return (task_id, frame_id, results). If there is an exception
then the result will be replaced by the exception and a stack trace will be printed.
'''
while True:
item = task_queue.get()
try:
task_id,frame_id,task = item
result = task.run()
results_queue.put((task_id,frame_id,result))
except Exception, error:
traceback.print_exc()
results_queue.put((task_id,frame_id,error))
#############################################################################
# This class manages the workflow for video items.
#############################################################################
# TODO: Should we keep this name?
class VideoTaskManager(object):
'''
    The framework provided by this class allows complex video processing
    systems to be constructed from simple tasks. Often video processing
    loops can be complicated because data needs to persist across many frames
    and many operations or tasks need to be completed to solve a video analysis
    problem. This class allows many small and simple tasks to be managed
    in a way that can produce a complex and powerful system.
    Tasks request only the data they need, which keeps each task as simple
    as possible. This also reduces the coupling between tasks and
    eliminates complex video processing loops. The video task manager handles
    much of the complexity of the video processing system, like data buffering,
    and ensures that each task gets its required data.
    This class manages tasks that are run on video frames. The video task
    manager maintains a list of data objects and task objects. Each task is
    a listener for data objects. When the data objects required to execute a
    task become available, the task's execute method will be called and the
    required data items will be passed as arguments.
New frames are added using the addFrame method. When a frame is added
it creates a data item that includes a frame_id, a data type of "FRAME",
and a pv.Image that contains the frame data. Tasks can register to
receive that frame data or any data products of other tasks and when
that data becomes available the task will be executed.
'''
def __init__(self,debug_level=0, buffer_size=10, show = False):
'''
Create a task manager.
@param debug_level: 0=quiet, 1=errors, 2=warnings, 3=info, 4=verbose
@type debug_level: int
@param buffer_size: the size of the frame and data buffer.
@type buffer_size: int
'''
self.debug_level = debug_level
# Initialize data.
self.frame_id = 0
self.task_list = []
self.task_factories = []
self.buffer_size = buffer_size
self.frame_list = []
self.show = show
# Initialize information for flow analysis.
self.flow = defaultdict(set)
self.task_set = set()
self.data_set = set((('FRAME',None),('LAST_FRAME',None),))
self.task_data = defaultdict(dict)
self.task_id = 0
self.lastFrameCreated = 0
self.recording_shelf = None
self.playback_shelf = None
self.recording_filter = None
self.task_filter = None
self.playback_filter = None
if self.debug_level >= 3:
print "TaskManager[INFO]: Initialized"
def addTaskFactory(self,task_factory,*args,**kwargs):
'''
        This function adds a task factory function to the video task manager.
        The function is called once for every frame processed by the
        VideoTaskManager. This function should take one argument which
        is the frame_id of that frame. The task factory should return an
        instance of the VideoTask class that will perform processing on this
        frame. There are three options for implementing a task factory:
        - A class object for a VideoTask which has a constructor that takes
        a frame_id as an argument. When called, the constructor for that
        class will create a task.
        - A function that takes a frame_id argument. The function can
        create and return a task.
        - Any other object that implements the __call__ method which
        returns a task instance.
        Any additional arguments or keyword arguments passed to
        this function will be passed after the frame_id argument
        to the task factory.
        @param task_factory: a function or callable object that returns a task.
@type task_factory: callable
@param profile: Keyword argument. If true, profile data will be
generated for each call to this task.
@type profile: True | False
'''
self.task_id += 1
profile = False
if kwargs.has_key('profile'):
profile = kwargs['profile']
del kwargs['profile']
self.task_factories.append((task_factory,args,kwargs,profile,self.task_id))
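    # A minimal usage sketch (assumptions: `video` is an iterable of pv.Image frames,
    # e.g. a pv.Video, and ExampleFrameSizeTask is the example task sketched earlier
    # in this module):
    #
    #     vtm = VideoTaskManager(debug_level=3, buffer_size=10)
    #     vtm.addTaskFactory(ExampleFrameSizeTask)
    #     for frame in video:
    #         vtm.addFrame(frame)
    #     vtm.flush()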
def addFrame(self,frame,ilog=None):
'''
Adds a new frame to the task manager and then start processing.
@param frame: the next frame of video.
@type frame: pv.Image
'''
# Add the frame to the data manager
start = time.time()
frame_data = _VideoDataItem(("FRAME",self.frame_id,frame))
self._createTasksForFrame(self.frame_id)
self.addDataItem(frame_data)
last_data = _VideoDataItem(("LAST_FRAME",self.frame_id-1,False))
self.addDataItem(last_data)
self.frame_list.append(frame_data)
# Playback the recording
if self.playback_shelf != None and self.playback_shelf.has_key(str(self.frame_id)):
data_items = self.playback_shelf[str(self.frame_id)]
for each in data_items:
if self.playback_filter==None or each.getType() in self.playback_filter:
self.addDataItem(each)
self.data_set.add((each.getKey()[0],None))
self.flow[('Playback',each.getType())].add(0)
# Run any tasks that can be completed with the current data.
self._runTasks()
if self.recording_shelf != None:
self.recording_shelf.sync()
# Delete old data
#self._cleanUp()
stop = time.time()
# Set up for the next frame and display the results.
self.frame_id += 1
self.showFrames(ilog=ilog)
if self.debug_level >= 3:
print "TaskManager[INFO]: Frame Processing Time=%0.3fms"%(1000*(stop-start),)
def addData(self,data_list):
'''
        Add additional data for this frame. The data list should be a list of tuples, where each tuple is (label, data).
'''
for each in data_list:
data = _VideoDataItem((each[0],self.frame_id,each[1]))
self.addDataItem(data)
self.flow[('Data Input',data.getType())].add(0)
self.data_set.add((data.getKey()[0],None))
def addDataItem(self,data_item):
'''
Process any new data items and associate them with tasks.
'''
if self.recording_shelf != None:
frame_id = str(self.frame_id)
if not self.recording_shelf.has_key(frame_id):
self.recording_shelf[frame_id] = []
if self.recording_filter == None or data_item.getType() in self.recording_filter:
self.recording_shelf[frame_id].append(data_item)
for task in self.task_list:
was_added = task.addData(data_item)
if was_added:
# Compute the dataflow
self.flow[(data_item.getKey()[0],task.task_id)].add(data_item.getKey()[1]-task.getFrameId())
def _createTasksForFrame(self,frame_id):
'''
This calls the task factories to create tasks for the current frame.
'''
while self.lastFrameCreated < frame_id + self.buffer_size:
start = time.time()
count = 0
for factory,args,kwargs,profile,task_id in self.task_factories:
task = factory(self.lastFrameCreated,*args,**kwargs)
task.task_id=task_id
self.task_data[task.task_id]['class_name'] = task.__class__.__name__
task.profile=profile
count += 1
if self.task_filter == None or task.__class__.__name__ in self.task_filter:
self.task_list += [task]
stop = time.time() - start
if self.debug_level >= 3:
print "TaskManager[INFO]: Created %d new tasks for frame %s. Total Tasks=%d. Time=%0.2fms"%(count,self.lastFrameCreated,len(self.task_list),stop*1000)
self.lastFrameCreated += 1
def _runTasks(self,flush=False):
'''
Run any tasks that have all data available.
'''
if self.debug_level >= 3: print "TaskManager[INFO]: Running Tasks..."
while True:
start_count = len(self.task_list)
remaining_tasks = []
for task in self.task_list:
if self._evaluateTask(task,flush=flush):
remaining_tasks.append(task)
self.task_list = remaining_tasks
if start_count == len(self.task_list):
break
def flush(self):
'''
Run all tasks that can be run and then finish up. The LAST_FRAME data
item will be set to true for the last frame inserted.
'''
last_data = _VideoDataItem(("LAST_FRAME",self.frame_id-1,True))
self.addDataItem(last_data)
self._runTasks(flush=True)
def _evaluateTask(self,task,flush=False):
'''
Attempts to run a task. This is intended to be run within a filter operation.
@returns: false if task should be deleted and true otherwise.
'''
self.task_set.add(task.task_id)
should_run = False
if task.ready():
should_run = True
elif (flush or self.frame_id - task.getFrameId() > self.buffer_size) and task.couldRun():
should_run = True
elif (flush or self.frame_id - task.getFrameId() > self.buffer_size) and not task.couldRun():
if self.debug_level >= 2:
print "TaskManager[WARNING]: Task %s for frame %d was not executed."%(task,task.getFrameId())
task.printInfo()
# If the task is beyond the buffer, then delete it.
return False
# If the task is not ready then skip it for now.
if not should_run:
return True
# Run the task.
start = time.time()
# Start the profiler
if task.profile:
prof = cProfile.Profile()
prof.enable()
# RUN THE TASK
result = task.run()
# Stop the profiler and show that information.
if task.profile:
prof.disable()
print
print "Profiled task:",task.__class__.__name__
prof.print_stats('time')
print
# Check that the task did return a list.
try:
len(result)
except:
raise Exception("Task did not return a valid list of data.\n Task: %s\n Data:%s"%(task,result))
# Record the dataflow information.
for each in result:
self.flow[(task.task_id,each[0])].add(0)
self.data_set.add((each[0],task.subgraph))
# Compute the dataflow
for i in range(len(task.collected_args)):
if task.collected_args[i]:
each = task.processed_args[i]
self.flow[(each.getKey()[0],task.task_id)].add(each.getKey()[1]-task.getFrameId())
self.data_set.add((each.getKey()[0],task.subgraph))
# Add the data to the cache.
for data_item in result:
if len(data_item) != 3:
raise Exception("Task returned a data item that does not have 3 elements.\n Task: %s\n Data: %s"%(task,data_item))
data_item = _VideoDataItem(data_item)
self.addDataItem(data_item)
stop = time.time() - start
if self.debug_level >= 3:
print "TaskManager[INFO]: Evaluate task %s for frame %d. Time=%0.2fms"%(task,task.getFrameId(),stop*1000)
# Compute task statistics
if not self.task_data[task.task_id].has_key('time_sum'):
self.task_data[task.task_id]['time_sum'] = 0.0
self.task_data[task.task_id]['call_count'] = 0
self.task_data[task.task_id]['time_sum'] += stop
self.task_data[task.task_id]['call_count'] += 1
self.task_data[task.task_id]['color'] = task.color
self.task_data[task.task_id]['subgraph'] = task.subgraph
# Return false so that the task is deleted.
return False
def _remainingTasksForFrame(self,frame_id):
'''
@returns: the number of tasks that need to be run for this frame.
'''
count = 0
for task in self.task_list:
if task.getFrameId() == frame_id:
count += 1
return count
# TODO: I don't really like how show frames works. I would like display of frames to be optional or maybe handled outside of this class. How should this work.
def showFrames(self,ilog=None):
'''
Show any frames with no remaining tasks.
'''
while len(self.frame_list) > 0:
frame_data = self.frame_list[0]
frame_id = frame_data.getFrameId()
frame = frame_data.getData()
task_count = self._remainingTasksForFrame(frame_id)
# If the frame is complete then show it.
if task_count == 0:
if self.show:
frame.show(delay=1)
if ilog != None:
ilog(frame,ext='jpg')
del self.frame_list[0]
else:
break
def recordingFile(self,filename):
'''
Set up an output file for recording.
'''
assert self.playback_shelf == None
self.recording_shelf = shelve.open(filename, flag='n', protocol=2, writeback=True)
def playbackFile(self,filename,cache=False):
'''
Set up an input file for playback.
'''
assert self.recording_shelf == None
self.playback_shelf = shelve.open(filename, flag='r', protocol=2, writeback=False)
def recordingFilter(self,data_types):
'''
Only recorded data_types in the list.
'''
self.recording_filter = set(data_types)
def taskFilter(self,task_types):
'''
Only generate tasks in the list.
'''
self.task_filter = set(task_types)
def playbackFilter(self,data_types):
'''
Only playback data_types in the list.
'''
self.playback_filter = set(data_types)
def asGraph(self,as_image=False):
'''
This uses runtime analysis to create a dataflow graph for this VTM.
'''
import pydot
import pyvision as pv
import PIL.Image
from cStringIO import StringIO
def formatNum(n):
'''
This formats frame offsets correctly: -1,0,+1
'''
if n == 0:
return '0'
else:
return "%+d"%n
def record_strings(my_list):
return '{''}'
# Create the graph.
graph = pydot.Dot(graph_type='digraph',nodesep=.3,ranksep=.5)
graph.add_node(pydot.Node("Data Input",shape='invhouse',style='filled',fillcolor='#ffCC99'))
graph.add_node(pydot.Node("Video Input",shape='invhouse',style='filled',fillcolor='#ffCC99'))
graph.add_edge(pydot.Edge("Video Input","FRAME"))
graph.add_edge(pydot.Edge("Video Input","LAST_FRAME"))
if self.playback_shelf != None:
graph.add_node(pydot.Node("Playback",shape='invhouse',style='filled',fillcolor='#ffCC99'))
subgraphs = {None:graph}
# Add task nodes
for each in self.task_set:
if self.task_data[each].has_key('call_count'):
class_name = self.task_data[each]['class_name']
call_count = self.task_data[each]['call_count']
mean_time = self.task_data[each]['time_sum']/call_count
node_label = "{" + " | ".join([class_name,
"Time=%0.2fms"%(mean_time*1000.0,),
"Calls=%d"%(call_count,),
]) + "}"
color = '#99CC99'
print each, self.task_data[each]
if self.task_data[each]['color'] is not None:
color = self.task_data[each]['color']
subgraph = self.task_data[each]['subgraph']
subgraph_name = subgraph
if subgraph_name != None:
subgraph_name = "_".join(subgraph.split())
if not subgraphs.has_key(subgraph):
print "adding subgraph",subgraph
subgraphs[subgraph_name] = pydot.Cluster(subgraph_name,label=subgraph,shape='box',style='filled',fillcolor='#DDDDDD',nodesep=1.0)
subgraphs[None].add_subgraph(subgraphs[subgraph_name])
print "adding node",each,subgraph
subgraphs[subgraph_name].add_node(pydot.Node(each,label=node_label,shape='record',style='filled',fillcolor=color))
else:
# The task node was never executed
call_count = 0
mean_time = -1
class_name = self.task_data[each]['class_name']
node_label = "{" + " | ".join([class_name,
"Time=%0.2fms"%(mean_time*1000.0,),
"Calls=%d"%(call_count,),
]) + "}"
graph.add_node(pydot.Node(each,label=node_label,shape='record',style='filled',fillcolor='#CC3333'))
# Add Data Nodes
for each,subgraph in self.data_set:
subgraph_name = subgraph
if subgraph_name != None:
subgraph_name = "_".join(subgraph.split())
subgraphs[subgraph_name].add_node(pydot.Node(each,shape='box',style='rounded, filled',fillcolor='#9999ff'))
# Add edges.
for each,offsets in self.flow.iteritems():
offsets = list(offsets)
if len(offsets) == 1 and list(offsets)[0] == 0:
graph.add_edge(pydot.Edge(each[0],each[1]))
else:
offsets = formatOffsets(offsets)
graph.add_edge(pydot.Edge(each[0],each[1],label=offsets,label_scheme=2,labeldistance=2,labelfloat=False))
# Create a pv.Image containing the graph.
if as_image:
data = graph.create_png()
f = StringIO(data)
im = pv.Image(PIL.Image.open(f))
return im
return graph
def formatGroup(group):
try:
if len(group) > 3:
return formatGroup(group[:1])+"..."+formatGroup(group[-1:])
except:
pass
return ",".join(["%+d"%each for each in group])
def groupOffsets(offsets):
offsets.sort()
group = []
groups = [group]
for each in offsets:
if len(group) == 0 or each == group[-1]+1:
group.append(each)
else:
group = [each]
groups.append(group)
return groups
def formatOffsets(offsets):
groups = groupOffsets(offsets)
out = "("+ ",".join([formatGroup(each) for each in groups]) + ")"
return out
if __name__ == '__main__':
offsets = [-3,-2,-1,0,1,3,4,5,6,7,8,10,15,20,21,22,23,-21,-22,56,57]
offsets.sort()
print offsets
groups = groupOffsets(offsets)
print groups
print ",".join([formatGroup(each) for each in groups])
| mikeseven/pyvision | src/pyvision/beta/vtm.py | Python | bsd-3-clause | 27,866 |
"""
An implementation of OGC WFS 2.0.0 over the top of Django. This module requires that OGR be installed and that you use
either the PostGIS or Spatialite backends to GeoDjango for the layers you are retrieving. The module provides a
generic view, :py:class:WFS that provides standard WFS requests and responses and :py:class:WFST that provides WFS +
Transactions.
This is an initial cut at WFS compatibility. It is not perfect by any means, but it is a decent start. To use WFS with
your application, you will either need to use a GeoDjango model or derive from :py:class:WFSAdapter and
wrap a model class with it. Most URL configs will look like this::
    url(r'^wfs', WFS.as_view(models=[myapp.models.MyGeoModel]))
Models' Meta class can be modified to include attributes that can be picked up by the view as descriptive parameters
that will make it into the response of a GetCapabilities request.
The following features remain unimplemented:
* Transactions
* Creation and removal of stored queries
* Resolution
* The standard XML filter language (instead I intend to support OGR SQL and the Django filter language)
"""
from collections import namedtuple
from uuid import uuid4
from django.http import HttpResponse
from django.contrib.gis.db.models.query import GeoQuerySet
from django.contrib.gis.db.models import GeometryField
from django import forms as f
import json
from django.shortcuts import render_to_response
from ga_ows.views import common
from ga_ows.utils import MultipleValueField, BBoxField, CaseInsensitiveDict
from lxml import etree
from ga_ows.views.common import RequestForm, CommonParameters, GetCapabilitiesMixin
from osgeo import ogr
from django.conf import settings
from tempfile import gettempdir
from django.db import connections
import re
import os
#: Requests' Common Parameters
#: ===========================
class InputParameters(RequestForm):
"""
"""
srs_name = f.CharField()
input_format = f.CharField() # default should be "application/gml+xml; version=3.2"
srs_format = f.CharField(required=False)
@classmethod
def from_request(cls, request):
request['srs_name'] = request.get('srsname', 'EPSG:4326')
request['input_format'] = request.get('inputformat', "application/gml+xml; version=3.2")
class PresentationParameters(RequestForm):
count = f.IntegerField()
start_index = f.IntegerField()
max_features = f.IntegerField()
output_format = f.CharField()
@classmethod
def from_request(cls, request):
request['count'] = int(request.get('count', '1'))
request['start_index'] = int(request.get('startindex','1'))
request['max_features'] = int(request.get('maxfeatures', '1'))
request['output_format'] = request.get('outputformat',"application/gml+xml; version=3.2")
class AdHocQueryParameters(RequestForm):
type_names = MultipleValueField()
aliases = MultipleValueField(required=False)
filter = f.CharField(required=False)
filter_language = f.CharField(required=False)
resource_id = f.CharField(required=False)
bbox = BBoxField()
sort_by = f.CharField(required=False)
@classmethod
def from_request(cls, request):
request['type_names'] = request.getlist('typenames')
request['aliases'] = request.getlist('aliases')
request['filter'] = request.get('filter')
request['filter_language'] = request.get('filterlanguage')
request['resource_id'] = request.get('resource_id')
request['bbox'] = request.get('bbox')
request['sort_by'] = request.get('sortby')
class StoredQueryParameters(RequestForm):
stored_query_id = f.CharField(required=False)
@classmethod
def from_request(cls, request):
request['stored_query_id'] = request.get('storedquery_id')
class GetFeatureByIdParameters(RequestForm):
feature_id = f.CharField()
@classmethod
def from_request(cls, request):
request['feature_id'] = request.get('id')
class ResolveParameters(RequestForm):
resolve = f.CharField(required=False)
resolve_depth = f.IntegerField()
resolve_timeout = f.FloatField()
@classmethod
def from_request(cls, request):
request['resolve'] = request.get('resolve')
request['resolve_depth'] = int(request.get('resolve_depth','0'))
request['resolve_timeout'] = float(request.get('resolve_timeout', '0'))
#: Exceptions
#: ==========
class CannotLockAllFeatures(common.OWSException):
"""A locking request with a lockAction of ALL failed to lock all the requested features."""
class DuplicateStoredQueryIdValue(common.OWSException):
"""The identifier specified for a stored query expression is a duplicate."""
class DuplicateStoredQueryParameterName(common.OWSException):
"""This specified name for a stored query parameter is already being used within the same stored query definition."""
class FeaturesNotLocked(common.OWSException):
"""For servers that do not support automatic data locking (see 15.2.3.1), this exception indicates that a transaction operation is modifying features that have not previously been locked using a LockFeature (see Clause 12) or GetFeatureWithLock (see Clause 13) operation."""
class InvalidLockId(common.OWSException):
"""The value of the lockId parameter on a Transaction operation is invalid because it was not generated by the server."""
class InvalidValue(common.OWSException):
"""A Transaction (see Clause 15) has attempted to insert or change the value of a data component in a way that violates the schema of the feature."""
class LockHasExpired(common.OWSException):
"""The specified lock identifier on a Transaction or LockFeature operation has expired and is no longer valid."""
class OperationParsingFailed(common.OWSException):
"""The request is badly formed and failed to be parsed by the server."""
class OperationProcessingFailed(common.OWSException):
"""An error was encountered while processing the operation."""
class ResponseCacheExpired(common.OWSException):
"""The response cache used to support paging has expired and the results are no longer available."""
class OperationNotSupported(common.OWSException):
"""The operation is not yet implemented"""
########################################################################################################################
# Adapter class
########################################################################################################################
#: Class for describing features. A named tuple containing:
#: * name : str - the feature type name. this is what goes in the featureTypes parameter on a GetFeature request.
#: * title : str - the human readable name for this feature type
#: * abstract : str - a short description of this feature type, if necessary
#: * keywords : list(str) - keywords associated with this feature_type
#: * srs : str - the spatial reference system that is default for this feature type
#: * bbox : (minx, miny, maxx, maxy) - the bounding box for this feature type. must be present and filled in WGS84
#:
FeatureDescription = namedtuple('FeatureDescription', ('ns', 'ns_name', 'name','title','abstract','keywords','srs','bbox', 'schema'))
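# An example instance for reference (illustrative placeholder values only, following
# the way GeoDjangoWFSAdapter.get_feature_descriptions below fills these fields in):
#
#     FeatureDescription(ns='http://example.com/wfs/schema', ns_name='myapp',
#                        name='MyGeoModel', title='my geo model', abstract='...',
#                        keywords=[], srs=4326, bbox=(-84.5, 33.6, -84.2, 33.9),
#                        schema='http://example.com/wfs/schema')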
#: A description of a stored-query parameter. A named tuple containing:
#: * type : str - the parameter type
#: * name : str - the parameter name (computer-readable)
#: * title : str - the parameter name (human-readable)
#: * abstract : str - a short description of the parameter
#: * query_expression : :py:class:StoredQueryExpression
#:
StoredQueryParameter = namedtuple("StoredQueryParameter", ('type','name', 'title','abstract', 'query_expression'))
#: A description of how a stored query parameter should be filled in. A named tuple containing:
#: * text : str - template text for a query
#: * language : str - the language the query is expressed in.
#: * private : boolean - whether or not the query is private
#: * return_feature_types : the comma-separated computer-readable names of the feature types that are returned
StoredQueryExpression = namedtuple("StoredQueryExpression", ('text', 'language', 'private', 'return_feature_types'))
#: A description of a stored query. A named tuple containing:
#: * name : str - the computer-readable name of the stored query
#: * title : str - the human-readable name of the stored query
#: * feature_types : str - the comma-separated computer-readable names of the feature types that are returned
StoredQueryDescription = namedtuple("StoredQueryDescription", ('name', 'feature_types', 'title', 'parameters'))
class WFSAdapter(object):
"""
This adapter should be defined by any class that needs to expose WFS services on its interface. The adapter will
be called with an object as its working object and will encapsulate all the functionality needed to expose that
    object via WFS using the WFS view class in this module.
"""
def get_feature_descriptions(self, request, *types):
raise OperationNotSupported.at('GetFeatureDescription', 'Implementor should return list of FeatureDescriptions')
def list_stored_queries(self, request):
"""Subclasses of this class may implement extra stored queries by creating methods
matching the pattern::
def SQ_{QueryName}(self, request, parms):
pass
        where request is the Django HttpRequest object and parms are the
        GetFeature parameters (a sketch of a custom stored query follows this method)
"""
queries = dict([(q[3:],[]) for q in filter(lambda x: x.startswith("SQ_"),
reduce(
list.__add__,
[c.__dict__.keys() for c in self.__class__.mro()]
)
)])
return queries
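    # A sketch of adding a stored query in an adapter subclass (the adapter, app, and
    # model names are illustrative). GeoDjangoWFSAdapter.get_features below dispatches
    # storedquery_id=Newest to a method named SQ_Newest and expects it to return a
    # GeoQuerySet or OGR dataset, just like get_features itself:
    #
    #     class MyAdapter(GeoDjangoWFSAdapter):
    #         def SQ_Newest(self, request, parms):
    #             return self.models['myapp:MyGeoModel'].objects.order_by('-pk')[:10]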
def get_features(self, request, parms):
raise OperationNotSupported.at('GetFeature', "Implementor is given a GetFeatures.Parameters object and should return an OGR dataset or a GeoDjango QuerySet")
def supports_feature_versioning(self):
return False
class GeoDjangoWFSAdapter(WFSAdapter):
def __init__(self, models):
self.models = {}
self.srids = {}
# NOTE this assumes that there will be only one geometry field per model. This is of course not necessarily the case, but it works 95% of the time.
self.geometries = {}
for model in models:
self.models[model._meta.app_label + ":" + model._meta.object_name] = model
for field in model._meta.fields:
if isinstance(field, GeometryField):
self.geometries[model._meta.app_label + ":" + model._meta.object_name] = field
self.srids[model._meta.app_label + ":" + model._meta.object_name] = field.srid
def list_stored_queries(self, request):
sq = super(GeoDjangoWFSAdapter, self).list_stored_queries(request)
fts = list(self.models.keys())
for k in sq.keys():
sq[k] = StoredQueryDescription(name=k, feature_types=fts, title=k, parameters=[])
return sq
def get_feature_descriptions(self, request, *types):
namespace = request.build_absolute_uri().split('?')[0] + "/schema" # todo: include https://bitbucket.org/eegg/django-model-schemas/wiki/Home
for model in self.models.values():
if model.objects.count() > 0:
extent = model.objects.extent()
else:
extent = (0,0,0,0)
yield FeatureDescription(
ns=namespace,
ns_name=model._meta.app_label,
name=model._meta.object_name,
abstract=model.__doc__,
title=model._meta.verbose_name,
keywords=[],
srs=self.srids[model._meta.app_label + ":" + model._meta.object_name],
bbox=extent,
schema=namespace
)
def get_features(self, request, parms):
if parms.cleaned_data['stored_query_id']:
squid = "SQ_" + parms.cleaned_data['stored_query_id']
try:
return self.__getattribute__(squid)(request, parms)
except AttributeError:
raise OperationNotSupported.at('GetFeatures', 'stored_query_id={squid}'.format(squid=squid))
else:
#try:
return self.AdHocQuery(request, parms)
#except KeyError as k:
# raise OperationProcessingFailed.at("GetFeatures", str(k))
#except ValueError as v:
# raise OperationParsingFailed.at("GetFeatures", "filter language not supported or invalid JSON")
def AdHocQuery(self, request, parms):
type_names = parms.cleaned_data['type_names'] # only support one type-name at a time (model) for now
#aliases = parms.cleaned_data['aliases'] # ignored for now
flt = parms.cleaned_data['filter'] # filter should be in JSON
flt_lang = parms.cleaned_data['filter_language'] # only support JSON now
#res_id = parms.cleaned_data['resource_id'] # ignored
bbox = parms.cleaned_data['bbox']
sort_by = parms.cleaned_data['sort_by']
count = parms.cleaned_data['count']
if not count:
count = parms.cleaned_data['max_features']
start_index = parms.cleaned_data['start_index']
srs_name = parms.cleaned_data['srs_name'] # assume bbox is in this
srs_format = parms.cleaned_data['srs_format'] # this can be proj, None (srid), srid, or wkt.
model = self.models[type_names[0]] # support only the first type-name for now.
geometry_field = self.geometries[type_names[0]]
query_set = model.objects.all()
if bbox:
mnx, mny, mxx, mxy = bbox
            query_set = query_set.filter(**{ geometry_field.name + "__bboverlaps" :
"POLYGON(({mnx} {mny}, {mxx} {mny}, {mxx} {mxy}, {mnx} {mxy}, {mnx} {mny}))".format(
mnx=mnx,
mny=mny,
mxx=mxx,
mxy=mxy)
})
if flt:
flt = json.loads(flt)
query_set = query_set.filter(**flt)
if sort_by and ',' in sort_by:
sort_by = sort_by.split(',')
query_set = query_set.order_by(*sort_by)
elif sort_by:
query_set = query_set.order_by(sort_by)
if start_index and count:
query_set = query_set[start_index:start_index+count]
elif start_index:
query_set = query_set[start_index:]
elif count:
query_set = query_set[:count]
if srs_name:
if (not srs_format or srs_format == 'srid') and srs_name != geometry_field.srid:
if srs_name.lower().startswith('epsg:'):
srs_name = srs_name[5:]
                query_set = query_set.transform(int(srs_name))
# TODO support proj and WKT formats by manually transforming geometries.
# First create a list() from the queryset, then create SpatialReference objects for
# the source and dest. Then import them from their corresponding SRS definitions
# then loop over the list and transform each model instance's geometry record
return query_set
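    # A sketch of the kind of ad-hoc GetFeature request this method serves (the app,
    # model, and field names are placeholders; parameter names follow the parsing in
    # AdHocQueryParameters, PresentationParameters, and InputParameters above, and the
    # query string is shown unencoded for readability). The filter is a JSON object of
    # Django field lookups applied via QuerySet.filter():
    #
    #     /wfs?service=WFS&version=2.0.0&request=GetFeature
    #         &typenames=myapp:MyGeoModel
    #         &bbox=-84.5,33.6,-84.2,33.9
    #         &filter={"name__icontains": "park"}
    #         &sortby=name&count=10&outputformat=GeoJSON&srsname=EPSG:4326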
def SQ_GetFeatureById(self, request, parms):
my_parms = GetFeatureByIdParameters.create(request.REQUEST)
typename, pk = my_parms.cleaned_data['feature_id'].split('.')
return self.models[typename].objects.filter(pk=int(pk))
# WFS itself. All the individual classes are defined as mixins for the sake of modularity and ease of debugging.
class WFSBase(object):
"""The base class for WFS mixins. Makes sure that all mixins assume an adapter"""
adapter = None
class DescribeFeatureTypeMixin(WFSBase):
"""
Defines the DescribeFeatureType operation found in section 9 of the WFS standard
"""
class Parameters(
CommonParameters
):
type_names = MultipleValueField()
output_format = f.CharField()
@classmethod
def from_request(cls, request):
request['type_names'] = request.getlist('typename') + request.getlist('typenames')
request['output_format'] = request.get('outputformat', "application/gml+xml; version=3.2")
def _parse_xml_DescribeFeatureType(self, request):
"""See section 9.4.2 of the OGC spec. Note that the spec is unclear how to encode the typeNames parameter. Its
example says one thing and the standard says another, so I've done both here.
Returns a named tuple:
* type_names: 'all' or list. all should return all feature types. list should return the named feature types.
"""
def add_ns(it, ns):
x = it.split(':')
if len(x) > 1:
return ns[x[0]], x[1]
else:
                return '', x[0]
root = etree.fromstring(request)
xmlns = root.get('xmlns')
output_format = root.get('outputFormat', 'application/gml+xml; version=3.2')
if xmlns is not None:
xmlns = "{" + xmlns + "}"
else:
xmlns = ""
namespaces = {}
for name, value in root.attrib.items():
if name.startswith(xmlns):
namespaces[value] = name[len(xmlns):]
type_names = root.get('typeNames')
if type_names is not None:
type_names = [add_ns(n, namespaces) for n in type_names.split(',')]
else:
type_names = []
for elt in root:
if elt.tag.endswith("TypeName"):
namespace, name = elt.text.split(":")
namespace = namespaces[namespace]
type_names.append((namespace, name))
if not len(type_names):
type_names = 'all'
return DescribeFeatureTypeMixin.Parameters.create(CaseInsensitiveDict({"typenames" : type_names, "outputformat" : output_format}))
def _response_xml_DescribeFeatureType(self, response):
return render_to_response("ga_ows/WFS_DescribeFeature.template.xml", { "feature_types" : list(response) })
def _response_json_DescribeFeatureType(self, response, callback=None):
rsp = []
for feature_type in response:
rsp.append({
"schema" : feature_type.schema,
"name" : feature_type.name,
"abstract" : feature_type.abstract,
"title" : feature_type.title,
"ns_name" : feature_type.ns_name
})
if callback is not None:
return HttpResponse(callback + "(" + json.dumps(rsp) + ")", mimetype='text/javascript')
else:
return HttpResponse(json.dumps(rsp), mimetype='application/json')
def DescribeFeatureType(self, request, kwargs):
"""See section 9 of the OGC WFS standards document."""
if 'xml' in kwargs:
parms = self._parse_xml_DescribeFeatureType(kwargs['xml'])
else:
parms = DescribeFeatureTypeMixin.Parameters.create(kwargs)
response = self.adapter.get_feature_descriptions(request, *parms.cleaned_data['type_names'])
if parms.cleaned_data['output_format'].endswith('json'):
if 'callback' in kwargs:
return self._response_json_DescribeFeatureType(response, callback=kwargs['callback'])
elif 'jsonp' in kwargs:
return self._response_json_DescribeFeatureType(response, callback=kwargs['jsonp'])
else:
return self._response_json_DescribeFeatureType(response)
else:
return self._response_xml_DescribeFeatureType(response)
class GetFeatureMixin(WFSBase):
"""
Defines the GetFeature operation in section 11 of the WFS standard.
"""
class Parameters(
CommonParameters,
InputParameters,
PresentationParameters,
AdHocQueryParameters,
StoredQueryParameters
):
pass
def _parse_xml_GetFeature(self, request):
"""
"""
raise OperationNotSupported.at("GetFeature", "XML encoded POST for WFS.GetFeature needs implemented")
#TODO implement this method.
def GetFeature(self, request, kwargs):
"""
"""
mimetypes = {
'GeoJSON' : 'application/json'
}
if 'xml' in kwargs:
parms = self._parse_xml_GetFeature(kwargs['xml'])
else:
parms = GetFeatureMixin.Parameters.create(kwargs)
# must be an OGR dataset or a QuerySet containing one layer
response = self.adapter.get_features(request, parms)
if isinstance(response, GeoQuerySet):
layer = None
db_params = settings.DATABASES[response.db]
if db_params['ENGINE'].endswith('postgis'):
                # Then we take the raw SQL from the QuerySet and pass it through OGR instead. This causes the SQL to be
# executed twice, but it's also the most expedient way to create our output. This could be done better,
# but it gets it out the door for now.
# Create the query from the QuerySet
# adapt() prevents SQL injection attacks
from psycopg2.extensions import adapt
query, parameters = response.query.get_compiler(response.db).as_sql()
parameters = tuple([adapt(p) for p in parameters])
query = query % parameters
# Connect to PostGIS with OGR.
drv = ogr.GetDriverByName("PostgreSQL")
connection_string = "PG:dbname='{db}'".format(db=db_params['NAME'])
if 'HOST' in db_params and db_params['HOST']:
connection_string += " host='{host}'".format(host=db_params['HOST'])
if 'PORT' in db_params and db_params['PORT']:
connection_string += " port='{port}'".format(port=db_params['PORT'])
if 'USER' in db_params and db_params['USER']:
connection_string += " user='{user}'".format(user=db_params['USER'])
if 'PASSWORD' in db_params and db_params['PASSWORD']:
connection_string += " password='{password}'".format(password=db_params['PASSWORD'])
conn = drv.Open(connection_string)
# Put the QuerySet into a layer the hard way.
layer = conn.ExecuteSQL(query)
elif db_params['ENGINE'].endswith('spatialite'):
# This works the same way as the if-statement above.
# todo replace this with the sqlite version of the same thing for preventing SQL injection attacks
from psycopg2.extensions import adapt
query, parameters = response.query.get_compiler(response.db).as_sql()
parameters = tuple([adapt(p) for p in parameters])
query = query % parameters
drv = ogr.GetDriverByName("Spatialite")
conn = drv.Open(db_params['NAME'])
layer = conn.ExecuteSQL(query)
else:
layer = response.GetLayerByIndex(0)
drivers = dict([(ogr.GetDriver(drv).GetName(), ogr.GetDriver(drv)) for drv in range(ogr.GetDriverCount()) if ogr.GetDriver(drv).TestCapability(ogr.ODrCCreateDataSource)])
output_format = parms.cleaned_data['output_format'].decode('ascii')
if 'gml' in output_format or 'xml' in output_format:
tmpname = "{tmpdir}{sep}{uuid}.{output_format}".format(tmpdir=gettempdir(), uuid=uuid4(), output_format='gml', sep=os.path.sep)
drv = ogr.GetDriverByName("GML")
ds = drv.CreateDataSource(tmpname)
l2 = ds.CopyLayer(layer, 'WFS_result')
l2.SyncToDisk()
del ds
responsef = open(tmpname)
rdata = responsef.read()
responsef.close()
os.unlink(tmpname)
return HttpResponse(rdata, mimetype=output_format)
elif output_format in drivers:
tmpname = "{tmpdir}{sep}{uuid}.{output_format}".format(tmpdir=gettempdir(), uuid=uuid4(), output_format=output_format, sep=os.path.sep)
drv = drivers[output_format]
ds = drv.CreateDataSource(tmpname)
l2 = ds.CopyLayer(layer, 'WFS_result')
l2.SyncToDisk()
del ds
responsef = open(tmpname)
rdata = responsef.read()
responsef.close()
os.unlink(tmpname)
return HttpResponse(rdata, mimetype=mimetypes.get(output_format,'text/plain'))
else:
raise OperationProcessingFailed.at('GetFeature', 'outputFormat {of} not supported ({formats})'.format(of=output_format, formats=drivers.keys()))
class ListStoredQueriesMixin(WFSBase):
"""
Defines the ListStoredQueries operation in section 14.3 of the standard
"""
def ListStoredQueries(self, request, kwargs):
"""
"""
queries = self.adapter.list_stored_queries(request)
response = etree.Element("ListStoredQueriesResponse")
for query, description in queries.items():
sub = etree.SubElement(response, "StoredQuery")
etree.SubElement(sub, "Title").text = query
for feature_type in description.feature_types:
etree.SubElement(sub, 'ReturnFeatureType').text = feature_type
return HttpResponse(etree.tostring(response, pretty_print=True), mimetype='text/xml')
class DescribeStoredQueriesMixin(WFSBase):
class Parameters(CommonParameters):
stored_query_id = MultipleValueField()
@classmethod
def from_request(cls, request):
request['stored_query_id'] = request.getlist('storedqueryid')
def DescribeStoredQueries(self, request, kwargs):
parms = DescribeStoredQueriesMixin.Parameters.create(kwargs)
inspected_queries = parms.cleaned_data['stored_query_id']
response = etree.Element('DescribeStoredQueriesResponse')
for query, description in filter(lambda (x,y): x in inspected_queries, self.adapter.list_stored_queries(request).items()):
desc = etree.SubElement(response, "StoredQueryDescription")
etree.SubElement(desc, 'Title').text = query
for parameter in description.parameters:
p = etree.SubElement(desc, "Parameter", attrib={"name" : parameter.name, "type" : parameter.type})
etree.SubElement(p, 'Title').text = parameter.title
                etree.SubElement(p, 'Abstract').text = parameter.abstract
if parameter.query_expression:
etree.SubElement(p, "QueryExpressionText", attrib={
"isPrivate" : parameter.query_expression.private == True,
"language" : parameter.query_expression.language,
"returnFeatureTypes" : ' '.join(parameter.query_expression.return_feature_types)
}).text = parameter.query_expression.text
return HttpResponse(etree.tostring(response, pretty_print=True), mimetype='text/xml')
# TODO implement stored queries
class CreateStoredQuery(WFSBase):
def CreateStoredQuery(self, request, kwargs):
raise OperationNotSupported.at("CreateStoredQuery")
class DropStoredQuery(WFSBase):
def DropStoredQuery(self, request, kwargs):
raise OperationNotSupported.at("DropStoredQuery")
# TODO implement transactions
class TransactionMixin(WFSBase):
def Transaction(self, request, kwargs):
"""
"""
raise OperationNotSupported.at('Transaction')
class GetFeatureWithLockMixin(WFSBase):
def GetFeatureWithLock(self, request, kwargs):
raise OperationNotSupported.at("GetFeatureWithLock")
class LockFeatureMixin(WFSBase):
def LockFeature(self, request, kwargs):
raise OperationNotSupported.at('LockFeature')
class GetPropertyValueMixin(WFSBase):
class Parameters(StoredQueryParameters, AdHocQueryParameters):
value_reference = f.CharField()
resolve_path = f.CharField(required=False)
        @classmethod
        def from_request(cls, request):
request['value_reference'] = request['valuereference']
request['resolve_path'] = request['resolvepath']
def GetPropertyValue(self, request, kwargs):
raise OperationNotSupported.at('GetPropertyValue')
class WFS(
common.OWSView,
GetCapabilitiesMixin,
DescribeFeatureTypeMixin,
DescribeStoredQueriesMixin,
GetFeatureMixin,
ListStoredQueriesMixin,
GetPropertyValueMixin
):
""" A generic view supporting the WFS 2.0.0 standard from the OGC"""
adapter = None
models = None
title = None
keywords = []
fees = None
access_constraints = None
provider_name = None
addr_street = None
addr_city = None
addr_admin_area = None
addr_postcode = None
addr_country = None
addr_email = None
def __init__(self, **kwargs):
common.OWSView.__init__(self, **kwargs)
if self.models:
self.adapter = GeoDjangoWFSAdapter(self.models)
def get_capabilities_response(self, request, params):
return render_to_response('ga_ows/WFS_GetCapabilities.template.xml', {
"title" : self.title,
"keywords" : self.keywords,
"fees" : self.fees,
"access_constraints" : self.access_constraints,
"endpoint" : request.build_absolute_uri().split('?')[0],
"output_formats" : [ogr.GetDriver(drv).GetName() for drv in range(ogr.GetDriverCount()) if ogr.GetDriver(drv).TestCapability(ogr.ODrCCreateDataSource)],
"addr_street" : self.addr_street,
"addr_city" : self.addr_city,
"addr_admin_area" : self.addr_admin_area,
"addr_postcode" : self.addr_postcode,
"addr_country" : self.addr_country,
"feature_versioning" : False,
"transactional" : False,
'feature_types' : self.adapter.get_feature_descriptions(request)
})
class WFST(WFS,TransactionMixin,GetFeatureWithLockMixin, LockFeatureMixin):
""" A generic view supporting the WFS 2.0.0 standard from the OGC including transactions"""
def get_capabilities_response(self, request, params):
return render_to_response('ga_ows/WFS_GetCapabilities.template.xml', {
"title" : self.title,
"keywords" : self.keywords,
"fees" : self.fees,
"access_constraints" : self.access_constraints,
"endpoint" : request.build_absolute_uri().split('?')[0],
"output_formats" : [ogr.GetDriver(drv).GetName() for drv in range(ogr.GetDriverCount()) if ogr.GetDriver(drv).TestCapability(ogr.ODrCCreateDataSource)],
"addr_street" : self.addr_street,
"addr_city" : self.addr_city,
"addr_admin_area" : self.addr_admin_area,
"addr_postcode" : self.addr_postcode,
"addr_country" : self.addr_country,
"feature_versioning" : self.adapter.supports_feature_versioning(),
"transactional" : True,
'feature_types' : self.adapter.get_feature_descriptions(request)
})
| hydroshare/hydroshare_temp | ga_ows/views/wfs.py | Python | bsd-3-clause | 31,334 |
# -*- coding: utf-8 -*-
import morepath
from morepath.request import Response
from morepath.authentication import Identity, NO_IDENTITY
from .fixtures import identity_policy
import base64
import json
from webtest import TestApp as Client
try:
from cookielib import CookieJar
except ImportError:
from http.cookiejar import CookieJar
def test_no_permission():
class app(morepath.App):
pass
class Model(object):
def __init__(self, id):
self.id = id
class Permission(object):
pass
@app.path(model=Model, path='{id}',
variables=lambda model: {'id': model.id})
def get_model(id):
return Model(id)
@app.view(model=Model, permission=Permission)
def default(self, request):
return "Model: %s" % self.id
c = Client(app())
c.get('/foo', status=403)
def test_permission_directive_identity():
class app(morepath.App):
pass
class Model(object):
def __init__(self, id):
self.id = id
class Permission(object):
pass
@app.verify_identity()
def verify_identity(identity):
return True
@app.path(model=Model, path='{id}',
variables=lambda model: {'id': model.id})
def get_model(id):
return Model(id)
@app.permission_rule(model=Model, permission=Permission)
def get_permission(identity, model, permission):
if model.id == 'foo':
return True
else:
return False
@app.view(model=Model, permission=Permission)
def default(self, request):
return "Model: %s" % self.id
@app.identity_policy()
class IdentityPolicy(object):
def identify(self, request):
return Identity('testidentity')
def remember(self, response, request, identity):
pass
def forget(self, response, request):
pass
c = Client(app())
response = c.get('/foo')
assert response.body == b'Model: foo'
response = c.get('/bar', status=403)
def test_permission_directive_with_app_arg():
class App(morepath.App):
pass
class Model(object):
def __init__(self, id):
self.id = id
class Permission(object):
pass
@App.verify_identity()
def verify_identity(identity):
return True
@App.path(model=Model, path='{id}',
variables=lambda model: {'id': model.id})
def get_model(id):
return Model(id)
@App.permission_rule(model=Model, permission=Permission)
def get_permission(app, identity, model, permission):
assert isinstance(app, App)
if model.id == 'foo':
return True
else:
return False
@App.view(model=Model, permission=Permission)
def default(self, request):
return "Model: %s" % self.id
@App.identity_policy()
class IdentityPolicy(object):
def identify(self, request):
return Identity('testidentity')
def remember(self, response, request, identity):
pass
def forget(self, response, request):
pass
c = Client(App())
response = c.get('/foo')
assert response.body == b'Model: foo'
response = c.get('/bar', status=403)
def test_permission_directive_no_identity():
class app(morepath.App):
pass
class Model(object):
def __init__(self, id):
self.id = id
class Permission(object):
pass
@app.path(model=Model, path='{id}',
variables=lambda model: {'id': model.id})
def get_model(id):
return Model(id)
@app.permission_rule(model=Model, permission=Permission, identity=None)
def get_permission(identity, model, permission):
if model.id == 'foo':
return True
else:
return False
@app.view(model=Model, permission=Permission)
def default(self, request):
return "Model: %s" % self.id
c = Client(app())
response = c.get('/foo')
assert response.body == b'Model: foo'
response = c.get('/bar', status=403)
def test_policy_action():
c = Client(identity_policy.app())
response = c.get('/foo')
assert response.body == b'Model: foo'
response = c.get('/bar', status=403)
def test_no_identity_policy():
class App(morepath.App):
pass
@App.path(path='{id}')
class Model(object):
def __init__(self, id):
self.id = id
class Permission(object):
pass
@App.view(model=Model, permission=Permission)
def default(self, request):
return "Model: %s" % self.id
@App.view(model=Model, name='log_in')
def log_in(self, request):
response = Response()
request.app.remember_identity(
response, request, Identity(userid='user', payload='Amazing'))
return response
@App.view(model=Model, name='log_out')
def log_out(self, request):
response = Response()
request.app.forget_identity(response, request)
return response
@App.verify_identity()
def verify_identity(identity):
return True
c = Client(App())
# if you protect things with permissions and you
# install no identity policy, doing a log in has
# no effect
c.get('/foo', status=403)
c.get('/foo/log_in')
c.get('/foo', status=403)
c.get('/foo/log_out')
c.get('/foo', status=403)
class DumbCookieIdentityPolicy(object):
"""A very insecure cookie-based policy.
Only for testing. Don't use in practice!
"""
def identify(self, request):
data = request.cookies.get('dumb_id', None)
if data is None:
return NO_IDENTITY
data = json.loads(base64.b64decode(data).decode())
return Identity(**data)
def remember(self, response, request, identity):
data = base64.b64encode(str.encode(json.dumps(identity.as_dict())))
response.set_cookie('dumb_id', data)
def forget(self, response, request):
response.delete_cookie('dumb_id')
def test_cookie_identity_policy():
class app(morepath.App):
pass
@app.path(path='{id}')
class Model(object):
def __init__(self, id):
self.id = id
class Permission(object):
pass
@app.permission_rule(model=Model, permission=Permission)
def get_permission(identity, model, permission):
return identity.userid == 'user'
@app.view(model=Model, permission=Permission)
def default(self, request):
return "Model: %s" % self.id
@app.view(model=Model, name='log_in')
def log_in(self, request):
response = Response()
request.app.remember_identity(
response, request, Identity(userid='user', payload='Amazing'))
return response
@app.view(model=Model, name='log_out')
def log_out(self, request):
response = Response()
request.app.forget_identity(response, request)
return response
@app.identity_policy()
def policy():
return DumbCookieIdentityPolicy()
@app.verify_identity()
def verify_identity(identity):
return True
c = Client(app(), cookiejar=CookieJar())
response = c.get('/foo', status=403)
response = c.get('/foo/log_in')
response = c.get('/foo', status=200)
assert response.body == b'Model: foo'
response = c.get('/foo/log_out')
response = c.get('/foo', status=403)
def test_default_verify_identity():
class app(morepath.App):
pass
identity = morepath.Identity('foo')
assert not app()._verify_identity(identity)
def test_verify_identity_directive():
class app(morepath.App):
pass
@app.verify_identity()
def verify_identity(identity):
return identity.password == 'right'
identity = morepath.Identity('foo', password='wrong')
assert not app()._verify_identity(identity)
identity = morepath.Identity('foo', password='right')
assert app()._verify_identity(identity)
def test_verify_identity_directive_app_arg():
class App(morepath.App):
pass
@App.verify_identity()
def verify_identity(app, identity):
assert isinstance(app, App)
return identity.password == 'right'
identity = morepath.Identity('foo', password='wrong')
assert not App()._verify_identity(identity)
identity = morepath.Identity('foo', password='right')
assert App()._verify_identity(identity)
def test_verify_identity_directive_identity_argument():
class app(morepath.App):
pass
class PlainIdentity(morepath.Identity):
pass
@app.verify_identity(identity=object)
def verify_identity(identity):
return False
@app.verify_identity(identity=PlainIdentity)
def verify_plain_identity(identity):
return identity.password == 'right'
identity = PlainIdentity('foo', password='wrong')
assert not app()._verify_identity(identity)
identity = morepath.Identity('foo', password='right')
assert not app()._verify_identity(identity)
identity = PlainIdentity('foo', password='right')
assert app()._verify_identity(identity)
def test_false_verify_identity():
class app(morepath.App):
pass
@app.path(path='{id}')
class Model(object):
def __init__(self, id):
self.id = id
class Permission(object):
pass
@app.view(model=Model, permission=Permission)
def default(self, request):
return "Model: %s" % self.id
@app.view(model=Model, name='log_in')
def log_in(self, request):
response = Response()
request.app.remember_identity(
response, request,
Identity(userid='user', payload='Amazing'))
return response
@app.identity_policy()
def policy():
return DumbCookieIdentityPolicy()
@app.verify_identity()
def verify_identity(identity):
return False
c = Client(app(), cookiejar=CookieJar())
c.get('/foo', status=403)
c.get('/foo/log_in')
c.get('/foo', status=403)
def test_dispatch_verify_identity():
# This app uses two Identity classes, morepath.Identity and
    # Anonymous, which are verified in two different ways (see the two
# functions a little further down that are decorated by
# @App.verify_identity).
class App(morepath.App):
pass
@App.path(path='{id}')
class Model(object):
def __init__(self, id):
self.id = id
class Read(object):
"""Read Permission"""
class Anonymous(Identity):
def __init__(self, **kw):
super(Anonymous, self).__init__(userid=None, **kw)
@App.permission_rule(model=Model, permission=Read)
def get_permission(identity, model, permission):
return True
@App.view(model=Model, permission=Read)
def default(self, request):
if request.identity.userid == self.id:
return "Read restricted: %s" % self.id
return "Read shared: %s" % self.id
@App.identity_policy()
class HeaderIdentityPolicy(object):
def identify(self, request):
user = request.headers.get('user', None)
if user is not None:
if user == '':
return Anonymous()
return Identity(
userid=user,
password=request.headers['password'])
def remember(self, response, request, identity):
pass
def forget(self, response, request):
pass
@App.verify_identity(identity=Identity)
def verify_identity(identity):
return identity.password == 'secret'
@App.verify_identity(identity=Anonymous)
def verify_anonymous(identity):
return True
c = Client(App())
r = c.get('/foo', status=403)
r = c.get('/foo', status=403, headers=dict(user='foo', password='wrong'))
r = c.get('/foo', status=403, headers=dict(user='bar', password='wrong'))
r = c.get('/foo', status=200, headers={'user': ''})
assert r.text == 'Read shared: foo'
r = c.get('/foo', status=200, headers=dict(user='foo', password='secret'))
assert r.text == 'Read restricted: foo'
r = c.get('/foo', status=200, headers=dict(user='bar', password='secret'))
assert r.text == 'Read shared: foo'
def test_settings():
class App(morepath.App):
pass
class Model(object):
pass
@App.verify_identity()
def verify_identity(identity):
return True
@App.path(model=Model, path='test')
def get_model():
return Model()
@App.view(model=Model)
def default(self, request):
return "%s, your token is valid." % request.identity.userid
@App.setting_section(section="test")
def get_test_settings():
return {'encryption_key': 'secret'}
@App.identity_policy()
def get_identity_policy(settings):
test_settings = settings.test.__dict__.copy()
return IdentityPolicy(**test_settings)
class IdentityPolicy(object):
def __init__(self, encryption_key):
self.encryption_key = encryption_key
def identify(self, request):
token = self.get_token(request)
if token is None or not self.token_is_valid(
token, self.encryption_key
):
return NO_IDENTITY
return Identity('Testuser')
def remember(self, response, request, identity):
pass
def forget(self, response, request):
pass
def get_token(self, request):
try:
authtype, token = request.authorization
except ValueError:
return None
if authtype.lower() != 'bearer':
return None
return token
def token_is_valid(self, token, encryption_key):
return token == encryption_key # fake validation
c = Client(App())
headers = {'Authorization': 'Bearer secret'}
response = c.get('/test', headers=headers)
assert response.body == b'Testuser, your token is valid.'
def test_prevent_poisoned_host_headers():
class App(morepath.App):
pass
@App.path(path='')
class Model(object):
pass
@App.view(model=Model)
def view_model(self, request):
return 'ok'
poisoned_hosts = (
'[email protected]',
'example.com:[email protected]',
'example.com:[email protected]:80',
'example.com:80/badpath',
'example.com: recovermypassword.com',
)
legit_hosts = (
'example.com',
'example.com:80',
'12.34.56.78',
'12.34.56.78:443',
'[2001:19f0:feee::dead:beef:cafe]',
'[2001:19f0:feee::dead:beef:cafe]:8080',
        'xn--4ca9at.com', # Punycode for öäü.com
)
c = Client(App())
for host in legit_hosts:
response = c.get('/', headers={'Host': host})
assert response.status_code == 200
for host in poisoned_hosts:
response = c.get('/', headers={'Host': host}, expect_errors=True)
assert response.status_code == 400
def test_settings_in_permission_rule():
class App(morepath.App):
pass
@App.path(path='{id}')
class Model(object):
def __init__(self, id):
self.id = id
class Permission(object):
pass
@App.verify_identity()
def verify_identity(identity):
return True
@App.setting_section(section="permissions")
def get_roles_setting():
return {
'read': {'foo'},
}
@App.permission_rule(model=Model, permission=Permission)
def get_permission(app, identity, model, permission):
return model.id in app.settings.permissions.read
@App.view(model=Model, permission=Permission)
def default(self, request):
return "Model: %s" % self.id
@App.identity_policy()
class IdentityPolicy(object):
def identify(self, request):
return Identity('testidentity')
def remember(self, response, request, identity):
pass
def forget(self, response, request):
pass
c = Client(App())
response = c.get('/foo')
assert response.body == b'Model: foo'
response = c.get('/bar', status=403)
| taschini/morepath | morepath/tests/test_security.py | Python | bsd-3-clause | 16,352 |
#!/usr/bin/env python3
# flake8: noqa
import io
import sys
if len (sys.argv) != 5:
print ("""usage: ./gen-use-table.py IndicSyllabicCategory.txt IndicPositionalCategory.txt UnicodeData.txt Blocks.txt
Input files, as of Unicode 12:
* https://unicode.org/Public/UCD/latest/ucd/IndicSyllabicCategory.txt
* https://unicode.org/Public/UCD/latest/ucd/IndicPositionalCategory.txt
* https://unicode.org/Public/UCD/latest/ucd/UnicodeData.txt
* https://unicode.org/Public/UCD/latest/ucd/Blocks.txt""", file=sys.stderr)
sys.exit (1)
BLACKLISTED_BLOCKS = ["Thai", "Lao"]
files = [io.open (x, encoding='utf-8') for x in sys.argv[1:]]
headers = [[f.readline () for i in range (2)] for j,f in enumerate(files) if j != 2]
headers.append (["UnicodeData.txt does not have a header."])
data = [{} for f in files]
values = [{} for f in files]
for i, f in enumerate (files):
for line in f:
j = line.find ('#')
if j >= 0:
line = line[:j]
fields = [x.strip () for x in line.split (';')]
if len (fields) == 1:
continue
uu = fields[0].split ('..')
start = int (uu[0], 16)
if len (uu) == 1:
end = start
else:
end = int (uu[1], 16)
t = fields[1 if i != 2 else 2]
for u in range (start, end + 1):
data[i][u] = t
values[i][t] = values[i].get (t, 0) + end - start + 1
defaults = ('Other', 'Not_Applicable', 'Cn', 'No_Block')
# TODO Characters that are not in Unicode Indic files, but used in USE
data[0][0x034F] = defaults[0]
data[0][0x1B61] = defaults[0]
data[0][0x1B63] = defaults[0]
data[0][0x1B64] = defaults[0]
data[0][0x1B65] = defaults[0]
data[0][0x1B66] = defaults[0]
data[0][0x1B67] = defaults[0]
data[0][0x1B69] = defaults[0]
data[0][0x1B6A] = defaults[0]
data[0][0x2060] = defaults[0]
# TODO https://github.com/harfbuzz/harfbuzz/pull/1685
data[0][0x1B5B] = 'Consonant_Placeholder'
data[0][0x1B5C] = 'Consonant_Placeholder'
data[0][0x1B5F] = 'Consonant_Placeholder'
data[0][0x1B62] = 'Consonant_Placeholder'
data[0][0x1B68] = 'Consonant_Placeholder'
# TODO https://github.com/harfbuzz/harfbuzz/issues/1035
data[0][0x11C44] = 'Consonant_Placeholder'
data[0][0x11C45] = 'Consonant_Placeholder'
# TODO https://github.com/harfbuzz/harfbuzz/pull/1399
data[0][0x111C8] = 'Consonant_Placeholder'
for u in range (0xFE00, 0xFE0F + 1):
data[0][u] = defaults[0]
# Merge data into one dict:
for i,v in enumerate (defaults):
values[i][v] = values[i].get (v, 0) + 1
combined = {}
for i,d in enumerate (data):
for u,v in d.items ():
if i >= 2 and not u in combined:
continue
if not u in combined:
combined[u] = list (defaults)
combined[u][i] = v
combined = {k:v for k,v in combined.items() if v[3] not in BLACKLISTED_BLOCKS}
data = combined
del combined
num = len (data)
property_names = [
# General_Category
'Cc', 'Cf', 'Cn', 'Co', 'Cs', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu', 'Mc',
'Me', 'Mn', 'Nd', 'Nl', 'No', 'Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po',
'Ps', 'Sc', 'Sk', 'Sm', 'So', 'Zl', 'Zp', 'Zs',
# Indic_Syllabic_Category
'Other',
'Bindu',
'Visarga',
'Avagraha',
'Nukta',
'Virama',
'Pure_Killer',
'Invisible_Stacker',
'Vowel_Independent',
'Vowel_Dependent',
'Vowel',
'Consonant_Placeholder',
'Consonant',
'Consonant_Dead',
'Consonant_With_Stacker',
'Consonant_Prefixed',
'Consonant_Preceding_Repha',
'Consonant_Succeeding_Repha',
'Consonant_Subjoined',
'Consonant_Medial',
'Consonant_Final',
'Consonant_Head_Letter',
'Consonant_Initial_Postfixed',
'Modifying_Letter',
'Tone_Letter',
'Tone_Mark',
'Gemination_Mark',
'Cantillation_Mark',
'Register_Shifter',
'Syllable_Modifier',
'Consonant_Killer',
'Non_Joiner',
'Joiner',
'Number_Joiner',
'Number',
'Brahmi_Joining_Number',
# Indic_Positional_Category
'Not_Applicable',
'Right',
'Left',
'Visual_Order_Left',
'Left_And_Right',
'Top',
'Bottom',
'Top_And_Bottom',
'Top_And_Right',
'Top_And_Left',
'Top_And_Left_And_Right',
'Bottom_And_Left',
'Bottom_And_Right',
'Top_And_Bottom_And_Right',
'Overstruck',
]
class PropertyValue(object):
def __init__(self, name_):
self.name = name_
def __str__(self):
return self.name
def __eq__(self, other):
return self.name == (other if isinstance(other, str) else other.name)
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(str(self))
property_values = {}
for name in property_names:
value = PropertyValue(name)
assert value not in property_values
assert value not in globals()
property_values[name] = value
globals().update(property_values)
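# A small sanity sketch of the PropertyValue shim above (not executed by the
# generator): each category name behaves like a constant yet still compares
# equal to its plain string spelling.
#
#   assert property_values['Vowel'] == 'Vowel'
#   assert property_values['Vowel'] != 'Consonant'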
def is_BASE(U, UISC, UGC):
return (UISC in [Number, Consonant, Consonant_Head_Letter,
#SPEC-DRAFT Consonant_Placeholder,
Tone_Letter,
Vowel_Independent #SPEC-DRAFT
] or
(UGC == Lo and UISC in [Avagraha, Bindu, Consonant_Final, Consonant_Medial,
Consonant_Subjoined, Vowel, Vowel_Dependent]))
def is_BASE_IND(U, UISC, UGC):
#SPEC-DRAFT return (UISC in [Consonant_Dead, Modifying_Letter] or UGC == Po)
return (UISC in [Consonant_Dead, Modifying_Letter] or
(UGC == Po and not U in [0x104B, 0x104E, 0x1B5B, 0x1B5C, 0x1B5F, 0x2022, 0x111C8, 0x11A3F, 0x11A45, 0x11C44, 0x11C45]) or
False # SPEC-DRAFT-OUTDATED! U == 0x002D
)
def is_BASE_NUM(U, UISC, UGC):
return UISC == Brahmi_Joining_Number
def is_BASE_OTHER(U, UISC, UGC):
if UISC == Consonant_Placeholder: return True #SPEC-DRAFT
#SPEC-DRAFT return U in [0x00A0, 0x00D7, 0x2015, 0x2022, 0x25CC, 0x25FB, 0x25FC, 0x25FD, 0x25FE]
return U in [0x2015, 0x2022, 0x25FB, 0x25FC, 0x25FD, 0x25FE]
def is_CGJ(U, UISC, UGC):
return U == 0x034F
def is_CONS_FINAL(U, UISC, UGC):
return ((UISC == Consonant_Final and UGC != Lo) or
UISC == Consonant_Succeeding_Repha)
def is_CONS_FINAL_MOD(U, UISC, UGC):
#SPEC-DRAFT return UISC in [Consonant_Final_Modifier, Syllable_Modifier]
return UISC == Syllable_Modifier
def is_CONS_MED(U, UISC, UGC):
# Consonant_Initial_Postfixed is new in Unicode 11; not in the spec.
return (UISC == Consonant_Medial and UGC != Lo or
UISC == Consonant_Initial_Postfixed)
def is_CONS_MOD(U, UISC, UGC):
return UISC in [Nukta, Gemination_Mark, Consonant_Killer]
def is_CONS_SUB(U, UISC, UGC):
#SPEC-DRAFT return UISC == Consonant_Subjoined
return UISC == Consonant_Subjoined and UGC != Lo
def is_CONS_WITH_STACKER(U, UISC, UGC):
return UISC == Consonant_With_Stacker
def is_HALANT(U, UISC, UGC):
return (UISC in [Virama, Invisible_Stacker]
and not is_HALANT_OR_VOWEL_MODIFIER(U, UISC, UGC)
and not is_SAKOT(U, UISC, UGC))
def is_HALANT_OR_VOWEL_MODIFIER(U, UISC, UGC):
# https://github.com/harfbuzz/harfbuzz/issues/1102
# https://github.com/harfbuzz/harfbuzz/issues/1379
return U in [0x11046, 0x1134D]
def is_HALANT_NUM(U, UISC, UGC):
return UISC == Number_Joiner
def is_ZWNJ(U, UISC, UGC):
return UISC == Non_Joiner
def is_ZWJ(U, UISC, UGC):
return UISC == Joiner
def is_Word_Joiner(U, UISC, UGC):
return U == 0x2060
def is_OTHER(U, UISC, UGC):
#SPEC-OUTDATED return UGC == Zs # or any other SCRIPT_COMMON characters
return (UISC == Other
and not is_SYM(U, UISC, UGC)
and not is_SYM_MOD(U, UISC, UGC)
and not is_CGJ(U, UISC, UGC)
and not is_Word_Joiner(U, UISC, UGC)
and not is_VARIATION_SELECTOR(U, UISC, UGC)
)
def is_Reserved(U, UISC, UGC):
return UGC == 'Cn'
def is_REPHA(U, UISC, UGC):
return UISC in [Consonant_Preceding_Repha, Consonant_Prefixed]
def is_SAKOT(U, UISC, UGC):
return U == 0x1A60
def is_SYM(U, UISC, UGC):
if U == 0x25CC: return False #SPEC-DRAFT
#SPEC-DRAFT return UGC in [So, Sc] or UISC == Symbol_Letter
return UGC in [So, Sc] and U not in [0x1B62, 0x1B68]
def is_SYM_MOD(U, UISC, UGC):
return U in [0x1B6B, 0x1B6C, 0x1B6D, 0x1B6E, 0x1B6F, 0x1B70, 0x1B71, 0x1B72, 0x1B73]
def is_VARIATION_SELECTOR(U, UISC, UGC):
return 0xFE00 <= U <= 0xFE0F
def is_VOWEL(U, UISC, UGC):
# https://github.com/harfbuzz/harfbuzz/issues/376
return (UISC == Pure_Killer or
(UGC != Lo and UISC in [Vowel, Vowel_Dependent] and U not in [0xAA29]))
def is_VOWEL_MOD(U, UISC, UGC):
# https://github.com/harfbuzz/harfbuzz/issues/376
return (UISC in [Tone_Mark, Cantillation_Mark, Register_Shifter, Visarga] or
(UGC != Lo and (UISC == Bindu or U in [0xAA29])))
use_mapping = {
'B': is_BASE,
'IND': is_BASE_IND,
'N': is_BASE_NUM,
'GB': is_BASE_OTHER,
'CGJ': is_CGJ,
'F': is_CONS_FINAL,
'FM': is_CONS_FINAL_MOD,
'M': is_CONS_MED,
'CM': is_CONS_MOD,
'SUB': is_CONS_SUB,
'CS': is_CONS_WITH_STACKER,
'H': is_HALANT,
'HVM': is_HALANT_OR_VOWEL_MODIFIER,
'HN': is_HALANT_NUM,
'ZWNJ': is_ZWNJ,
'ZWJ': is_ZWJ,
'WJ': is_Word_Joiner,
'O': is_OTHER,
'Rsv': is_Reserved,
'R': is_REPHA,
'S': is_SYM,
'Sk': is_SAKOT,
'SM': is_SYM_MOD,
'VS': is_VARIATION_SELECTOR,
'V': is_VOWEL,
'VM': is_VOWEL_MOD,
}
use_positions = {
'F': {
'Abv': [Top],
'Blw': [Bottom],
'Pst': [Right],
},
'M': {
'Abv': [Top],
'Blw': [Bottom, Bottom_And_Left],
'Pst': [Right],
'Pre': [Left],
},
'CM': {
'Abv': [Top],
'Blw': [Bottom],
},
'V': {
'Abv': [Top, Top_And_Bottom, Top_And_Bottom_And_Right, Top_And_Right],
'Blw': [Bottom, Overstruck, Bottom_And_Right],
'Pst': [Right, Top_And_Left, Top_And_Left_And_Right, Left_And_Right],
'Pre': [Left],
},
'VM': {
'Abv': [Top],
'Blw': [Bottom, Overstruck],
'Pst': [Right],
'Pre': [Left],
},
'SM': {
'Abv': [Top],
'Blw': [Bottom],
},
'H': None,
'HVM': None,
'B': None,
'FM': {
'Abv': [Top],
'Blw': [Bottom],
'Pst': [Not_Applicable],
},
'SUB': None,
}
def map_to_use(data):
out = {}
items = use_mapping.items()
for U,(UISC,UIPC,UGC,UBlock) in data.items():
# Resolve Indic_Syllabic_Category
# TODO: These don't have UISC assigned in Unicode 12.0, but have UIPC
if 0x1CE2 <= U <= 0x1CE8: UISC = Cantillation_Mark
# Tibetan:
# TODO: These don't have UISC assigned in Unicode 12.0, but have UIPC
if 0x0F18 <= U <= 0x0F19 or 0x0F3E <= U <= 0x0F3F: UISC = Vowel_Dependent
if 0x0F86 <= U <= 0x0F87: UISC = Tone_Mark
# Overrides to allow NFC order matching syllable
# https://github.com/harfbuzz/harfbuzz/issues/1012
if UBlock == 'Tibetan' and is_VOWEL (U, UISC, UGC):
if UIPC == Top:
UIPC = Bottom
# TODO: https://github.com/harfbuzz/harfbuzz/pull/982
# also https://github.com/harfbuzz/harfbuzz/issues/1012
if UBlock == 'Chakma' and is_VOWEL (U, UISC, UGC):
if UIPC == Top:
UIPC = Bottom
elif UIPC == Bottom:
UIPC = Top
# TODO: https://github.com/harfbuzz/harfbuzz/pull/627
if 0x1BF2 <= U <= 0x1BF3: UISC = Nukta; UIPC = Bottom
# TODO: U+1CED should only be allowed after some of
# the nasalization marks, maybe only for U+1CE9..U+1CF1.
if U == 0x1CED: UISC = Tone_Mark
# TODO: https://github.com/harfbuzz/harfbuzz/issues/1105
if U == 0x11134: UISC = Gemination_Mark
values = [k for k,v in items if v(U,UISC,UGC)]
assert len(values) == 1, "%s %s %s %s" % (hex(U), UISC, UGC, values)
USE = values[0]
# Resolve Indic_Positional_Category
# TODO: These should die, but have UIPC in Unicode 12.0
if U in [0x953, 0x954]: UIPC = Not_Applicable
# TODO: In USE's override list but not in Unicode 12.0
if U == 0x103C: UIPC = Left
# TODO: https://github.com/harfbuzz/harfbuzz/pull/2012
if U == 0x1C29: UIPC = Left
# TODO: These are not in USE's override list that we have, nor are they in Unicode 12.0
if 0xA926 <= U <= 0xA92A: UIPC = Top
# TODO: https://github.com/harfbuzz/harfbuzz/pull/1037
# and https://github.com/harfbuzz/harfbuzz/issues/1631
if U in [0x11302, 0x11303, 0x114C1]: UIPC = Top
if U == 0x1171E: UIPC = Left
if 0x1CF8 <= U <= 0x1CF9: UIPC = Top
assert (UIPC in [Not_Applicable, Visual_Order_Left] or
USE in use_positions), "%s %s %s %s %s" % (hex(U), UIPC, USE, UISC, UGC)
pos_mapping = use_positions.get(USE, None)
if pos_mapping:
values = [k for k,v in pos_mapping.items() if v and UIPC in v]
assert len(values) == 1, "%s %s %s %s %s %s" % (hex(U), UIPC, USE, UISC, UGC, values)
USE = USE + values[0]
out[U] = (USE, UBlock)
return out
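# Illustrative output of map_to_use, assuming stock UCD input files: a Bengali
# letter such as U+0995 KA (UISC Consonant, UGC Lo) would be classified as the
# USE category 'B' together with its block, i.e. out[0x0995] == ('B', 'Bengali').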
defaults = ('O', 'No_Block')
data = map_to_use(data)
print ("/* == Start of generated table == */")
print ("/*")
print (" * The following table is generated by running:")
print (" *")
print (" * ./gen-use-table.py IndicSyllabicCategory.txt IndicPositionalCategory.txt UnicodeData.txt Blocks.txt")
print (" *")
print (" * on files with these headers:")
print (" *")
for h in headers:
for l in h:
print (" * %s" % (l.strip()))
print (" */")
print ()
print ('#include "hb.hh"')
print ()
print ('#ifndef HB_NO_OT_SHAPE')
print ()
print ('#include "hb-ot-shape-complex-use.hh"')
print ()
total = 0
used = 0
last_block = None
def print_block (block, start, end, data):
global total, used, last_block
if block and block != last_block:
print ()
print ()
print (" /* %s */" % block)
if start % 16:
print (' ' * (20 + (start % 16 * 6)), end='')
num = 0
assert start % 8 == 0
assert (end+1) % 8 == 0
for u in range (start, end+1):
if u % 16 == 0:
print ()
print (" /* %04X */" % u, end='')
if u in data:
num += 1
d = data.get (u, defaults)
print ("%6s," % d[0], end='')
total += end - start + 1
used += num
if block:
last_block = block
uu = sorted (data.keys ())
last = -100000
num = 0
offset = 0
starts = []
ends = []
print ('#pragma GCC diagnostic push')
print ('#pragma GCC diagnostic ignored "-Wunused-macros"')
for k,v in sorted(use_mapping.items()):
if k in use_positions and use_positions[k]: continue
print ("#define %s USE_%s /* %s */" % (k, k, v.__name__[3:]))
for k,v in sorted(use_positions.items()):
if not v: continue
for suf in v.keys():
tag = k + suf
print ("#define %s USE_%s" % (tag, tag))
print ('#pragma GCC diagnostic pop')
print ("")
print ("static const USE_TABLE_ELEMENT_TYPE use_table[] = {")
for u in uu:
if u <= last:
continue
block = data[u][1]
start = u//8*8
end = start+1
while end in uu and block == data[end][1]:
end += 1
end = (end-1)//8*8 + 7
if start != last + 1:
if start - last <= 1+16*3:
print_block (None, last+1, start-1, data)
last = start-1
else:
if last >= 0:
ends.append (last + 1)
offset += ends[-1] - starts[-1]
print ()
print ()
print ("#define use_offset_0x%04xu %d" % (start, offset))
starts.append (start)
print_block (block, start, end, data)
last = end
ends.append (last + 1)
offset += ends[-1] - starts[-1]
print ()
print ()
occupancy = used * 100. / total
page_bits = 12
print ("}; /* Table items: %d; occupancy: %d%% */" % (offset, occupancy))
print ()
print ("USE_TABLE_ELEMENT_TYPE")
print ("hb_use_get_category (hb_codepoint_t u)")
print ("{")
print (" switch (u >> %d)" % page_bits)
print (" {")
pages = set([u>>page_bits for u in starts+ends])
for p in sorted(pages):
print (" case 0x%0Xu:" % p)
for (start,end) in zip (starts, ends):
if p not in [start>>page_bits, end>>page_bits]: continue
offset = "use_offset_0x%04xu" % start
print (" if (hb_in_range<hb_codepoint_t> (u, 0x%04Xu, 0x%04Xu)) return use_table[u - 0x%04Xu + %s];" % (start, end-1, start, offset))
print (" break;")
print ("")
print (" default:")
print (" break;")
print (" }")
print (" return USE_O;")
print ("}")
print ()
for k in sorted(use_mapping.keys()):
if k in use_positions and use_positions[k]: continue
print ("#undef %s" % k)
for k,v in sorted(use_positions.items()):
if not v: continue
for suf in v.keys():
tag = k + suf
print ("#undef %s" % tag)
print ()
print ()
print ('#endif')
print ("/* == End of generated table == */")
# Maintain at least 50% occupancy in the table
if occupancy < 50:
raise Exception ("Table too sparse, please investigate: ", occupancy)
| endlessm/chromium-browser | third_party/harfbuzz-ng/src/src/gen-use-table.py | Python | bsd-3-clause | 15,523 |
import re
from decimal import Decimal
from django.conf import settings
from django.contrib.gis.db.backends.base import BaseSpatialOperations
from django.contrib.gis.db.backends.util import SpatialOperation, SpatialFunction
from django.contrib.gis.db.backends.postgis.adapter import PostGISAdapter
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.postgresql_psycopg2.base import DatabaseOperations
from django.db.utils import DatabaseError
from django.utils import six
#### Classes used in constructing PostGIS spatial SQL ####
class PostGISOperator(SpatialOperation):
"For PostGIS operators (e.g. `&&`, `~`)."
def __init__(self, operator):
super(PostGISOperator, self).__init__(operator=operator)
class PostGISFunction(SpatialFunction):
"For PostGIS function calls (e.g., `ST_Contains(table, geom)`)."
def __init__(self, prefix, function, **kwargs):
super(PostGISFunction, self).__init__(prefix + function, **kwargs)
class PostGISFunctionParam(PostGISFunction):
"For PostGIS functions that take another parameter (e.g. DWithin, Relate)."
sql_template = '%(function)s(%(geo_col)s, %(geometry)s, %%s)'
class PostGISDistance(PostGISFunction):
"For PostGIS distance operations."
dist_func = 'Distance'
sql_template = '%(function)s(%(geo_col)s, %(geometry)s) %(operator)s %%s'
def __init__(self, prefix, operator):
super(PostGISDistance, self).__init__(prefix, self.dist_func,
operator=operator)
class PostGISSpheroidDistance(PostGISFunction):
"For PostGIS spherical distance operations (using the spheroid)."
dist_func = 'distance_spheroid'
sql_template = '%(function)s(%(geo_col)s, %(geometry)s, %%s) %(operator)s %%s'
def __init__(self, prefix, operator):
# An extra parameter in `end_subst` is needed for the spheroid string.
super(PostGISSpheroidDistance, self).__init__(prefix, self.dist_func,
operator=operator)
class PostGISSphereDistance(PostGISDistance):
"For PostGIS spherical distance operations."
dist_func = 'distance_sphere'
class PostGISRelate(PostGISFunctionParam):
"For PostGIS Relate(<geom>, <pattern>) calls."
pattern_regex = re.compile(r'^[012TF\*]{9}$')
def __init__(self, prefix, pattern):
if not self.pattern_regex.match(pattern):
raise ValueError('Invalid intersection matrix pattern "%s".' % pattern)
super(PostGISRelate, self).__init__(prefix, 'Relate')
class PostGISOperations(DatabaseOperations, BaseSpatialOperations):
compiler_module = 'django.contrib.gis.db.models.sql.compiler'
name = 'postgis'
postgis = True
version_regex = re.compile(r'^(?P<major>\d)\.(?P<minor1>\d)\.(?P<minor2>\d+)')
valid_aggregates = dict([(k, None) for k in
('Collect', 'Extent', 'Extent3D', 'MakeLine', 'Union')])
Adapter = PostGISAdapter
Adaptor = Adapter # Backwards-compatibility alias.
def __init__(self, connection):
super(PostGISOperations, self).__init__(connection)
# Trying to get the PostGIS version because the function
# signatures will depend on the version used. The cost
# here is a database query to determine the version, which
# can be mitigated by setting `POSTGIS_VERSION` with a 3-tuple
# comprising user-supplied values for the major, minor, and
# subminor revision of PostGIS.
try:
if hasattr(settings, 'POSTGIS_VERSION'):
vtup = settings.POSTGIS_VERSION
if len(vtup) == 3:
# The user-supplied PostGIS version.
version = vtup
else:
# This was the old documented way, but it's stupid to
# include the string.
version = vtup[1:4]
else:
vtup = self.postgis_version_tuple()
version = vtup[1:]
# Getting the prefix -- even though we don't officially support
# PostGIS 1.2 anymore, keeping it anyway in case a prefix change
# for something else is necessary.
if version >= (1, 2, 2):
prefix = 'ST_'
else:
prefix = ''
self.geom_func_prefix = prefix
self.spatial_version = version
except DatabaseError:
raise ImproperlyConfigured(
'Cannot determine PostGIS version for database "%s". '
'GeoDjango requires at least PostGIS version 1.3. '
'Was the database created from a spatial database '
'template?' % self.connection.settings_dict['NAME']
)
# TODO: Raise helpful exceptions as they become known.
# PostGIS-specific operators. The commented descriptions of these
# operators come from Section 7.6 of the PostGIS 1.4 documentation.
self.geometry_operators = {
# The "&<" operator returns true if A's bounding box overlaps or
# is to the left of B's bounding box.
'overlaps_left' : PostGISOperator('&<'),
# The "&>" operator returns true if A's bounding box overlaps or
# is to the right of B's bounding box.
'overlaps_right' : PostGISOperator('&>'),
# The "<<" operator returns true if A's bounding box is strictly
# to the left of B's bounding box.
'left' : PostGISOperator('<<'),
# The ">>" operator returns true if A's bounding box is strictly
# to the right of B's bounding box.
'right' : PostGISOperator('>>'),
# The "&<|" operator returns true if A's bounding box overlaps or
# is below B's bounding box.
'overlaps_below' : PostGISOperator('&<|'),
# The "|&>" operator returns true if A's bounding box overlaps or
# is above B's bounding box.
'overlaps_above' : PostGISOperator('|&>'),
# The "<<|" operator returns true if A's bounding box is strictly
# below B's bounding box.
'strictly_below' : PostGISOperator('<<|'),
# The "|>>" operator returns true if A's bounding box is strictly
# above B's bounding box.
'strictly_above' : PostGISOperator('|>>'),
# The "~=" operator is the "same as" operator. It tests actual
# geometric equality of two features. So if A and B are the same feature,
# vertex-by-vertex, the operator returns true.
'same_as' : PostGISOperator('~='),
'exact' : PostGISOperator('~='),
# The "@" operator returns true if A's bounding box is completely contained
# by B's bounding box.
'contained' : PostGISOperator('@'),
# The "~" operator returns true if A's bounding box completely contains
            # B's bounding box.
'bbcontains' : PostGISOperator('~'),
# The "&&" operator returns true if A's bounding box overlaps
# B's bounding box.
'bboverlaps' : PostGISOperator('&&'),
}
self.geometry_functions = {
'equals' : PostGISFunction(prefix, 'Equals'),
'disjoint' : PostGISFunction(prefix, 'Disjoint'),
'touches' : PostGISFunction(prefix, 'Touches'),
'crosses' : PostGISFunction(prefix, 'Crosses'),
'within' : PostGISFunction(prefix, 'Within'),
'overlaps' : PostGISFunction(prefix, 'Overlaps'),
'contains' : PostGISFunction(prefix, 'Contains'),
'intersects' : PostGISFunction(prefix, 'Intersects'),
'relate' : (PostGISRelate, six.string_types),
'coveredby' : PostGISFunction(prefix, 'CoveredBy'),
'covers' : PostGISFunction(prefix, 'Covers'),
}
# Valid distance types and substitutions
dtypes = (Decimal, Distance, float) + six.integer_types
def get_dist_ops(operator):
"Returns operations for both regular and spherical distances."
return {'cartesian' : PostGISDistance(prefix, operator),
'sphere' : PostGISSphereDistance(prefix, operator),
'spheroid' : PostGISSpheroidDistance(prefix, operator),
}
self.distance_functions = {
'distance_gt' : (get_dist_ops('>'), dtypes),
'distance_gte' : (get_dist_ops('>='), dtypes),
'distance_lt' : (get_dist_ops('<'), dtypes),
'distance_lte' : (get_dist_ops('<='), dtypes),
'dwithin' : (PostGISFunctionParam(prefix, 'DWithin'), dtypes)
}
# Adding the distance functions to the geometries lookup.
self.geometry_functions.update(self.distance_functions)
# Only PostGIS versions 1.3.4+ have GeoJSON serialization support.
if version < (1, 3, 4):
GEOJSON = False
else:
GEOJSON = prefix + 'AsGeoJson'
        # ST_ContainsProperly, ST_MakeLine, and ST_GeoHash added in 1.4.
if version >= (1, 4, 0):
GEOHASH = 'ST_GeoHash'
BOUNDINGCIRCLE = 'ST_MinimumBoundingCircle'
self.geometry_functions['contains_properly'] = PostGISFunction(prefix, 'ContainsProperly')
else:
GEOHASH, BOUNDINGCIRCLE = False, False
# Geography type support added in 1.5.
if version >= (1, 5, 0):
self.geography = True
# Only a subset of the operators and functions are available
# for the geography type.
self.geography_functions = self.distance_functions.copy()
self.geography_functions.update({
'coveredby': self.geometry_functions['coveredby'],
'covers': self.geometry_functions['covers'],
'intersects': self.geometry_functions['intersects'],
})
self.geography_operators = {
'bboverlaps': PostGISOperator('&&'),
}
# Native geometry type support added in PostGIS 2.0.
if version >= (2, 0, 0):
self.geometry = True
# Creating a dictionary lookup of all GIS terms for PostGIS.
self.gis_terms = set(['isnull'])
self.gis_terms.update(self.geometry_operators)
self.gis_terms.update(self.geometry_functions)
self.area = prefix + 'Area'
self.bounding_circle = BOUNDINGCIRCLE
self.centroid = prefix + 'Centroid'
self.collect = prefix + 'Collect'
self.difference = prefix + 'Difference'
self.distance = prefix + 'Distance'
self.distance_sphere = prefix + 'distance_sphere'
self.distance_spheroid = prefix + 'distance_spheroid'
self.envelope = prefix + 'Envelope'
self.extent = prefix + 'Extent'
self.force_rhr = prefix + 'ForceRHR'
self.geohash = GEOHASH
self.geojson = GEOJSON
self.gml = prefix + 'AsGML'
self.intersection = prefix + 'Intersection'
self.kml = prefix + 'AsKML'
self.length = prefix + 'Length'
self.length_spheroid = prefix + 'length_spheroid'
self.makeline = prefix + 'MakeLine'
self.mem_size = prefix + 'mem_size'
self.num_geom = prefix + 'NumGeometries'
self.num_points = prefix + 'npoints'
self.perimeter = prefix + 'Perimeter'
self.point_on_surface = prefix + 'PointOnSurface'
self.polygonize = prefix + 'Polygonize'
self.reverse = prefix + 'Reverse'
self.scale = prefix + 'Scale'
self.snap_to_grid = prefix + 'SnapToGrid'
self.svg = prefix + 'AsSVG'
self.sym_difference = prefix + 'SymDifference'
self.transform = prefix + 'Transform'
self.translate = prefix + 'Translate'
self.union = prefix + 'Union'
self.unionagg = prefix + 'Union'
if version >= (2, 0, 0):
self.extent3d = prefix + '3DExtent'
self.length3d = prefix + '3DLength'
self.perimeter3d = prefix + '3DPerimeter'
else:
self.extent3d = prefix + 'Extent3D'
self.length3d = prefix + 'Length3D'
self.perimeter3d = prefix + 'Perimeter3D'
def check_aggregate_support(self, aggregate):
"""
Checks if the given aggregate name is supported (that is, if it's
in `self.valid_aggregates`).
"""
agg_name = aggregate.__class__.__name__
return agg_name in self.valid_aggregates
def convert_extent(self, box):
"""
Returns a 4-tuple extent for the `Extent` aggregate by converting
the bounding box text returned by PostGIS (`box` argument), for
example: "BOX(-90.0 30.0, -85.0 40.0)".
"""
ll, ur = box[4:-1].split(',')
xmin, ymin = map(float, ll.split())
xmax, ymax = map(float, ur.split())
return (xmin, ymin, xmax, ymax)
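    # Illustrative result for the docstring example above, assuming the same
    # input format: convert_extent("BOX(-90.0 30.0,-85.0 40.0)") would yield
    # (-90.0, 30.0, -85.0, 40.0).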
def convert_extent3d(self, box3d):
"""
Returns a 6-tuple extent for the `Extent3D` aggregate by converting
        the 3D bounding-box text returned by PostGIS (`box3d` argument), for
example: "BOX3D(-90.0 30.0 1, -85.0 40.0 2)".
"""
ll, ur = box3d[6:-1].split(',')
xmin, ymin, zmin = map(float, ll.split())
xmax, ymax, zmax = map(float, ur.split())
return (xmin, ymin, zmin, xmax, ymax, zmax)
def convert_geom(self, hex, geo_field):
"""
        Converts the geometry returned from PostGIS aggregates.
"""
if hex:
return Geometry(hex)
else:
return None
def geo_db_type(self, f):
"""
Return the database field type for the given geometry field.
Typically this is `None` because geometry columns are added via
the `AddGeometryColumn` stored procedure, unless the field
has been specified to be of geography type instead.
"""
if f.geography:
if not self.geography:
raise NotImplementedError('PostGIS 1.5 required for geography column support.')
if f.srid != 4326:
raise NotImplementedError('PostGIS 1.5 supports geography columns '
'only with an SRID of 4326.')
return 'geography(%s,%d)' % (f.geom_type, f.srid)
elif self.geometry:
# Postgis 2.0 supports type-based geometries.
# TODO: Support 'M' extension.
if f.dim == 3:
geom_type = f.geom_type + 'Z'
else:
geom_type = f.geom_type
return 'geometry(%s,%d)' % (geom_type, f.srid)
else:
return None
def get_distance(self, f, dist_val, lookup_type):
"""
Retrieve the distance parameters for the given geometry field,
distance lookup value, and the distance lookup type.
This is the most complex implementation of the spatial backends due to
what is supported on geodetic geometry columns vs. what's available on
projected geometry columns. In addition, it has to take into account
        the geography column type introduced in PostGIS 1.5.
"""
# Getting the distance parameter and any options.
if len(dist_val) == 1:
value, option = dist_val[0], None
else:
value, option = dist_val
# Shorthand boolean flags.
geodetic = f.geodetic(self.connection)
geography = f.geography and self.geography
if isinstance(value, Distance):
if geography:
dist_param = value.m
elif geodetic:
if lookup_type == 'dwithin':
raise ValueError('Only numeric values of degree units are '
'allowed on geographic DWithin queries.')
dist_param = value.m
else:
dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
else:
# Assuming the distance is in the units of the field.
dist_param = value
if (not geography and geodetic and lookup_type != 'dwithin'
and option == 'spheroid'):
# using distance_spheroid requires the spheroid of the field as
# a parameter.
return [f._spheroid, dist_param]
else:
return [dist_param]
def get_geom_placeholder(self, f, value):
"""
Provides a proper substitution value for Geometries that are not in the
SRID of the field. Specifically, this routine will substitute in the
ST_Transform() function call.
"""
if value is None or value.srid == f.srid:
placeholder = '%s'
else:
# Adding Transform() to the SQL placeholder.
placeholder = '%s(%%s, %s)' % (self.transform, f.srid)
if hasattr(value, 'expression'):
# If this is an F expression, then we don't really want
# a placeholder and instead substitute in the column
# of the expression.
placeholder = placeholder % self.get_expression_column(value)
return placeholder
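    # For example, a geometry whose SRID differs from a field declared with
    # srid=4326 would get the placeholder 'ST_Transform(%s, 4326)' (SRID and
    # ST_ prefix are illustrative).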
def _get_postgis_func(self, func):
"""
Helper routine for calling PostGIS functions and returning their result.
"""
# Close out the connection. See #9437.
with self.connection.temporary_connection() as cursor:
cursor.execute('SELECT %s()' % func)
return cursor.fetchone()[0]
def postgis_geos_version(self):
"Returns the version of the GEOS library used with PostGIS."
return self._get_postgis_func('postgis_geos_version')
def postgis_lib_version(self):
"Returns the version number of the PostGIS library used with PostgreSQL."
return self._get_postgis_func('postgis_lib_version')
def postgis_proj_version(self):
"Returns the version of the PROJ.4 library used with PostGIS."
return self._get_postgis_func('postgis_proj_version')
def postgis_version(self):
"Returns PostGIS version number and compile-time options."
return self._get_postgis_func('postgis_version')
def postgis_full_version(self):
"Returns PostGIS version number and compile-time options."
return self._get_postgis_func('postgis_full_version')
def postgis_version_tuple(self):
"""
Returns the PostGIS version as a tuple (version string, major,
minor, subminor).
"""
# Getting the PostGIS version
version = self.postgis_lib_version()
m = self.version_regex.match(version)
if m:
major = int(m.group('major'))
minor1 = int(m.group('minor1'))
minor2 = int(m.group('minor2'))
else:
raise Exception('Could not parse PostGIS version string: %s' % version)
return (version, major, minor1, minor2)
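    # Illustrative parse: a postgis_lib_version() string of "2.1.3" would be
    # returned as ('2.1.3', 2, 1, 3).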
def proj_version_tuple(self):
"""
Return the version of PROJ.4 used by PostGIS as a tuple of the
major, minor, and subminor release numbers.
"""
proj_regex = re.compile(r'(\d+)\.(\d+)\.(\d+)')
proj_ver_str = self.postgis_proj_version()
m = proj_regex.search(proj_ver_str)
if m:
return tuple(map(int, [m.group(1), m.group(2), m.group(3)]))
else:
raise Exception('Could not determine PROJ.4 version from PostGIS.')
def num_params(self, lookup_type, num_param):
"""
Helper routine that returns a boolean indicating whether the number of
parameters is correct for the lookup type.
"""
def exactly_two(np): return np == 2
def two_to_three(np): return np >= 2 and np <=3
if (lookup_type in self.distance_functions and
lookup_type != 'dwithin'):
return two_to_three(num_param)
else:
return exactly_two(num_param)
def spatial_lookup_sql(self, lvalue, lookup_type, value, field, qn):
"""
        Constructs spatial SQL from the given lookup value tuple of
(alias, col, db_type), the lookup type string, lookup value, and
the geometry field.
"""
alias, col, db_type = lvalue
# Getting the quoted geometry column.
geo_col = '%s.%s' % (qn(alias), qn(col))
if lookup_type in self.geometry_operators:
if field.geography and not lookup_type in self.geography_operators:
raise ValueError('PostGIS geography does not support the '
'"%s" lookup.' % lookup_type)
# Handling a PostGIS operator.
op = self.geometry_operators[lookup_type]
return op.as_sql(geo_col, self.get_geom_placeholder(field, value))
elif lookup_type in self.geometry_functions:
if field.geography and not lookup_type in self.geography_functions:
raise ValueError('PostGIS geography type does not support the '
'"%s" lookup.' % lookup_type)
# See if a PostGIS geometry function matches the lookup type.
tmp = self.geometry_functions[lookup_type]
# Lookup types that are tuples take tuple arguments, e.g., 'relate' and
# distance lookups.
if isinstance(tmp, tuple):
# First element of tuple is the PostGISOperation instance, and the
# second element is either the type or a tuple of acceptable types
                # that may be passed in as further parameters for the lookup type.
op, arg_type = tmp
# Ensuring that a tuple _value_ was passed in from the user
if not isinstance(value, (tuple, list)):
raise ValueError('Tuple required for `%s` lookup type.' % lookup_type)
# Geometry is first element of lookup tuple.
geom = value[0]
# Number of valid tuple parameters depends on the lookup type.
nparams = len(value)
if not self.num_params(lookup_type, nparams):
raise ValueError('Incorrect number of parameters given for `%s` lookup type.' % lookup_type)
# Ensuring the argument type matches what we expect.
if not isinstance(value[1], arg_type):
raise ValueError('Argument type should be %s, got %s instead.' % (arg_type, type(value[1])))
# For lookup type `relate`, the op instance is not yet created (has
# to be instantiated here to check the pattern parameter).
if lookup_type == 'relate':
op = op(self.geom_func_prefix, value[1])
elif lookup_type in self.distance_functions and lookup_type != 'dwithin':
if not field.geography and field.geodetic(self.connection):
# Geodetic distances are only available from Points to
# PointFields on PostGIS 1.4 and below.
if not self.connection.ops.geography:
if field.geom_type != 'POINT':
raise ValueError('PostGIS spherical operations are only valid on PointFields.')
if str(geom.geom_type) != 'Point':
raise ValueError('PostGIS geometry distance parameter is required to be of type Point.')
# Setting up the geodetic operation appropriately.
if nparams == 3 and value[2] == 'spheroid':
op = op['spheroid']
else:
op = op['sphere']
else:
op = op['cartesian']
else:
op = tmp
geom = value
# Calling the `as_sql` function on the operation instance.
return op.as_sql(geo_col, self.get_geom_placeholder(field, geom))
elif lookup_type == 'isnull':
# Handling 'isnull' lookup type
return "%s IS %sNULL" % (geo_col, ('' if value else 'NOT ')), []
raise TypeError("Got invalid lookup_type: %s" % repr(lookup_type))
def spatial_aggregate_sql(self, agg):
"""
Returns the spatial aggregate SQL template and function for the
given Aggregate instance.
"""
agg_name = agg.__class__.__name__
if not self.check_aggregate_support(agg):
            raise NotImplementedError('%s spatial aggregate is not implemented for this backend.' % agg_name)
agg_name = agg_name.lower()
if agg_name == 'union':
agg_name += 'agg'
sql_template = '%(function)s(%(field)s)'
sql_function = getattr(self, agg_name)
return sql_template, sql_function
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
from django.contrib.gis.db.backends.postgis.models import GeometryColumns
return GeometryColumns
def spatial_ref_sys(self):
from django.contrib.gis.db.backends.postgis.models import SpatialRefSys
return SpatialRefSys
| postrational/django | django/contrib/gis/db/backends/postgis/operations.py | Python | bsd-3-clause | 25,538 |
# -*- coding: utf-8 -*-
import os
import unittest
from manolo_scraper.spiders.mincu import MincuSpider
from utils import fake_response_from_file
class TestMincuSpider(unittest.TestCase):
def setUp(self):
self.spider = MincuSpider()
def test_parse_item(self):
filename = os.path.join('data/mincu', '18-08-2015.html')
items = self.spider.parse(fake_response_from_file(filename, meta={'date': u'18/08/2015'}))
item = next(items)
self.assertEqual(item.get('full_name'), u'INGRID BARRIONUEVO ECHEGARAY')
self.assertEqual(item.get('time_start'), u'16:40')
self.assertEqual(item.get('institution'), u'mincu')
self.assertEqual(item.get('id_document'), u'DNI')
self.assertEqual(item.get('id_number'), u'10085172')
self.assertEqual(item.get('entity'), u'PARTICULAR')
self.assertEqual(item.get('reason'), u'REUNIÓN DE TRABAJO')
self.assertEqual(item.get('host_name'), u'JOIZ ELIZABETH DOBLADILLO ORTIZ')
self.assertEqual(item.get('title'), u'[SERVICIOS DE UN ASISTENTE EN COMUNICACIONES]')
self.assertEqual(item.get('office'), u'QHAPAQ ÑAN')
self.assertEqual(item.get('time_end'), u'16:53')
self.assertEqual(item.get('date'), u'2015-08-18')
number_of_items = 1 + sum(1 for x in items)
self.assertEqual(number_of_items, 15)
| aniversarioperu/django-manolo | scrapers/tests/test_mincu_spider.py | Python | bsd-3-clause | 1,379 |
from flask import Flask
from flask.ext.restful import reqparse, abort, Api, Resource
app = Flask(__name__)
api = Api(app)
TODOS = {
'todo1': {'task': 'build an API'},
'todo2': {'task': '?????'},
'todo3': {'task': 'profit!'},
}
def abort_if_todo_doesnt_exist(todo_id):
if todo_id not in TODOS:
abort(404, message="Todo {} doesn't exist".format(todo_id))
parser = reqparse.RequestParser()
parser.add_argument('task', type=str)
# Todo
# shows a single todo item and lets you delete it
class Todo(Resource):
def get(self, todo_id):
abort_if_todo_doesnt_exist(todo_id)
return TODOS[todo_id]
def delete(self, todo_id):
abort_if_todo_doesnt_exist(todo_id)
del TODOS[todo_id]
return '', 204
def put(self, todo_id):
args = parser.parse_args()
task = {'task': args['task']}
TODOS[todo_id] = task
return task, 201
# TodoList
# shows a list of all todos, and lets you POST to add new tasks
class TodoList(Resource):
def get(self):
return TODOS
def post(self):
args = parser.parse_args()
todo_id = 'todo%d' % (len(TODOS) + 1)
TODOS[todo_id] = {'task': args['task']}
return TODOS[todo_id], 201
##
## Actually setup the Api resource routing here
##
api.add_resource(TodoList, '/todos')
api.add_resource(Todo, '/todos/<string:todo_id>')
if __name__ == '__main__':
app.run(debug=True)
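# A quick way to exercise this API once the server is running; host and port
# assume Flask's defaults, and the curl invocations below are only a sketch:
#
#   curl http://localhost:5000/todos
#   curl http://localhost:5000/todos/todo3
#   curl http://localhost:5000/todos/todo2 -X DELETE -v
#   curl http://localhost:5000/todos -d "task=something new" -X POST -v
#   curl http://localhost:5000/todos/todo3 -d "task=something different" -X PUT -v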
| CanalTP/flask-restful | examples/todo.py | Python | bsd-3-clause | 1,446 |
from media_tree.contrib.cms_plugins.media_tree_image.models import MediaTreeImage
from media_tree.contrib.cms_plugins.helpers import PluginLink
from media_tree.models import FileNode
from media_tree.contrib.views.detail.image import ImageNodeDetailView
from django.utils.translation import ugettext_lazy as _
from cms.utils.page_resolver import get_page_from_path
from django.http import Http404
class ImagePluginDetailView(ImageNodeDetailView):
return_url = None
def get_object(self, *args, **kwargs):
obj = super(ImagePluginDetailView, self).get_object(*args, **kwargs)
if obj:
allowed = False
# validate that the object is actually published using the plugin...
for plugin in MediaTreeImage.objects.filter(node=obj):
# ...and on a publicly accessible page.
# TODO: Iterating all plugins and getting each page
# is a bit inefficient.
page = get_page_from_path(plugin.page.get_path())
if page:
allowed = True
break
if not allowed:
raise Http404
return obj
def get_context_data(self, *args, **kwargs):
context_data = super(ImagePluginDetailView, self).get_context_data(
*args, **kwargs)
if self.return_url:
page = get_page_from_path(self.return_url.strip('/'))
if page:
context_data.update({
'link': PluginLink(url=page.get_absolute_url(),
text=_('Back to %s') % page.get_title())
})
return context_data
def get(self, request, *args, **kwargs):
self.return_url = request.GET.get('return_url', None)
return super(ImagePluginDetailView, self).get(request, *args, **kwargs) | bittner/django-media-tree | media_tree/contrib/cms_plugins/media_tree_image/views.py | Python | bsd-3-clause | 1,854 |
#!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run all python tests in this directory."""
import sys
import unittest
MODULES = [
'directory_storage_test',
'gsd_storage_test',
'hashing_tools_test',
'local_storage_cache_test',
]
# We use absolute imports for Py3 compatibility.
# This means for imports to resolve when testing we need to add the pynacl
# directory to the module search path.
sys.path.insert(0, './')
suite = unittest.TestLoader().loadTestsFromNames(MODULES)
result = unittest.TextTestRunner(verbosity=2).run(suite)
if result.wasSuccessful():
sys.exit(0)
else:
sys.exit(1)
| endlessm/chromium-browser | native_client/pynacl/run_pynacl_tests.py | Python | bsd-3-clause | 757 |
#!/usr/bin/env python
import logging
# CUSTOM LOG LEVELS
LOG_LEVEL_TOOL = 25
# Terminal colors
TERMINAL_COLOR_BLUE = '\033[94m'
TERMINAL_COLOR_GREEN = '\033[92m'
TERMINAL_COLOR_YELLOW = '\033[93m'
TERMINAL_COLOR_RED = '\033[91m'
TERMINAL_COLOR_END = '\033[0m'
class ConsoleFormatter(logging.Formatter):
"""
Custom formatter to show logging messages differently on Console
"""
error_fmt = TERMINAL_COLOR_RED + "[!] %(message)s" + TERMINAL_COLOR_END
warn_fmt = TERMINAL_COLOR_YELLOW + "[*] %(message)s" + TERMINAL_COLOR_END
debug_fmt = TERMINAL_COLOR_GREEN + "[+] %(message)s" + TERMINAL_COLOR_END
info_fmt = TERMINAL_COLOR_BLUE + "[-] %(message)s" + TERMINAL_COLOR_END
def format(self, record):
# Save the original format configured by the user
# when the logger formatter was instantiated
format_orig = self._fmt
# Replace the original format with one customized by logging level
if record.levelno == logging.DEBUG:
self._fmt = self.debug_fmt
elif record.levelno == logging.INFO:
self._fmt = self.info_fmt
elif record.levelno == logging.ERROR:
self._fmt = self.error_fmt
elif record.levelno == logging.WARN:
self._fmt = self.warn_fmt
# Call the original formatter class to do the grunt work
result = super(ConsoleFormatter, self).format(record)
# Restore the original format configured by the user
self._fmt = format_orig
return result
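# Minimal wiring sketch (not part of OWTF itself; handler and logger names are
# illustrative) showing how ConsoleFormatter above and FileFormatter below
# would typically be attached:
#
#   console = logging.StreamHandler()
#   console.setFormatter(ConsoleFormatter())
#   logfile = logging.FileHandler("owtf.log")
#   logfile.setFormatter(FileFormatter())
#   logger = logging.getLogger("general")
#   logger.addHandler(console)
#   logger.addHandler(logfile)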
class FileFormatter(logging.Formatter):
"""
Custom formatter for log files
"""
def __init__(self, *args, **kwargs):
super(FileFormatter, self).__init__()
self._fmt = "[%(levelname)s] [%(asctime)s] " + "[File '%(filename)s', line %(lineno)s, in %(funcName)s] -" + \
" %(message)s"
| DarKnight24/owtf | framework/lib/formatters.py | Python | bsd-3-clause | 1,857 |
# -*- coding: utf-8 -*-
"""
website.api
~~~~~~~~~~~
website api blueprint.
"""
| alibaba/FlexGW | website/api/__init__.py | Python | bsd-3-clause | 92 |
from cmsplugin_cascade.segmentation.mixins import EmulateUserModelMixin, EmulateUserAdminMixin
from shop.admin.customer import CustomerProxy
class EmulateCustomerModelMixin(EmulateUserModelMixin):
UserModel = CustomerProxy
class EmulateCustomerAdminMixin(EmulateUserAdminMixin):
UserModel = CustomerProxy
| divio/django-shop | shop/cascade/segmentation.py | Python | bsd-3-clause | 317 |
# Copyright 2010-2011, Sikuli.org
# Released under the MIT License.
from org.sikuli.script import VDictProxy
import java.io.File
##
# VDict implements a visual dictionary that has Python's conventional dict
# interfaces.
#
# A visual dictionary is a data type for storing key-value pairs using
# images as keys. Using a visual dictionary, a user can easily automate
# the tasks of saving and retrieving arbitrary data objects by images.
# The syntax of the visual dictionary data type is modeled after that of
# the built-in Python dictionary data type.
class VDict(VDictProxy):
##
# the default similarity for fuzzy matching. The range of this is from
# 0 to 1.0, where 0 matches everything and 1.0 does exactly matching.
# <br/>
# The default similarity is 0.7.
_DEFAULT_SIMILARITY = 0.7
_DEFAULT_GET_ITEM_N = 0
##
# Constructs a new visual dictionary with the same mapping as the given dict.
#
def __init__(self, dict=None):
self._keys = {}
if dict:
for k in dict.keys():
self[k] = dict[k]
##
# Returns the number of keys in this visual dictionary.
#
def __len__(self):
return self.size()
##
# Maps the specified key to the specified item in this visual dictionary.
#
def __setitem__(self, key, item):
self.insert(key, item)
self._keys[key] = item
##
# Tests if the specified object looks like a key in this visual dictionary
# with the default similarity.
#
def __contains__(self, key):
return len(self.get(key)) > 0
##
# Returns all values to which the specified key is fuzzily matched in
# this visual dictionary with the default similarity.
# <br/>
# This is a wrapper for the {@link #VDict.get get} method.
def __getitem__(self, key):
return self.get(key)
##
# Deletes the key and its corresponding value from this visual dictionary.
#
def __delitem__(self, key):
self.erase(key)
del self._keys[key]
##
# Returns a list of the keys in this visual dictionary.
#
def keys(self):
return self._keys.keys()
##
# Returns the value to which the specified key is exactly matched in
# this visual dictionary.
#
def get_exact(self, key):
if key == None: return None
return self.lookup(key)
##
# Returns the values to which the specified key is fuzzily matched in
# this visual dictionary with the given similarity and the given maximum
# number of return items.
# @param similarity the similarity for matching.
# @param n maximum number of return items.
#
def get(self, key, similarity=_DEFAULT_SIMILARITY, n=_DEFAULT_GET_ITEM_N):
if key == None: return None
return self.lookup_similar_n(key, similarity, n)
##
# Returns the value to which the specified key is best matched in
# this visual dictionary with the given similarity.
# @param similarity the similarity for matching.
#
def get1(self, key, similarity=_DEFAULT_SIMILARITY):
if key == None: return None
return self.lookup_similar(key, similarity)
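# Illustrative usage (a sketch only: "key.png" and the stored value are
# hypothetical, and a Sikuli runtime providing the VDictProxy backend is
# assumed):
#
#   d = VDict()
#   d["key.png"] = "some value"     # store a value under an image key
#   if "key.png" in d:              # fuzzy containment check
#       matches = d["key.png"]      # fuzzy lookup returning matched values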
| ck1125/sikuli | sikuli-script/src/main/python/sikuli/VDict.py | Python | mit | 3,120 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Customize Form is a Single DocType used to mask the Property Setter
Thus providing a better UI from user perspective
"""
import webnotes
from webnotes.utils import cstr
class DocType:
def __init__(self, doc, doclist=[]):
self.doc, self.doclist = doc, doclist
self.doctype_properties = [
'search_fields',
'default_print_format',
'read_only_onload',
'allow_print',
'allow_email',
'allow_copy',
'allow_attach',
'max_attachments'
]
self.docfield_properties = [
'idx',
'label',
'fieldtype',
'fieldname',
'options',
'permlevel',
'width',
'print_width',
'reqd',
'in_filter',
'in_list_view',
'hidden',
'print_hide',
'report_hide',
'allow_on_submit',
'depends_on',
'description',
'default',
'name'
]
self.property_restrictions = {
'fieldtype': [['Currency', 'Float'], ['Small Text', 'Data'], ['Text', 'Text Editor', 'Code']],
}
self.forbidden_properties = ['idx']
def get(self):
"""
Gets DocFields applied with Property Setter customizations via Customize Form Field
"""
self.clear()
if self.doc.doc_type:
from webnotes.model.doc import addchild
for d in self.get_ref_doclist():
if d.doctype=='DocField':
new = addchild(self.doc, 'fields', 'Customize Form Field',
self.doclist)
self.set(
{
'list': self.docfield_properties,
'doc' : d,
'doc_to_set': new
}
)
elif d.doctype=='DocType':
self.set({ 'list': self.doctype_properties, 'doc': d })
def get_ref_doclist(self):
"""
* Gets doclist of type self.doc.doc_type
* Applies property setter properties on the doclist
* returns the modified doclist
"""
from webnotes.model.doctype import get
ref_doclist = get(self.doc.doc_type)
ref_doclist = webnotes.doclist([ref_doclist[0]]
+ ref_doclist.get({"parent": self.doc.doc_type}))
return ref_doclist
def clear(self):
"""
Clear fields in the doc
"""
# Clear table before adding new doctype's fields
self.doclist = self.doc.clear_table(self.doclist, 'fields')
self.set({ 'list': self.doctype_properties, 'value': None })
def set(self, args):
"""
Set a list of attributes of a doc to a value
or to attribute values of a doc passed
args can contain:
* list --> list of attributes to set
* doc_to_set --> defaults to self.doc
* value --> to set all attributes to one value eg. None
* doc --> copy attributes from doc to doc_to_set
"""
if not 'doc_to_set' in args:
args['doc_to_set'] = self.doc
if 'list' in args:
if 'value' in args:
for f in args['list']:
args['doc_to_set'].fields[f] = None
elif 'doc' in args:
for f in args['list']:
args['doc_to_set'].fields[f] = args['doc'].fields.get(f)
else:
webnotes.msgprint("Please specify args['list'] to set", raise_exception=1)
def post(self):
"""
Save diff between Customize Form Bean and DocType Bean as property setter entries
"""
if self.doc.doc_type:
from webnotes.model import doc
from core.doctype.doctype.doctype import validate_fields_for_doctype
this_doclist = webnotes.doclist([self.doc] + self.doclist)
ref_doclist = self.get_ref_doclist()
dt_doclist = doc.get('DocType', self.doc.doc_type)
# get a list of property setter docs
diff_list = self.diff(this_doclist, ref_doclist, dt_doclist)
self.set_properties(diff_list)
validate_fields_for_doctype(self.doc.doc_type)
webnotes.clear_cache(doctype=self.doc.doc_type)
webnotes.msgprint("Updated")
def diff(self, new_dl, ref_dl, dt_dl):
"""
Get difference between new_dl doclist and ref_dl doclist
then check how it differs from dt_dl i.e. default doclist
"""
import re
self.defaults = self.get_defaults()
diff_list = []
for new_d in new_dl:
for ref_d in ref_dl:
if ref_d.doctype == 'DocField' and new_d.name == ref_d.name:
for prop in self.docfield_properties:
# do not set forbidden properties like idx
if prop in self.forbidden_properties: continue
d = self.prepare_to_set(prop, new_d, ref_d, dt_dl)
if d: diff_list.append(d)
break
elif ref_d.doctype == 'DocType' and new_d.doctype == 'Customize Form':
for prop in self.doctype_properties:
d = self.prepare_to_set(prop, new_d, ref_d, dt_dl)
if d: diff_list.append(d)
break
return diff_list
def get_defaults(self):
"""
Get fieldtype and default value for properties of a field
"""
df_defaults = webnotes.conn.sql("""
SELECT fieldname, fieldtype, `default`, label
FROM `tabDocField`
WHERE parent='DocField' or parent='DocType'""", as_dict=1)
defaults = {}
for d in df_defaults:
defaults[d['fieldname']] = d
defaults['idx'] = {'fieldname' : 'idx', 'fieldtype' : 'Int', 'default' : 1, 'label' : 'idx'}
defaults['previous_field'] = {'fieldname' : 'previous_field', 'fieldtype' : 'Data', 'default' : None, 'label' : 'Previous Field'}
return defaults
def prepare_to_set(self, prop, new_d, ref_d, dt_doclist, delete=0):
"""
Prepares docs of property setter
sets delete property if it is required to be deleted
"""
# Check if property has changed compared to when it was loaded
if new_d.fields.get(prop) != ref_d.fields.get(prop) \
and not \
( \
new_d.fields.get(prop) in [None, 0] \
and ref_d.fields.get(prop) in [None, 0] \
) and not \
( \
new_d.fields.get(prop) in [None, ''] \
and ref_d.fields.get(prop) in [None, ''] \
):
#webnotes.msgprint("new: " + str(new_d.fields[prop]) + " | old: " + str(ref_d.fields[prop]))
# Check if the new property is same as that in original doctype
# If yes, we need to delete the property setter entry
for dt_d in dt_doclist:
if dt_d.name == ref_d.name \
and (new_d.fields.get(prop) == dt_d.fields.get(prop) \
or \
( \
new_d.fields.get(prop) in [None, 0] \
and dt_d.fields.get(prop) in [None, 0] \
) or \
( \
new_d.fields.get(prop) in [None, ''] \
and dt_d.fields.get(prop) in [None, ''] \
)):
delete = 1
break
value = new_d.fields.get(prop)
if prop in self.property_restrictions:
allow_change = False
for restrict_list in self.property_restrictions.get(prop):
if value in restrict_list and \
ref_d.fields.get(prop) in restrict_list:
allow_change = True
break
if not allow_change:
webnotes.msgprint("""\
You cannot change '%s' of '%s' from '%s' to '%s'.
%s can only be changed among %s.
<i>Ignoring this change and saving.</i>""" % \
(self.defaults.get(prop, {}).get("label") or prop,
new_d.fields.get("label") or new_d.fields.get("idx"),
ref_d.fields.get(prop), value,
self.defaults.get(prop, {}).get("label") or prop,
" -or- ".join([", ".join(r) for r in \
self.property_restrictions.get(prop)])))
return None
# If the above conditions are fulfilled,
# create a property setter doc, but dont save it yet.
from webnotes.model.doc import Document
d = Document('Property Setter')
d.doctype_or_field = ref_d.doctype=='DocField' and 'DocField' or 'DocType'
d.doc_type = self.doc.doc_type
d.field_name = ref_d.fieldname
d.property = prop
d.value = value
d.property_type = self.defaults[prop]['fieldtype']
#d.default_value = self.defaults[prop]['default']
if delete: d.delete = 1
if d.select_item:
d.select_item = self.remove_forbidden(d.select_item)
# return the property setter doc
return d
else: return None
def set_properties(self, ps_doclist):
"""
* Delete a property setter entry
+ if it already exists
+ if marked for deletion
* Save the property setter doc in the list
"""
for d in ps_doclist:
# Delete existing property setter entry
if not d.fields.get("field_name"):
webnotes.conn.sql("""
DELETE FROM `tabProperty Setter`
WHERE doc_type = %(doc_type)s
AND property = %(property)s""", d.fields)
else:
webnotes.conn.sql("""
DELETE FROM `tabProperty Setter`
WHERE doc_type = %(doc_type)s
AND field_name = %(field_name)s
AND property = %(property)s""", d.fields)
# Save the property setter doc if not marked for deletion i.e. delete=0
if not d.delete:
d.save(1)
def delete(self):
"""
Deletes all property setter entries for the selected doctype
and resets it to standard
"""
if self.doc.doc_type:
webnotes.conn.sql("""
DELETE FROM `tabProperty Setter`
WHERE doc_type = %s""", self.doc.doc_type)
webnotes.clear_cache(doctype=self.doc.doc_type)
self.get()
def remove_forbidden(self, string):
"""
Replace forbidden characters with a space
"""
forbidden = ['%', "'", '"', '#', '*', '?', '`']
		for f in forbidden:
			string = string.replace(f, ' ')
		return string
| gangadhar-kadam/sapphite_lib | core/doctype/customize_form/customize_form.py | Python | mit | 8,959 |
from __future__ import absolute_import, unicode_literals
import email
import logging
from email.utils import formataddr
from collections import defaultdict
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.template.loader import render_to_string
from django.utils import six, timezone
from django.utils.datastructures import MultiValueDict
from django.utils.six.moves.urllib.parse import urljoin
from djblets.mail.message import EmailMessage as DjbletsEmailMessage
from djblets.siteconfig.models import SiteConfiguration
from djblets.auth.signals import user_registered
from reviewboard.accounts.models import ReviewRequestVisit
from reviewboard.admin.server import get_server_url
from reviewboard.reviews.models import Group, ReviewRequest, Review
from reviewboard.reviews.signals import (review_request_published,
review_published, reply_published,
review_request_closed)
from reviewboard.reviews.views import build_diff_comment_fragments
# A mapping of signals to EmailHooks.
_hooks = defaultdict(set)
def _ensure_unicode(text):
"""Return a unicode object for the given text.
Args:
text (bytes or unicode):
The text to decode.
Returns:
unicode: The decoded text.
"""
if isinstance(text, bytes):
text = text.decode('utf-8')
return text
def register_email_hook(signal, handler):
"""Register an e-mail hook.
Args:
signal (django.dispatch.Signal):
The signal that will trigger the e-mail to be sent. This is one of
:py:data:`~reviewboard.reviews.signals.review_request_published`,
:py:data:`~reviewboard.reviews.signals.review_request_closed`,
:py:data:`~reviewboard.reviews.signals.review_published`, or
:py:data:`~reviewboard.reviews.signals.reply_published`.
handler (reviewboard.extensions.hooks.EmailHook):
The ``EmailHook`` that will be triggered when an e-mail of the
chosen type is about to be sent.
"""
assert signal in (review_request_published, review_request_closed,
review_published, reply_published), (
'Invalid signal %r' % signal)
_hooks[signal].add(handler)
def unregister_email_hook(signal, handler):
"""Unregister an e-mail hook.
Args:
signal (django.dispatch.Signal):
The signal that will trigger the e-mail to be sent. This is one of
:py:data:`~reviewboard.reviews.signals.review_request_published`,
:py:data:`~reviewboard.reviews.signals.review_request_closed`,
:py:data:`~reviewboard.reviews.signals.review_published`, or
:py:data:`~reviewboard.reviews.signals.reply_published`.
handler (reviewboard.extensions.hooks.EmailHook):
The ``EmailHook`` that will be triggered when an e-mail of the
chosen type is about to be sent.
"""
assert signal in (review_request_published, review_request_closed,
review_published, reply_published), (
'Invalid signal %r' % signal)
_hooks[signal].discard(handler)
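# Illustrative sketch (added; not part of the original module): a hook object
# registered above only needs ``get_to_field``/``get_cc_field`` methods, which
# receive and must return the recipient sets (see
# ``filter_email_recipients_from_hooks`` at the bottom of this file). A
# hypothetical hook that drops all direct recipients could look like:
#
#     class DropToFieldHook(object):
#         def get_to_field(self, to_field, **kwargs):
#             return set()
#
#         def get_cc_field(self, cc_field, **kwargs):
#             return cc_field
#
#     register_email_hook(review_published, DropToFieldHook())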
def review_request_closed_cb(sender, user, review_request, type, **kwargs):
"""Send e-mail when a review request is closed.
Listens to the
:py:data:`~reviewboard.reviews.signals.review_request_closed` signal and
sends an e-mail if this type of notification is enabled (through the
``mail_send_review_close_mail`` site configuration setting).
"""
siteconfig = SiteConfiguration.objects.get_current()
if siteconfig.get('mail_send_review_close_mail'):
mail_review_request(review_request, user, close_type=type)
def review_request_published_cb(sender, user, review_request, trivial,
changedesc, **kwargs):
"""Send e-mail when a review request is published.
Listens to the
:py:data:`~reviewboard.reviews.signals.review_request_published` signal and
    sends an e-mail if this type of notification is enabled (through the
    ``mail_send_review_mail`` site configuration setting).
"""
siteconfig = SiteConfiguration.objects.get_current()
if siteconfig.get('mail_send_review_mail') and not trivial:
mail_review_request(review_request, user, changedesc)
def review_published_cb(sender, user, review, to_submitter_only, **kwargs):
"""Send e-mail when a review is published.
Listens to the :py:data:`~reviewboard.reviews.signals.review_published`
    signal and sends e-mail if this type of notification is enabled (through
    the ``mail_send_review_mail`` site configuration setting).
"""
siteconfig = SiteConfiguration.objects.get_current()
if siteconfig.get('mail_send_review_mail'):
mail_review(review, user, to_submitter_only)
def reply_published_cb(sender, user, reply, trivial, **kwargs):
"""Send e-mail when a review reply is published.
Listens to the :py:data:`~reviewboard.reviews.signals.reply_published`
signal and sends an e-mail if this type of notification is enabled (through
``mail_send_review_mail`` site configuration).
"""
siteconfig = SiteConfiguration.objects.get_current()
if siteconfig.get('mail_send_review_mail') and not trivial:
mail_reply(reply, user)
def user_registered_cb(user, **kwargs):
"""Send e-mail when a user is registered.
Listens for new user registrations and sends a new user registration
e-mail to administrators, if this type of notification is enabled (through
``mail_send_new_user_mail`` site configuration).
"""
siteconfig = SiteConfiguration.objects.get_current()
if siteconfig.get('mail_send_new_user_mail'):
mail_new_user(user)
def connect_signals():
"""Connect e-mail callbacks to signals."""
review_request_published.connect(review_request_published_cb,
sender=ReviewRequest)
review_published.connect(review_published_cb, sender=Review)
reply_published.connect(reply_published_cb, sender=Review)
review_request_closed.connect(review_request_closed_cb,
sender=ReviewRequest)
user_registered.connect(user_registered_cb)
def build_email_address(fullname, email):
"""Build an e-mail address for the name and e-mail address.
Args:
fullname (unicode):
The full name associated with the e-mail address (or ``None``).
email (unicode):
The e-mail address.
Returns:
unicode: A properly formatted e-mail address.
"""
return formataddr((fullname, email))
def get_email_address_for_user(user):
"""Build an e-mail address for the given user.
Args:
user (django.contrib.auth.models.User):
The user.
Returns:
unicode: A properly formatted e-mail address for the user.
"""
return build_email_address(user.get_full_name(), user.email)
def get_email_addresses_for_group(group, review_request_id=None):
"""Build a list of e-mail addresses for the group.
Args:
group (reviewboard.reviews.models.Group):
The review group to build the e-mail addresses for.
Returns:
list: A list of properly formatted e-mail addresses for all users in
the review group.
"""
addresses = []
if group.mailing_list:
if ',' not in group.mailing_list:
# The mailing list field has only one e-mail address in it,
# so we can just use that and the group's display name.
addresses = [build_email_address(group.display_name,
group.mailing_list)]
else:
# The mailing list field has multiple e-mail addresses in it.
# We don't know which one should have the group's display name
# attached to it, so just return their custom list as-is.
addresses = group.mailing_list.split(',')
if not (group.mailing_list and group.email_list_only):
users_q = Q(is_active=True)
local_site = group.local_site
if local_site:
users_q = users_q & (Q(local_site=local_site) |
Q(local_site_admins=local_site))
users = group.users.filter(users_q).select_related('profile')
if review_request_id:
users = users.extra(select={
'visibility': """
SELECT accounts_reviewrequestvisit.visibility
FROM accounts_reviewrequestvisit
WHERE accounts_reviewrequestvisit.review_request_id =
%s
AND accounts_reviewrequestvisit.user_id =
reviews_group_users.user_id
""" % review_request_id
})
addresses.extend([
get_email_address_for_user(u)
for u in users
if (u.should_send_email() and
(not review_request_id or
u.visibility != ReviewRequestVisit.MUTED))
])
return addresses
class EmailMessage(DjbletsEmailMessage):
"""The Review Board EmailMessage subclass.
This class only differs from Djblets'
:py:class:`EmailMessage <djblets.email.message.EmailMessage>`
by using the site configuration to generate some e-mail settings.
"""
def __init__(self, subject, text_body, html_body, from_email, sender,
to, cc=None, in_reply_to=None, headers=None):
siteconfig = SiteConfiguration.objects.get_current()
auto_generated = siteconfig.get('mail_enable_autogenerated_header')
super(EmailMessage, self).__init__(
subject=subject,
text_body=text_body,
html_body=html_body,
from_email=from_email,
to=to,
cc=cc,
sender=sender,
in_reply_to=in_reply_to,
headers=headers,
auto_generated=auto_generated,
prevent_auto_responses=True)
def build_recipients(user, review_request, extra_recipients=None,
limit_recipients_to=None):
"""Build the recipient sets for an e-mail.
By default, the user sending the e-mail, the review request submitter (if
they are active), all active reviewers, and all active members of review
groups will be recipients of the e-mail.
If the ``limit_recipients_to`` parameter is provided, the given ``user``
and the review request submitter (if active) will still be recipients of
the e-mail, but all reviewers and members of review groups will not.
Instead, the recipients given in ``limit_recipients_to`` will be used.
Args:
user (django.contrib.auth.models.User):
The user sending the e-mail.
review_request (reviewboard.reviews.models.ReviewRequest):
The review request the e-mail corresponds to.
extra_recipients (list):
An optional list of extra recipients as
:py:class:`Users <django.contrib.auth.models.User>` and
:py:class:`Groups <reviewboard.reviews.models.Group>` that will
receive the e-mail.
limit_recipients_to (list):
An optional list of recipients as
:py:class:`Users <django.contrib.auth.models.User>` and
:py:class:`Groups <reviewboard.reviews.models.Group>` who will
receive the e-mail in place of the normal recipients.
Returns:
tuple: A 2-tuple of the To field and the CC field, as sets of
:py:class:`Users <django.contrib.auth.models.User>` and
:py:class:`Groups <reviewboard.reviews.models.Group>`.
"""
recipients = set()
to_field = set()
local_site = review_request.local_site_id
submitter = review_request.submitter
target_people = review_request.target_people.filter(is_active=True).extra(
select={
'visibility': """
SELECT accounts_reviewrequestvisit.visibility
FROM accounts_reviewrequestvisit
WHERE accounts_reviewrequestvisit.review_request_id =
reviews_reviewrequest_target_people.reviewrequest_id
AND accounts_reviewrequestvisit.user_id =
reviews_reviewrequest_target_people.user_id
"""
})
starred_users = User.objects.filter(
is_active=True,
profile__starred_review_requests=review_request,
profile__should_send_email=True)
local_site_q = Q()
if local_site:
# Filter out users who are on the reviewer list in some form or have
# starred the review request but are no longer part of the LocalSite.
local_site_q = (Q(local_site=local_site) |
Q(local_site_admins=local_site))
target_people = target_people.filter(local_site_q)
starred_users = starred_users.filter(local_site_q)
if not extra_recipients:
extra_recipients = User.objects.none()
if user.should_send_email():
recipients.add(user)
if submitter.is_active and submitter.should_send_email():
recipients.add(submitter)
recipients.update(starred_users)
def _filter_recipients(to_filter):
"""Filter the given recipients.
All groups will be added to the resulting recipients. Only users with a
matching local site will be added to the resulting recipients.
Args:
to_filter (list):
A list of recipients as
:py:class:`Users <django.contrib.auth.models.User>` and
:py:class:`Groups <reviewboard.reviews.models.Group>`.
"""
pks = set()
for recipient in to_filter:
if isinstance(recipient, User):
pks.add(recipient.pk)
elif isinstance(recipient, Group):
recipients.add(recipient)
else:
logging.error(
'Unexpected e-mail recipient %r; expected '
'django.contrib.auth.models.User or '
'reviewboard.reviews.models.Group.',
recipient)
if pks:
filtered_users = User.objects.filter(
Q(is_active=True, pk__in=pks),
local_site_q)
recipients.update(
recipient
for recipient in filtered_users.select_related('Profile')
if recipient.should_send_email()
)
if limit_recipients_to is not None:
_filter_recipients(limit_recipients_to)
else:
_filter_recipients(extra_recipients)
target_people = target_people.filter(is_active=True)
to_field.update(
recipient
for recipient in target_people.select_related('Profile')
if (recipient.should_send_email() and
recipient.visibility != ReviewRequestVisit.MUTED)
)
recipients.update(to_field)
recipients.update(review_request.target_groups.all())
if not user.should_send_own_updates():
recipients.discard(user)
to_field.discard(user)
if to_field:
cc_field = recipients.symmetric_difference(to_field)
else:
to_field = recipients
cc_field = set()
return to_field, cc_field
def recipients_to_addresses(recipients, review_request_id=None):
"""Return the set of e-mail addresses for the recipients.
Args:
recipients (list):
A list of :py:class:`Users <django.contrib.auth.models.User>` and
:py:class:`Groups <reviewboard.reviews.models.Group>`.
Returns:
set: The e-mail addresses for all recipients.
"""
addresses = set()
for recipient in recipients:
assert isinstance(recipient, User) or isinstance(recipient, Group)
if isinstance(recipient, User):
addresses.add(get_email_address_for_user(recipient))
else:
addresses.update(get_email_addresses_for_group(recipient,
review_request_id))
return addresses
def send_review_mail(user, review_request, subject, in_reply_to,
to_field, cc_field, text_template_name,
html_template_name, context=None, extra_headers=None):
"""Format and send an e-mail out.
Args:
user (django.contrib.auth.models.User):
The user who is sending the e-mail.
review_request (reviewboard.reviews.models.ReviewRequest):
The review request that the e-mail is about.
subject (unicode):
            The subject of the e-mail.
in_reply_to (unicode):
The e-mail message ID for threading.
to_field (list):
The recipients to send the e-mail to. This should be a list of
:py:class:`Users <django.contrib.auth.models.User>` and
:py:class:`Groups <reviewboard.reviews.models.Group>`.
cc_field (list):
The addresses to be CC'ed on the e-mail. This should be a list of
:py:class:`Users <django.contrib.auth.models.User>` and
:py:class:`Groups <reviewboard.reviews.models.Group>`.
text_template_name (unicode):
The name for the text e-mail template.
html_template_name (unicode):
The name for the HTML e-mail template.
context (dict):
Optional extra context to provide to the template.
extra_headers (dict):
Either a dict or
:py:class:`~django.utils.datastructures.MultiValueDict` providing
additional headers to send with the e-mail.
Returns:
unicode: The resulting e-mail message ID.
"""
current_site = Site.objects.get_current()
local_site = review_request.local_site
from_email = get_email_address_for_user(user)
to_field = recipients_to_addresses(to_field, review_request.id)
cc_field = recipients_to_addresses(cc_field, review_request.id) - to_field
if not user.should_send_own_updates():
to_field.discard(get_email_address_for_user(user))
if not to_field and not cc_field:
# Nothing to send.
return
siteconfig = current_site.config.get()
domain_method = siteconfig.get("site_domain_method")
if not context:
context = {}
context['user'] = user
context['domain'] = current_site.domain
context['domain_method'] = domain_method
context['review_request'] = review_request
if review_request.local_site:
context['local_site_name'] = review_request.local_site.name
text_body = render_to_string(text_template_name, context)
html_body = render_to_string(html_template_name, context)
base_url = get_server_url(local_site=local_site)
headers = MultiValueDict({
'X-ReviewBoard-URL': [base_url],
'X-ReviewRequest-URL': [urljoin(base_url,
review_request.get_absolute_url())],
'X-ReviewGroup': [', '.join(group.name for group in
review_request.target_groups.all())],
})
if extra_headers:
if not isinstance(extra_headers, MultiValueDict):
extra_headers = MultiValueDict(
(key, [value])
for (key, value) in six.iteritems(extra_headers)
)
headers.update(extra_headers)
if review_request.repository:
headers['X-ReviewRequest-Repository'] = review_request.repository.name
latest_diffset = review_request.get_latest_diffset()
if latest_diffset:
modified_files = set()
for filediff in latest_diffset.files.all():
if filediff.deleted or filediff.copied or filediff.moved:
modified_files.add(filediff.source_file)
if filediff.is_new or filediff.copied or filediff.moved:
modified_files.add(filediff.dest_file)
for filename in modified_files:
headers.appendlist('X-ReviewBoard-Diff-For', filename)
sender = None
if settings.DEFAULT_FROM_EMAIL:
sender = build_email_address(user.get_full_name(),
settings.DEFAULT_FROM_EMAIL)
if sender == from_email:
# RFC 2822 states that we should only include Sender if the
# two are not equal.
sender = None
message = EmailMessage(subject.strip(),
text_body.encode('utf-8'),
html_body.encode('utf-8'),
from_email, sender,
list(to_field), list(cc_field),
in_reply_to, headers)
try:
message.send()
except Exception:
logging.exception("Error sending e-mail notification with subject "
"'%s' on behalf of '%s' to '%s'",
subject.strip(),
from_email,
','.join(list(to_field) + list(cc_field)))
return message.message_id
def mail_review_request(review_request, user, changedesc=None,
close_type=None):
"""Send an e-mail representing the supplied review request.
Args:
review_request (reviewboard.reviews.models.ReviewRequest):
The review request to send an e-mail about.
user (django.contrib.auth.models.User):
The user who triggered the e-mail (i.e., they published or closed
the review request).
changedesc (reviewboard.changedescs.models.ChangeDescription):
An optional change description showing what has changed in the
review request, possibly with explanatory text from the submitter.
This is created when saving a draft on a public review request and
will be ``None`` when publishing initially. This is used by the
template to add contextual (updated) flags to inform people what
has changed.
close_type (unicode):
How the review request was closed or ``None`` if it was published.
If this is not ``None`` it must be one of
:py:attr:`~reviewboard.reviews.models.ReviewRequest.SUBMITTED` or
:py:attr:`~reviewboard.reviews.models.ReviewRequest.DISCARDED`.
"""
# If the review request is not yet public or has been discarded, don't send
# any mail. Relax the "discarded" rule when e-mails are sent on closing
# review requests
if (not review_request.public or
(not close_type and review_request.status == 'D')):
return
summary = _ensure_unicode(review_request.summary)
subject = "Review Request %d: %s" % (review_request.display_id,
summary)
reply_message_id = None
if review_request.email_message_id:
# Fancy quoted "replies"
subject = "Re: " + subject
reply_message_id = review_request.email_message_id
extra_recipients = review_request.participants
else:
extra_recipients = None
extra_context = {}
if close_type:
changedesc = review_request.changedescs.filter(public=True).latest()
limit_recipients_to = None
if changedesc:
extra_context['change_text'] = changedesc.text
extra_context['changes'] = changedesc.fields_changed
fields_changed = changedesc.fields_changed
changed_field_names = set(fields_changed.keys())
if (changed_field_names and
changed_field_names.issubset(['target_people', 'target_groups'])):
# If the only changes are to the target reviewers, try to send a
# much more targeted e-mail (rather than sending it out to
# everyone, only send it to new people).
limit_recipients_to = set()
if 'target_people' in changed_field_names:
user_pks = [
item[2]
for item in fields_changed['target_people']['added']
]
limit_recipients_to.update(User.objects.filter(
pk__in=user_pks))
if 'target_groups' in changed_field_names:
group_pks = [
item[2]
for item in fields_changed['target_groups']['added']
]
limit_recipients_to.update(Group.objects.filter(
pk__in=group_pks))
submitter = review_request.submitter
to_field, cc_field = build_recipients(submitter, review_request,
extra_recipients,
limit_recipients_to)
extra_filter_kwargs = {}
if close_type:
signal = review_request_closed
extra_filter_kwargs['close_type'] = close_type
else:
signal = review_request_published
to_field, cc_field = filter_email_recipients_from_hooks(
to_field, cc_field, signal, review_request=review_request, user=user,
**extra_filter_kwargs)
review_request.time_emailed = timezone.now()
review_request.email_message_id = \
send_review_mail(review_request.submitter, review_request, subject,
reply_message_id, to_field, cc_field,
'notifications/review_request_email.txt',
'notifications/review_request_email.html',
extra_context)
review_request.save()
def mail_review(review, user, to_submitter_only):
"""Send an e-mail representing the supplied review.
Args:
review (reviewboard.reviews.models.Review):
The review to send an e-mail about.
to_submitter_only (bool):
Determines if the review is to the submitter only or not.
"""
review_request = review.review_request
if not review_request.public:
return
review.ordered_comments = \
review.comments.order_by('filediff', 'first_line')
extra_context = {
'user': review.user,
'review': review,
}
extra_headers = {}
if review.ship_it:
extra_headers['X-ReviewBoard-ShipIt'] = '1'
if review.ship_it_only:
extra_headers['X-ReviewBoard-ShipIt-Only'] = '1'
has_error, extra_context['comment_entries'] = \
build_diff_comment_fragments(
review.ordered_comments, extra_context,
"notifications/email_diff_comment_fragment.html")
reviewer = review.user
    limit_to = None
if to_submitter_only:
limit_to = set([review_request.submitter, review.user])
to_field, cc_field = build_recipients(reviewer, review_request,
limit_recipients_to=limit_to)
to_field, cc_field = filter_email_recipients_from_hooks(
to_field, cc_field, review_published, review=review, user=user,
review_request=review_request)
summary = _ensure_unicode(review_request.summary)
review.email_message_id = send_review_mail(
reviewer,
review_request,
('Re: Review Request %d: %s'
% (review_request.display_id, summary)),
review_request.email_message_id,
to_field,
cc_field,
'notifications/review_email.txt',
'notifications/review_email.html',
extra_context,
extra_headers=extra_headers)
review.time_emailed = timezone.now()
review.save()
def mail_reply(reply, user):
"""Send an e-mail representing the supplied reply to a review.
Args:
reply (reviewboard.reviews.models.Review):
The review reply to send an e-mail about.
"""
review = reply.base_reply_to
review_request = review.review_request
if not review_request.public:
return
extra_context = {
'user': reply.user,
'review': review,
'reply': reply,
}
has_error, extra_context['comment_entries'] = \
build_diff_comment_fragments(
reply.comments.order_by('filediff', 'first_line'),
extra_context,
"notifications/email_diff_comment_fragment.html")
reviewer = reply.user
to_field, cc_field = build_recipients(reviewer, review_request,
review_request.participants)
to_field, cc_field = filter_email_recipients_from_hooks(
to_field, cc_field, reply_published, reply=reply, user=user,
review=review, review_request=review_request)
summary = _ensure_unicode(review_request.summary)
reply.email_message_id = send_review_mail(
user,
review_request,
('Re: Review Request %d: %s'
% (review_request.display_id, summary)),
review.email_message_id,
to_field,
cc_field,
'notifications/reply_email.txt',
'notifications/reply_email.html',
extra_context)
reply.time_emailed = timezone.now()
reply.save()
def mail_new_user(user):
"""Send an e-mail to administrators for newly registered users.
Args:
user (django.contrib.auth.models.User):
The user to send an e-mail about.
"""
current_site = Site.objects.get_current()
siteconfig = current_site.config.get_current()
domain_method = siteconfig.get("site_domain_method")
subject = "New Review Board user registration for %s" % user.username
from_email = get_email_address_for_user(user)
context = {
'domain': current_site.domain,
'domain_method': domain_method,
'user': user,
'user_url': reverse('admin:auth_user_change', args=(user.id,))
}
text_message = render_to_string('notifications/new_user_email.txt',
context)
html_message = render_to_string('notifications/new_user_email.html',
context)
message = EmailMessage(subject.strip(), text_message, html_message,
settings.SERVER_EMAIL, settings.SERVER_EMAIL,
[build_email_address(*a)
for a in settings.ADMINS], None, None)
try:
message.send()
except Exception as e:
logging.error("Error sending e-mail notification with subject '%s' on "
"behalf of '%s' to admin: %s",
subject.strip(), from_email, e, exc_info=1)
def filter_email_recipients_from_hooks(to_field, cc_field, signal, **kwargs):
"""Filter the e-mail recipients through configured e-mail hooks.
Args:
to_field (set):
The original To field of the e-mail, as a set of
:py:class:`Users <django.contrib.auth.models.User>` and
:py:class:`Groups <reviewboard.reviews.models.Group>`.
cc_field (set):
The original CC field of the e-mail, as a set of
:py:class:`Users <django.contrib.auth.models.User>` and
:py:class:`Groups <reviewboard.reviews.models.Group>`.
signal (django.dispatch.Signal):
The signal that triggered the e-mail.
**kwargs (dict):
Extra keyword arguments to pass to the e-mail hook.
Returns:
tuple: A 2-tuple of the To field and the CC field, as sets
of :py:class:`Users <django.contrib.auth.models.User>` and
:py:class:`Groups <reviewboard.reviews.models.Group>`.
"""
if signal in _hooks:
for hook in _hooks[signal]:
to_field = hook.get_to_field(to_field, **kwargs)
cc_field = hook.get_cc_field(cc_field, **kwargs)
return to_field, cc_field
# Fixes bug #3613
_old_header_init = email.header.Header.__init__
def _unified_header_init(self, *args, **kwargs):
kwargs['continuation_ws'] = b' '
_old_header_init(self, *args, **kwargs)
email.header.Header.__init__ = _unified_header_init
| beol/reviewboard | reviewboard/notifications/email.py | Python | mit | 32,206 |
"""
Copyright 2011 Jeff Garzik
AuthServiceProxy has the following improvements over python-jsonrpc's
ServiceProxy class:
- HTTP connections persist for the life of the AuthServiceProxy object
(if server supports HTTP/1.1)
- sends protocol 'version', per JSON-RPC 1.1
- sends proper, incrementing 'id'
- sends Basic HTTP authentication headers
- parses all JSON numbers that look like floats as Decimal
- uses standard Python json lib
Previous copyright, from python-jsonrpc/jsonrpc/proxy.py:
Copyright (c) 2007 Jan-Klaas Kollhof
This file is part of jsonrpc.
jsonrpc is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this software; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
try:
import http.client as httplib
except ImportError:
import httplib
import base64
import decimal
import json
import logging
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
USER_AGENT = "AuthServiceProxy/0.1"
HTTP_TIMEOUT = 30
log = logging.getLogger("HivemindRPC")
class JSONRPCException(Exception):
def __init__(self, rpc_error):
Exception.__init__(self)
self.error = rpc_error
def EncodeDecimal(o):
if isinstance(o, decimal.Decimal):
return round(o, 8)
raise TypeError(repr(o) + " is not JSON serializable")
class AuthServiceProxy(object):
__id_count = 0
def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connection=None):
self.__service_url = service_url
self.__service_name = service_name
self.__url = urlparse.urlparse(service_url)
if self.__url.port is None:
port = 80
else:
port = self.__url.port
(user, passwd) = (self.__url.username, self.__url.password)
try:
user = user.encode('utf8')
except AttributeError:
pass
try:
passwd = passwd.encode('utf8')
except AttributeError:
pass
authpair = user + b':' + passwd
self.__auth_header = b'Basic ' + base64.b64encode(authpair)
if connection:
# Callables re-use the connection of the original proxy
self.__conn = connection
elif self.__url.scheme == 'https':
self.__conn = httplib.HTTPSConnection(self.__url.hostname, port,
None, None, False,
timeout)
else:
self.__conn = httplib.HTTPConnection(self.__url.hostname, port,
False, timeout)
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
# Python internal stuff
raise AttributeError
if self.__service_name is not None:
name = "%s.%s" % (self.__service_name, name)
return AuthServiceProxy(self.__service_url, name, connection=self.__conn)
def __call__(self, *args):
AuthServiceProxy.__id_count += 1
log.debug("-%s-> %s %s"%(AuthServiceProxy.__id_count, self.__service_name,
json.dumps(args, default=EncodeDecimal)))
postdata = json.dumps({'version': '1.1',
'method': self.__service_name,
'params': args,
'id': AuthServiceProxy.__id_count}, default=EncodeDecimal)
self.__conn.request('POST', self.__url.path, postdata,
{'Host': self.__url.hostname,
'User-Agent': USER_AGENT,
'Authorization': self.__auth_header,
'Content-type': 'application/json'})
response = self._get_response()
if response['error'] is not None:
raise JSONRPCException(response['error'])
elif 'result' not in response:
raise JSONRPCException({
'code': -343, 'message': 'missing JSON-RPC result'})
else:
return response['result']
def _batch(self, rpc_call_list):
postdata = json.dumps(list(rpc_call_list), default=EncodeDecimal)
log.debug("--> "+postdata)
self.__conn.request('POST', self.__url.path, postdata,
{'Host': self.__url.hostname,
'User-Agent': USER_AGENT,
'Authorization': self.__auth_header,
'Content-type': 'application/json'})
return self._get_response()
def _get_response(self):
http_response = self.__conn.getresponse()
if http_response is None:
raise JSONRPCException({
'code': -342, 'message': 'missing HTTP response from server'})
responsedata = http_response.read().decode('utf8')
response = json.loads(responsedata, parse_float=decimal.Decimal)
if "error" in response and response["error"] is None:
log.debug("<-%s- %s"%(response["id"], json.dumps(response["result"], default=EncodeDecimal)))
else:
log.debug("<-- "+responsedata)
return response
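# Minimal usage sketch (illustrative only; the URL, credentials and RPC method
# names below are hypothetical and not part of this module):
#
#     proxy = AuthServiceProxy("http://user:password@127.0.0.1:8332")
#     count = proxy.getblockcount()      # attribute access builds the RPC call
#     replies = proxy._batch([           # one HTTP request, a list of responses
#         {"version": "1.1", "method": "getblockcount", "params": [], "id": 1},
#         {"version": "1.1", "method": "getblockhash", "params": [0], "id": 2},
#     ])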
| bitcoin-hivemind/hivemind | qa/rpc-tests/python-hivemindrpc/hivemindrpc/authproxy.py | Python | mit | 5,785 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'acq4/analysis/old/UncagingControlTemplate.ui'
#
# Created by: PyQt5 UI code generator 5.8.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_UncagingControlWidget(object):
def setupUi(self, UncagingControlWidget):
UncagingControlWidget.setObjectName("UncagingControlWidget")
UncagingControlWidget.resize(442, 354)
self.gridLayout_4 = QtWidgets.QGridLayout(UncagingControlWidget)
self.gridLayout_4.setObjectName("gridLayout_4")
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setSpacing(0)
self.gridLayout.setObjectName("gridLayout")
self.label = QtWidgets.QLabel(UncagingControlWidget)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.thresholdSpin = QtWidgets.QDoubleSpinBox(UncagingControlWidget)
self.thresholdSpin.setObjectName("thresholdSpin")
self.gridLayout.addWidget(self.thresholdSpin, 0, 1, 1, 1)
self.label_3 = QtWidgets.QLabel(UncagingControlWidget)
self.label_3.setObjectName("label_3")
self.gridLayout.addWidget(self.label_3, 1, 0, 1, 1)
self.directTimeSpin = QtWidgets.QDoubleSpinBox(UncagingControlWidget)
self.directTimeSpin.setObjectName("directTimeSpin")
self.gridLayout.addWidget(self.directTimeSpin, 1, 1, 1, 1)
self.label_2 = QtWidgets.QLabel(UncagingControlWidget)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 2, 0, 1, 1)
self.poststimTimeSpin = QtWidgets.QDoubleSpinBox(UncagingControlWidget)
self.poststimTimeSpin.setObjectName("poststimTimeSpin")
self.gridLayout.addWidget(self.poststimTimeSpin, 2, 1, 1, 1)
self.gridLayout_4.addLayout(self.gridLayout, 0, 0, 1, 1)
self.groupBox_4 = QtWidgets.QGroupBox(UncagingControlWidget)
self.groupBox_4.setObjectName("groupBox_4")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.groupBox_4)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setSpacing(0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.groupBox_2 = QtWidgets.QGroupBox(self.groupBox_4)
self.groupBox_2.setTitle("")
self.groupBox_2.setObjectName("groupBox_2")
self.gridLayout_3 = QtWidgets.QGridLayout(self.groupBox_2)
self.gridLayout_3.setObjectName("gridLayout_3")
self.gradientRadio = QtWidgets.QRadioButton(self.groupBox_2)
self.gradientRadio.setObjectName("gradientRadio")
self.gridLayout_3.addWidget(self.gradientRadio, 0, 0, 1, 2)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.label_4 = QtWidgets.QLabel(self.groupBox_2)
self.label_4.setObjectName("label_4")
self.horizontalLayout_2.addWidget(self.label_4)
self.colorSpin1 = QtWidgets.QDoubleSpinBox(self.groupBox_2)
self.colorSpin1.setObjectName("colorSpin1")
self.horizontalLayout_2.addWidget(self.colorSpin1)
self.gridLayout_3.addLayout(self.horizontalLayout_2, 1, 0, 1, 2)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.label_5 = QtWidgets.QLabel(self.groupBox_2)
self.label_5.setObjectName("label_5")
self.horizontalLayout_3.addWidget(self.label_5)
self.colorSpin3 = QtWidgets.QDoubleSpinBox(self.groupBox_2)
self.colorSpin3.setMaximum(100.0)
self.colorSpin3.setObjectName("colorSpin3")
self.horizontalLayout_3.addWidget(self.colorSpin3)
self.gridLayout_3.addLayout(self.horizontalLayout_3, 2, 0, 1, 2)
self.gradientWidget = GradientWidget(self.groupBox_2)
self.gradientWidget.setObjectName("gradientWidget")
self.gridLayout_3.addWidget(self.gradientWidget, 3, 0, 2, 2)
self.rgbRadio = QtWidgets.QRadioButton(self.groupBox_2)
self.rgbRadio.setObjectName("rgbRadio")
self.gridLayout_3.addWidget(self.rgbRadio, 5, 0, 1, 2)
self.colorTracesCheck = QtWidgets.QCheckBox(self.groupBox_2)
self.colorTracesCheck.setObjectName("colorTracesCheck")
self.gridLayout_3.addWidget(self.colorTracesCheck, 7, 0, 1, 2)
self.svgCheck = QtWidgets.QCheckBox(self.groupBox_2)
self.svgCheck.setObjectName("svgCheck")
self.gridLayout_3.addWidget(self.svgCheck, 8, 0, 1, 2)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.label_6 = QtWidgets.QLabel(self.groupBox_2)
self.label_6.setObjectName("label_6")
self.horizontalLayout_4.addWidget(self.label_6)
self.lowClipSpin = QtWidgets.QSpinBox(self.groupBox_2)
self.lowClipSpin.setObjectName("lowClipSpin")
self.horizontalLayout_4.addWidget(self.lowClipSpin)
self.highClipSpin = QtWidgets.QSpinBox(self.groupBox_2)
self.highClipSpin.setObjectName("highClipSpin")
self.horizontalLayout_4.addWidget(self.highClipSpin)
self.gridLayout_3.addLayout(self.horizontalLayout_4, 9, 0, 1, 2)
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.label_7 = QtWidgets.QLabel(self.groupBox_2)
self.label_7.setObjectName("label_7")
self.horizontalLayout_5.addWidget(self.label_7)
self.downsampleSpin = QtWidgets.QSpinBox(self.groupBox_2)
self.downsampleSpin.setObjectName("downsampleSpin")
self.horizontalLayout_5.addWidget(self.downsampleSpin)
self.gridLayout_3.addLayout(self.horizontalLayout_5, 10, 0, 1, 2)
self.horizontalLayout.addWidget(self.groupBox_2)
self.gridLayout_4.addWidget(self.groupBox_4, 0, 1, 2, 1)
self.groupBox = QtWidgets.QGroupBox(UncagingControlWidget)
self.groupBox.setObjectName("groupBox")
self.gridLayout_2 = QtWidgets.QGridLayout(self.groupBox)
self.gridLayout_2.setContentsMargins(0, 0, 0, 0)
self.gridLayout_2.setSpacing(0)
self.gridLayout_2.setObjectName("gridLayout_2")
self.eventFindRadio = QtWidgets.QRadioButton(self.groupBox)
self.eventFindRadio.setObjectName("eventFindRadio")
self.gridLayout_2.addWidget(self.eventFindRadio, 0, 0, 1, 1)
self.chargeTransferRadio = QtWidgets.QRadioButton(self.groupBox)
self.chargeTransferRadio.setObjectName("chargeTransferRadio")
self.gridLayout_2.addWidget(self.chargeTransferRadio, 1, 0, 1, 1)
self.useSpontActCheck = QtWidgets.QCheckBox(self.groupBox)
self.useSpontActCheck.setObjectName("useSpontActCheck")
self.gridLayout_2.addWidget(self.useSpontActCheck, 2, 0, 1, 1)
self.medianCheck = QtWidgets.QCheckBox(self.groupBox)
self.medianCheck.setObjectName("medianCheck")
self.gridLayout_2.addWidget(self.medianCheck, 3, 0, 1, 1)
self.gridLayout_4.addWidget(self.groupBox, 1, 0, 1, 1)
self.recolorBtn = QtWidgets.QPushButton(UncagingControlWidget)
self.recolorBtn.setObjectName("recolorBtn")
self.gridLayout_4.addWidget(self.recolorBtn, 2, 0, 1, 2)
self.retranslateUi(UncagingControlWidget)
        QtCore.QMetaObject.connectSlotsByName(UncagingControlWidget)
def retranslateUi(self, UncagingControlWidget):
        _translate = QtCore.QCoreApplication.translate
UncagingControlWidget.setWindowTitle(_translate("UncagingControlWidget", "Form"))
self.label.setText(_translate("UncagingControlWidget", "Noise Threshold"))
self.label_3.setText(_translate("UncagingControlWidget", "Direct Time"))
self.label_2.setText(_translate("UncagingControlWidget", "Post-Stim. Time"))
self.groupBox_4.setTitle(_translate("UncagingControlWidget", "Coloring Scheme:"))
self.gradientRadio.setText(_translate("UncagingControlWidget", "Gradient"))
self.label_4.setText(_translate("UncagingControlWidget", "Low % Cutoff"))
self.label_5.setText(_translate("UncagingControlWidget", "High % Cutoff"))
self.rgbRadio.setText(_translate("UncagingControlWidget", "RGB"))
self.colorTracesCheck.setText(_translate("UncagingControlWidget", "Color Traces by Laser Power"))
self.svgCheck.setText(_translate("UncagingControlWidget", "Prepare for SVG"))
self.label_6.setText(_translate("UncagingControlWidget", "Clip:"))
self.label_7.setText(_translate("UncagingControlWidget", "Downsample:"))
self.groupBox.setTitle(_translate("UncagingControlWidget", "Analysis Method:"))
self.eventFindRadio.setText(_translate("UncagingControlWidget", "Event Finding"))
self.chargeTransferRadio.setText(_translate("UncagingControlWidget", "Total Charge Transfer"))
self.useSpontActCheck.setText(_translate("UncagingControlWidget", "Use Spont. Activity"))
self.medianCheck.setText(_translate("UncagingControlWidget", "Use Median"))
self.recolorBtn.setText(_translate("UncagingControlWidget", "Re-Color"))
from acq4.pyqtgraph.GradientWidget import GradientWidget
| campagnola/acq4 | acq4/analysis/old/UncagingControlTemplate_pyqt5.py | Python | mit | 9,306 |
import numpy as np
from keras.datasets import mnist
from keras.layers import Activation
from keras.layers import Dense
from keras.models import Sequential
from keras.utils import np_utils
np.random.seed(1337)
nb_classes = 10
batch_size = 128
nb_epoch = 5
weighted_class = 9
standard_weight = 1
high_weight = 5
max_train_samples = 5000
max_test_samples = 1000
def get_data():
    # the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)[:max_train_samples]
X_test = X_test.reshape(10000, 784)[:max_test_samples]
X_train = X_train.astype('float32') / 255
X_test = X_test.astype('float32') / 255
# convert class vectors to binary class matrices
y_train = y_train[:max_train_samples]
y_test = y_test[:max_test_samples]
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
test_ids = np.where(y_test == np.array(weighted_class))[0]
return (X_train, Y_train), (X_test, Y_test), test_ids
def validate_regularizer(weight_reg=None, activity_reg=None):
model = Sequential()
model.add(Dense(50, input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dense(10, W_regularizer=weight_reg,
activity_regularizer=activity_reg))
model.add(Activation('softmax'))
return model
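# Illustrative sketch (added; not part of the original file) of how the helpers
# above could be combined to exercise a regularizer. The ``l2`` regularizer and
# the compile/fit calls follow the Keras 1.x API already used in this file:
#
#     from keras.regularizers import l2
#
#     (X_train, Y_train), (X_test, Y_test), test_ids = get_data()
#     model = validate_regularizer(weight_reg=l2(0.01))
#     model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
#     model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
#               validation_data=(X_test, Y_test))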
| farizrahman4u/keras-contrib | keras_contrib/tests/regularizers.py | Python | mit | 1,414 |
import zlib
from test_support import TestFailed
import sys
import imp
try:
t = imp.find_module('test_zlib')
file = t[0]
except ImportError:
file = open(__file__)
buf = file.read() * 8
file.close()
# test the checksums (hex so the test doesn't break on 64-bit machines)
print hex(zlib.crc32('penguin')), hex(zlib.crc32('penguin', 1))
print hex(zlib.adler32('penguin')), hex(zlib.adler32('penguin', 1))
# make sure we generate some expected errors
try:
zlib.compress('ERROR', zlib.MAX_WBITS + 1)
except zlib.error, msg:
print "expecting", msg
try:
zlib.compressobj(1, 8, 0)
except ValueError, msg:
print "expecting", msg
try:
zlib.decompressobj(0)
except ValueError, msg:
print "expecting", msg
x = zlib.compress(buf)
y = zlib.decompress(x)
if buf != y:
print "normal compression/decompression failed"
else:
print "normal compression/decompression succeeded"
buf = buf * 16
co = zlib.compressobj(8, 8, -15)
x1 = co.compress(buf)
x2 = co.flush()
x = x1 + x2
dc = zlib.decompressobj(-15)
y1 = dc.decompress(x)
y2 = dc.flush()
y = y1 + y2
if buf != y:
print "compress/decompression obj failed"
else:
print "compress/decompression obj succeeded"
co = zlib.compressobj(2, 8, -12, 9, 1)
bufs = []
for i in range(0, len(buf), 256):
bufs.append(co.compress(buf[i:i+256]))
bufs.append(co.flush())
combuf = ''.join(bufs)
decomp1 = zlib.decompress(combuf, -12, -5)
if decomp1 != buf:
print "decompress with init options failed"
else:
print "decompress with init options succeeded"
deco = zlib.decompressobj(-12)
bufs = []
for i in range(0, len(combuf), 128):
bufs.append(deco.decompress(combuf[i:i+128]))
bufs.append(deco.flush())
decomp2 = ''.join(bufs)
if decomp2 != buf:
print "decompressobj with init options failed"
else:
print "decompressobj with init options succeeded"
print "should be '':", `deco.unconsumed_tail`
# Check a decompression object with max_length specified
deco = zlib.decompressobj(-12)
cb = combuf
bufs = []
while cb:
max_length = 1 + len(cb)/10
chunk = deco.decompress(cb, max_length)
if len(chunk) > max_length:
print 'chunk too big (%d>%d)' % (len(chunk),max_length)
bufs.append(chunk)
cb = deco.unconsumed_tail
bufs.append(deco.flush())
decomp2 = ''.join(bufs)
if decomp2 != buf:
print "max_length decompressobj failed"
else:
print "max_length decompressobj succeeded"
# Misc tests of max_length
deco = zlib.decompressobj(-12)
try:
deco.decompress("", -1)
except ValueError:
pass
else:
print "failed to raise value error on bad max_length"
print "unconsumed_tail should be '':", `deco.unconsumed_tail`
# Test flush() with the various options, using all the different levels
# in order to provide more variations.
sync_opt = ['Z_NO_FLUSH', 'Z_SYNC_FLUSH', 'Z_FULL_FLUSH']
sync_opt = [getattr(zlib, opt) for opt in sync_opt if hasattr(zlib, opt)]
for sync in sync_opt:
for level in range(10):
obj = zlib.compressobj( level )
d = obj.compress( buf[:3000] )
d = d + obj.flush( sync )
d = d + obj.compress( buf[3000:] )
d = d + obj.flush()
if zlib.decompress(d) != buf:
print "Decompress failed: flush mode=%i, level=%i" % (sync,level)
del obj
# Test for the odd flushing bugs noted in 2.0, and hopefully fixed in 2.1
import random
random.seed(1)
print 'Testing on 17K of random data'
if hasattr(zlib, 'Z_SYNC_FLUSH'):
# Create compressor and decompressor objects
c=zlib.compressobj(9)
d=zlib.decompressobj()
# Try 17K of data
# generate random data stream
a=""
for i in range(17*1024):
a=a+chr(random.randint(0,255))
# compress, sync-flush, and decompress
t = d.decompress( c.compress(a)+c.flush(zlib.Z_SYNC_FLUSH) )
# if decompressed data is different from the input data, choke.
if len(t) != len(a):
print len(a),len(t),len(d.unused_data)
raise TestFailed, "output of 17K doesn't match"
def ignore():
"""An empty function with a big string.
Make the compression algorithm work a little harder.
"""
"""
LAERTES
O, fear me not.
I stay too long: but here my father comes.
Enter POLONIUS
A double blessing is a double grace,
Occasion smiles upon a second leave.
LORD POLONIUS
Yet here, Laertes! aboard, aboard, for shame!
The wind sits in the shoulder of your sail,
And you are stay'd for. There; my blessing with thee!
And these few precepts in thy memory
See thou character. Give thy thoughts no tongue,
Nor any unproportioned thought his act.
Be thou familiar, but by no means vulgar.
Those friends thou hast, and their adoption tried,
Grapple them to thy soul with hoops of steel;
But do not dull thy palm with entertainment
Of each new-hatch'd, unfledged comrade. Beware
Of entrance to a quarrel, but being in,
Bear't that the opposed may beware of thee.
Give every man thy ear, but few thy voice;
Take each man's censure, but reserve thy judgment.
Costly thy habit as thy purse can buy,
But not express'd in fancy; rich, not gaudy;
For the apparel oft proclaims the man,
And they in France of the best rank and station
Are of a most select and generous chief in that.
Neither a borrower nor a lender be;
For loan oft loses both itself and friend,
And borrowing dulls the edge of husbandry.
This above all: to thine ownself be true,
And it must follow, as the night the day,
Thou canst not then be false to any man.
Farewell: my blessing season this in thee!
LAERTES
Most humbly do I take my leave, my lord.
LORD POLONIUS
The time invites you; go; your servants tend.
LAERTES
Farewell, Ophelia; and remember well
What I have said to you.
OPHELIA
'Tis in my memory lock'd,
And you yourself shall keep the key of it.
LAERTES
Farewell.
"""
| MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.2/Lib/test/test_zlib.py | Python | mit | 6,045 |
from django import template
from django.shortcuts import render_to_response, redirect, get_object_or_404
# from product.models import Slide
register = template.Library()
# @register.inclusion_tag('slides/slides.html')
# def get_main_slides():
# slides = Slide.objects.filter(published_main=1).order_by('ordering')
# return {'slides': slides}
# @register.inclusion_tag('comments/comments.html')
# def comments(paket, item_model, item_id):
# from comments.models import Comments
# nodes = Comments.objects.filter(paket=paket, item_model=item_model,item_id=item_id, published=1)
# return {'nodes':nodes, 'paket':paket, 'item_model':item_model, 'item_id':item_id}
# @register.filter(name='suit_conf')
# def suit_conf(name):
# value = get_config(name)
# return mark_safe(value) if isinstance(value, str) else value
# @register.tag
# def suit_date(parser, token):
# return NowNode(get_config('HEADER_DATE_FORMAT'))
# @register.tag
# def suit_time(parser, token):
# return NowNode(get_config('HEADER_TIME_FORMAT'))
# @register.filter
# def field_contents_foreign_linked(admin_field):
# """Return the .contents attribute of the admin_field, and if it
# is a foreign key, wrap it in a link to the admin page for that
# object.
# Use by replacing '{{ field.contents }}' in an admin template (e.g.
# fieldset.html) with '{{ field|field_contents_foreign_linked }}'.
# """
# fieldname = admin_field.field['field']
# displayed = admin_field.contents()
# obj = admin_field.form.instance
# if not hasattr(admin_field.model_admin,
# 'linked_readonly_fields') or fieldname not in admin_field \
# .model_admin \
# .linked_readonly_fields:
# return displayed
# try:
# fieldtype, attr, value = lookup_field(fieldname, obj,
# admin_field.model_admin)
# except ObjectDoesNotExist:
# fieldtype = None
# if isinstance(fieldtype, ForeignKey):
# try:
# url = admin_url(value)
# except NoReverseMatch:
# url = None
# if url:
# displayed = "<a href='%s'>%s</a>" % (url, displayed)
# return mark_safe(displayed)
# @register.filter
# def admin_url(obj):
# info = (obj._meta.app_label, obj._meta.module_name)
# return reverse("admin:%s_%s_change" % info, args=[obj.pk])
# @register.simple_tag
# def suit_bc(*args):
# return utils.value_by_version(args)
# @register.assignment_tag
# def suit_bc_value(*args):
# return utils.value_by_version(args)
| skylifewww/pangolin-fog | product/templatetags/category_tags.py | Python | mit | 2,627 |
#-*- encoding:utf-8 -*-
'''
Created on Nov 30, 2014
@author: letian
'''
import networkx as nx
from Segmentation import Segmentation
import numpy as np
class TextRank4Keyword(object):
def __init__(self, stop_words_file = None, delimiters = '?!;?!。;…\n'):
'''
        `stop_words_file`: defaults to None, in which case the internal stop-word list is empty;
                           it may also be a file path (string) from which stop words are read.
        `delimiters`: defaults to `'?!;?!。;…\n'`; these characters are used to split the text into sentences.
        self.words_no_filter: two-level list obtained by segmenting every sentence in sentences.
        self.words_no_stop_words: two-level list obtained by removing stop words from words_no_filter.
        self.words_all_filters: two-level list obtained by keeping only words with the selected part-of-speech tags from words_no_stop_words.
'''
self.text = ''
self.keywords = []
self.seg = Segmentation(stop_words_file=stop_words_file, delimiters=delimiters)
        self.words_no_filter = None  # two-level list (one sub-list of words per sentence)
self.words_no_stop_words = None
self.words_all_filters = None
self.word_index = {}
self.index_word = {}
self.graph = None
def train(self, text, window = 2, lower = False, speech_tag_filter=True,
vertex_source = 'all_filters',
edge_source = 'no_stop_words'):
'''
        `text`: the text to analyze, as a string.
        `window`: window size (int) used to build edges between words. Defaults to 2.
        `lower`: whether to convert the text to lower case. Defaults to False.
        `speech_tag_filter`: if True, the internal part-of-speech list is used to filter words when building words_all_filters.
                    If False, words_all_filters is identical to words_no_stop_words.
        `vertex_source`: which of words_no_filter, words_no_stop_words, words_all_filters supplies the nodes of the PageRank graph.
                    Defaults to `'all_filters'`; valid values are `'no_filter', 'no_stop_words', 'all_filters'`. Keywords are also drawn from `vertex_source`.
        `edge_source`: which of words_no_filter, words_no_stop_words, words_all_filters supplies the edges of the PageRank graph.
                    Defaults to `'no_stop_words'`; valid values are `'no_filter', 'no_stop_words', 'all_filters'`. Edge construction also depends on the `window` parameter.
'''
self.text = text
self.word_index = {}
self.index_word = {}
self.keywords = []
self.graph = None
(_, self.words_no_filter, self.words_no_stop_words, self.words_all_filters) = self.seg.segment(text=text,
lower=lower,
speech_tag_filter=speech_tag_filter)
if vertex_source == 'no_filter':
vertex_source = self.words_no_filter
elif vertex_source == 'no_stop_words':
vertex_source = self.words_no_stop_words
else:
vertex_source = self.words_all_filters
if edge_source == 'no_filter':
edge_source = self.words_no_filter
        elif edge_source == 'all_filters':
edge_source = self.words_all_filters
else:
edge_source = self.words_no_stop_words
index = 0
for words in vertex_source:
for word in words:
if not self.word_index.has_key(word):
self.word_index[word] = index
self.index_word[index] = word
index += 1
        words_number = index  # number of distinct words
self.graph = np.zeros((words_number, words_number))
for word_list in edge_source:
for w1, w2 in self.combine(word_list, window):
if not self.word_index.has_key(w1):
continue
if not self.word_index.has_key(w2):
continue
index1 = self.word_index[w1]
index2 = self.word_index[w2]
self.graph[index1][index2] = 1.0
self.graph[index2][index1] = 1.0
# for x in xrange(words_number):
# row_sum = np.sum(self.graph[x, :])
# if row_sum > 0:
# self.graph[x, :] = self.graph[x, :] / row_sum
nx_graph = nx.from_numpy_matrix(self.graph)
scores = nx.pagerank(nx_graph) # this is a dict
sorted_scores = sorted(scores.items(), key = lambda item: item[1], reverse=True)
for index, _ in sorted_scores:
self.keywords.append(self.index_word[index])
def combine(self, word_list, window = 2):
'''
        Build word combinations within the given window; the pairs are used as graph edges. Implemented as a generator.
        word_list: a list of words.
        window: window size.
'''
window = int(window)
if window < 2: window = 2
for x in xrange(1, window):
if x >= len(word_list):
break
word_list2 = word_list[x:]
res = zip(word_list, word_list2)
for r in res:
yield r
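    # Note (added for clarity; not in the original source): with the default
    # window of 2, combine() yields adjacent word pairs, e.g.
    # combine(['a', 'b', 'c'], 2) -> ('a', 'b'), ('b', 'c').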
def get_keywords(self, num = 6, word_min_len = 1):
'''
        Return the `num` most important keywords whose length is at least `word_min_len`.
        Returns a list of keywords.
'''
result = []
count = 0
for word in self.keywords:
if count >= num:
break
if len(word) >= word_min_len:
result.append(word)
count += 1
return result
def get_keyphrases(self, keywords_num = 12, min_occur_num = 2):
'''
        Extract key phrases.
        Candidate phrases are built from the top `keywords_num` keywords; a phrase is kept only if it occurs at least `min_occur_num` times in the original text.
        Returns a list of key phrases.
'''
keywords_set = set(self.get_keywords(num=keywords_num, word_min_len = 1))
keyphrases = set()
one = []
for sentence_list in self.words_no_filter:
for word in sentence_list:
# print '/'.join(one)
# print word
if word in keywords_set:
one.append(word)
else:
if len(one)>1:
keyphrases.add(''.join(one))
one = []
continue
one = []
return [phrase for phrase in keyphrases
if self.text.count(phrase) >= min_occur_num]
if __name__ == '__main__':
import codecs
text = codecs.open('../text/02.txt', 'r', 'utf-8').read()
# text = "坏人"
tr4w = TextRank4Keyword(stop_words_file='../stopword.data')
tr4w.train(text=text, speech_tag_filter=True, lower=True, window=2)
for word in tr4w.get_keywords(10, word_min_len=2):
print word
print '---'
for phrase in tr4w.get_keyphrases(keywords_num=20, min_occur_num= 2):
print phrase
| MSC19950601/TextRank4ZH | textrank4zh/TextRank4Keyword.py | Python | mit | 7,411 |
import numpy
from chainer.backends import cuda
from chainer import function_node
from chainer.utils import type_check
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return x, x
class Shift(function_node.FunctionNode):
def __init__(self, ksize=3, dilate=1):
super(Shift, self).__init__()
self.kh, self.kw = _pair(ksize)
if self.kh % 2 != 1:
raise ValueError('kh must be odd')
if self.kw % 2 != 1:
raise ValueError('kw must be odd')
self.dy, self.dx = _pair(dilate)
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(n_in == 1)
x_type = in_types[0]
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim == 4,
x_type.shape[1] >= self.kh * self.kw,
)
def forward_cpu(self, inputs):
x = inputs[0]
b, c, h, w = x.shape
py = self.kh // 2 * abs(self.dy)
px = self.kw // 2 * abs(self.dx)
x = numpy.pad(x, ((0, 0), (0, 0), (py, py), (px, px)),
'constant')
n_groups = self.kh * self.kw
group_size = c // n_groups
ret = []
for i, group_idx in enumerate(range(n_groups)):
# Make sure that center group is last
if group_idx == (n_groups - 1) // 2:
group_idx = n_groups - 1
elif group_idx == (n_groups - 1):
group_idx = (n_groups - 1) // 2
ky = (group_idx // self.kw) - py // abs(self.dy)
kx = (group_idx % self.kw) - px // abs(self.dx)
hs = py + -ky * self.dy
ws = px + -kx * self.dx
he = hs + h
we = ws + w
cs = i * group_size
ce = (i + 1) * group_size if i < n_groups - 1 else None
ret.append(x[:, cs:ce, hs:he, ws:we])
return numpy.concatenate(ret, axis=1),
def forward_gpu(self, inputs):
x = inputs[0]
b, c, h, w = x.shape
y = cuda.cupy.empty_like(x)
cuda.elementwise(
'raw T x, int32 c, int32 h, int32 w,'
'int32 kh, int32 kw,'
'int32 dy, int32 dx',
'T y',
'''
int b0 = i / (c * h * w);
int rest = i % (c * h * w);
int c0 = rest / (h * w);
rest %= h * w;
int out_row = rest / w;
int out_col = rest % w;
int n_groups = kh * kw;
int group_size = c / n_groups;
int group_idx = c0 / group_size;
// Make sure that center group is last
if (group_idx == (n_groups - 1) / 2) {
group_idx = n_groups - 1;
} else if (group_idx == n_groups - 1) {
group_idx = (n_groups - 1) / 2;
}
int ky = (group_idx / kw) - kh / 2;
int kx = (group_idx % kw) - kw / 2;
if (group_idx >= n_groups) {
ky = 0;
kx = 0;
}
int in_row = -ky * dy + out_row;
int in_col = -kx * dx + out_col;
if (in_row >= 0 && in_row < h && in_col >= 0 && in_col < w) {
y = x[b0 * c * h * w + c0 * h * w + in_row * w + in_col];
} else {
y = 0;
}
''',
'shift_gpu')(x, c, h, w, self.kh, self.kw, self.dy, self.dx, y)
return y,
def backward(self, indexes, grad_outputs):
return shift(grad_outputs[0], ksize=(self.kh, self.kw),
dilate=(-self.dy, -self.dx)),
def shift(x, ksize=3, dilate=1):
"""Shift function.
See: `Shift: A Zero FLOP, Zero Parameter Alternative to Spatial \
Convolutions <https://arxiv.org/abs/1711.08141>`_
Args:
x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`):
Input variable of shape :math:`(n, c, h, w)`.
ksize (int or pair of ints): Size of filters (a.k.a. kernels).
``ksize=k`` and ``ksize=(k, k)`` are equivalent.
dilate (int or pair of ints): Dilation factor of filter applications.
``dilate=d`` and ``dilate=(d, d)`` are equivalent.
Returns:
~chainer.Variable:
Output variable of same shape as ``x``.
"""
fnode = Shift(ksize, dilate)
y, = fnode.apply((x,))
return y
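# Minimal usage sketch (added for illustration; not part of the original chainer
# module). It assumes numpy and chainer are importable and only exercises the
# CPU path defined above.
if __name__ == '__main__':
    import numpy as np
    # Nine channels map one-to-one onto the 3 x 3 = 9 shift groups of ksize=3.
    x_demo = np.arange(1 * 9 * 5 * 5, dtype=np.float32).reshape(1, 9, 5, 5)
    y_demo = shift(x_demo, ksize=3, dilate=1)
    print(y_demo.shape)  # (1, 9, 5, 5): shape is preserved, channels are shifted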
| anaruse/chainer | chainer/functions/connection/shift.py | Python | mit | 4,492 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import itertools
import json
import os
import unittest
import numpy as np
from monty.json import MontyDecoder
from pymatgen.core.periodic_table import Element
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.analysis.defects.core import Interstitial, Substitution, Vacancy
from pymatgen.analysis.structure_matcher import (
ElementComparator,
FrameworkComparator,
OccupancyComparator,
OrderDisorderElementComparator,
PointDefectComparator,
StructureMatcher,
)
from pymatgen.core import PeriodicSite
from pymatgen.core.operations import SymmOp
from pymatgen.util.coord import find_in_coord_list_pbc
from pymatgen.util.testing import PymatgenTest
class StructureMatcherTest(PymatgenTest):
_multiprocess_shared_ = True
def setUp(self):
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "TiO2_entries.json"), "r") as fp:
entries = json.load(fp, cls=MontyDecoder)
self.struct_list = [e.structure for e in entries]
self.oxi_structs = [
self.get_structure("Li2O"),
Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR.Li2O")),
]
def test_ignore_species(self):
s1 = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "LiFePO4.cif"))
s2 = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR"))
m = StructureMatcher(ignored_species=["Li"], primitive_cell=False, attempt_supercell=True)
self.assertTrue(m.fit(s1, s2))
self.assertTrue(m.fit_anonymous(s1, s2))
groups = m.group_structures([s1, s2])
self.assertEqual(len(groups), 1)
s2.make_supercell((2, 1, 1))
ss1 = m.get_s2_like_s1(s2, s1, include_ignored_species=True)
self.assertAlmostEqual(ss1.lattice.a, 20.820740000000001)
self.assertEqual(ss1.composition.reduced_formula, "LiFePO4")
self.assertEqual(
{k.symbol: v.symbol for k, v in m.get_best_electronegativity_anonymous_mapping(s1, s2).items()},
{"Fe": "Fe", "P": "P", "O": "O"},
)
def test_get_supercell_size(self):
l = Lattice.cubic(1)
l2 = Lattice.cubic(0.9)
s1 = Structure(l, ["Mg", "Cu", "Ag", "Cu", "Ag"], [[0] * 3] * 5)
s2 = Structure(l2, ["Cu", "Cu", "Ag"], [[0] * 3] * 3)
sm = StructureMatcher(supercell_size="volume")
self.assertEqual(sm._get_supercell_size(s1, s2), (1, True))
self.assertEqual(sm._get_supercell_size(s2, s1), (1, True))
sm = StructureMatcher(supercell_size="num_sites")
self.assertEqual(sm._get_supercell_size(s1, s2), (2, False))
self.assertEqual(sm._get_supercell_size(s2, s1), (2, True))
sm = StructureMatcher(supercell_size="Ag")
self.assertEqual(sm._get_supercell_size(s1, s2), (2, False))
self.assertEqual(sm._get_supercell_size(s2, s1), (2, True))
sm = StructureMatcher(supercell_size=["Ag", "Cu"])
self.assertEqual(sm._get_supercell_size(s1, s2), (1, True))
self.assertEqual(sm._get_supercell_size(s2, s1), (1, True))
sm = StructureMatcher(supercell_size="wfieoh")
self.assertRaises(ValueError, sm._get_supercell_size, s1, s2)
def test_cmp_fstruct(self):
sm = StructureMatcher()
s1 = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]])
s2 = np.array([[0.11, 0.22, 0.33]])
frac_tol = np.array([0.02, 0.03, 0.04])
mask = np.array([[False, False]])
mask2 = np.array([[True, False]])
self.assertRaises(ValueError, sm._cmp_fstruct, s2, s1, frac_tol, mask.T)
self.assertRaises(ValueError, sm._cmp_fstruct, s1, s2, frac_tol, mask.T)
self.assertTrue(sm._cmp_fstruct(s1, s2, frac_tol, mask))
self.assertFalse(sm._cmp_fstruct(s1, s2, frac_tol / 2, mask))
self.assertFalse(sm._cmp_fstruct(s1, s2, frac_tol, mask2))
def test_cart_dists(self):
sm = StructureMatcher()
l = Lattice.orthorhombic(1, 2, 3)
s1 = np.array([[0.13, 0.25, 0.37], [0.1, 0.2, 0.3]])
s2 = np.array([[0.11, 0.22, 0.33]])
s3 = np.array([[0.1, 0.2, 0.3], [0.11, 0.2, 0.3]])
s4 = np.array([[0.1, 0.2, 0.3], [0.1, 0.6, 0.7]])
mask = np.array([[False, False]])
mask2 = np.array([[False, True]])
mask3 = np.array([[False, False], [False, False]])
mask4 = np.array([[False, True], [False, True]])
n1 = (len(s1) / l.volume) ** (1 / 3)
n2 = (len(s2) / l.volume) ** (1 / 3)
self.assertRaises(ValueError, sm._cart_dists, s2, s1, l, mask.T, n2)
self.assertRaises(ValueError, sm._cart_dists, s1, s2, l, mask.T, n1)
d, ft, s = sm._cart_dists(s1, s2, l, mask, n1)
self.assertTrue(np.allclose(d, [0]))
self.assertTrue(np.allclose(ft, [-0.01, -0.02, -0.03]))
self.assertTrue(np.allclose(s, [1]))
# check that masking best value works
d, ft, s = sm._cart_dists(s1, s2, l, mask2, n1)
self.assertTrue(np.allclose(d, [0]))
self.assertTrue(np.allclose(ft, [0.02, 0.03, 0.04]))
self.assertTrue(np.allclose(s, [0]))
# check that averaging of translation is done properly
d, ft, s = sm._cart_dists(s1, s3, l, mask3, n1)
self.assertTrue(np.allclose(d, [0.08093341] * 2))
self.assertTrue(np.allclose(ft, [0.01, 0.025, 0.035]))
self.assertTrue(np.allclose(s, [1, 0]))
# check distances are large when mask allows no 'real' mapping
d, ft, s = sm._cart_dists(s1, s4, l, mask4, n1)
self.assertTrue(np.min(d) > 1e8)
self.assertTrue(np.min(ft) > 1e8)
def test_get_mask(self):
sm = StructureMatcher(comparator=ElementComparator())
l = Lattice.cubic(1)
s1 = Structure(l, ["Mg", "Cu", "Ag", "Cu"], [[0] * 3] * 4)
s2 = Structure(l, ["Cu", "Cu", "Ag"], [[0] * 3] * 3)
result = [
[True, False, True, False],
[True, False, True, False],
[True, True, False, True],
]
m, inds, i = sm._get_mask(s1, s2, 1, True)
self.assertTrue(np.all(m == result))
self.assertTrue(i == 2)
self.assertEqual(inds, [2])
# test supercell with match
result = [
[1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 1, 1, 0, 0, 1, 1],
]
m, inds, i = sm._get_mask(s1, s2, 2, True)
self.assertTrue(np.all(m == result))
self.assertTrue(i == 2)
self.assertTrue(np.allclose(inds, np.array([4])))
# test supercell without match
result = [
[1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1],
[1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1],
]
m, inds, i = sm._get_mask(s2, s1, 2, True)
self.assertTrue(np.all(m == result))
self.assertTrue(i == 0)
self.assertTrue(np.allclose(inds, np.array([])))
# test s2_supercell
result = [
[1, 1, 1],
[1, 1, 1],
[0, 0, 1],
[0, 0, 1],
[1, 1, 0],
[1, 1, 0],
[0, 0, 1],
[0, 0, 1],
]
m, inds, i = sm._get_mask(s2, s1, 2, False)
self.assertTrue(np.all(m == result))
self.assertTrue(i == 0)
self.assertTrue(np.allclose(inds, np.array([])))
# test for multiple translation indices
s1 = Structure(l, ["Cu", "Ag", "Cu", "Ag", "Ag"], [[0] * 3] * 5)
s2 = Structure(l, ["Ag", "Cu", "Ag"], [[0] * 3] * 3)
result = [[1, 0, 1, 0, 0], [0, 1, 0, 1, 1], [1, 0, 1, 0, 0]]
m, inds, i = sm._get_mask(s1, s2, 1, True)
self.assertTrue(np.all(m == result))
self.assertTrue(i == 1)
self.assertTrue(np.allclose(inds, [0, 2]))
def test_get_supercells(self):
sm = StructureMatcher(comparator=ElementComparator())
l = Lattice.cubic(1)
l2 = Lattice.cubic(0.5)
s1 = Structure(l, ["Mg", "Cu", "Ag", "Cu"], [[0] * 3] * 4)
s2 = Structure(l2, ["Cu", "Cu", "Ag"], [[0] * 3] * 3)
scs = list(sm._get_supercells(s1, s2, 8, False))
for x in scs:
self.assertAlmostEqual(abs(np.linalg.det(x[3])), 8)
self.assertEqual(len(x[0]), 4)
self.assertEqual(len(x[1]), 24)
self.assertEqual(len(scs), 48)
scs = list(sm._get_supercells(s2, s1, 8, True))
for x in scs:
self.assertAlmostEqual(abs(np.linalg.det(x[3])), 8)
self.assertEqual(len(x[0]), 24)
self.assertEqual(len(x[1]), 4)
self.assertEqual(len(scs), 48)
def test_fit(self):
"""
Take two known matched structures
1) Ensure match
2) Ensure match after translation and rotations
3) Ensure no-match after large site translation
4) Ensure match after site shuffling
"""
sm = StructureMatcher()
self.assertTrue(sm.fit(self.struct_list[0], self.struct_list[1]))
# Test rotational/translational invariance
op = SymmOp.from_axis_angle_and_translation([0, 0, 1], 30, False, np.array([0.4, 0.7, 0.9]))
self.struct_list[1].apply_operation(op)
self.assertTrue(sm.fit(self.struct_list[0], self.struct_list[1]))
# Test failure under large atomic translation
self.struct_list[1].translate_sites([0], [0.4, 0.4, 0.2], frac_coords=True)
self.assertFalse(sm.fit(self.struct_list[0], self.struct_list[1]))
self.struct_list[1].translate_sites([0], [-0.4, -0.4, -0.2], frac_coords=True)
# random.shuffle(editor._sites)
self.assertTrue(sm.fit(self.struct_list[0], self.struct_list[1]))
        # Test FrameworkComparator
sm2 = StructureMatcher(comparator=FrameworkComparator())
lfp = self.get_structure("LiFePO4")
nfp = self.get_structure("NaFePO4")
self.assertTrue(sm2.fit(lfp, nfp))
self.assertFalse(sm.fit(lfp, nfp))
# Test anonymous fit.
self.assertEqual(sm.fit_anonymous(lfp, nfp), True)
self.assertAlmostEqual(sm.get_rms_anonymous(lfp, nfp)[0], 0.060895871160262717)
# Test partial occupancies.
s1 = Structure(
Lattice.cubic(3),
[{"Fe": 0.5}, {"Fe": 0.5}, {"Fe": 0.5}, {"Fe": 0.5}],
[[0, 0, 0], [0.25, 0.25, 0.25], [0.5, 0.5, 0.5], [0.75, 0.75, 0.75]],
)
s2 = Structure(
Lattice.cubic(3),
[{"Fe": 0.25}, {"Fe": 0.5}, {"Fe": 0.5}, {"Fe": 0.75}],
[[0, 0, 0], [0.25, 0.25, 0.25], [0.5, 0.5, 0.5], [0.75, 0.75, 0.75]],
)
self.assertFalse(sm.fit(s1, s2))
self.assertFalse(sm.fit(s2, s1))
s2 = Structure(
Lattice.cubic(3),
[{"Mn": 0.5}, {"Mn": 0.5}, {"Mn": 0.5}, {"Mn": 0.5}],
[[0, 0, 0], [0.25, 0.25, 0.25], [0.5, 0.5, 0.5], [0.75, 0.75, 0.75]],
)
self.assertEqual(sm.fit_anonymous(s1, s2), True)
self.assertAlmostEqual(sm.get_rms_anonymous(s1, s2)[0], 0)
# test symmetric
sm_coarse = sm = StructureMatcher(
comparator=ElementComparator(),
ltol=0.6,
stol=0.6,
angle_tol=6,
)
s1 = Structure.from_file(PymatgenTest.TEST_FILES_DIR / "fit_symm_s1.vasp")
s2 = Structure.from_file(PymatgenTest.TEST_FILES_DIR / "fit_symm_s2.vasp")
self.assertEqual(sm_coarse.fit(s1, s2), True)
self.assertEqual(sm_coarse.fit(s2, s1), False)
self.assertEqual(sm_coarse.fit(s1, s2, symmetric=True), False)
self.assertEqual(sm_coarse.fit(s2, s1, symmetric=True), False)
def test_oxi(self):
"""Test oxidation state removal matching"""
sm = StructureMatcher()
self.assertFalse(sm.fit(self.oxi_structs[0], self.oxi_structs[1]))
sm = StructureMatcher(comparator=ElementComparator())
self.assertTrue(sm.fit(self.oxi_structs[0], self.oxi_structs[1]))
def test_primitive(self):
"""Test primitive cell reduction"""
sm = StructureMatcher(primitive_cell=True)
self.struct_list[1].make_supercell([[2, 0, 0], [0, 3, 0], [0, 0, 1]])
self.assertTrue(sm.fit(self.struct_list[0], self.struct_list[1]))
def test_class(self):
# Tests entire class as single working unit
sm = StructureMatcher()
# Test group_structures and find_indices
out = sm.group_structures(self.struct_list)
self.assertEqual(list(map(len, out)), [4, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1])
self.assertEqual(sum(map(len, out)), len(self.struct_list))
for s in self.struct_list[::2]:
s.replace_species({"Ti": "Zr", "O": "Ti"})
out = sm.group_structures(self.struct_list, anonymous=True)
self.assertEqual(list(map(len, out)), [4, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1])
def test_mix(self):
structures = [
self.get_structure("Li2O"),
self.get_structure("Li2O2"),
self.get_structure("LiFePO4"),
]
for fname in ["POSCAR.Li2O", "POSCAR.LiFePO4"]:
structures.append(Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, fname)))
sm = StructureMatcher(comparator=ElementComparator())
groups = sm.group_structures(structures)
for g in groups:
formula = g[0].composition.reduced_formula
if formula in ["Li2O", "LiFePO4"]:
self.assertEqual(len(g), 2)
else:
self.assertEqual(len(g), 1)
def test_left_handed_lattice(self):
"""Ensure Left handed lattices are accepted"""
sm = StructureMatcher()
s = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "Li3GaPCO7.json"))
self.assertTrue(sm.fit(s, s))
def test_as_dict_and_from_dict(self):
sm = StructureMatcher(
ltol=0.1,
stol=0.2,
angle_tol=2,
primitive_cell=False,
scale=False,
comparator=FrameworkComparator(),
)
d = sm.as_dict()
sm2 = StructureMatcher.from_dict(d)
self.assertEqual(sm2.as_dict(), d)
def test_no_scaling(self):
sm = StructureMatcher(ltol=0.1, stol=0.1, angle_tol=2, scale=False, comparator=ElementComparator())
self.assertTrue(sm.fit(self.struct_list[0], self.struct_list[1]))
self.assertTrue(sm.get_rms_dist(self.struct_list[0], self.struct_list[1])[0] < 0.0008)
def test_supercell_fit(self):
sm = StructureMatcher(attempt_supercell=False)
s1 = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "Al3F9.json"))
s2 = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "Al3F9_distorted.json"))
self.assertFalse(sm.fit(s1, s2))
sm = StructureMatcher(attempt_supercell=True)
self.assertTrue(sm.fit(s1, s2))
self.assertTrue(sm.fit(s2, s1))
def test_get_lattices(self):
sm = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=True,
scale=True,
attempt_supercell=False,
)
l1 = Lattice.from_parameters(1, 2.1, 1.9, 90, 89, 91)
l2 = Lattice.from_parameters(1.1, 2, 2, 89, 91, 90)
s1 = Structure(l1, [], [])
s2 = Structure(l2, [], [])
lattices = list(sm._get_lattices(s=s1, target_lattice=s2.lattice))
self.assertEqual(len(lattices), 16)
l3 = Lattice.from_parameters(1.1, 2, 20, 89, 91, 90)
s3 = Structure(l3, [], [])
lattices = list(sm._get_lattices(s=s1, target_lattice=s3.lattice))
self.assertEqual(len(lattices), 0)
def test_find_match1(self):
sm = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=True,
scale=True,
attempt_supercell=False,
)
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ["Si", "Si", "Ag"], [[0, 0, 0.1], [0, 0, 0.2], [0.7, 0.4, 0.5]])
s2 = Structure(l, ["Si", "Si", "Ag"], [[0, 0.1, 0], [0, 0.1, -0.95], [0.7, 0.5, 0.375]])
s1, s2, fu, s1_supercell = sm._preprocess(s1, s2, False)
match = sm._strict_match(s1, s2, fu, s1_supercell=True, use_rms=True, break_on_match=False)
scale_matrix = match[2]
s2.make_supercell(scale_matrix)
fc = s2.frac_coords + match[3]
fc -= np.round(fc)
self.assertAlmostEqual(np.sum(fc), 0.9)
self.assertAlmostEqual(np.sum(fc[:, :2]), 0.1)
cart_dist = np.sum(match[1] * (l.volume / 3) ** (1 / 3))
self.assertAlmostEqual(cart_dist, 0.15)
def test_find_match2(self):
sm = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=True,
scale=True,
attempt_supercell=False,
)
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ["Si", "Si"], [[0, 0, 0.1], [0, 0, 0.2]])
s2 = Structure(l, ["Si", "Si"], [[0, 0.1, 0], [0, 0.1, -0.95]])
s1, s2, fu, s1_supercell = sm._preprocess(s1, s2, False)
match = sm._strict_match(s1, s2, fu, s1_supercell=False, use_rms=True, break_on_match=False)
scale_matrix = match[2]
s2.make_supercell(scale_matrix)
s2.translate_sites(range(len(s2)), match[3])
self.assertAlmostEqual(np.sum(s2.frac_coords) % 1, 0.3)
self.assertAlmostEqual(np.sum(s2.frac_coords[:, :2]) % 1, 0)
def test_supercell_subsets(self):
sm = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=True,
supercell_size="volume",
)
sm_no_s = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=False,
supercell_size="volume",
)
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ["Ag", "Si", "Si"], [[0.7, 0.4, 0.5], [0, 0, 0.1], [0, 0, 0.2]])
s1.make_supercell([2, 1, 1])
s2 = Structure(l, ["Si", "Si", "Ag"], [[0, 0.1, -0.95], [0, 0.1, 0], [-0.7, 0.5, 0.375]])
shuffle = [0, 2, 1, 3, 4, 5]
s1 = Structure.from_sites([s1[i] for i in shuffle])
# test when s1 is exact supercell of s2
result = sm.get_s2_like_s1(s1, s2)
for a, b in zip(s1, result):
self.assertTrue(a.distance(b) < 0.08)
self.assertEqual(a.species, b.species)
self.assertTrue(sm.fit(s1, s2))
self.assertTrue(sm.fit(s2, s1))
self.assertTrue(sm_no_s.fit(s1, s2))
self.assertTrue(sm_no_s.fit(s2, s1))
rms = (0.048604032430991401, 0.059527539448807391)
self.assertTrue(np.allclose(sm.get_rms_dist(s1, s2), rms))
self.assertTrue(np.allclose(sm.get_rms_dist(s2, s1), rms))
# test when the supercell is a subset of s2
subset_supercell = s1.copy()
del subset_supercell[0]
result = sm.get_s2_like_s1(subset_supercell, s2)
self.assertEqual(len(result), 6)
for a, b in zip(subset_supercell, result):
self.assertTrue(a.distance(b) < 0.08)
self.assertEqual(a.species, b.species)
self.assertTrue(sm.fit(subset_supercell, s2))
self.assertTrue(sm.fit(s2, subset_supercell))
self.assertFalse(sm_no_s.fit(subset_supercell, s2))
self.assertFalse(sm_no_s.fit(s2, subset_supercell))
rms = (0.053243049896333279, 0.059527539448807336)
self.assertTrue(np.allclose(sm.get_rms_dist(subset_supercell, s2), rms))
self.assertTrue(np.allclose(sm.get_rms_dist(s2, subset_supercell), rms))
# test when s2 (once made a supercell) is a subset of s1
s2_missing_site = s2.copy()
del s2_missing_site[1]
result = sm.get_s2_like_s1(s1, s2_missing_site)
for a, b in zip((s1[i] for i in (0, 2, 4, 5)), result):
self.assertTrue(a.distance(b) < 0.08)
self.assertEqual(a.species, b.species)
self.assertTrue(sm.fit(s1, s2_missing_site))
self.assertTrue(sm.fit(s2_missing_site, s1))
self.assertFalse(sm_no_s.fit(s1, s2_missing_site))
self.assertFalse(sm_no_s.fit(s2_missing_site, s1))
rms = (0.029763769724403633, 0.029763769724403987)
self.assertTrue(np.allclose(sm.get_rms_dist(s1, s2_missing_site), rms))
self.assertTrue(np.allclose(sm.get_rms_dist(s2_missing_site, s1), rms))
def test_get_s2_large_s2(self):
sm = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=False,
attempt_supercell=True,
allow_subset=False,
supercell_size="volume",
)
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ["Ag", "Si", "Si"], [[0.7, 0.4, 0.5], [0, 0, 0.1], [0, 0, 0.2]])
l2 = Lattice.orthorhombic(1.01, 2.01, 3.01)
s2 = Structure(l2, ["Si", "Si", "Ag"], [[0, 0.1, -0.95], [0, 0.1, 0], [-0.7, 0.5, 0.375]])
s2.make_supercell([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
result = sm.get_s2_like_s1(s1, s2)
for x, y in zip(s1, result):
self.assertLess(x.distance(y), 0.08)
def test_get_mapping(self):
sm = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=False,
allow_subset=True,
)
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ["Ag", "Si", "Si"], [[0.7, 0.4, 0.5], [0, 0, 0.1], [0, 0, 0.2]])
s1.make_supercell([2, 1, 1])
s2 = Structure(l, ["Si", "Si", "Ag"], [[0, 0.1, -0.95], [0, 0.1, 0], [-0.7, 0.5, 0.375]])
shuffle = [2, 0, 1, 3, 5, 4]
s1 = Structure.from_sites([s1[i] for i in shuffle])
# test the mapping
s2.make_supercell([2, 1, 1])
# equal sizes
for i, x in enumerate(sm.get_mapping(s1, s2)):
self.assertEqual(s1[x].species, s2[i].species)
del s1[0]
# s1 is subset of s2
for i, x in enumerate(sm.get_mapping(s2, s1)):
self.assertEqual(s1[i].species, s2[x].species)
# s2 is smaller than s1
del s2[0]
del s2[1]
self.assertRaises(ValueError, sm.get_mapping, s2, s1)
def test_get_supercell_matrix(self):
sm = StructureMatcher(
ltol=0.1,
stol=0.3,
angle_tol=2,
primitive_cell=False,
scale=True,
attempt_supercell=True,
)
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ["Si", "Si", "Ag"], [[0, 0, 0.1], [0, 0, 0.2], [0.7, 0.4, 0.5]])
s1.make_supercell([2, 1, 1])
s2 = Structure(l, ["Si", "Si", "Ag"], [[0, 0.1, 0], [0, 0.1, -0.95], [-0.7, 0.5, 0.375]])
result = sm.get_supercell_matrix(s1, s2)
self.assertTrue((result == [[-2, 0, 0], [0, 1, 0], [0, 0, 1]]).all())
s1 = Structure(l, ["Si", "Si", "Ag"], [[0, 0, 0.1], [0, 0, 0.2], [0.7, 0.4, 0.5]])
s1.make_supercell([[1, -1, 0], [0, 0, -1], [0, 1, 0]])
s2 = Structure(l, ["Si", "Si", "Ag"], [[0, 0.1, 0], [0, 0.1, -0.95], [-0.7, 0.5, 0.375]])
result = sm.get_supercell_matrix(s1, s2)
self.assertTrue((result == [[-1, -1, 0], [0, 0, -1], [0, 1, 0]]).all())
# test when the supercell is a subset
sm = StructureMatcher(
ltol=0.1,
stol=0.3,
angle_tol=2,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=True,
)
del s1[0]
result = sm.get_supercell_matrix(s1, s2)
self.assertTrue((result == [[-1, -1, 0], [0, 0, -1], [0, 1, 0]]).all())
def test_subset(self):
sm = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=False,
allow_subset=True,
)
l = Lattice.orthorhombic(10, 20, 30)
s1 = Structure(l, ["Si", "Si", "Ag"], [[0, 0, 0.1], [0, 0, 0.2], [0.7, 0.4, 0.5]])
s2 = Structure(l, ["Si", "Ag"], [[0, 0.1, 0], [-0.7, 0.5, 0.4]])
result = sm.get_s2_like_s1(s1, s2)
self.assertEqual(len(find_in_coord_list_pbc(result.frac_coords, [0, 0, 0.1])), 1)
self.assertEqual(len(find_in_coord_list_pbc(result.frac_coords, [0.7, 0.4, 0.5])), 1)
# test with fewer species in s2
s1 = Structure(l, ["Si", "Ag", "Si"], [[0, 0, 0.1], [0, 0, 0.2], [0.7, 0.4, 0.5]])
s2 = Structure(l, ["Si", "Si"], [[0, 0.1, 0], [-0.7, 0.5, 0.4]])
result = sm.get_s2_like_s1(s1, s2)
mindists = np.min(s1.lattice.get_all_distances(s1.frac_coords, result.frac_coords), axis=0)
self.assertLess(np.max(mindists), 1e-6)
self.assertEqual(len(find_in_coord_list_pbc(result.frac_coords, [0, 0, 0.1])), 1)
self.assertEqual(len(find_in_coord_list_pbc(result.frac_coords, [0.7, 0.4, 0.5])), 1)
# test with not enough sites in s1
# test with fewer species in s2
s1 = Structure(l, ["Si", "Ag", "Cl"], [[0, 0, 0.1], [0, 0, 0.2], [0.7, 0.4, 0.5]])
s2 = Structure(l, ["Si", "Si"], [[0, 0.1, 0], [-0.7, 0.5, 0.4]])
self.assertEqual(sm.get_s2_like_s1(s1, s2), None)
def test_out_of_cell_s2_like_s1(self):
l = Lattice.cubic(5)
s1 = Structure(l, ["Si", "Ag", "Si"], [[0, 0, -0.02], [0, 0, 0.001], [0.7, 0.4, 0.5]])
s2 = Structure(l, ["Si", "Ag", "Si"], [[0, 0, 0.98], [0, 0, 0.99], [0.7, 0.4, 0.5]])
new_s2 = StructureMatcher(primitive_cell=False).get_s2_like_s1(s1, s2)
dists = np.sum((s1.cart_coords - new_s2.cart_coords) ** 2, axis=-1) ** 0.5
self.assertLess(np.max(dists), 0.1)
def test_disordered_primitive_to_ordered_supercell(self):
sm_atoms = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=True,
supercell_size="num_atoms",
comparator=OrderDisorderElementComparator(),
)
sm_sites = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=True,
supercell_size="num_sites",
comparator=OrderDisorderElementComparator(),
)
lp = Lattice.orthorhombic(10, 20, 30)
pcoords = [[0, 0, 0], [0.5, 0.5, 0.5]]
ls = Lattice.orthorhombic(20, 20, 30)
scoords = [[0, 0, 0], [0.75, 0.5, 0.5]]
prim = Structure(lp, [{"Na": 0.5}, {"Cl": 0.5}], pcoords)
supercell = Structure(ls, ["Na", "Cl"], scoords)
supercell.make_supercell([[-1, 1, 0], [0, 1, 1], [1, 0, 0]])
self.assertFalse(sm_sites.fit(prim, supercell))
self.assertTrue(sm_atoms.fit(prim, supercell))
self.assertRaises(ValueError, sm_atoms.get_s2_like_s1, prim, supercell)
self.assertEqual(len(sm_atoms.get_s2_like_s1(supercell, prim)), 4)
def test_ordered_primitive_to_disordered_supercell(self):
sm_atoms = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=True,
supercell_size="num_atoms",
comparator=OrderDisorderElementComparator(),
)
sm_sites = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=True,
supercell_size="num_sites",
comparator=OrderDisorderElementComparator(),
)
lp = Lattice.orthorhombic(10, 20, 30)
pcoords = [[0, 0, 0], [0.5, 0.5, 0.5]]
ls = Lattice.orthorhombic(20, 20, 30)
scoords = [[0, 0, 0], [0.5, 0, 0], [0.25, 0.5, 0.5], [0.75, 0.5, 0.5]]
s1 = Structure(lp, ["Na", "Cl"], pcoords)
s2 = Structure(ls, [{"Na": 0.5}, {"Na": 0.5}, {"Cl": 0.5}, {"Cl": 0.5}], scoords)
self.assertTrue(sm_sites.fit(s1, s2))
self.assertFalse(sm_atoms.fit(s1, s2))
def test_disordered_to_disordered(self):
sm_atoms = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=False,
comparator=OrderDisorderElementComparator(),
)
lp = Lattice.orthorhombic(10, 20, 30)
coords = [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]]
s1 = Structure(lp, [{"Na": 0.5, "Cl": 0.5}, {"Na": 0.5, "Cl": 0.5}], coords)
s2 = Structure(lp, [{"Na": 0.5, "Cl": 0.5}, {"Na": 0.5, "Br": 0.5}], coords)
self.assertFalse(sm_atoms.fit(s1, s2))
def test_occupancy_comparator(self):
lp = Lattice.orthorhombic(10, 20, 30)
pcoords = [[0, 0, 0], [0.5, 0.5, 0.5]]
s1 = Structure(lp, [{"Na": 0.6, "K": 0.4}, "Cl"], pcoords)
s2 = Structure(lp, [{"Xa": 0.4, "Xb": 0.6}, "Cl"], pcoords)
s3 = Structure(lp, [{"Xa": 0.5, "Xb": 0.5}, "Cl"], pcoords)
sm_sites = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=True,
supercell_size="num_sites",
comparator=OccupancyComparator(),
)
self.assertTrue(sm_sites.fit(s1, s2))
self.assertFalse(sm_sites.fit(s1, s3))
def test_electronegativity(self):
sm = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5)
s1 = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "Na2Fe2PAsO4S4.json"))
s2 = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "Na2Fe2PNO4Se4.json"))
self.assertEqual(
sm.get_best_electronegativity_anonymous_mapping(s1, s2),
{
Element("S"): Element("Se"),
Element("As"): Element("N"),
Element("Fe"): Element("Fe"),
Element("Na"): Element("Na"),
Element("P"): Element("P"),
Element("O"): Element("O"),
},
)
self.assertEqual(len(sm.get_all_anonymous_mappings(s1, s2)), 2)
# test include_dist
dists = {Element("N"): 0, Element("P"): 0.0010725064}
for mapping, d in sm.get_all_anonymous_mappings(s1, s2, include_dist=True):
self.assertAlmostEqual(dists[mapping[Element("As")]], d)
def test_rms_vs_minimax(self):
# This tests that structures with adjusted RMS less than stol, but minimax
# greater than stol are treated properly
# stol=0.3 gives exactly an ftol of 0.1 on the c axis
sm = StructureMatcher(ltol=0.2, stol=0.301, angle_tol=1, primitive_cell=False)
l = Lattice.orthorhombic(1, 2, 12)
sp = ["Si", "Si", "Al"]
s1 = Structure(l, sp, [[0.5, 0, 0], [0, 0, 0], [0, 0, 0.5]])
s2 = Structure(l, sp, [[0.5, 0, 0], [0, 0, 0], [0, 0, 0.6]])
self.assertArrayAlmostEqual(sm.get_rms_dist(s1, s2), (0.32 ** 0.5 / 2, 0.4))
self.assertEqual(sm.fit(s1, s2), False)
self.assertEqual(sm.fit_anonymous(s1, s2), False)
self.assertEqual(sm.get_mapping(s1, s2), None)
class PointDefectComparatorTest(PymatgenTest):
def test_defect_matching(self):
# SETUP DEFECTS FOR TESTING
# symmorphic defect test set
s_struc = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "CsSnI3.cif")) # tetragonal CsSnI3
identical_Cs_vacs = [Vacancy(s_struc, s_struc[0]), Vacancy(s_struc, s_struc[1])]
identical_I_vacs_sublattice1 = [
Vacancy(s_struc, s_struc[4]),
Vacancy(s_struc, s_struc[5]),
Vacancy(s_struc, s_struc[8]),
Vacancy(s_struc, s_struc[9]),
] # in plane halides
identical_I_vacs_sublattice2 = [
Vacancy(s_struc, s_struc[6]),
Vacancy(s_struc, s_struc[7]),
] # out of plane halides
pdc = PointDefectComparator()
# NOW TEST DEFECTS
# test vacancy matching
self.assertTrue(pdc.are_equal(identical_Cs_vacs[0], identical_Cs_vacs[0])) # trivial vacancy test
self.assertTrue(pdc.are_equal(identical_Cs_vacs[0], identical_Cs_vacs[1])) # vacancies on same sublattice
for i, j in itertools.combinations(range(4), 2):
self.assertTrue(pdc.are_equal(identical_I_vacs_sublattice1[i], identical_I_vacs_sublattice1[j]))
self.assertTrue(pdc.are_equal(identical_I_vacs_sublattice2[0], identical_I_vacs_sublattice2[1]))
self.assertFalse(
pdc.are_equal(
identical_Cs_vacs[0],
# both vacancies, but different specie types
identical_I_vacs_sublattice1[0],
)
)
self.assertFalse(
pdc.are_equal(
identical_I_vacs_sublattice1[0],
# same specie type, different sublattice
identical_I_vacs_sublattice2[0],
)
)
# test substitutional matching
sub_Cs_on_I_sublattice1_set1 = PeriodicSite(
"Cs", identical_I_vacs_sublattice1[0].site.frac_coords, s_struc.lattice
)
sub_Cs_on_I_sublattice1_set2 = PeriodicSite(
"Cs", identical_I_vacs_sublattice1[1].site.frac_coords, s_struc.lattice
)
sub_Cs_on_I_sublattice2 = PeriodicSite("Cs", identical_I_vacs_sublattice2[0].site.frac_coords, s_struc.lattice)
sub_Rb_on_I_sublattice2 = PeriodicSite("Rb", identical_I_vacs_sublattice2[0].site.frac_coords, s_struc.lattice)
self.assertTrue(
pdc.are_equal( # trivial substitution test
Substitution(s_struc, sub_Cs_on_I_sublattice1_set1),
Substitution(s_struc, sub_Cs_on_I_sublattice1_set1),
)
)
self.assertTrue(
pdc.are_equal( # same sublattice, different coords
Substitution(s_struc, sub_Cs_on_I_sublattice1_set1),
Substitution(s_struc, sub_Cs_on_I_sublattice1_set2),
)
)
self.assertFalse(
pdc.are_equal( # different subs (wrong specie)
Substitution(s_struc, sub_Cs_on_I_sublattice2),
Substitution(s_struc, sub_Rb_on_I_sublattice2),
)
)
self.assertFalse(
pdc.are_equal( # different subs (wrong sublattice)
Substitution(s_struc, sub_Cs_on_I_sublattice1_set1),
Substitution(s_struc, sub_Cs_on_I_sublattice2),
)
)
# test symmorphic interstitial matching
# (using set generated from Voronoi generator, with same sublattice given by saturatated_
# interstitial_structure function)
inter_H_sublattice1_set1 = PeriodicSite("H", [0.0, 0.75, 0.25], s_struc.lattice)
inter_H_sublattice1_set2 = PeriodicSite("H", [0.0, 0.75, 0.75], s_struc.lattice)
inter_H_sublattice2 = PeriodicSite("H", [0.57796112, 0.06923687, 0.56923687], s_struc.lattice)
inter_H_sublattice3 = PeriodicSite("H", [0.25, 0.25, 0.54018268], s_struc.lattice)
inter_He_sublattice3 = PeriodicSite("He", [0.25, 0.25, 0.54018268], s_struc.lattice)
self.assertTrue(
pdc.are_equal( # trivial interstitial test
Interstitial(s_struc, inter_H_sublattice1_set1),
Interstitial(s_struc, inter_H_sublattice1_set1),
)
)
self.assertTrue(
pdc.are_equal( # same sublattice, different coords
Interstitial(s_struc, inter_H_sublattice1_set1),
Interstitial(s_struc, inter_H_sublattice1_set2),
)
)
self.assertFalse(
pdc.are_equal( # different interstitials (wrong sublattice)
Interstitial(s_struc, inter_H_sublattice1_set1),
Interstitial(s_struc, inter_H_sublattice2),
)
)
self.assertFalse(
pdc.are_equal( # different interstitials (wrong sublattice)
Interstitial(s_struc, inter_H_sublattice1_set1),
Interstitial(s_struc, inter_H_sublattice3),
)
)
self.assertFalse(
pdc.are_equal( # different interstitials (wrong specie)
Interstitial(s_struc, inter_H_sublattice3),
Interstitial(s_struc, inter_He_sublattice3),
)
)
# test non-symmorphic interstitial matching
# (using set generated from Voronoi generator, with same sublattice given by
# saturatated_interstitial_structure function)
ns_struc = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "CuCl.cif"))
ns_inter_H_sublattice1_set1 = PeriodicSite("H", [0.06924513, 0.06308959, 0.86766528], ns_struc.lattice)
ns_inter_H_sublattice1_set2 = PeriodicSite("H", [0.43691041, 0.36766528, 0.06924513], ns_struc.lattice)
ns_inter_H_sublattice2 = PeriodicSite("H", [0.06022109, 0.60196031, 0.1621814], ns_struc.lattice)
ns_inter_He_sublattice2 = PeriodicSite("He", [0.06022109, 0.60196031, 0.1621814], ns_struc.lattice)
self.assertTrue(
pdc.are_equal( # trivial interstitial test
Interstitial(ns_struc, ns_inter_H_sublattice1_set1),
Interstitial(ns_struc, ns_inter_H_sublattice1_set1),
)
)
self.assertTrue(
pdc.are_equal( # same sublattice, different coords
Interstitial(ns_struc, ns_inter_H_sublattice1_set1),
Interstitial(ns_struc, ns_inter_H_sublattice1_set2),
)
)
self.assertFalse(
pdc.are_equal(
Interstitial(ns_struc, ns_inter_H_sublattice1_set1),
# different interstitials (wrong sublattice)
Interstitial(ns_struc, ns_inter_H_sublattice2),
)
)
self.assertFalse(
pdc.are_equal( # different interstitials (wrong specie)
Interstitial(ns_struc, ns_inter_H_sublattice2),
Interstitial(ns_struc, ns_inter_He_sublattice2),
)
)
# test influence of charge on defect matching (default is to be charge agnostic)
vac_diff_chg = identical_Cs_vacs[0].copy()
vac_diff_chg.set_charge(3.0)
self.assertTrue(pdc.are_equal(identical_Cs_vacs[0], vac_diff_chg))
chargecheck_pdc = PointDefectComparator(check_charge=True) # switch to PDC which cares about charge state
self.assertFalse(chargecheck_pdc.are_equal(identical_Cs_vacs[0], vac_diff_chg))
# test different supercell size
# (comparing same defect but different supercells - default is to not check for this)
sc_agnostic_pdc = PointDefectComparator(check_primitive_cell=True)
sc_scaled_s_struc = s_struc.copy()
sc_scaled_s_struc.make_supercell([2, 2, 3])
sc_scaled_I_vac_sublatt1_ps1 = PeriodicSite(
"I",
identical_I_vacs_sublattice1[0].site.coords,
sc_scaled_s_struc.lattice,
coords_are_cartesian=True,
)
sc_scaled_I_vac_sublatt1_ps2 = PeriodicSite(
"I",
identical_I_vacs_sublattice1[1].site.coords,
sc_scaled_s_struc.lattice,
coords_are_cartesian=True,
)
sc_scaled_I_vac_sublatt2_ps = PeriodicSite(
"I",
identical_I_vacs_sublattice2[1].site.coords,
sc_scaled_s_struc.lattice,
coords_are_cartesian=True,
)
sc_scaled_I_vac_sublatt1_defect1 = Vacancy(sc_scaled_s_struc, sc_scaled_I_vac_sublatt1_ps1)
sc_scaled_I_vac_sublatt1_defect2 = Vacancy(sc_scaled_s_struc, sc_scaled_I_vac_sublatt1_ps2)
sc_scaled_I_vac_sublatt2_defect = Vacancy(sc_scaled_s_struc, sc_scaled_I_vac_sublatt2_ps)
self.assertFalse(
pdc.are_equal(
identical_I_vacs_sublattice1[0],
# trivially same defect site but between different supercells
sc_scaled_I_vac_sublatt1_defect1,
)
)
self.assertTrue(sc_agnostic_pdc.are_equal(identical_I_vacs_sublattice1[0], sc_scaled_I_vac_sublatt1_defect1))
self.assertFalse(
pdc.are_equal(
identical_I_vacs_sublattice1[1],
# same coords, different lattice structure
sc_scaled_I_vac_sublatt1_defect1,
)
)
self.assertTrue(sc_agnostic_pdc.are_equal(identical_I_vacs_sublattice1[1], sc_scaled_I_vac_sublatt1_defect1))
self.assertFalse(
pdc.are_equal(
identical_I_vacs_sublattice1[0],
# same sublattice, different coords
sc_scaled_I_vac_sublatt1_defect2,
)
)
self.assertTrue(sc_agnostic_pdc.are_equal(identical_I_vacs_sublattice1[0], sc_scaled_I_vac_sublatt1_defect2))
self.assertFalse(
sc_agnostic_pdc.are_equal(
identical_I_vacs_sublattice1[0],
# different defects (wrong sublattice)
sc_scaled_I_vac_sublatt2_defect,
)
)
# test same structure size, but scaled lattice volume
# (default is to not allow these to be equal, but check_lattice_scale=True allows for this)
vol_agnostic_pdc = PointDefectComparator(check_lattice_scale=True)
vol_scaled_s_struc = s_struc.copy()
vol_scaled_s_struc.scale_lattice(s_struc.volume * 0.95)
vol_scaled_I_vac_sublatt1_defect1 = Vacancy(vol_scaled_s_struc, vol_scaled_s_struc[4])
vol_scaled_I_vac_sublatt1_defect2 = Vacancy(vol_scaled_s_struc, vol_scaled_s_struc[5])
vol_scaled_I_vac_sublatt2_defect = Vacancy(vol_scaled_s_struc, vol_scaled_s_struc[6])
self.assertFalse(
pdc.are_equal(
identical_I_vacs_sublattice1[0],
# trivially same defect (but vol change)
vol_scaled_I_vac_sublatt1_defect1,
)
)
self.assertTrue(vol_agnostic_pdc.are_equal(identical_I_vacs_sublattice1[0], vol_scaled_I_vac_sublatt1_defect1))
self.assertFalse(
pdc.are_equal(
identical_I_vacs_sublattice1[0],
# same defect, different sublattice point (and vol change)
vol_scaled_I_vac_sublatt1_defect2,
)
)
self.assertTrue(vol_agnostic_pdc.are_equal(identical_I_vacs_sublattice1[0], vol_scaled_I_vac_sublatt1_defect2))
self.assertFalse(
vol_agnostic_pdc.are_equal(
identical_I_vacs_sublattice1[0],
# different defect (wrong sublattice)
vol_scaled_I_vac_sublatt2_defect,
)
)
# test identical defect which has had entire lattice shifted
shift_s_struc = s_struc.copy()
shift_s_struc.translate_sites(range(len(s_struc)), [0.2, 0.3, 0.4], frac_coords=True, to_unit_cell=True)
shifted_identical_Cs_vacs = [
Vacancy(shift_s_struc, shift_s_struc[0]),
Vacancy(shift_s_struc, shift_s_struc[1]),
]
self.assertTrue(
pdc.are_equal(
identical_Cs_vacs[0],
# trivially same defect (but shifted)
shifted_identical_Cs_vacs[0],
)
)
self.assertTrue(
pdc.are_equal(
identical_Cs_vacs[0],
# same defect on different sublattice point (and shifted)
shifted_identical_Cs_vacs[1],
)
)
# test uniform lattice shift within non-symmorphic structure
shift_ns_struc = ns_struc.copy()
shift_ns_struc.translate_sites(range(len(ns_struc)), [0.0, 0.6, 0.3], frac_coords=True, to_unit_cell=True)
shift_ns_inter_H_sublattice1_set1 = PeriodicSite(
"H",
ns_inter_H_sublattice1_set1.frac_coords + [0.0, 0.6, 0.3],
shift_ns_struc.lattice,
)
shift_ns_inter_H_sublattice1_set2 = PeriodicSite(
"H",
ns_inter_H_sublattice1_set2.frac_coords + [0.0, 0.6, 0.3],
shift_ns_struc.lattice,
)
self.assertTrue(
pdc.are_equal(
Interstitial(ns_struc, ns_inter_H_sublattice1_set1),
# trivially same defect (but shifted)
Interstitial(shift_ns_struc, shift_ns_inter_H_sublattice1_set1),
)
)
self.assertTrue(
pdc.are_equal(
Interstitial(ns_struc, ns_inter_H_sublattice1_set1),
# same defect on different sublattice point (and shifted)
Interstitial(shift_ns_struc, shift_ns_inter_H_sublattice1_set2),
)
)
# test a rotational + supercell type structure transformation (requires check_primitive_cell=True)
rotated_s_struc = s_struc.copy()
rotated_s_struc.make_supercell([[2, 1, 0], [-1, 3, 0], [0, 0, 2]])
rotated_identical_Cs_vacs = [
Vacancy(rotated_s_struc, rotated_s_struc[0]),
Vacancy(rotated_s_struc, rotated_s_struc[1]),
]
self.assertFalse(
pdc.are_equal(
identical_Cs_vacs[0],
# trivially same defect (but rotated)
rotated_identical_Cs_vacs[0],
)
)
self.assertTrue(sc_agnostic_pdc.are_equal(identical_Cs_vacs[0], rotated_identical_Cs_vacs[0]))
self.assertFalse(
pdc.are_equal(
identical_Cs_vacs[0],
# same defect on different sublattice (and rotated)
rotated_identical_Cs_vacs[1],
)
)
self.assertTrue(
sc_agnostic_pdc.are_equal(
identical_Cs_vacs[0],
# same defect on different sublattice point (and rotated)
rotated_identical_Cs_vacs[1],
)
)
# test a rotational + supercell + shift type structure transformation for non-symmorphic structure
rotANDshift_ns_struc = ns_struc.copy()
rotANDshift_ns_struc.translate_sites(range(len(ns_struc)), [0.0, 0.6, 0.3], frac_coords=True, to_unit_cell=True)
rotANDshift_ns_struc.make_supercell([[2, 1, 0], [-1, 3, 0], [0, 0, 2]])
ns_vac_Cs_set1 = Vacancy(ns_struc, ns_struc[0])
rotANDshift_ns_vac_Cs_set1 = Vacancy(rotANDshift_ns_struc, rotANDshift_ns_struc[0])
rotANDshift_ns_vac_Cs_set2 = Vacancy(rotANDshift_ns_struc, rotANDshift_ns_struc[1])
self.assertTrue(
sc_agnostic_pdc.are_equal(
ns_vac_Cs_set1,
# trivially same defect (but rotated and sublattice shifted)
rotANDshift_ns_vac_Cs_set1,
)
)
self.assertTrue(
sc_agnostic_pdc.are_equal(
ns_vac_Cs_set1,
# same defect on different sublattice point (shifted and rotated)
rotANDshift_ns_vac_Cs_set2,
)
)
if __name__ == "__main__":
unittest.main()
| richardtran415/pymatgen | pymatgen/analysis/tests/test_structure_matcher.py | Python | mit | 47,994 |
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test NULLDUMMY softfork.
Connect to a single node.
Generate 2 blocks (save the coinbases for later).
Generate 427 more blocks.
[Policy/Consensus] Check that NULLDUMMY compliant transactions are accepted in the 430th block.
[Policy] Check that non-NULLDUMMY transactions are rejected before activation.
[Consensus] Check that the new NULLDUMMY rules are not enforced on the 431st block.
[Policy/Consensus] Check that the new NULLDUMMY rules are enforced on the 432nd block.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block, add_witness_commitment
from test_framework.script import CScript
from io import BytesIO
import time
NULLDUMMY_ERROR = "64: non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero)"
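# Added note: trueDummy() below makes a spend violate NULLDUMMY (BIP147) by
# replacing the dummy stack element consumed by OP_CHECKMULTISIG, which the rule
# requires to be empty, with a non-empty one-byte push (0x51).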
def trueDummy(tx):
scriptSig = CScript(tx.vin[0].scriptSig)
newscript = []
for i in scriptSig:
if (len(newscript) == 0):
assert(len(i) == 0)
newscript.append(b'\x51')
else:
newscript.append(i)
tx.vin[0].scriptSig = CScript(newscript)
tx.rehash()
class NULLDUMMYTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
self.setup_clean_chain = True
def setup_network(self):
# Must set the blockversion for this test
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
extra_args=[['-whitelist=127.0.0.1', '-walletprematurewitness']])
def run_test(self):
self.address = self.nodes[0].getnewaddress()
self.ms_address = self.nodes[0].addmultisigaddress(1,[self.address])
self.wit_address = self.nodes[0].addwitnessaddress(self.address)
self.wit_ms_address = self.nodes[0].addwitnessaddress(self.ms_address)
NetworkThread().start() # Start up network handling in another thread
self.coinbase_blocks = self.nodes[0].generate(2) # Block 2
coinbase_txid = []
for i in self.coinbase_blocks:
coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0])
self.nodes[0].generate(427) # Block 429
self.lastblockhash = self.nodes[0].getbestblockhash()
self.tip = int("0x" + self.lastblockhash, 0)
self.lastblockheight = 429
self.lastblocktime = int(time.time()) + 429
self.log.info("Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [430]")
test1txs = [self.create_transaction(self.nodes[0], coinbase_txid[0], self.ms_address, 49)]
txid1 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[0].serialize_with_witness()), True)
test1txs.append(self.create_transaction(self.nodes[0], txid1, self.ms_address, 48))
txid2 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[1].serialize_with_witness()), True)
test1txs.append(self.create_transaction(self.nodes[0], coinbase_txid[1], self.wit_ms_address, 49))
txid3 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[2].serialize_with_witness()), True)
self.block_submit(self.nodes[0], test1txs, False, True)
self.log.info("Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation")
test2tx = self.create_transaction(self.nodes[0], txid2, self.ms_address, 47)
trueDummy(test2tx)
assert_raises_jsonrpc(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test2tx.serialize_with_witness()), True)
self.log.info("Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [431]")
self.block_submit(self.nodes[0], [test2tx], False, True)
self.log.info ("Test 4: Non-NULLDUMMY base multisig transaction is invalid after activation")
test4tx = self.create_transaction(self.nodes[0], test2tx.hash, self.address, 46)
test6txs=[CTransaction(test4tx)]
trueDummy(test4tx)
assert_raises_jsonrpc(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test4tx.serialize_with_witness()), True)
self.block_submit(self.nodes[0], [test4tx])
print ("Test 5: Non-NULLDUMMY P2WSH multisig transaction invalid after activation")
test5tx = self.create_transaction(self.nodes[0], txid3, self.wit_address, 48)
test6txs.append(CTransaction(test5tx))
test5tx.wit.vtxinwit[0].scriptWitness.stack[0] = b'\x01'
assert_raises_jsonrpc(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test5tx.serialize_with_witness()), True)
self.block_submit(self.nodes[0], [test5tx], True)
self.log.info("Test 6: NULLDUMMY compliant base/witness transactions should be accepted to mempool and in block after activation [432]")
for i in test6txs:
self.nodes[0].sendrawtransaction(bytes_to_hex_str(i.serialize_with_witness()), True)
self.block_submit(self.nodes[0], test6txs, True, True)
def create_transaction(self, node, txid, to_address, amount):
inputs = [{ "txid" : txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def block_submit(self, node, txs, witness = False, accept = False):
block = create_block(self.tip, create_coinbase(self.lastblockheight + 1), self.lastblocktime + 1)
block.nVersion = 4
for tx in txs:
tx.rehash()
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
witness and add_witness_commitment(block)
block.rehash()
block.solve()
node.submitblock(bytes_to_hex_str(block.serialize(True)))
if (accept):
assert_equal(node.getbestblockhash(), block.hash)
self.tip = block.sha256
self.lastblockhash = block.hash
self.lastblocktime += 1
self.lastblockheight += 1
else:
assert_equal(node.getbestblockhash(), self.lastblockhash)
if __name__ == '__main__':
NULLDUMMYTest().main()
| isle2983/bitcoin | qa/rpc-tests/nulldummy.py | Python | mit | 6,648 |
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Fabian Barkhau <[email protected]>
# License: MIT (see LICENSE.TXT file)
import os
from django.core.exceptions import PermissionDenied
from apps.gallery.models import Gallery
from apps.gallery.models import Picture
from apps.team.utils import assert_member
from apps.team import control as team_control
def can_edit(account, gallery):
    """ Team galleries are editable only by team members; personal galleries only by their creator. """
    if gallery.team:
        return team_control.is_member(account, gallery.team)
    return gallery.created_by == account
def _assert_can_edit(account, gallery):
if not can_edit(account, gallery):
raise PermissionDenied
def delete(account, gallery):
""" Delete gallery and all pictures belonging to it. """
_assert_can_edit(account, gallery)
for picture in gallery.pictures.all():
remove(account, picture)
gallery.delete()
def remove(account, picture):
""" Remove picture from the gallery and delete the image file on server. """
gallery = picture.gallery
_assert_can_edit(account, gallery)
if gallery.primary == picture:
gallery.primary = None
gallery.updated_by = account
gallery.save()
os.remove(picture.image.path)
os.remove(picture.preview.path)
os.remove(picture.thumbnail.path)
picture.delete()
return gallery
def setprimary(account, picture):
""" Set picture as the galleries primary picture. """
gallery = picture.gallery
_assert_can_edit(account, gallery)
gallery.primary = picture
gallery.save()
def add(account, image, gallery):
""" Add a picture to the gallery. """
_assert_can_edit(account, gallery)
picture = Picture()
picture.image = image
picture.preview = image
picture.thumbnail = image
picture.gallery = gallery
picture.created_by = account
picture.updated_by = account
picture.save()
return picture
def create(account, image, team):
""" Create a new gallery. """
if team:
assert_member(account, team)
gallery = Gallery()
gallery.created_by = account
gallery.updated_by = account
gallery.team = team
gallery.save()
picture = add(account, image, gallery)
gallery.primary = picture
gallery.save()
return gallery
| F483/bikesurf.org | apps/gallery/control.py | Python | mit | 2,303 |
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the -uacomment option."""
from test_framework.test_framework import PivxTestFramework
from test_framework.util import assert_equal
class UacommentTest(PivxTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def run_test(self):
self.log.info("test multiple -uacomment")
test_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-12:-1]
assert_equal(test_uacomment, "(testnode0)")
self.restart_node(0, ["-uacomment=foo"])
foo_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-17:-1]
assert_equal(foo_uacomment, "(testnode0; foo)")
self.log.info("test -uacomment max length")
self.stop_node(0)
expected = "exceeds maximum length (256). Reduce the number or size of uacomments."
self.assert_start_raises_init_error(0, ["-uacomment=" + 'a' * 256], expected)
self.log.info("test -uacomment unsafe characters")
for unsafe_char in ['/', ':', '(', ')']:
expected = "User Agent comment (" + unsafe_char + ") contains unsafe characters"
self.assert_start_raises_init_error(0, ["-uacomment=" + unsafe_char], expected)
if __name__ == '__main__':
UacommentTest().main()
| Mrs-X/PIVX | test/functional/feature_uacomment.py | Python | mit | 1,480 |
# -*- python -*-
# Copyright (C) 2009 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
pythondir = '/mingw32tdm/share/gcc-4.5.2/python'
libdir = '/mingw32tdm/lib/gcc/mingw32/4.5.2'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
# Update module path. We want to find the relative path from libdir
# to pythondir, and then we want to apply that relative path to the
# directory holding the objfile with which this file is associated.
# This preserves relocatability of the gcc tree.
# Do a simple normalization that removes duplicate separators.
pythondir = os.path.normpath (pythondir)
libdir = os.path.normpath (libdir)
prefix = os.path.commonprefix ([libdir, pythondir])
# In some bizarre configuration we might have found a match in the
# middle of a directory name.
if prefix[-1] != '/':
prefix = os.path.dirname (prefix) + '/'
# Strip off the prefix.
pythondir = pythondir[len (prefix):]
libdir = libdir[len (prefix):]
# Compute the ".."s needed to get from libdir to the prefix.
dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
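    # Worked example (added comment): with the paths configured above, the common
    # prefix '/mingw32tdm/' is stripped, libdir keeps four components
    # ('lib/gcc/mingw32/4.5.2'), so dotdots climbs four levels and the printers are
    # looked up under <objfile dir>/../../../../share/gcc-4.5.2/python
    # (modulo the platform's path separator).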
objfile = gdb.current_objfile ().filename
dir = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
if not dir in sys.path:
sys.path.insert(0, dir)
# Load the pretty-printers.
from libstdcxx.v6.printers import register_libstdcxx_printers
register_libstdcxx_printers (gdb.current_objfile ())
| ljnissen/shop_engine | mingw/lib/gcc/mingw32/4.5.2/libstdc++.dll.a-gdb.py | Python | mit | 2,316 |
# $Id: 150_srtp_1_1.py 369517 2012-07-01 17:28:57Z file $
#
from inc_cfg import *
test_param = TestParam(
"Callee=optional SRTP, caller=optional SRTP",
[
InstanceParam("callee", "--null-audio --use-srtp=1 --srtp-secure=0 --max-calls=1"),
InstanceParam("caller", "--null-audio --use-srtp=1 --srtp-secure=0 --max-calls=1")
]
)
| fluentstream/asterisk-p2p | res/pjproject/tests/pjsua/scripts-call/150_srtp_1_1.py | Python | gpl-2.0 | 340 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import os
from twisted.cred import credentials
from twisted.internet import defer
from twisted.internet import reactor
from twisted.internet.endpoints import clientFromString
from twisted.python import log
from twisted.python import util
from twisted.spread import pb
from twisted.trial import unittest
import buildbot
from buildbot import config
from buildbot import pbmanager
from buildbot import worker
from buildbot.process import botmaster
from buildbot.process import builder
from buildbot.process import factory
from buildbot.test.fake import fakemaster
from buildbot.test.util.misc import TestReactorMixin
from buildbot.util.eventual import eventually
from buildbot.worker import manager as workermanager
PKI_DIR = util.sibpath(__file__, 'pki')
class FakeWorkerForBuilder(pb.Referenceable):
"""
Fake worker-side WorkerForBuilder object
"""
class FakeWorkerWorker(pb.Referenceable):
"""
Fake worker-side Worker object
@ivar master_persp: remote perspective on the master
"""
def __init__(self, callWhenBuilderListSet):
self.callWhenBuilderListSet = callWhenBuilderListSet
self.master_persp = None
self._detach_deferreds = []
self._detached = False
def waitForDetach(self):
if self._detached:
return defer.succeed(None)
d = defer.Deferred()
self._detach_deferreds.append(d)
return d
def setMasterPerspective(self, persp):
self.master_persp = persp
# clear out master_persp on disconnect
def clear_persp():
self.master_persp = None
persp.broker.notifyOnDisconnect(clear_persp)
def fire_deferreds():
self._detached = True
self._detach_deferreds, deferreds = None, self._detach_deferreds
for d in deferreds:
d.callback(None)
persp.broker.notifyOnDisconnect(fire_deferreds)
def remote_print(self, message):
log.msg("WORKER-SIDE: remote_print(%r)" % (message,))
def remote_getWorkerInfo(self):
return {
'info': 'here',
'worker_commands': {
'x': 1,
},
'numcpus': 1,
'none': None,
'os_release': b'\xe3\x83\x86\xe3\x82\xb9\xe3\x83\x88'.decode(),
b'\xe3\x83\xaa\xe3\x83\xaa\xe3\x83\xbc\xe3\x82\xb9\xe3'
b'\x83\x86\xe3\x82\xb9\xe3\x83\x88'.decode():
b'\xe3\x83\x86\xe3\x82\xb9\xe3\x83\x88'.decode(),
}
def remote_getVersion(self):
return buildbot.version
def remote_getCommands(self):
return {'x': 1}
def remote_setBuilderList(self, builder_info):
builder_names = [n for n, dir in builder_info]
slbuilders = [FakeWorkerForBuilder() for n in builder_names]
eventually(self.callWhenBuilderListSet)
return dict(zip(builder_names, slbuilders))
class FakeBuilder(builder.Builder):
def attached(self, worker, commands):
return defer.succeed(None)
def detached(self, worker):
pass
def getOldestRequestTime(self):
return 0
def maybeStartBuild(self):
return defer.succeed(None)
class MyWorker(worker.Worker):
def attached(self, conn):
self.detach_d = defer.Deferred()
return super().attached(conn)
def detached(self):
super().detached()
self.detach_d, d = None, self.detach_d
d.callback(None)
class TestWorkerComm(unittest.TestCase, TestReactorMixin):
"""
Test handling of connections from workers as integrated with
- Twisted Spread
- real TCP connections.
- PBManager
@ivar master: fake build master
    @ivar pbmanager: L{PBManager} instance
@ivar botmaster: L{BotMaster} instance
@ivar worker: master-side L{Worker} instance
@ivar workerworker: worker-side L{FakeWorkerWorker} instance
@ivar port: TCP port to connect to
@ivar server_connection_string: description string for the server endpoint
@ivar client_connection_string_tpl: description string template for the client
endpoint (expects to passed 'port')
@ivar endpoint: endpoint controlling the outbound connection
from worker to master
"""
@defer.inlineCallbacks
def setUp(self):
self.setUpTestReactor()
self.master = fakemaster.make_master(self, wantMq=True, wantData=True,
wantDb=True)
# set the worker port to a loopback address with unspecified
# port
self.pbmanager = self.master.pbmanager = pbmanager.PBManager()
yield self.pbmanager.setServiceParent(self.master)
# remove the fakeServiceParent from fake service hierarchy, and replace
# by a real one
yield self.master.workers.disownServiceParent()
self.workers = self.master.workers = workermanager.WorkerManager(
self.master)
yield self.workers.setServiceParent(self.master)
self.botmaster = botmaster.BotMaster()
yield self.botmaster.setServiceParent(self.master)
self.master.botmaster = self.botmaster
self.master.data.updates.workerConfigured = lambda *a, **k: None
yield self.master.startService()
self.buildworker = None
self.port = None
self.workerworker = None
self.endpoint = None
self.broker = None
self._detach_deferreds = []
# patch in our FakeBuilder for the regular Builder class
self.patch(botmaster, 'Builder', FakeBuilder)
self.server_connection_string = "tcp:0:interface=127.0.0.1"
self.client_connection_string_tpl = "tcp:host=127.0.0.1:port={port}"
def tearDown(self):
if self.broker:
del self.broker
if self.endpoint:
del self.endpoint
deferreds = self._detach_deferreds + [
self.pbmanager.stopService(),
self.botmaster.stopService(),
self.workers.stopService(),
]
# if the worker is still attached, wait for it to detach, too
if self.buildworker and self.buildworker.detach_d:
deferreds.append(self.buildworker.detach_d)
return defer.gatherResults(deferreds)
@defer.inlineCallbacks
def addWorker(self, **kwargs):
"""
Create a master-side worker instance and add it to the BotMaster
@param **kwargs: arguments to pass to the L{Worker} constructor.
"""
self.buildworker = MyWorker("testworker", "pw", **kwargs)
# reconfig the master to get it set up
new_config = self.master.config
new_config.protocols = {"pb": {"port": self.server_connection_string}}
new_config.workers = [self.buildworker]
new_config.builders = [config.BuilderConfig(
name='bldr',
workername='testworker', factory=factory.BuildFactory())]
yield self.botmaster.reconfigServiceWithBuildbotConfig(new_config)
yield self.workers.reconfigServiceWithBuildbotConfig(new_config)
# as part of the reconfig, the worker registered with the pbmanager, so
# get the port it was assigned
self.port = self.buildworker.registration.getPBPort()
def connectWorker(self, waitForBuilderList=True):
"""
        Connect a worker to the master via PB
@param waitForBuilderList: don't return until the setBuilderList has
been called
@returns: L{FakeWorkerWorker} and a Deferred that will fire when it
is detached; via deferred
"""
factory = pb.PBClientFactory()
creds = credentials.UsernamePassword(b"testworker", b"pw")
setBuilderList_d = defer.Deferred()
workerworker = FakeWorkerWorker(
lambda: setBuilderList_d.callback(None))
login_d = factory.login(creds, workerworker)
@login_d.addCallback
def logged_in(persp):
workerworker.setMasterPerspective(persp)
# set up to hear when the worker side disconnects
workerworker.detach_d = defer.Deferred()
persp.broker.notifyOnDisconnect(
lambda: workerworker.detach_d.callback(None))
self._detach_deferreds.append(workerworker.detach_d)
return workerworker
self.endpoint = clientFromString(
reactor, self.client_connection_string_tpl.format(port=self.port))
connected_d = self.endpoint.connect(factory)
dlist = [connected_d, login_d]
if waitForBuilderList:
dlist.append(setBuilderList_d)
d = defer.DeferredList(dlist,
consumeErrors=True, fireOnOneErrback=True)
d.addCallback(lambda _: workerworker)
return d
def workerSideDisconnect(self, worker):
"""Disconnect from the worker side"""
worker.master_persp.broker.transport.loseConnection()
@defer.inlineCallbacks
def test_connect_disconnect(self):
"""Test a single worker connecting and disconnecting."""
yield self.addWorker()
# connect
worker = yield self.connectWorker()
# disconnect
self.workerSideDisconnect(worker)
# wait for the resulting detach
yield worker.waitForDetach()
@defer.inlineCallbacks
def test_tls_connect_disconnect(self):
"""Test with TLS or SSL endpoint.
According to the deprecation note for the SSL client endpoint,
the TLS endpoint is supported from Twistd 16.0.
TODO add certificate verification (also will require some conditionals
on various versions, including PyOpenSSL, service_identity. The CA used
to generate the testing cert is in ``PKI_DIR/ca``
"""
def escape_colon(path):
# on windows we can't have \ as it serves as the escape character for :
return path.replace('\\', '/').replace(':', '\\:')
self.server_connection_string = (
"ssl:port=0:certKey={pub}:privateKey={priv}:" +
"interface=127.0.0.1").format(
pub=escape_colon(os.path.join(PKI_DIR, '127.0.0.1.crt')),
priv=escape_colon(os.path.join(PKI_DIR, '127.0.0.1.key')))
self.client_connection_string_tpl = "ssl:host=127.0.0.1:port={port}"
yield self.addWorker()
# connect
worker = yield self.connectWorker()
# disconnect
self.workerSideDisconnect(worker)
# wait for the resulting detach
yield worker.waitForDetach()
@defer.inlineCallbacks
def test_worker_info(self):
yield self.addWorker()
worker = yield self.connectWorker()
props = self.buildworker.info
# check worker info passing
self.assertEqual(props.getProperty("info"),
"here")
# check worker info passing with UTF-8
self.assertEqual(props.getProperty("os_release"),
b'\xe3\x83\x86\xe3\x82\xb9\xe3\x83\x88'.decode())
self.assertEqual(props.getProperty(b'\xe3\x83\xaa\xe3\x83\xaa\xe3\x83\xbc\xe3\x82'
b'\xb9\xe3\x83\x86\xe3\x82\xb9\xe3\x83\x88'.decode()),
b'\xe3\x83\x86\xe3\x82\xb9\xe3\x83\x88'.decode())
self.assertEqual(props.getProperty("none"), None)
self.assertEqual(props.getProperty("numcpus"), 1)
self.workerSideDisconnect(worker)
yield worker.waitForDetach()
@defer.inlineCallbacks
def _test_duplicate_worker(self):
yield self.addWorker()
# connect first worker
worker1 = yield self.connectWorker()
# connect second worker; this should fail
try:
yield self.connectWorker(waitForBuilderList=False)
connect_failed = False
except Exception:
connect_failed = True
self.assertTrue(connect_failed)
# disconnect both and wait for that to percolate
self.workerSideDisconnect(worker1)
yield worker1.waitForDetach()
# flush the exception logged for this on the master
self.assertEqual(len(self.flushLoggedErrors(RuntimeError)), 1)
@defer.inlineCallbacks
def _test_duplicate_worker_old_dead(self):
yield self.addWorker()
# connect first worker
worker1 = yield self.connectWorker()
# monkeypatch that worker to fail with PBConnectionLost when its
# remote_print method is called
def remote_print(message):
worker1.master_persp.broker.transport.loseConnection()
raise pb.PBConnectionLost("fake!")
worker1.remote_print = remote_print
# connect second worker; this should succeed, and the old worker
# should be disconnected.
worker2 = yield self.connectWorker()
# disconnect both and wait for that to percolate
self.workerSideDisconnect(worker2)
yield worker1.waitForDetach()
# flush the exception logged for this on the worker
self.assertEqual(len(self.flushLoggedErrors(pb.PBConnectionLost)), 1)
| tardyp/buildbot | master/buildbot/test/integration/test_worker_comm.py | Python | gpl-2.0 | 13,868 |
"""Tests for the unparse.py script in the Tools/parser directory."""
import unittest
import test.support
import io
import os
import random
import tokenize
import ast
from test.test_tools import basepath, toolsdir, skip_if_missing
skip_if_missing()
parser_path = os.path.join(toolsdir, "parser")
with test.support.DirsOnSysPath(parser_path):
import unparse
def read_pyfile(filename):
"""Read and return the contents of a Python source file (as a
string), taking into account the file encoding."""
with open(filename, "rb") as pyfile:
encoding = tokenize.detect_encoding(pyfile.readline)[0]
with open(filename, "r", encoding=encoding) as pyfile:
source = pyfile.read()
return source
for_else = """\
def f():
for x in range(10):
break
else:
y = 2
z = 3
"""
while_else = """\
def g():
while True:
break
else:
y = 2
z = 3
"""
relative_import = """\
from . import fred
from .. import barney
from .australia import shrimp as prawns
"""
nonlocal_ex = """\
def f():
x = 1
def g():
nonlocal x
x = 2
y = 7
def h():
nonlocal x, y
"""
# also acts as test for 'except ... as ...'
raise_from = """\
try:
1 / 0
except ZeroDivisionError as e:
raise ArithmeticError from e
"""
class_decorator = """\
@f1(arg)
@f2
class Foo: pass
"""
elif1 = """\
if cond1:
suite1
elif cond2:
suite2
else:
suite3
"""
elif2 = """\
if cond1:
suite1
elif cond2:
suite2
"""
try_except_finally = """\
try:
suite1
except ex1:
suite2
except ex2:
suite3
else:
suite4
finally:
suite5
"""
with_simple = """\
with f():
suite1
"""
with_as = """\
with f() as x:
suite1
"""
with_two_items = """\
with f() as x, g() as y:
suite1
"""
class ASTTestCase(unittest.TestCase):
def assertASTEqual(self, ast1, ast2):
self.assertEqual(ast.dump(ast1), ast.dump(ast2))
def check_roundtrip(self, code1, filename="internal"):
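        # Round-trip check: parse code1 to an AST, unparse it back to source,
        # reparse that source, and require both ASTs to dump identically.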
ast1 = compile(code1, filename, "exec", ast.PyCF_ONLY_AST)
unparse_buffer = io.StringIO()
unparse.Unparser(ast1, unparse_buffer)
code2 = unparse_buffer.getvalue()
ast2 = compile(code2, filename, "exec", ast.PyCF_ONLY_AST)
self.assertASTEqual(ast1, ast2)
class UnparseTestCase(ASTTestCase):
# Tests for specific bugs found in earlier versions of unparse
def test_fstrings(self):
# See issue 25180
self.check_roundtrip(r"""f'{f"{0}"*3}'""")
self.check_roundtrip(r"""f'{f"{y}"*3}'""")
def test_del_statement(self):
self.check_roundtrip("del x, y, z")
def test_shifts(self):
self.check_roundtrip("45 << 2")
self.check_roundtrip("13 >> 7")
def test_for_else(self):
self.check_roundtrip(for_else)
def test_while_else(self):
self.check_roundtrip(while_else)
def test_unary_parens(self):
self.check_roundtrip("(-1)**7")
self.check_roundtrip("(-1.)**8")
self.check_roundtrip("(-1j)**6")
self.check_roundtrip("not True or False")
self.check_roundtrip("True or not False")
def test_integer_parens(self):
self.check_roundtrip("3 .__abs__()")
def test_huge_float(self):
self.check_roundtrip("1e1000")
self.check_roundtrip("-1e1000")
self.check_roundtrip("1e1000j")
self.check_roundtrip("-1e1000j")
def test_min_int(self):
self.check_roundtrip(str(-2**31))
self.check_roundtrip(str(-2**63))
def test_imaginary_literals(self):
self.check_roundtrip("7j")
self.check_roundtrip("-7j")
self.check_roundtrip("0j")
self.check_roundtrip("-0j")
def test_lambda_parentheses(self):
self.check_roundtrip("(lambda: int)()")
def test_chained_comparisons(self):
self.check_roundtrip("1 < 4 <= 5")
self.check_roundtrip("a is b is c is not d")
def test_function_arguments(self):
self.check_roundtrip("def f(): pass")
self.check_roundtrip("def f(a): pass")
self.check_roundtrip("def f(b = 2): pass")
self.check_roundtrip("def f(a, b): pass")
self.check_roundtrip("def f(a, b = 2): pass")
self.check_roundtrip("def f(a = 5, b = 2): pass")
self.check_roundtrip("def f(*, a = 1, b = 2): pass")
self.check_roundtrip("def f(*, a = 1, b): pass")
self.check_roundtrip("def f(*, a, b = 2): pass")
self.check_roundtrip("def f(a, b = None, *, c, **kwds): pass")
self.check_roundtrip("def f(a=2, *args, c=5, d, **kwds): pass")
self.check_roundtrip("def f(*args, **kwargs): pass")
def test_relative_import(self):
self.check_roundtrip(relative_import)
def test_nonlocal(self):
self.check_roundtrip(nonlocal_ex)
def test_raise_from(self):
self.check_roundtrip(raise_from)
def test_bytes(self):
self.check_roundtrip("b'123'")
def test_annotations(self):
self.check_roundtrip("def f(a : int): pass")
self.check_roundtrip("def f(a: int = 5): pass")
self.check_roundtrip("def f(*args: [int]): pass")
self.check_roundtrip("def f(**kwargs: dict): pass")
self.check_roundtrip("def f() -> None: pass")
def test_set_literal(self):
self.check_roundtrip("{'a', 'b', 'c'}")
def test_set_comprehension(self):
self.check_roundtrip("{x for x in range(5)}")
def test_dict_comprehension(self):
self.check_roundtrip("{x: x*x for x in range(10)}")
def test_class_decorators(self):
self.check_roundtrip(class_decorator)
def test_class_definition(self):
self.check_roundtrip("class A(metaclass=type, *[], **{}): pass")
def test_elifs(self):
self.check_roundtrip(elif1)
self.check_roundtrip(elif2)
def test_try_except_finally(self):
self.check_roundtrip(try_except_finally)
def test_starred_assignment(self):
self.check_roundtrip("a, *b, c = seq")
self.check_roundtrip("a, (*b, c) = seq")
self.check_roundtrip("a, *b[0], c = seq")
self.check_roundtrip("a, *(b, c) = seq")
def test_with_simple(self):
self.check_roundtrip(with_simple)
def test_with_as(self):
self.check_roundtrip(with_as)
def test_with_two_items(self):
self.check_roundtrip(with_two_items)
def test_dict_unpacking_in_dict(self):
# See issue 26489
self.check_roundtrip(r"""{**{'y': 2}, 'x': 1}""")
self.check_roundtrip(r"""{**{'y': 2}, **{'x': 1}}""")
class DirectoryTestCase(ASTTestCase):
"""Test roundtrip behaviour on all files in Lib and Lib/test."""
NAMES = None
# test directories, relative to the root of the distribution
test_directories = 'Lib', os.path.join('Lib', 'test')
@classmethod
def get_names(cls):
if cls.NAMES is not None:
return cls.NAMES
names = []
for d in cls.test_directories:
test_dir = os.path.join(basepath, d)
for n in os.listdir(test_dir):
if n.endswith('.py') and not n.startswith('bad'):
names.append(os.path.join(test_dir, n))
# Test limited subset of files unless the 'cpu' resource is specified.
if not test.support.is_resource_enabled("cpu"):
names = random.sample(names, 10)
# bpo-31174: Store the names sample to always test the same files.
# It prevents false alarms when hunting reference leaks.
cls.NAMES = names
return names
def test_files(self):
# get names of files to test
names = self.get_names()
for filename in names:
if test.support.verbose:
print('Testing %s' % filename)
# Some f-strings are not correctly round-tripped by
# Tools/parser/unparse.py. See issue 28002 for details.
# We need to skip files that contain such f-strings.
if os.path.basename(filename) in ('test_fstring.py', ):
if test.support.verbose:
print(f'Skipping {filename}: see issue 28002')
continue
with self.subTest(filename=filename):
source = read_pyfile(filename)
self.check_roundtrip(source)
if __name__ == '__main__':
unittest.main()
| FFMG/myoddweb.piger | monitor/api/python/Python-3.7.2/Lib/test/test_tools/test_unparse.py | Python | gpl-2.0 | 8,393 |
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from marvin.cloudstackTestCase import cloudstackTestCase, unittest
from marvin.cloudstackAPI import deleteAffinityGroup
from marvin.lib.utils import (cleanup_resources,
random_gen)
from marvin.lib.base import (Account,
Project,
ServiceOffering,
VirtualMachine,
AffinityGroup,
Domain)
from marvin.lib.common import (get_zone,
get_domain,
get_template,
list_hosts,
list_virtual_machines,
wait_for_cleanup)
from nose.plugins.attrib import attr
class Services:
"""Test Account Services
"""
def __init__(self):
self.services = {
"domain": {
"name": "NonRootDomain"
},
"domain_admin_account": {
"email": "[email protected]",
"firstname": "Test",
"lastname": "User",
"username": "doadmintest",
"password": "password"
},
"account": {
"email": "[email protected]",
"firstname": "Test",
"lastname": "User",
"username": "acc",
"password": "password"
},
"account_not_in_project": {
"email": "[email protected]",
"firstname": "Test",
"lastname": "User",
"username": "account_not_in_project",
"password": "password"
},
"project": {
"name": "Project",
"displaytext": "Project"
},
"project2": {
"name": "Project2",
"displaytext": "Project2"
},
"service_offering": {
"name": "Tiny Instance",
"displaytext": "Tiny Instance",
"cpunumber": 1,
"cpuspeed": 100,
"memory": 64
},
"ostype": 'CentOS 5.3 (64-bit)',
"host_anti_affinity": {
"name": "",
"type": "host anti-affinity"
},
"virtual_machine" : {
}
}
class TestCreateAffinityGroup(cloudstackTestCase):
"""
Test various scenarios for Create Affinity Group API for projects
"""
@classmethod
def setUpClass(cls):
cls.testClient = super(TestCreateAffinityGroup, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
#Get Zone, Domain and templates
cls.rootdomain = get_domain(cls.api_client)
cls.domain = Domain.create(cls.api_client, cls.services["domain"])
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["template"] = cls.template.id
cls.services["zoneid"] = cls.zone.id
cls.domain_admin_account = Account.create(
cls.api_client,
cls.services["domain_admin_account"],
domainid=cls.domain.id,
admin=True
)
cls.domain_api_client = cls.testClient.getUserApiClient(cls.domain_admin_account.name, cls.domain.name, 2)
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.account_api_client = cls.testClient.getUserApiClient(cls.account.name, cls.domain.name, 0)
cls.account_not_in_project = Account.create(
cls.api_client,
cls.services["account_not_in_project"],
domainid=cls.domain.id
)
cls.account_not_in_project_api_client = cls.testClient.getUserApiClient(cls.account_not_in_project.name, cls.domain.name, 0)
cls.project = Project.create(
cls.api_client,
cls.services["project"],
account=cls.domain_admin_account.name,
domainid=cls.domain_admin_account.domainid
)
cls.project2 = Project.create(
cls.api_client,
cls.services["project2"],
account=cls.domain_admin_account.name,
domainid=cls.domain_admin_account.domainid
)
cls.debug("Created project with ID: %s" % cls.project.id)
cls.debug("Created project2 with ID: %s" % cls.project2.id)
# Add user to the project
cls.project.addAccount(
cls.api_client,
cls.account.name
)
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"],
domainid=cls.account.domainid
)
cls._cleanup = []
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
def tearDown(self):
try:
# #Clean up, terminate the created instance, volumes and snapshots
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@classmethod
def tearDownClass(cls):
try:
#Clean up, terminate the created templates
cls.domain.delete(cls.api_client, cleanup=True)
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def create_aff_grp(self, api_client=None, aff_grp=None, aff_grp_name=None, projectid=None):
if not api_client:
api_client = self.api_client
if aff_grp is None:
aff_grp = self.services["host_anti_affinity"]
if aff_grp_name is None:
aff_grp["name"] = "aff_grp_" + random_gen(size=6)
else:
aff_grp["name"] = aff_grp_name
if projectid is None:
projectid = self.project.id
try:
return AffinityGroup.create(api_client, aff_grp, None, None, projectid)
except Exception as e:
raise Exception("Error: Creation of Affinity Group failed : %s" % e)
@attr(tags=["simulator", "basic", "advanced"], required_hardware="false")
def test_01_admin_create_aff_grp_for_project(self):
"""
Test create affinity group as admin in project
@return:
"""
aff_grp = self.create_aff_grp()
self.debug("Created Affinity Group: %s" % aff_grp.name)
list_aff_grps = AffinityGroup.list(self.api_client, id=aff_grp.id)
self.assert_(isinstance(list_aff_grps, list) and len(list_aff_grps) > 0)
self.assert_(list_aff_grps[0].id == aff_grp.id)
self.assert_(list_aff_grps[0].projectid == self.project.id)
self.cleanup.append(aff_grp)
@attr(tags=["simulator", "basic", "advanced"], required_hardware="false")
def test_02_doadmin_create_aff_grp_for_project(self):
"""
Test create affinity group as domain admin for projects
@return:
"""
aff_grp = self.create_aff_grp(api_client=self.domain_api_client)
list_aff_grps = AffinityGroup.list(self.domain_api_client, id=aff_grp.id)
self.assert_(isinstance(list_aff_grps, list) and len(list_aff_grps) > 0)
self.assert_(list_aff_grps[0].id == aff_grp.id)
self.assert_(list_aff_grps[0].projectid == self.project.id)
self.cleanup.append(aff_grp)
@attr(tags=["vogxn", "simulator", "basic", "advanced"], required_hardware="false")
def test_03_user_create_aff_grp_for_project(self):
"""
Test create affinity group as user for projects
@return:
"""
aff_grp = self.create_aff_grp(api_client=self.account_api_client)
list_aff_grps = AffinityGroup.list(self.api_client, id=aff_grp.id)
self.assert_(isinstance(list_aff_grps, list) and len(list_aff_grps) > 0)
self.assert_(list_aff_grps[0].id == aff_grp.id)
self.assert_(list_aff_grps[0].projectid == self.project.id)
self.cleanup.append(aff_grp)
@attr(tags=["simulator", "basic", "advanced"], required_hardware="false")
def test_4_user_create_aff_grp_existing_name_for_project(self):
"""
Test create affinity group that exists (same name) for projects
@return:
"""
failed_aff_grp = None
aff_grp = self.create_aff_grp(api_client=self.account_api_client)
with self.assertRaises(Exception):
failed_aff_grp = self.create_aff_grp(api_client=self.account_api_client,aff_grp_name = aff_grp.name)
if failed_aff_grp:
self.cleanup.append(failed_aff_grp)
self.cleanup.append(aff_grp)
class TestListAffinityGroups(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestListAffinityGroups, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
#Get Zone, Domain and templates
cls.rootdomain = get_domain(cls.api_client)
cls.domain = Domain.create(cls.api_client, cls.services["domain"])
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["template"] = cls.template.id
cls.services["zoneid"] = cls.zone.id
cls.domain_admin_account = Account.create(
cls.api_client,
cls.services["domain_admin_account"],
domainid=cls.domain.id,
admin=True
)
cls.domain_api_client = cls.testClient.getUserApiClient(cls.domain_admin_account.name, cls.domain.name, 2)
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.account_api_client = cls.testClient.getUserApiClient(cls.account.name, cls.domain.name, 0)
cls.account_not_in_project = Account.create(
cls.api_client,
cls.services["account_not_in_project"],
domainid=cls.domain.id
)
cls.account_not_in_project_api_client = cls.testClient.getUserApiClient(cls.account_not_in_project.name, cls.domain.name, 0)
cls.project = Project.create(
cls.api_client,
cls.services["project"],
account=cls.domain_admin_account.name,
domainid=cls.domain_admin_account.domainid
)
cls.project2 = Project.create(
cls.api_client,
cls.services["project2"],
account=cls.domain_admin_account.name,
domainid=cls.domain_admin_account.domainid
)
cls.debug("Created project with ID: %s" % cls.project.id)
cls.debug("Created project2 with ID: %s" % cls.project2.id)
# Add user to the project
cls.project.addAccount(
cls.api_client,
cls.account.name
)
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"],
domainid=cls.account.domainid
)
cls._cleanup = []
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
def tearDown(self):
try:
# #Clean up, terminate the created instance, volumes and snapshots
cleanup_resources(self.api_client, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@classmethod
def tearDownClass(cls):
try:
cls.domain.delete(cls.api_client, cleanup=True)
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def create_aff_grp(self, api_client=None, aff_grp=None, aff_grp_name=None, projectid=None):
if not api_client:
api_client = self.api_client
if aff_grp is None:
aff_grp = self.services["host_anti_affinity"]
if aff_grp_name is None:
aff_grp["name"] = "aff_grp_" + random_gen(size=6)
else:
aff_grp["name"] = aff_grp_name
if projectid is None:
projectid = self.project.id
try:
return AffinityGroup.create(api_client, aff_grp, None, None, projectid)
except Exception as e:
raise Exception("Error: Creation of Affinity Group failed : %s" % e)
def create_vm_in_aff_grps(self, api_client=None, ag_list=[], projectid=None):
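        # Deploy a VM into the project, pinned to the given affinity groups,
        # sanity-check the listVirtualMachines response, and return the VM
        # together with the host it was placed on.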
self.debug('Creating VM in AffinityGroups=%s' % ag_list)
if api_client is None:
api_client = self.api_client
if projectid is None:
projectid = self.project.id
vm = VirtualMachine.create(
api_client,
self.services["virtual_machine"],
projectid=projectid,
templateid=self.template.id,
serviceofferingid=self.service_offering.id,
affinitygroupnames=ag_list
)
self.debug('Created VM=%s in Affinity Group=%s' % (vm.id, tuple(ag_list)))
list_vm = list_virtual_machines(api_client, id=vm.id, projectid=projectid)
self.assertEqual(isinstance(list_vm, list), True,"Check list response returns a valid list")
self.assertNotEqual(len(list_vm),0, "Check VM available in List Virtual Machines")
vm_response = list_vm[0]
self.assertEqual(vm_response.state, 'Running',msg="VM is not in Running state")
self.assertEqual(vm_response.projectid, projectid,msg="VM is not in project")
return vm, vm_response.hostid
@attr(tags=["simulator", "basic", "advanced"], required_hardware="false")
def test_01_list_aff_grps_for_vm(self):
"""
List affinity group for a vm for projects
"""
aff_grps = []
aff_grps.append(self.create_aff_grp(self.domain_api_client, projectid=self.project.id))
vm, hostid = self.create_vm_in_aff_grps(self.account_api_client,ag_list=[aff_grps[0].name])
list_aff_grps = AffinityGroup.list(self.api_client,virtualmachineid=vm.id)
self.assertEqual(list_aff_grps[0].name, aff_grps[0].name,"Listing Affinity Group by VM id failed")
self.assertEqual(list_aff_grps[0].projectid, self.project.id,"Listing Affinity Group by VM id failed, vm was not in project")
vm.delete(self.api_client)
#Wait for expunge interval to cleanup VM
wait_for_cleanup(self.apiclient, ["expunge.delay", "expunge.interval"])
self.cleanup.append(aff_grps[0])
@attr(tags=["simulator", "basic", "advanced"], required_hardware="false")
def test_02_list_multiple_aff_grps_for_vm(self):
"""
List multiple affinity groups associated with a vm for projects
"""
aff_grp_01 = self.create_aff_grp(self.account_api_client)
aff_grp_02 = self.create_aff_grp(self.account_api_client)
aff_grps_names = [aff_grp_01.name, aff_grp_02.name]
vm, hostid = self.create_vm_in_aff_grps(ag_list=aff_grps_names)
list_aff_grps = AffinityGroup.list(self.api_client,
virtualmachineid=vm.id)
list_aff_grps_names = [list_aff_grps[0].name, list_aff_grps[1].name]
aff_grps_names.sort()
list_aff_grps_names.sort()
self.assertEqual(aff_grps_names, list_aff_grps_names,"One of the Affinity Groups is missing %s" % list_aff_grps_names)
vm.delete(self.api_client)
#Wait for expunge interval to cleanup VM
wait_for_cleanup(self.apiclient, ["expunge.delay", "expunge.interval"])
self.cleanup.append(aff_grp_01)
self.cleanup.append(aff_grp_02)
@attr(tags=["simulator", "basic", "advanced"], required_hardware="false")
def test_03_list_aff_grps_by_id(self):
"""
List affinity groups by id for projects
"""
aff_grp = self.create_aff_grp(self.account_api_client)
list_aff_grps = AffinityGroup.list(self.account_api_client, id=aff_grp.id, projectid=self.project.id)
self.assertEqual(list_aff_grps[0].id, aff_grp.id,"Listing Affinity Group by id failed")
with self.assertRaises(Exception):
AffinityGroup.list(self.account_not_in_project_api_client, id=aff_grp.id, projectid=self.project.id)
self.cleanup.append(aff_grp)
@attr(tags=["simulator", "basic", "advanced"], required_hardware="false")
def test_04_list_aff_grps_by_name(self):
"""
List Affinity Groups by name for projects
"""
aff_grp = self.create_aff_grp(self.account_api_client)
list_aff_grps = AffinityGroup.list(self.account_api_client, name=aff_grp.name, projectid=self.project.id)
self.assertEqual(list_aff_grps[0].name, aff_grp.name,"Listing Affinity Group by name failed")
with self.assertRaises(Exception):
AffinityGroup.list(self.account_not_in_project_api_client, id=aff_grp.id, projectid=self.project.id)
self.cleanup.append(aff_grp)
@attr(tags=["simulator", "basic", "advanced"], required_hardware="false")
def test_05_list_aff_grps_by_non_existing_id(self):
"""
List Affinity Groups by non-existing id for projects
"""
aff_grp = self.create_aff_grp(self.account_api_client)
list_aff_grps = AffinityGroup.list(self.account_api_client, id=1234, projectid=self.project.id)
self.assertEqual(list_aff_grps, None, "Listing Affinity Group by non-existing id succeeded.")
self.cleanup.append(aff_grp)
@attr(tags=["simulator", "basic", "advanced"], required_hardware="false")
def test_06_list_aff_grps_by_non_existing_name(self):
"""
List Affinity Groups by non-existing name for projects
"""
aff_grp = self.create_aff_grp(self.account_api_client)
list_aff_grps = AffinityGroup.list(self.account_api_client, name="inexistantName", projectid=self.project.id)
self.assertEqual(list_aff_grps, None, "Listing Affinity Group by non-existing name succeeded.")
self.cleanup.append(aff_grp)
@attr(tags=["simulator", "basic", "advanced"], required_hardware="false")
def test_07_list_all_vms_in_aff_grp(self):
"""
        Listing an affinity group should list all vms associated with that group, for projects
"""
aff_grp = self.create_aff_grp(self.account_api_client)
vm1, hostid1 = self.create_vm_in_aff_grps(ag_list=[aff_grp.name])
vm2, hostid2 = self.create_vm_in_aff_grps(ag_list=[aff_grp.name])
list_aff_grps = AffinityGroup.list(self.api_client, id=aff_grp.id, projectid=self.project.id)
self.assertEqual(list_aff_grps[0].name, aff_grp.name, "Listing Affinity Group by id failed")
self.assertEqual(list_aff_grps[0].virtualmachineIds[0], vm1.id, "List affinity group response.virtualmachineIds for group: %s doesn't contain vmid : %s" % (aff_grp.name, vm1.id))
self.assertEqual(list_aff_grps[0].virtualmachineIds[1], vm2.id, "List affinity group response.virtualmachineIds for group: %s doesn't contain vmid : %s" % (aff_grp.name, vm2.id))
vm1.delete(self.api_client)
vm2.delete(self.api_client)
#Wait for expunge interval to cleanup VM
wait_for_cleanup(self.apiclient, ["expunge.delay", "expunge.interval"])
self.cleanup.append(aff_grp)
class TestDeleteAffinityGroups(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestDeleteAffinityGroups, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
#Get Zone, Domain and templates
cls.rootdomain = get_domain(cls.api_client)
cls.domain = Domain.create(cls.api_client, cls.services["domain"])
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["template"] = cls.template.id
cls.services["zoneid"] = cls.zone.id
cls.domain_admin_account = Account.create(
cls.api_client,
cls.services["domain_admin_account"],
domainid=cls.domain.id,
admin=True
)
cls.domain_api_client = cls.testClient.getUserApiClient(cls.domain_admin_account.name, cls.domain.name, 2)
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.account_api_client = cls.testClient.getUserApiClient(cls.account.name, cls.domain.name, 0)
cls.account_not_in_project = Account.create(
cls.api_client,
cls.services["account_not_in_project"],
domainid=cls.domain.id
)
cls.account_not_in_project_api_client = cls.testClient.getUserApiClient(cls.account_not_in_project.name, cls.domain.name, 0)
cls.project = Project.create(
cls.api_client,
cls.services["project"],
account=cls.domain_admin_account.name,
domainid=cls.domain_admin_account.domainid
)
cls.project2 = Project.create(
cls.api_client,
cls.services["project2"],
account=cls.domain_admin_account.name,
domainid=cls.domain_admin_account.domainid
)
cls.debug("Created project with ID: %s" % cls.project.id)
cls.debug("Created project2 with ID: %s" % cls.project2.id)
# Add user to the project
cls.project.addAccount(
cls.api_client,
cls.account.name
)
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"],
domainid=cls.account.domainid
)
cls._cleanup = []
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
def tearDown(self):
try:
# #Clean up, terminate the created instance, volumes and snapshots
cleanup_resources(self.api_client, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@classmethod
def tearDownClass(cls):
try:
cls.domain.delete(cls.api_client, cleanup=True)
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def create_aff_grp(self, api_client=None, aff_grp=None, aff_grp_name=None, projectid=None):
if not api_client:
api_client = self.api_client
if aff_grp is None:
aff_grp = self.services["host_anti_affinity"]
if aff_grp_name is None:
aff_grp["name"] = "aff_grp_" + random_gen(size=6)
else:
aff_grp["name"] = aff_grp_name
if projectid is None:
projectid = self.project.id
try:
return AffinityGroup.create(api_client, aff_grp, None, None, projectid)
except Exception as e:
raise Exception("Error: Creation of Affinity Group failed : %s" % e)
def create_vm_in_aff_grps(self, api_client=None, ag_list=[], projectid=None):
self.debug('Creating VM in AffinityGroups=%s' % ag_list)
if api_client is None:
api_client = self.api_client
if projectid is None:
projectid = self.project.id
vm = VirtualMachine.create(
api_client,
self.services["virtual_machine"],
projectid=projectid,
templateid=self.template.id,
serviceofferingid=self.service_offering.id,
affinitygroupnames=ag_list
)
self.debug('Created VM=%s in Affinity Group=%s' % (vm.id, tuple(ag_list)))
list_vm = list_virtual_machines(self.api_client, id=vm.id, projectid=projectid)
self.assertEqual(isinstance(list_vm, list), True,"Check list response returns an invalid list %s" % list_vm)
self.assertNotEqual(len(list_vm),0, "Check VM available in TestDeployVMAffinityGroups")
self.assertEqual(list_vm[0].id, vm.id,"Listed vm does not have the same ids")
vm_response = list_vm[0]
self.assertEqual(vm.state, 'Running',msg="VM is not in Running state")
self.assertEqual(vm.projectid, projectid,msg="VM is not in project")
self.assertNotEqual(vm_response.hostid, None, "Host id was null for vm %s" % vm_response)
return vm, vm_response.hostid
def delete_aff_group(self, apiclient, **kwargs):
cmd = deleteAffinityGroup.deleteAffinityGroupCmd()
        for k, v in kwargs.items():
            setattr(cmd, k, v)
return apiclient.deleteAffinityGroup(cmd)
@attr(tags=["simulator", "basic", "advanced"], required_hardware="false")
def test_01_delete_aff_grp_by_id(self):
"""
        Delete Affinity Group by id.
"""
aff_grp1 = self.create_aff_grp(self.account_api_client)
aff_grp2 = self.create_aff_grp(self.account_api_client)
aff_grp1.delete(self.account_api_client)
with self.assertRaises(Exception):
list_aff_grps = AffinityGroup.list(self.api_client, id=aff_grp1.id)
self.cleanup.append(aff_grp2)
@attr(tags=["simulator", "basic", "advanced"], required_hardware="false")
def test_02_delete_aff_grp_by_id_another_user(self):
"""
        Delete Affinity Group by id should fail for a user not in the project
"""
aff_grp1 = self.create_aff_grp(self.account_api_client)
aff_grp2 = self.create_aff_grp(self.account_api_client)
with self.assertRaises(Exception):
aff_grp1.delete(self.account_not_in_project_api_client)
self.cleanup.append(aff_grp1)
self.cleanup.append(aff_grp2)
class TestUpdateVMAffinityGroups(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestUpdateVMAffinityGroups, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
#Get Zone, Domain and templates
cls.rootdomain = get_domain(cls.api_client)
cls.domain = Domain.create(cls.api_client, cls.services["domain"])
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["template"] = cls.template.id
cls.services["zoneid"] = cls.zone.id
cls.domain_admin_account = Account.create(
cls.api_client,
cls.services["domain_admin_account"],
domainid=cls.domain.id,
admin=True
)
cls.domain_api_client = cls.testClient.getUserApiClient(cls.domain_admin_account.name, cls.domain.name, 2)
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.account_api_client = cls.testClient.getUserApiClient(cls.account.name, cls.domain.name, 0)
cls.account_not_in_project = Account.create(
cls.api_client,
cls.services["account_not_in_project"],
domainid=cls.domain.id
)
cls.account_not_in_project_api_client = cls.testClient.getUserApiClient(cls.account_not_in_project.name, cls.domain.name, 0)
cls.project = Project.create(
cls.api_client,
cls.services["project"],
account=cls.domain_admin_account.name,
domainid=cls.domain_admin_account.domainid
)
cls.project2 = Project.create(
cls.api_client,
cls.services["project2"],
account=cls.domain_admin_account.name,
domainid=cls.domain_admin_account.domainid
)
cls.debug("Created project with ID: %s" % cls.project.id)
cls.debug("Created project2 with ID: %s" % cls.project2.id)
# Add user to the project
cls.project.addAccount(
cls.api_client,
cls.account.name
)
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"],
domainid=cls.account.domainid
)
cls._cleanup = []
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
def tearDown(self):
try:
# #Clean up, terminate the created instance, volumes and snapshots
cleanup_resources(self.api_client, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@classmethod
def tearDownClass(cls):
try:
cls.domain.delete(cls.api_client, cleanup=True)
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def create_aff_grp(self, api_client=None, aff_grp=None, aff_grp_name=None, projectid=None):
if not api_client:
api_client = self.api_client
if aff_grp is None:
aff_grp = self.services["host_anti_affinity"]
if aff_grp_name is None:
aff_grp["name"] = "aff_grp_" + random_gen(size=6)
else:
aff_grp["name"] = aff_grp_name
if projectid is None:
projectid = self.project.id
try:
return AffinityGroup.create(api_client, aff_grp, None, None, projectid)
except Exception as e:
raise Exception("Error: Creation of Affinity Group failed : %s" % e)
def create_vm_in_aff_grps(self, api_client=None, ag_list=[], projectid=None):
self.debug('Creating VM in AffinityGroups=%s' % ag_list)
if api_client is None:
api_client = self.api_client
if projectid is None:
projectid = self.project.id
vm = VirtualMachine.create(
api_client,
self.services["virtual_machine"],
projectid=projectid,
templateid=self.template.id,
serviceofferingid=self.service_offering.id,
affinitygroupnames=ag_list
)
self.debug('Created VM=%s in Affinity Group=%s' % (vm.id, tuple(ag_list)))
list_vm = list_virtual_machines(self.api_client, id=vm.id, projectid=projectid)
self.assertEqual(isinstance(list_vm, list), True,"Check list response returns an invalid list %s" % list_vm)
self.assertNotEqual(len(list_vm),0, "Check VM available in TestDeployVMAffinityGroups")
self.assertEqual(list_vm[0].id, vm.id,"Listed vm does not have the same ids")
vm_response = list_vm[0]
self.assertEqual(vm.state, 'Running',msg="VM is not in Running state")
self.assertEqual(vm.projectid, projectid,msg="VM is not in project")
self.assertNotEqual(vm_response.hostid, None, "Host id was null for vm %s" % vm_response)
return vm, vm_response.hostid
@attr(tags=["simulator", "basic", "advanced", "multihost"], required_hardware="false")
def test_01_update_aff_grp_by_ids(self):
"""
        Update the list of affinity groups on a VM by using affinity group ids
"""
aff_grp1 = self.create_aff_grp(self.account_api_client)
aff_grp2 = self.create_aff_grp(self.account_api_client)
vm1, hostid1 = self.create_vm_in_aff_grps(ag_list=[aff_grp1.name])
vm2, hostid2 = self.create_vm_in_aff_grps(ag_list=[aff_grp1.name])
vm1.stop(self.api_client)
list_aff_grps = AffinityGroup.list(self.api_client, projectid=self.project.id)
self.assertEqual(len(list_aff_grps), 2 , "2 affinity groups should be present")
vm1.update_affinity_group(self.api_client,affinitygroupids=[list_aff_grps[0].id,list_aff_grps[1].id])
list_aff_grps = AffinityGroup.list(self.api_client,virtualmachineid=vm1.id)
list_aff_grps_names = [list_aff_grps[0].name, list_aff_grps[1].name]
aff_grps_names = [aff_grp1.name, aff_grp2.name]
aff_grps_names.sort()
list_aff_grps_names.sort()
self.assertEqual(aff_grps_names, list_aff_grps_names,"One of the Affinity Groups is missing %s" % list_aff_grps_names)
vm1.start(self.api_client)
vm_status = VirtualMachine.list(self.api_client, id=vm1.id)
self.assertNotEqual(vm_status[0].hostid, hostid2, "The virtual machine started on host %s violating the host anti-affinity rule" %vm_status[0].hostid)
vm1.delete(self.api_client)
vm2.delete(self.api_client)
#Wait for expunge interval to cleanup VM
wait_for_cleanup(self.apiclient, ["expunge.delay", "expunge.interval"])
aff_grp1.delete(self.api_client)
aff_grp2.delete(self.api_client)
class TestDeployVMAffinityGroups(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestDeployVMAffinityGroups, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
#Get Zone, Domain and templates
cls.rootdomain = get_domain(cls.api_client)
cls.domain = Domain.create(cls.api_client, cls.services["domain"])
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["template"] = cls.template.id
cls.services["zoneid"] = cls.zone.id
cls.domain_admin_account = Account.create(
cls.api_client,
cls.services["domain_admin_account"],
domainid=cls.domain.id,
admin=True
)
cls.domain_api_client = cls.testClient.getUserApiClient(cls.domain_admin_account.name, cls.domain.name, 2)
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.account_api_client = cls.testClient.getUserApiClient(cls.account.name, cls.domain.name, 0)
cls.account_not_in_project = Account.create(
cls.api_client,
cls.services["account_not_in_project"],
domainid=cls.domain.id
)
cls.account_not_in_project_api_client = cls.testClient.getUserApiClient(cls.account_not_in_project.name, cls.domain.name, 0)
cls.project = Project.create(
cls.api_client,
cls.services["project"],
account=cls.domain_admin_account.name,
domainid=cls.domain_admin_account.domainid
)
cls.project2 = Project.create(
cls.api_client,
cls.services["project2"],
account=cls.domain_admin_account.name,
domainid=cls.domain_admin_account.domainid
)
cls.debug("Created project with ID: %s" % cls.project.id)
cls.debug("Created project2 with ID: %s" % cls.project2.id)
# Add user to the project
cls.project.addAccount(
cls.api_client,
cls.account.name
)
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"],
domainid=cls.account.domainid
)
cls._cleanup = []
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
def tearDown(self):
try:
# #Clean up, terminate the created instance, volumes and snapshots
cleanup_resources(self.api_client, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@classmethod
def tearDownClass(cls):
try:
cls.domain.delete(cls.api_client, cleanup=True)
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def create_aff_grp(self, api_client=None, aff_grp=None, aff_grp_name=None, projectid=None):
if not api_client:
api_client = self.api_client
if aff_grp is None:
aff_grp = self.services["host_anti_affinity"]
if aff_grp_name is None:
aff_grp["name"] = "aff_grp_" + random_gen(size=6)
else:
aff_grp["name"] = aff_grp_name
if projectid is None:
projectid = self.project.id
try:
return AffinityGroup.create(api_client, aff_grp, None, None, projectid)
except Exception as e:
raise Exception("Error: Creation of Affinity Group failed : %s" % e)
def create_vm_in_aff_grps(self, api_client=None, ag_list=[], projectid=None):
self.debug('Creating VM in AffinityGroups=%s' % ag_list)
if api_client is None:
api_client = self.api_client
if projectid is None:
projectid = self.project.id
vm = VirtualMachine.create(
api_client,
self.services["virtual_machine"],
projectid=projectid,
templateid=self.template.id,
serviceofferingid=self.service_offering.id,
affinitygroupnames=ag_list
)
self.debug('Created VM=%s in Affinity Group=%s' % (vm.id, tuple(ag_list)))
list_vm = list_virtual_machines(self.api_client, id=vm.id, projectid=projectid)
self.assertEqual(isinstance(list_vm, list), True,"Check list response returns an invalid list %s" % list_vm)
self.assertNotEqual(len(list_vm),0, "Check VM available in TestDeployVMAffinityGroups")
self.assertEqual(list_vm[0].id, vm.id,"Listed vm does not have the same ids")
vm_response = list_vm[0]
self.assertEqual(vm.state, 'Running',msg="VM is not in Running state")
self.assertEqual(vm.projectid, projectid,msg="VM is not in project")
self.assertNotEqual(vm_response.hostid, None, "Host id was null for vm %s" % vm_response)
return vm, vm_response.hostid
@attr(tags=["simulator", "basic", "advanced", "multihost"], required_hardware="false")
def test_01_deploy_vm_anti_affinity_group(self):
"""
test DeployVM in anti-affinity groups
deploy VM1 and VM2 in the same host-anti-affinity groups
Verify that the vms are deployed on separate hosts
"""
aff_grp = self.create_aff_grp(self.account_api_client)
vm1, hostid1 = self.create_vm_in_aff_grps(self.account_api_client,ag_list=[aff_grp.name])
vm2, hostid2 = self.create_vm_in_aff_grps(self.account_api_client, ag_list=[aff_grp.name])
self.assertNotEqual(hostid1, hostid2, msg="Both VMs of affinity group %s are on the same host: %s , %s, %s, %s" % (aff_grp.name, vm1, hostid1, vm2, hostid2))
vm1.delete(self.api_client)
vm2.delete(self.api_client)
wait_for_cleanup(self.api_client, ["expunge.delay", "expunge.interval"])
self.cleanup.append(aff_grp)
@attr(tags=["simulator", "basic", "advanced", "multihost"], required_hardware="false")
def test_02_deploy_vm_anti_affinity_group_fail_on_not_enough_hosts(self):
"""
test DeployVM in anti-affinity groups with more vms than hosts.
"""
hosts = list_hosts(self.api_client, type="routing")
aff_grp = self.create_aff_grp(self.account_api_client)
vms = []
for host in hosts:
vms.append(self.create_vm_in_aff_grps(self.account_api_client,ag_list=[aff_grp.name]))
vm_failed = None
with self.assertRaises(Exception):
vm_failed = self.create_vm_in_aff_grps(self.account_api_client,ag_list=[aff_grp.name])
self.assertEqual(len(hosts), len(vms), "Received %s and %s " % (hosts, vms))
if vm_failed:
vm_failed.expunge(self.api_client)
self.cleanup.append(aff_grp)
| ikoula/cloudstack | test/integration/component/test_affinity_groups_projects.py | Python | gpl-2.0 | 41,778 |
"""Support functions for loading the reference count data file."""
__version__ = '$Revision: 1.2 $'
import os
import string
import sys
# Determine the expected location of the reference count file:
try:
p = os.path.dirname(__file__)
except NameError:
p = sys.path[0]
p = os.path.normpath(os.path.join(os.getcwd(), p, os.pardir,
"api", "refcounts.dat"))
DEFAULT_PATH = p
del p
def load(path=DEFAULT_PATH):
return loadfile(open(path))
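# Each non-blank, non-comment line of refcounts.dat carries five
# colon-separated fields:
#   function:type:arg:refcount:comment
# A line whose "arg" field is empty describes the function's return value.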
def loadfile(fp):
d = {}
while 1:
line = fp.readline()
if not line:
break
line = string.strip(line)
if line[:1] in ("", "#"):
# blank lines and comments
continue
parts = string.split(line, ":", 4)
function, type, arg, refcount, comment = parts
if refcount == "null":
refcount = None
elif refcount:
refcount = int(refcount)
else:
refcount = None
#
# Get the entry, creating it if needed:
#
try:
entry = d[function]
except KeyError:
entry = d[function] = Entry(function)
#
# Update the entry with the new parameter or the result information.
#
if arg:
entry.args.append((arg, type, refcount))
else:
entry.result_type = type
entry.result_refs = refcount
return d
class Entry:
def __init__(self, name):
self.name = name
self.args = []
self.result_type = ''
self.result_refs = None
def dump(d):
"""Dump the data in the 'canonical' format, with functions in
sorted order."""
items = d.items()
items.sort()
first = 1
for k, entry in items:
if first:
first = 0
else:
print
s = entry.name + ":%s:%s:%s:"
if entry.result_refs is None:
r = ""
else:
r = entry.result_refs
print s % (entry.result_type, "", r)
for t, n, r in entry.args:
if r is None:
r = ""
print s % (t, n, r)
def main():
d = load()
dump(d)
if __name__ == "__main__":
main()
| atmark-techno/atmark-dist | user/python/Doc/tools/refcounts.py | Python | gpl-2.0 | 2,236 |
#!/usr/bin/env python
from runtest import TestBase
class TestCase(TestBase):
def __init__(self):
TestBase.__init__(self, 'float-libcall', result="""
# DURATION TID FUNCTION
[18276] | main() {
0.371 ms [18276] | expf(1.000000) = 2.718282;
0.118 ms [18276] | log(2.718282) = 1.000000;
3.281 ms [18276] | } /* main */
""")
def build(self, name, cflags='', ldflags=''):
# cygprof doesn't support arguments now
if cflags.find('-finstrument-functions') >= 0:
return TestBase.TEST_SKIP
ldflags += " -lm"
return TestBase.build(self, name, cflags, ldflags)
def setup(self):
self.option = '-A "expf@fparg1/32" -R "expf@retval/f32" '
self.option += '-A "log@fparg1/64" -R "log@retval/f64" '
| namhyung/uftrace | tests/t198_lib_arg_float.py | Python | gpl-2.0 | 799 |
# -*- coding: utf-8 -*-
"""QGIS Unit tests for core additions
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Denis Rouzaud'
__date__ = '15.5.2018'
__copyright__ = 'Copyright 2015, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
import os
from qgis.testing import unittest, start_app
from qgis.core import metaEnumFromValue, metaEnumFromType, QgsTolerance, QgsMapLayer
import sip
start_app()
class TestCoreAdditions(unittest.TestCase):
def testMetaEnum(self):
me = metaEnumFromValue(QgsTolerance.Pixels)
self.assertIsNotNone(me)
self.assertEqual(me.valueToKey(QgsTolerance.Pixels), 'Pixels')
# if using same variable twice (e.g. me = me2), this seg faults
me2 = metaEnumFromValue(QgsTolerance.Pixels, QgsTolerance)
self.assertIsNotNone(me)
self.assertEqual(me2.valueToKey(QgsTolerance.Pixels), 'Pixels')
# do not raise error
self.assertIsNone(metaEnumFromValue(1, QgsTolerance, False))
# do not provide an int
with self.assertRaises(TypeError):
metaEnumFromValue(1)
# QgsMapLayer.LayerType is not a Q_ENUM
with self.assertRaises(ValueError):
metaEnumFromValue(QgsMapLayer.LayerType)
if __name__ == "__main__":
unittest.main()
| raymondnijssen/QGIS | tests/src/python/test_core_additions.py | Python | gpl-2.0 | 1,581 |
###########################################################################
#
# This program is part of Zenoss Core, an open source monitoring platform.
# Copyright (C) 2009, Zenoss Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# For complete information please visit: http://www.zenoss.com/oss/
#
###########################################################################
__doc__="""Esx
Plugin to gather information about virtual machines running
under a VMWare ESX server v3.0
"""
import Globals
from Products.DataCollector.plugins.CollectorPlugin \
import SnmpPlugin, GetTableMap
from Products.DataCollector.plugins.DataMaps \
import ObjectMap
class Esx(SnmpPlugin):
# compname = "os"
relname = "guestDevices"
modname = 'ZenPacks.zenoss.ZenossVirtualHostMonitor.VirtualMachine'
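    # Sub-OIDs (relative to the VMware VM info table .1.3.6.1.4.1.6876.2.1.1,
    # walked via the GetTableMap below) mapped to the property names set on
    # each VirtualMachine object map.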
columns = {
'.1': 'snmpindex',
'.2': 'displayName',
'.4': 'osType',
'.5': 'memory',
'.6': 'adminStatus',
'.7': 'vmid',
'.8': 'operStatus',
}
snmpGetTableMaps = (
GetTableMap('vminfo', '.1.3.6.1.4.1.6876.2.1.1', columns),
)
def process(self, device, results, log):
log.info('processing %s for device %s', self.name(), device.id)
getdata, tabledata = results
table = tabledata.get("vminfo")
rm = self.relMap()
for info in table.values():
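            # Convert the SNMP status strings to booleans and re-key the VM id
            # as snmpindex before building each object map.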
info['adminStatus'] = info['adminStatus'] == 'poweredOn'
info['operStatus'] = info['operStatus'] == 'running'
info['snmpindex'] = info['vmid']
del info['vmid']
om = self.objectMap(info)
om.id = self.prepId(om.displayName)
rm.append(om)
return [rm]
| zenoss/ZenPacks.community.VMwareESXMonitor | ZenPacks/community/VMwareESXMonitor/modeler/plugins/zenoss/snmp/Esx.py | Python | gpl-2.0 | 1,873 |
#-----------------------------------------------------------------------------
#remove duplicates v1.3
#best way to remove duplicates: just select the objects you want the duplicate faces removed from, then run this script
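# Example usage (assumed workflow, not part of the original script): open this
# file in Blender's Text Editor and press "Run Script", or run it headless on a
# .blend file with something like:
#   blender yourfile.blend --background --python remove_duplicate_faces.py
# where the script filename is just an illustrative placeholder.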
import bpy
for obj in bpy.context.selected_objects:
if obj.type == 'MESH':
bpy.data.scenes[0].objects.active = obj # make obj active to do operations on it
bpy.ops.object.mode_set(mode='OBJECT', toggle=False) # set 3D View to Object Mode (probably redundant)
bpy.ops.object.mode_set(mode='EDIT', toggle=False) # set 3D View to Edit Mode
bpy.context.tool_settings.mesh_select_mode = [False, False, True] # set to face select in 3D View Editor
bpy.ops.mesh.select_all(action='SELECT') # make sure all faces in mesh are selected
bpy.ops.object.mode_set(mode='OBJECT', toggle=False) # very silly, you have to be in object mode to select faces!!
found = set([]) # set of found sorted vertices pairs
for face in obj.data.polygons:
facevertsorted = sorted(face.vertices[:]) # sort vertices of the face to compare later
if str(facevertsorted) not in found: # if sorted vertices are not in the set
found.add(str(facevertsorted)) # add them in the set
obj.data.polygons[face.index].select = False # deselect faces i want to keep
bpy.ops.object.mode_set(mode='EDIT', toggle=False) # set to Edit Mode AGAIN
bpy.ops.mesh.delete(type='FACE') # delete double faces
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.normals_make_consistent(inside=False) # recalculate normals
bpy.ops.mesh.remove_doubles(threshold=0.0001, use_unselected=False) #remove doubles
bpy.ops.mesh.normals_make_consistent(inside=False) # recalculate normals (this one or two lines above is redundant)
bpy.ops.object.mode_set(mode='OBJECT', toggle=False) # set to Object Mode AGAIN
| infobeisel/polyvr | extras/blender_scripts/remove_double_vertices_and_faces.py | Python | gpl-3.0 | 2,067 |
# ========================================================================
# Copyright (c) 2007, Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ========================================================================
#
# declarations for external metaweb api.
#
#
# from metaweb.api import HTTPMetawebSession
#
# mss = HTTPMetawebSession('sandbox.freebase.com')
# print mss.mqlread([dict(name=None, type='/type/type')])
#
#
#
__all__ = ['MetawebError', 'MetawebSession', 'HTTPMetawebSession', 'attrdict']
__version__ = '0.1'
import os, sys, re
import urllib2
import cookielib
import simplejson
from urllib import quote as urlquote
import pprint
import socket
import logging
try:
import httplib2
from httplib2cookie import CookiefulHttp
except ImportError:
httplib2 = None
CookiefulHttp = None
print ('freebase.api: you can install httplib2 for better performance')
import simplejson.encoder
# remove whitespace from json encoded output
simplejson.JSONEncoder.item_separator = ','
simplejson.JSONEncoder.key_separator = ':'
# don't escape slashes, we're not pasting into script tags here.
if simplejson.encoder.ESCAPE_DCT.get('/', None) == r'\/':
simplejson.encoder.ESCAPE_DCT['/'] = '/'
def urlencode_weak(s):
return urlquote(s, safe=',/:$')
# from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/361668
class attrdict(dict):
"""A dict whose items can also be accessed as member variables.
>>> d = attrdict(a=1, b=2)
>>> d['c'] = 3
>>> print d.a, d.b, d.c
1 2 3
>>> d.b = 10
>>> print d['b']
10
# but be careful, it's easy to hide methods
>>> print d.get('c')
3
>>> d['get'] = 4
>>> print d.get('a')
Traceback (most recent call last):
TypeError: 'int' object is not callable
"""
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
self.__dict__ = self
# TODO expose the common parts of the result envelope
class MetawebError(Exception):
"""
an error report from the metaweb service.
"""
pass
# TODO right now this is a completely unnecessary superclass.
# is there enough common behavior between session types
# to justify it?
class MetawebSession(object):
"""
    MetawebSession is the base class for metaweb sessions, subclassed for
    different connection types. Only http is available externally.
    This is more of an interface than a class.
"""
# interface definition here...
# from httplib2
NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
def _normalize_headers(headers):
    return dict([(key.lower(), NORMALIZE_SPACE.sub(' ', value).strip()) for (key, value) in headers.iteritems()])
class HTTPMetawebSession(MetawebSession):
"""
a MetawebSession is a request/response queue.
this version uses the HTTP api, and is synchronous.
"""
# share cookies across sessions, so that different sessions can
# see each other's writes immediately.
_default_cookiejar = cookielib.CookieJar()
def __init__(self, service_url, username=None, password=None, prev_session=None, cookiejar=None, cookiefile=None):
"""
create a new MetawebSession for interacting with the Metaweb.
        a new session will inherit state from prev_session if present.
"""
super(HTTPMetawebSession, self).__init__()
self.log = logging.getLogger()
assert not service_url.endswith('/')
if not '/' in service_url: # plain host:port
service_url = 'http://' + service_url
self.service_url = service_url
self.username = username
self.password = password
self.tid = None
if prev_session:
            self.service_url = prev_session.service_url
if cookiefile is not None:
cookiejar = self.open_cookie_file(cookiefile)
if cookiejar is not None:
self.cookiejar = cookiejar
elif prev_session:
self.cookiejar = prev_session.cookiejar
else:
self.cookiejar = self._default_cookiejar
if CookiefulHttp is not None:
self.httpclient = CookiefulHttp(cookiejar=self.cookiejar)
else:
cookiespy = urllib2.HTTPCookieProcessor(self.cookiejar)
self.opener = urllib2.build_opener(cookiespy)
def open_cookie_file(self, cookiefile=None):
if cookiefile is None or cookiefile == '':
if os.environ.has_key('HOME'):
cookiefile = os.path.join(os.environ['HOME'], '.pyfreebase/cookiejar')
else:
                raise MetawebError("no cookiefile specified and no $HOME/.pyfreebase directory")
cookiejar = cookielib.LWPCookieJar(cookiefile)
if os.path.exists(cookiefile):
cookiejar.load(ignore_discard=True)
return cookiejar
def _httpreq(self, service_path, method='GET', body=None, form=None,
headers=None):
"""
make an http request to the service.
form arguments are encoded in the url, even for POST, if a non-form
content-type is given for the body.
returns a pair (resp, body)
        resp is the response object and may differ depending
        on whether urllib2 or httplib2 is in use.
"""
if method == 'POST':
assert body is not None or form is not None
elif method == 'GET':
assert body is None
else:
assert 0, 'unknown method %s' % method
url = self.service_url + service_path
if headers is None:
headers = {}
else:
headers = _normalize_headers(headers)
# XXX This is a lousy way to parse Content-Type, where is
# the library?
ct = headers.get('content-type', None)
if ct is not None:
ct = ct.split(';')[0]
if body is not None:
# if body is provided, content-type had better be too
assert ct is not None
if form is not None:
qstr = '&'.join(['%s=%s' % (urlencode_weak(k), urlencode_weak(v))
for k,v in form.items()])
if method == 'POST':
# put the args on the url if we're putting something else
# in the body. this is used to add args to raw uploads.
if body is not None:
url += '?' + qstr
else:
if ct is None:
# XXX encoding and stuff
ct = 'application/x-www-form-urlencoded'
headers['content-type'] = ct
if ct == 'multipart/form-encoded':
# XXX fixme
raise NotImplementedError
elif ct == 'application/x-www-form-urlencoded':
body = qstr
else:
# for all methods other than POST, use the url
url += '?' + qstr
# assure the service that this isn't a CSRF form submission
headers['x-metaweb-request'] = 'Python'
if 'user-agent' not in headers:
headers['user-agent'] = 'python freebase.api-%s' % __version__
#if self.tid is not None:
# headers['x-metaweb-tid'] = self.tid
####### DEBUG MESSAGE - should check log level before generating
if form is None:
formstr = ''
else:
formstr = 'FORM:\n ' + '\n '.join(['%s=%s' % (k,v)
for k,v in form.items()])
if headers is None:
headerstr = ''
else:
headerstr = 'HEADERS:\n ' + '\n '.join([('%s: %s' % (k,v))
for k,v in headers.items()])
self.log.debug('%s %s%s%s', method, url, formstr, headerstr)
#######
if CookiefulHttp is not None:
return self._httplib2_request(url, method, body, headers)
else:
return self._urllib2_request(url, method, body, headers)
def _raise_service_error(self, status, ctype, body):
        is_jsbody = (ctype.endswith('javascript')
                     or ctype.endswith('json'))
        if str(status) == '400' and is_jsbody:
            r = self._loadjson(body)
            msg = r.messages[0]
            raise MetawebError(u'%s %s %r' % (msg.get('code',''), msg.message, msg.info))
        raise MetawebError, 'request failed: status %s: %r' % (status, body)
def _urllib2_request(self, url, method, body, headers):
req = urllib2.Request(url, body, headers)
try:
resp = self.opener.open(req)
except socket.error, e:
            self.log.error('SOCKET FAILURE: %s', str(e))
raise MetawebError, 'failed contacting %s: %s' % (url, str(e))
except urllib2.HTTPError, e:
            self._raise_service_error(e.code, e.info().type, e.fp.read())
for header in resp.info().headers:
self.log.debug('HTTP HEADER %s', header)
name, value = re.split("[:\n\r]", header, 1)
if name.lower() == 'x-metaweb-tid':
self.tid = value.strip()
return (resp, resp.read())
def _httplib2_request(self, url, method, body, headers):
try:
resp, content = self.httpclient.request(url, method=method,
body=body, headers=headers)
except socket.error, e:
            self.log.error('SOCKET FAILURE: %s', str(e))
raise MetawebError, 'failed contacting %s: %s' % (url, str(e))
except httplib2.HttpLib2ErrorWithResponse, e:
self._raise_service_error(resp.status, resp['content-type'], content)
except httplib2.HttpLib2Error, e:
raise MetawebError(u'HTTP error: %s' % (e,))
#tid = resp.get('x-metaweb-tid', None)
return (resp, content)
def _httpreq_json(self, *args, **kws):
resp, body = self._httpreq(*args, **kws)
return self._loadjson(body)
def _loadjson(self, json):
# TODO really this should be accomplished by hooking
# simplejson to create attrdicts instead of dicts.
def struct2attrdict(st):
"""
copy a json structure, turning all dicts into attrdicts.
copying descends instances of dict and list, including subclasses.
"""
if isinstance(st, dict):
return attrdict([(k,struct2attrdict(v)) for k,v in st.items()])
if isinstance(st, list):
return [struct2attrdict(li) for li in st]
return st
if json == '':
self.log.error('the empty string is not valid json')
raise MetawebError('the empty string is not valid json')
try:
r = simplejson.loads(json)
except ValueError, e:
self.log.error('error parsing json string %r' % json)
raise MetawebError, 'error parsing JSON string: %s' % e
return struct2attrdict(r)
def _check_mqlerror(self, r):
if r.code != '/api/status/ok':
for msg in r.messages:
self.log.error('mql error: %s %s %r' % (msg.code, msg.message, msg.get('query', None)))
raise MetawebError, 'query failed: %s %r' % (r.messages[0].code, r.messages[0].get('query', None))
def _mqlresult(self, r):
self._check_mqlerror(r)
# should check log level to avoid redundant simplejson.dumps
rstr = simplejson.dumps(r.result, indent=2)
if rstr[0] == '{':
rstr = rstr[1:-2]
self.log.info('result: %s', rstr)
return r.result
def login(self):
"""sign in to the service"""
assert self.username is not None
assert self.password is not None
self.log.debug('LOGIN USERNAME: %s', self.username)
try:
r = self._httpreq_json('/api/account/login', 'POST',
form=dict(username=self.username,
password=self.password))
except urllib2.HTTPError, e:
raise MetawebError("login error: %s", e)
if r.code != '/api/status/ok':
raise MetawebError(u'%s %r' % (r.get('code',''), r.messages))
self.log.debug('LOGIN RESP: %r', r)
self.log.debug('LOGIN COOKIES: %s', self.cookiejar)
def mqlreaditer(self, sq):
"""read a structure query"""
cursor = True
while 1:
subq = dict(query=[sq], cursor=cursor, escape=False)
qstr = simplejson.dumps(subq)
service = '/api/service/mqlread'
r = self._httpreq_json(service, form=dict(query=qstr))
for item in self._mqlresult(r):
yield item
if r['cursor']:
cursor = r['cursor']
self.log.info('CONTINUING with %s', cursor)
else:
return
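    # Usage sketch (illustrative only; it mirrors the comment examples at the
    # top of this module rather than any separate documentation):
    #
    #   mss = HTTPMetawebSession('sandbox.freebase.com')
    #   for t in mss.mqlreaditer(dict(name=None, type='/type/type')):
    #       print t.name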
def mqlread(self, sq):
"""read a structure query"""
subq = dict(query=sq, escape=False)
if isinstance(sq, list):
subq['cursor'] = True
service = '/api/service/mqlread'
# should check log level to avoid redundant simplejson.dumps
self.log.info('%s: %s',
service,
simplejson.dumps(sq, indent=2)[1:-2])
qstr = simplejson.dumps(subq)
r = self._httpreq_json(service, form=dict(query=qstr))
return self._mqlresult(r)
def trans(self, guid):
"""translate blob from guid """
url = '/api/trans/raw' + urlquote(guid)
self.log.info(url)
resp, body = self._httpreq(url)
self.log.info('%d bytes' % len(body))
return body
def mqlwrite(self, sq):
"""do a mql write"""
query = dict(query=sq, escape=False)
qstr = simplejson.dumps(query)
self.log.debug('MQLWRITE: %s', qstr)
service = '/api/service/mqlwrite'
# should check log level to avoid redundant simplejson.dumps
self.log.info('%s: %s',
service,
simplejson.dumps(sq, indent=2)[1:-2])
r = self._httpreq_json(service, 'POST',
form=dict(query=qstr))
self.log.debug('MQLWRITE RESP: %r', r)
return self._mqlresult(r)
def mqlflush(self):
"""ask the service not to hand us old data"""
self.log.debug('MQLFLUSH')
service = '/api/service/mqlwrite'
r = self._httpreq_json(service, 'POST', form={})
self._check_mqlerror(r)
return r
def upload(self, body, content_type, document_id=False):
"""upload to the metaweb"""
service = '/api/service/upload'
self.log.info('POST %s: %s (%d bytes)',
service, content_type, len(body))
headers = {}
if content_type is not None:
headers['content-type'] = content_type
form = None
if document_id is not False:
if document_id is None:
form = { 'document': '' }
else:
form = { 'document': document_id }
# note the use of both body and form.
# form parameters get encoded into the URL in this case
r = self._httpreq_json(service, 'POST',
headers=headers, body=body, form=form)
return self._mqlresult(r)
if __name__ == '__main__':
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
mss = HTTPMetawebSession('sandbox.freebase.com')
    mss.log.setLevel(logging.DEBUG)
    mss.log.addHandler(console)
print mss.mqlread([dict(name=None, type='/type/type')])
| artzub/code_swarm-gource-my-conf | tools/codeswarm/lib/freebase/api/session.py | Python | gpl-3.0 | 17,231 |
#!/usr/bin/env python
import argparse
import os
import sys
import math
import pyRootPwa
import pyRootPwa.core
def writeParticleToFile (outFile, particleName, particleMomentum):
if pyRootPwa.core.particleDataTable.isInTable(particleName):
partProperties = pyRootPwa.core.particleDataTable.entry(particleName)
charge = partProperties.charge
energy = math.sqrt(particleMomentum.Px()**2 + particleMomentum.Py()**2 + particleMomentum.Pz()**2 + partProperties.mass2)
outFile.write(
str(pyRootPwa.core.particleDataTable.geantIdFromParticleName(particleName)) + " " +
str(charge) + " " +
'%.16e' % particleMomentum.Px() + " " +
'%.16e' % particleMomentum.Py() + " " +
'%.16e' % particleMomentum.Pz() + " " +
'%.16e' % energy + "\n"
)
return True
else:
pyRootPwa.utils.printErr("particle '" + particleName + "' not found in particleDataTable.")
return False
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Converts ROOTPWA .root file to .evt file."
)
parser.add_argument("inputFileName", help="The path to the RootPwa input file")
parser.add_argument("outputFileName", help="The path to the ASCII evt output file")
parser.add_argument("-p", "--particleDataTable", help="The path of particleDataTable file (default: '$ROOTPWA/particleData/particleDataTable.txt')",
default='$ROOTPWA/particleData/particleDataTable.txt')
args = parser.parse_args()
printWarn = pyRootPwa.utils.printWarn
printErr = pyRootPwa.utils.printErr
printSucc = pyRootPwa.utils.printSucc
ROOT = pyRootPwa.ROOT
pdtPath = os.path.expandvars(args.particleDataTable)
if not pyRootPwa.core.particleDataTable.instance.readFile(pdtPath):
printErr("error loading particleDataTable from '" + pdtPath + "'. Aborting...")
sys.exit(1)
inputFile = ROOT.TFile(args.inputFileName, "READ")
if not inputFile:
printErr("error opening input file. Aborting...")
sys.exit(1)
metaData = pyRootPwa.core.eventMetadata.readEventFile(inputFile)
	if metaData == 0:
		printErr("error reading metaData. Input file is not a RootPWA root file.")
		sys.exit(1)
prodKinPartNames = metaData.productionKinematicsParticleNames()
decayKinPartNames = metaData.decayKinematicsParticleNames()
tree = metaData.eventTree()
with open(args.outputFileName, 'w') as outputEvtFile:
particleCount = len(prodKinPartNames) + len(decayKinPartNames)
for event in tree:
prodKinMomenta = event.__getattr__(metaData.productionKinematicsMomentaBranchName)
decayKinMomenta = event.__getattr__(metaData.decayKinematicsMomentaBranchName)
if particleCount != (prodKinMomenta.GetEntries() + decayKinMomenta.GetEntries()):
printErr("particle count in metaData does not match particle count in event data.")
sys.exit(1)
outputEvtFile.write(str(particleCount) + '\n')
for particle in range(prodKinMomenta.GetEntries()):
if not writeParticleToFile(outputEvtFile, prodKinPartNames[particle], prodKinMomenta[particle]):
					printErr("failed writing particle '" + str(particle) + "' to output file.")
sys.exit(1)
for particle in range(decayKinMomenta.GetEntries()):
if not writeParticleToFile(outputEvtFile, decayKinPartNames[particle], decayKinMomenta[particle]):
					printErr("failed writing particle '" + str(particle) + "' to output file.")
sys.exit(1)
inputFile.Close()
printSucc("successfully converted '" + args.inputFileName + "' to '" + args.outputFileName + "'.")
| legordian/ROOTPWA | pyInterface/scripts/convertTreeToEvt.py | Python | gpl-3.0 | 3,620 |
#-*- coding: utf-8 -*-
'''
Created on 24 Dec 2010
@author: ivan
'''
import random
all_agents = """
Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3
Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)
Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)
Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1
Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1
Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)
Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)
Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)
Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)
Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)
"""
def get_ranmom_agent():
    agents = all_agents
    for i in xrange(10):
        agents = agents.replace(str(i), str(random.randint(0, 10)))
return agents.splitlines()[random.randint(1, 10)]
| sitexa/foobnix | foobnix/util/agent.py | Python | gpl-3.0 | 1,349 |
import os
import numpy as np
from vrml.vrml97 import basenodes, nodetypes, parser, parseprocessor
class VRML_Loader(object):
"""
Parser for VRML files. The VRML language is described in its specification
at http://www.web3d.org/documents/specifications/14772/V2.0/index.html
"""
def __init__(self, environment, filename, translation=None, transform=None):
self.environment = environment
self.filename = filename
if translation is None:
translation = (0.0, 0.0, 0.0)
elif len(translation) != 3:
raise ValueError("Translation must be a 3-component offset")
self.translation = tuple(translation)
self._transform = transform
vrml_parser = parser.Parser(parser.grammar, "vrmlFile")
processor = parseprocessor.ParseProcessor(baseURI=self.filename)
with open(self.filename, 'r') as f:
data = f.read()
self._scene = vrml_parser.parse(data, processor=processor)[1][1]
self._objects = None
def get_objects(self):
"""
Retrieve the objects from the VRML scene file.
The objects are provided as a list of lists of lists, where the deepest
nested lists are faces describing a polygon using point locations. Each
element of the list can therefore have multiple faces.
"""
if self._objects is None:
self._objects = []
self._parse_children(self._scene, self._transform)
return self._objects
def _parse_children(self, group, transform=None):
for child in group.children:
if isinstance(child, basenodes.Inline):
# Include the objects from the referenced file into the scene.
path = os.path.join(os.path.dirname(self.filename),
child.url[0])
loader = VRML_Loader(self.environment, path,
translation=self.translation,
transform=transform)
self._objects.extend(loader.get_objects())
elif isinstance(child, basenodes.Transform):
# Jumble up transformation matrices, in case they are nested.
forward = child.localMatrices().data[0]
if forward is not None:
if transform is not None:
new_transform = np.dot(transform, forward)
else:
new_transform = forward
else:
new_transform = transform
self._parse_children(child, new_transform)
elif isinstance(child, nodetypes.Grouping):
# Retrieve children from grouped nodes.
self._parse_children(child, transform)
elif isinstance(child, basenodes.Shape):
# Parse the coordinates from a shape's geometry.
self._parse_geometry(child.geometry, transform)
def _parse_geometry(self, geometry, transform=None):
faces = []
face = []
for i in geometry.coordIndex:
if i == -1:
faces.append(face)
face = []
else:
point = geometry.coord.point[i]
if transform is not None:
# The translation matrices from the VRML library are for
# affine translations, but they are transposed for some
# reason. See vrml.vrml97.transformmatrix, e.g. line 319.
point = np.dot(transform.T, np.append(point, 1).T)
# Convert to Location
# VRML geometry notation is in (x,z,y) where y is the vertical
# axis (using GL notation here). We have to convert it to
# (z,x,y) since the z/x are related to distances on the ground
# in north and east directions, respectively, and y is still
# the altitude.
north = point[1] + self.translation[0]
east = point[0] - self.translation[1]
alt = point[2] + self.translation[2]
loc = self.environment.get_location(north, east, alt)
face.append(loc)
if len(face) > 0:
faces.append(face)
self._objects.append(faces)
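
# Usage sketch (hedged): "environment" stands for any object providing the
# get_location(north, east, alt) method used above, and "scene.wrl" is a
# made-up file name for illustration.
#
#   loader = VRML_Loader(environment, "scene.wrl", translation=(0.0, 0.0, 0.0))
#   for faces in loader.get_objects():
#       for face in faces:
#           print face  # one polygon as a list of Location objects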
| timvandermeij/mobile-radio-tomography | environment/VRML_Loader.py | Python | gpl-3.0 | 4,378 |
import sys
import numpy as np
from scipy import stats
import subprocess as sp
import datetime
import socket
import os
exec_name = sys.argv[1]
max_t = int(sys.argv[2])
ntries = 5
tot_timings = []
for t_idx in range(1,max_t + 1):
cur_timings = []
for _ in range(ntries):
# Run the process.
p = sp.Popen([exec_name,str(t_idx)],stdout=sp.PIPE,stderr=sp.STDOUT)
# Wait for it to finish and get stdout.
out = p.communicate()[0]
# Parse the stderr in order to find the time.
out = out.split(bytes('\n','ascii'))[1].split()[0][0:-1]
cur_timings.append(float(out))
tot_timings.append(cur_timings)
tot_timings = np.array(tot_timings)
retval = np.array([np.mean(tot_timings,axis=1),stats.sem(tot_timings,axis=1)])
fmt='{fname}_%Y%m%d%H%M%S'
filename = datetime.datetime.now().strftime(fmt).format(fname=socket.gethostname() + '_' + os.path.basename(exec_name)) + '.txt'
np.savetxt(filename,retval)
| darioizzo/piranha | tools/benchmark.py | Python | gpl-3.0 | 911 |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Per-course integration tests for Course Builder."""
__author__ = [
'Todd Larsen ([email protected])'
]
from modules.courses import courses_pageobjects
from tests.integration import integration
class AvailabilityTests(integration.TestBase):
def setUp(self):
super(AvailabilityTests, self).setUp()
self.login(self.LOGIN, admin=True)
def test_availability_page_js(self):
"""Checks the parts of the Publish > Availability page contents that
are dynamically altered by availability.js.
"""
sample_course_name = '' # Power Searching course w/ blank namespace.
sample_availablity_page = self.load_dashboard(
sample_course_name
).click_availability(
cls=courses_pageobjects.CourseAvailabilityPage
).verify_content_present_no_msgs(
has_triggers=True
).verify_add_trigger_button(
)
empty_course_name = self.create_new_course(login=False)[0]
self.load_dashboard(
empty_course_name
).click_availability(
cls=courses_pageobjects.CourseAvailabilityPage
).verify_empty_content_msgs(
).verify_no_trigger_button(
)
| GirlsCodePy/girlscode-coursebuilder | modules/courses/courses_integration_tests.py | Python | gpl-3.0 | 1,816 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('libreosteoweb', '0018_auto_20150420_1232'),
]
operations = [
migrations.AlterField(
model_name='regulardoctor',
name='phone',
field=models.CharField(max_length=100, null=True, verbose_name='Phone', blank=True),
),
]
| littlejo/Libreosteo | libreosteoweb/migrations/0019_auto_20150420_1821.py | Python | gpl-3.0 | 460 |
import boto,sys,euca_admin
from boto.exception import EC2ResponseError
from euca_admin.generic import BooleanResponse
from euca_admin.generic import StringList
from boto.resultset import ResultSet
from euca_admin import EucaAdmin
from optparse import OptionParser
SERVICE_PATH = '/services/Accounts'
class Group():
def __init__(self, groupName=None):
self.group_groupName = groupName
self.group_users = StringList()
self.group_auths = StringList()
self.euca = EucaAdmin(path=SERVICE_PATH)
def __repr__(self):
r = 'GROUP \t%s\t' % (self.group_groupName)
r = '%s\nUSERS\t%s\t%s' % (r,self.group_groupName,self.group_users)
r = '%s\nAUTH\t%s\t%s' % (r,self.group_groupName,self.group_auths)
return r
def startElement(self, name, attrs, connection):
if name == 'euca:users':
return self.group_users
if name == 'euca:authorizations':
return self.group_auths
else:
return None
def endElement(self, name, value, connection):
if name == 'euca:groupName':
self.group_groupName = value
else:
setattr(self, name, value)
def get_describe_parser(self):
parser = OptionParser("usage: %prog [GROUPS...]",version="Eucalyptus %prog VERSION")
return parser.parse_args()
def cli_describe(self):
(options, args) = self.get_describe_parser()
self.group_describe(args)
def group_describe(self,groups=None):
params = {}
if groups:
self.euca.connection.build_list_params(params,groups,'GroupNames')
try:
list = self.euca.connection.get_list('DescribeGroups', params, [('euca:item', Group)])
for i in list:
print i
except EC2ResponseError, ex:
self.euca.handle_error(ex)
def get_single_parser(self):
parser = OptionParser("usage: %prog GROUPNAME",version="Eucalyptus %prog VERSION")
(options,args) = parser.parse_args()
if len(args) != 1:
print "ERROR Required argument GROUPNAME is missing or malformed."
parser.print_help()
sys.exit(1)
else:
return (options,args)
def cli_add(self):
(options, args) = self.get_single_parser()
self.group_add(args[0])
def group_add(self, groupName):
try:
reply = self.euca.connection.get_object('AddGroup', {'GroupName':groupName}, BooleanResponse)
print reply
except EC2ResponseError, ex:
self.euca.handle_error(ex)
def cli_delete(self):
(options, args) = self.get_single_parser()
self.group_delete(args[0])
def group_delete(self, groupName):
try:
reply = self.euca.connection.get_object('DeleteGroup', {'GroupName':groupName},BooleanResponse)
print reply
except EC2ResponseError, ex:
self.euca.handle_error(ex)
| Shebella/HIPPO | clc/tools/src/euca_admin/groups.py | Python | gpl-3.0 | 2,776 |
#!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
from tinycss.css21 import CSS21Parser
from tinycss.parsing import remove_whitespace, split_on_comma, ParseError
class MediaQuery(object):
__slots__ = 'media_type', 'expressions', 'negated'
def __init__(self, media_type='all', expressions=(), negated=False):
self.media_type = media_type
self.expressions = expressions
self.negated = negated
def __repr__(self):
return '<MediaQuery type=%s negated=%s expressions=%s>' % (
self.media_type, self.negated, self.expressions)
def __eq__(self, other):
return self.media_type == getattr(other, 'media_type', None) and \
self.negated == getattr(other, 'negated', None) and \
self.expressions == getattr(other, 'expressions', None)
class MalformedExpression(Exception):
def __init__(self, tok, msg):
Exception.__init__(self, msg)
self.tok = tok
class CSSMedia3Parser(CSS21Parser):
''' Parse media queries as defined by the CSS 3 media module '''
def parse_media(self, tokens, errors):
if not tokens:
return [MediaQuery('all')]
queries = []
for part in split_on_comma(remove_whitespace(tokens)):
negated = False
media_type = None
expressions = []
try:
for i, tok in enumerate(part):
if i == 0 and tok.type == 'IDENT':
val = tok.value.lower()
if val == 'only':
continue # ignore leading ONLY
if val == 'not':
negated = True
continue
if media_type is None and tok.type == 'IDENT':
media_type = tok.value
continue
elif media_type is None:
media_type = 'all'
if tok.type == 'IDENT' and tok.value.lower() == 'and':
continue
if not tok.is_container:
raise MalformedExpression(tok, 'expected a media expression not a %s' % tok.type)
if tok.type != '(':
raise MalformedExpression(tok, 'media expressions must be in parentheses not %s' % tok.type)
content = remove_whitespace(tok.content)
if len(content) == 0:
raise MalformedExpression(tok, 'media expressions cannot be empty')
if content[0].type != 'IDENT':
raise MalformedExpression(content[0], 'expected a media feature not a %s' % tok.type)
media_feature, expr = content[0].value, None
if len(content) > 1:
if len(content) < 3:
raise MalformedExpression(content[1], 'malformed media feature definition')
if content[1].type != ':':
raise MalformedExpression(content[1], 'expected a :')
expr = content[2:]
if len(expr) == 1:
expr = expr[0]
elif len(expr) == 3 and (expr[0].type, expr[1].type, expr[1].value, expr[2].type) == (
'INTEGER', 'DELIM', '/', 'INTEGER'):
# This should really be moved into token_data, but
# since RATIO is not part of CSS 2.1 and does not
# occur anywhere else, we special case it here.
r = expr[0]
r.value = (expr[0].value, expr[2].value)
r.type = 'RATIO'
r._as_css = expr[0]._as_css + expr[1]._as_css + expr[2]._as_css
expr = r
else:
raise MalformedExpression(expr[0], 'malformed media feature definition')
expressions.append((media_feature, expr))
except MalformedExpression as err:
                errors.append(ParseError(err.tok, err.message))
media_type, negated, expressions = 'all', True, ()
queries.append(MediaQuery(media_type or 'all', expressions=tuple(expressions), negated=negated))
return queries
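
if __name__ == '__main__':  # pragma: no cover
    # Small, illustrative smoke test (an addition for demonstration, not part
    # of the original module): parse a stylesheet containing a CSS 3 media
    # query and print the MediaQuery objects attached to the @media rule.
    demo_parser = CSSMedia3Parser()
    demo_sheet = demo_parser.parse_stylesheet(
        '@media not screen and (min-width: 600px) { body { color: red } }')
    for demo_rule in demo_sheet.rules:
        print(getattr(demo_rule, 'media', None))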
| nozuono/calibre-webserver | src/tinycss/media3.py | Python | gpl-3.0 | 4,645 |
from south.db import db
from django.db import models
from transifex.releases.models import *
class Migration:
def forwards(self, orm):
"Write your forwards migration here"
def backwards(self, orm):
"Write your backwards migration here"
models = {
}
complete_apps = ['releases']
| tymofij/adofex | transifex/releases/migrations/0001_initial.py | Python | gpl-3.0 | 323 |
# Copyright (c) 2019, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class QualityReviewObjective(Document):
pass
| frappe/erpnext | erpnext/quality_management/doctype/quality_review_objective/quality_review_objective.py | Python | gpl-3.0 | 228 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
# Deleting field id
db.delete_column('layers_layer', 'id')
# set new primary key for layers_layer
db.create_primary_key('layers_layer', ['resourcebase_ptr_id'])
def backwards(self, orm):
raise RuntimeError("Cannot reverse this migration.")
models = {
u'actstream.action': {
'Meta': {'ordering': "('-timestamp',)", 'object_name': 'Action'},
'action_object_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'action_object'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'action_object_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'actor_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actor'", 'to': u"orm['contenttypes.ContentType']"}),
'actor_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'data': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'target_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'target'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'target_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 1, 10, 5, 46, 57, 679891)'}),
'verb': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 1, 10, 5, 46, 57, 688538)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 1, 10, 5, 46, 57, 688151)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'base.contactrole': {
'Meta': {'unique_together': "(('contact', 'resource', 'role'),)", 'object_name': 'ContactRole'},
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['people.Profile']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'resource': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.ResourceBase']"}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['people.Role']"})
},
u'base.region': {
'Meta': {'ordering': "('name',)", 'object_name': 'Region'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'base.resourcebase': {
'Meta': {'object_name': 'ResourceBase'},
'abstract': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'bbox_x0': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '19', 'decimal_places': '10', 'blank': 'True'}),
'bbox_x1': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '19', 'decimal_places': '10', 'blank': 'True'}),
'bbox_y0': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '19', 'decimal_places': '10', 'blank': 'True'}),
'bbox_y1': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '19', 'decimal_places': '10', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.TopicCategory']", 'null': 'True', 'blank': 'True'}),
'constraints_other': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'contacts': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['people.Profile']", 'through': u"orm['base.ContactRole']", 'symmetrical': 'False'}),
'csw_anytext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'csw_insert_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'csw_mdsource': ('django.db.models.fields.CharField', [], {'default': "'local'", 'max_length': '256'}),
'csw_schema': ('django.db.models.fields.CharField', [], {'default': "'http://www.isotc211.org/2005/gmd'", 'max_length': '64'}),
'csw_type': ('django.db.models.fields.CharField', [], {'default': "'dataset'", 'max_length': '32'}),
'csw_typename': ('django.db.models.fields.CharField', [], {'default': "'gmd:MD_Metadata'", 'max_length': '32'}),
'csw_wkt_geometry': ('django.db.models.fields.TextField', [], {'default': "'POLYGON((-180 -90,-180 90,180 90,180 -90,-180 -90))'"}),
'data_quality_statement': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_type': ('django.db.models.fields.CharField', [], {'default': "'publication'", 'max_length': '255'}),
'distribution_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'distribution_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'edition': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'eng'", 'max_length': '3'}),
'maintenance_frequency': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'metadata_uploaded': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'metadata_xml': ('django.db.models.fields.TextField', [], {'default': '\'<gmd:MD_Metadata xmlns:gmd="http://www.isotc211.org/2005/gmd"/>\'', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'purpose': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'regions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['base.Region']", 'symmetrical': 'False', 'blank': 'True'}),
'restriction_code_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.RestrictionCodeType']", 'null': 'True', 'blank': 'True'}),
'spatial_representation_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.SpatialRepresentationType']", 'null': 'True', 'blank': 'True'}),
'srid': ('django.db.models.fields.CharField', [], {'default': "'EPSG:4326'", 'max_length': '255'}),
'supplemental_information': ('django.db.models.fields.TextField', [], {'default': "u'No information provided'"}),
'temporal_extent_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'temporal_extent_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'thumbnail': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.Thumbnail']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
u'base.restrictioncodetype': {
'Meta': {'ordering': "('identifier',)", 'object_name': 'RestrictionCodeType'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '255'}),
'gn_description': ('django.db.models.fields.TextField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'is_choice': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'base.spatialrepresentationtype': {
'Meta': {'ordering': "('identifier',)", 'object_name': 'SpatialRepresentationType'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'gn_description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'is_choice': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'base.thumbnail': {
'Meta': {'object_name': 'Thumbnail'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'thumb_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'thumb_spec': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True'})
},
u'base.topiccategory': {
'Meta': {'ordering': "('identifier',)", 'object_name': 'TopicCategory'},
'description': ('django.db.models.fields.TextField', [], {}),
'gn_description': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'default': "'location'", 'max_length': '255'}),
'is_choice': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'layers.attribute': {
'Meta': {'object_name': 'Attribute'},
'attribute': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'attribute_label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'attribute_type': ('django.db.models.fields.CharField', [], {'default': "'xsd:string'", 'max_length': '50'}),
'average': ('django.db.models.fields.CharField', [], {'default': "'NA'", 'max_length': '255', 'null': 'True'}),
'count': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_stats_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'layer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attribute_set'", 'to': u"orm['layers.Layer']"}),
'max': ('django.db.models.fields.CharField', [], {'default': "'NA'", 'max_length': '255', 'null': 'True'}),
'median': ('django.db.models.fields.CharField', [], {'default': "'NA'", 'max_length': '255', 'null': 'True'}),
'min': ('django.db.models.fields.CharField', [], {'default': "'NA'", 'max_length': '255', 'null': 'True'}),
'stddev': ('django.db.models.fields.CharField', [], {'default': "'NA'", 'max_length': '255', 'null': 'True'}),
'sum': ('django.db.models.fields.CharField', [], {'default': "'NA'", 'max_length': '255', 'null': 'True'}),
'unique_values': ('django.db.models.fields.TextField', [], {'default': "'NA'", 'null': 'True', 'blank': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'layers.layer': {
'Meta': {'object_name': 'Layer', '_ormbases': [u'base.ResourceBase']},
'default_style': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'layer_default_style'", 'null': 'True', 'to': u"orm['layers.Style']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'popular_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'resourcebase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['base.ResourceBase']", 'unique': 'True', 'primary_key': 'True'}),
'share_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'store': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'storeType': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'styles': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'layer_styles'", 'symmetrical': 'False', 'to': u"orm['layers.Style']"}),
'typename': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'workspace': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'layers.style': {
'Meta': {'object_name': 'Style'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'sld_body': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'sld_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'sld_url': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'sld_version': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}),
'workspace': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'people.profile': {
'Meta': {'object_name': 'Profile'},
'area': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'delivery': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'profile': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'profile'", 'unique': 'True', 'null': 'True', 'to': u"orm['auth.User']"}),
'voice': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'people.role': {
'Meta': {'object_name': 'Role'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
}
}
complete_apps = ['layers']
| GISPPU/GrenadaLandInformation | geonode/layers/migrations/0010_remove_unneeded_objects.py | Python | gpl-3.0 | 20,864 |
from nose.tools import eq_, with_setup
from threading import Thread
from Queue import Queue
from time import sleep
def setup():
global Person, neo4django, gdb, neo4jrestclient, neo_constants, settings, models
from neo4django.tests import Person, neo4django, gdb, neo4jrestclient, \
neo_constants, settings
from neo4django.db import models
def teardown():
gdb.cleandb()
@with_setup(None, teardown)
def test_typenode_transactionality():
class RaceModel(models.NodeModel):
pass
exc_queue = Queue()
def race():
r = RaceModel()
try:
r.save()
except Exception, e:
exc_queue.put(str(e))
else:
exc_queue.put(True)
num_threads = 5
for i in xrange(num_threads):
thread = Thread(target=race)
thread.start()
for i in xrange(num_threads):
val = exc_queue.get()
if val is not True:
raise AssertionError('There was an error saving one of the '
'RaceModels (#%d) - "%s"' % (i, val))
#check the number of typenodes
typenode_script = "g.v(0).outE('<<TYPE>>').inV.filter{it.model_name=='%s'}"
typenode_script %= RaceModel.__name__
typenodes = gdb.extensions.GremlinPlugin.execute_script(typenode_script)
eq_(len(typenodes), 1)
def race(func, num_threads):
"""
Run a multi-threaded race on func. Func should accept a single argument-
a Queue. If func succeeds, it should `q.put(True)`- if it fails, it should
`q.put('error message')`.
"""
exc_queue = Queue()
for i in xrange(num_threads):
thread = Thread(target=func, args=(exc_queue,))
thread.start()
for i in xrange(num_threads):
val = exc_queue.get()
if val is not True:
raise AssertionError('There was an error running race (#%d) - "%s"'
% (i, val))
@with_setup(None, teardown)
def test_autoproperty_transactionality():
class AutoRaceModel(models.NodeModel):
some_id = models.AutoProperty()
def autorace(queue):
r = AutoRaceModel()
try:
r.save()
except Exception, e:
queue.put(str(e))
else:
queue.put(True)
race(autorace, 3)
eq_(len(set(m.some_id for m in AutoRaceModel.objects.all())), 3)
| coffenbacher/neo4django | neo4django/tests/synchronicity_tests.py | Python | gpl-3.0 | 2,374 |
# coding=utf-8
__author__ = "Gina Häußge <[email protected]>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
from flask import request, jsonify, make_response
import re
from octoprint.settings import settings, valid_boolean_trues
from octoprint.server import printer, restricted_access, NO_CONTENT
from octoprint.server.api import api
import octoprint.util as util
#~~ Printer
@api.route("/printer", methods=["GET"])
def printerState():
if not printer.isOperational():
return make_response("Printer is not operational", 409)
# process excludes
excludes = []
if "exclude" in request.values:
excludeStr = request.values["exclude"]
if len(excludeStr.strip()) > 0:
excludes = filter(lambda x: x in ["temperature", "sd", "state"], map(lambda x: x.strip(), excludeStr.split(",")))
result = {}
# add temperature information
if not "temperature" in excludes:
result.update({"temperature": _getTemperatureData(lambda x: x)})
# add sd information
if not "sd" in excludes and settings().getBoolean(["feature", "sdSupport"]):
result.update({"sd": {"ready": printer.isSdReady()}})
# add state information
if not "state" in excludes:
state = printer.getCurrentData()["state"]
result.update({"state": state})
return jsonify(result)
#~~ Tool
@api.route("/printer/tool", methods=["POST"])
@restricted_access
def printerToolCommand():
if not printer.isOperational():
return make_response("Printer is not operational", 409)
valid_commands = {
"select": ["tool"],
"target": ["targets"],
"offset": ["offsets"],
"extrude": ["amount"]
}
command, data, response = util.getJsonCommandFromRequest(request, valid_commands)
if response is not None:
return response
validation_regex = re.compile("tool\d+")
##~~ tool selection
if command == "select":
tool = data["tool"]
if re.match(validation_regex, tool) is None:
return make_response("Invalid tool: %s" % tool, 400)
if not tool.startswith("tool"):
return make_response("Invalid tool for selection: %s" % tool, 400)
printer.changeTool(tool)
##~~ temperature
elif command == "target":
targets = data["targets"]
# make sure the targets are valid and the values are numbers
validated_values = {}
for tool, value in targets.iteritems():
if re.match(validation_regex, tool) is None:
return make_response("Invalid target for setting temperature: %s" % tool, 400)
if not isinstance(value, (int, long, float)):
return make_response("Not a number for %s: %r" % (tool, value), 400)
validated_values[tool] = value
# perform the actual temperature commands
for tool in validated_values.keys():
printer.setTemperature(tool, validated_values[tool])
##~~ temperature offset
elif command == "offset":
offsets = data["offsets"]
# make sure the targets are valid, the values are numbers and in the range [-50, 50]
validated_values = {}
for tool, value in offsets.iteritems():
if re.match(validation_regex, tool) is None:
return make_response("Invalid target for setting temperature: %s" % tool, 400)
if not isinstance(value, (int, long, float)):
return make_response("Not a number for %s: %r" % (tool, value), 400)
if not -50 <= value <= 50:
return make_response("Offset %s not in range [-50, 50]: %f" % (tool, value), 400)
validated_values[tool] = value
# set the offsets
printer.setTemperatureOffset(validated_values)
##~~ extrusion
elif command == "extrude":
if printer.isPrinting():
# do not extrude when a print job is running
return make_response("Printer is currently printing", 409)
amount = data["amount"]
if not isinstance(amount, (int, long, float)):
return make_response("Not a number for extrusion amount: %r" % amount, 400)
printer.extrude(amount)
return NO_CONTENT
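
# Illustrative request bodies for the tool command endpoint above, derived from
# its valid_commands table (the concrete values are made-up examples, not taken
# from separate API documentation):
#
#   {"command": "select", "tool": "tool0"}
#   {"command": "target", "targets": {"tool0": 220, "tool1": 205}}
#   {"command": "offset", "offsets": {"tool0": 5}}
#   {"command": "extrude", "amount": 5}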
@api.route("/printer/tool", methods=["GET"])
def printerToolState():
def deleteBed(x):
data = dict(x)
if "bed" in data.keys():
del data["bed"]
return data
return jsonify(_getTemperatureData(deleteBed))
##~~ Heated bed
@api.route("/printer/bed", methods=["POST"])
@restricted_access
def printerBedCommand():
if not printer.isOperational():
return make_response("Printer is not operational", 409)
valid_commands = {
"target": ["target"],
"offset": ["offset"]
}
command, data, response = util.getJsonCommandFromRequest(request, valid_commands)
if response is not None:
return response
##~~ temperature
if command == "target":
target = data["target"]
# make sure the target is a number
if not isinstance(target, (int, long, float)):
return make_response("Not a number: %r" % target, 400)
# perform the actual temperature command
printer.setTemperature("bed", target)
##~~ temperature offset
elif command == "offset":
offset = data["offset"]
# make sure the offset is valid
if not isinstance(offset, (int, long, float)):
return make_response("Not a number: %r" % offset, 400)
if not -50 <= offset <= 50:
return make_response("Offset not in range [-50, 50]: %f" % offset, 400)
# set the offsets
printer.setTemperatureOffset({"bed": offset})
return NO_CONTENT
@api.route("/printer/bed", methods=["GET"])
def printerBedState():
def deleteTools(x):
data = dict(x)
for k in data.keys():
if k.startswith("tool"):
del data[k]
return data
return jsonify(_getTemperatureData(deleteTools))
##~~ Print head
@api.route("/printer/printhead", methods=["POST"])
@restricted_access
def printerPrintheadCommand():
if not printer.isOperational() or printer.isPrinting():
# do not jog when a print job is running or we don't have a connection
return make_response("Printer is not operational or currently printing", 409)
valid_commands = {
"jog": [],
"home": ["axes"]
}
command, data, response = util.getJsonCommandFromRequest(request, valid_commands)
if response is not None:
return response
valid_axes = ["x", "y", "z"]
##~~ jog command
if command == "jog":
# validate all jog instructions, make sure that the values are numbers
validated_values = {}
for axis in valid_axes:
if axis in data:
value = data[axis]
if not isinstance(value, (int, long, float)):
return make_response("Not a number for axis %s: %r" % (axis, value), 400)
validated_values[axis] = value
# execute the jog commands
for axis, value in validated_values.iteritems():
printer.jog(axis, value)
##~~ home command
elif command == "home":
validated_values = []
axes = data["axes"]
for axis in axes:
if not axis in valid_axes:
return make_response("Invalid axis: %s" % axis, 400)
validated_values.append(axis)
# execute the home command
printer.home(validated_values)
return NO_CONTENT
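
# Illustrative request bodies for the printhead command endpoint above, derived
# from its validation logic (axis values are made-up examples):
#
#   {"command": "jog", "x": 10, "y": -5, "z": 0.2}
#   {"command": "home", "axes": ["x", "y", "z"]}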
##~~ SD Card
@api.route("/printer/sd", methods=["POST"])
@restricted_access
def printerSdCommand():
if not settings().getBoolean(["feature", "sdSupport"]):
return make_response("SD support is disabled", 404)
if not printer.isOperational() or printer.isPrinting() or printer.isPaused():
return make_response("Printer is not operational or currently busy", 409)
valid_commands = {
"init": [],
"refresh": [],
"release": []
}
command, data, response = util.getJsonCommandFromRequest(request, valid_commands)
if response is not None:
return response
if command == "init":
printer.initSdCard()
elif command == "refresh":
printer.refreshSdFiles()
elif command == "release":
printer.releaseSdCard()
return NO_CONTENT
@api.route("/printer/sd", methods=["GET"])
def printerSdState():
if not settings().getBoolean(["feature", "sdSupport"]):
return make_response("SD support is disabled", 404)
return jsonify(ready=printer.isSdReady())
##~~ Commands
@api.route("/printer/command", methods=["POST"])
@restricted_access
def printerCommand():
# TODO: document me
if not printer.isOperational():
return make_response("Printer is not operational", 409)
if not "application/json" in request.headers["Content-Type"]:
return make_response("Expected content type JSON", 400)
data = request.json
parameters = {}
if "parameters" in data.keys(): parameters = data["parameters"]
commands = []
if "command" in data.keys(): commands = [data["command"]]
elif "commands" in data.keys(): commands = data["commands"]
commandsToSend = []
for command in commands:
commandToSend = command
if len(parameters) > 0:
commandToSend = command % parameters
commandsToSend.append(commandToSend)
printer.commands(commandsToSend)
return NO_CONTENT
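# Illustrative request bodies for the /printer/command endpoint above. The exact
# G-code lines are assumptions for documentation purposes only; what matters is
# the shape of the JSON and the %-style parameter substitution performed above:
#   {"command": "M106 S%(speed)d", "parameters": {"speed": 255}}  -> sends "M106 S255"
#   {"commands": ["G28", "M104 S210"]}                            -> sends both lines verbatim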
@api.route("/printer/command/custom", methods=["GET"])
def getCustomControls():
# TODO: document me
customControls = settings().get(["controls"])
return jsonify(controls=customControls)
def _getTemperatureData(filter):
if not printer.isOperational():
return make_response("Printer is not operational", 409)
tempData = printer.getCurrentTemperatures()
result = {
"temps": filter(tempData)
}
if "history" in request.values.keys() and request.values["history"] in valid_boolean_trues:
tempHistory = printer.getTemperatureHistory()
limit = 300
if "limit" in request.values.keys() and unicode(request.values["limit"]).isnumeric():
limit = int(request.values["limit"])
history = list(tempHistory)
limit = min(limit, len(history))
result.update({
"history": map(lambda x: filter(x), history[-limit:])
})
return result
| C-o-r-E/OctoPrint | src/octoprint/server/api/printer.py | Python | agpl-3.0 | 9,354 |
#!/usr/bin/python
#
# \file 0_setup.py
# \brief Setup rbank
# \date 2009-03-10-22-43-GMT
# \author Jan Boon (Kaetemi)
# Python port of game data build pipeline.
# Setup rbank
#
# NeL - MMORPG Framework <http://dev.ryzom.com/projects/nel/>
# Copyright (C) 2010 Winch Gate Property Limited
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import time, sys, os, shutil, subprocess, distutils.dir_util
sys.path.append("../../configuration")
if os.path.isfile("log.log"):
os.remove("log.log")
log = open("log.log", "w")
from scripts import *
from buildsite import *
from process import *
from tools import *
from directories import *
printLog(log, "")
printLog(log, "-------")
printLog(log, "--- Setup rbank")
printLog(log, "-------")
printLog(log, time.strftime("%Y-%m-%d %H:%MGMT", time.gmtime(time.time())))
printLog(log, "")
# Setup source directories
printLog(log, ">>> Setup source directories <<<")
for dir in RBankCmbSourceDirectories:
mkPath(log, DatabaseDirectory + "/" + dir)
mkPath(log, LeveldesignWorldDirectory)
# Setup export directories
printLog(log, ">>> Setup export directories <<<")
mkPath(log, ExportBuildDirectory + "/" + RBankCmbExportDirectory)
mkPath(log, ExportBuildDirectory + "/" + RBankCmbTagExportDirectory)
mkPath(log, ExportBuildDirectory + "/" + SmallbankExportDirectory)
# Setup build directories
printLog(log, ">>> Setup build directories <<<")
mkPath(log, ExportBuildDirectory + "/" + ZoneWeldBuildDirectory)
for dir in IgLookupDirectories:
mkPath(log, ExportBuildDirectory + "/" + dir)
for dir in ShapeLookupDirectories:
mkPath(log, ExportBuildDirectory + "/" + dir)
mkPath(log, ExportBuildDirectory + "/" + RbankBboxBuildDirectory)
mkPath(log, ExportBuildDirectory + "/" + IgLandBuildDirectory)
mkPath(log, ExportBuildDirectory + "/" + IgOtherBuildDirectory)
mkPath(log, ExportBuildDirectory + "/" + RbankTessellationBuildDirectory)
mkPath(log, ExportBuildDirectory + "/" + RbankSmoothBuildDirectory)
mkPath(log, ExportBuildDirectory + "/" + RbankRawBuildDirectory)
mkPath(log, ExportBuildDirectory + "/" + RbankPreprocBuildDirectory)
mkPath(log, ExportBuildDirectory + "/" + RbankRetrieversBuildDirectory)
mkPath(log, ExportBuildDirectory + "/" + RbankOutputBuildDirectory)
# Setup client directories
printLog(log, ">>> Setup client directories <<<")
mkPath(log, InstallDirectory + "/" + PacsInstallDirectory)
log.close()
# end of file
| osgcc/ryzom | nel/tools/build_gamedata/processes/rbank/0_setup.py | Python | agpl-3.0 | 2,998 |
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from jormungandr.interfaces.v1.serializer import serialize_with
def get_serializer(serpy):
return serialize_with(serpy)
def get_obj_serializer(obj):
return get_serializer(serpy=obj.output_type_serializer)
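# Hypothetical usage sketch (the names below are illustrative, not from this
# module): given a resource instance `res` whose class defines
# `output_type_serializer = SomeSerpySerializer`, calling
# `get_obj_serializer(res)` returns the same decorator as
# `serialize_with(SomeSerpySerializer)`, ready to wrap a view method.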
| xlqian/navitia | source/jormungandr/jormungandr/interfaces/v1/decorators.py | Python | agpl-3.0 | 1,461 |
#!/usr/bin/env python
from django.core.management import call_command
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def handle(self, *args, **options):
call_command(
'dumpdata',
"waffle.flag",
indent=4,
use_natural_foreign_keys=True,
use_natural_primary_keys=True,
output='base/fixtures/waffle_flags.json'
)
| uclouvain/OSIS-Louvain | base/management/commands/dump_waffle_flags.py | Python | agpl-3.0 | 436 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
##
# ipy.py: Interaction with IPython and Jupyter.
##
# © 2016 Chris Ferrie ([email protected]) and
# Christopher E. Granade ([email protected])
#
# This file is a part of the Qinfer project.
# Licensed under the AGPL version 3.
##
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##
## FEATURES ###################################################################
from __future__ import absolute_import
from __future__ import division
## EXPORTS ###################################################################
__all__ = ['IPythonProgressBar']
## IMPORTS ####################################################################
try:
from IPython.display import display
import ipywidgets as ipw
except:
display = None
ipw = None
## CLASSES ###################################################################
class IPythonProgressBar(object):
"""
Represents a progress bar as an IPython widget. If the widget
    is closed by the user, or by calling ``finished()``, any further
operations will be ignored.
.. note::
This progress bar is compatible with QuTiP progress bar
classes.
"""
def __init__(self):
if ipw is None:
raise ImportError("IPython support requires the ipywidgets package.")
self.widget = ipw.FloatProgress(
value=0.0, min=0.0, max=100.0, step=0.5,
description=""
)
@property
def description(self):
"""
Text description for the progress bar widget,
or ``None`` if the widget has been closed.
:type: `str`
"""
try:
return self.widget.description
except:
return None
@description.setter
def description(self, value):
try:
self.widget.description = value
except:
pass
def start(self, max):
"""
Displays the progress bar for a given maximum value.
:param float max: Maximum value of the progress bar.
"""
try:
self.widget.max = max
display(self.widget)
except:
pass
def update(self, n):
"""
Updates the progress bar to display a new value.
"""
try:
self.widget.value = n
except:
pass
def finished(self):
"""
Destroys the progress bar.
"""
try:
self.widget.close()
except:
pass
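# A minimal usage sketch (illustrative only, not part of the original module).
# It assumes a running Jupyter kernel with ipywidgets installed, since the
# constructor raises ImportError otherwise:
def _example_progress_bar(n_steps=100):
    """Drive an IPythonProgressBar through a simple loop."""
    bar = IPythonProgressBar()
    bar.description = "Example task"
    bar.start(n_steps)              # display the widget with the given maximum
    for step in range(n_steps):
        bar.update(step + 1)        # advance the bar
    bar.finished()                  # close the widget when done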
| MichalKononenko/python-qinfer | src/qinfer/ipy.py | Python | agpl-3.0 | 3,141 |
# -*- encoding: utf-8 -*-
###############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 Savoir-faire Linux (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import tools
from openerp.osv import fields, orm
class hr_language(orm.Model):
_name = 'hr.language'
_columns = {
'name': fields.selection(tools.scan_languages(), 'Language', required=True),
'description': fields.char('Description', size=64, required=True, translate=True),
'employee_id': fields.many2one('hr.employee', 'Employee', required=True),
'read': fields.boolean('Read'),
'write': fields.boolean('Write'),
'speak': fields.boolean('Speak'),
}
_defaults = {
'read': True,
'write': True,
'speak': True,
}
class hr_employee(orm.Model):
_inherit = 'hr.employee'
_columns = {
'language_ids': fields.one2many('hr.language', 'employee_id', 'Languages'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| bwrsandman/openerp-hr | hr_language/hr_language.py | Python | agpl-3.0 | 1,839 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
##
# distributions.py: module for probability distributions.
##
# © 2017, Chris Ferrie ([email protected]) and
# Christopher Granade ([email protected]).
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
##
## IMPORTS ###################################################################
from __future__ import division
from __future__ import absolute_import
from builtins import range
from future.utils import with_metaclass
import numpy as np
import scipy.stats as st
import scipy.linalg as la
from scipy.interpolate import interp1d
from scipy.integrate import cumtrapz
from scipy.spatial import ConvexHull, Delaunay
from functools import partial
import abc
from qinfer import utils as u
from qinfer.metrics import rescaled_distance_mtx
from qinfer.clustering import particle_clusters
from qinfer._exceptions import ApproximationWarning
import warnings
## EXPORTS ###################################################################
__all__ = [
'Distribution',
'SingleSampleMixin',
'MixtureDistribution',
'ParticleDistribution',
'ProductDistribution',
'UniformDistribution',
'DiscreteUniformDistribution',
'MVUniformDistribution',
'ConstantDistribution',
'NormalDistribution',
'MultivariateNormalDistribution',
'SlantedNormalDistribution',
'LogNormalDistribution',
'BetaDistribution',
'DirichletDistribution',
'BetaBinomialDistribution',
'GammaDistribution',
'GinibreUniform',
'HaarUniform',
'HilbertSchmidtUniform',
'PostselectedDistribution',
'ConstrainedSumDistribution',
'InterpolatedUnivariateDistribution'
]
## FUNCTIONS #################################################################
def scipy_dist(name, *args, **kwargs):
"""
Wraps calling a scipy.stats distribution to allow for pickling.
See https://github.com/scipy/scipy/issues/3125.
"""
return getattr(st, name)(*args, **kwargs)
## ABSTRACT CLASSES AND MIXINS ###############################################
class Distribution(with_metaclass(abc.ABCMeta, object)):
"""
Abstract base class for probability distributions on one or more random
variables.
"""
@abc.abstractproperty
def n_rvs(self):
"""
The number of random variables that this distribution is over.
:type: `int`
"""
pass
@abc.abstractmethod
def sample(self, n=1):
"""
Returns one or more samples from this probability distribution.
:param int n: Number of samples to return.
:rtype: numpy.ndarray
:return: An array containing samples from the
distribution of shape ``(n, d)``, where ``d`` is the number of
random variables.
"""
pass
class SingleSampleMixin(with_metaclass(abc.ABCMeta, object)):
"""
Mixin class that extends a class so as to generate multiple samples
correctly, given a method ``_sample`` that generates one sample at a time.
"""
@abc.abstractmethod
def _sample(self):
pass
def sample(self, n=1):
samples = np.zeros((n, self.n_rvs))
for idx in range(n):
samples[idx, :] = self._sample()
return samples
## CLASSES ###################################################################
class MixtureDistribution(Distribution):
r"""
Samples from a weighted list of distributions.
:param weights: Length ``n_dist`` list or ``np.ndarray``
        of probabilities summing to 1.
:param dist: Either a length ``n_dist`` list of ``Distribution`` instances,
or a ``Distribution`` class, for example, ``NormalDistribution``.
It is assumed that a list of ``Distribution``s all
have the same ``n_rvs``.
:param dist_args: If ``dist`` is a class, an array
of shape ``(n_dist, n_rvs)`` where ``dist_args[k,:]`` defines
the arguments of the k'th distribution. Use ``None`` if the distribution
has no arguments.
:param dist_kw_args: If ``dist`` is a class, a dictionary
where each key's value is an array
of shape ``(n_dist, n_rvs)`` where ``dist_kw_args[key][k,:]`` defines
the keyword argument corresponding to ``key`` of the k'th distribution.
Use ``None`` if the distribution needs no keyword arguments.
:param bool shuffle: Whether or not to shuffle result after sampling. Not shuffling
will result in variates being in the same order as
the distributions. Default is ``True``.
"""
def __init__(self, weights, dist, dist_args=None, dist_kw_args=None, shuffle=True):
super(MixtureDistribution, self).__init__()
self._weights = weights
self._n_dist = len(weights)
self._shuffle = shuffle
try:
self._example_dist = dist[0]
self._is_dist_list = True
self._dist_list = dist
assert(self._n_dist == len(self._dist_list))
except:
self._is_dist_list = False
self._dist = dist
self._dist_args = dist_args
self._dist_kw_args = dist_kw_args
assert(self._n_dist == self._dist_args.shape[0])
self._example_dist = self._dist(
*self._dist_arg(0),
**self._dist_kw_arg(0)
)
def _dist_arg(self, k):
"""
Returns the arguments for the k'th distribution.
:param int k: Index of distribution in question.
        :rtype: ``np.ndarray``
"""
if self._dist_args is not None:
return self._dist_args[k,:]
else:
return []
def _dist_kw_arg(self, k):
"""
Returns a dictionary of keyword arguments
for the k'th distribution.
:param int k: Index of the distribution in question.
:rtype: ``dict``
"""
if self._dist_kw_args is not None:
return {
key:self._dist_kw_args[key][k,:]
for key in self._dist_kw_args.keys()
}
else:
return {}
@property
def n_rvs(self):
return self._example_dist.n_rvs
@property
def n_dist(self):
"""
The number of distributions in the mixture distribution.
"""
return self._n_dist
def sample(self, n=1):
# how many samples to take from each dist
ns = np.random.multinomial(n, self._weights)
idxs = np.arange(self.n_dist)[ns > 0]
if self._is_dist_list:
# sample from each distribution
samples = np.concatenate([
self._dist_list[k].sample(n=ns[k])
for k in idxs
])
else:
# instantiate each distribution and then sample
samples = np.concatenate([
self._dist(
*self._dist_arg(k),
**self._dist_kw_arg(k)
).sample(n=ns[k])
for k in idxs
])
# in-place shuffling
if self._shuffle:
np.random.shuffle(samples)
return samples
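# A minimal sketch of building a two-component Gaussian mixture with this class.
# Illustrative only; it relies on the NormalDistribution defined later in this
# module and the component parameters are arbitrary:
def _example_mixture():
    mix = MixtureDistribution(
        weights=[0.7, 0.3],
        dist=NormalDistribution,
        dist_args=np.array([[0.0, 1.0], [5.0, 0.25]])  # (mean, var) per component
    )
    return mix.sample(n=1000)  # array of shape (1000, 1)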
class ParticleDistribution(Distribution):
r"""
A distribution consisting of a list of weighted vectors.
Note that either `n_mps` or both (`particle_locations`, `particle_weights`)
must be specified, or an error will be raised.
:param numpy.ndarray particle_weights: Length ``n_particles`` list
of particle weights.
:param particle_locations: Shape ``(n_particles, n_mps)`` array of
particle locations.
:param int n_mps: Dimension of parameter space. This parameter should
only be set when `particle_weights` and `particle_locations` are
not set (and vice versa).
"""
def __init__(self, n_mps=None, particle_locations=None, particle_weights=None):
super(ParticleDistribution, self).__init__()
if particle_locations is None or particle_weights is None:
# Initialize with single particle at origin.
self.particle_locations = np.zeros((1, n_mps))
self.particle_weights = np.ones((1,))
elif n_mps is None:
self.particle_locations = particle_locations
self.particle_weights = np.abs(particle_weights)
self.particle_weights = self.particle_weights / np.sum(self.particle_weights)
else:
raise ValueError('Either the dimension of parameter space, `n_mps`, or the particles, `particle_locations` and `particle_weights` must be specified.')
@property
def n_particles(self):
"""
Returns the number of particles in the distribution
:type: `int`
"""
return self.particle_locations.shape[0]
@property
def n_ess(self):
"""
Returns the effective sample size (ESS) of the current particle
distribution.
:type: `float`
:return: The effective sample size, given by :math:`1/\sum_i w_i^2`.
"""
return 1 / (np.sum(self.particle_weights**2))
## DISTRIBUTION CONTRACT ##
@property
def n_rvs(self):
"""
Returns the dimension of each particle.
:type: `int`
"""
return self.particle_locations.shape[1]
def sample(self, n=1):
"""
Returns random samples from the current particle distribution according
to particle weights.
:param int n: The number of samples to draw.
:return: The sampled model parameter vectors.
:rtype: `~numpy.ndarray` of shape ``(n, updater.n_rvs)``.
"""
cumsum_weights = np.cumsum(self.particle_weights)
return self.particle_locations[np.minimum(cumsum_weights.searchsorted(
np.random.random((n,)),
side='right'
), len(cumsum_weights) - 1)]
## MOMENT FUNCTIONS ##
@staticmethod
def particle_mean(weights, locations):
r"""
Returns the arithmetic mean of the `locations` weighted by `weights`
:param numpy.ndarray weights: Weights of each particle in array of
shape ``(n_particles,)``.
:param numpy.ndarray locations: Locations of each particle in array
of shape ``(n_particles, n_modelparams)``
:rtype: :class:`numpy.ndarray`, shape ``(n_modelparams,)``.
:returns: An array containing the mean
"""
return np.dot(weights, locations)
@classmethod
def particle_covariance_mtx(cls, weights, locations):
"""
Returns an estimate of the covariance of a distribution
        represented by a given set of SMC particles.
:param weights: An array of shape ``(n_particles,)`` containing
the weights of each particle.
        :param locations: An array of shape ``(n_particles, n_modelparams)``
containing the locations of each particle.
:rtype: :class:`numpy.ndarray`, shape
``(n_modelparams, n_modelparams)``.
:returns: An array containing the estimated covariance matrix.
"""
# Find the mean model vector, shape (n_modelparams, ).
mu = cls.particle_mean(weights, locations)
# Transpose the particle locations to have shape
# (n_modelparams, n_particles).
xs = locations.transpose([1, 0])
# Give a shorter name to the particle weights, shape (n_particles, ).
ws = weights
cov = (
# This sum is a reduction over the particle index, chosen to be
# axis=2. Thus, the sum represents an expectation value over the
# outer product $x . x^T$.
#
# All three factors have the particle index as the rightmost
# index, axis=2. Using the Einstein summation convention (ESC),
# we can reduce over the particle index easily while leaving
# the model parameter index to vary between the two factors
# of xs.
#
# This corresponds to evaluating A_{m,n} = w_{i} x_{m,i} x_{n,i}
# using the ESC, where A_{m,n} is the temporary array created.
np.einsum('i,mi,ni', ws, xs, xs)
            # We finish by subtracting from the above expectation value
# the outer product $mu . mu^T$.
- np.dot(mu[..., np.newaxis], mu[np.newaxis, ...])
)
# The SMC approximation is not guaranteed to produce a
# positive-semidefinite covariance matrix. If a negative eigenvalue
# is produced, we should warn the caller of this.
assert np.all(np.isfinite(cov))
if not np.all(la.eig(cov)[0] >= 0):
warnings.warn('Numerical error in covariance estimation causing positive semidefinite violation.', ApproximationWarning)
return cov
def est_mean(self):
"""
Returns the mean value of the current particle distribution.
:rtype: :class:`numpy.ndarray`, shape ``(n_mps,)``.
        :returns: An array containing an estimate of the mean model vector.
"""
return self.particle_mean(self.particle_weights,
self.particle_locations)
def est_meanfn(self, fn):
"""
        Returns the expectation value of a given function
:math:`f` over the current particle distribution.
Here, :math:`f` is represented by a function ``fn`` that is vectorized
over particles, such that ``f(modelparams)`` has shape
``(n_particles, k)``, where ``n_particles = modelparams.shape[0]``, and
where ``k`` is a positive integer.
:param callable fn: Function implementing :math:`f` in a vectorized
manner. (See above.)
:rtype: :class:`numpy.ndarray`, shape ``(k, )``.
        :returns: An array containing an estimate of the mean of :math:`f`.
"""
return np.einsum('i...,i...',
self.particle_weights, fn(self.particle_locations)
)
def est_covariance_mtx(self, corr=False):
"""
Returns the full-rank covariance matrix of the current particle
distribution.
:param bool corr: If `True`, the covariance matrix is normalized
by the outer product of the square root diagonal of the covariance matrix,
i.e. the correlation matrix is returned instead.
:rtype: :class:`numpy.ndarray`, shape
``(n_modelparams, n_modelparams)``.
:returns: An array containing the estimated covariance matrix.
"""
cov = self.particle_covariance_mtx(self.particle_weights,
self.particle_locations)
if corr:
dstd = np.sqrt(np.diag(cov))
cov /= (np.outer(dstd, dstd))
return cov
## INFORMATION QUANTITIES ##
def est_entropy(self):
r"""
Estimates the entropy of the current particle distribution
as :math:`-\sum_i w_i \log w_i` where :math:`\{w_i\}`
is the set of particles with nonzero weight.
"""
nz_weights = self.particle_weights[self.particle_weights > 0]
return -np.sum(np.log(nz_weights) * nz_weights)
def _kl_divergence(self, other_locs, other_weights, kernel=None, delta=1e-2):
"""
Finds the KL divergence between this and another particle
distribution by using a kernel density estimator to smooth over the
other distribution's particles.
"""
if kernel is None:
kernel = st.norm(loc=0, scale=1).pdf
dist = rescaled_distance_mtx(self, other_locs) / delta
K = kernel(dist)
return -self.est_entropy() - (1 / delta) * np.sum(
self.particle_weights *
np.log(
np.sum(
other_weights * K,
axis=1 # Sum over the particles of ``other``.
)
),
axis=0 # Sum over the particles of ``self``.
)
def est_kl_divergence(self, other, kernel=None, delta=1e-2):
"""
Finds the KL divergence between this and another particle
distribution by using a kernel density estimator to smooth over the
other distribution's particles.
:param SMCUpdater other:
"""
return self._kl_divergence(
other.particle_locations,
other.particle_weights,
kernel, delta
)
## CLUSTER ESTIMATION METHODS #############################################
def est_cluster_moments(self, cluster_opts=None):
# TODO: document
if cluster_opts is None:
cluster_opts = {}
for cluster_label, cluster_particles in particle_clusters(
self.particle_locations, self.particle_weights,
**cluster_opts
):
w = self.particle_weights[cluster_particles]
l = self.particle_locations[cluster_particles]
yield (
cluster_label,
sum(w), # The zeroth moment is very useful here!
self.particle_mean(w, l),
self.particle_covariance_mtx(w, l)
)
def est_cluster_covs(self, cluster_opts=None):
# TODO: document
cluster_moments = np.array(
list(self.est_cluster_moments(cluster_opts=cluster_opts)),
dtype=[
('label', 'int'),
('weight', 'float64'),
('mean', '{}float64'.format(self.n_rvs)),
('cov', '{0},{0}float64'.format(self.n_rvs)),
])
ws = cluster_moments['weight'][:, np.newaxis, np.newaxis]
within_cluster_var = np.sum(ws * cluster_moments['cov'], axis=0)
between_cluster_var = self.particle_covariance_mtx(
# Treat the cluster means as a new very small particle cloud.
cluster_moments['weight'], cluster_moments['mean']
)
total_var = within_cluster_var + between_cluster_var
return within_cluster_var, between_cluster_var, total_var
def est_cluster_metric(self, cluster_opts=None):
"""
Returns an estimate of how much of the variance in the current posterior
can be explained by a separation between *clusters*.
"""
wcv, bcv, tv = self.est_cluster_covs(cluster_opts)
return np.diag(bcv) / np.diag(tv)
## REGION ESTIMATION METHODS ##############################################
def est_credible_region(self, level=0.95, return_outside=False, modelparam_slice=None):
"""
Returns an array containing particles inside a credible region of a
given level, such that the described region has probability mass
no less than the desired level.
Particles in the returned region are selected by including the highest-
weight particles first until the desired credibility level is reached.
        :param float level: Credibility level to report.
:param bool return_outside: If `True`, the return value is a tuple
of the those particles within the credible region, and the rest
of the posterior particle cloud.
:param slice modelparam_slice: Slice over which model parameters
to consider.
:rtype: :class:`numpy.ndarray`, shape ``(n_credible, n_mps)``,
where ``n_credible`` is the number of particles in the credible
region and ``n_mps`` corresponds to the size of ``modelparam_slice``.
If ``return_outside`` is ``True``, this method instead
returns tuple ``(inside, outside)`` where ``inside`` is as
described above, and ``outside`` has shape ``(n_particles-n_credible, n_mps)``.
:return: An array of particles inside the estimated credible region. Or,
if ``return_outside`` is ``True``, both the particles inside and the
particles outside, as a tuple.
"""
# which slice of modelparams to take
s_ = np.s_[modelparam_slice] if modelparam_slice is not None else np.s_[:]
mps = self.particle_locations[:, s_]
# Start by sorting the particles by weight.
# We do so by obtaining an array of indices `id_sort` such that
# `particle_weights[id_sort]` is in descending order.
id_sort = np.argsort(self.particle_weights)[::-1]
        # Find the cumulative sum of the sorted weights.
cumsum_weights = np.cumsum(self.particle_weights[id_sort])
# Find all the indices where the sum is less than level.
# We first find id_cred such that
# `all(cumsum_weights[id_cred] <= level)`.
id_cred = cumsum_weights <= level
# By construction, by adding the next particle to id_cred, it must be
# true that `cumsum_weights[id_cred] >= level`, as required.
id_cred[np.sum(id_cred)] = True
# We now return a slice onto the particle_locations by first permuting
# the particles according to the sort order, then by selecting the
# credible particles.
if return_outside:
return (
mps[id_sort][id_cred],
mps[id_sort][np.logical_not(id_cred)]
)
else:
return mps[id_sort][id_cred]
def region_est_hull(self, level=0.95, modelparam_slice=None):
"""
Estimates a credible region over models by taking the convex hull of
a credible subset of particles.
        :param float level: The desired credibility level (see
:meth:`SMCUpdater.est_credible_region`).
:param slice modelparam_slice: Slice over which model parameters
to consider.
:return: The tuple ``(faces, vertices)`` where ``faces`` describes all the
vertices of all of the faces on the exterior of the convex hull, and
``vertices`` is a list of all vertices on the exterior of the
convex hull.
:rtype: ``faces`` is a ``numpy.ndarray`` with shape
            ``(n_face, n_mps, n_mps)`` and indices ``(idx_face, idx_vertex, idx_mps)``
where ``n_mps`` corresponds to the size of ``modelparam_slice``.
``vertices`` is an ``numpy.ndarray`` of shape ``(n_vertices, n_mps)``.
"""
points = self.est_credible_region(
level=level,
modelparam_slice=modelparam_slice
)
hull = ConvexHull(points)
return points[hull.simplices], points[u.uniquify(hull.vertices.flatten())]
def region_est_ellipsoid(self, level=0.95, tol=0.0001, modelparam_slice=None):
r"""
Estimates a credible region over models by finding the minimum volume
enclosing ellipse (MVEE) of a credible subset of particles.
        :param float level: The desired credibility level (see
:meth:`SMCUpdater.est_credible_region`).
:param float tol: The allowed error tolerance in the MVEE optimization
(see :meth:`~qinfer.utils.mvee`).
:param slice modelparam_slice: Slice over which model parameters
to consider.
:return: A tuple ``(A, c)`` where ``A`` is the covariance
matrix of the ellipsoid and ``c`` is the center.
A point :math:`\vec{x}` is in the ellipsoid whenever
:math:`(\vec{x}-\vec{c})^{T}A^{-1}(\vec{x}-\vec{c})\leq 1`.
:rtype: ``A`` is ``np.ndarray`` of shape ``(n_mps,n_mps)`` and
``centroid`` is ``np.ndarray`` of shape ``(n_mps)``.
``n_mps`` corresponds to the size of ``param_slice``.
"""
_, vertices = self.region_est_hull(level=level, modelparam_slice=modelparam_slice)
A, centroid = u.mvee(vertices, tol)
return A, centroid
def in_credible_region(self, points, level=0.95, modelparam_slice=None, method='hpd-hull', tol=0.0001):
"""
Decides whether each of the points lie within a credible region
of the current distribution.
If ``tol`` is ``None``, the particles are tested directly against
the convex hull object. If ``tol`` is a positive ``float``,
particles are tested to be in the interior of the smallest
enclosing ellipsoid of this convex hull, see
:meth:`SMCUpdater.region_est_ellipsoid`.
:param np.ndarray points: An ``np.ndarray`` of shape ``(n_mps)`` for
a single point, or of shape ``(n_points, n_mps)`` for multiple points,
where ``n_mps`` corresponds to the same dimensionality as ``param_slice``.
        :param float level: The desired credibility level (see
:meth:`SMCUpdater.est_credible_region`).
:param str method: A string specifying which credible region estimator to
use. One of ``'pce'``, ``'hpd-hull'`` or ``'hpd-mvee'`` (see below).
:param float tol: The allowed error tolerance for those methods
which require a tolerance (see :meth:`~qinfer.utils.mvee`).
:param slice modelparam_slice: A slice describing which model parameters
            to consider in the credible region, effectively marginalizing out the
remaining parameters. By default, all model parameters are included.
:return: A boolean array of shape ``(n_points, )`` specifying whether
each of the points lies inside the confidence region.
Methods
~~~~~~~
The following values are valid for the ``method`` argument.
- ``'pce'``: Posterior Covariance Ellipsoid.
Computes the covariance
matrix of the particle distribution marginalized over the excluded
slices and uses the :math:`\chi^2` distribution to determine
how to rescale it such the the corresponding ellipsoid has
the correct size. The ellipsoid is translated by the
mean of the particle distribution. It is determined which
of the ``points`` are on the interior.
- ``'hpd-hull'``: High Posterior Density Convex Hull.
See :meth:`SMCUpdater.region_est_hull`. Computes the
HPD region resulting from the particle approximation, computes
the convex hull of this, and it is determined which
of the ``points`` are on the interior.
- ``'hpd-mvee'``: High Posterior Density Minimum Volume Enclosing Ellipsoid.
See :meth:`SMCUpdater.region_est_ellipsoid`
and :meth:`~qinfer.utils.mvee`. Computes the
HPD region resulting from the particle approximation, computes
the convex hull of this, and determines the minimum enclosing
            ellipsoid. Determines which
of the ``points`` are on the interior.
"""
if method == 'pce':
s_ = np.s_[modelparam_slice] if modelparam_slice is not None else np.s_[:]
A = self.est_covariance_mtx()[s_, s_]
c = self.est_mean()[s_]
# chi-squared distribution gives correct level curve conversion
mult = st.chi2.ppf(level, c.size)
results = u.in_ellipsoid(points, mult * A, c)
elif method == 'hpd-mvee':
tol = 0.0001 if tol is None else tol
A, c = self.region_est_ellipsoid(level=level, tol=tol, modelparam_slice=modelparam_slice)
results = u.in_ellipsoid(points, np.linalg.inv(A), c)
elif method == 'hpd-hull':
# it would be more natural to call region_est_hull,
# but that function uses ConvexHull which has no
# easy way of determining if a point is interior.
# Here, Delaunay gives us access to all of the
# necessary simplices.
# this fills the convex hull with (n_mps+1)-dimensional
# simplices; the convex hull is an almost-everywhere
# disjoint union of these simplices
hull = Delaunay(self.est_credible_region(level=level, modelparam_slice=modelparam_slice))
# now we just check whether each of the given points are in
# any of the simplices. (http://stackoverflow.com/a/16898636/1082565)
results = hull.find_simplex(points) >= 0
return results
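# Minimal sketch: wrap an explicit particle cloud and query a few summary
# statistics. Illustrative only (random particles, uniform weights); not part
# of the original API surface:
def _example_particle_distribution():
    locs = np.random.randn(500, 2)              # 500 particles in a 2-parameter space
    weights = np.ones((500,)) / 500             # uniform weights
    dist = ParticleDistribution(particle_locations=locs, particle_weights=weights)
    mean = dist.est_mean()                      # shape (2,)
    cov = dist.est_covariance_mtx()             # shape (2, 2)
    resampled = dist.sample(n=100)              # draw 100 particles according to weight
    return mean, cov, resampled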
class ProductDistribution(Distribution):
r"""
Takes a non-zero number of QInfer distributions :math:`D_k` as input
and returns their Cartesian product.
In other words, the returned distribution is
:math:`\Pr(D_1, \dots, D_N) = \prod_k \Pr(D_k)`.
:param Distribution factors:
Distribution objects representing :math:`D_k`.
Alternatively, one iterable argument can be given,
in which case the factors are the values drawn from that iterator.
"""
def __init__(self, *factors):
if len(factors) == 1:
try:
self._factors = list(factors[0])
except:
self._factors = factors
else:
self._factors = factors
@property
def n_rvs(self):
return sum([f.n_rvs for f in self._factors])
def sample(self, n=1):
return np.hstack([f.sample(n) for f in self._factors])
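# Sketch of a product prior over independent blocks of parameters. Illustrative
# only; it relies on the UniformDistribution and NormalDistribution classes
# defined below in this module:
def _example_product_prior():
    prior = ProductDistribution(
        UniformDistribution(np.array([[0, 1], [0, 2]])),  # two uniform parameters
        NormalDistribution(mean=0, var=1)                 # one normally distributed parameter
    )
    assert prior.n_rvs == 3
    return prior.sample(n=10)  # array of shape (10, 3)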
_DEFAULT_RANGES = np.array([[0, 1]])
_DEFAULT_RANGES.flags.writeable = False # Prevent anyone from modifying the
# default ranges.
## CLASSES ###################################################################
class UniformDistribution(Distribution):
"""
Uniform distribution on a given rectangular region.
:param numpy.ndarray ranges: Array of shape ``(n_rvs, 2)``, where ``n_rvs``
is the number of random variables, specifying the upper and lower limits
for each variable.
"""
def __init__(self, ranges=_DEFAULT_RANGES):
if not isinstance(ranges, np.ndarray):
ranges = np.array(ranges)
if len(ranges.shape) == 1:
ranges = ranges[np.newaxis, ...]
self._ranges = ranges
self._n_rvs = ranges.shape[0]
self._delta = ranges[:, 1] - ranges[:, 0]
@property
def n_rvs(self):
return self._n_rvs
def sample(self, n=1):
shape = (n, self._n_rvs)# if n == 1 else (self._n_rvs, n)
z = np.random.random(shape)
return self._ranges[:, 0] + z * self._delta
def grad_log_pdf(self, var):
# THIS IS NOT TECHNICALLY LEGIT; BCRB doesn't technically work with a
# prior that doesn't go to 0 at its end points. But we do it anyway.
if var.shape[0] == 1:
return 12/(self._delta)**2
else:
return np.zeros(var.shape)
class ConstantDistribution(Distribution):
"""
Represents a determinstic variable; useful for combining with other
distributions, marginalizing, etc.
:param values: Shape ``(n,)`` array or list of values :math:`X_0` such that
:math:`\Pr(X) = \delta(X - X_0)`.
"""
def __init__(self, values):
self._values = np.array(values)[np.newaxis, :]
@property
def n_rvs(self):
return self._values.shape[1]
def sample(self, n=1):
return np.repeat(self._values, n, axis=0)
class NormalDistribution(Distribution):
"""
Normal or truncated normal distribution over a single random
variable.
:param float mean: Mean of the represented random variable.
:param float var: Variance of the represented random variable.
:param tuple trunc: Limits at which the PDF of this
distribution should be truncated, or ``None`` if
the distribution is to have infinite support.
"""
def __init__(self, mean, var, trunc=None):
self.mean = mean
self.var = var
if trunc is not None:
low, high = trunc
sigma = np.sqrt(var)
a = (low - mean) / sigma
b = (high - mean) / sigma
self.dist = partial(scipy_dist, 'truncnorm', a, b, loc=mean, scale=np.sqrt(var))
else:
self.dist = partial(scipy_dist, 'norm', mean, np.sqrt(var))
@property
def n_rvs(self):
return 1
def sample(self, n=1):
return self.dist().rvs(size=n)[:, np.newaxis]
def grad_log_pdf(self, x):
return -(x - self.mean) / self.var
class MultivariateNormalDistribution(Distribution):
"""
Multivariate (vector-valued) normal distribution.
:param np.ndarray mean: Array of shape ``(n_rvs, )``
representing the mean of the distribution.
:param np.ndarray cov: Array of shape ``(n_rvs, n_rvs)``
representing the covariance matrix of the distribution.
"""
def __init__(self, mean, cov):
# Flatten the mean first, so we have a strong guarantee about its
# shape.
self.mean = np.array(mean).flatten()
self.cov = cov
self.invcov = la.inv(cov)
@property
def n_rvs(self):
return self.mean.shape[0]
def sample(self, n=1):
return np.einsum("ij,nj->ni", la.sqrtm(self.cov), np.random.randn(n, self.n_rvs)) + self.mean
def grad_log_pdf(self, x):
return -np.dot(self.invcov, (x - self.mean).transpose()).transpose()
class SlantedNormalDistribution(Distribution):
r"""
Uniform distribution on a given rectangular region with
additive noise. Random variates from this distribution
follow :math:`X+Y` where :math:`X` is drawn uniformly
with respect to the rectangular region defined by ranges, and
:math:`Y` is normally distributed about 0 with variance
``weight**2``.
:param numpy.ndarray ranges: Array of shape ``(n_rvs, 2)``, where ``n_rvs``
is the number of random variables, specifying the upper and lower limits
for each variable.
:param float weight: Number specifying the inverse variance
of the additive noise term.
"""
def __init__(self, ranges=_DEFAULT_RANGES, weight=0.01):
if not isinstance(ranges, np.ndarray):
ranges = np.array(ranges)
if len(ranges.shape) == 1:
ranges = ranges[np.newaxis, ...]
self._ranges = ranges
self._n_rvs = ranges.shape[0]
self._delta = ranges[:, 1] - ranges[:, 0]
self._weight = weight
@property
def n_rvs(self):
return self._n_rvs
def sample(self, n=1):
shape = (n, self._n_rvs)# if n == 1 else (self._n_rvs, n)
z = np.random.randn(n, self._n_rvs)
return self._ranges[:, 0] + \
self._weight*z + \
np.random.rand(n, self._n_rvs)*self._delta[np.newaxis,:]
class LogNormalDistribution(Distribution):
"""
Log-normal distribution.
:param mu: Location parameter (numeric), set to 0 by default.
:param sigma: Scale parameter (numeric), set to 1 by default.
Must be strictly greater than zero.
"""
def __init__(self, mu=0, sigma=1):
self.mu = mu # lognormal location parameter
self.sigma = sigma # lognormal scale parameter
self.dist = partial(scipy_dist, 'lognorm', 1, mu, sigma) # scipy distribution location = 0
@property
def n_rvs(self):
return 1
def sample(self, n=1):
return self.dist().rvs(size=n)[:, np.newaxis]
class BetaDistribution(Distribution):
r"""
The beta distribution, whose pdf at :math:`x` is proportional to
:math:`x^{\alpha-1}(1-x)^{\beta-1}`.
Note that either ``alpha`` and ``beta``, or ``mean`` and ``var``, must be
specified as inputs;
either case uniquely determines the distribution.
:param float alpha: The alpha shape parameter of the beta distribution.
:param float beta: The beta shape parameter of the beta distribution.
:param float mean: The desired mean value of the beta distribution.
:param float var: The desired variance of the beta distribution.
"""
def __init__(self, alpha=None, beta=None, mean=None, var=None):
if alpha is not None and beta is not None:
self.alpha = alpha
self.beta = beta
self.mean = alpha / (alpha + beta)
self.var = alpha * beta / ((alpha + beta) ** 2 * (alpha + beta + 1))
elif mean is not None and var is not None:
self.mean = mean
self.var = var
self.alpha = mean ** 2 * (1 - mean) / var - mean
self.beta = (1 - mean) ** 2 * mean / var - (1 - mean)
else:
raise ValueError(
"BetaDistribution requires either (alpha and beta) "
"or (mean and var)."
)
self.dist = st.beta(a=self.alpha, b=self.beta)
@property
def n_rvs(self):
return 1
def sample(self, n=1):
return self.dist.rvs(size=n)[:, np.newaxis]
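# Sketch: the two parametrisations accepted above describe the same distribution.
# This is an illustrative consistency check, not part of the original test suite:
def _example_beta_parametrisations():
    by_shape = BetaDistribution(alpha=2.0, beta=5.0)
    by_moments = BetaDistribution(mean=by_shape.mean, var=by_shape.var)
    # Both objects should expose (numerically) identical shape parameters.
    return np.allclose([by_shape.alpha, by_shape.beta],
                       [by_moments.alpha, by_moments.beta])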
class DirichletDistribution(Distribution):
r"""
The dirichlet distribution, whose pdf at :math:`x` is proportional to
:math:`\prod_i x_i^{\alpha_i-1}`.
:param alpha: The list of concentration parameters.
"""
def __init__(self, alpha):
self._alpha = np.array(alpha)
if self.alpha.ndim != 1:
raise ValueError('The input alpha must be a 1D list of concentration parameters.')
self._dist = st.dirichlet(alpha=self.alpha)
@property
def alpha(self):
return self._alpha
@property
def n_rvs(self):
return self._alpha.size
def sample(self, n=1):
return self._dist.rvs(size=n)
class BetaBinomialDistribution(Distribution):
r"""
The beta-binomial distribution, whose pmf at the non-negative
integer :math:`k` is equal to
:math:`\binom{n}{k}\frac{B(k+\alpha,n-k+\beta)}{B(\alpha,\beta)}`
with :math:`B(\cdot,\cdot)` the beta function.
This is the compound distribution whose variates are binomial distributed
with a bias chosen from a beta distribution.
Note that either ``alpha`` and ``beta``, or ``mean`` and ``var``, must be
specified as inputs;
either case uniquely determines the distribution.
:param int n: The :math:`n` parameter of the beta-binomial distribution.
:param float alpha: The alpha shape parameter of the beta-binomial distribution.
:param float beta: The beta shape parameter of the beta-binomial distribution.
:param float mean: The desired mean value of the beta-binomial distribution.
:param float var: The desired variance of the beta-binomial distribution.
"""
def __init__(self, n, alpha=None, beta=None, mean=None, var=None):
self.n = n
if alpha is not None and beta is not None:
self.alpha = alpha
self.beta = beta
self.mean = n * alpha / (alpha + beta)
self.var = n * alpha * beta * (alpha + beta + n) / ((alpha + beta) ** 2 * (alpha + beta + 1))
elif mean is not None and var is not None:
self.mean = mean
self.var = var
self.alpha = - mean * (var + mean **2 - n * mean) / (mean ** 2 + n * (var - mean))
self.beta = (n - mean) * (var + mean ** 2 - n * mean) / ((n - mean) * mean - n * var)
else:
raise ValueError("BetaBinomialDistribution requires either (alpha and beta) or (mean and var).")
# Beta-binomial is a compound distribution, drawing binomial
# RVs off of a beta-distrubuted bias.
self._p_dist = st.beta(a=self.alpha, b=self.beta)
@property
def n_rvs(self):
return 1
def sample(self, n=1):
p_vals = self._p_dist.rvs(size=n)[:, np.newaxis]
# numpy.random.binomial supports sampling using different p values,
# whereas scipy does not.
return np.random.binomial(self.n, p_vals)
class GammaDistribution(Distribution):
r"""
The gamma distribution, whose pdf at :math:`x` is proportional to
    :math:`x^{\alpha-1}e^{-x\beta}`.
Note that either alpha and beta, or mean and var, must be
specified as inputs;
either case uniquely determines the distribution.
:param float alpha: The alpha shape parameter of the gamma distribution.
:param float beta: The beta shape parameter of the gamma distribution.
:param float mean: The desired mean value of the gamma distribution.
:param float var: The desired variance of the gamma distribution.
"""
def __init__(self, alpha=None, beta=None, mean=None, var=None):
if alpha is not None and beta is not None:
self.alpha = alpha
self.beta = beta
self.mean = alpha / beta
self.var = alpha / beta ** 2
elif mean is not None and var is not None:
self.mean = mean
self.var = var
self.alpha = mean ** 2 / var
self.beta = mean / var
else:
raise ValueError("GammaDistribution requires either (alpha and beta) or (mean and var).")
# This is the distribution we want up to a scale factor of beta
self._dist = st.gamma(self.alpha)
@property
def n_rvs(self):
return 1
def sample(self, n=1):
return self._dist.rvs(size=n)[:, np.newaxis] / self.beta
class MVUniformDistribution(Distribution):
r"""
Uniform distribution over the rectangle
:math:`[0,1]^{\text{dim}}` with the restriction
    that the vector must sum to 1. Equivalently, a
uniform distribution over the ``dim-1`` simplex
whose vertices are the canonical unit vectors of
:math:`\mathbb{R}^\text{dim}`.
:param int dim: Number of dimensions; ``n_rvs``.
"""
def __init__(self, dim = 6):
warnings.warn(
"This class has been deprecated, and may "
"be renamed in future versions.",
DeprecationWarning
)
self._dim = dim
@property
def n_rvs(self):
return self._dim
def sample(self, n = 1):
return np.random.mtrand.dirichlet(np.ones(self._dim),n)
class DiscreteUniformDistribution(Distribution):
"""
Discrete uniform distribution over the integers between
``0`` and ``2**num_bits-1`` inclusive.
:param int num_bits: non-negative integer specifying
how big to make the interval.
"""
def __init__(self, num_bits):
self._num_bits = num_bits
@property
def n_rvs(self):
return 1
def sample(self, n=1):
z = np.random.randint(2**self._num_bits,size=n)
return z
class HilbertSchmidtUniform(SingleSampleMixin, Distribution):
"""
    Creates a new Hilbert-Schmidt uniform prior on state space of dimension ``dim``.
See e.g. [Mez06]_ and [Mis12]_.
:param int dim: Dimension of the state space.
"""
def __init__(self, dim=2):
warnings.warn(
"This class has been deprecated; please see "
"qinfer.tomography.GinibreDistribution(rank=None).",
DeprecationWarning
)
self.dim = dim
self.paulis1Q = np.array([[[1,0],[0,1]],[[1,0],[0,-1]],[[0,-1j],[1j,0]],[[0,1],[1,0]]])
self.paulis = self.make_Paulis(self.paulis1Q, 4)
@property
def n_rvs(self):
return self.dim**2 - 1
def sample(self):
#Generate random unitary (see e.g. http://arxiv.org/abs/math-ph/0609050v2)
g = (np.random.randn(self.dim,self.dim) + 1j*np.random.randn(self.dim,self.dim))/np.sqrt(2.0)
q,r = la.qr(g)
d = np.diag(r)
ph = d/np.abs(d)
ph = np.diag(ph)
U = np.dot(q,ph)
#Generate random matrix
z = np.random.randn(self.dim,self.dim) + 1j*np.random.randn(self.dim,self.dim)
rho = np.dot(np.dot(np.identity(self.dim)+U,np.dot(z,z.conj().transpose())),np.identity(self.dim)+U.conj().transpose())
rho = rho/np.trace(rho)
x = np.zeros([self.n_rvs])
for idx in range(self.n_rvs):
x[idx] = np.real(np.trace(np.dot(rho,self.paulis[idx+1])))
return x
def make_Paulis(self,paulis,d):
if d == self.dim*2:
return paulis
else:
temp = np.zeros([d**2,d,d],dtype='complex128')
for idx in range(temp.shape[0]):
                temp[idx,:] = np.kron(paulis[int(idx // d)], self.paulis1Q[idx % 4])
return self.make_Paulis(temp,d*2)
class HaarUniform(SingleSampleMixin, Distribution):
"""
Haar uniform distribution of pure states of dimension ``dim``,
parameterized as coefficients of the Pauli basis.
:param int dim: Dimension of the state space.
.. note::
This distribution presently only works for ``dim==2`` and
the Pauli basis.
"""
def __init__(self, dim=2):
warnings.warn(
"This class has been deprecated; please see "
"qinfer.tomography.GinibreDistribution(rank=1).",
DeprecationWarning
)
# TODO: add basis as an option
self.dim = dim
@property
def n_rvs(self):
return 3
def _sample(self):
#Generate random unitary (see e.g. http://arxiv.org/abs/math-ph/0609050v2)
z = (np.random.randn(self.dim,self.dim) + 1j*np.random.randn(self.dim,self.dim))/np.sqrt(2.0)
q,r = la.qr(z)
d = np.diag(r)
ph = d/np.abs(d)
ph = np.diag(ph)
U = np.dot(q,ph)
#TODO: generalize this to general dimensions
#Apply Haar random unitary to |0> state to get random pure state
psi = np.dot(U,np.array([1,0]))
z = np.real(np.dot(psi.conj(),np.dot(np.array([[1,0],[0,-1]]),psi)))
y = np.real(np.dot(psi.conj(),np.dot(np.array([[0,-1j],[1j,0]]),psi)))
x = np.real(np.dot(psi.conj(),np.dot(np.array([[0,1],[1,0]]),psi)))
return np.array([x,y,z])
class GinibreUniform(SingleSampleMixin, Distribution):
"""
Creates a prior on state space of dimension dim according to the Ginibre
ensemble with parameter ``k``.
See e.g. [Mis12]_.
:param int dim: Dimension of the state space.
"""
def __init__(self,dim=2, k=2):
warnings.warn(
"This class has been deprecated; please see "
"qinfer.tomography.GinibreDistribution.",
DeprecationWarning
)
self.dim = dim
self.k = k
@property
def n_rvs(self):
return 3
def _sample(self):
#Generate random matrix
z = np.random.randn(self.dim,self.k) + 1j*np.random.randn(self.dim,self.k)
rho = np.dot(z,z.conj().transpose())
rho = rho/np.trace(rho)
z = np.real(np.trace(np.dot(rho,np.array([[1,0],[0,-1]]))))
y = np.real(np.trace(np.dot(rho,np.array([[0,-1j],[1j,0]]))))
x = np.real(np.trace(np.dot(rho,np.array([[0,1],[1,0]]))))
return np.array([x,y,z])
class PostselectedDistribution(Distribution):
"""
Postselects a distribution based on validity within a given model.
"""
# TODO: rewrite LiuWestResampler in terms of this and a
# new MixtureDistribution.
def __init__(self, distribution, model, maxiters=100):
self._dist = distribution
self._model = model
self._maxiters = maxiters
@property
def n_rvs(self):
return self._dist.n_rvs
def sample(self, n=1):
"""
Returns one or more samples from this probability distribution.
:param int n: Number of samples to return.
:return numpy.ndarray: An array containing samples from the
distribution of shape ``(n, d)``, where ``d`` is the number of
random variables.
"""
samples = np.empty((n, self.n_rvs))
idxs_to_sample = np.arange(n)
iters = 0
while idxs_to_sample.size and iters < self._maxiters:
samples[idxs_to_sample] = self._dist.sample(len(idxs_to_sample))
idxs_to_sample = idxs_to_sample[np.nonzero(np.logical_not(
self._model.are_models_valid(samples[idxs_to_sample, :])
))[0]]
iters += 1
if idxs_to_sample.size:
raise RuntimeError("Did not successfully postselect within {} iterations.".format(self._maxiters))
return samples
def grad_log_pdf(self, x):
return self._dist.grad_log_pdf(x)
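# Sketch of postselecting a prior on a validity constraint. The _UnitDiskModel
# stand-in below only mimics the single method this class needs
# (``are_models_valid``); it is illustrative and not a real qinfer model:
class _UnitDiskModel(object):
    def are_models_valid(self, modelparams):
        # accept only parameter vectors inside the unit disk
        return np.sum(modelparams**2, axis=1) <= 1
def _example_postselected_prior():
    prior = UniformDistribution(np.array([[-1, 1], [-1, 1]]))
    disk_prior = PostselectedDistribution(prior, _UnitDiskModel())
    return disk_prior.sample(n=100)  # every returned sample lies inside the unit disk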
class InterpolatedUnivariateDistribution(Distribution):
"""
Samples from a single-variable distribution specified by its PDF. The
samples are drawn by first drawing uniform samples over the interval
``[0, 1]``, and then using an interpolation of the inverse-CDF
corresponding to the given PDF to transform these samples into the
desired distribution.
:param callable pdf: Vectorized single-argument function that evaluates
the PDF of the desired distribution.
:param float compactification_scale: Scale of the compactified coordinates
used to interpolate the given PDF.
:param int n_interp_points: The number of points at which to sample the
given PDF.
"""
def __init__(self, pdf, compactification_scale=1, n_interp_points=1500):
self._pdf = pdf
self._xs = u.compactspace(compactification_scale, n_interp_points)
self._generate_interp()
def _generate_interp(self):
xs = self._xs
pdfs = self._pdf(xs)
norm_factor = np.trapz(pdfs, xs)
self._cdfs = cumtrapz(pdfs / norm_factor, xs, initial=0)
self._interp_inv_cdf = interp1d(self._cdfs, xs, bounds_error=False)
@property
def n_rvs(self):
return 1
def sample(self, n=1):
return self._interp_inv_cdf(np.random.random(n))[:, np.newaxis]
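# Sketch: draw samples from an unnormalised one-dimensional PDF supplied as a
# plain callable (illustrative only; the Laplace-like shape is arbitrary):
def _example_interpolated_sampler():
    pdf = lambda x: np.exp(-np.abs(x))   # need not be normalised
    dist = InterpolatedUnivariateDistribution(pdf)
    return dist.sample(n=1000)           # array of shape (1000, 1)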
class ConstrainedSumDistribution(Distribution):
"""
Samples from an underlying distribution and then
enforces that all samples must sum to some given
value by normalizing each sample.
:param Distribution underlying_distribution: Underlying probability distribution.
:param float desired_total: Desired sum of each sample.
"""
def __init__(self, underlying_distribution, desired_total=1):
super(ConstrainedSumDistribution, self).__init__()
self._ud = underlying_distribution
self.desired_total = desired_total
@property
def underlying_distribution(self):
return self._ud
@property
def n_rvs(self):
return self.underlying_distribution.n_rvs
def sample(self, n=1):
s = self.underlying_distribution.sample(n)
totals = np.sum(s, axis=1)[:,np.newaxis]
return self.desired_total * np.sign(totals) * s / totals
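# Sketch: renormalise uniform draws so that each sample sums to one
# (illustrative only; relies on the UniformDistribution defined above):
def _example_constrained_sum():
    base = UniformDistribution(np.array([[0, 1]] * 3))
    simplex_like = ConstrainedSumDistribution(base, desired_total=1)
    samples = simplex_like.sample(n=5)
    return samples.sum(axis=1)           # each entry is (numerically) 1.0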
| csferrie/python-qinfer | src/qinfer/distributions.py | Python | agpl-3.0 | 51,767 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PerlWwwRobotrules(PerlPackage):
"""Database of robots.txt-derived permissions"""
homepage = "http://deps.cpantesters.org/?module=WWW%3A%3ARobotRules;perl=latest"
url = "http://search.cpan.org/CPAN/authors/id/G/GA/GAAS/WWW-RobotRules-6.02.tar.gz"
version('6.02', sha256='46b502e7a288d559429891eeb5d979461dd3ecc6a5c491ead85d165b6e03a51e')
depends_on('perl-uri', type=('build', 'run'))
| iulian787/spack | var/spack/repos/builtin/packages/perl-www-robotrules/package.py | Python | lgpl-2.1 | 637 |
"""a readline console module (unix only).
[email protected]
the module starts a subprocess for the readline console and
communicates through pipes (prompt/cmd).
the console is polled through a timer, which depends on PySide.
"""
from select import select
import os
import sys
import signal
if __name__ == '__main__':
import readline
# prompt input stream
fd_in = int(sys.argv[1])
file_in = os.fdopen( fd_in )
# cmd output stream
fd_out = int(sys.argv[2])
file_out = os.fdopen( fd_out, 'w' )
# some helpers
def send(data):
file_out.write(data + '\n')
file_out.flush()
def recv():
while True:
res = file_in.readline().rstrip('\n')
read, _, _ = select([ file_in ], [], [], 0)
if not read: return res
class History:
"""readline history safe open/close"""
def __init__(self, filename):
self.filename = os.path.expanduser( filename )
def __enter__(self):
try:
readline.read_history_file(self.filename)
# print 'loaded console history from', self.filename
except IOError:
pass
return self
def __exit__(self, type, value, traceback):
readline.write_history_file( self.filename )
def cleanup(*args):
print('console cleanup')
os.system('stty sane')
    def _make_handler(sig, old):
        # bind the per-signal values now: a closure defined directly in the
        # loop below would capture the loop variables late and always act on
        # the last (sig, old) pair when it finally runs
        def new(*args):
            cleanup()
            signal.signal(sig, old)
            os.kill(os.getpid(), sig)
        return new

    for sig in [signal.SIGQUIT,
                signal.SIGTERM,
                signal.SIGILL,
                signal.SIGSEGV]:
        signal.signal(sig, _make_handler(sig, signal.getsignal(sig)))
# main loop
try:
with History( "~/.sofa-console" ):
print 'console started'
while True:
send( raw_input( recv() ) )
except KeyboardInterrupt:
print 'console exited (SIGINT)'
except EOFError:
        ppid = os.getppid()
        try:
            os.kill(ppid, signal.SIGTERM)
print 'console exited (EOF), terminating parent process'
except OSError:
pass
else:
import subprocess
import code
import atexit
_cleanup = None
def _register( c ):
global _cleanup
if _cleanup: _cleanup()
_cleanup = c
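    # editorial comment: _register keeps at most one console polling -- when a
    # new cleanup callback is registered, the previous one is run first, which
    # stops the previous Console's timer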
class Console(code.InteractiveConsole):
def __init__(self, locals = None, timeout = 100):
"""
python interpreter taking input from console subprocess
scope is provided through 'locals' (usually: locals() or globals())
'timeout' (in milliseconds) sets how often is the console polled.
"""
code.InteractiveConsole.__init__(self, locals)
if timeout >= 0:
def callback():
self.poll()
from PySide import QtCore
self.timer = QtCore.QTimer()
self.timer.timeout.connect( callback )
self.timer.start( timeout )
_register( lambda: self.timer.stop() )
# execute next command, blocks on console input
def next(self):
line = recv()
data = '>>> '
if self.push( line ):
data = '... '
send( data )
# convenience
def poll(self):
if ready(): self.next()
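
    # Editorial usage sketch, not from the original file: inside a PySide
    # application that already runs a Qt event loop, something like
    #
    #   console = Console(locals(), timeout=100)
    #
    # lets the QTimer poll the command pipe every `timeout` ms and feed
    # complete lines to the embedded InteractiveConsole.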
# send prompt to indicate we are ready
def send(data):
prompt_out.write(data + '\n')
prompt_out.flush()
# receive command line
def recv():
res = cmd_in.readline()
if res: return res.rstrip('\n')
return res
# is there any available command ?
def ready():
read, _, _ = select([ cmd_in ], [], [], 0)
return read
# communication pipes
prompt = os.pipe()
cmd = os.pipe()
# subprocess with in/out fd, and forwarding stdin
sub = subprocess.Popen(['python', __file__,
str(prompt[0]), str(cmd[1])],
stdin = sys.stdin)
# open the tubes !
prompt_out = os.fdopen(prompt[1], 'w')
cmd_in = os.fdopen(cmd[0], 'r')
# we're ready
send('>>> ')
# def cleanup(*args):
# print('console cleanup')
# os.system('stty sane')
# def exit(*args):
# print 'exit'
# cleanup()
# sys.exit(0) forces cleanup *from python* before the gui
# closes. otherwise pyside causes segfault on python finalize.
def handler(*args):
sub.terminate()
sub.wait()
sys.exit(0)
from PySide import QtCore
app = QtCore.QCoreApplication.instance()
app.aboutToQuit.connect( handler )
# import atexit
# atexit.register( handler )
# import atexit
# atexit.register( exit )
# for sig in [signal.SIGSEGV, signal.SIGILL]:
# old = signal.getsignal(sig)
# def h(*args):
# print args
# sub.terminate()
# signal.signal(sig, old)
# os.kill(os.getpid(), sig)
# signal.signal(sig, h)
| FabienPean/sofa | applications/plugins/SofaPython/python/SofaPython/console.py | Python | lgpl-2.1 | 5,349 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################

from spack import *
import shutil


class Libiconv(AutotoolsPackage):
    """GNU libiconv provides an implementation of the iconv() function
    and the iconv program for character set conversion."""

    homepage = "https://www.gnu.org/software/libiconv/"
    url = "http://ftp.gnu.org/pub/gnu/libiconv/libiconv-1.15.tar.gz"

    version('1.15', 'ace8b5f2db42f7b3b3057585e80d9808')
    version('1.14', 'e34509b1623cec449dfeb73d7ce9c6c6')

    # We cannot set up a warning for gets(), since gets() is not part
    # of C11 any more and thus might not exist.
    patch('gets.patch', when='@1.14')
    conflicts('@1.14', when='%gcc@5:')

    def configure_args(self):
        args = ['--enable-extra-encodings']

        # A hack to patch config.guess in the libcharset sub directory
        shutil.copyfile('./build-aux/config.guess',
                        'libcharset/build-aux/config.guess')

        return args
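
    # Editorial usage sketch, not part of the original recipe: version 1.15
    # installs with any supported compiler, while 1.14 carries the gets()
    # patch and conflicts with gcc >= 5, e.g.
    #
    #   spack install [email protected]
    #   spack install [email protected] %[email protected]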
| EmreAtes/spack | var/spack/repos/builtin/packages/libiconv/package.py | Python | lgpl-2.1 | 2,156 |