code (string, 2 to 1.05M chars) | repo_name (string, 5 to 104 chars) | path (string, 4 to 251 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 2 to 1.05M)
---|---|---|---|---|---
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
VERSION = "2016-09-01-preview"
| SUSE/azure-sdk-for-python | azure-mgmt-resource/azure/mgmt/resource/managedapplications/version.py | Python | mit | 506 |
#!/usr/bin/env python
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
class for handling .bb files
Reads a .bb file and obtains its metadata
"""
# Copyright (C) 2003, 2004 Chris Larson
# Copyright (C) 2003, 2004 Phil Blundell
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import re, bb, os
import logging
import bb.build, bb.utils
from bb import data
from . import ConfHandler
from .. import resolve_file, ast, logger, ParseError
from .ConfHandler import include, init
# For compatibility
bb.deprecate_import(__name__, "bb.parse", ["vars_from_file"])
__func_start_regexp__ = re.compile( r"(((?P<py>python)|(?P<fr>fakeroot))\s*)*(?P<func>[\w\.\-\+\{\}\$]+)?\s*\(\s*\)\s*{$" )
__inherit_regexp__ = re.compile( r"inherit\s+(.+)" )
__export_func_regexp__ = re.compile( r"EXPORT_FUNCTIONS\s+(.+)" )
__addtask_regexp__ = re.compile(r"addtask\s+(?P<func>\w+)\s*((before\s*(?P<before>((.*(?=after))|(.*))))|(after\s*(?P<after>((.*(?=before))|(.*)))))*")
__deltask_regexp__ = re.compile(r"deltask\s+(?P<func>\w+)")
__addhandler_regexp__ = re.compile( r"addhandler\s+(.+)" )
__def_regexp__ = re.compile( r"def\s+(\w+).*:" )
__python_func_regexp__ = re.compile( r"(\s+.*)|(^$)" )
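# A few illustrative lines that the patterns above are meant to match; the task
# and class names are hypothetical, not taken from any real recipe:
#   do_mytask () {                                    -> __func_start_regexp__
#   python do_mytask () {                             -> __func_start_regexp__ ("py" group set)
#   inherit autotools pkgconfig                       -> __inherit_regexp__
#   EXPORT_FUNCTIONS do_configure do_compile          -> __export_func_regexp__
#   addtask mytask after do_compile before do_build   -> __addtask_regexp__
#   deltask mytask                                    -> __deltask_regexp__
#   addhandler my_event_handler                       -> __addhandler_regexp__
#   def my_helper(d):                                 -> __def_regexp__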
__infunc__ = []
__inpython__ = False
__body__ = []
__classname__ = ""
cached_statements = {}
def supports(fn, d):
"""Return True if fn has a supported extension"""
return os.path.splitext(fn)[-1] in [".bb", ".bbclass", ".inc"]
def inherit(files, fn, lineno, d):
__inherit_cache = d.getVar('__inherit_cache', False) or []
files = d.expand(files).split()
for file in files:
if not os.path.isabs(file) and not file.endswith(".bbclass"):
file = os.path.join('classes', '%s.bbclass' % file)
if not os.path.isabs(file):
bbpath = d.getVar("BBPATH")
abs_fn, attempts = bb.utils.which(bbpath, file, history=True)
for af in attempts:
if af != abs_fn:
bb.parse.mark_dependency(d, af)
if abs_fn:
file = abs_fn
if not file in __inherit_cache:
logger.debug(1, "Inheriting %s (from %s:%d)" % (file, fn, lineno))
__inherit_cache.append( file )
d.setVar('__inherit_cache', __inherit_cache)
include(fn, file, lineno, d, "inherit")
__inherit_cache = d.getVar('__inherit_cache', False) or []
def get_statements(filename, absolute_filename, base_name):
global cached_statements
try:
return cached_statements[absolute_filename]
except KeyError:
with open(absolute_filename, 'r') as f:
statements = ast.StatementGroup()
lineno = 0
while True:
lineno = lineno + 1
s = f.readline()
if not s: break
s = s.rstrip()
feeder(lineno, s, filename, base_name, statements)
if __inpython__:
# add a blank line to close out any python definition
feeder(lineno, "", filename, base_name, statements, eof=True)
if filename.endswith(".bbclass") or filename.endswith(".inc"):
cached_statements[absolute_filename] = statements
return statements
def handle(fn, d, include):
global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __infunc__, __body__, __residue__, __classname__
__body__ = []
__infunc__ = []
__classname__ = ""
__residue__ = []
base_name = os.path.basename(fn)
(root, ext) = os.path.splitext(base_name)
init(d)
if ext == ".bbclass":
__classname__ = root
__inherit_cache = d.getVar('__inherit_cache', False) or []
if not fn in __inherit_cache:
__inherit_cache.append(fn)
d.setVar('__inherit_cache', __inherit_cache)
if include != 0:
oldfile = d.getVar('FILE', False)
else:
oldfile = None
abs_fn = resolve_file(fn, d)
if include:
bb.parse.mark_dependency(d, abs_fn)
# actual loading
statements = get_statements(fn, abs_fn, base_name)
# DONE WITH PARSING... time to evaluate
if ext != ".bbclass" and abs_fn != oldfile:
d.setVar('FILE', abs_fn)
try:
statements.eval(d)
except bb.parse.SkipRecipe:
bb.data.setVar("__SKIPPED", True, d)
if include == 0:
return { "" : d }
if __infunc__:
raise ParseError("Shell function %s is never closed" % __infunc__[0], __infunc__[1], __infunc__[2])
if __residue__:
raise ParseError("Leftover unparsed (incomplete?) data %s from %s" % __residue__, fn)
if ext != ".bbclass" and include == 0:
return ast.multi_finalize(fn, d)
if ext != ".bbclass" and oldfile and abs_fn != oldfile:
d.setVar("FILE", oldfile)
return d
def feeder(lineno, s, fn, root, statements, eof=False):
global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __def_regexp__, __python_func_regexp__, __inpython__, __infunc__, __body__, bb, __residue__, __classname__
if __infunc__:
if s == '}':
__body__.append('')
ast.handleMethod(statements, fn, lineno, __infunc__[0], __body__, __infunc__[3], __infunc__[4])
__infunc__ = []
__body__ = []
else:
__body__.append(s)
return
if __inpython__:
m = __python_func_regexp__.match(s)
if m and not eof:
__body__.append(s)
return
else:
ast.handlePythonMethod(statements, fn, lineno, __inpython__,
root, __body__)
__body__ = []
__inpython__ = False
if eof:
return
if s and s[0] == '#':
if len(__residue__) != 0 and __residue__[0][0] != "#":
bb.fatal("There is a comment on line %s of file %s (%s) which is in the middle of a multiline expression.\nBitbake used to ignore these but no longer does so, please fix your metadata as errors are likely as a result of this change." % (lineno, fn, s))
if len(__residue__) != 0 and __residue__[0][0] == "#" and (not s or s[0] != "#"):
bb.fatal("There is a confusing multiline, partially commented expression on line %s of file %s (%s).\nPlease clarify whether this is all a comment or should be parsed." % (lineno, fn, s))
if s and s[-1] == '\\':
__residue__.append(s[:-1])
return
s = "".join(__residue__) + s
__residue__ = []
# Skip empty lines
if s == '':
return
# Skip comments
if s[0] == '#':
return
m = __func_start_regexp__.match(s)
if m:
__infunc__ = [m.group("func") or "__anonymous", fn, lineno, m.group("py") is not None, m.group("fr") is not None]
return
m = __def_regexp__.match(s)
if m:
__body__.append(s)
__inpython__ = m.group(1)
return
m = __export_func_regexp__.match(s)
if m:
ast.handleExportFuncs(statements, fn, lineno, m, __classname__)
return
m = __addtask_regexp__.match(s)
if m:
ast.handleAddTask(statements, fn, lineno, m)
return
m = __deltask_regexp__.match(s)
if m:
ast.handleDelTask(statements, fn, lineno, m)
return
m = __addhandler_regexp__.match(s)
if m:
ast.handleBBHandlers(statements, fn, lineno, m)
return
m = __inherit_regexp__.match(s)
if m:
ast.handleInherit(statements, fn, lineno, m)
return
return ConfHandler.feeder(lineno, s, fn, statements)
# Add us to the handlers list
from .. import handlers
handlers.append({'supports': supports, 'handle': handle, 'init': init})
del handlers
| schleichdi2/OPENNFR-6.0-CORE | bitbake/lib/bb/parse/parse_py/BBHandler.py | Python | gpl-2.0 | 8,492 |
"""
Gather F5 LTM Virtual Server Information
@author: David Petzel
@contact: [email protected]
@date: 05/06/2011
"""
from Products.DataCollector.plugins.CollectorPlugin import SnmpPlugin, GetTableMap, GetMap
from Products.DataCollector.plugins.DataMaps import ObjectMap
import re
import binascii
import string
import socket
from pprint import pprint
class BigipLtmVirtualServerMap(SnmpPlugin):
"""
Handles the modeling of Virtual Servers on the LTM
Custom Properties Added:
zF5BigipVirtualServerNameFilter - This provides a regex to compare
the virtual server name against. Only items that match will be returned.
When left blank all virtual servers will be returned
"""
relname = "LtmVs"
modname = "ZenPacks.community.f5.BigipVirtualServer"
deviceProperties = SnmpPlugin.deviceProperties + ('zF5BigipVirtualServerNameFilter',)
# Column dictionaries map the OID ending for each data point you're interested in.
# This value gets appended to the base OID listed in the snmpGetTableMaps call
basecolumns = {
'.1.1': 'ltmVirtualServName',
'.1.3': 'ltmVirtualServAddr',
'.1.6': 'ltmVirtualServPort',
}
# The VIP Status is provided from a separate table
status_columns = {
'.1.1': 'ltmVsStatusName',
'.1.2': 'ltmVsStatusAvailState',
'.1.3': 'ltmVsStatusEnabledState',
'.1.5': 'ltmVsStatusDetailReason',
}
snmpGetTableMaps = (
#Virtual Server Table
GetTableMap('ltmVirtualServTable', '.1.3.6.1.4.1.3375.2.2.10.1.2', basecolumns),
GetTableMap('ltmVsStatusTable', '.1.3.6.1.4.1.3375.2.2.10.13.2', status_columns)
)
def process(self, device, results, log):
"""
Just as it sounds
"""
#The availability of the specified virtual server indicated in color.
#none - error;
#green - available in some capacity;
#yellow - not currently available;
#red - not available;
#blue - availability is unknown;
#gray - unlicensed.
avail_status_values = {
0: 'None - Error',
1: 'Green - available in some capacity',
2: 'Yellow - not currently available',
3: 'Red - not available',
4: 'Blue - availability is unknown',
5: 'Gray - unlicensed',
}
#The activity status of the specified virtual server, as specified
#by the user.
enable_state_values = {
1: 'Enabled',
2: 'Disabled'
}
log.info('processing %s for device %s', self.name(), device.id)
getdata, tabledata = results
vs_table = tabledata.get("ltmVirtualServTable")
# Grab the second table and append it to the first
status_table = tabledata.get("ltmVsStatusTable")
for oid, data in status_table.items():
for key, value in data.items():
if key not in vs_table[oid]:
vs_table[oid][key] = value
maps = []
rm = self.relMap()
# Get the list of name patterns to search for
VirtualServerNameFilter = getattr(device, 'zF5BigipVirtualServerNameFilter', None)
log.debug("Picked up Filter List of: %s" , VirtualServerNameFilter)
for oid, data in vs_table.items():
# log.debug("%s : %s\n", oid, data)
#
om = self.objectMap(data)
include_vs = True
if VirtualServerNameFilter != None and VirtualServerNameFilter != "":
# If there is a regex filter supplied, lets use it
if re.search(VirtualServerNameFilter, om.ltmVirtualServName) == None:
include_vs = False
if include_vs == True:
om.id = self.prepId(om.ltmVirtualServName)
om.snmpindex = oid
# The value fetched is a packed hex representation of the IP
# Use socket to convert to octet based IP
# http://docs.python.org/library/socket.html#socket.inet_ntoa
om.vsIP = socket.inet_ntoa(om.ltmVirtualServAddr)
#print om.status
if om.ltmVsStatusAvailState == 1:
om.status = "Up"
else:
om.status = "Down"
om.VsStatusEnabledState = enable_state_values[om.ltmVsStatusEnabledState]
om.VsStatusAvailState = avail_status_values[om.ltmVsStatusAvailState]
om.VsStatusDetailReason = om.ltmVsStatusDetailReason
rm.append(om)
#log.debug(rm)
return [rm]
| anksp21/Community-Zenpacks | ZenPacks.community.f5/ZenPacks/community/f5/modeler/plugins/BigipLtmVirtualServerMap.py | Python | gpl-2.0 | 4,833 |
#! /usr/bin/env python
from hermes2d import Mesh, MeshView, H1Shapeset, PrecalcShapeset, H1Space, \
LinSystem, WeakForm, DummySolver, Solution, ScalarView
from hermes2d.examples.c06 import set_bc, set_forms
from hermes2d.examples import get_example_mesh
mesh = Mesh()
mesh.load(get_example_mesh())
#mesh.refine_element(0)
#mesh.refine_all_elements()
mesh.refine_towards_boundary(5, 3)
shapeset = H1Shapeset()
pss = PrecalcShapeset(shapeset)
# create an H1 space
space = H1Space(mesh, shapeset)
space.set_uniform_order(5)
set_bc(space)
space.assign_dofs()
xprev = Solution()
yprev = Solution()
# initialize the discrete problem
wf = WeakForm(1)
set_forms(wf)
solver = DummySolver()
sys = LinSystem(wf, solver)
sys.set_spaces(space)
sys.set_pss(pss)
sln = Solution()
sys.assemble()
sys.solve_system(sln)
view = ScalarView("Solution")
view.show(sln, lib="mayavi")
# view.wait()
mview = MeshView("Hello world!", 100, 100, 500, 500)
mview.show(mesh, lib="mpl", method="orders", notebook=False)
mview.wait()
| solin/hermes2d | python/examples/06.py | Python | gpl-2.0 | 1,022 |
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.tests.utils.package_tester import PackageTester
PackageTester().run_all_tests_for_package('sanfrancisco') | christianurich/VIBe2UrbanSim | 3rdparty/opus/src/sanfrancisco/tests/all_tests.py | Python | gpl-2.0 | 247 |
#!/usr/bin/python
"""
Usage: python manifestParser.py [path to list with manifest paths].
Parses the manifests indicated in the list file and creates a .pl file with the application permissions.
"""
import os,sys
import subprocess
import xml.etree.ElementTree as ET
class manifestParser:
"""
Main class that parses manifest file
"""
def __init__(self, listFile):
self.listFile = listFile
def getApplicationFacts(self, aManifestFile):
"""
Method that obtains the permissions from a manifest file and formats them as a Prolog fact
"""
# These lines are used to obtain the path to the working directory
currentDir = os.getcwd()
filename = os.path.join(currentDir, aManifestFile)
# These lines parse the xml, the application permissions are stored as a list in permissions
manifestTree = self.parseXml(filename)
applicationName = self.getApplicationName(manifestTree)
permissions = self.getPermissions(manifestTree)
# Prolog atoms must start with a lower-case letter; in the manifest file, however, permissions are defined in upper case
manifestPermissions='permissions(' + applicationName.lower() +',['
permissionList=[]
# Obtains the attribute stored in the permission list and appends it to the list
for i in range(len(permissions)):
permissionList.append(str(permissions[i].attrib['{http://schemas.android.com/apk/res/android}name'].split('.')[2]).lower())
if i < len(permissions) - 1:
manifestPermissions= manifestPermissions + permissionList[i] + ','
else:
manifestPermissions= manifestPermissions + permissionList[i]
manifestPermissions = manifestPermissions + ']).\n'
return manifestPermissions
def parseXml(self, xmlPath):
return ET.parse(xmlPath)
def getPermissions(self, aManifestTree):
return aManifestTree.findall('uses-permission')
def getApplicationName(self, aManifestTree):
appName = aManifestTree.findall('application')
return appName[0].attrib['{http://schemas.android.com/apk/res/android}name'].split('.')[-1]
def getManifests(self, aFileName):
"""
Method that reads the list file and creates a list with all the manifest paths
"""
# Reads the file
listFile = open(aFileName, 'r')
manifestPaths = []
# This loop goes through the file object, line by line
for line in listFile:
# Append each path to the list; the split strips the trailing newline
if len(line) < 2:
continue
manifestPaths.append(line.split('\n')[0])
return manifestPaths
if __name__=="__main__":
parser = manifestParser(sys.argv[1])
# Command line argument is a txt file that lists all manifests
aFileName = sys.argv[1]
# Get all the manifest files included in list.txt
manifestList = parser.getManifests(aFileName)
prologFactPermissionsString=''
# Creates a string with the format permissions(applicationName,[permission1,permission2,...])
for i in range(len(manifestList)):
prologFactPermissionsString = prologFactPermissionsString + parser.getApplicationFacts(manifestList[i])
# Writes the permissions to a pl file
outputFile = open("permissions.pl", 'w')
outputFile.write(prologFactPermissionsString)
outputFile.close()
| afmurillo/FinalHerramientas-2014-2 | ProyectoFinal/Codigo/manifestParser.py | Python | gpl-2.0 | 3,248 |
"""
Contains functionality common across all repository-related managers.
= Working Directories =
Working directories are used as staging or temporary file storage by importers
and distributors. Each directory is unique to the repository and plugin
combination.
The directory structure for plugin working directories is as follows:
<pulp_storage>/working/repos/<repo_id>/[importers|distributors]/<plugin_type_id>
For example, for importer "foo" and repository "bar":
/var/lib/pulp/working/repos/bar/importers/foo
The rationale is to simplify cleanup on repository delete; the repository's
working directory is simply deleted.
"""
import os
from pulp.common import dateutils
from pulp.server import config as pulp_config
from pulp.plugins.model import Repository, RelatedRepository, RepositoryGroup, \
RelatedRepositoryGroup
def _ensure_tz_specified(time_stamp):
"""
Check a datetime that came from the database to ensure it has a timezone specified in UTC
Mongo doesn't include the TZ info so if no TZ is set this assumes UTC.
:param time_stamp: a datetime object to ensure has UTC tzinfo specified
:type time_stamp: datetime.datetime
:return: The time_stamp with a timezone specified
:rtype: datetime.datetime
"""
if time_stamp:
time_stamp = dateutils.to_utc_datetime(time_stamp, no_tz_equals_local_tz=False)
return time_stamp
def to_transfer_repo(repo_data):
"""
Converts the given database representation of a repository into a plugin
repository transfer object, including any other fields that need to be
included.
@param repo_data: database representation of a repository
@type repo_data: dict
@return: transfer object used in many plugin API calls
@rtype: pulp.plugins.model.Repository
"""
r = Repository(repo_data['id'], repo_data['display_name'], repo_data['description'],
repo_data['notes'], content_unit_counts=repo_data['content_unit_counts'],
last_unit_added=_ensure_tz_specified(repo_data.get('last_unit_added')),
last_unit_removed=_ensure_tz_specified(repo_data.get('last_unit_removed')))
return r
def to_related_repo(repo_data, configs):
"""
Converts the given database representation of a repository into a plugin's
representation of a related repository. The list of configurations for
the repository's plugins will be included in the returned type.
@param repo_data: database representation of a repository
@type repo_data: dict
@param configs: list of configurations for all relevant plugins on the repo
@type configs: list
@return: transfer object used in many plugin API calls
@rtype: pulp.plugins.model.RelatedRepository
"""
r = RelatedRepository(repo_data['id'], configs, repo_data['display_name'],
repo_data['description'], repo_data['notes'])
return r
def repository_working_dir(repo_id, mkdir=True):
"""
Determines the repository's working directory. Individual plugin working
directories will be placed under this. If the mkdir argument is set to true,
the directory will be created as part of this call.
See the module-level docstrings for more information on the directory
structure.
@param mkdir: if true, this call will create the directory; otherwise the
full path will just be generated
@type mkdir: bool
@return: full path on disk
@rtype: str
"""
working_dir = os.path.join(_repo_working_dir(), repo_id)
if mkdir and not os.path.exists(working_dir):
os.makedirs(working_dir)
return working_dir
def importer_working_dir(importer_type_id, repo_id, mkdir=True):
"""
Determines the working directory for an importer to use for a repository.
If the mkdir argument is set to true, the directory will be created as
part of this call.
See the module-level docstrings for more information on the directory
structure.
@param mkdir: if true, this call will create the directory; otherwise the
full path will just be generated
@type mkdir: bool
@return: full path on disk to the directory the importer can use for the
given repository
@rtype: str
"""
repo_working_dir = repository_working_dir(repo_id, mkdir)
working_dir = os.path.join(repo_working_dir, 'importers', importer_type_id)
if mkdir and not os.path.exists(working_dir):
os.makedirs(working_dir)
return working_dir
def distributor_working_dir(distributor_type_id, repo_id, mkdir=True):
"""
Determines the working directory for a distributor to use for a repository.
If the mkdir argument is set to true, the directory will be created as
part of this call.
See the module-level docstrings for more information on the directory
structure.
@param mkdir: if true, this call will create the directory; otherwise the
full path will just be generated
@type mkdir: bool
@return: full path on disk to the directory the distributor can use for the
given repository
@rtype: str
"""
repo_working_dir = repository_working_dir(repo_id, mkdir)
working_dir = os.path.join(repo_working_dir, 'distributors', distributor_type_id)
if mkdir and not os.path.exists(working_dir):
os.makedirs(working_dir)
return working_dir
def to_transfer_repo_group(group_data):
"""
Converts the given database representation of a repository group into a
plugin transfer object.
@param group_data: database representation of the group
@type group_data: dict
@return: transfer object used in plugin calls
@rtype: pulp.plugins.model.RepositoryGroup
"""
g = RepositoryGroup(group_data['id'], group_data['display_name'],
group_data['description'], group_data['notes'],
group_data['repo_ids'])
return g
def to_related_repo_group(group_data, configs):
"""
Converts the given database representation of a repository group into a
plugin transfer object. The list of configurations for the requested
group plugins are included in the returned type.
@param group_data: database representation of the group
@type group_data: dict
@param configs: list of plugin configurations to include
@type configs: list
@return: transfer object used in plugin calls
@rtype: pulp.plugins.model.RelatedRepositoryGroup
"""
g = RelatedRepositoryGroup(group_data['id'], configs, group_data['display_name'],
group_data['description'], group_data['notes'])
return g
def repo_group_working_dir(group_id, mkdir=True):
"""
Determines the repo group's working directory. Individual plugin working
directories will be placed under this. If the mkdir argument is set to
true, the directory will be created as part of this call.
@param group_id: identifies the repo group
@type group_id: str
@param mkdir: if true, the call will create the directory; otherwise the
full path will just be generated and returned
@type mkdir: bool
@return: full path on disk
@rtype: str
"""
working_dir = os.path.join(_repo_group_working_dir(), group_id)
if mkdir and not os.path.exists(working_dir):
os.makedirs(working_dir)
return working_dir
def group_importer_working_dir(importer_type_id, group_id, mkdir=True):
"""
Determines the working directory for an importer to use for a repository
group. If the mkdir argument is set to true, the directory will be created
as part of this call.
@param mkdir: if true, the call will create the directory; otherwise the
full path will just be generated and returned
@type mkdir: bool
@return: full path on disk
@rtype: str
"""
group_working_dir = repo_group_working_dir(group_id, mkdir)
working_dir = os.path.join(group_working_dir, 'importers', importer_type_id)
if mkdir and not os.path.exists(working_dir):
os.makedirs(working_dir)
return working_dir
def group_distributor_working_dir(distributor_type_id, group_id, mkdir=True):
"""
Determines the working directory for an importer to use for a repository
group. If the mkdir argument is set to true, the directory will be created
as part of this call.
@param mkdir: if true, the call will create the directory; otherwise the
full path will just be generated and returned
@type mkdir: bool
@return: full path on disk
@rtype: str
"""
group_working_dir = repo_group_working_dir(group_id, mkdir)
working_dir = os.path.join(group_working_dir, 'distributors', distributor_type_id)
if mkdir and not os.path.exists(working_dir):
os.makedirs(working_dir)
return working_dir
def _working_dir_root():
storage_dir = pulp_config.config.get('server', 'storage_dir')
dir_root = os.path.join(storage_dir, 'working')
return dir_root
def _repo_working_dir():
dir = os.path.join(_working_dir_root(), 'repos')
return dir
def _repo_group_working_dir():
dir = os.path.join(_working_dir_root(), 'repo_groups')
return dir
| beav/pulp | server/pulp/server/managers/repo/_common.py | Python | gpl-2.0 | 9,312 |
#!/usr/bin/env python
# encoding: utf-8
#
# rbootstrap - Install RPM based Linux into chroot jails
# Copyright (C) 2014 Lars Michelsen <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
class RBError(Exception):
pass
class BailOut(RBError):
""" Is used to terminate the program with an error message """
pass
| LaMi-/rbootstrap | rbootstrap/exceptions.py | Python | gpl-2.0 | 1,001 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('common', '0004_category'),
('seller', '0014_auto_20150607_0943'),
]
operations = [
migrations.AddField(
model_name='asset',
name='newcategories',
field=models.ManyToManyField(to='common.Category'),
),
]
| tomcounsell/Cobra | apps/seller/migrations/0015_asset_newcategories.py | Python | gpl-2.0 | 455 |
#
# Copyright 2001 - 2016 Ludek Smid [http://www.ospace.net/]
#
# This file is part of Outer Space.
#
# Outer Space is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Outer Space is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Outer Space; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import gdata
from dialog import ProgressDlg
from dialog import PlayerSelectDlg
import pygame
from ige import log
# module globals
progressDlg = None
def onInitConnection():
pass
def onConnInitialized():
pass
def onCmdBegin():
if gdata.mainGameDlg:
gdata.mainGameDlg.onCmdInProgress(1)
else:
gdata.cmdInProgress = 1
gdata.app.update()
def onCmdEnd():
if gdata.mainGameDlg:
gdata.mainGameDlg.onCmdInProgress(0)
else:
gdata.cmdInProgress = 0
gdata.app.update()
def onUpdateStarting():
global progressDlg
log.debug("onUpdateStarting")
if not progressDlg:
progressDlg = ProgressDlg(gdata.app)
progressDlg.display(_('Updating OSCI database...'), 0, 1)
def onUpdateProgress(curr, max, text = None):
global progressDlg
log.debug("onUpdateProgress")
progressDlg.setProgress(text, curr, max)
def onUpdateFinished():
global progressDlg
log.debug("onUpdateFinished")
try:
progressDlg.hide()
except:
log.warning("Cannot delete progressDlg window")
for dialog in gdata.updateDlgs:
dialog.update()
def onNewMessages(number):
gdata.mainGameDlg.messagesDlg.update()
def onWaitingForResponse():
#pygame.event.pump()
while pygame.event.poll().type != pygame.NOEVENT:
pass
| dahaic/outerspace | client/osci/handler.py | Python | gpl-2.0 | 2,143 |
import fauxfactory
import pytest
from cfme import test_requirements
from cfme.control.explorer.policies import VMControlPolicy
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.markers.env_markers.provider import ONE_PER_TYPE
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.blockers import BZ
from cfme.utils.conf import credentials
from cfme.utils.update import update
from cfme.utils.wait import wait_for
pytestmark = [
pytest.mark.ignore_stream("upstream"),
pytest.mark.long_running,
pytest.mark.provider([VMwareProvider], selector=ONE_PER_TYPE, scope="module"),
test_requirements.ansible,
]
@pytest.fixture(scope="function")
def ansible_action(appliance, ansible_catalog_item):
action_collection = appliance.collections.actions
action = action_collection.create(
fauxfactory.gen_alphanumeric(15, start="action_"),
action_type="Run Ansible Playbook",
action_values={
"run_ansible_playbook": {"playbook_catalog_item": ansible_catalog_item.name}
},
)
yield action
action.delete_if_exists()
@pytest.fixture(scope="function")
def policy_for_testing(appliance, create_vm, provider, ansible_action):
policy = appliance.collections.policies.create(
VMControlPolicy,
fauxfactory.gen_alpha(15, start="policy_"),
scope=f"fill_field(VM and Instance : Name, INCLUDES, {create_vm.name})",
)
policy.assign_actions_to_event("Tag Complete", [ansible_action.description])
policy_profile = appliance.collections.policy_profiles.create(
fauxfactory.gen_alpha(15, start="profile_"), policies=[policy]
)
provider.assign_policy_profiles(policy_profile.description)
yield
if policy.exists:
policy.unassign_events("Tag Complete")
provider.unassign_policy_profiles(policy_profile.description)
policy_profile.delete()
policy.delete()
@pytest.fixture(scope="module")
def ansible_credential(wait_for_ansible, appliance, full_template_modscope):
credential = appliance.collections.ansible_credentials.create(
fauxfactory.gen_alpha(start="cred_"),
"Machine",
username=credentials[full_template_modscope.creds]["username"],
password=credentials[full_template_modscope.creds]["password"],
)
yield credential
credential.delete_if_exists()
@pytest.mark.tier(3)
@pytest.mark.parametrize("create_vm", ["full_template"], indirect=True)
def test_action_run_ansible_playbook_localhost(
request,
ansible_catalog_item,
ansible_action,
policy_for_testing,
create_vm,
ansible_credential,
ansible_service_request,
ansible_service,
appliance,
):
"""Tests a policy with ansible playbook action against localhost.
Polarion:
assignee: gtalreja
initialEstimate: 1/6h
casecomponent: Ansible
"""
with update(ansible_action):
ansible_action.run_ansible_playbook = {"inventory": {"localhost": True}}
create_vm.add_tag()
wait_for(ansible_service_request.exists, num_sec=600)
ansible_service_request.wait_for_request()
view = navigate_to(ansible_service, "Details")
assert view.provisioning.details.get_text_of("Hosts") == "localhost"
assert view.provisioning.results.get_text_of("Status") == "Finished"
@pytest.mark.meta(blockers=[BZ(1822533, forced_streams=["5.11", "5.10"])])
@pytest.mark.tier(3)
@pytest.mark.parametrize("create_vm", ["full_template"], indirect=True)
def test_action_run_ansible_playbook_manual_address(
request,
ansible_catalog_item,
ansible_action,
policy_for_testing,
create_vm,
ansible_credential,
ansible_service_request,
ansible_service,
appliance,
):
"""Tests a policy with ansible playbook action against manual address.
Polarion:
assignee: gtalreja
initialEstimate: 1/6h
casecomponent: Ansible
"""
with update(ansible_catalog_item):
ansible_catalog_item.provisioning = {"machine_credential": ansible_credential.name}
with update(ansible_action):
ansible_action.run_ansible_playbook = {
"inventory": {"specific_hosts": True, "hosts": create_vm.ip_address}
}
create_vm.add_tag()
wait_for(ansible_service_request.exists, num_sec=600)
ansible_service_request.wait_for_request()
view = navigate_to(ansible_service, "Details")
assert view.provisioning.details.get_text_of("Hosts") == create_vm.ip_address
assert view.provisioning.results.get_text_of("Status") == "Finished"
@pytest.mark.meta(blockers=[BZ(1822533, forced_streams=["5.11", "5.10"])])
@pytest.mark.tier(3)
@pytest.mark.parametrize("create_vm", ["full_template"], indirect=True)
def test_action_run_ansible_playbook_target_machine(
request,
ansible_catalog_item,
ansible_action,
policy_for_testing,
create_vm,
ansible_credential,
ansible_service_request,
ansible_service,
appliance,
):
"""Tests a policy with ansible playbook action against target machine.
Polarion:
assignee: gtalreja
initialEstimate: 1/6h
casecomponent: Ansible
"""
with update(ansible_action):
ansible_action.run_ansible_playbook = {"inventory": {"target_machine": True}}
create_vm.add_tag()
wait_for(ansible_service_request.exists, num_sec=600)
ansible_service_request.wait_for_request()
view = navigate_to(ansible_service, "Details")
assert view.provisioning.details.get_text_of("Hosts") == create_vm.ip_address
assert view.provisioning.results.get_text_of("Status") == "Finished"
@pytest.mark.tier(3)
@pytest.mark.parametrize("create_vm", ["full_template"], indirect=True)
def test_action_run_ansible_playbook_unavailable_address(
request,
ansible_catalog_item,
create_vm,
ansible_action,
policy_for_testing,
ansible_credential,
ansible_service_request,
ansible_service,
appliance,
):
"""Tests a policy with ansible playbook action against unavailable address.
Polarion:
assignee: gtalreja
initialEstimate: 1/6h
casecomponent: Ansible
"""
with update(ansible_catalog_item):
ansible_catalog_item.provisioning = {"machine_credential": ansible_credential.name}
with update(ansible_action):
ansible_action.run_ansible_playbook = {
"inventory": {"specific_hosts": True, "hosts": "unavailable_address"}
}
create_vm.add_tag()
wait_for(ansible_service_request.exists, num_sec=600)
ansible_service_request.wait_for_request()
view = navigate_to(ansible_service, "Details")
assert view.provisioning.details.get_text_of("Hosts") == "unavailable_address"
assert view.provisioning.results.get_text_of("Status") == "Finished"
| nachandr/cfme_tests | cfme/tests/ansible/test_embedded_ansible_actions.py | Python | gpl-2.0 | 6,825 |
""" This module provides access to higher level functions and
constants for ieee special values such as Not a Number (nan) and
infinity (inf).
>>> from numarray import *
The special values are designated using lower case as follows:
>> inf
inf
>> plus_inf
inf
>> minus_inf
-inf
>> nan
nan
>> plus_zero
0.0
>> minus_zero
-0.0
Note that the representation of IEEE special values is platform
dependent so your Python might, for instance, say 'Infinity' rather
than 'inf'. Below, inf is seen to arise as the result of floating
point division by 0 and nan is seen to arise from 0 divided by 0:
>>> a = arange(2.0)
>>> b = a/0
Warning: Encountered invalid numeric result(s) in divide
Warning: Encountered divide by zero(s) in divide
Here are the results for linux, but the repr problem causes this
example to fail for windows:
>> b
array([ nan, inf])
A curious property of nan is that it does not compare to *itself* as
equal (results also from linux):
>> b == nan
array([0, 0], type=Bool)
The isnan(), isinf(), and isfinite() functions return boolean arrays
which have the value True where the corresponding predicate holds.
These functions detect bit ranges and are therefore more robust than
simple equality checks.
>>> isnan(b)
array([1, 0], type=Bool)
>>> isinf(b)
array([0, 1], type=Bool)
>>> isfinite(b)
array([0, 0], type=Bool)
Array based indexing provides a convenient way to replace special values:
>>> b[isnan(b)] = 999
>>> b[isinf(b)] = 5
>>> b
array([ 999., 5.])
Here's an easy approach for compressing your data arrays to remove
NaNs:
>>> x, y = arange(10.), arange(10.); x[5] = nan; y[6] = nan;
>>> keep = ~isnan(x) & ~isnan(y)
>>> x[keep]
array([ 0., 1., 2., 3., 4., 7., 8., 9.])
>>> y[keep]
array([ 0., 1., 2., 3., 4., 7., 8., 9.])
=======================================================================
# >>> inf # the repr() of inf may vary from platform to platform
# inf
# >>> nan # the repr() of nan may vary from platform to platform
# nan
# Create a couple inf values in 4,4 array
>>> a=arange(16.0, shape=(4,4))
>>> a[2,3] = 0.0
>>> b = 1/a
Warning: Encountered divide by zero(s) in divide
# Locate the positions of the inf values
>>> getinf(b)
(array([0, 2]), array([0, 3]))
# Change the inf values to something else
>>> isinf(b)
array([[1, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, 0, 0]], type=Bool)
>>> isinf(inf)
1
>>> isinf(1)
0
>>> isinf(nan)
0
>>> isfinite(inf)
0
>>> isfinite(1)
1
>>> isfinite(nan)
0
>>> isnan(inf)
0
>>> isnan(1)
0
>>> isnan(nan)
1
>>> isfinite(b)
array([[0, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 0],
[1, 1, 1, 1]], type=Bool)
>>> a[getinf(b)] = 999
>>> a
array([[ 999., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 999.],
[ 12., 13., 14., 15.]])
# Set a bunch of locations to a special value
>>> a[0,1] = nan; a[1,2] = nan; a[2,3] = nan
>>> getnan(a)
(array([0, 1, 2]), array([1, 2, 3]))
IEEE Special Value support 32-bit
>>> import ieeespecial
>>> a = arange(5.0, type=Float32)
>>> b = (a*a)/a
Warning: Encountered invalid numeric result(s) in divide
>>> ieeemask(b, NAN)
array([1, 0, 0, 0, 0], type=Bool)
>>> ieeemask(b, NUMBER)
array([0, 1, 1, 1, 1], type=Bool)
>>> index(b, NAN)
(array([0]),)
>>> getnan(b)
(array([0]),)
>>> setnan(b, 42.0)
>>> b[0]
42.0
>>> a = arange(1.0, 6.0, type=Float32)
>>> b = a/zeros((5,), type=Float32)
Warning: Encountered divide by zero(s) in divide
>>> ieeemask(b, POS_INFINITY)
array([1, 1, 1, 1, 1], type=Bool)
>>> ieeemask(b, NEG_INFINITY)
array([0, 0, 0, 0, 0], type=Bool)
>>> ieeemask(b, INFINITY)
array([1, 1, 1, 1, 1], type=Bool)
>>> b = (-a)/zeros((5,), type=Float32)
Warning: Encountered divide by zero(s) in divide
>>> ieeemask(b, POS_INFINITY)
array([0, 0, 0, 0, 0], type=Bool)
>>> ieeemask(b, NEG_INFINITY)
array([1, 1, 1, 1, 1], type=Bool)
>>> ieeemask(b, INFINITY)
array([1, 1, 1, 1, 1], type=Bool)
>>> ieeemask(b, NUMBER)
array([0, 0, 0, 0, 0], type=Bool)
>>> ieeemask(array([0], type=Float32), POS_ZERO)
array([1], type=Bool)
>>> ieeemask(array([0], type=Float32), NEG_ZERO)
array([0], type=Bool)
>>> ieeemask(array([0], type=Float32), ZERO)
array([1], type=Bool)
>>> neginf = (array([-1],type=Float32)/array([0], type=Float32))
Warning: Encountered divide by zero(s) in divide
>>> negzero = array([1], type=Float32)/neginf
>>> ieeemask(negzero, POS_ZERO)
array([0], type=Bool)
>>> ieeemask(negzero, NEG_ZERO)
array([1], type=Bool)
>>> ieeemask(array([-0], type=Float32), ZERO)
array([1], type=Bool)
IEEE Special Value support 64-bit
>>> import ieeespecial
>>> a = arange(5.0, type=Float64)
>>> b = (a*a)/a
Warning: Encountered invalid numeric result(s) in divide
>>> ieeemask(b, NAN)
array([1, 0, 0, 0, 0], type=Bool)
>>> ieeemask(b, NUMBER)
array([0, 1, 1, 1, 1], type=Bool)
>>> index(b, NAN)
(array([0]),)
>>> getnan(b)
(array([0]),)
>>> setnan(b, 42.0)
>>> b[0]
42.0
>>> a = arange(1.0, 6.0, type=Float64)
>>> b = a/zeros((5,), type=Float64)
Warning: Encountered divide by zero(s) in divide
>>> ieeemask(b, POS_INFINITY)
array([1, 1, 1, 1, 1], type=Bool)
>>> ieeemask(b, NEG_INFINITY)
array([0, 0, 0, 0, 0], type=Bool)
>>> ieeemask(b, INFINITY)
array([1, 1, 1, 1, 1], type=Bool)
>>> b = (-a)/zeros((5,), type=Float64)
Warning: Encountered divide by zero(s) in divide
>>> ieeemask(b, POS_INFINITY)
array([0, 0, 0, 0, 0], type=Bool)
>>> ieeemask(b, NEG_INFINITY)
array([1, 1, 1, 1, 1], type=Bool)
>>> ieeemask(b, INFINITY)
array([1, 1, 1, 1, 1], type=Bool)
>>> ieeemask(b, NUMBER)
array([0, 0, 0, 0, 0], type=Bool)
>>> ieeemask(array([0], type=Float64), POS_ZERO)
array([1], type=Bool)
>>> ieeemask(array([0], type=Float64), NEG_ZERO)
array([0], type=Bool)
>>> ieeemask(array([0], type=Float64), ZERO)
array([1], type=Bool)
>>> neginf = (array([-1],type=Float64)/array([0], type=Float64))
Warning: Encountered divide by zero(s) in divide
>>> negzero = array([1], type=Float64)/neginf
>>> ieeemask(negzero, POS_ZERO)
array([0], type=Bool)
>>> ieeemask(negzero, NEG_ZERO)
array([1], type=Bool)
>>> ieeemask(array([-0], type=Float64), ZERO)
array([1], type=Bool)
"""
import numarrayall as _na
from numarray.ufunc import isnan
# Define *ieee special values*
_na.Error.pushMode(all="ignore")
plus_inf = inf = (_na.array(1.0)/_na.array(0.0))[()]
minus_inf = (_na.array(-1.0)/_na.array(0.0))[()]
nan = (_na.array(0.0)/_na.array(0.0))[()]
plus_zero = zero = 0.0
minus_zero = (_na.array(-1.0)*0.0)[()]
_na.Error.popMode()
# Define *mask condition bits*
class _IeeeMaskBit(_na.NumArray):
pass
def _BIT(x):
a = _na.array((1 << x), type=_na.Int32)
a.__class__ = _IeeeMaskBit
return a
POS_QUIET_NAN = _BIT(0)
NEG_QUIET_NAN = _BIT(1)
POS_SIGNAL_NAN = _BIT(2)
NEG_SIGNAL_NAN = _BIT(3)
POS_INFINITY = _BIT(4)
NEG_INFINITY = _BIT(5)
POS_DENORMALIZED = _BIT(6)
NEG_DENORMALIZED = _BIT(7)
POS_NORMALIZED = _BIT(8)
NEG_NORMALIZED = _BIT(9)
POS_ZERO = _BIT(10)
NEG_ZERO = _BIT(11)
INDETERM = _BIT(12)
BUG = _BIT(15)
NAN = POS_QUIET_NAN | NEG_QUIET_NAN | POS_SIGNAL_NAN | NEG_SIGNAL_NAN | INDETERM
INFINITY = POS_INFINITY | NEG_INFINITY
SPECIAL = NAN | INFINITY
NORMALIZED = POS_NORMALIZED | NEG_NORMALIZED
DENORMALIZED = POS_DENORMALIZED | NEG_DENORMALIZED
ZERO = POS_ZERO | NEG_ZERO
NUMBER = NORMALIZED | DENORMALIZED | ZERO
FINITE = NUMBER
def mask(a, m):
"""mask(a, m) returns the values of 'a' satisfying category 'm'.
mask does a parallel check for values which are not classifyable
by the categorization code, raising a RuntimeError exception if
any are found.
"""
a = _na.asarray(a)
if isinstance(a.type(), _na.IntegralType):
a = a.astype('Float64')
if isinstance(a.type(), _na.ComplexType):
f = _na.ieeemask(a.real, m) | _na.ieeemask(a.imag, m)
g = _na.ieeemask(a.real, BUG) | _na.ieeemask(a.imag, BUG)
else:
f = _na.ieeemask(a, m)
g = _na.ieeemask(a, BUG)
if _na.bitwise_or.reduce(_na.ravel(g)) != 0:
raise RuntimeError("Unclassifyable floating point values.")
if f.rank == 0:
f = f[()]
return f
def index(a, msk):
"""index returns the tuple of indices where the values satisfy 'mask'"""
return _na.nonzero(mask(a, msk))
def getinf(a):
"""getinf returns a tuple of indices of 'a' where the values are infinite."""
return index(a, INFINITY)
def setinf(a, value):
"""setinf sets elements of 'a' which are infinite to 'value' instead.
DEPRECATED: use 'a[getinf(a)] = value' instead.
"""
_na.put(a, getinf(a), value)
def isinf(a):
"""Idenitfies elements of 'a' which are infinity.
"""
return mask(a, INFINITY)
def getposinf(a):
"""getposinf returns a tuple of indices of 'a' where the values are +inf."""
return index(a, POS_INFINITY)
def getneginf(a):
"""getneginf returns a tuple of indices of 'a' where the values are -inf."""
return index(a, NEG_INFINITY)
def getnan(a):
"""getnan returns a tuple of indices of 'a' where the values are not-a-numbers"""
return _na.nonzero(isnan(a))
def setnan(a, value):
"""setnan sets elements of 'a' which are NANs to 'value' instead.
DEPRECATED: use 'a[getnan(a)] = value' instead.
"""
a[isnan(a)]= value
#def isnan(a):
# """Idenitfies elements of 'a' which are NANs, not a number.
# """
# return _na.isnan(a)
#
# This function has been replaced by isnan macro added to the numarray.ufunc module.
def isfinite(a):
"""Identifies elements of an array which are neither nan nor infinity."""
return _na.logical_not(isinf(a)| isnan(a))
def getbug(a):
"""getbug returns a tuple of indices of 'a' where the values are not classifyable."""
return index(a, BUG)
def test():
import doctest, ieeespecial
return doctest.testmod(ieeespecial)
| fxia22/ASM_xf | PythonD/site_python/numarray/ieeespecial.py | Python | gpl-2.0 | 9,913 |
#
# Copyright 2011-2017 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
"""
hooking - various stuff useful when writing vdsm hooks
A vm hook expects domain xml in a file named by an environment variable called
_hook_domxml. The hook may change the xml, but the "china store rule" applies -
if you break something, you own it.
before_migration_destination hook receives the xml of the domain from the
source host. The xml of the domain at the destination will differ in various
details.
Return codes:
0 - the hook ended successfully.
1 - the hook failed, other hooks should be processed.
2 - the hook failed, no further hooks should be processed.
>2 - reserved
"""
from __future__ import absolute_import
from __future__ import division
import io
import json
import os
import sys
from xml.dom import minidom
from vdsm.common import hooks
from vdsm.common.commands import execCmd
from vdsm.common.conv import tobool
# make pyflakes happy
execCmd
tobool
def read_domxml():
with io.open(os.environ['_hook_domxml'], 'rb') as f:
return minidom.parseString(f.read().decode('utf-8'))
def write_domxml(domxml):
with io.open(os.environ['_hook_domxml'], 'wb') as f:
f.write(domxml.toxml(encoding='utf-8'))
def read_json():
with open(os.environ['_hook_json']) as f:
return json.loads(f.read())
def write_json(data):
with open(os.environ['_hook_json'], 'w') as f:
f.write(json.dumps(data))
def log(message):
sys.stderr.write(message + '\n')
def exit_hook(message, return_code=2):
"""
Exit the hook with a given message, which will be printed to the standard
error stream. A newline will be printed at the end.
The default return code is 2 for signaling that an error occurred.
"""
sys.stderr.write(message + "\n")
sys.exit(return_code)
def load_vm_launch_flags_from_file(vm_id):
return hooks.load_vm_launch_flags_from_file(vm_id)
def dump_vm_launch_flags_to_file(vm_id, flags):
hooks.dump_vm_launch_flags_to_file(vm_id, flags)
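# A minimal before_vm_start hook sketch built on the helpers above; the element
# added here ('watchdog') is purely illustrative and not part of this module:
#
#     import hooking
#
#     def main():
#         domxml = hooking.read_domxml()
#         devices = domxml.getElementsByTagName('devices')[0]
#         devices.appendChild(domxml.createElement('watchdog'))
#         hooking.write_domxml(domxml)
#
#     if __name__ == '__main__':
#         main()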
| nirs/vdsm | lib/vdsm/hook/hooking.py | Python | gpl-2.0 | 2,771 |
import itertools
def split_list_by(l, n):
for i in range(len(l)/n + 1):
p = l[i*n:(i+1)*n]
if p:
yield p
class CountingIterator(object):
def __init__(self, iterator):
self.iterator = iterator
self.count = 0
def __iter__(self):
return self
def next(self):
next = self.iterator.next()
self.count += 1
return next
def isplit_list_by(il, n):
"""
Returns an iterator of sub-iterators;
each sub-iterator yields up to n elements of il
"""
il = iter(il) # to forget about length
while True:
p = CountingIterator(itertools.islice(il, n))
yield p
if p.count < n:
return
if __name__ == '__main__':
for x in isplit_list_by(xrange(400), 30):
print "------------"
for z in x:
print repr(z),
print
| onoga/toolib | toolib/util/split_list.py | Python | gpl-2.0 | 731 |
# -*- coding: utf-8 -*-
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from qgis.gui import *
from dialogBase import GdalToolsBaseDialog as BaseDialog
import GdalTools_utils as Utils
class GdalToolsBasePluginWidget:
def __init__(self, iface, commandName, helpFileBaseName = None, parent = None):
self.base = BaseDialog(parent, iface, self, self.windowTitle(), commandName)
self.connect(self.base, SIGNAL("processError(QProcess::ProcessError)"), self.onError)
self.connect(self.base, SIGNAL("processFinished(int, QProcess::ExitStatus)"), self.onFinished)
self.connect(self.base, SIGNAL("okClicked()"), self.onRun)
self.connect(self.base, SIGNAL("closeClicked()"), self.onClosing)
self.connect(self.base, SIGNAL("helpClicked()"), self.onHelp)
self.connect(self.base, SIGNAL("finished(bool)"), self.finished)
def someValueChanged(self):
self.emit(SIGNAL("valuesChanged(const QStringList &)"), self.getArguments())
def exec_(self):
self.someValueChanged()
return self.base.exec_()
def show_(self):
self.someValueChanged()
return self.base.show()
def setCommandViewerEnabled(self, enable):
self.base.setCommandViewerEnabled(enable)
self.someValueChanged()
def onRun(self):
self.base.onRun()
def onClosing(self):
self.base.onClosing()
def onHelp(self):
self.base.onHelp()
def onFinished(self, exitCode, status):
self.base.onFinished(exitCode, status)
def onError(self, error):
self.base.onError(error)
def getArguments(self):
pass
def getInputFileName(self):
pass
def getOutputFileName(self):
pass
def addLayerIntoCanvas(self, fileInfo):
pass
def finished(self, load):
outFn = self.getOutputFileName()
if outFn == None:
return
outFn = QString(outFn)
if outFn.isEmpty():
QMessageBox.warning(self, self.tr( "Warning" ), self.tr( "No output file created." ) )
return
fileInfo = QFileInfo(outFn)
if fileInfo.exists():
if load:
self.addLayerIntoCanvas(fileInfo)
QMessageBox.information(self, self.tr( "Finished" ), self.tr( "Elaboration completed." ) )
else:
QMessageBox.warning(self, self.tr( "Warning" ), self.tr( "%1 not created." ).arg( outFn ) )
# This method is useful to set up options for the command. It sets for each passed widget:
# 1. its passed signals to connect to the BasePluginWidget.someValueChanged() slot,
# 2. its enabler checkbox or enabled status,
# 3. its visibility: shown if the installed gdal version is greater than or equal to the passed version, hidden otherwise
#
# wdgts_sgnls_chk_ver_list: list of wdgts_sgnls_chk_ver
# wdgts_sgnls_chk_ver: tuple containing widgets, signals, enabler checkbox or enabled status, required version
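# A hypothetical call might look like (widget names are illustrative only):
#   self.setParamsStatus([
#       (self.inputLayerCombo, SIGNAL("currentIndexChanged(int)")),
#       (self.noDataSpin, SIGNAL("valueChanged(double)"), self.noDataCheck, "1.7.0")
#   ])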
def setParamsStatus(self, wdgts_sgnls_chk_ver_list):
if isinstance(wdgts_sgnls_chk_ver_list, list):
for wdgts_sgnls_chk_ver in wdgts_sgnls_chk_ver_list:
self.setParamsStatus(wdgts_sgnls_chk_ver)
return
wdgts_sgnls_chk_ver = wdgts_sgnls_chk_ver_list
if not isinstance(wdgts_sgnls_chk_ver, tuple):
return
if len(wdgts_sgnls_chk_ver) > 0:
wdgts = wdgts_sgnls_chk_ver[0]
else:
wdgts = None
if len(wdgts_sgnls_chk_ver) > 1:
sgnls = wdgts_sgnls_chk_ver[1]
else:
sgnls = None
if len(wdgts_sgnls_chk_ver) > 2:
chk = wdgts_sgnls_chk_ver[2]
else:
chk = None
if len(wdgts_sgnls_chk_ver) > 3:
ver = wdgts_sgnls_chk_ver[3]
else:
ver = None
if isinstance(wdgts, list):
for wdgt in wdgts:
self.setParamsStatus((wdgt, sgnls, chk, ver))
return
wdgt = wdgts
if not isinstance(wdgt, QWidget):
return
# if check version fails, disable the widget then hide both it and its enabler checkbox
if ver != None:
if not isinstance(ver, Utils.Version):
ver = Utils.Version(ver)
gdalVer = Utils.GdalConfig.version()
if gdalVer != None and ver > gdalVer:
wdgt.setVisible(False)
if isinstance(chk, QWidget):
chk.setVisible(False)
chk.setChecked(False)
sgnls = None
chk = False
# connects the passed signals to the BasePluginWidget.someValueChanged slot
if isinstance(sgnls, list):
for sgnl in sgnls:
self.setParamsStatus((wdgt, sgnl, chk))
return
sgnl = sgnls
if sgnl != None:
self.connect(wdgt, sgnl, self.someValueChanged)
# set the passed checkbox as widget enabler
if isinstance(chk, bool):
wdgt.setEnabled(chk)
if ( isinstance(chk, QAbstractButton) or isinstance(chk, QGroupBox) ) and \
chk.isCheckable():
wdgt.setEnabled(chk.isChecked())
self.connect(chk, SIGNAL("toggled(bool)"), wdgt.setEnabled)
self.connect(chk, SIGNAL("toggled(bool)"), self.someValueChanged)
| sourcepole/qgis | qgis/python/plugins/GdalTools/tools/widgetPluginBase.py | Python | gpl-2.0 | 5,083 |
#!/usr/bin/python
"""Joins a number of 3D manual markers into a 4D marker volume."""
# build-in modules
import argparse
import logging
import re
# third-party modules
import scipy
# path changes
# own modules
from medpy.core import Logger
from medpy.io import load, save
from medpy.core.exceptions import ArgumentError
from medpy.filter import relabel_non_zero
# information
__author__ = "Oskar Maier"
__version__ = "d0.1.0, 2012-06-13"
__email__ = "[email protected]"
__status__ = "Development"
__description__ = """
Prepares one or more 4D marker volumes from a number of 3D manual marker volumes. The supplied volumes
have to follow a special naming convention. Additionally requires the original 4D image.
Images in 4D cannot be visualized very well for the creation of manual markers. This
script and its counterpart allow you to deconstruct a 4D volume in various ways and to
afterwards combine the created marker volumes easily. Just select one of the following
modes, create markers for the resulting volumes and then join the markers together.
This script supports the combination of all manual marker volumes that follow the naming
convention. See the counterpart of this script, "Split for manual markers", for some more
remarks on this subject.
Some remarks on the manual markers:
The supplied marker file has to contain two or more markers, which all must have indices
between 1 and 9 (higher indices are ignored). If exactly two markers are found, the one with the
lowest index is treated as foreground (FG) and the other one as background (BG).
If more markers exist, all but the one with the highest index are treated
as FG of a distinct object. For each of these objects a 4D marker volume is created,
whereas the associated marker index is treated as FG and all others joined together into
the BG marker.
In the resulting files the index 1 will always represent the FG and the index 2 the BG.
"""
# code
def main():
args = getArguments(getParser())
# prepare logger
logger = Logger.getInstance()
if args.debug: logger.setLevel(logging.DEBUG)
elif args.verbose: logger.setLevel(logging.INFO)
# load original example volume
original_data, original_header = load(args.original)
# prepare execution
result_data = scipy.zeros(original_data.shape, scipy.uint8)
del original_data
# First step: Combine all marker images
basename_old = False
# iterate over marker images
for marker_image in args.input:
# extract information from the filename and prepare the slicer object
basename, slice_dimension, slice_number = re.match(r'.*m(.*)_d([0-9])_s([0-9]{4}).*', marker_image).groups()
slice_dimension = int(slice_dimension)
slice_number = int(slice_number)
# check basenames
if basename_old and not basename_old == basename:
logger.warning('The markers seem to come from different sources. Encountered basenames {} and {}. Continuing anyway.'.format(basename, basename_old))
basename_old = basename
# prepare slicer
slicer = [slice(None)] * result_data.ndim
slicer[slice_dimension] = slice_number
# load marker image
marker_data, _ = load(marker_image)
# add to marker image ONLY where this is zero!
result_data_subvolume = result_data[slicer]
mask_array = result_data_subvolume == 0
result_data_subvolume[mask_array] = marker_data[mask_array]
if not 0 == len(marker_data[~mask_array].nonzero()[0]):
logger.warning('The mask volume {} showed some intersection with previous mask volumes. Up to {} marker voxels might be lost.'.format(marker_image, len(marker_data[~mask_array].nonzero()[0])))
# Second step: Normalize and determine type of markers
result_data[result_data >= 10] = 0 # remove markers with indices higher than 10
#result_data = relabel_non_zero(result_data) # relabel starting from 1, 0's are kept where encountered
marker_count = len(scipy.unique(result_data))
if 3 > marker_count: # less than two markers
raise ArgumentError('A minimum of two markers must be contained in the conjunction of all marker files (excluding the neutral marker of index 0).')
# assuming here that 1 == inner marker, 2 = border marker and 3 = background marker
inner_name = args.output.format('i')
inner_data = scipy.zeros_like(result_data)
inner_data[result_data == 1] = 1
inner_data[result_data == 2] = 2
inner_data[result_data == 3] = 2
save(inner_data, inner_name, original_header, args.force)
outer_name = args.output.format('o')
outer_data = scipy.zeros_like(result_data)
outer_data[result_data == 1] = 1
outer_data[result_data == 2] = 1
outer_data[result_data == 3] = 2
save(outer_data, outer_name, original_header, args.force)
# for marker in scipy.unique(result_data)[1:-1]: # first is neutral marker (0) and last overall background marker
# output = args.output.format(marker)
# _data = scipy.zeros_like(result_data)
# _data += 2 # set all as BG markers
# _data[result_data == marker] = 1
# _data[result_data == 0] = 0
# save(_data, output, original_header, args.force)
logger.info("Successfully terminated.")
def getArguments(parser):
"Provides additional validation of the arguments collected by argparse."
args = parser.parse_args()
    if '{}' not in args.output:
        raise ArgumentError(args.output, 'The output argument string must contain the sequence "{}".')
return args
def getParser():
"Creates and returns the argparse parser object."
parser = argparse.ArgumentParser(description=__description__, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('original', help='Original volume.')
parser.add_argument('output', help='Target volume(s). Has to include the sequence "{}" in the place where the marker number should be placed.')
parser.add_argument('input', nargs='+', help='The manual marker volumes to combine.')
parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.')
parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.')
parser.add_argument('-f', dest='force', action='store_true', help='Silently override existing output images.')
return parser
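# A hypothetical invocation (all file names are placeholders); it would write the
# combined inner markers to markers_i.nii and the outer markers to markers_o.nii:
#
#   join_for_manual_markers.py original.nii 'markers_{}.nii' \
#       m01_d0_s0010.nii m01_d1_s0020.nii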
if __name__ == "__main__":
main()
| kleinfeld/medpy | bin/others/miccai12/join_for_manual_markers.py | Python | gpl-3.0 | 6,830 |
#!/usr/bin/env python
# encoding: utf-8
# andersg at 0x63.nu 2007
# Thomas Nagy 2010 (ita)
"""
Support for Perl extensions. A C/C++ compiler is required::
def options(opt):
opt.load('compiler_c perl')
def configure(conf):
conf.load('compiler_c perl')
conf.check_perl_version((5,6,0))
conf.check_perl_ext_devel()
conf.check_perl_module('Cairo')
conf.check_perl_module('Devel::PPPort 4.89')
def build(bld):
bld(
features = 'c cshlib perlext',
source = 'Mytest.xs',
target = 'Mytest',
install_path = '${ARCHDIR_PERL}/auto')
bld.install_files('${ARCHDIR_PERL}', 'Mytest.pm')
"""
import os
from waflib import Task, Options, Utils
from waflib.Configure import conf
from waflib.TaskGen import extension, feature, before_method
@before_method('apply_incpaths', 'apply_link', 'propagate_uselib_vars')
@feature('perlext')
def init_perlext(self):
"""
Change the values of *cshlib_PATTERN* and *cxxshlib_PATTERN* to remove the
*lib* prefix from library names.
"""
self.uselib = self.to_list(getattr(self, 'uselib', []))
if not 'PERLEXT' in self.uselib: self.uselib.append('PERLEXT')
self.env['cshlib_PATTERN'] = self.env['cxxshlib_PATTERN'] = self.env['perlext_PATTERN']
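# Note: on a typical Linux build this turns the default shared-library pattern
# 'lib%s.so' into something like '%s.so', since perlext_PATTERN is derived from
# '%s.' + $Config{dlext} in check_perl_ext_devel() below.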
@extension('.xs')
def xsubpp_file(self, node):
"""
Create :py:class:`waflib.Tools.perl.xsubpp` tasks to process *.xs* files
"""
outnode = node.change_ext('.c')
self.create_task('xsubpp', node, outnode)
self.source.append(outnode)
class xsubpp(Task.Task):
"""
Process *.xs* files
"""
run_str = '${PERL} ${XSUBPP} -noprototypes -typemap ${EXTUTILS_TYPEMAP} ${SRC} > ${TGT}'
color = 'BLUE'
ext_out = ['.h']
@conf
def check_perl_version(self, minver=None):
"""
Check if Perl is installed, and set the variable PERL.
	minver is supposed to be a version tuple, for example (5, 6, 0)
"""
res = True
if minver:
cver = '.'.join(map(str,minver))
else:
cver = ''
self.start_msg('Checking for minimum perl version %s' % cver)
perl = getattr(Options.options, 'perlbinary', None)
if not perl:
perl = self.find_program('perl', var='PERL')
if not perl:
self.end_msg("Perl not found", color="YELLOW")
return False
self.env['PERL'] = perl
version = self.cmd_and_log(self.env.PERL + ["-e", 'printf \"%vd\", $^V'])
if not version:
res = False
version = "Unknown"
elif not minver is None:
ver = tuple(map(int, version.split(".")))
if ver < minver:
res = False
self.end_msg(version, color=res and "GREEN" or "YELLOW")
return res
@conf
def check_perl_module(self, module):
"""
	Check if the specified perl module is installed.
	The minimum version can be specified by appending it to the module name,
	like this::
def configure(conf):
conf.check_perl_module("Some::Module 2.92")
"""
cmd = self.env.PERL + ['-e', 'use %s' % module]
self.start_msg('perl module %s' % module)
try:
r = self.cmd_and_log(cmd)
except Exception:
self.end_msg(False)
return None
self.end_msg(r or True)
return r
@conf
def check_perl_ext_devel(self):
"""
Check for configuration needed to build perl extensions.
Sets different xxx_PERLEXT variables in the environment.
	Also sets the ARCHDIR_PERL variable, which is useful as an installation path
	and can be overridden by the ``--with-perl-archdir`` option.
"""
env = self.env
perl = env.PERL
if not perl:
self.fatal('find perl first')
def cmd_perl_config(s):
return perl + ['-MConfig', '-e', 'print \"%s\"' % s]
def cfg_str(cfg):
return self.cmd_and_log(cmd_perl_config(cfg))
def cfg_lst(cfg):
return Utils.to_list(cfg_str(cfg))
env['LINKFLAGS_PERLEXT'] = cfg_lst('$Config{lddlflags}')
env['INCLUDES_PERLEXT'] = cfg_lst('$Config{archlib}/CORE')
env['CFLAGS_PERLEXT'] = cfg_lst('$Config{ccflags} $Config{cccdlflags}')
env['XSUBPP'] = cfg_lst('$Config{privlib}/ExtUtils/xsubpp$Config{exe_ext}')
env['EXTUTILS_TYPEMAP'] = cfg_lst('$Config{privlib}/ExtUtils/typemap')
if not getattr(Options.options, 'perlarchdir', None):
env['ARCHDIR_PERL'] = cfg_str('$Config{sitearch}')
else:
env['ARCHDIR_PERL'] = getattr(Options.options, 'perlarchdir')
env['perlext_PATTERN'] = '%s.' + cfg_str('$Config{dlext}')
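# For reference, the cfg_str/cfg_lst helpers above query perl's own Config module;
# cfg_str('$Config{dlext}'), for instance, runs roughly
#
#   perl -MConfig -e 'print "$Config{dlext}"'
#
# and returns its standard output.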
def options(opt):
"""
Add the ``--with-perl-archdir`` and ``--with-perl-binary`` command-line options.
"""
opt.add_option('--with-perl-binary', type='string', dest='perlbinary', help = 'Specify alternate perl binary', default=None)
opt.add_option('--with-perl-archdir', type='string', dest='perlarchdir', help = 'Specify directory where to install arch specific files', default=None)
| AltruisticControlSystems/controlnode | waflib/Tools/perl.py | Python | gpl-3.0 | 4,480 |
"""
setup.py: Basic setup wizard steps
Copyright 2014-2015, Outernet Inc.
Some rights reserved.
This software is free software licensed under the terms of GPLv3. See COPYING
file that comes with the source code, or http://www.gnu.org/licenses/gpl.txt.
"""
import pytz
from bottle import request, redirect
from ..forms.auth import RegistrationForm
from ..forms.setup import SetupLanguageForm, SetupDateTimeForm
from ..lib import auth
from ..utils.lang import UI_LOCALES, set_default_locale
from ..utils.setup import setup_wizard
def is_language_invalid():
return request.app.setup.get('language') not in UI_LOCALES
@setup_wizard.register_step('language', template='setup/step_language.tpl',
method='GET', index=1, test=is_language_invalid)
def setup_language_form():
return dict(form=SetupLanguageForm())
@setup_wizard.register_step('language', template='setup/step_language.tpl',
method='POST', index=1, test=is_language_invalid)
def setup_language():
form = SetupLanguageForm(request.forms)
if not form.is_valid():
return dict(successful=False, form=form)
lang = form.processed_data['language']
request.app.setup.append({'language': lang})
set_default_locale(lang)
return dict(successful=True, language=lang)
def has_bad_tz():
return request.app.setup.get('timezone') not in pytz.common_timezones
@setup_wizard.register_step('datetime', template='setup/step_datetime.tpl',
method='GET', index=2, test=has_bad_tz)
def setup_datetime_form():
return dict(form=SetupDateTimeForm())
@setup_wizard.register_step('datetime', template='setup/step_datetime.tpl',
method='POST', index=2, test=has_bad_tz)
def setup_datetime():
form = SetupDateTimeForm(request.forms)
if not form.is_valid():
return dict(successful=False, form=form)
timezone = form.processed_data['timezone']
request.app.setup.append({'timezone': timezone})
return dict(successful=True, timezone=timezone)
def has_no_superuser():
db = request.db.sessions
query = db.Select(sets='users', where='is_superuser = ?')
db.query(query, True)
return db.result is None
@setup_wizard.register_step('superuser', template='setup/step_superuser.tpl',
method='GET', index=3, test=has_no_superuser)
def setup_superuser_form():
return dict(form=RegistrationForm())
@setup_wizard.register_step('superuser', template='setup/step_superuser.tpl',
method='POST', index=3, test=has_no_superuser)
def setup_superuser():
form = RegistrationForm(request.forms)
if not form.is_valid():
return dict(successful=False, form=form)
auth.create_user(form.processed_data['username'],
form.processed_data['password1'],
is_superuser=True,
db=request.db.sessions,
overwrite=True)
return dict(successful=True)
def exit_wizard():
next_path = request.params.get('next', '/')
request.app.setup.wizard.exit()
redirect(next_path)
def routes(app):
return (
('setup:main', setup_wizard, ['GET', 'POST'], '/setup/', {}),
('setup:exit', exit_wizard, ['GET'], '/setup/exit/', {}),
)
| karanisverma/feature_langpop | librarian/routes/setup.py | Python | gpl-3.0 | 3,332 |
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Universidade de Aveiro, DETI/IEETA, Bioinformatics Group - http://bioinformatics.ua.pt/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.contrib import admin
# Register your models here.
| bioinformatics-ua/catalogue | emif/control_version/admin.py | Python | gpl-3.0 | 834 |
import unittest
from gtrackcore.input.wrappers.GEDependentAttributesHolder import GEDependentAttributesHolder
from gtrackcore.test.common.Asserts import assertBoundingRegions, TestCaseWithImprovedAsserts
from gtrackcore.util.CommonConstants import BINARY_MISSING_VAL
class TestGEDependentAttributesHolder(TestCaseWithImprovedAsserts):
def setUp(self):
pass
def _assertCounting(self, processedBRList, origBRTuples, origGEList):
assertBoundingRegions(GEDependentAttributesHolder, self.assertEqual, \
processedBRList, origBRTuples, origGEList)
def testCountElements(self):
self._assertCounting([], \
[], \
[])
self._assertCounting([['A', 'chr1', 0, 1000, 1]], \
[['A', 'chr1', 0, 1000, 1]], \
[['A', 'chr1', 10, 100]])
self._assertCounting([['A', 'chr1', 0, 1000, 2], ['A', 'chr2', 0, 1000, 1]], \
[['A', 'chr1', 0, 1000, 2], ['A', 'chr2', 0, 1000, 1]], \
[['A', 'chr1', 10, 100], ['A', 'chr1', 80, 120], ['A', 'chr2', 10, 100]])
self._assertCounting([['A', 'chr1', 0, 1000, 2], ['A', 'chr1', 1000, 2000, 0], ['A', 'chr2', 0, 1000, 1]], \
[['A', 'chr1', 0, 1000, 2], ['A', 'chr1', 1000, 2000, 0], ['A', 'chr2', 0, 1000, 1]], \
[['A', 'chr1', 10, 100], ['A', 'chr1', 80, 120], ['A', 'chr2', 10, 100]])
    def runTest(self):
        self.testCountElements()
if __name__ == "__main__":
#TestGEDependentAttributesHolder().debug()
unittest.main() | sveinugu/gtrackcore | gtrackcore/test/input/wrappers/TestGEDependentAttributesHolder.py | Python | gpl-3.0 | 1,731 |
#Copyright 2013 Paul Barton
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
import Quartz
from AppKit import NSEvent
from .base import PyMouseMeta, PyMouseEventMeta
pressID = [None, Quartz.kCGEventLeftMouseDown,
Quartz.kCGEventRightMouseDown, Quartz.kCGEventOtherMouseDown]
releaseID = [None, Quartz.kCGEventLeftMouseUp,
Quartz.kCGEventRightMouseUp, Quartz.kCGEventOtherMouseUp]
class PyMouse(PyMouseMeta):
def press(self, x, y, button=1):
event = Quartz.CGEventCreateMouseEvent(None,
pressID[button],
(x, y),
button - 1)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, event)
def release(self, x, y, button=1):
event = Quartz.CGEventCreateMouseEvent(None,
releaseID[button],
(x, y),
button - 1)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, event)
def move(self, x, y):
move = Quartz.CGEventCreateMouseEvent(None, Quartz.kCGEventMouseMoved, (x, y), 0)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, move)
def drag(self, x, y):
drag = Quartz.CGEventCreateMouseEvent(None, Quartz.kCGEventLeftMouseDragged, (x, y), 0)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, drag)
def position(self):
loc = NSEvent.mouseLocation()
return loc.x, Quartz.CGDisplayPixelsHigh(0) - loc.y
def screen_size(self):
return Quartz.CGDisplayPixelsWide(0), Quartz.CGDisplayPixelsHigh(0)
def scroll(self, vertical=None, horizontal=None, depth=None):
#Local submethod for generating Mac scroll events in one axis at a time
def scroll_event(y_move=0, x_move=0, z_move=0, n=1):
for _ in range(abs(n)):
scrollWheelEvent = Quartz.CGEventCreateScrollWheelEvent(
None, # No source
Quartz.kCGScrollEventUnitLine, # Unit of measurement is lines
3, # Number of wheels(dimensions)
y_move,
x_move,
z_move)
Quartz.CGEventPost(Quartz.kCGHIDEventTap, scrollWheelEvent)
#Execute vertical then horizontal then depth scrolling events
if vertical is not None:
vertical = int(vertical)
if vertical == 0: # Do nothing with 0 distance
pass
elif vertical > 0: # Scroll up if positive
scroll_event(y_move=1, n=vertical)
else: # Scroll down if negative
scroll_event(y_move=-1, n=abs(vertical))
if horizontal is not None:
horizontal = int(horizontal)
if horizontal == 0: # Do nothing with 0 distance
pass
elif horizontal > 0: # Scroll right if positive
scroll_event(x_move=1, n=horizontal)
else: # Scroll left if negative
scroll_event(x_move=-1, n=abs(horizontal))
if depth is not None:
depth = int(depth)
if depth == 0: # Do nothing with 0 distance
pass
            elif depth > 0: # Scroll "out" if positive
scroll_event(z_move=1, n=depth)
else: # Scroll "in" if negative
scroll_event(z_move=-1, n=abs(depth))
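# A minimal usage sketch (coordinates and distances are arbitrary examples):
#
#   mouse = PyMouse()
#   mouse.move(100, 200)                 # jump to (100, 200)
#   mouse.click(100, 200, 1)             # left click, provided by the PyMouseMeta base
#   mouse.scroll(vertical=-3)            # three lines down
#   mouse.scroll(horizontal=2, depth=1)  # two lines right, one unit "out"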
class PyMouseEvent(PyMouseEventMeta):
def run(self):
tap = Quartz.CGEventTapCreate(
Quartz.kCGSessionEventTap,
Quartz.kCGHeadInsertEventTap,
Quartz.kCGEventTapOptionDefault,
Quartz.CGEventMaskBit(Quartz.kCGEventMouseMoved) |
Quartz.CGEventMaskBit(Quartz.kCGEventLeftMouseDown) |
Quartz.CGEventMaskBit(Quartz.kCGEventLeftMouseUp) |
Quartz.CGEventMaskBit(Quartz.kCGEventRightMouseDown) |
Quartz.CGEventMaskBit(Quartz.kCGEventRightMouseUp) |
Quartz.CGEventMaskBit(Quartz.kCGEventOtherMouseDown) |
Quartz.CGEventMaskBit(Quartz.kCGEventOtherMouseUp),
self.handler,
None)
loopsource = Quartz.CFMachPortCreateRunLoopSource(None, tap, 0)
loop = Quartz.CFRunLoopGetCurrent()
Quartz.CFRunLoopAddSource(loop, loopsource, Quartz.kCFRunLoopDefaultMode)
Quartz.CGEventTapEnable(tap, True)
while self.state:
Quartz.CFRunLoopRunInMode(Quartz.kCFRunLoopDefaultMode, 5, False)
def handler(self, proxy, type, event, refcon):
(x, y) = Quartz.CGEventGetLocation(event)
if type in pressID:
self.click(x, y, pressID.index(type), True)
elif type in releaseID:
self.click(x, y, releaseID.index(type), False)
else:
self.move(x, y)
if self.capture:
Quartz.CGEventSetType(event, Quartz.kCGEventNull)
return event
| icomfred/mouse | app/python/pymouse/mac.py | Python | gpl-3.0 | 5,514 |
# -*- encoding: utf-8 -*-
def timespan_2_stops_during_timespan_1(
timespan_1=None,
timespan_2=None,
hold=False,
):
r'''Makes time relation indicating that `timespan_2` stops
during `timespan_1`.
::
>>> relation = timespantools.timespan_2_stops_during_timespan_1()
>>> print(format(relation))
timespantools.TimespanTimespanTimeRelation(
inequality=timespantools.CompoundInequality(
[
timespantools.SimpleInequality('timespan_1.start_offset < timespan_2.stop_offset'),
timespantools.SimpleInequality('timespan_2.stop_offset <= timespan_1.stop_offset'),
],
logical_operator='and',
),
)
Returns time relation or boolean.
'''
from abjad.tools import timespantools
inequality = timespantools.CompoundInequality([
'timespan_1.start_offset < timespan_2.stop_offset',
'timespan_2.stop_offset <= timespan_1.stop_offset'
])
time_relation = timespantools.TimespanTimespanTimeRelation(
inequality,
timespan_1=timespan_1,
timespan_2=timespan_2,
)
if time_relation.is_fully_loaded and not hold:
return time_relation()
else:
return time_relation
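# A minimal usage sketch (hypothetical offsets; assumes abjad's
# timespantools.Timespan class): with timespan_1 = Timespan(0, 10) and
# timespan_2 = Timespan(5, 10), both timespans are supplied and hold is False,
# so the relation is evaluated immediately and should return True, because
# 0 < 10 and 10 <= 10.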
| mscuthbert/abjad | abjad/tools/timespantools/timespan_2_stops_during_timespan_1.py | Python | gpl-3.0 | 1,322 |
# -*- coding: utf-8 -*-
#
# gccjit documentation build configuration file, created by
# sphinx-quickstart on Wed May 7 17:57:19 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'gccjit'
copyright = u'2014-2015, David Malcolm'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.4'
# The full version, including alpha/beta/rc tags.
release = '0.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'gccjitdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'gccjit.tex', u'gccjit Documentation',
u'David Malcolm', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'gccjit', u'gccjit Documentation',
[u'David Malcolm'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'gccjit', u'gccjit Documentation',
u'David Malcolm', 'gccjit', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| vickenty/pygccjit | doc/conf.py | Python | gpl-3.0 | 7,721 |
# Unix SMB/CIFS implementation.
# Copyright (C) Kai Blin <[email protected]> 2011
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import sys
import struct
import random
import socket
import samba.ndr as ndr
from samba import credentials, param
from samba.tests import TestCase
from samba.dcerpc import dns, dnsp, dnsserver
from samba.netcmd.dns import TXTRecord, dns_record_match, data_to_dns_record
from samba.tests.subunitrun import SubunitOptions, TestProgram
import samba.getopt as options
import optparse
parser = optparse.OptionParser("dns.py <server name> <server ip> [options]")
sambaopts = options.SambaOptions(parser)
parser.add_option_group(sambaopts)
# This timeout only has relevance when testing against Windows
# Format errors tend to return patchy responses, so a timeout is needed.
parser.add_option("--timeout", type="int", dest="timeout",
help="Specify timeout for DNS requests")
# use command line creds if available
credopts = options.CredentialsOptions(parser)
parser.add_option_group(credopts)
subunitopts = SubunitOptions(parser)
parser.add_option_group(subunitopts)
opts, args = parser.parse_args()
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)
timeout = opts.timeout
if len(args) < 2:
parser.print_usage()
sys.exit(1)
server_name = args[0]
server_ip = args[1]
creds.set_krb_forwardable(credentials.NO_KRB_FORWARDABLE)
def make_txt_record(records):
rdata_txt = dns.txt_record()
s_list = dnsp.string_list()
s_list.count = len(records)
s_list.str = records
rdata_txt.txt = s_list
return rdata_txt
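# For example, make_txt_record(['"This is a test"']) returns a dns.txt_record
# wrapping a one-element dnsp.string_list; the tests below assign such records
# to dns.res_rec().rdata when building TXT updates.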
class DNSTest(TestCase):
def setUp(self):
        global server_name, server_ip, lp, creds
super(DNSTest, self).setUp()
self.server = server_name
self.server_ip = server_ip
self.lp = lp
self.creds = creds
def errstr(self, errcode):
"Return a readable error code"
string_codes = [
"OK",
"FORMERR",
"SERVFAIL",
"NXDOMAIN",
"NOTIMP",
"REFUSED",
"YXDOMAIN",
"YXRRSET",
"NXRRSET",
"NOTAUTH",
"NOTZONE",
]
return string_codes[errcode]
def assert_dns_rcode_equals(self, packet, rcode):
"Helper function to check return code"
p_errcode = packet.operation & 0x000F
self.assertEquals(p_errcode, rcode, "Expected RCODE %s, got %s" %
(self.errstr(rcode), self.errstr(p_errcode)))
def assert_dns_opcode_equals(self, packet, opcode):
"Helper function to check opcode"
p_opcode = packet.operation & 0x7800
self.assertEquals(p_opcode, opcode, "Expected OPCODE %s, got %s" %
(opcode, p_opcode))
def make_name_packet(self, opcode, qid=None):
"Helper creating a dns.name_packet"
p = dns.name_packet()
if qid is None:
p.id = random.randint(0x0, 0xffff)
p.operation = opcode
p.questions = []
return p
def finish_name_packet(self, packet, questions):
"Helper to finalize a dns.name_packet"
packet.qdcount = len(questions)
packet.questions = questions
def make_name_question(self, name, qtype, qclass):
"Helper creating a dns.name_question"
q = dns.name_question()
q.name = name
q.question_type = qtype
q.question_class = qclass
return q
def get_dns_domain(self):
"Helper to get dns domain"
return self.creds.get_realm().lower()
def dns_transaction_udp(self, packet, host=server_ip,
dump=False, timeout=timeout):
"send a DNS query and read the reply"
s = None
try:
send_packet = ndr.ndr_pack(packet)
if dump:
print self.hexdump(send_packet)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
s.settimeout(timeout)
s.connect((host, 53))
s.send(send_packet, 0)
recv_packet = s.recv(2048, 0)
if dump:
print self.hexdump(recv_packet)
return ndr.ndr_unpack(dns.name_packet, recv_packet)
finally:
if s is not None:
s.close()
def dns_transaction_tcp(self, packet, host=server_ip,
dump=False, timeout=timeout):
"send a DNS query and read the reply"
s = None
try:
send_packet = ndr.ndr_pack(packet)
if dump:
print self.hexdump(send_packet)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
s.settimeout(timeout)
s.connect((host, 53))
tcp_packet = struct.pack('!H', len(send_packet))
tcp_packet += send_packet
s.send(tcp_packet, 0)
recv_packet = s.recv(0xffff + 2, 0)
if dump:
print self.hexdump(recv_packet)
return ndr.ndr_unpack(dns.name_packet, recv_packet[2:])
finally:
if s is not None:
s.close()
def make_txt_update(self, prefix, txt_array):
p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
updates = []
name = self.get_dns_domain()
u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
updates.append(u)
self.finish_name_packet(p, updates)
updates = []
r = dns.res_rec()
r.name = "%s.%s" % (prefix, self.get_dns_domain())
r.rr_type = dns.DNS_QTYPE_TXT
r.rr_class = dns.DNS_QCLASS_IN
r.ttl = 900
r.length = 0xffff
rdata = make_txt_record(txt_array)
r.rdata = rdata
updates.append(r)
p.nscount = len(updates)
p.nsrecs = updates
return p
def check_query_txt(self, prefix, txt_array):
name = "%s.%s" % (prefix, self.get_dns_domain())
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
q = self.make_name_question(name, dns.DNS_QTYPE_TXT, dns.DNS_QCLASS_IN)
questions.append(q)
self.finish_name_packet(p, questions)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
self.assertEquals(response.ancount, 1)
self.assertEquals(response.answers[0].rdata.txt.str, txt_array)
class TestSimpleQueries(DNSTest):
def test_one_a_query(self):
"create a query packet containing one query record"
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
name = "%s.%s" % (self.server, self.get_dns_domain())
q = self.make_name_question(name, dns.DNS_QTYPE_A, dns.DNS_QCLASS_IN)
print "asking for ", q.name
questions.append(q)
self.finish_name_packet(p, questions)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
self.assertEquals(response.ancount, 1)
self.assertEquals(response.answers[0].rdata,
self.server_ip)
def test_one_a_query_tcp(self):
"create a query packet containing one query record via TCP"
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
name = "%s.%s" % (self.server, self.get_dns_domain())
q = self.make_name_question(name, dns.DNS_QTYPE_A, dns.DNS_QCLASS_IN)
print "asking for ", q.name
questions.append(q)
self.finish_name_packet(p, questions)
response = self.dns_transaction_tcp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
self.assertEquals(response.ancount, 1)
self.assertEquals(response.answers[0].rdata,
self.server_ip)
def test_one_mx_query(self):
"create a query packet causing an empty RCODE_OK answer"
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
name = "%s.%s" % (self.server, self.get_dns_domain())
q = self.make_name_question(name, dns.DNS_QTYPE_MX, dns.DNS_QCLASS_IN)
print "asking for ", q.name
questions.append(q)
self.finish_name_packet(p, questions)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
self.assertEquals(response.ancount, 0)
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
name = "invalid-%s.%s" % (self.server, self.get_dns_domain())
q = self.make_name_question(name, dns.DNS_QTYPE_MX, dns.DNS_QCLASS_IN)
print "asking for ", q.name
questions.append(q)
self.finish_name_packet(p, questions)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_NXDOMAIN)
self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
self.assertEquals(response.ancount, 0)
def test_two_queries(self):
"create a query packet containing two query records"
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
name = "%s.%s" % (self.server, self.get_dns_domain())
q = self.make_name_question(name, dns.DNS_QTYPE_A, dns.DNS_QCLASS_IN)
questions.append(q)
name = "%s.%s" % ('bogusname', self.get_dns_domain())
q = self.make_name_question(name, dns.DNS_QTYPE_A, dns.DNS_QCLASS_IN)
questions.append(q)
self.finish_name_packet(p, questions)
try:
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_FORMERR)
except socket.timeout:
# Windows chooses not to respond to incorrectly formatted queries.
# Although this appears to be non-deterministic even for the same
            # request twice, it also appears to be based on how poorly the
# request is formatted.
pass
def test_qtype_all_query(self):
"create a QTYPE_ALL query"
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
name = "%s.%s" % (self.server, self.get_dns_domain())
q = self.make_name_question(name, dns.DNS_QTYPE_ALL, dns.DNS_QCLASS_IN)
print "asking for ", q.name
questions.append(q)
self.finish_name_packet(p, questions)
response = self.dns_transaction_udp(p)
num_answers = 1
dc_ipv6 = os.getenv('SERVER_IPV6')
if dc_ipv6 is not None:
num_answers += 1
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
self.assertEquals(response.ancount, num_answers)
self.assertEquals(response.answers[0].rdata,
self.server_ip)
if dc_ipv6 is not None:
self.assertEquals(response.answers[1].rdata, dc_ipv6)
def test_qclass_none_query(self):
"create a QCLASS_NONE query"
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
name = "%s.%s" % (self.server, self.get_dns_domain())
q = self.make_name_question(name, dns.DNS_QTYPE_ALL, dns.DNS_QCLASS_NONE)
questions.append(q)
self.finish_name_packet(p, questions)
try:
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_NOTIMP)
except socket.timeout:
# Windows chooses not to respond to incorrectly formatted queries.
# Although this appears to be non-deterministic even for the same
            # request twice, it also appears to be based on how poorly the
# request is formatted.
pass
# Only returns an authority section entry in BIND and Win DNS
    # FIXME: Enable once Samba implements this feature
# def test_soa_hostname_query(self):
# "create a SOA query for a hostname"
# p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
# questions = []
#
# name = "%s.%s" % (os.getenv('SERVER'), self.get_dns_domain())
# q = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
# questions.append(q)
#
# self.finish_name_packet(p, questions)
# response = self.dns_transaction_udp(p)
# self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
# self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
# # We don't get SOA records for single hosts
# self.assertEquals(response.ancount, 0)
def test_soa_domain_query(self):
"create a SOA query for a domain"
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
name = self.get_dns_domain()
q = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
questions.append(q)
self.finish_name_packet(p, questions)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
self.assertEquals(response.ancount, 1)
self.assertEquals(response.answers[0].rdata.minimum, 3600)
class TestDNSUpdates(DNSTest):
def test_two_updates(self):
"create two update requests"
p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
updates = []
name = "%s.%s" % (self.server, self.get_dns_domain())
u = self.make_name_question(name, dns.DNS_QTYPE_A, dns.DNS_QCLASS_IN)
updates.append(u)
name = self.get_dns_domain()
u = self.make_name_question(name, dns.DNS_QTYPE_A, dns.DNS_QCLASS_IN)
updates.append(u)
self.finish_name_packet(p, updates)
try:
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_FORMERR)
except socket.timeout:
# Windows chooses not to respond to incorrectly formatted queries.
# Although this appears to be non-deterministic even for the same
            # request twice, it also appears to be based on how poorly the
# request is formatted.
pass
def test_update_wrong_qclass(self):
"create update with DNS_QCLASS_NONE"
p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
updates = []
name = self.get_dns_domain()
u = self.make_name_question(name, dns.DNS_QTYPE_A, dns.DNS_QCLASS_NONE)
updates.append(u)
self.finish_name_packet(p, updates)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_NOTIMP)
def test_update_prereq_with_non_null_ttl(self):
"test update with a non-null TTL"
p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
updates = []
name = self.get_dns_domain()
u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
updates.append(u)
self.finish_name_packet(p, updates)
prereqs = []
r = dns.res_rec()
r.name = "%s.%s" % (self.server, self.get_dns_domain())
r.rr_type = dns.DNS_QTYPE_TXT
r.rr_class = dns.DNS_QCLASS_NONE
r.ttl = 1
r.length = 0
prereqs.append(r)
p.ancount = len(prereqs)
p.answers = prereqs
try:
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_FORMERR)
except socket.timeout:
# Windows chooses not to respond to incorrectly formatted queries.
# Although this appears to be non-deterministic even for the same
            # request twice, it also appears to be based on how poorly the
# request is formatted.
pass
def test_update_prereq_with_non_null_length(self):
"test update with a non-null length"
p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
updates = []
name = self.get_dns_domain()
u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
updates.append(u)
self.finish_name_packet(p, updates)
prereqs = []
r = dns.res_rec()
r.name = "%s.%s" % (self.server, self.get_dns_domain())
r.rr_type = dns.DNS_QTYPE_TXT
r.rr_class = dns.DNS_QCLASS_ANY
r.ttl = 0
r.length = 1
prereqs.append(r)
p.ancount = len(prereqs)
p.answers = prereqs
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_NXRRSET)
def test_update_prereq_nonexisting_name(self):
"test update with a nonexisting name"
p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
updates = []
name = self.get_dns_domain()
u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
updates.append(u)
self.finish_name_packet(p, updates)
prereqs = []
r = dns.res_rec()
r.name = "idontexist.%s" % self.get_dns_domain()
r.rr_type = dns.DNS_QTYPE_TXT
r.rr_class = dns.DNS_QCLASS_ANY
r.ttl = 0
r.length = 0
prereqs.append(r)
p.ancount = len(prereqs)
p.answers = prereqs
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_NXRRSET)
def test_update_add_txt_record(self):
"test adding records works"
prefix, txt = 'textrec', ['"This is a test"']
p = self.make_txt_update(prefix, txt)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
self.check_query_txt(prefix, txt)
def test_delete_record(self):
"Test if deleting records works"
NAME = "deleterec.%s" % self.get_dns_domain()
# First, create a record to make sure we have a record to delete.
p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
updates = []
name = self.get_dns_domain()
u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
updates.append(u)
self.finish_name_packet(p, updates)
updates = []
r = dns.res_rec()
r.name = NAME
r.rr_type = dns.DNS_QTYPE_TXT
r.rr_class = dns.DNS_QCLASS_IN
r.ttl = 900
r.length = 0xffff
rdata = make_txt_record(['"This is a test"'])
r.rdata = rdata
updates.append(r)
p.nscount = len(updates)
p.nsrecs = updates
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
# Now check the record is around
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
q = self.make_name_question(NAME, dns.DNS_QTYPE_TXT, dns.DNS_QCLASS_IN)
questions.append(q)
self.finish_name_packet(p, questions)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
# Now delete the record
p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
updates = []
name = self.get_dns_domain()
u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
updates.append(u)
self.finish_name_packet(p, updates)
updates = []
r = dns.res_rec()
r.name = NAME
r.rr_type = dns.DNS_QTYPE_TXT
r.rr_class = dns.DNS_QCLASS_NONE
r.ttl = 0
r.length = 0xffff
rdata = make_txt_record(['"This is a test"'])
r.rdata = rdata
updates.append(r)
p.nscount = len(updates)
p.nsrecs = updates
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
# And finally check it's gone
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
q = self.make_name_question(NAME, dns.DNS_QTYPE_TXT, dns.DNS_QCLASS_IN)
questions.append(q)
self.finish_name_packet(p, questions)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_NXDOMAIN)
def test_readd_record(self):
"Test if adding, deleting and then readding a records works"
NAME = "readdrec.%s" % self.get_dns_domain()
# Create the record
p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
updates = []
name = self.get_dns_domain()
u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
updates.append(u)
self.finish_name_packet(p, updates)
updates = []
r = dns.res_rec()
r.name = NAME
r.rr_type = dns.DNS_QTYPE_TXT
r.rr_class = dns.DNS_QCLASS_IN
r.ttl = 900
r.length = 0xffff
rdata = make_txt_record(['"This is a test"'])
r.rdata = rdata
updates.append(r)
p.nscount = len(updates)
p.nsrecs = updates
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
# Now check the record is around
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
q = self.make_name_question(NAME, dns.DNS_QTYPE_TXT, dns.DNS_QCLASS_IN)
questions.append(q)
self.finish_name_packet(p, questions)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
# Now delete the record
p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
updates = []
name = self.get_dns_domain()
u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
updates.append(u)
self.finish_name_packet(p, updates)
updates = []
r = dns.res_rec()
r.name = NAME
r.rr_type = dns.DNS_QTYPE_TXT
r.rr_class = dns.DNS_QCLASS_NONE
r.ttl = 0
r.length = 0xffff
rdata = make_txt_record(['"This is a test"'])
r.rdata = rdata
updates.append(r)
p.nscount = len(updates)
p.nsrecs = updates
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
# check it's gone
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
q = self.make_name_question(NAME, dns.DNS_QTYPE_TXT, dns.DNS_QCLASS_IN)
questions.append(q)
self.finish_name_packet(p, questions)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_NXDOMAIN)
# recreate the record
p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
updates = []
name = self.get_dns_domain()
u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
updates.append(u)
self.finish_name_packet(p, updates)
updates = []
r = dns.res_rec()
r.name = NAME
r.rr_type = dns.DNS_QTYPE_TXT
r.rr_class = dns.DNS_QCLASS_IN
r.ttl = 900
r.length = 0xffff
rdata = make_txt_record(['"This is a test"'])
r.rdata = rdata
updates.append(r)
p.nscount = len(updates)
p.nsrecs = updates
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
# Now check the record is around
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
q = self.make_name_question(NAME, dns.DNS_QTYPE_TXT, dns.DNS_QCLASS_IN)
questions.append(q)
self.finish_name_packet(p, questions)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
def test_update_add_mx_record(self):
"test adding MX records works"
p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
updates = []
name = self.get_dns_domain()
u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
updates.append(u)
self.finish_name_packet(p, updates)
updates = []
r = dns.res_rec()
r.name = "%s" % self.get_dns_domain()
r.rr_type = dns.DNS_QTYPE_MX
r.rr_class = dns.DNS_QCLASS_IN
r.ttl = 900
r.length = 0xffff
rdata = dns.mx_record()
rdata.preference = 10
rdata.exchange = 'mail.%s' % self.get_dns_domain()
r.rdata = rdata
updates.append(r)
p.nscount = len(updates)
p.nsrecs = updates
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
name = "%s" % self.get_dns_domain()
q = self.make_name_question(name, dns.DNS_QTYPE_MX, dns.DNS_QCLASS_IN)
questions.append(q)
self.finish_name_packet(p, questions)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
self.assertEqual(response.ancount, 1)
ans = response.answers[0]
self.assertEqual(ans.rr_type, dns.DNS_QTYPE_MX)
self.assertEqual(ans.rdata.preference, 10)
self.assertEqual(ans.rdata.exchange, 'mail.%s' % self.get_dns_domain())
class TestComplexQueries(DNSTest):
def setUp(self):
super(TestComplexQueries, self).setUp()
p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
updates = []
name = self.get_dns_domain()
u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
updates.append(u)
self.finish_name_packet(p, updates)
updates = []
r = dns.res_rec()
r.name = "cname_test.%s" % self.get_dns_domain()
r.rr_type = dns.DNS_QTYPE_CNAME
r.rr_class = dns.DNS_QCLASS_IN
r.ttl = 900
r.length = 0xffff
r.rdata = "%s.%s" % (self.server, self.get_dns_domain())
updates.append(r)
p.nscount = len(updates)
p.nsrecs = updates
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
def tearDown(self):
super(TestComplexQueries, self).tearDown()
p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
updates = []
name = self.get_dns_domain()
u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
updates.append(u)
self.finish_name_packet(p, updates)
updates = []
r = dns.res_rec()
r.name = "cname_test.%s" % self.get_dns_domain()
r.rr_type = dns.DNS_QTYPE_CNAME
r.rr_class = dns.DNS_QCLASS_NONE
r.ttl = 0
r.length = 0xffff
r.rdata = "%s.%s" % (self.server, self.get_dns_domain())
updates.append(r)
p.nscount = len(updates)
p.nsrecs = updates
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
def test_one_a_query(self):
"create a query packet containing one query record"
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
name = "cname_test.%s" % self.get_dns_domain()
q = self.make_name_question(name, dns.DNS_QTYPE_A, dns.DNS_QCLASS_IN)
print "asking for ", q.name
questions.append(q)
self.finish_name_packet(p, questions)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
self.assertEquals(response.ancount, 2)
self.assertEquals(response.answers[0].rr_type, dns.DNS_QTYPE_CNAME)
self.assertEquals(response.answers[0].rdata, "%s.%s" %
(self.server, self.get_dns_domain()))
self.assertEquals(response.answers[1].rr_type, dns.DNS_QTYPE_A)
self.assertEquals(response.answers[1].rdata,
self.server_ip)
class TestInvalidQueries(DNSTest):
def test_one_a_query(self):
"send 0 bytes follows by create a query packet containing one query record"
s = None
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
s.connect((self.server_ip, 53))
s.send("", 0)
finally:
if s is not None:
s.close()
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
name = "%s.%s" % (self.server, self.get_dns_domain())
q = self.make_name_question(name, dns.DNS_QTYPE_A, dns.DNS_QCLASS_IN)
print "asking for ", q.name
questions.append(q)
self.finish_name_packet(p, questions)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
self.assertEquals(response.ancount, 1)
self.assertEquals(response.answers[0].rdata,
self.server_ip)
def test_one_a_reply(self):
"send a reply instead of a query"
global timeout
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
name = "%s.%s" % ('fakefakefake', self.get_dns_domain())
q = self.make_name_question(name, dns.DNS_QTYPE_A, dns.DNS_QCLASS_IN)
print "asking for ", q.name
questions.append(q)
self.finish_name_packet(p, questions)
p.operation |= dns.DNS_FLAG_REPLY
s = None
try:
send_packet = ndr.ndr_pack(p)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
s.settimeout(timeout)
host=self.server_ip
s.connect((host, 53))
tcp_packet = struct.pack('!H', len(send_packet))
tcp_packet += send_packet
s.send(tcp_packet, 0)
recv_packet = s.recv(0xffff + 2, 0)
self.assertEquals(0, len(recv_packet))
except socket.timeout:
# Windows chooses not to respond to incorrectly formatted queries.
# Although this appears to be non-deterministic even for the same
            # request twice, it also appears to be based on how poorly the
# request is formatted.
pass
finally:
if s is not None:
s.close()
class TestZones(DNSTest):
def setUp(self):
super(TestZones, self).setUp()
self.zone = "test.lan"
self.rpc_conn = dnsserver.dnsserver("ncacn_ip_tcp:%s[sign]" % (self.server_ip),
self.lp, self.creds)
def tearDown(self):
super(TestZones, self).tearDown()
try:
self.delete_zone(self.zone)
except RuntimeError, (num, string):
if num != 9601: #WERR_DNS_ERROR_ZONE_DOES_NOT_EXIST
raise
def create_zone(self, zone):
zone_create = dnsserver.DNS_RPC_ZONE_CREATE_INFO_LONGHORN()
zone_create.pszZoneName = zone
zone_create.dwZoneType = dnsp.DNS_ZONE_TYPE_PRIMARY
zone_create.fAllowUpdate = dnsp.DNS_ZONE_UPDATE_SECURE
zone_create.fAging = 0
zone_create.dwDpFlags = dnsserver.DNS_DP_DOMAIN_DEFAULT
self.rpc_conn.DnssrvOperation2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
0,
self.server_ip,
None,
0,
'ZoneCreate',
dnsserver.DNSSRV_TYPEID_ZONE_CREATE,
zone_create)
def delete_zone(self, zone):
self.rpc_conn.DnssrvOperation2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
0,
self.server_ip,
zone,
0,
'DeleteZoneFromDs',
dnsserver.DNSSRV_TYPEID_NULL,
None)
def test_soa_query(self):
zone = "test.lan"
p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
questions = []
q = self.make_name_question(zone, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
questions.append(q)
self.finish_name_packet(p, questions)
response = self.dns_transaction_udp(p)
# Windows returns OK while BIND logically seems to return NXDOMAIN
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_NXDOMAIN)
self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
self.assertEquals(response.ancount, 0)
self.create_zone(zone)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
self.assertEquals(response.ancount, 1)
self.assertEquals(response.answers[0].rr_type, dns.DNS_QTYPE_SOA)
self.delete_zone(zone)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_NXDOMAIN)
self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
self.assertEquals(response.ancount, 0)
class TestRPCRoundtrip(DNSTest):
def setUp(self):
super(TestRPCRoundtrip, self).setUp()
self.rpc_conn = dnsserver.dnsserver("ncacn_ip_tcp:%s[sign]" % (self.server_ip),
self.lp, self.creds)
def tearDown(self):
super(TestRPCRoundtrip, self).tearDown()
def test_update_add_txt_rpc_to_dns(self):
prefix, txt = 'rpctextrec', ['"This is a test"']
name = "%s.%s" % (prefix, self.get_dns_domain())
rec = data_to_dns_record(dnsp.DNS_TYPE_TXT, '"\\"This is a test\\""')
add_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
add_rec_buf.rec = rec
try:
self.rpc_conn.DnssrvUpdateRecord2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
0, self.server_ip, self.get_dns_domain(),
name, add_rec_buf, None)
self.check_query_txt(prefix, txt)
finally:
self.rpc_conn.DnssrvUpdateRecord2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
0, self.server_ip, self.get_dns_domain(),
name, None, add_rec_buf)
def test_update_add_null_padded_txt_record(self):
"test adding records works"
prefix, txt = 'pad1textrec', ['"This is a test"', '', '']
p = self.make_txt_update(prefix, txt)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
self.check_query_txt(prefix, txt)
self.assertIsNotNone(dns_record_match(self.rpc_conn, self.server_ip,
self.get_dns_domain(),
"%s.%s" % (prefix, self.get_dns_domain()),
dnsp.DNS_TYPE_TXT, '"\\"This is a test\\"" "" ""'))
prefix, txt = 'pad2textrec', ['"This is a test"', '', '', 'more text']
p = self.make_txt_update(prefix, txt)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
self.check_query_txt(prefix, txt)
self.assertIsNotNone(dns_record_match(self.rpc_conn, self.server_ip,
self.get_dns_domain(),
"%s.%s" % (prefix, self.get_dns_domain()),
dnsp.DNS_TYPE_TXT, '"\\"This is a test\\"" "" "" "more text"'))
prefix, txt = 'pad3textrec', ['', '', '"This is a test"']
p = self.make_txt_update(prefix, txt)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
self.check_query_txt(prefix, txt)
self.assertIsNotNone(dns_record_match(self.rpc_conn, self.server_ip,
self.get_dns_domain(),
"%s.%s" % (prefix, self.get_dns_domain()),
dnsp.DNS_TYPE_TXT, '"" "" "\\"This is a test\\""'))
def test_update_add_padding_rpc_to_dns(self):
prefix, txt = 'pad1textrec', ['"This is a test"', '', '']
prefix = 'rpc' + prefix
name = "%s.%s" % (prefix, self.get_dns_domain())
rec = data_to_dns_record(dnsp.DNS_TYPE_TXT, '"\\"This is a test\\"" "" ""')
add_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
add_rec_buf.rec = rec
try:
self.rpc_conn.DnssrvUpdateRecord2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
0, self.server_ip, self.get_dns_domain(),
name, add_rec_buf, None)
self.check_query_txt(prefix, txt)
finally:
self.rpc_conn.DnssrvUpdateRecord2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
0, self.server_ip, self.get_dns_domain(),
name, None, add_rec_buf)
prefix, txt = 'pad2textrec', ['"This is a test"', '', '', 'more text']
prefix = 'rpc' + prefix
name = "%s.%s" % (prefix, self.get_dns_domain())
rec = data_to_dns_record(dnsp.DNS_TYPE_TXT, '"\\"This is a test\\"" "" "" "more text"')
add_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
add_rec_buf.rec = rec
try:
self.rpc_conn.DnssrvUpdateRecord2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
0, self.server_ip, self.get_dns_domain(),
name, add_rec_buf, None)
self.check_query_txt(prefix, txt)
finally:
self.rpc_conn.DnssrvUpdateRecord2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
0, self.server_ip, self.get_dns_domain(),
name, None, add_rec_buf)
prefix, txt = 'pad3textrec', ['', '', '"This is a test"']
prefix = 'rpc' + prefix
name = "%s.%s" % (prefix, self.get_dns_domain())
rec = data_to_dns_record(dnsp.DNS_TYPE_TXT, '"" "" "\\"This is a test\\""')
add_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
add_rec_buf.rec = rec
try:
self.rpc_conn.DnssrvUpdateRecord2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
0, self.server_ip, self.get_dns_domain(),
name, add_rec_buf, None)
self.check_query_txt(prefix, txt)
finally:
self.rpc_conn.DnssrvUpdateRecord2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
0, self.server_ip, self.get_dns_domain(),
name, None, add_rec_buf)
# Test is incomplete due to strlen against txt records
def test_update_add_null_char_txt_record(self):
"test adding records works"
prefix, txt = 'nulltextrec', ['NULL\x00BYTE']
p = self.make_txt_update(prefix, txt)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
self.check_query_txt(prefix, ['NULL'])
self.assertIsNotNone(dns_record_match(self.rpc_conn, self.server_ip,
self.get_dns_domain(),
"%s.%s" % (prefix, self.get_dns_domain()),
dnsp.DNS_TYPE_TXT, '"NULL"'))
prefix, txt = 'nulltextrec2', ['NULL\x00BYTE', 'NULL\x00BYTE']
p = self.make_txt_update(prefix, txt)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
self.check_query_txt(prefix, ['NULL', 'NULL'])
self.assertIsNotNone(dns_record_match(self.rpc_conn, self.server_ip,
self.get_dns_domain(),
"%s.%s" % (prefix, self.get_dns_domain()),
dnsp.DNS_TYPE_TXT, '"NULL" "NULL"'))
def test_update_add_null_char_rpc_to_dns(self):
prefix, txt = 'nulltextrec', ['NULL\x00BYTE']
prefix = 'rpc' + prefix
name = "%s.%s" % (prefix, self.get_dns_domain())
rec = data_to_dns_record(dnsp.DNS_TYPE_TXT, '"NULL"')
add_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
add_rec_buf.rec = rec
try:
self.rpc_conn.DnssrvUpdateRecord2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
0, self.server_ip, self.get_dns_domain(),
name, add_rec_buf, None)
self.check_query_txt(prefix, ['NULL'])
finally:
self.rpc_conn.DnssrvUpdateRecord2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
0, self.server_ip, self.get_dns_domain(),
name, None, add_rec_buf)
def test_update_add_hex_char_txt_record(self):
"test adding records works"
prefix, txt = 'hextextrec', ['HIGH\xFFBYTE']
p = self.make_txt_update(prefix, txt)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
self.check_query_txt(prefix, txt)
self.assertIsNotNone(dns_record_match(self.rpc_conn, self.server_ip,
self.get_dns_domain(),
"%s.%s" % (prefix, self.get_dns_domain()),
dnsp.DNS_TYPE_TXT, '"HIGH\xFFBYTE"'))
def test_update_add_hex_rpc_to_dns(self):
prefix, txt = 'hextextrec', ['HIGH\xFFBYTE']
prefix = 'rpc' + prefix
name = "%s.%s" % (prefix, self.get_dns_domain())
rec = data_to_dns_record(dnsp.DNS_TYPE_TXT, '"HIGH\xFFBYTE"')
add_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
add_rec_buf.rec = rec
try:
self.rpc_conn.DnssrvUpdateRecord2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
0, self.server_ip, self.get_dns_domain(),
name, add_rec_buf, None)
self.check_query_txt(prefix, txt)
finally:
self.rpc_conn.DnssrvUpdateRecord2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
0, self.server_ip, self.get_dns_domain(),
name, None, add_rec_buf)
def test_update_add_slash_txt_record(self):
"test adding records works"
prefix, txt = 'slashtextrec', ['Th\\=is=is a test']
p = self.make_txt_update(prefix, txt)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
self.check_query_txt(prefix, txt)
self.assertIsNotNone(dns_record_match(self.rpc_conn, self.server_ip,
self.get_dns_domain(),
"%s.%s" % (prefix, self.get_dns_domain()),
dnsp.DNS_TYPE_TXT, '"Th\\\\=is=is a test"'))
# This test fails against Windows as it eliminates slashes in RPC
# One typical use for a slash is in records like 'var=value' to
# escape '=' characters.
def test_update_add_slash_rpc_to_dns(self):
prefix, txt = 'slashtextrec', ['Th\\=is=is a test']
prefix = 'rpc' + prefix
name = "%s.%s" % (prefix, self.get_dns_domain())
rec = data_to_dns_record(dnsp.DNS_TYPE_TXT, '"Th\\\\=is=is a test"')
add_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
add_rec_buf.rec = rec
try:
self.rpc_conn.DnssrvUpdateRecord2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
0, self.server_ip, self.get_dns_domain(),
name, add_rec_buf, None)
self.check_query_txt(prefix, txt)
finally:
self.rpc_conn.DnssrvUpdateRecord2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
0, self.server_ip, self.get_dns_domain(),
name, None, add_rec_buf)
def test_update_add_two_txt_records(self):
"test adding two txt records works"
prefix, txt = 'textrec2', ['"This is a test"',
'"and this is a test, too"']
p = self.make_txt_update(prefix, txt)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
self.check_query_txt(prefix, txt)
self.assertIsNotNone(dns_record_match(self.rpc_conn, self.server_ip,
self.get_dns_domain(),
"%s.%s" % (prefix, self.get_dns_domain()),
dnsp.DNS_TYPE_TXT, '"\\"This is a test\\""' +
' "\\"and this is a test, too\\""'))
def test_update_add_two_rpc_to_dns(self):
prefix, txt = 'textrec2', ['"This is a test"',
'"and this is a test, too"']
prefix = 'rpc' + prefix
name = "%s.%s" % (prefix, self.get_dns_domain())
rec = data_to_dns_record(dnsp.DNS_TYPE_TXT,
'"\\"This is a test\\""' +
' "\\"and this is a test, too\\""')
add_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
add_rec_buf.rec = rec
try:
self.rpc_conn.DnssrvUpdateRecord2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
0, self.server_ip, self.get_dns_domain(),
name, add_rec_buf, None)
self.check_query_txt(prefix, txt)
finally:
self.rpc_conn.DnssrvUpdateRecord2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
0, self.server_ip, self.get_dns_domain(),
name, None, add_rec_buf)
def test_update_add_empty_txt_records(self):
"test adding two txt records works"
prefix, txt = 'emptytextrec', []
p = self.make_txt_update(prefix, txt)
response = self.dns_transaction_udp(p)
self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
self.check_query_txt(prefix, txt)
self.assertIsNotNone(dns_record_match(self.rpc_conn, self.server_ip,
self.get_dns_domain(),
"%s.%s" % (prefix, self.get_dns_domain()),
dnsp.DNS_TYPE_TXT, ''))
def test_update_add_empty_rpc_to_dns(self):
prefix, txt = 'rpcemptytextrec', []
name = "%s.%s" % (prefix, self.get_dns_domain())
rec = data_to_dns_record(dnsp.DNS_TYPE_TXT, '')
add_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
add_rec_buf.rec = rec
try:
self.rpc_conn.DnssrvUpdateRecord2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
0, self.server_ip, self.get_dns_domain(),
name, add_rec_buf, None)
self.check_query_txt(prefix, txt)
finally:
self.rpc_conn.DnssrvUpdateRecord2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
0, self.server_ip, self.get_dns_domain(),
name, None, add_rec_buf)
TestProgram(module=__name__, opts=subunitopts)
| Zentyal/samba | python/samba/tests/dns.py | Python | gpl-3.0 | 48,654 |
#
# The Python Imaging Library.
# $Id$
#
# base class for image file handlers
#
# history:
# 1995-09-09 fl Created
# 1996-03-11 fl Fixed load mechanism.
# 1996-04-15 fl Added pcx/xbm decoders.
# 1996-04-30 fl Added encoders.
# 1996-12-14 fl Added load helpers
# 1997-01-11 fl Use encode_to_file where possible
# 1997-08-27 fl Flush output in _save
# 1998-03-05 fl Use memory mapping for some modes
# 1999-02-04 fl Use memory mapping also for "I;16" and "I;16B"
# 1999-05-31 fl Added image parser
# 2000-10-12 fl Set readonly flag on memory-mapped images
# 2002-03-20 fl Use better messages for common decoder errors
# 2003-04-21 fl Fall back on mmap/map_buffer if map is not available
# 2003-10-30 fl Added StubImageFile class
# 2004-02-25 fl Made incremental parser more robust
#
# Copyright (c) 1997-2004 by Secret Labs AB
# Copyright (c) 1995-2004 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from . import Image
from ._util import isPath
import io
import sys
import struct
MAXBLOCK = 65536
SAFEBLOCK = 1024*1024
LOAD_TRUNCATED_IMAGES = False
ERRORS = {
-1: "image buffer overrun error",
-2: "decoding error",
-3: "unknown error",
-8: "bad configuration",
-9: "out of memory error"
}
def raise_ioerror(error):
try:
message = Image.core.getcodecstatus(error)
except AttributeError:
message = ERRORS.get(error)
if not message:
message = "decoder error %d" % error
raise IOError(message + " when reading image file")
#
# --------------------------------------------------------------------
# Helpers
def _tilesort(t):
# sort on offset
return t[2]
#
# --------------------------------------------------------------------
# ImageFile base class
class ImageFile(Image.Image):
"Base class for image file format handlers."
def __init__(self, fp=None, filename=None):
Image.Image.__init__(self)
self._min_frame = 0
self.tile = None
self.readonly = 1 # until we know better
self.decoderconfig = ()
self.decodermaxblock = MAXBLOCK
if isPath(fp):
# filename
self.fp = open(fp, "rb")
self.filename = fp
self._exclusive_fp = True
else:
# stream
self.fp = fp
self.filename = filename
# can be overridden
self._exclusive_fp = None
try:
self._open()
except (IndexError, # end of data
TypeError, # end of data (ord)
KeyError, # unsupported mode
EOFError, # got header but not the first frame
struct.error) as v:
            # close the file only if we have opened it in this constructor
if self._exclusive_fp:
self.fp.close()
raise SyntaxError(v)
if not self.mode or self.size[0] <= 0:
raise SyntaxError("not identified by this driver")
def draft(self, mode, size):
"Set draft mode"
pass
def get_format_mimetype(self):
if self.format is None:
return
return Image.MIME.get(self.format.upper())
def verify(self):
"Check file integrity"
# raise exception if something's wrong. must be called
# directly after open, and closes file when finished.
if self._exclusive_fp:
self.fp.close()
self.fp = None
def load(self):
"Load image data based on tile list"
pixel = Image.Image.load(self)
if self.tile is None:
raise IOError("cannot load this image")
if not self.tile:
return pixel
self.map = None
use_mmap = self.filename and len(self.tile) == 1
# As of pypy 2.1.0, memory mapping was failing here.
use_mmap = use_mmap and not hasattr(sys, 'pypy_version_info')
readonly = 0
# look for read/seek overrides
try:
read = self.load_read
# don't use mmap if there are custom read/seek functions
use_mmap = False
except AttributeError:
read = self.fp.read
try:
seek = self.load_seek
use_mmap = False
except AttributeError:
seek = self.fp.seek
if use_mmap:
# try memory mapping
decoder_name, extents, offset, args = self.tile[0]
if decoder_name == "raw" and len(args) >= 3 and \
args[0] == self.mode and \
args[0] in Image._MAPMODES:
try:
if hasattr(Image.core, "map"):
# use built-in mapper WIN32 only
self.map = Image.core.map(self.filename)
self.map.seek(offset)
self.im = self.map.readimage(
self.mode, self.size, args[1], args[2]
)
else:
# use mmap, if possible
import mmap
with open(self.filename, "r") as fp:
self.map = mmap.mmap(fp.fileno(), 0,
access=mmap.ACCESS_READ)
self.im = Image.core.map_buffer(
self.map, self.size, decoder_name, extents,
offset, args)
readonly = 1
# After trashing self.im,
# we might need to reload the palette data.
if self.palette:
self.palette.dirty = 1
except (AttributeError, EnvironmentError, ImportError):
self.map = None
self.load_prepare()
err_code = -3 # initialize to unknown error
if not self.map:
# sort tiles in file order
self.tile.sort(key=_tilesort)
try:
# FIXME: This is a hack to handle TIFF's JpegTables tag.
prefix = self.tile_prefix
except AttributeError:
prefix = b""
for decoder_name, extents, offset, args in self.tile:
decoder = Image._getdecoder(self.mode, decoder_name,
args, self.decoderconfig)
try:
seek(offset)
decoder.setimage(self.im, extents)
if decoder.pulls_fd:
decoder.setfd(self.fp)
status, err_code = decoder.decode(b"")
else:
b = prefix
while True:
try:
s = read(self.decodermaxblock)
except (IndexError, struct.error):
# truncated png/gif
if LOAD_TRUNCATED_IMAGES:
break
else:
raise IOError("image file is truncated")
if not s: # truncated jpeg
if LOAD_TRUNCATED_IMAGES:
break
else:
self.tile = []
raise IOError("image file is truncated "
"(%d bytes not processed)" %
len(b))
b = b + s
n, err_code = decoder.decode(b)
if n < 0:
break
b = b[n:]
finally:
# Need to cleanup here to prevent leaks
decoder.cleanup()
self.tile = []
self.readonly = readonly
self.load_end()
if self._exclusive_fp and self._close_exclusive_fp_after_loading:
self.fp.close()
self.fp = None
if not self.map and not LOAD_TRUNCATED_IMAGES and err_code < 0:
# still raised if decoder fails to return anything
raise_ioerror(err_code)
return Image.Image.load(self)
def load_prepare(self):
# create image memory if necessary
if not self.im or\
self.im.mode != self.mode or self.im.size != self.size:
self.im = Image.core.new(self.mode, self.size)
# create palette (optional)
if self.mode == "P":
Image.Image.load(self)
def load_end(self):
# may be overridden
pass
# may be defined for contained formats
# def load_seek(self, pos):
# pass
# may be defined for blocked formats (e.g. PNG)
# def load_read(self, bytes):
# pass
def _seek_check(self, frame):
if (frame < self._min_frame or
# Only check upper limit on frames if additional seek operations
# are not required to do so
(not (hasattr(self, "_n_frames") and self._n_frames is None) and
frame >= self.n_frames+self._min_frame)):
raise EOFError("attempt to seek outside sequence")
return self.tell() != frame
class StubImageFile(ImageFile):
"""
Base class for stub image loaders.
A stub loader is an image loader that can identify files of a
certain format, but relies on external code to load the file.
"""
def _open(self):
raise NotImplementedError(
"StubImageFile subclass must implement _open"
)
def load(self):
loader = self._load()
if loader is None:
raise IOError("cannot find loader for this %s file" % self.format)
image = loader.load(self)
assert image is not None
# become the other object (!)
self.__class__ = image.__class__
self.__dict__ = image.__dict__
def _load(self):
"(Hook) Find actual image loader."
raise NotImplementedError(
"StubImageFile subclass must implement _load"
)
class Parser(object):
"""
Incremental image parser. This class implements the standard
feed/close consumer interface.
"""
incremental = None
image = None
data = None
decoder = None
offset = 0
finished = 0
def reset(self):
"""
(Consumer) Reset the parser. Note that you can only call this
method immediately after you've created a parser; parser
instances cannot be reused.
"""
assert self.data is None, "cannot reuse parsers"
def feed(self, data):
"""
(Consumer) Feed data to the parser.
:param data: A string buffer.
:exception IOError: If the parser failed to parse the image file.
"""
# collect data
if self.finished:
return
if self.data is None:
self.data = data
else:
self.data = self.data + data
# parse what we have
if self.decoder:
if self.offset > 0:
# skip header
skip = min(len(self.data), self.offset)
self.data = self.data[skip:]
self.offset = self.offset - skip
if self.offset > 0 or not self.data:
return
n, e = self.decoder.decode(self.data)
if n < 0:
# end of stream
self.data = None
self.finished = 1
if e < 0:
# decoding error
self.image = None
raise_ioerror(e)
else:
# end of image
return
self.data = self.data[n:]
elif self.image:
# if we end up here with no decoder, this file cannot
# be incrementally parsed. wait until we've gotten all
# available data
pass
else:
# attempt to open this file
try:
with io.BytesIO(self.data) as fp:
im = Image.open(fp)
except IOError:
# traceback.print_exc()
pass # not enough data
else:
flag = hasattr(im, "load_seek") or hasattr(im, "load_read")
if flag or len(im.tile) != 1:
# custom load code, or multiple tiles
self.decode = None
else:
# initialize decoder
im.load_prepare()
d, e, o, a = im.tile[0]
im.tile = []
self.decoder = Image._getdecoder(
im.mode, d, a, im.decoderconfig
)
self.decoder.setimage(im.im, e)
# calculate decoder offset
self.offset = o
if self.offset <= len(self.data):
self.data = self.data[self.offset:]
self.offset = 0
self.image = im
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def close(self):
"""
(Consumer) Close the stream.
:returns: An image object.
:exception IOError: If the parser failed to parse the image file either
because it cannot be identified or cannot be
decoded.
"""
# finish decoding
if self.decoder:
# get rid of what's left in the buffers
self.feed(b"")
self.data = self.decoder = None
if not self.finished:
raise IOError("image was incomplete")
if not self.image:
raise IOError("cannot parse this image")
if self.data:
# incremental parsing not possible; reopen the file
            # now that we have all data
with io.BytesIO(self.data) as fp:
try:
self.image = Image.open(fp)
finally:
self.image.load()
return self.image
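# --- Editorial sketch (not part of the original module) --------------------
# The Parser above implements a plain feed/close consumer interface: feed()
# is called repeatedly with chunks of encoded data and close() returns the
# finished image (or raises IOError). A minimal, hypothetical driver loop
# over an already-open binary file object could look like this:
def _example_incremental_parse(fp, blocksize=SAFEBLOCK):
    """Sketch: parse an image from a file object in fixed-size blocks."""
    parser = Parser()
    while True:
        chunk = fp.read(blocksize)
        if not chunk:
            break
        parser.feed(chunk)
    # close() finishes decoding and returns an Image object
    return parser.close()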
# --------------------------------------------------------------------
def _save(im, fp, tile, bufsize=0):
"""Helper to save image based on tile list
:param im: Image object.
:param fp: File object.
:param tile: Tile list.
:param bufsize: Optional buffer size
"""
im.load()
if not hasattr(im, "encoderconfig"):
im.encoderconfig = ()
tile.sort(key=_tilesort)
# FIXME: make MAXBLOCK a configuration parameter
# It would be great if we could have the encoder specify what it needs
# But, it would need at least the image size in most cases. RawEncode is
# a tricky case.
bufsize = max(MAXBLOCK, bufsize, im.size[0] * 4) # see RawEncode.c
if fp == sys.stdout:
fp.flush()
return
try:
fh = fp.fileno()
fp.flush()
except (AttributeError, io.UnsupportedOperation):
# compress to Python file-compatible object
for e, b, o, a in tile:
e = Image._getencoder(im.mode, e, a, im.encoderconfig)
if o > 0:
fp.seek(o, 0)
e.setimage(im.im, b)
if e.pushes_fd:
e.setfd(fp)
l, s = e.encode_to_pyfd()
else:
while True:
l, s, d = e.encode(bufsize)
fp.write(d)
if s:
break
if s < 0:
raise IOError("encoder error %d when writing image file" % s)
e.cleanup()
else:
# slight speedup: compress to real file object
for e, b, o, a in tile:
e = Image._getencoder(im.mode, e, a, im.encoderconfig)
if o > 0:
fp.seek(o, 0)
e.setimage(im.im, b)
if e.pushes_fd:
e.setfd(fp)
l, s = e.encode_to_pyfd()
else:
s = e.encode_to_file(fh, bufsize)
if s < 0:
raise IOError("encoder error %d when writing image file" % s)
e.cleanup()
if hasattr(fp, "flush"):
fp.flush()
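# --- Editorial sketch (not part of the original module) --------------------
# Format plugins call _save() with a tile list of (encoder, bbox, offset,
# args) entries. A hypothetical plugin that has already written its header
# and wants the built-in "raw" encoder to emit the pixel data could do:
def _example_save_raw(im, fp, rawmode="RGB"):
    """Sketch: hand the whole image to the 'raw' encoder in a single tile."""
    # one tile covering the full image, starting at the current file position;
    # (rawmode, 0, 1) selects the raw packer with default stride/orientation
    _save(im, fp, [("raw", (0, 0) + im.size, fp.tell(), (rawmode, 0, 1))])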
def _safe_read(fp, size):
"""
Reads large blocks in a safe way. Unlike fp.read(n), this function
doesn't trust the user. If the requested size is larger than
SAFEBLOCK, the file is read block by block.
:param fp: File handle. Must implement a <b>read</b> method.
:param size: Number of bytes to read.
:returns: A string containing up to <i>size</i> bytes of data.
"""
if size <= 0:
return b""
if size <= SAFEBLOCK:
return fp.read(size)
data = []
while size > 0:
block = fp.read(min(size, SAFEBLOCK))
if not block:
break
data.append(block)
size -= len(block)
return b"".join(data)
class PyCodecState(object):
def __init__(self):
self.xsize = 0
self.ysize = 0
self.xoff = 0
self.yoff = 0
def extents(self):
return (self.xoff, self.yoff,
self.xoff+self.xsize, self.yoff+self.ysize)
class PyDecoder(object):
"""
Python implementation of a format decoder. Override this class and
add the decoding logic in the `decode` method.
See :ref:`Writing Your Own File Decoder in Python<file-decoders-py>`
"""
_pulls_fd = False
def __init__(self, mode, *args):
self.im = None
self.state = PyCodecState()
self.fd = None
self.mode = mode
self.init(args)
def init(self, args):
"""
Override to perform decoder specific initialization
:param args: Array of args items from the tile entry
:returns: None
"""
self.args = args
@property
def pulls_fd(self):
return self._pulls_fd
def decode(self, buffer):
"""
Override to perform the decoding process.
:param buffer: A bytes object with the data to be decoded.
            If `pulls_fd` is set, then `buffer` will be empty and `self.fd`
will be set.
:returns: A tuple of (bytes consumed, errcode).
If finished with decoding return <0 for the bytes consumed.
Err codes are from `ERRORS`
"""
raise NotImplementedError()
def cleanup(self):
"""
Override to perform decoder specific cleanup
:returns: None
"""
pass
def setfd(self, fd):
"""
Called from ImageFile to set the python file-like object
:param fd: A python file-like object
:returns: None
"""
self.fd = fd
def setimage(self, im, extents=None):
"""
Called from ImageFile to set the core output image for the decoder
:param im: A core image object
:param extents: a 4 tuple of (x0, y0, x1, y1) defining the rectangle
for this tile
:returns: None
"""
# following c code
self.im = im
if extents:
(x0, y0, x1, y1) = extents
else:
(x0, y0, x1, y1) = (0, 0, 0, 0)
if x0 == 0 and x1 == 0:
self.state.xsize, self.state.ysize = self.im.size
else:
self.state.xoff = x0
self.state.yoff = y0
self.state.xsize = x1 - x0
self.state.ysize = y1 - y0
if self.state.xsize <= 0 or self.state.ysize <= 0:
raise ValueError("Size cannot be negative")
if (self.state.xsize + self.state.xoff > self.im.size[0] or
self.state.ysize + self.state.yoff > self.im.size[1]):
raise ValueError("Tile cannot extend outside image")
def set_as_raw(self, data, rawmode=None):
"""
Convenience method to set the internal image from a stream of raw data
:param data: Bytes to be set
:param rawmode: The rawmode to be used for the decoder.
If not specified, it will default to the mode of the image
:returns: None
"""
if not rawmode:
rawmode = self.mode
        d = Image._getdecoder(self.mode, 'raw', (rawmode,))
d.setimage(self.im, self.state.extents())
s = d.decode(data)
if s[0] >= 0:
raise ValueError("not enough image data")
if s[1] != 0:
raise ValueError("cannot decode image data")
| kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/PIL/ImageFile.py | Python | gpl-3.0 | 20,762 |
# coding=utf-8
"""
InaSAFE Disaster risk assessment tool developed by AusAid -
**QGIS plugin implementation.**
Contact : [email protected]
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
.. note:: This source code was copied from the 'postgis viewer' application
with original authors:
Copyright (c) 2010 by Ivan Mincik, [email protected]
Copyright (c) 2011 German Carrillo, [email protected]
"""
__author__ = '[email protected]'
__revision__ = '$Format:%H$'
__date__ = '10/01/2011'
__copyright__ = (
'Copyright (c) 2010 by Ivan Mincik, [email protected] and '
    'Copyright (c) 2011 German Carrillo, [email protected] and '
'Copyright (c) 2014 Tim Sutton, [email protected]'
)
import logging
from qgis.core import QgsMapLayerRegistry, QGis, QgsMapLayer
# pylint: disable=no-name-in-module
from qgis.gui import (
QgsMapCanvasLayer,
QgsMessageBar)
from PyQt4.QtCore import QObject, pyqtSlot, pyqtSignal
from safe.gis.qgis_legend_interface import QgisLegend
LOGGER = logging.getLogger('InaSAFE')
# noinspection PyMethodMayBeStatic,PyPep8Naming
class QgisInterface(QObject):
"""Class to expose qgis objects and functions to plugins.
This class is here for enabling us to run unit tests only,
so most methods are simply stubs.
"""
currentLayerChanged = pyqtSignal(QgsMapCanvasLayer)
layerSavedAs = pyqtSignal(QgsMapLayer, str)
def __init__(self, canvas):
"""Constructor
:param canvas:
"""
QObject.__init__(self)
self.canvas = canvas
self.legend = QgisLegend(canvas)
self.message_bar = QgsMessageBar(None)
# Set up slots so we can mimic the behaviour of QGIS when layers
# are added.
LOGGER.debug('Initialising canvas...')
# noinspection PyArgumentList
QgsMapLayerRegistry.instance().layersAdded.connect(self.addLayers)
# noinspection PyArgumentList
QgsMapLayerRegistry.instance().layerWasAdded.connect(self.addLayer)
# noinspection PyArgumentList
QgsMapLayerRegistry.instance().removeAll.connect(self.removeAllLayers)
# For processing module
self.destCrs = None
# For keeping track of which layer is active in the legend.
self.active_layer = None
# In the next section of code, we are going to do some monkey patching
# to make the QGIS processing framework think that this mock QGIS IFACE
# instance is the actual one. It will also ensure that the processing
# algorithms are nicely loaded and available for use.
# Since QGIS > 2.0, the module is moved from QGisLayers to dataobjects
# pylint: disable=F0401, E0611
if QGis.QGIS_VERSION_INT > 20001:
# noinspection PyUnresolvedReferences
from processing.tools import dataobjects
else:
# noinspection PyUnresolvedReferences
from processing.core import QGisLayers as dataobjects
# noinspection PyUnresolvedReferences
import processing
# noinspection PyUnresolvedReferences
from processing.core.Processing import Processing
# pylint: enable=F0401, E0611
processing.classFactory(self)
        # We create our own getAlgorithm function below which we will monkey
        # patch into the Processing class in QGIS in order to ensure that the
# Processing.initialize() call is made before asking for an alg.
@staticmethod
def mock_getAlgorithm(name):
"""
Modified version of the original getAlgorithm function.
:param name: Name of the algorithm to load.
:type name: str
:return: An algorithm concrete class.
:rtype: QgsAlgorithm ?
"""
Processing.initialize()
for provider in Processing.algs.values():
if name in provider:
return provider[name]
return None
# Now we let the monkey loose!
Processing.getAlgorithm = mock_getAlgorithm
# We also need to make dataobjects think that this iface is 'the one'
# Note. the placement here (after the getAlgorithm monkey patch above)
# is significant, so don't move it!
dataobjects.iface = self
def __getattr__(self, *args, **kwargs):
# It's for processing module
def dummy(*a, **kwa):
_ = a, kwa
return QgisInterface(self.canvas)
return dummy
def __iter__(self):
# It's for processing module
return self
def next(self):
# It's for processing module
raise StopIteration
def layers(self):
# It's for processing module
# simulate iface.legendInterface().layers()
return QgsMapLayerRegistry.instance().mapLayers().values()
@pyqtSlot('QStringList')
def addLayers(self, layers):
"""Handle layers being added to the registry so they show up in canvas.
:param layers: list<QgsMapLayer> list of map layers that were added
.. note:: The QgsInterface api does not include this method,
it is added here as a helper to facilitate testing.
"""
# LOGGER.debug('addLayers called on qgis_interface')
# LOGGER.debug('Number of layers being added: %s' % len(layers))
# LOGGER.debug('Layer Count Before: %s' % len(self.canvas.layers()))
current_layers = self.canvas.layers()
final_layers = []
# We need to keep the record of the registered layers on our canvas!
registered_layers = []
for layer in current_layers:
final_layers.append(QgsMapCanvasLayer(layer))
registered_layers.append(layer.id())
for layer in layers:
if layer.id() not in registered_layers:
final_layers.append(QgsMapCanvasLayer(layer))
self.canvas.setLayerSet(final_layers)
# LOGGER.debug('Layer Count After: %s' % len(self.canvas.layers()))
@pyqtSlot('QgsMapLayer')
def addLayer(self, layer):
"""Handle a layer being added to the registry so it shows up in canvas.
:param layer: list<QgsMapLayer> list of map layers that were added
.. note: The QgsInterface api does not include this method, it is added
here as a helper to facilitate testing.
.. note: The addLayer method was deprecated in QGIS 1.8 so you should
not need this method much.
"""
pass
@pyqtSlot()
    def removeAllLayers(self):
"""Remove layers from the canvas before they get deleted.
.. note:: This is NOT part of the QGisInterface API but is needed
to support QgsMapLayerRegistry.removeAllLayers().
"""
self.canvas.setLayerSet([])
self.active_layer = None
def newProject(self):
"""Create new project."""
# noinspection PyArgumentList
QgsMapLayerRegistry.instance().removeAllMapLayers()
# ---------------- API Mock for QgsInterface follows -------------------
def zoomFull(self):
"""Zoom to the map full extent."""
pass
def zoomToPrevious(self):
"""Zoom to previous view extent."""
pass
def zoomToNext(self):
"""Zoom to next view extent."""
pass
def zoomToActiveLayer(self):
"""Zoom to extent of active layer."""
pass
def addVectorLayer(self, path, base_name, provider_key):
"""Add a vector layer.
:param path: Path to layer.
:type path: str
:param base_name: Base name for layer.
:type base_name: str
:param provider_key: Provider key e.g. 'ogr'
:type provider_key: str
"""
pass
def addRasterLayer(self, path, base_name):
"""Add a raster layer given a raster layer file name
:param path: Path to layer.
:type path: str
:param base_name: Base name for layer.
:type base_name: str
"""
pass
def setActiveLayer(self, layer):
"""Set the currently active layer in the legend.
:param layer: Layer to make active.
:type layer: QgsMapLayer, QgsVectorLayer, QgsRasterLayer
"""
self.active_layer = layer
def activeLayer(self):
"""Get pointer to the active layer (layer selected in the legend)."""
if self.active_layer is not None:
return self.active_layer
else:
return None
def addToolBarIcon(self, action):
"""Add an icon to the plugins toolbar.
:param action: Action to add to the toolbar.
:type action: QAction
"""
pass
def removeToolBarIcon(self, action):
"""Remove an action (icon) from the plugin toolbar.
:param action: Action to add to the toolbar.
:type action: QAction
"""
pass
def addToolBar(self, name):
"""Add toolbar with specified name.
:param name: Name for the toolbar.
:type name: str
"""
pass
def mapCanvas(self):
"""Return a pointer to the map canvas."""
return self.canvas
def mainWindow(self):
"""Return a pointer to the main window.
In case of QGIS it returns an instance of QgisApp.
"""
pass
def addDockWidget(self, area, dock_widget):
"""Add a dock widget to the main window.
:param area: Where in the ui the dock should be placed.
:type area:
:param dock_widget: A dock widget to add to the UI.
:type dock_widget: QDockWidget
"""
pass
def legendInterface(self):
"""Get the legend.
See also discussion at:
https://github.com/AIFDR/inasafe/pull/924/
Implementation added for version 3.2.
"""
return self.legend
def messageBar(self):
"""Get the message bar.
.. versionadded:: 3.2
:returns: A QGIS message bar instance
:rtype: QgsMessageBar
"""
return self.message_bar
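# --- Editorial sketch (not part of the original module) ------------------------
# QgisInterface is a test double: it wraps a map canvas and is passed wherever a
# real QGIS iface object is expected (including the processing framework patched
# in the constructor above). A hypothetical test fixture might look like this:
def _example_make_fake_iface():
    """Sketch: build a canvas-backed mock iface for unit tests."""
    from qgis.gui import QgsMapCanvas
    canvas = QgsMapCanvas()  # assumes the default constructor is acceptable here
    return canvas, QgisInterface(canvas)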
| MariaSolovyeva/inasafe | safe/gis/qgis_interface.py | Python | gpl-3.0 | 10,359 |
from river.models.proceeding import Proceeding
from river.services.proceeding import ProceedingService
__author__ = 'ahmetdal'
# noinspection PyClassHasNoInit
class ObjectService:
@staticmethod
def register_object(workflow_object, field):
proceedings = Proceeding.objects.filter(workflow_object=workflow_object, field=field)
if proceedings.count() == 0:
ProceedingService.init_proceedings(workflow_object, field)
return {'state': getattr(workflow_object, field).details()}
@staticmethod
def get_objects_waiting_for_approval(content_type, field, user):
object_pks = []
WorkflowObjectClass = content_type.model_class()
for workflow_object in WorkflowObjectClass.objects.all():
current_state = getattr(workflow_object, field)
proceedings = ProceedingService.get_available_proceedings(workflow_object, field, [current_state], user=user)
if proceedings.count():
object_pks.append(workflow_object.pk)
return WorkflowObjectClass.objects.filter(pk__in=object_pks)
@staticmethod
def get_object_count_waiting_for_approval(content_type, field, user):
return ObjectService.get_objects_waiting_for_approval(content_type, field, user).count()
@staticmethod
def is_workflow_completed(workflow_object, field):
current_state = getattr(workflow_object, field)
return Proceeding.objects.filter(workflow_object=workflow_object, meta__transition__source_state=current_state).count() == 0
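# --- Editorial sketch (not part of the original module) ------------------------
# ObjectService works on a content type plus the name of the workflow state
# field. The model, app label and field names below are illustrative
# assumptions only, not part of django-river:
def _example_pending_objects_for(user):
    """Sketch: list objects of a hypothetical 'ticket' model awaiting a user's approval."""
    from django.contrib.contenttypes.models import ContentType
    content_type = ContentType.objects.get(app_label='helpdesk', model='ticket')
    return ObjectService.get_objects_waiting_for_approval(content_type, 'status', user)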
| mstzn36/django-river | river/services/object.py | Python | gpl-3.0 | 1,550 |
"""
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import urllib
import pycurl
from io import StringIO,BytesIO
import re
import random
import subprocess
from subprocess import check_output
from bs4 import BeautifulSoup
import os
import os.path
from subprocess import check_output
import shutil
import json
from player_functions import ccurl,naturallysorted
class Shoutcast():
def __init__(self,tmp):
self.hdr = 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:45.0) Gecko/20100101 Firefox/45.0'
self.tmp_dir = tmp
def getOptions(self):
criteria = ['History','Genre','Anime','JPOP']
return criteria
def getFinalUrl(self,name,epn,mir,quality):
return epn
def process_page(self,content):
content = re.sub(r'\\',"-",content)
#print(content)
#f = open('/tmp/tmp.txt','w')
#f.write(content)
#f.close()
try:
l = json.loads(content)
except:
o = re.findall('{[^}]*}',content)
l = []
for i in o:
print(i)
try:
j = json.loads(i)
print(j['ID'],j['Name'])
l.append(j)
except:
pass
print('----------------------error---------------')
s = []
for i in l:
try:
#print(i['ID'],i['Name'],i['Bitrate'],i['Listeners'])
s.append(i['Name'].replace('/','-')+' id='+str(i['ID'])+'|Bitrate='+str(i['Bitrate'])+'|Listeners='+str(i['Listeners']))
except:
pass
return s
def search(self,name):
strname = str(name)
print(strname)
if name.lower() == 'tv':
m = self.getCompleteList(name.upper(),1)
else:
url = "https://www.shoutcast.com/Home/BrowseByGenre"
#content = ccurl(url,name,1)
post = "genrename="+name
content = ccurl(url+'#'+'-d'+'#'+post)
m = self.process_page(content)
return m
def getCompleteList(self,opt,genre_num):
if opt == 'Genre' and genre_num == 0:
url = "http://www.shoutcast.com/"
#content = ccurl(url,"",1)
content = ccurl(url)
m = re.findall('Genre[^"]name[^"]*',content)
#print m
j = 0
for i in m:
m[j] = re.sub('Genre[^"]name=','',i)
m[j] = re.sub("[+]|%20",' ',m[j])
j = j+1
m.sort()
print(m)
#n = ["History","Genre","TV"]
n = ["History","Genre"]
m = n + m
elif opt == 'History':
            m = []  # nothing to fetch for the history option; avoids returning an undefined name
elif opt == 'TV':
name = []
track = []
aformat = []
listeners = []
bitrate = []
idr = []
url = "http://thugie.nl/streams.php"
#content = ccurl(url,"",4)
content = ccurl(url)
soup = BeautifulSoup(content,'lxml')
tmp = soup.prettify()
#m = soup.findAll('div',{'class':'boxcenterdir fontstyle'})
#soup = BeautifulSoup(tmp,'lxml')
m = []
links = soup.findAll('div',{'class':'dirOuterDiv1 clearFix'})
for i in links:
j = i.findAll('a')
q = i.find_next('h2')
g = i.find_next('h4')
z = g.find_next('h4')
for k in j:
idr.append(k['href'].split('=')[-1][:-1])
l = i.text
n = re.findall('Station:[^"]*',l)
p = re.sub('Playing','\nPlaying',n[0])
p=p.rstrip()
a = p.split('\n')
name.append(a[0].split(":")[1])
track.append(a[1].split(':')[1])
aformat.append(q.text)
listeners.append(g.text)
bitrate.append(z.text)
for i in range(len(idr)):
m.append(name[i].strip().replace('/','-')+'-TV id='+str(idr[i]).replace('\\','')+'\nBitrate='+str(bitrate[i])+'\nListeners='+str(listeners[i])+'\n')
else:
url = "https://www.shoutcast.com/Home/BrowseByGenre"
#content = ccurl(url,opt,1)
post = 'genrename='+opt
content = ccurl(url+'#'+'-d'+'#'+post)
m = self.process_page(content)
print(opt,url)
return m
def getEpnList(self,name,opt,depth_list,extra_info,siteName,category):
name_id = (re.search('id=[^|]*',extra_info).group()).split('=')[1]
#nm = name.rsplit('-',1)
#name = nm[0]
#name_id = nm[1]
#name = nm[0]
file_arr = []
id_station = int(name_id)
station_url = ''
if opt == "TV" or '-TV' in name:
url = "http://thugie.nl/streams.php?tunein="+str(id_station)
#content = ccurl(url,'',1)
content = ccurl(url)
final = re.findall('http://[^\n]*',content)
station_url = final[0].rstrip()
if 'stream.nsv' not in station_url:
#print "Hello" + station_url
station_url = str(station_url.rstrip()+";stream.nsv")
else:
url = "https://www.shoutcast.com/Player/GetStreamUrl"
#content = ccurl(url,id_station,2)
post = 'station='+str(id_station)
content = ccurl(url+'#-d#'+post)
m = re.findall('http://[^"]*',content)
station_url = str(m[0])
file_arr.append(name+' '+station_url+' '+'NONE')
#file_arr.append('No.jpg')
#file_arr.append('Summary Not Available')
record_history = True
return (file_arr,'Summary Not Available','No.jpg',record_history,depth_list)
def getNextPage(self,opt,pgn,genre_num,name):
m = []
return m
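# --- Editorial sketch (not part of the original plugin) ------------------------
# The plugin is driven through getCompleteList()/search()/getEpnList(). A
# minimal, hypothetical caller (the tmp directory is an assumption) could be:
def _example_list_genre_stations(genre='JPOP', tmp_dir='/tmp'):
    """Sketch: fetch station entries for a genre as '<name> id=<ID>|Bitrate=..|Listeners=..' strings."""
    site = Shoutcast(tmp_dir)
    return site.getCompleteList(genre, 1)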
| abhishek-archlinux/AnimeWatch | AnimeWatch-PyQt5/Plugins/Shoutcast.py | Python | gpl-3.0 | 5,280 |
from setuptools import setup
if __name__ == "__main__":
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except(IOError, ImportError):
long_description = open('README.md').read()
__version__ = "0.8.1"
base_url = "https://github.com/xialirong/prefpy"
setup(name="prefpy",
version=__version__,
description="Rank aggregation algorithms",
long_description=long_description,
classifiers=[
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering"
],
url=base_url,
download_url="{0}/archive/v{1}.tar.gz".format(base_url, __version__),
author="Peter Piech",
license="GPL-3",
packages=["prefpy"],
zip_safe=False)
| PrefPy/prefpy | setup.py | Python | gpl-3.0 | 1,104 |
# "Bubble Math"
# Developed by RL Vision (www.rlvision.com)
# Source code licensed under GPLv3 (see LICENSE.txt)
# Dev Env: Portable Python 2.7.5.1 (Python2/Windows/Pygame/PyScripter)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Text.py
# This module makes it easier to draw text in pygame, requiring only one
# function call. The class TextObject is used internally to cache surfaces
# with rendered text for performance.
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import pygame
from pygame.locals import *
# Bubble Math imports
from gameflow import *
# Text align constants
ALIGN_CENTER = 0
ALIGN_LEFT = 1
ALIGN_RIGHT = 2
__textCache = {}
def clearTextCache():
""" Frees memory by clearing the text cache """
    global __textCache  # rebind the module-level cache rather than a local name
    __textCache = {}
def drawText(surface, text, x, y, size = 28, color = (0, 0, 0), font = "freesansbold.ttf", align = ALIGN_CENTER):
""" Draws a string onto a pygame surface i a single function call.
Strings are cached for better performance.
Should not be used to draw always changing strings since
it would use up surface memory quickly! """
try:
        # cache key combines text, size and color so different styles don't collide
        key = text + str(size) + str(color)
        # if not found in cache, create as a TextObject and store in cache
        if key not in __textCache:
            __textCache[key] = TextObject(text, size, color, font)
        # recall text from cache and set alignment
        t = __textCache[key]
t.rect.centery = y
if align == ALIGN_CENTER:
t.rect.centerx = x
elif align == ALIGN_LEFT:
t.rect.left = x
elif align == ALIGN_RIGHT:
t.rect.right = x
# draw text onto targe surface
surface.blit(t.image, t.rect)
except:
pass
class TextObject():
""" Represents a string, pre-rendered onto a surface, ready to
draw with pygame """
def __init__(self, text, size, color, font):
try:
fontObj = pygame.font.Font(font, int(size))
self.image = fontObj.render(text, True, color)
self.rect = self.image.get_rect()
except:
print("Error initializing text object: " + pygame.get_error())
| ycaihua/BubbleMath | text.py | Python | gpl-3.0 | 2,074 |
from ..s3c import HTTPError
class B2Error(HTTPError):
'''
Represents an error returned by Backblaze B2 API call
For possible codes, see https://www.backblaze.com/b2/docs/calling.html
'''
def __init__(self, status, code, message, headers=None):
super().__init__(status, message, headers)
self.code = code
# Force 1s waiting time before retry
if not self.retry_after:
self.retry_after = 1
def __str__(self):
return '%s : %s - %s' % (self.status, self.code, self.msg)
class BadDigestError(B2Error): pass
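# --- Editorial sketch (not part of the original module) ------------------------
# B2Error carries the HTTP status, the B2 error code and a retry hint. A
# hypothetical caller could turn it into a log line like this:
def _example_describe_error(exc):
    """Sketch: format a B2Error for logging, including the retry hint."""
    return 'B2 call failed (%s), retry in %s s' % (exc, exc.retry_after)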
| s3ql/main | src/s3ql/backends/b2/b2_error.py | Python | gpl-3.0 | 584 |
# Copyright 2012 Dan Smith <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from chirp.drivers import id31
from chirp import directory, bitwise
MEM_FORMAT = """
struct {
u24 freq;
u16 offset;
u16 rtone:6,
ctone:6,
unknown2:1,
mode:3;
u8 dtcs;
u8 tune_step:4,
unknown5:4;
u8 unknown4;
u8 tmode:4,
duplex:2,
dtcs_polarity:2;
char name[16];
u8 unknown13;
u8 urcall[7];
u8 rpt1call[7];
u8 rpt2call[7];
} memory[500];
#seekto 0x6A40;
u8 used_flags[70];
#seekto 0x6A86;
u8 skip_flags[69];
#seekto 0x6ACB;
u8 pskp_flags[69];
#seekto 0x6B40;
struct {
u8 bank;
u8 index;
} banks[500];
#seekto 0x6FD0;
struct {
char name[16];
} bank_names[26];
#seekto 0xA8C0;
struct {
u24 freq;
u16 offset;
u8 unknown1[3];
u8 call[7];
char name[16];
char subname[8];
u8 unknown3[10];
} repeaters[750];
#seekto 0x1384E;
struct {
u8 call[7];
} rptcall[750];
#seekto 0x14E60;
struct {
char call[8];
char tag[4];
} mycall[6];
#seekto 0x14EA8;
struct {
char call[8];
} urcall[200];
"""
LOG = logging.getLogger(__name__)
@directory.register
class ID51Radio(id31.ID31Radio):
"""Icom ID-51"""
MODEL = "ID-51"
_memsize = 0x1FB40
_model = "\x33\x90\x00\x01"
_endframe = "Icom Inc\x2E\x44\x41"
_ranges = [(0x00000, 0x1FB40, 32)]
MODES = {0: "FM", 1: "NFM", 3: "AM", 5: "DV"}
@classmethod
def match_model(cls, filedata, filename):
"""Given contents of a stored file (@filedata), return True if
this radio driver handles the represented model"""
# The default check for ICOM is just to check memory size
# Since the ID-51 and ID-51 Plus/Anniversary have exactly
# the same memory size, we need to do a more detailed check.
if len(filedata) == cls._memsize:
LOG.debug('File has correct memory size, '
'checking 20 bytes at offset 0x1AF40')
snip = filedata[0x1AF40:0x1AF60]
if snip == ('\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF'
'\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF'
'\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF'
'\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF'):
LOG.debug('bytes matched ID-51 Signature')
return True
else:
LOG.debug('bytes did not match ID-51 Signature')
return False
def get_features(self):
rf = super(ID51Radio, self).get_features()
rf.valid_bands = [(108000000, 174000000), (400000000, 479000000)]
return rf
def process_mmap(self):
self._memobj = bitwise.parse(MEM_FORMAT, self._mmap)
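# --- Editorial sketch (not part of the original driver) ------------------------
# bitwise.parse() exposes MEM_FORMAT as attribute-style structures, so a parsed
# memory image can be inspected roughly like this (field names come straight
# from MEM_FORMAT; how the radio object is constructed is an assumption):
#
#     radio = ID51Radio(pipe_or_file)
#     radio.process_mmap()
#     first = radio._memobj.memory[0]
#     print(first.freq, first.name)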
| mach327/chirp_fork | chirp/drivers/id51.py | Python | gpl-3.0 | 3,283 |
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from six.moves import StringIO
import base64
import json
import os
import random
import stat
import sys
import tempfile
import time
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.executor.module_common import modify_module
from ansible.parsing.utils.jsonify import jsonify
from ansible.utils.unicode import to_bytes
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionBase:
'''
This class is the base class for all action plugins, and defines
code common to all actions. The base class handles the connection
by putting/getting files and executing commands based on the current
action in use.
'''
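    # --- Editorial sketch (not part of the original class) ---
    # Concrete action plugins subclass ActionBase and implement run(); a
    # typical (hypothetical) plugin simply delegates to _execute_module(), e.g.
    #
    #     class ActionModule(ActionBase):
    #         def run(self, tmp=None, task_vars=dict()):
    #             return self._execute_module(tmp=tmp, task_vars=task_vars)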
def __init__(self, task, connection, play_context, loader, templar, shared_loader_obj):
self._task = task
self._connection = connection
self._play_context = play_context
self._loader = loader
self._templar = templar
self._shared_loader_obj = shared_loader_obj
self._display = display
self._supports_check_mode = True
def _configure_module(self, module_name, module_args, task_vars=dict()):
'''
Handles the loading and templating of the module code through the
modify_module() function.
'''
# Search module path(s) for named module.
module_suffixes = getattr(self._connection, 'default_suffixes', None)
# Check to determine if PowerShell modules are supported, and apply
# some fixes (hacks) to module name + args.
if module_suffixes and '.ps1' in module_suffixes:
# Use Windows versions of stat/file/copy modules when called from
# within other action plugins.
if module_name in ('stat', 'file', 'copy') and self._task.action != module_name:
module_name = 'win_%s' % module_name
# Remove extra quotes surrounding path parameters before sending to module.
if module_name in ('win_stat', 'win_file', 'win_copy', 'slurp') and module_args and hasattr(self._connection._shell, '_unquote'):
for key in ('src', 'dest', 'path'):
if key in module_args:
module_args[key] = self._connection._shell._unquote(module_args[key])
module_path = self._shared_loader_obj.module_loader.find_plugin(module_name, module_suffixes)
if module_path is None:
# Use Windows version of ping module to check module paths when
# using a connection that supports .ps1 suffixes.
if module_suffixes and '.ps1' in module_suffixes:
ping_module = 'win_ping'
else:
ping_module = 'ping'
module_path2 = self._shared_loader_obj.module_loader.find_plugin(ping_module, module_suffixes)
if module_path2 is not None:
raise AnsibleError("The module %s was not found in configured module paths" % (module_name))
else:
raise AnsibleError("The module %s was not found in configured module paths. " \
"Additionally, core modules are missing. If this is a checkout, " \
"run 'git submodule update --init --recursive' to correct this problem." % (module_name))
# insert shared code and arguments into the module
(module_data, module_style, module_shebang) = modify_module(module_path, module_args, task_vars=task_vars)
return (module_style, module_shebang, module_data)
def _compute_environment_string(self):
'''
Builds the environment string to be used when executing the remote task.
'''
final_environment = dict()
if self._task.environment is not None:
environments = self._task.environment
if not isinstance(environments, list):
environments = [ environments ]
for environment in environments:
if environment is None:
continue
if not isinstance(environment, dict):
raise AnsibleError("environment must be a dictionary, received %s (%s)" % (environment, type(environment)))
# very deliberatly using update here instead of combine_vars, as
# these environment settings should not need to merge sub-dicts
final_environment.update(environment)
return self._connection._shell.env_prefix(**final_environment)
def _early_needs_tmp_path(self):
'''
Determines if a temp path should be created before the action is executed.
'''
# FIXME: modified from original, needs testing? Since this is now inside
# the action plugin, it should make it just this simple
return getattr(self, 'TRANSFERS_FILES', False)
def _late_needs_tmp_path(self, tmp, module_style):
'''
Determines if a temp path is required after some early actions have already taken place.
'''
if tmp and "tmp" in tmp:
# tmp has already been created
return False
if not self._connection.has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES or self._play_context.become:
# tmp is necessary to store the module source code
# or we want to keep the files on the target system
return True
if module_style != "new":
# even when conn has pipelining, old style modules need tmp to store arguments
return True
return False
# FIXME: return a datastructure in this function instead of raising errors -
# the new executor pipeline handles it much better that way
def _make_tmp_path(self):
'''
Create and return a temporary path on a remote box.
'''
basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48))
use_system_tmp = False
if self._play_context.become and self._play_context.become_user != 'root':
use_system_tmp = True
tmp_mode = None
if self._play_context.remote_user != 'root' or self._play_context.become and self._play_context.become_user != 'root':
tmp_mode = 0755
cmd = self._connection._shell.mkdtemp(basefile, use_system_tmp, tmp_mode)
self._display.debug("executing _low_level_execute_command to create the tmp path")
result = self._low_level_execute_command(cmd, None, sudoable=False)
self._display.debug("done with creation of tmp path")
# error handling on this seems a little aggressive?
if result['rc'] != 0:
if result['rc'] == 5:
output = 'Authentication failure.'
elif result['rc'] == 255 and self._connection.transport in ('ssh',):
if self._play_context.verbosity > 3:
output = 'SSH encountered an unknown error. The output was:\n%s' % (result['stdout']+result['stderr'])
else:
output = 'SSH encountered an unknown error during the connection. We recommend you re-run the command using -vvvv, which will enable SSH debugging output to help diagnose the issue'
elif 'No space left on device' in result['stderr']:
output = result['stderr']
else:
output = 'Authentication or permission failure. In some cases, you may have been able to authenticate and did not have permissions on the remote directory. Consider changing the remote temp path in ansible.cfg to a path rooted in "/tmp". Failed command was: %s, exited with result %d' % (cmd, result['rc'])
if 'stdout' in result and result['stdout'] != '':
output = output + ": %s" % result['stdout']
raise AnsibleError(output)
# FIXME: do we still need to do this?
#rc = self._connection._shell.join_path(utils.last_non_blank_line(result['stdout']).strip(), '')
rc = self._connection._shell.join_path(result['stdout'].strip(), '').splitlines()[-1]
# Catch failure conditions, files should never be
# written to locations in /.
if rc == '/':
raise AnsibleError('failed to resolve remote temporary directory from %s: `%s` returned empty string' % (basefile, cmd))
return rc
def _remove_tmp_path(self, tmp_path):
'''Remove a temporary path we created. '''
if tmp_path and "-tmp-" in tmp_path:
cmd = self._connection._shell.remove(tmp_path, recurse=True)
# If we have gotten here we have a working ssh configuration.
# If ssh breaks we could leave tmp directories out on the remote system.
self._display.debug("calling _low_level_execute_command to remove the tmp path")
self._low_level_execute_command(cmd, None, sudoable=False)
self._display.debug("done removing the tmp path")
def _transfer_data(self, remote_path, data):
'''
Copies the module data out to the temporary module path.
'''
if isinstance(data, dict):
data = jsonify(data)
afd, afile = tempfile.mkstemp()
afo = os.fdopen(afd, 'w')
try:
data = to_bytes(data, errors='strict')
afo.write(data)
except Exception as e:
#raise AnsibleError("failure encoding into utf-8: %s" % str(e))
raise AnsibleError("failure writing module data to temporary file for transfer: %s" % str(e))
afo.flush()
afo.close()
try:
self._connection.put_file(afile, remote_path)
finally:
os.unlink(afile)
return remote_path
def _remote_chmod(self, tmp, mode, path, sudoable=False):
'''
Issue a remote chmod command
'''
cmd = self._connection._shell.chmod(mode, path)
self._display.debug("calling _low_level_execute_command to chmod the remote path")
res = self._low_level_execute_command(cmd, tmp, sudoable=sudoable)
self._display.debug("done with chmod call")
return res
def _remote_checksum(self, tmp, path, all_vars):
'''
Takes a remote checksum and returns 1 if no file
'''
python_interp = all_vars.get('ansible_python_interpreter', 'python')
cmd = self._connection._shell.checksum(path, python_interp)
self._display.debug("calling _low_level_execute_command to get the remote checksum")
data = self._low_level_execute_command(cmd, tmp, sudoable=True)
self._display.debug("done getting the remote checksum")
# FIXME: implement this function?
#data2 = utils.last_non_blank_line(data['stdout'])
try:
data2 = data['stdout'].strip().splitlines()[-1]
if data2 == '':
# this may happen if the connection to the remote server
# failed, so just return "INVALIDCHECKSUM" to avoid errors
return "INVALIDCHECKSUM"
else:
return data2.split()[0]
except IndexError:
self._display.warning("Calculating checksum failed unusually, please report this to " + \
"the list so it can be fixed\ncommand: %s\n----\noutput: %s\n----\n") % (cmd, data)
# this will signal that it changed and allow things to keep going
return "INVALIDCHECKSUM"
def _remote_expand_user(self, path, tmp):
''' takes a remote path and performs tilde expansion on the remote host '''
if not path.startswith('~'): # FIXME: Windows paths may start with "~ instead of just ~
return path
# FIXME: Can't use os.path.sep for Windows paths.
split_path = path.split(os.path.sep, 1)
expand_path = split_path[0]
if expand_path == '~':
if self._play_context.become and self._play_context.become_user:
expand_path = '~%s' % self._play_context.become_user
cmd = self._connection._shell.expand_user(expand_path)
self._display.debug("calling _low_level_execute_command to expand the remote user path")
data = self._low_level_execute_command(cmd, tmp, sudoable=False)
self._display.debug("done expanding the remote user path")
#initial_fragment = utils.last_non_blank_line(data['stdout'])
initial_fragment = data['stdout'].strip().splitlines()[-1]
if not initial_fragment:
# Something went wrong trying to expand the path remotely. Return
# the original string
return path
if len(split_path) > 1:
return self._connection._shell.join_path(initial_fragment, *split_path[1:])
else:
return initial_fragment
def _filter_leading_non_json_lines(self, data):
'''
Used to avoid random output from SSH at the top of JSON output, like messages from
tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).
        Need to filter out any leading line that does not start with '{' or '['.
        Only leading lines are filtered, since multiline JSON is valid.
'''
filtered_lines = StringIO()
stop_filtering = False
for line in data.splitlines():
if stop_filtering or line.startswith('{') or line.startswith('['):
stop_filtering = True
filtered_lines.write(line + '\n')
return filtered_lines.getvalue()
def _execute_module(self, module_name=None, module_args=None, tmp=None, task_vars=dict(), persist_files=False, delete_remote_tmp=True):
'''
Transfer and run a module along with its arguments.
'''
# if a module name was not specified for this execution, use
# the action from the task
if module_name is None:
module_name = self._task.action
if module_args is None:
module_args = self._task.args
# set check mode in the module arguments, if required
if self._play_context.check_mode and not self._task.always_run:
if not self._supports_check_mode:
raise AnsibleError("check mode is not supported for this operation")
module_args['_ansible_check_mode'] = True
# set no log in the module arguments, if required
if self._play_context.no_log:
module_args['_ansible_no_log'] = True
self._display.debug("in _execute_module (%s, %s)" % (module_name, module_args))
(module_style, shebang, module_data) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars)
if not shebang:
raise AnsibleError("module is missing interpreter line")
# a remote tmp path may be necessary and not already created
remote_module_path = None
if not tmp and self._late_needs_tmp_path(tmp, module_style):
tmp = self._make_tmp_path()
if tmp:
remote_module_path = self._connection._shell.join_path(tmp, module_name)
# FIXME: async stuff here?
#if (module_style != 'new' or async_jid is not None or not self._connection._has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES):
if remote_module_path:
self._display.debug("transferring module to remote")
self._transfer_data(remote_module_path, module_data)
self._display.debug("done transferring module to remote")
environment_string = self._compute_environment_string()
if tmp and "tmp" in tmp and self._play_context.become and self._play_context.become_user != 'root':
# deal with possible umask issues once sudo'ed to other user
self._remote_chmod(tmp, 'a+r', remote_module_path)
cmd = ""
in_data = None
# FIXME: all of the old-module style and async stuff has been removed from here, and
# might need to be re-added (unless we decide to drop support for old-style modules
# at this point and rework things to support non-python modules specifically)
if self._connection.has_pipelining and C.ANSIBLE_SSH_PIPELINING and not C.DEFAULT_KEEP_REMOTE_FILES:
in_data = module_data
else:
if remote_module_path:
cmd = remote_module_path
rm_tmp = None
if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
if not self._play_context.become or self._play_context.become_user == 'root':
# not sudoing or sudoing to root, so can cleanup files in the same step
rm_tmp = tmp
cmd = self._connection._shell.build_module_command(environment_string, shebang, cmd, rm_tmp)
cmd = cmd.strip()
sudoable = True
if module_name == "accelerate":
# always run the accelerate module as the user
# specified in the play, not the sudo_user
sudoable = False
self._display.debug("calling _low_level_execute_command() for command %s" % cmd)
res = self._low_level_execute_command(cmd, tmp, sudoable=sudoable, in_data=in_data)
self._display.debug("_low_level_execute_command returned ok")
if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
if self._play_context.become and self._play_context.become_user != 'root':
# not sudoing to root, so maybe can't delete files as that other user
# have to clean up temp files as original user in a second step
cmd2 = self._connection._shell.remove(tmp, recurse=True)
self._low_level_execute_command(cmd2, tmp, sudoable=False)
try:
data = json.loads(self._filter_leading_non_json_lines(res.get('stdout', '')))
except ValueError:
# not valid json, lets try to capture error
data = dict(failed=True, parsed=False)
if 'stderr' in res and res['stderr'].startswith('Traceback'):
data['exception'] = res['stderr']
else:
data['msg'] = res.get('stdout', '')
if 'stderr' in res:
data['msg'] += res['stderr']
# pre-split stdout into lines, if stdout is in the data and there
# isn't already a stdout_lines value there
if 'stdout' in data and 'stdout_lines' not in data:
data['stdout_lines'] = data.get('stdout', '').splitlines()
# store the module invocation details back into the result
if self._task.async != 0:
data['invocation'] = dict(
module_args = module_args,
module_name = module_name,
)
self._display.debug("done with _execute_module (%s, %s)" % (module_name, module_args))
return data
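    # The stdout handling above follows a parse-with-fallback pattern: strip any
    # leading non-JSON noise, attempt json.loads(), and fall back to a
    # {'failed': True, 'parsed': False} result when parsing fails.  A minimal
    # standalone sketch of that idea (illustrative only; the real helper used
    # above is self._filter_leading_non_json_lines):
    #
    #     import json
    #
    #     def parse_module_output(raw):
    #         lines = raw.splitlines()
    #         while lines and not lines[0].lstrip().startswith(("{", "[")):
    #             lines.pop(0)
    #         try:
    #             return json.loads("\n".join(lines))
    #         except ValueError:
    #             return dict(failed=True, parsed=False, msg=raw)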
def _low_level_execute_command(self, cmd, tmp, sudoable=True, in_data=None, executable=None):
'''
        Executes the low-level shell command. That may be a command that
        creates or removes directories for temporary files, or one that runs
        the module code (or python directly when pipelining).
'''
if executable is not None:
cmd = executable + ' -c ' + cmd
self._display.debug("in _low_level_execute_command() (%s)" % (cmd,))
if not cmd:
            # this can happen with powershell modules when there is no Windows analog to a command like chmod
self._display.debug("no command, exiting _low_level_execute_command()")
return dict(stdout='', stderr='')
if sudoable and self._play_context.become:
self._display.debug("using become for this command")
cmd = self._play_context.make_become_cmd(cmd, executable=executable)
self._display.debug("executing the command %s through the connection" % cmd)
rc, stdin, stdout, stderr = self._connection.exec_command(cmd, tmp, in_data=in_data, sudoable=sudoable)
self._display.debug("command execution done")
if not isinstance(stdout, basestring):
out = ''.join(stdout.readlines())
else:
out = stdout
if not isinstance(stderr, basestring):
err = ''.join(stderr.readlines())
else:
err = stderr
self._display.debug("done with _low_level_execute_command() (%s)" % (cmd,))
if rc is None:
rc = 0
return dict(rc=rc, stdout=out, stdout_lines=out.splitlines(), stderr=err)
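    # Connection plugins may return either plain strings or file-like objects
    # for stdout/stderr, which is why the method above normalises both before
    # building the result dict.  A rough standalone sketch of that normalisation
    # (illustrative only, not used by this class):
    #
    #     def to_text(stream_or_str):
    #         if hasattr(stream_or_str, "readlines"):
    #             return "".join(stream_or_str.readlines())
    #         return stream_or_str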
def _get_first_available_file(self, faf, of=None, searchdir='files'):
self._display.deprecated("first_available_file, use with_first_found or lookup('first_found',...) instead")
for fn in faf:
fn_orig = fn
fnt = self._templar.template(fn)
if self._task._role is not None:
lead = self._task._role._role_path
else:
lead = fnt
fnd = self._loader.path_dwim_relative(lead, searchdir, fnt)
if not os.path.exists(fnd) and of is not None:
if self._task._role is not None:
lead = self._task._role._role_path
else:
lead = of
fnd = self._loader.path_dwim_relative(lead, searchdir, of)
if os.path.exists(fnd):
return fnd
return None
def _get_diff_data(self, tmp, destination, source, task_vars, source_file=True):
diff = {}
self._display.debug("Going to peek to see if file has changed permissions")
peek_result = self._execute_module(module_name='file', module_args=dict(path=destination, diff_peek=True), task_vars=task_vars, persist_files=True)
if not('failed' in peek_result and peek_result['failed']) or peek_result.get('rc', 0) == 0:
if peek_result['state'] == 'absent':
diff['before'] = ''
elif peek_result['appears_binary']:
diff['dst_binary'] = 1
elif peek_result['size'] > C.MAX_FILE_SIZE_FOR_DIFF:
diff['dst_larger'] = C.MAX_FILE_SIZE_FOR_DIFF
else:
self._display.debug("Slurping the file %s" % source)
dest_result = self._execute_module(module_name='slurp', module_args=dict(path=destination), task_vars=task_vars, persist_files=True)
if 'content' in dest_result:
dest_contents = dest_result['content']
if dest_result['encoding'] == 'base64':
dest_contents = base64.b64decode(dest_contents)
else:
raise AnsibleError("unknown encoding in content option, failed: %s" % dest_result)
diff['before_header'] = destination
diff['before'] = dest_contents
if source_file:
self._display.debug("Reading local copy of the file %s" % source)
try:
src = open(source)
src_contents = src.read(8192)
st = os.stat(source)
except Exception as e:
raise AnsibleError("Unexpected error while reading source (%s) for diff: %s " % (source, str(e)))
if "\x00" in src_contents:
diff['src_binary'] = 1
elif st[stat.ST_SIZE] > C.MAX_FILE_SIZE_FOR_DIFF:
diff['src_larger'] = C.MAX_FILE_SIZE_FOR_DIFF
else:
diff['after_header'] = source
diff['after'] = src_contents
else:
self._display.debug("source of file passed in")
diff['after_header'] = 'dynamically generated'
diff['after'] = source
return diff
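    # Illustrative only: the dict built above feeds the --diff display.  For two
    # small text files it typically ends up shaped like
    #
    #     {'before_header': destination, 'before': '<remote contents>',
    #      'after_header': source,       'after': '<local contents>'}
    #
    # while binary or oversized files are flagged via dst_binary/src_binary or
    # dst_larger/src_larger instead of carrying inline contents.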
| ilya-epifanov/ansible | lib/ansible/plugins/action/__init__.py | Python | gpl-3.0 | 24,960 |
import datetime
from django.utils.timezone import now
def current_time():
return now()
def current_date():
return current_time().date()
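# Hedged usage sketch (not part of the original helper module): timezone.now()
# requires configured Django settings, so a standalone run configures a minimal
# set first.  Assumes Django is installed; USE_TZ=True is a demo assumption.
if __name__ == "__main__":
    from django.conf import settings
    settings.configure(USE_TZ=True)
    print(current_time())  # timezone-aware datetime (UTC when USE_TZ=True)
    print(current_date())  # the corresponding datetime.date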
| danijar/invoicepad | invoicepad/shared/helpers.py | Python | gpl-3.0 | 142 |
#!/usr/bin/python
# -*- encoding: utf-8 -*-
# Import the RPi.GPIO library
import RPi.GPIO as GPIO
# Use BCM pin numbering for the board
GPIO.setmode(GPIO.BCM)
# Disable warnings
GPIO.setwarnings(False)
# Import the time library
import time
# Import the library for console/shell commands
import os
# Configure GPIO 4 as an output
GPIO.setup(4, GPIO.OUT)
# Drive the output high so GPIO 4 supplies 3.3V
GPIO.output(4, GPIO.HIGH)
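# Illustrative follow-up, kept commented out so the script's behaviour is
# unchanged: the matching "deactivate" step would drive the pin low again and
# release the channel (GPIO.output and GPIO.cleanup are standard RPi.GPIO calls).
#
#   GPIO.output(4, GPIO.LOW)   # stop sourcing 3.3V on GPIO 4
#   GPIO.cleanup(4)            # return the channel to its default state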
| fryntiz/Raspberry-PI | Obsoleto/Web VIEJA para controlar GPIO/GPIO/4/activar.py | Python | gpl-3.0 | 465 |
# -*- encoding: utf-8 -*-
from abjad import *
def test_spannertools_Spanner__append_left_01():
r'''Append container to the left.
'''
voice = Voice("{ c'8 d'8 } { e'8 f'8 } { g'8 a'8 }")
beam = Beam()
attach(beam, voice[1])
beam._append_left(voice[0])
assert systemtools.TestManager.compare(
voice,
r'''
\new Voice {
{
c'8 [
d'8
}
{
e'8
f'8 ]
}
{
g'8
a'8
}
}
'''
)
assert inspect_(voice).is_well_formed()
def test_spannertools_Spanner__append_left_02():
r'''Spanner appends one leaf to the right.
'''
voice = Voice("{ c'8 d'8 } { e'8 f'8 } { g'8 a'8 }")
beam = Beam()
attach(beam, voice[1])
beam._append_left(voice[0][-1])
assert systemtools.TestManager.compare(
voice,
r'''
\new Voice {
{
c'8
d'8 [
}
{
e'8
f'8 ]
}
{
g'8
a'8
}
}
'''
)
    assert inspect_(voice).is_well_formed()
| mscuthbert/abjad | abjad/tools/spannertools/test/test_spannertools_Spanner__append_left.py | Python | gpl-3.0 | 1,282 |
from datetime import datetime
from grokcore.component.directive import context
from twisted.internet import defer
from twisted.python import log
from zope.component import provideSubscriptionAdapter
from zope.interface import implements
from opennode.knot.model.compute import ICompute
from opennode.knot.utils.icmp import ping
from opennode.oms.config import get_config
from opennode.oms.endpoint.ssh.detached import DetachedProtocol
from opennode.oms.model.model.actions import Action, action
from opennode.oms.model.model.proc import IProcess, Proc, DaemonProcess
from opennode.oms.model.model.symlink import follow_symlinks
from opennode.oms.util import subscription_factory, async_sleep
from opennode.oms.zodb import db
class PingCheckAction(Action):
"""Check if a Compute responds to ICMP request from OMS."""
context(ICompute)
action('ping-check')
def __init__(self, *args, **kwargs):
super(PingCheckAction, self).__init__(*args, **kwargs)
config = get_config()
self.mem_limit = config.getint('pingcheck', 'mem_limit')
@db.ro_transact(proxy=False)
def subject(self, args):
return tuple((self.context, ))
@defer.inlineCallbacks
def execute(self, cmd, args):
yield self._execute(cmd, args)
@db.transact
def _execute(self, cmd, args):
address = self.context.hostname.encode('utf-8')
res = ping(address)
self.context.last_ping = (res == 1)
self.context.pingcheck.append({'timestamp': datetime.utcnow(),
'result': res})
history_len = len(self.context.pingcheck)
if history_len > self.mem_limit:
del self.context.pingcheck[:-self.mem_limit]
ping_results = map(lambda i: i['result'] == 1, self.context.pingcheck[:3])
self.context.suspicious = not all(ping_results)
self.context.failure = not any(ping_results)
class PingCheckDaemonProcess(DaemonProcess):
implements(IProcess)
__name__ = "ping-check"
def __init__(self):
super(PingCheckDaemonProcess, self).__init__()
config = get_config()
self.interval = config.getint('pingcheck', 'interval')
@defer.inlineCallbacks
def run(self):
while True:
try:
if not self.paused:
yield self.ping_check()
except Exception:
if get_config().getboolean('debug', 'print_exceptions'):
log.err(system='ping-check')
yield async_sleep(self.interval)
@defer.inlineCallbacks
def ping_check(self):
@db.ro_transact
def get_computes():
oms_root = db.get_root()['oms_root']
res = [(i, i.hostname)
for i in map(follow_symlinks, oms_root['computes'].listcontent())
if ICompute.providedBy(i)]
return res
ping_actions = []
for i, hostname in (yield get_computes()):
action = PingCheckAction(i)
d = action.execute(DetachedProtocol(), object())
ping_actions.append((hostname, d))
def handle_errors(e, c):
e.trap(Exception)
log.msg("Got exception when pinging compute '%s': %s" % (c, e), system='ping-check')
if get_config().getboolean('debug', 'print_exceptions'):
log.err(system='ping-check')
for c, deferred in ping_actions:
deferred.addErrback(handle_errors, c)
provideSubscriptionAdapter(subscription_factory(PingCheckDaemonProcess), adapts=(Proc,))
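# Hedged standalone sketch of the bounded-history trimming used in
# PingCheckAction._execute above: only the newest `mem_limit` records are kept.
# Plain integers stand in for the real ping-check dicts.
if __name__ == "__main__":
    mem_limit = 5
    history = list(range(12))      # pretend records 0..11 were appended over time
    if len(history) > mem_limit:
        del history[:-mem_limit]   # drop everything except the last mem_limit items
    print(history)                 # -> [7, 8, 9, 10, 11]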
| opennode/opennode-knot | opennode/knot/backend/pingcheck.py | Python | gpl-3.0 | 3,589 |
#
# Paasmaker - Platform as a Service
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
import paasmaker
from paasmaker.common.core import constants
from ..base import BaseJob
class InstanceJobHelper(BaseJob):
"""
    A superclass for the various coordinate jobs, providing common helpers
    that those jobs use.
"""
def get_instance_type(self, session):
"""
From the supplied job parameters, fetch and hydrate the
application instance type we are working on.
:arg Session session: The SQLAlchemy session to work in.
"""
instance_type = session.query(
paasmaker.model.ApplicationInstanceType
).get(self.parameters['application_instance_type_id'])
return instance_type
def get_instances(self, session, instance_type, states, context):
"""
Find instances for the given application, in the given state,
and return a destroyable list of instances for queueing.
:arg Session session: The SQLAlchemy session to work in.
        :arg ApplicationInstanceType instance_type: The instance type to fetch instances for.
:arg list states: The states of instances to fetch.
:arg dict context: The job's context, used to control the listing parameters.
"""
self.logger.info("Looking for instances in states %s, on active nodes.", str(states))
active_nodes = session.query(
paasmaker.model.Node.id
).filter(
paasmaker.model.Node.state == constants.NODE.ACTIVE
)
instances = session.query(
paasmaker.model.ApplicationInstance
).filter(
paasmaker.model.ApplicationInstance.application_instance_type == instance_type,
paasmaker.model.ApplicationInstance.state.in_(states),
paasmaker.model.ApplicationInstance.node_id.in_(active_nodes)
)
if context.has_key('limit_instances'):
# Limit the query to the given instance IDs.
self.logger.info("Limiting to instances %s", str(context['limit_instances']))
instances = instances.filter(
paasmaker.model.ApplicationInstance.instance_id.in_(context['limit_instances'])
)
self.logger.info("Found %d instances.", instances.count())
return instances
def get_tags_for(self, instance_type):
"""
Return a set of job tags for the given instance type.
:arg ApplicationInstanceType instance_type: The instance type
to work on.
"""
tags = []
tags.append('workspace:%d' % instance_type.application_version.application.workspace.id)
tags.append('application:%d' % instance_type.application_version.application.id)
tags.append('application_version:%d' % instance_type.application_version.id)
tags.append('application_instance_type:%d' % instance_type.id)
return tags
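    # For illustration only (hypothetical IDs, not taken from the code above):
    # an instance type in workspace 2, application 7, version 9 and type 15
    # would yield
    #
    #     ['workspace:2', 'application:7',
    #      'application_version:9', 'application_instance_type:15']
    #
    # so related jobs can later be looked up by any of these scopes.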
| kaze/paasmaker | paasmaker/common/job/coordinate/instancejobhelper.py | Python | mpl-2.0 | 2,748 |
from openupgradelib import openupgrade
@openupgrade.migrate()
def migrate(env, installed_version):
if not installed_version:
return
churches = env["res.partner"].search([("is_church", "=", True)])
churches.update_number_sponsorships()
| eicher31/compassion-switzerland | partner_compassion/migrations/12.0.1.1.3/post-migration.py | Python | agpl-3.0 | 258 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from eve.utils import ParsedRequest
import json
import os
from apps.content_filters.content_filter.content_filter_service import ContentFilterService
from superdesk import get_backend, get_resource_service
from superdesk.errors import SuperdeskApiError
from superdesk.publish import SubscribersService
from superdesk.tests import TestCase
from superdesk.vocabularies.commands import VocabulariesPopulateCommand
class ContentFilterTests(TestCase):
def setUp(self):
self.req = ParsedRequest()
with self.app.test_request_context(self.app.config.get('URL_PREFIX')):
self.f = ContentFilterService(datasource='content_filters', backend=get_backend())
self.s = SubscribersService(datasource='subscribers', backend=get_backend())
self.articles = [{'_id': '1', 'urgency': 1, 'headline': 'story', 'state': 'fetched'},
{'_id': '2', 'headline': 'prtorque', 'state': 'fetched'},
{'_id': '3', 'urgency': 3, 'headline': 'creator', 'state': 'fetched'},
{'_id': '4', 'urgency': 4, 'state': 'fetched'},
{'_id': '5', 'urgency': 2, 'state': 'fetched'},
{'_id': '6', 'state': 'fetched'},
{'_id': '7', 'subject': [{'scheme': 'my_vocabulary', 'qcode': 'MV:01'}]},
{'_id': '8', 'extra': {'custom_text': 'my text'}}]
self.app.data.insert('archive', self.articles)
self.app.data.insert('vocabularies',
[{'_id': 'my_vocabulary',
'display_name': 'My Vocabulary',
'type': 'manageable',
'field_type': None,
'schema': {'name': {}, 'qcode': {}, 'parent': {}},
'items': [{'name': 'option 1', 'qcode': 'MV:01', 'is_active': True}]
},
{'_id': 'custom_text',
'display_name': 'Custom Text',
'type': 'manageable',
'field_type': 'text'}])
self.app.data.insert('filter_conditions',
[{'_id': 1,
'field': 'headline',
'operator': 'like',
'value': 'tor',
'name': 'test-1'}])
self.app.data.insert('filter_conditions',
[{'_id': 2,
'field': 'urgency',
'operator': 'in',
'value': '2',
'name': 'test-2'}])
self.app.data.insert('filter_conditions',
[{'_id': 3,
'field': 'headline',
'operator': 'endswith',
'value': 'tor',
'name': 'test-3'}])
self.app.data.insert('filter_conditions',
[{'_id': 4,
'field': 'urgency',
'operator': 'in',
'value': '2,3,4',
'name': 'test-4'}])
self.app.data.insert('filter_conditions',
[{'_id': 5,
'field': 'headline',
'operator': 'startswith',
'value': 'sto',
'name': 'test-5'}])
self.app.data.insert('filter_conditions',
[{'_id': 6,
'field': 'my_vocabulary',
'operator': 'in',
'value': 'MV:01',
'name': 'test-6'}])
self.app.data.insert('filter_conditions',
[{'_id': 7,
'field': 'custom_text',
'operator': 'eq',
'value': 'my text',
'name': 'test-7'}])
self.app.data.insert('content_filters',
[{"_id": 1,
"content_filter": [{"expression": {"fc": [1]}}],
"name": "soccer-only"}])
self.app.data.insert('content_filters',
[{"_id": 2,
"content_filter": [{"expression": {"fc": [4, 3]}}],
"name": "soccer-only2"}])
self.app.data.insert('content_filters',
[{"_id": 3,
"content_filter": [{"expression": {"pf": [1], "fc": [2]}}],
"name": "soccer-only3"}])
self.app.data.insert('content_filters',
[{"_id": 4,
"content_filter": [{"expression": {"fc": [3]}}, {"expression": {"fc": [5]}}],
"name": "soccer-only4"}])
self.app.data.insert('content_filters',
[{"_id": 5,
"content_filter": [{"expression": {"fc": [6]}}],
"name": "my-vocabulary"}])
self.app.data.insert('content_filters',
[{"_id": 6,
"content_filter": [{"expression": {"fc": [7]}}],
"name": "custom-text"}])
self.app.data.insert('products',
[{"_id": 1,
"content_filter": {"filter_id": 3, "filter_type": "blocking"},
"name": "p-1"}])
self.app.data.insert('products',
[{"_id": 2,
"content_filter": {"filter_id": 1, "filter_type": "blocking"},
"name": "p-2"}])
self.app.data.insert('subscribers',
[{"_id": 1,
"products": [1],
"name": "sub1"}])
self.app.data.insert('subscribers',
[{"_id": 2,
"products": [2],
"name": "sub2"}])
self.app.data.insert('routing_schemes', [
{
"_id": 1,
"name": "routing_scheme_1",
"rules": [{
"filter": 4,
"name": "routing_rule_4",
"schedule": {
"day_of_week": ["MON"],
"hour_of_day_from": "0000",
"hour_of_day_to": "2355",
},
"actions": {
"fetch": [],
"publish": [],
"exit": False
}
}]
}
])
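# A note on the fixture shape used throughout these tests, derived from the data
# and assertions in this file rather than from external docs: a content filter is
# a list of expressions that are OR-ed together, and within one expression "fc"
# lists filter-condition ids while "pf" lists other content-filter ids, all of
# which must match.  For example
#
#     {"name": "soccer-only3",
#      "content_filter": [{"expression": {"pf": [1], "fc": [2]}}]}
#
# matches an item only if it satisfies referenced filter 1 and condition 2.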
class RetrievingDataTests(ContentFilterTests):
def test_build_mongo_query_using_like_filter_single_fc(self):
doc = {'content_filter': [{"expression": {"fc": [1]}}], 'name': 'pf-1'}
with self.app.app_context():
query = self.f.build_mongo_query(doc)
docs = get_resource_service('archive').\
get_from_mongo(req=self.req, lookup=query)
doc_ids = [d['_id'] for d in docs]
self.assertEqual(3, docs.count())
self.assertTrue('1' in doc_ids)
self.assertTrue('2' in doc_ids)
self.assertTrue('3' in doc_ids)
def test_build_mongo_query_using_like_filter_single_pf(self):
doc = {'content_filter': [{"expression": {"pf": [1]}}], 'name': 'pf-1'}
with self.app.app_context():
query = self.f.build_mongo_query(doc)
docs = get_resource_service('archive').\
get_from_mongo(req=self.req, lookup=query)
doc_ids = [d['_id'] for d in docs]
self.assertEqual(3, docs.count())
self.assertTrue('1' in doc_ids)
self.assertTrue('2' in doc_ids)
self.assertTrue('3' in doc_ids)
def test_build_mongo_query_using_like_filter_multi_filter_condition(self):
doc = {'content_filter': [{"expression": {"fc": [1]}}, {"expression": {"fc": [2]}}], 'name': 'pf-1'}
with self.app.app_context():
query = self.f.build_mongo_query(doc)
docs = get_resource_service('archive').\
get_from_mongo(req=self.req, lookup=query)
doc_ids = [d['_id'] for d in docs]
self.assertEqual(4, docs.count())
self.assertTrue('1' in doc_ids)
self.assertTrue('2' in doc_ids)
self.assertTrue('5' in doc_ids)
def test_build_mongo_query_using_like_filter_multi_pf(self):
doc = {'content_filter': [{"expression": {"pf": [1]}}, {"expression": {"fc": [2]}}], 'name': 'pf-1'}
with self.app.app_context():
query = self.f.build_mongo_query(doc)
docs = get_resource_service('archive').\
get_from_mongo(req=self.req, lookup=query)
doc_ids = [d['_id'] for d in docs]
self.assertEqual(4, docs.count())
self.assertTrue('1' in doc_ids)
self.assertTrue('2' in doc_ids)
self.assertTrue('5' in doc_ids)
def test_build_mongo_query_using_like_filter_multi_filter_condition2(self):
doc = {'content_filter': [{"expression": {"fc": [3, 4]}}], 'name': 'pf-1'}
with self.app.app_context():
query = self.f.build_mongo_query(doc)
docs = get_resource_service('archive').\
get_from_mongo(req=self.req, lookup=query)
doc_ids = [d['_id'] for d in docs]
self.assertEqual(1, docs.count())
self.assertTrue('3' in doc_ids)
def test_build_mongo_query_using_like_filter_multi_pf2(self):
doc = {'content_filter': [{"expression": {"pf": [2]}}], 'name': 'pf-1'}
with self.app.app_context():
query = self.f.build_mongo_query(doc)
docs = get_resource_service('archive').\
get_from_mongo(req=self.req, lookup=query)
doc_ids = [d['_id'] for d in docs]
self.assertEqual(1, docs.count())
self.assertTrue('3' in doc_ids)
def test_build_mongo_query_using_like_filter_multi_condition3(self):
doc = {'content_filter': [{"expression": {"fc": [3, 4]}}, {"expression": {"fc": [1, 2]}}], 'name': 'pf-1'}
with self.app.app_context():
query = self.f.build_mongo_query(doc)
docs = get_resource_service('archive').\
get_from_mongo(req=self.req, lookup=query)
doc_ids = [d['_id'] for d in docs]
self.assertEqual(1, docs.count())
self.assertTrue('3' in doc_ids)
def test_build_mongo_query_using_like_filter_multi_pf3(self):
doc = {'content_filter': [{"expression": {"pf": [2]}}, {"expression": {"pf": [1], "fc": [2]}}], 'name': 'pf-1'}
with self.app.app_context():
query = self.f.build_mongo_query(doc)
docs = get_resource_service('archive').\
get_from_mongo(req=self.req, lookup=query)
doc_ids = [d['_id'] for d in docs]
self.assertEqual(1, docs.count())
self.assertTrue('3' in doc_ids)
def test_build_elastic_query_using_like_filter_single_filter_condition(self):
doc = {'content_filter': [{"expression": {"fc": [1]}}], 'name': 'pf-1'}
with self.app.app_context():
query = {'query': {'filtered': {'query': self.f._get_elastic_query(doc)}}}
self.req.args = {'source': json.dumps(query)}
docs = get_resource_service('archive').get(req=self.req, lookup=None)
doc_ids = [d['_id'] for d in docs]
self.assertEqual(3, docs.count())
self.assertTrue('1' in doc_ids)
self.assertTrue('2' in doc_ids)
self.assertTrue('3' in doc_ids)
def test_build_elastic_query_using_like_filter_single_content_filter(self):
doc = {'content_filter': [{"expression": {"pf": [1]}}], 'name': 'pf-1'}
with self.app.app_context():
query = {'query': {'filtered': {'query': self.f._get_elastic_query(doc)}}}
self.req.args = {'source': json.dumps(query)}
docs = get_resource_service('archive').get(req=self.req, lookup=None)
doc_ids = [d['_id'] for d in docs]
self.assertEqual(3, docs.count())
self.assertTrue('1' in doc_ids)
self.assertTrue('2' in doc_ids)
self.assertTrue('3' in doc_ids)
def test_build_elastic_query_using_like_filter_multi_filter_condition(self):
doc = {'content_filter': [{"expression": {"fc": [1]}}, {"expression": {"fc": [2]}}], 'name': 'pf-1'}
with self.app.app_context():
query = {'query': {'filtered': {'query': self.f._get_elastic_query(doc)}}}
self.req.args = {'source': json.dumps(query)}
docs = get_resource_service('archive').get(req=self.req, lookup=None)
doc_ids = [d['_id'] for d in docs]
self.assertEqual(4, docs.count())
self.assertTrue('1' in doc_ids)
self.assertTrue('2' in doc_ids)
self.assertTrue('3' in doc_ids)
self.assertTrue('5' in doc_ids)
def test_build_mongo_query_using_like_filter_multi_content_filter(self):
doc = {'content_filter': [{"expression": {"pf": [1]}}, {"expression": {"fc": [2]}}], 'name': 'pf-1'}
with self.app.app_context():
query = {'query': {'filtered': {'query': self.f._get_elastic_query(doc)}}}
self.req.args = {'source': json.dumps(query)}
docs = get_resource_service('archive').get(req=self.req, lookup=None)
doc_ids = [d['_id'] for d in docs]
self.assertEqual(4, docs.count())
self.assertTrue('1' in doc_ids)
self.assertTrue('2' in doc_ids)
self.assertTrue('3' in doc_ids)
self.assertTrue('5' in doc_ids)
def test_build_elastic_query_using_like_filter_multi_filter_condition2(self):
doc = {'content_filter': [{"expression": {"fc": [3, 4]}}, {"expression": {"fc": [1, 2]}}], 'name': 'pf-1'}
with self.app.app_context():
query = {'query': {'filtered': {'query': self.f._get_elastic_query(doc)}}}
self.req.args = {'source': json.dumps(query)}
docs = get_resource_service('archive').get(req=self.req, lookup=None)
doc_ids = [d['_id'] for d in docs]
self.assertEqual(1, docs.count())
self.assertTrue('3' in doc_ids)
def test_build_elastic_query_using_like_filter_multi_content_filter2(self):
doc = {'content_filter': [{"expression": {"fc": [4, 3]}},
{"expression": {"pf": [1], "fc": [2]}}], 'name': 'pf-1'}
with self.app.app_context():
query = {'query': {'filtered': {'query': self.f._get_elastic_query(doc)}}}
self.req.args = {'source': json.dumps(query)}
docs = get_resource_service('archive').get(req=self.req, lookup=None)
doc_ids = [d['_id'] for d in docs]
self.assertEqual(1, docs.count())
self.assertTrue('3' in doc_ids)
def test_build_elastic_query_using_like_filter_multi_content_filter3(self):
doc = {'content_filter': [{"expression": {"pf": [2]}}, {"expression": {"pf": [1], "fc": [2]}}], 'name': 'pf-1'}
with self.app.app_context():
query = {'query': {'filtered': {'query': self.f._get_elastic_query(doc)}}}
self.req.args = {'source': json.dumps(query)}
docs = get_resource_service('archive').get(req=self.req, lookup=None)
doc_ids = [d['_id'] for d in docs]
self.assertEqual(1, docs.count())
self.assertTrue('3' in doc_ids)
def test_build_elastic_query_using_like_filter_multi_content_filter4(self):
doc = {'content_filter': [{"expression": {"pf": [2]}}, {"expression": {"pf": [3]}}], 'name': 'pf-1'}
with self.app.app_context():
query = {'query': {'filtered': {'query': self.f._get_elastic_query(doc)}}}
self.req.args = {'source': json.dumps(query)}
docs = get_resource_service('archive').get(req=self.req, lookup=None)
doc_ids = [d['_id'] for d in docs]
self.assertEqual(1, docs.count())
self.assertTrue('3' in doc_ids)
def test_build_elastic_query_using_like_filter_multi_content_filter5(self):
doc = {'content_filter': [{"expression": {"pf": [4], "fc": [4]}}], 'name': 'pf-1'}
with self.app.app_context():
query = {'query': {'filtered': {'query': self.f._get_elastic_query(doc)}}}
self.req.args = {'source': json.dumps(query)}
docs = get_resource_service('archive').get(req=self.req, lookup=None)
doc_ids = [d['_id'] for d in docs]
self.assertEqual(1, docs.count())
self.assertTrue('3' in doc_ids)
class FilteringDataTests(ContentFilterTests):
def test_does_match_returns_true_for_nonexisting_filter(self):
for article in self.articles:
self.assertTrue(self.f.does_match(None, article))
def test_does_match_custom_vocabularies(self):
doc1 = {'content_filter': [{"expression": {"fc": [6]}}], 'name': 'mv-1'}
doc2 = {'content_filter': [{"expression": {"fc": [7]}}], 'name': 'ct-1'}
with self.app.app_context():
self.assertTrue(self.f.does_match(doc1, self.articles[6]))
self.assertTrue(self.f.does_match(doc2, self.articles[7]))
def test_does_match_using_like_filter_single_fc(self):
doc = {'content_filter': [{"expression": {"fc": [1]}}], 'name': 'pf-1'}
with self.app.app_context():
self.assertTrue(self.f.does_match(doc, self.articles[0]))
self.assertTrue(self.f.does_match(doc, self.articles[1]))
self.assertTrue(self.f.does_match(doc, self.articles[2]))
self.assertFalse(self.f.does_match(doc, self.articles[3]))
self.assertFalse(self.f.does_match(doc, self.articles[4]))
self.assertFalse(self.f.does_match(doc, self.articles[5]))
def test_does_match_using_like_filter_single_pf(self):
doc = {'content_filter': [{"expression": {"pf": [1]}}], 'name': 'pf-1'}
with self.app.app_context():
self.assertTrue(self.f.does_match(doc, self.articles[0]))
self.assertTrue(self.f.does_match(doc, self.articles[1]))
self.assertTrue(self.f.does_match(doc, self.articles[2]))
self.assertFalse(self.f.does_match(doc, self.articles[3]))
self.assertFalse(self.f.does_match(doc, self.articles[4]))
self.assertFalse(self.f.does_match(doc, self.articles[5]))
def test_does_match_using_like_filter_multi_fc(self):
doc = {'content_filter': [{"expression": {"fc": [1]}}, {"expression": {"fc": [2]}}], 'name': 'pf-1'}
with self.app.app_context():
self.assertTrue(self.f.does_match(doc, self.articles[0]))
self.assertTrue(self.f.does_match(doc, self.articles[1]))
self.assertTrue(self.f.does_match(doc, self.articles[2]))
self.assertFalse(self.f.does_match(doc, self.articles[3]))
self.assertTrue(self.f.does_match(doc, self.articles[4]))
self.assertFalse(self.f.does_match(doc, self.articles[5]))
def test_does_match_using_like_filter_multi_pf(self):
doc = {'content_filter': [{"expression": {"pf": [1]}}, {"expression": {"fc": [2]}}], 'name': 'pf-1'}
with self.app.app_context():
self.assertTrue(self.f.does_match(doc, self.articles[0]))
self.assertTrue(self.f.does_match(doc, self.articles[1]))
self.assertTrue(self.f.does_match(doc, self.articles[2]))
self.assertFalse(self.f.does_match(doc, self.articles[3]))
self.assertTrue(self.f.does_match(doc, self.articles[4]))
self.assertFalse(self.f.does_match(doc, self.articles[5]))
def test_does_match_using_like_filter_multi_fc2(self):
doc = {'content_filter': [{"expression": {"fc": [3, 4]}}], 'name': 'pf-1'}
with self.app.app_context():
self.assertFalse(self.f.does_match(doc, self.articles[0]))
self.assertFalse(self.f.does_match(doc, self.articles[1]))
self.assertTrue(self.f.does_match(doc, self.articles[2]))
self.assertFalse(self.f.does_match(doc, self.articles[3]))
self.assertFalse(self.f.does_match(doc, self.articles[4]))
self.assertFalse(self.f.does_match(doc, self.articles[5]))
def test_does_match_using_like_filter_multi_pf2(self):
doc = {'content_filter': [{"expression": {"pf": [2]}}], 'name': 'pf-1'}
with self.app.app_context():
self.assertFalse(self.f.does_match(doc, self.articles[0]))
self.assertFalse(self.f.does_match(doc, self.articles[1]))
self.assertTrue(self.f.does_match(doc, self.articles[2]))
self.assertFalse(self.f.does_match(doc, self.articles[3]))
self.assertFalse(self.f.does_match(doc, self.articles[4]))
self.assertFalse(self.f.does_match(doc, self.articles[5]))
def test_does_match_using_like_filter_multi_fc3(self):
doc = {'content_filter': [{"expression": {"fc": [3, 4]}}, {"expression": {"fc": [1, 2]}}], 'name': 'pf-1'}
with self.app.app_context():
self.assertFalse(self.f.does_match(doc, self.articles[0]))
self.assertFalse(self.f.does_match(doc, self.articles[1]))
self.assertTrue(self.f.does_match(doc, self.articles[2]))
self.assertFalse(self.f.does_match(doc, self.articles[3]))
self.assertFalse(self.f.does_match(doc, self.articles[4]))
self.assertFalse(self.f.does_match(doc, self.articles[5]))
def test_does_match_using_like_filter_multi_pf3(self):
doc = {'content_filter': [{"expression": {"pf": [4], "fc": [4]}}], 'name': 'pf-1'}
with self.app.app_context():
self.assertFalse(self.f.does_match(doc, self.articles[0]))
self.assertFalse(self.f.does_match(doc, self.articles[1]))
self.assertTrue(self.f.does_match(doc, self.articles[2]))
self.assertFalse(self.f.does_match(doc, self.articles[3]))
self.assertFalse(self.f.does_match(doc, self.articles[4]))
self.assertFalse(self.f.does_match(doc, self.articles[5]))
def test_if_pf_is_used(self):
with self.app.app_context():
self.assertTrue(self.f._get_content_filters_by_content_filter(1).count() == 1)
self.assertTrue(self.f._get_content_filters_by_content_filter(4).count() == 0)
def test_if_fc_is_used(self):
with self.app.app_context():
self.assertTrue(len(self.f.get_content_filters_by_filter_condition(1)) == 2)
self.assertTrue(len(self.f.get_content_filters_by_filter_condition(3)) == 2)
self.assertTrue(len(self.f.get_content_filters_by_filter_condition(2)) == 1)
def test_get_subscribers_by_filter_condition(self):
filter_condition1 = {'field': 'urgency', 'operator': 'in', 'value': '2'}
filter_condition2 = {'field': 'urgency', 'operator': 'in', 'value': '1'}
filter_condition3 = {'field': 'headline', 'operator': 'like', 'value': 'tor'}
filter_condition4 = {'field': 'urgency', 'operator': 'nin', 'value': '3'}
with self.app.app_context():
cmd = VocabulariesPopulateCommand()
filename = os.path.join(os.path.abspath(
os.path.dirname("apps/prepopulate/data_init/vocabularies.json")), "vocabularies.json")
cmd.run(filename)
r1 = self.s._get_subscribers_by_filter_condition(filter_condition1)
r2 = self.s._get_subscribers_by_filter_condition(filter_condition2)
r3 = self.s._get_subscribers_by_filter_condition(filter_condition3)
r4 = self.s._get_subscribers_by_filter_condition(filter_condition4)
self.assertTrue(len(r1[0]['selected_subscribers']) == 1)
self.assertTrue(len(r2[0]['selected_subscribers']) == 0)
self.assertTrue(len(r3[0]['selected_subscribers']) == 2)
self.assertTrue(len(r4[0]['selected_subscribers']) == 1)
class DeleteMethodTestCase(ContentFilterTests):
"""Tests for the delete() method."""
def test_raises_error_if_filter_referenced_by_subscribers(self):
with self.assertRaises(SuperdeskApiError) as ctx:
self.f.delete({'_id': 1})
self.assertEqual(ctx.exception.status_code, 400) # bad request error
def test_raises_error_if_filter_referenced_by_routing_rules(self):
with self.assertRaises(SuperdeskApiError) as ctx:
self.f.delete({'_id': 4})
self.assertEqual(ctx.exception.status_code, 400) # bad request error
| mugurrus/superdesk-core | apps/content_filters/content_filter/tests.py | Python | agpl-3.0 | 26,062 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'PoliticianInfo'
db.create_table('core_politicianinfo', (
('politician', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Politician'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('value', self.gf('django.db.models.fields.CharField')(max_length=500)),
('schema', self.gf('django.db.models.fields.CharField')(max_length=40, db_index=True)),
))
db.send_create_signal('core', ['PoliticianInfo'])
# Adding index on 'InternalXref', fields ['int_value']
db.create_index('core_internalxref', ['int_value'])
# Adding index on 'InternalXref', fields ['text_value']
db.create_index('core_internalxref', ['text_value'])
# Adding index on 'InternalXref', fields ['target_id']
db.create_index('core_internalxref', ['target_id'])
# Adding index on 'InternalXref', fields ['schema']
db.create_index('core_internalxref', ['schema'])
# Deleting field 'Party.colour'
db.delete_column('core_party', 'colour')
def backwards(self, orm):
# Deleting model 'PoliticianInfo'
db.delete_table('core_politicianinfo')
# Removing index on 'InternalXref', fields ['int_value']
db.delete_index('core_internalxref', ['int_value'])
# Removing index on 'InternalXref', fields ['text_value']
db.delete_index('core_internalxref', ['text_value'])
# Removing index on 'InternalXref', fields ['target_id']
db.delete_index('core_internalxref', ['target_id'])
# Removing index on 'InternalXref', fields ['schema']
db.delete_index('core_internalxref', ['schema'])
# Adding field 'Party.colour'
db.add_column('core_party', 'colour', self.gf('django.db.models.fields.CharField')(default='', max_length=7, blank=True), keep_default=False)
models = {
'core.electedmember': {
'Meta': {'object_name': 'ElectedMember'},
'end_date': ('django.db.models.fields.DateField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Party']"}),
'politician': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Politician']"}),
'riding': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Riding']"}),
'sessions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.Session']"}),
'start_date': ('django.db.models.fields.DateField', [], {'db_index': 'True'})
},
'core.internalxref': {
'Meta': {'object_name': 'InternalXref'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'int_value': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'schema': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'target_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'text_value': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'})
},
'core.party': {
'Meta': {'object_name': 'Party'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'})
},
'core.politician': {
'Meta': {'object_name': 'Politician'},
'dob': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'headshot': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name_family': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'name_given': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'parlpage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'site': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'core.politicianinfo': {
'Meta': {'object_name': 'PoliticianInfo'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'politician': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Politician']"}),
'schema': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'core.riding': {
'Meta': {'object_name': 'Riding'},
'edid': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'province': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'})
},
'core.session': {
'Meta': {'object_name': 'Session'},
'end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'parliamentnum': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'sessnum': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start': ('django.db.models.fields.DateField', [], {})
}
}
complete_apps = ['core']
| twhyte/openparliament | parliament/core/migrations/0008_politician_info.py | Python | agpl-3.0 | 6,821 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-01-15 08:13
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('attribution', '0021_attributionnew_substitute'),
]
operations = [
migrations.AlterField(
model_name='attributionnew',
name='substitute',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='base.Person'),
),
]
| uclouvain/OSIS-Louvain | attribution/migrations/0022_auto_20180115_0913.py | Python | agpl-3.0 | 579 |
# -*- coding: utf-8 -*-
from openerp import models, fields, api, SUPERUSER_ID, exceptions
import uuid
class Volunteer(models.Model):
_inherit = 'res.users'
REASONS = [
('no_time', u'Nie mam czasu.'),
('location_change', u'Zmieniam miejsce zamieszkania.'),
('personal_change', u'Zmienia się moja sytuacja osobista (np. kończę studia).'),
('bad_offers', u'Oferty nie spełniły moich oczekiwań.'),
('no_satisfaction', u'Wolontariat nie sprawia mi już satysfakcji.'),
('else', u'Inny (wpisz powód)'),
]
active_state = fields.Selection(selection_add=[('deleted', 'usunięty')])
reason_for_deleting_account = fields.Selection(REASONS, string=u"Dlaczego chcesz usunąć konto?")
reason_other_description = fields.Text()
@api.multi
def get_deletion_reason(self):
self.ensure_one()
return dict(Volunteer.REASONS).get(self.reason_for_deleting_account)
@api.one
def delete_account(self):
if not (self.env.uid == SUPERUSER_ID or self.user_has_groups('bestja_base.instance_admin')):
raise exceptions.AccessError("Nie masz uprawnień do usuwania użytkowników!")
self.sudo().write({
'login': uuid.uuid1(),
'name': 'Konto usunięte',
'street_gov': '',
'street_number_gov': '',
'apt_number_gov': '',
'zip_code_gov': '',
'email': '',
'phone': '',
'street': '',
'street_number': '',
'apt_number': '',
'zip_code': '',
'curriculum_vitae': None,
'cv_filename': '',
'active_state': 'deleted',
'active': False,
'pesel': '',
'document_id_kind': None,
'document_id': '',
})
| EE/bestja | addons/bestja_account_deletion/models.py | Python | agpl-3.0 | 1,832 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import math
import re
import time
from _common import ceiling
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.osv import osv, fields, expression
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
import psycopg2
import openerp.addons.decimal_precision as dp
from openerp.tools.float_utils import float_round
def ean_checksum(eancode):
"""returns the checksum of an ean string of length 13, returns -1 if the string has the wrong length"""
if len(eancode) != 13:
return -1
oddsum=0
evensum=0
total=0
eanvalue=eancode
reversevalue = eanvalue[::-1]
finalean=reversevalue[1:]
for i in range(len(finalean)):
if i % 2 == 0:
oddsum += int(finalean[i])
else:
evensum += int(finalean[i])
total=(oddsum * 3) + evensum
check = int(10 - math.ceil(total % 10.0)) %10
return check
def check_ean(eancode):
"""returns True if eancode is a valid ean13 string, or null"""
if not eancode:
return True
if len(eancode) != 13:
return False
try:
int(eancode)
except:
return False
return ean_checksum(eancode) == int(eancode[-1])
def sanitize_ean13(ean13):
"""Creates and returns a valid ean13 from an invalid one"""
if not ean13:
return "0000000000000"
ean13 = re.sub("[A-Za-z]","0",ean13);
ean13 = re.sub("[^0-9]","",ean13);
ean13 = ean13[:13]
if len(ean13) < 13:
ean13 = ean13 + '0' * (13-len(ean13))
return ean13[:-1] + str(ean_checksum(ean13))
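# Worked example (added for illustration, using a commonly quoted sample code):
# for "4006381333931" the loop above sums the reversed digits after dropping the
# check digit -- 3+3+3+8+6+0 = 23 at the tripled positions and 9+3+1+3+0+4 = 20
# at the others -- giving 3*23 + 20 = 89, so the expected check digit is
# (10 - 89 % 10) % 10 = 1, which matches the last digit:
#
#     >>> check_ean("4006381333931")
#     True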
#----------------------------------------------------------
# UOM
#----------------------------------------------------------
class product_uom_categ(osv.osv):
_name = 'product.uom.categ'
_description = 'Product uom categ'
_columns = {
'name': fields.char('Name', required=True, translate=True),
}
class product_uom(osv.osv):
_name = 'product.uom'
_description = 'Product Unit of Measure'
def _compute_factor_inv(self, factor):
return factor and (1.0 / factor) or 0.0
def _factor_inv(self, cursor, user, ids, name, arg, context=None):
res = {}
for uom in self.browse(cursor, user, ids, context=context):
res[uom.id] = self._compute_factor_inv(uom.factor)
return res
def _factor_inv_write(self, cursor, user, id, name, value, arg, context=None):
return self.write(cursor, user, id, {'factor': self._compute_factor_inv(value)}, context=context)
def name_create(self, cr, uid, name, context=None):
""" The UoM category and factor are required, so we'll have to add temporary values
for imported UoMs """
uom_categ = self.pool.get('product.uom.categ')
# look for the category based on the english name, i.e. no context on purpose!
# TODO: should find a way to have it translated but not created until actually used
categ_misc = 'Unsorted/Imported Units'
categ_id = uom_categ.search(cr, uid, [('name', '=', categ_misc)])
if categ_id:
categ_id = categ_id[0]
else:
categ_id, _ = uom_categ.name_create(cr, uid, categ_misc)
uom_id = self.create(cr, uid, {self._rec_name: name,
'category_id': categ_id,
'factor': 1})
return self.name_get(cr, uid, [uom_id], context=context)[0]
def create(self, cr, uid, data, context=None):
if 'factor_inv' in data:
if data['factor_inv'] != 1:
data['factor'] = self._compute_factor_inv(data['factor_inv'])
del(data['factor_inv'])
return super(product_uom, self).create(cr, uid, data, context)
_order = "name"
_columns = {
'name': fields.char('Unit of Measure', required=True, translate=True),
'category_id': fields.many2one('product.uom.categ', 'Product Category', required=True, ondelete='cascade',
help="Conversion between Units of Measure can only occur if they belong to the same category. The conversion will be made based on the ratios."),
'factor': fields.float('Ratio', required=True, digits=0, # force NUMERIC with unlimited precision
help='How much bigger or smaller this unit is compared to the reference Unit of Measure for this category:\n'\
'1 * (reference unit) = ratio * (this unit)'),
'factor_inv': fields.function(_factor_inv, digits=0, # force NUMERIC with unlimited precision
fnct_inv=_factor_inv_write,
string='Bigger Ratio',
help='How many times this Unit of Measure is bigger than the reference Unit of Measure in this category:\n'\
'1 * (this unit) = ratio * (reference unit)', required=True),
'rounding': fields.float('Rounding Precision', digits_compute=dp.get_precision('Product Unit of Measure'), required=True,
help="The computed quantity will be a multiple of this value. "\
"Use 1.0 for a Unit of Measure that cannot be further split, such as a piece."),
'active': fields.boolean('Active', help="By unchecking the active field you can disable a unit of measure without deleting it."),
'uom_type': fields.selection([('bigger','Bigger than the reference Unit of Measure'),
('reference','Reference Unit of Measure for this category'),
('smaller','Smaller than the reference Unit of Measure')],'Type', required=1),
}
_defaults = {
'active': 1,
'rounding': 0.01,
'uom_type': 'reference',
}
_sql_constraints = [
('factor_gt_zero', 'CHECK (factor!=0)', 'The conversion ratio for a unit of measure cannot be 0!')
]
def _compute_qty(self, cr, uid, from_uom_id, qty, to_uom_id=False, round=True):
if not from_uom_id or not qty or not to_uom_id:
return qty
uoms = self.browse(cr, uid, [from_uom_id, to_uom_id])
if uoms[0].id == from_uom_id:
from_unit, to_unit = uoms[0], uoms[-1]
else:
from_unit, to_unit = uoms[-1], uoms[0]
return self._compute_qty_obj(cr, uid, from_unit, qty, to_unit, round=round)
def _compute_qty_obj(self, cr, uid, from_unit, qty, to_unit, round=True, context=None):
if context is None:
context = {}
if from_unit.category_id.id != to_unit.category_id.id:
if context.get('raise-exception', True):
raise osv.except_osv(_('Error!'), _('Conversion from Product UoM %s to Default UoM %s is not possible as they both belong to different Category!.') % (from_unit.name,to_unit.name,))
else:
return qty
# First round to the precision of the original unit, so that
# float representation errors do not bias the following ceil()
# e.g. with 1 / (1/12) we could get 12.0000048, ceiling to 13!
amount = float_round(qty/from_unit.factor, precision_rounding=from_unit.rounding)
if to_unit:
amount = amount * to_unit.factor
if round:
amount = ceiling(amount, to_unit.rounding)
return amount
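    # Worked example (hypothetical units, added for illustration): with a
    # reference unit "Unit(s)" (factor 1.0) and a bigger unit "Dozen(s)"
    # (factor 1/12), converting qty=2 from Dozen to Unit gives
    # 2 / (1/12) = 24, then 24 * 1.0 = 24, and finally a ceiling to the
    # target unit's rounding precision.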
def _compute_price(self, cr, uid, from_uom_id, price, to_uom_id=False):
if not from_uom_id or not price or not to_uom_id:
return price
from_unit, to_unit = self.browse(cr, uid, [from_uom_id, to_uom_id])
if from_unit.category_id.id != to_unit.category_id.id:
return price
amount = price * from_unit.factor
if to_uom_id:
amount = amount / to_unit.factor
return amount
def onchange_type(self, cursor, user, ids, value):
if value == 'reference':
return {'value': {'factor': 1, 'factor_inv': 1}}
return {}
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
if 'category_id' in vals:
for uom in self.browse(cr, uid, ids, context=context):
if uom.category_id.id != vals['category_id']:
raise osv.except_osv(_('Warning!'),_("Cannot change the category of existing Unit of Measure '%s'.") % (uom.name,))
return super(product_uom, self).write(cr, uid, ids, vals, context=context)
class product_ul(osv.osv):
_name = "product.ul"
_description = "Logistic Unit"
_columns = {
'name' : fields.char('Name', select=True, required=True, translate=True),
'type' : fields.selection([('unit','Unit'),('pack','Pack'),('box', 'Box'), ('pallet', 'Pallet')], 'Type', required=True),
'height': fields.float('Height', help='The height of the package'),
'width': fields.float('Width', help='The width of the package'),
'length': fields.float('Length', help='The length of the package'),
'weight': fields.float('Empty Package Weight'),
}
#----------------------------------------------------------
# Categories
#----------------------------------------------------------
class product_category(osv.osv):
def name_get(self, cr, uid, ids, context=None):
if isinstance(ids, (list, tuple)) and not len(ids):
return []
if isinstance(ids, (long, int)):
ids = [ids]
reads = self.read(cr, uid, ids, ['name','parent_id'], context=context)
res = []
for record in reads:
name = record['name']
if record['parent_id']:
name = record['parent_id'][1]+' / '+name
res.append((record['id'], name))
return res
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
if not context:
context = {}
if name:
            # Be sure name_search is symmetric to name_get
name = name.split(' / ')[-1]
ids = self.search(cr, uid, [('name', operator, name)] + args, limit=limit, context=context)
else:
ids = self.search(cr, uid, args, limit=limit, context=context)
return self.name_get(cr, uid, ids, context)
def _name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
res = self.name_get(cr, uid, ids, context=context)
return dict(res)
_name = "product.category"
_description = "Product Category"
_columns = {
'name': fields.char('Name', required=True, translate=True, select=True),
'complete_name': fields.function(_name_get_fnc, type="char", string='Name'),
'parent_id': fields.many2one('product.category','Parent Category', select=True, ondelete='cascade'),
'child_id': fields.one2many('product.category', 'parent_id', string='Child Categories'),
'sequence': fields.integer('Sequence', select=True, help="Gives the sequence order when displaying a list of product categories."),
'type': fields.selection([('view','View'), ('normal','Normal')], 'Category Type', help="A category of the view type is a virtual category that can be used as the parent of another category to create a hierarchical structure."),
'parent_left': fields.integer('Left Parent', select=1),
'parent_right': fields.integer('Right Parent', select=1),
}
_defaults = {
'type' : 'normal',
}
_parent_name = "parent_id"
_parent_store = True
_parent_order = 'sequence, name'
_order = 'parent_left'
_constraints = [
(osv.osv._check_recursion, 'Error ! You cannot create recursive categories.', ['parent_id'])
]
class produce_price_history(osv.osv):
"""
Keep track of the ``product.template`` standard prices as they are changed.
"""
_name = 'product.price.history'
_rec_name = 'datetime'
_order = 'datetime desc'
_columns = {
'company_id': fields.many2one('res.company', required=True),
'product_template_id': fields.many2one('product.template', 'Product Template', required=True, ondelete='cascade'),
'datetime': fields.datetime('Historization Time'),
'cost': fields.float('Historized Cost'),
}
def _get_default_company(self, cr, uid, context=None):
if 'force_company' in context:
return context['force_company']
else:
company = self.pool['res.users'].browse(cr, uid, uid,
context=context).company_id
return company.id if company else False
_defaults = {
'datetime': fields.datetime.now,
'company_id': _get_default_company,
}
#----------------------------------------------------------
# Product Attributes
#----------------------------------------------------------
class product_attribute(osv.osv):
_name = "product.attribute"
_description = "Product Attribute"
_columns = {
'name': fields.char('Name', translate=True, required=True),
'value_ids': fields.one2many('product.attribute.value', 'attribute_id', 'Values', copy=True),
}
class product_attribute_value(osv.osv):
_name = "product.attribute.value"
_order = 'sequence'
def _get_price_extra(self, cr, uid, ids, name, args, context=None):
result = dict.fromkeys(ids, 0)
if not context.get('active_id'):
return result
for obj in self.browse(cr, uid, ids, context=context):
for price_id in obj.price_ids:
if price_id.product_tmpl_id.id == context.get('active_id'):
result[obj.id] = price_id.price_extra
break
return result
def _set_price_extra(self, cr, uid, id, name, value, args, context=None):
if context is None:
context = {}
if 'active_id' not in context:
return None
p_obj = self.pool['product.attribute.price']
p_ids = p_obj.search(cr, uid, [('value_id', '=', id), ('product_tmpl_id', '=', context['active_id'])], context=context)
if p_ids:
p_obj.write(cr, uid, p_ids, {'price_extra': value}, context=context)
else:
p_obj.create(cr, uid, {
'product_tmpl_id': context['active_id'],
'value_id': id,
'price_extra': value,
}, context=context)
_columns = {
'sequence': fields.integer('Sequence', help="Determine the display order"),
'name': fields.char('Value', translate=True, required=True),
'attribute_id': fields.many2one('product.attribute', 'Attribute', required=True, ondelete='cascade'),
'product_ids': fields.many2many('product.product', id1='att_id', id2='prod_id', string='Variants', readonly=True),
'price_extra': fields.function(_get_price_extra, type='float', string='Attribute Price Extra',
fnct_inv=_set_price_extra,
digits_compute=dp.get_precision('Product Price'),
help="Price Extra: Extra price for the variant with this attribute value on sale price. eg. 200 price extra, 1000 + 200 = 1200."),
'price_ids': fields.one2many('product.attribute.price', 'value_id', string='Attribute Prices', readonly=True),
}
_sql_constraints = [
('value_company_uniq', 'unique (name,attribute_id)', 'This attribute value already exists !')
]
_defaults = {
'price_extra': 0.0,
}
def unlink(self, cr, uid, ids, context=None):
ctx = dict(context or {}, active_test=False)
product_ids = self.pool['product.product'].search(cr, uid, [('attribute_value_ids', 'in', ids)], context=ctx)
if product_ids:
raise osv.except_osv(_('Integrity Error!'), _('The operation cannot be completed:\nYou trying to delete an attribute value with a reference on a product variant.'))
return super(product_attribute_value, self).unlink(cr, uid, ids, context=context)
class product_attribute_price(osv.osv):
_name = "product.attribute.price"
_columns = {
'product_tmpl_id': fields.many2one('product.template', 'Product Template', required=True, ondelete='cascade'),
'value_id': fields.many2one('product.attribute.value', 'Product Attribute Value', required=True, ondelete='cascade'),
'price_extra': fields.float('Price Extra', digits_compute=dp.get_precision('Product Price')),
}
class product_attribute_line(osv.osv):
_name = "product.attribute.line"
_rec_name = 'attribute_id'
_columns = {
'product_tmpl_id': fields.many2one('product.template', 'Product Template', required=True, ondelete='cascade'),
'attribute_id': fields.many2one('product.attribute', 'Attribute', required=True, ondelete='restrict'),
'value_ids': fields.many2many('product.attribute.value', id1='line_id', id2='val_id', string='Product Attribute Value'),
}
#----------------------------------------------------------
# Products
#----------------------------------------------------------
class product_template(osv.osv):
_name = "product.template"
_inherit = ['mail.thread']
_description = "Product Template"
_order = "name"
def _get_image(self, cr, uid, ids, name, args, context=None):
result = dict.fromkeys(ids, False)
for obj in self.browse(cr, uid, ids, context=context):
result[obj.id] = tools.image_get_resized_images(obj.image, avoid_resize_medium=True)
return result
def _set_image(self, cr, uid, id, name, value, args, context=None):
return self.write(cr, uid, [id], {'image': tools.image_resize_image_big(value)}, context=context)
def _is_product_variant(self, cr, uid, ids, name, arg, context=None):
return self._is_product_variant_impl(cr, uid, ids, name, arg, context=context)
def _is_product_variant_impl(self, cr, uid, ids, name, arg, context=None):
return dict.fromkeys(ids, False)
def _product_template_price(self, cr, uid, ids, name, arg, context=None):
plobj = self.pool.get('product.pricelist')
res = {}
quantity = context.get('quantity') or 1.0
pricelist = context.get('pricelist', False)
partner = context.get('partner', False)
if pricelist:
# Support context pricelists specified as display_name or ID for compatibility
if isinstance(pricelist, basestring):
pricelist_ids = plobj.name_search(
cr, uid, pricelist, operator='=', context=context, limit=1)
pricelist = pricelist_ids[0][0] if pricelist_ids else pricelist
if isinstance(pricelist, (int, long)):
products = self.browse(cr, uid, ids, context=context)
qtys = map(lambda x: (x, quantity, partner), products)
pl = plobj.browse(cr, uid, pricelist, context=context)
price = plobj._price_get_multi(cr,uid, pl, qtys, context=context)
for id in ids:
res[id] = price.get(id, 0.0)
for id in ids:
res.setdefault(id, 0.0)
return res
def get_history_price(self, cr, uid, product_tmpl, company_id, date=None, context=None):
if context is None:
context = {}
if date is None:
date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
price_history_obj = self.pool.get('product.price.history')
history_ids = price_history_obj.search(cr, uid, [('company_id', '=', company_id), ('product_template_id', '=', product_tmpl), ('datetime', '<=', date)], limit=1)
if history_ids:
return price_history_obj.read(cr, uid, history_ids[0], ['cost'], context=context)['cost']
return 0.0
def _set_standard_price(self, cr, uid, product_tmpl_id, value, context=None):
''' Store the standard price change in order to be able to retrieve the cost of a product template for a given date'''
if context is None:
context = {}
price_history_obj = self.pool['product.price.history']
user_company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
company_id = context.get('force_company', user_company)
price_history_obj.create(cr, uid, {
'product_template_id': product_tmpl_id,
'cost': value,
'company_id': company_id,
}, context=context)
def _get_product_variant_count(self, cr, uid, ids, name, arg, context=None):
res = {}
for product in self.browse(cr, uid, ids):
res[product.id] = len(product.product_variant_ids)
return res
_columns = {
'name': fields.char('Name', required=True, translate=True, select=True),
'product_manager': fields.many2one('res.users','Product Manager'),
'description': fields.text('Description',translate=True,
help="A precise description of the Product, used only for internal information purposes."),
'description_purchase': fields.text('Purchase Description',translate=True,
help="A description of the Product that you want to communicate to your suppliers. "
"This description will be copied to every Purchase Order, Receipt and Supplier Invoice/Refund."),
'description_sale': fields.text('Sale Description',translate=True,
help="A description of the Product that you want to communicate to your customers. "
"This description will be copied to every Sale Order, Delivery Order and Customer Invoice/Refund"),
'type': fields.selection([('consu', 'Consumable'),('service','Service')], 'Product Type', required=True, help="A consumable is a product for which you don't manage stock; a service is a non-material product provided by a company or an individual."),
'rental': fields.boolean('Can be Rented'),
'categ_id': fields.many2one('product.category','Internal Category', required=True, change_default=True, domain="[('type','=','normal')]" ,help="Select category for the current product"),
'price': fields.function(_product_template_price, type='float', string='Price', digits_compute=dp.get_precision('Product Price')),
'list_price': fields.float('Sale Price', digits_compute=dp.get_precision('Product Price'), help="Base price to compute the customer price. Sometimes called the catalog price."),
'lst_price' : fields.related('list_price', type="float", string='Public Price', digits_compute=dp.get_precision('Product Price')),
'standard_price': fields.property(type = 'float', digits_compute=dp.get_precision('Product Price'),
help="Cost price of the product template used for standard stock valuation in accounting and used as a base price on purchase orders.",
groups="base.group_user", string="Cost Price"),
'volume': fields.float('Volume', help="The volume in m3."),
'weight': fields.float('Gross Weight', digits_compute=dp.get_precision('Stock Weight'), help="The gross weight in Kg."),
'weight_net': fields.float('Net Weight', digits_compute=dp.get_precision('Stock Weight'), help="The net weight in Kg."),
'warranty': fields.float('Warranty'),
'sale_ok': fields.boolean('Can be Sold', help="Specify if the product can be selected in a sales order line."),
'pricelist_id': fields.dummy(string='Pricelist', relation='product.pricelist', type='many2one'),
'state': fields.selection([('',''),
('draft', 'In Development'),
('sellable','Normal'),
('end','End of Lifecycle'),
('obsolete','Obsolete')], 'Status'),
'uom_id': fields.many2one('product.uom', 'Unit of Measure', required=True, help="Default Unit of Measure used for all stock operation."),
'uom_po_id': fields.many2one('product.uom', 'Purchase Unit of Measure', required=True, help="Default Unit of Measure used for purchase orders. It must be in the same category as the default unit of measure."),
'uos_id' : fields.many2one('product.uom', 'Unit of Sale',
help='Specify a unit of measure here if invoicing is made in another unit of measure than inventory. Keep empty to use the default unit of measure.'),
'uos_coeff': fields.float('Unit of Measure -> UOS Coeff', digits_compute= dp.get_precision('Product UoS'),
help='Coefficient to convert default Unit of Measure to Unit of Sale\n'
' uos = uom * coeff'),
'mes_type': fields.selection((('fixed', 'Fixed'), ('variable', 'Variable')), 'Measure Type'),
'company_id': fields.many2one('res.company', 'Company', select=1),
# image: all image fields are base64 encoded and PIL-supported
'image': fields.binary("Image",
help="This field holds the image used as image for the product, limited to 1024x1024px."),
'image_medium': fields.function(_get_image, fnct_inv=_set_image,
string="Medium-sized image", type="binary", multi="_get_image",
store={
'product.template': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Medium-sized image of the product. It is automatically "\
"resized as a 128x128px image, with aspect ratio preserved, "\
"only when the image exceeds one of those sizes. Use this field in form views or some kanban views."),
'image_small': fields.function(_get_image, fnct_inv=_set_image,
string="Small-sized image", type="binary", multi="_get_image",
store={
'product.template': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Small-sized image of the product. It is automatically "\
"resized as a 64x64px image, with aspect ratio preserved. "\
"Use this field anywhere a small image is required."),
'packaging_ids': fields.one2many(
'product.packaging', 'product_tmpl_id', 'Logistical Units',
help="Gives the different ways to package the same product. This has no impact on "
"the picking order and is mainly used if you use the EDI module."),
'seller_ids': fields.one2many('product.supplierinfo', 'product_tmpl_id', 'Supplier'),
'seller_delay': fields.related('seller_ids','delay', type='integer', string='Supplier Lead Time',
help="This is the average delay in days between the purchase order confirmation and the receipts for this product and for the default supplier. It is used by the scheduler to order requests based on reordering delays."),
'seller_qty': fields.related('seller_ids','qty', type='float', string='Supplier Quantity',
help="This is minimum quantity to purchase from Main Supplier."),
'seller_id': fields.related('seller_ids','name', type='many2one', relation='res.partner', string='Main Supplier',
help="Main Supplier who has highest priority in Supplier List."),
'active': fields.boolean('Active', help="If unchecked, it will allow you to hide the product without removing it."),
'color': fields.integer('Color Index'),
'is_product_variant': fields.function( _is_product_variant, type='boolean', string='Is product variant'),
'attribute_line_ids': fields.one2many('product.attribute.line', 'product_tmpl_id', 'Product Attributes'),
'product_variant_ids': fields.one2many('product.product', 'product_tmpl_id', 'Products', required=True),
'product_variant_count': fields.function( _get_product_variant_count, type='integer', string='# of Product Variants'),
# related to display product product information if is_product_variant
'ean13': fields.related('product_variant_ids', 'ean13', type='char', string='EAN13 Barcode'),
'default_code': fields.related('product_variant_ids', 'default_code', type='char', string='Internal Reference'),
}
def _price_get_list_price(self, product):
return 0.0
def _price_get(self, cr, uid, products, ptype='list_price', context=None):
if context is None:
context = {}
if 'currency_id' in context:
pricetype_obj = self.pool.get('product.price.type')
price_type_id = pricetype_obj.search(cr, uid, [('field','=',ptype)])[0]
price_type_currency_id = pricetype_obj.browse(cr,uid,price_type_id).currency_id.id
res = {}
product_uom_obj = self.pool.get('product.uom')
for product in products:
# standard_price field can only be seen by users in base.group_user
# Thus, in order to compute the sale price from the cost price for users not in this group,
# we fetch the standard price as the superuser
if ptype != 'standard_price':
res[product.id] = product[ptype] or 0.0
else:
res[product.id] = product.sudo()[ptype]
if ptype == 'list_price':
res[product.id] += product._name == "product.product" and product.price_extra or 0.0
if 'uom' in context:
uom = product.uom_id or product.uos_id
res[product.id] = product_uom_obj._compute_price(cr, uid,
uom.id, res[product.id], context['uom'])
# Convert from price_type currency to asked one
if 'currency_id' in context:
# Take the price_type currency from the product field
# This is right cause a field cannot be in more than one currency
res[product.id] = self.pool.get('res.currency').compute(cr, uid, price_type_currency_id,
context['currency_id'], res[product.id],context=context)
return res
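# Illustrative flow for the price computation above (hypothetical numbers, not
# part of the original module): for ptype='list_price', a product.product with
# list_price 100.0 and price_extra 20.0 yields 120.0; if context['uom'] is set,
# that amount is re-expressed in the requested unit of measure, and if
# context['currency_id'] is set it is finally converted from the price type's
# currency into the requested one.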
def _get_uom_id(self, cr, uid, *args):
return self.pool["product.uom"].search(cr, uid, [], limit=1, order='id')[0]
def _default_category(self, cr, uid, context=None):
if context is None:
context = {}
if 'categ_id' in context and context['categ_id']:
return context['categ_id']
md = self.pool.get('ir.model.data')
res = False
try:
res = md.get_object_reference(cr, uid, 'product', 'product_category_all')[1]
except ValueError:
res = False
return res
def onchange_uom(self, cursor, user, ids, uom_id, uom_po_id):
if uom_id:
return {'value': {'uom_po_id': uom_id}}
return {}
def create_variant_ids(self, cr, uid, ids, context=None):
product_obj = self.pool.get("product.product")
ctx = context and context.copy() or {}
if ctx.get("create_product_variant"):
return None
ctx.update(active_test=False, create_product_variant=True)
tmpl_ids = self.browse(cr, uid, ids, context=ctx)
for tmpl_id in tmpl_ids:
# list of values combination
all_variants = [[]]
for variant_id in tmpl_id.attribute_line_ids:
if len(variant_id.value_ids) > 1:
temp_variants = []
for value_id in variant_id.value_ids:
for variant in all_variants:
temp_variants.append(variant + [int(value_id)])
all_variants = temp_variants
# check product
variant_ids_to_active = []
variants_active_ids = []
variants_inactive = []
for product_id in tmpl_id.product_variant_ids:
variants = map(int,product_id.attribute_value_ids)
if variants in all_variants:
variants_active_ids.append(product_id.id)
all_variants.pop(all_variants.index(variants))
if not product_id.active:
variant_ids_to_active.append(product_id.id)
else:
variants_inactive.append(product_id)
if variant_ids_to_active:
product_obj.write(cr, uid, variant_ids_to_active, {'active': True}, context=ctx)
# create new product
for variant_ids in all_variants:
values = {
'product_tmpl_id': tmpl_id.id,
'attribute_value_ids': [(6, 0, variant_ids)]
}
id = product_obj.create(cr, uid, values, context=ctx)
variants_active_ids.append(id)
# unlink or inactive product
for variant_id in map(int,variants_inactive):
try:
with cr.savepoint():
product_obj.unlink(cr, uid, [variant_id], context=ctx)
except (psycopg2.Error, osv.except_osv):
product_obj.write(cr, uid, [variant_id], {'active': False}, context=ctx)
pass
return True
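# Worked sketch of the combination logic above (hypothetical attribute values,
# not part of the original module): with attribute lines Color = {Red, Blue}
# and Size = {S, M}, all_variants grows from [[]] to the four value-id
# combinations Red/S, Blue/S, Red/M and Blue/M, so four product.product
# variants are created; an attribute line with a single value is skipped by
# the len(...) > 1 check and therefore does not multiply the number of variants.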
def create(self, cr, uid, vals, context=None):
''' Store the initial standard price in order to be able to retrieve the cost of a product template for a given date'''
product_template_id = super(product_template, self).create(cr, uid, vals, context=context)
if not context or "create_product_product" not in context:
self.create_variant_ids(cr, uid, [product_template_id], context=context)
self._set_standard_price(cr, uid, product_template_id, vals.get('standard_price', 0.0), context=context)
# TODO: this is needed to set given values to first variant after creation
# these fields should be moved to product.product, as they lead to confusion
related_vals = {}
if vals.get('ean13'):
related_vals['ean13'] = vals['ean13']
if vals.get('default_code'):
related_vals['default_code'] = vals['default_code']
if related_vals:
self.write(cr, uid, product_template_id, related_vals, context=context)
return product_template_id
def write(self, cr, uid, ids, vals, context=None):
''' Store the standard price change in order to be able to retrieve the cost of a product template for a given date'''
if isinstance(ids, (int, long)):
ids = [ids]
if 'uom_po_id' in vals:
new_uom = self.pool.get('product.uom').browse(cr, uid, vals['uom_po_id'], context=context)
for product in self.browse(cr, uid, ids, context=context):
old_uom = product.uom_po_id
if old_uom.category_id.id != new_uom.category_id.id:
raise osv.except_osv(_('Unit of Measure categories Mismatch!'), _("The new Unit of Measure '%s' must belong to the same Unit of Measure category '%s' as the old Unit of Measure '%s'. If you need to change the unit of measure, you may deactivate this product from the 'Procurements' tab and create a new one.") % (new_uom.name, old_uom.category_id.name, old_uom.name,))
if 'standard_price' in vals:
for prod_template_id in ids:
self._set_standard_price(cr, uid, prod_template_id, vals['standard_price'], context=context)
res = super(product_template, self).write(cr, uid, ids, vals, context=context)
if 'attribute_line_ids' in vals or vals.get('active'):
self.create_variant_ids(cr, uid, ids, context=context)
if 'active' in vals and not vals.get('active'):
ctx = context and context.copy() or {}
ctx.update(active_test=False)
product_ids = []
for product in self.browse(cr, uid, ids, context=ctx):
product_ids = map(int,product.product_variant_ids)
self.pool.get("product.product").write(cr, uid, product_ids, {'active': vals.get('active')}, context=ctx)
return res
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
template = self.browse(cr, uid, id, context=context)
default['name'] = _("%s (copy)") % (template['name'])
return super(product_template, self).copy(cr, uid, id, default=default, context=context)
_defaults = {
'company_id': lambda s,cr,uid,c: s.pool.get('res.company')._company_default_get(cr, uid, 'product.template', context=c),
'list_price': 1,
'standard_price': 0.0,
'sale_ok': 1,
'uom_id': _get_uom_id,
'uom_po_id': _get_uom_id,
'uos_coeff': 1.0,
'mes_type': 'fixed',
'categ_id' : _default_category,
'type' : 'consu',
'active': True,
}
def _check_uom(self, cursor, user, ids, context=None):
for product in self.browse(cursor, user, ids, context=context):
if product.uom_id.category_id.id != product.uom_po_id.category_id.id:
return False
return True
def _check_uos(self, cursor, user, ids, context=None):
for product in self.browse(cursor, user, ids, context=context):
if product.uos_id \
and product.uos_id.category_id.id \
== product.uom_id.category_id.id:
return False
return True
_constraints = [
(_check_uom, 'Error: The default Unit of Measure and the purchase Unit of Measure must be in the same category.', ['uom_id']),
]
def name_get(self, cr, user, ids, context=None):
if context is None:
context = {}
if 'partner_id' in context:
pass
return super(product_template, self).name_get(cr, user, ids, context)
class product_product(osv.osv):
_name = "product.product"
_description = "Product"
_inherits = {'product.template': 'product_tmpl_id'}
_inherit = ['mail.thread']
_order = 'default_code,name_template'
def _product_price(self, cr, uid, ids, name, arg, context=None):
plobj = self.pool.get('product.pricelist')
res = {}
if context is None:
context = {}
quantity = context.get('quantity') or 1.0
pricelist = context.get('pricelist', False)
partner = context.get('partner', False)
if pricelist:
# Support context pricelists specified as display_name or ID for compatibility
if isinstance(pricelist, basestring):
pricelist_ids = plobj.name_search(
cr, uid, pricelist, operator='=', context=context, limit=1)
pricelist = pricelist_ids[0][0] if pricelist_ids else pricelist
if isinstance(pricelist, (int, long)):
products = self.browse(cr, uid, ids, context=context)
qtys = map(lambda x: (x, quantity, partner), products)
pl = plobj.browse(cr, uid, pricelist, context=context)
price = plobj._price_get_multi(cr,uid, pl, qtys, context=context)
for id in ids:
res[id] = price.get(id, 0.0)
for id in ids:
res.setdefault(id, 0.0)
return res
def view_header_get(self, cr, uid, view_id, view_type, context=None):
if context is None:
context = {}
res = super(product_product, self).view_header_get(cr, uid, view_id, view_type, context)
if (context.get('categ_id', False)):
return _('Products: ') + self.pool.get('product.category').browse(cr, uid, context['categ_id'], context=context).name
return res
def _product_lst_price(self, cr, uid, ids, name, arg, context=None):
product_uom_obj = self.pool.get('product.uom')
res = dict.fromkeys(ids, 0.0)
for product in self.browse(cr, uid, ids, context=context):
if 'uom' in context:
uom = product.uos_id or product.uom_id
res[product.id] = product_uom_obj._compute_price(cr, uid,
uom.id, product.list_price, context['uom'])
else:
res[product.id] = product.list_price
res[product.id] = res[product.id] + product.price_extra
return res
def _set_product_lst_price(self, cr, uid, id, name, value, args, context=None):
product_uom_obj = self.pool.get('product.uom')
product = self.browse(cr, uid, id, context=context)
if 'uom' in context:
uom = product.uos_id or product.uom_id
value = product_uom_obj._compute_price(cr, uid,
context['uom'], value, uom.id)
value = value - product.price_extra
return product.write({'list_price': value}, context=context)
def _get_partner_code_name(self, cr, uid, ids, product, partner_id, context=None):
for supinfo in product.seller_ids:
if supinfo.name.id == partner_id:
return {'code': supinfo.product_code or product.default_code, 'name': supinfo.product_name or product.name}
res = {'code': product.default_code, 'name': product.name}
return res
def _product_code(self, cr, uid, ids, name, arg, context=None):
res = {}
if context is None:
context = {}
for p in self.browse(cr, uid, ids, context=context):
res[p.id] = self._get_partner_code_name(cr, uid, [], p, context.get('partner_id', None), context=context)['code']
return res
def _product_partner_ref(self, cr, uid, ids, name, arg, context=None):
res = {}
if context is None:
context = {}
for p in self.browse(cr, uid, ids, context=context):
data = self._get_partner_code_name(cr, uid, [], p, context.get('partner_id', None), context=context)
if not data['code']:
data['code'] = p.code
if not data['name']:
data['name'] = p.name
res[p.id] = (data['code'] and ('['+data['code']+'] ') or '') + (data['name'] or '')
return res
def _is_product_variant_impl(self, cr, uid, ids, name, arg, context=None):
return dict.fromkeys(ids, True)
def _get_name_template_ids(self, cr, uid, ids, context=None):
result = set()
template_ids = self.pool.get('product.product').search(cr, uid, [('product_tmpl_id', 'in', ids)])
for el in template_ids:
result.add(el)
return list(result)
def _get_image_variant(self, cr, uid, ids, name, args, context=None):
result = dict.fromkeys(ids, False)
for obj in self.browse(cr, uid, ids, context=context):
result[obj.id] = obj.image_variant or getattr(obj.product_tmpl_id, name)
return result
def _set_image_variant(self, cr, uid, id, name, value, args, context=None):
image = tools.image_resize_image_big(value)
res = self.write(cr, uid, [id], {'image_variant': image}, context=context)
product = self.browse(cr, uid, id, context=context)
if not product.product_tmpl_id.image:
product.write({'image_variant': None}, context=context)
product.product_tmpl_id.write({'image': image}, context=context)
return res
def _get_price_extra(self, cr, uid, ids, name, args, context=None):
result = dict.fromkeys(ids, False)
for product in self.browse(cr, uid, ids, context=context):
price_extra = 0.0
for variant_id in product.attribute_value_ids:
for price_id in variant_id.price_ids:
if price_id.product_tmpl_id.id == product.product_tmpl_id.id:
price_extra += price_id.price_extra
result[product.id] = price_extra
return result
_columns = {
'price': fields.function(_product_price, type='float', string='Price', digits_compute=dp.get_precision('Product Price')),
'price_extra': fields.function(_get_price_extra, type='float', string='Variant Extra Price', help="This is the sum of the extra price of all attributes"),
'lst_price': fields.function(_product_lst_price, fnct_inv=_set_product_lst_price, type='float', string='Public Price', digits_compute=dp.get_precision('Product Price')),
'code': fields.function(_product_code, type='char', string='Internal Reference'),
'partner_ref' : fields.function(_product_partner_ref, type='char', string='Customer ref'),
'default_code' : fields.char('Internal Reference', select=True),
'active': fields.boolean('Active', help="If unchecked, it will allow you to hide the product without removing it."),
'product_tmpl_id': fields.many2one('product.template', 'Product Template', required=True, ondelete="cascade", select=True, auto_join=True),
'ean13': fields.char('EAN13 Barcode', size=13, help="International Article Number used for product identification."),
'name_template': fields.related('product_tmpl_id', 'name', string="Template Name", type='char', store={
'product.template': (_get_name_template_ids, ['name'], 10),
'product.product': (lambda self, cr, uid, ids, c=None: ids, [], 10),
}, select=True),
'attribute_value_ids': fields.many2many('product.attribute.value', id1='prod_id', id2='att_id', string='Attributes', readonly=True, ondelete='restrict'),
'is_product_variant': fields.function( _is_product_variant_impl, type='boolean', string='Is product variant'),
# image: all image fields are base64 encoded and PIL-supported
'image_variant': fields.binary("Variant Image",
help="This field holds the image used as image for the product variant, limited to 1024x1024px."),
'image': fields.function(_get_image_variant, fnct_inv=_set_image_variant,
string="Big-sized image", type="binary",
help="Image of the product variant (Big-sized image of product template if false). It is automatically "\
"resized as a 1024x1024px image, with aspect ratio preserved."),
'image_small': fields.function(_get_image_variant, fnct_inv=_set_image_variant,
string="Small-sized image", type="binary",
help="Image of the product variant (Small-sized image of product template if false)."),
'image_medium': fields.function(_get_image_variant, fnct_inv=_set_image_variant,
string="Medium-sized image", type="binary",
help="Image of the product variant (Medium-sized image of product template if false)."),
}
_defaults = {
'active': 1,
'color': 0,
}
def unlink(self, cr, uid, ids, context=None):
unlink_ids = []
unlink_product_tmpl_ids = []
for product in self.browse(cr, uid, ids, context=context):
# Check if product still exists, in case it has been unlinked by unlinking its template
if not product.exists():
continue
tmpl_id = product.product_tmpl_id.id
# Check if the product is last product of this template
other_product_ids = self.search(cr, uid, [('product_tmpl_id', '=', tmpl_id), ('id', '!=', product.id)], context=context)
if not other_product_ids:
unlink_product_tmpl_ids.append(tmpl_id)
unlink_ids.append(product.id)
res = super(product_product, self).unlink(cr, uid, unlink_ids, context=context)
# delete templates after calling super, as deleting template could lead to deleting
# products due to ondelete='cascade'
self.pool.get('product.template').unlink(cr, uid, unlink_product_tmpl_ids, context=context)
return res
def onchange_uom(self, cursor, user, ids, uom_id, uom_po_id):
if uom_id and uom_po_id:
uom_obj=self.pool.get('product.uom')
uom=uom_obj.browse(cursor,user,[uom_id])[0]
uom_po=uom_obj.browse(cursor,user,[uom_po_id])[0]
if uom.category_id.id != uom_po.category_id.id:
return {'value': {'uom_po_id': uom_id}}
return False
def _check_ean_key(self, cr, uid, ids, context=None):
for product in self.read(cr, uid, ids, ['ean13'], context=context):
if not check_ean(product['ean13']):
return False
return True
_constraints = [(_check_ean_key, 'You provided an invalid "EAN13 Barcode" reference. You may use the "Internal Reference" field instead.', ['ean13'])]
def on_order(self, cr, uid, ids, orderline, quantity):
pass
def name_get(self, cr, user, ids, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
if not len(ids):
return []
def _name_get(d):
name = d.get('name','')
code = context.get('display_default_code', True) and d.get('default_code',False) or False
if code:
name = '[%s] %s' % (code,name)
return (d['id'], name)
partner_id = context.get('partner_id', False)
if partner_id:
partner_ids = [partner_id, self.pool['res.partner'].browse(cr, user, partner_id, context=context).commercial_partner_id.id]
else:
partner_ids = []
# not all users have access to the seller and partner records,
# so check access rights explicitly and then read as the superuser
self.check_access_rights(cr, user, "read")
self.check_access_rule(cr, user, ids, "read", context=context)
result = []
for product in self.browse(cr, SUPERUSER_ID, ids, context=context):
variant = ", ".join([v.name for v in product.attribute_value_ids])
name = variant and "%s (%s)" % (product.name, variant) or product.name
sellers = []
if partner_ids:
sellers = filter(lambda x: x.name.id in partner_ids, product.seller_ids)
if sellers:
for s in sellers:
seller_variant = s.product_name and "%s (%s)" % (s.product_name, variant) or False
mydict = {
'id': product.id,
'name': seller_variant or name,
'default_code': s.product_code or product.default_code,
}
result.append(_name_get(mydict))
else:
mydict = {
'id': product.id,
'name': name,
'default_code': product.default_code,
}
result.append(_name_get(mydict))
return result
def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
if name:
positive_operators = ['=', 'ilike', '=ilike', 'like', '=like']
ids = []
if operator in positive_operators:
ids = self.search(cr, user, [('default_code','=',name)]+ args, limit=limit, context=context)
if not ids:
ids = self.search(cr, user, [('ean13','=',name)]+ args, limit=limit, context=context)
if not ids and operator not in expression.NEGATIVE_TERM_OPERATORS:
# Do not merge the 2 next lines into one single search, SQL search performance would be abysmal
# on a database with thousands of matching products, due to the huge merge+unique needed for the
# OR operator (and given the fact that the 'name' lookup results come from the ir.translation table).
# Performing a quick memory merge of ids in Python will give much better performance
ids = set(self.search(cr, user, args + [('default_code', operator, name)], limit=limit, context=context))
if not limit or len(ids) < limit:
# we may underrun the limit because of dupes in the results, that's fine
limit2 = (limit - len(ids)) if limit else False
ids.update(self.search(cr, user, args + [('name', operator, name)], limit=limit2, context=context))
ids = list(ids)
elif not ids and operator in expression.NEGATIVE_TERM_OPERATORS:
ids = self.search(cr, user, args + ['&', ('default_code', operator, name), ('name', operator, name)], limit=limit, context=context)
if not ids and operator in positive_operators:
ptrn = re.compile('(\[(.*?)\])')
res = ptrn.search(name)
if res:
ids = self.search(cr, user, [('default_code','=', res.group(2))] + args, limit=limit, context=context)
else:
ids = self.search(cr, user, args, limit=limit, context=context)
result = self.name_get(cr, user, ids, context=context)
return result
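# Illustrative lookup order (hypothetical reference, not part of the original
# module): name_search(cr, uid, 'CHAIR-01') first tries an exact match on
# default_code, then on ean13, then partial matches on default_code and name
# merged in memory; a display name such as '[CHAIR-01] Office Chair' is only
# caught by the final '[...]' regexp branch, which re-searches on the bracketed
# code when the earlier searches find nothing.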
#
# Could be overridden for variant price matrices
#
def price_get(self, cr, uid, ids, ptype='list_price', context=None):
products = self.browse(cr, uid, ids, context=context)
return self.pool.get("product.template")._price_get(cr, uid, products, ptype=ptype, context=context)
def copy(self, cr, uid, id, default=None, context=None):
if context is None:
context={}
product = self.browse(cr, uid, id, context)
if context.get('variant'):
# if we copy a variant or create one, we keep the same template
default['product_tmpl_id'] = product.product_tmpl_id.id
elif 'name' not in default:
default['name'] = _("%s (copy)") % (product.name,)
return super(product_product, self).copy(cr, uid, id, default=default, context=context)
def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
if context is None:
context = {}
if context.get('search_default_categ_id'):
args.append((('categ_id', 'child_of', context['search_default_categ_id'])))
return super(product_product, self).search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=count)
def open_product_template(self, cr, uid, ids, context=None):
""" Utility method used to add an "Open Template" button in product views """
product = self.browse(cr, uid, ids[0], context=context)
return {'type': 'ir.actions.act_window',
'res_model': 'product.template',
'view_mode': 'form',
'res_id': product.product_tmpl_id.id,
'target': 'new'}
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
ctx = dict(context or {}, create_product_product=True)
return super(product_product, self).create(cr, uid, vals, context=ctx)
def need_procurement(self, cr, uid, ids, context=None):
return False
def _compute_uos_qty(self, cr, uid, ids, uom, qty, uos, context=None):
'''
Computes product's invoicing quantity in UoS from quantity in UoM.
Takes into account the product's UoS coefficient when a Unit of Sale is defined.
:param uom: Source unit
:param qty: Source quantity
:param uos: Target UoS unit.
'''
if not uom or not qty or not uos:
return qty
uom_obj = self.pool['product.uom']
product_id = ids[0] if isinstance(ids, (list, tuple)) else ids
product = self.browse(cr, uid, product_id, context=context)
if isinstance(uos, (int, long)):
uos = uom_obj.browse(cr, uid, uos, context=context)
if isinstance(uom, (int, long)):
uom = uom_obj.browse(cr, uid, uom, context=context)
if product.uos_id: # Product has UoS defined
# We cannot convert directly between units even if the units are of the same category
# as we need to apply the conversion coefficient which is valid only between quantities
# in product's default UoM/UoS
qty_default_uom = uom_obj._compute_qty_obj(cr, uid, uom, qty, product.uom_id) # qty in product's default UoM
qty_default_uos = qty_default_uom * product.uos_coeff
return uom_obj._compute_qty_obj(cr, uid, product.uos_id, qty_default_uos, uos)
else:
return uom_obj._compute_qty_obj(cr, uid, uom, qty, uos)
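# Numeric sketch for the conversion above (hypothetical units, not part of the
# original module): for a product stocked in Unit(s), sold in Dozen and with
# uos_coeff = 1/12, converting qty=24 Unit(s) gives qty_default_uom = 24,
# qty_default_uos = 24 * 1/12 = 2, and the final _compute_qty_obj() call simply
# re-expresses those 2 Dozen in the requested UoS.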
class product_packaging(osv.osv):
_name = "product.packaging"
_description = "Packaging"
_rec_name = 'ean'
_order = 'sequence'
_columns = {
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of packaging."),
'name' : fields.text('Description'),
'qty' : fields.float('Quantity by Package',
help="The total number of products you can put by pallet or box."),
'ul' : fields.many2one('product.ul', 'Package Logistic Unit', required=True),
'ul_qty' : fields.integer('Package by layer', help='The number of packages by layer'),
'ul_container': fields.many2one('product.ul', 'Pallet Logistic Unit'),
'rows' : fields.integer('Number of Layers', required=True,
help='The number of layers on a pallet or box'),
'product_tmpl_id' : fields.many2one('product.template', 'Product', select=1, ondelete='cascade', required=True),
'ean' : fields.char('EAN', size=14, help="The EAN code of the package unit."),
'code' : fields.char('Code', help="The code of the transport unit."),
'weight': fields.float('Total Package Weight',
help='The weight of a full package, pallet or box.'),
}
def _check_ean_key(self, cr, uid, ids, context=None):
for pack in self.browse(cr, uid, ids, context=context):
if not check_ean(pack.ean):
return False
return True
_constraints = [(_check_ean_key, 'Error: Invalid ean code', ['ean'])]
def name_get(self, cr, uid, ids, context=None):
if not len(ids):
return []
res = []
for pckg in self.browse(cr, uid, ids, context=context):
p_name = pckg.ean and '[' + pckg.ean + '] ' or ''
p_name += pckg.ul.name
res.append((pckg.id,p_name))
return res
def _get_1st_ul(self, cr, uid, context=None):
cr.execute('select id from product_ul order by id asc limit 1')
res = cr.fetchone()
return (res and res[0]) or False
_defaults = {
'rows' : 3,
'sequence' : 1,
'ul' : _get_1st_ul,
}
def checksum(ean):
salt = '31' * 6 + '3'
sum = 0
for ean_part, salt_part in zip(ean, salt):
sum += int(ean_part) * int(salt_part)
return (10 - (sum % 10)) % 10
checksum = staticmethod(checksum)
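# Sanity-check sketch for the checksum above (hypothetical digits, not part of
# the original module): the 3,1,3,... weights match the GS1 scheme used for
# 14-digit package codes, e.g. checksum('1540014128876') == 3, so the full
# package EAN would read '15400141288763'.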
class product_supplierinfo(osv.osv):
_name = "product.supplierinfo"
_description = "Information about a product supplier"
def _calc_qty(self, cr, uid, ids, fields, arg, context=None):
result = {}
for supplier_info in self.browse(cr, uid, ids, context=context):
for field in fields:
result[supplier_info.id] = {field:False}
qty = supplier_info.min_qty
result[supplier_info.id]['qty'] = qty
return result
_columns = {
'name' : fields.many2one('res.partner', 'Supplier', required=True,domain = [('supplier','=',True)], ondelete='cascade', help="Supplier of this product"),
'product_name': fields.char('Supplier Product Name', help="This supplier's product name will be used when printing a request for quotation. Keep empty to use the internal one."),
'product_code': fields.char('Supplier Product Code', help="This supplier's product code will be used when printing a request for quotation. Keep empty to use the internal one."),
'sequence' : fields.integer('Sequence', help="Assigns the priority to the list of product supplier."),
'product_uom': fields.related('product_tmpl_id', 'uom_po_id', type='many2one', relation='product.uom', string="Supplier Unit of Measure", readonly="1", help="This comes from the product form."),
'min_qty': fields.float('Minimal Quantity', required=True, help="The minimal quantity to purchase from this supplier, expressed in the supplier Product Unit of Measure if not empty, in the default unit of measure of the product otherwise."),
'qty': fields.function(_calc_qty, store=True, type='float', string='Quantity', multi="qty", help="This is a quantity which is converted into Default Unit of Measure."),
'product_tmpl_id' : fields.many2one('product.template', 'Product Template', required=True, ondelete='cascade', select=True, oldname='product_id'),
'delay' : fields.integer('Delivery Lead Time', required=True, help="Lead time in days between the confirmation of the purchase order and the receipt of the products in your warehouse. Used by the scheduler for automatic computation of the purchase order planning."),
'pricelist_ids': fields.one2many('pricelist.partnerinfo', 'suppinfo_id', 'Supplier Pricelist', copy=True),
'company_id':fields.many2one('res.company','Company',select=1),
}
_defaults = {
'min_qty': 0.0,
'sequence': 1,
'delay': 1,
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'product.supplierinfo', context=c),
}
_order = 'sequence'
class pricelist_partnerinfo(osv.osv):
_name = 'pricelist.partnerinfo'
_columns = {
'name': fields.char('Description'),
'suppinfo_id': fields.many2one('product.supplierinfo', 'Partner Information', required=True, ondelete='cascade'),
'min_quantity': fields.float('Quantity', required=True, help="The minimal quantity to trigger this rule, expressed in the supplier Unit of Measure if any or in the default Unit of Measure of the product otherwise."),
'price': fields.float('Unit Price', required=True, digits_compute=dp.get_precision('Product Price'), help="This price will be considered as a price for the supplier Unit of Measure if any or the default Unit of Measure of the product otherwise."),
}
_order = 'min_quantity asc'
class res_currency(osv.osv):
_inherit = 'res.currency'
def _check_main_currency_rounding(self, cr, uid, ids, context=None):
cr.execute('SELECT digits FROM decimal_precision WHERE name like %s',('Account',))
digits = cr.fetchone()
if digits and len(digits):
digits = digits[0]
main_currency = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id
for currency_id in ids:
if currency_id == main_currency.id:
if main_currency.rounding < 10 ** -digits:
return False
return True
_constraints = [
(_check_main_currency_rounding, 'Error! You cannot define a rounding factor for the company\'s main currency that is smaller than the decimal precision of \'Account\'.', ['rounding']),
]
class decimal_precision(osv.osv):
_inherit = 'decimal.precision'
def _check_main_currency_rounding(self, cr, uid, ids, context=None):
cr.execute('SELECT id, digits FROM decimal_precision WHERE name like %s',('Account',))
res = cr.fetchone()
if res and len(res):
account_precision_id, digits = res
main_currency = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id
for decimal_precision in ids:
if decimal_precision == account_precision_id:
if main_currency.rounding < 10 ** -digits:
return False
return True
_constraints = [
(_check_main_currency_rounding, 'Error! You cannot define the decimal precision of \'Account\' as greater than the rounding factor of the company\'s main currency', ['digits']),
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| nicobustillos/odoo | addons/product/product.py | Python | agpl-3.0 | 64,968 |
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from django.utils.translation import ugettext_lazy as _
from enumfields import Enum
class StockAdjustmentType(Enum):
INVENTORY = 1
RESTOCK = 2
RESTOCK_LOGICAL = 3
class Labels:
INVENTORY = _("inventory")
RESTOCK = _("restock")
RESTOCK_LOGICAL = _("restock logical")
| shawnadelic/shuup | shuup/core/suppliers/enums.py | Python | agpl-3.0 | 553 |
Networkx MultiGraph API: (Adjacency dict is the main data structure)
G.adj = G.edge # Adjacency dict with {node1: {node2: {edge_key: {edge attr dict}}}}
G.edge[node1][node2] = G[node1][node2] = {edge_key: {edge attr dict}}
G.node[node1] = {dict with node attr}
G.edges(nbunch, keys=True) => [list with (source, target, key) tuples], uses G.adj
Note: We really do not expect there to be more than one edge from source to target;
in fact, that would be an error, so it might be better to use a normal DiGraph rather than a MultiDiGraph?
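A minimal sketch of the accessors above (assumes NetworkX 1.x, where G.node and
G.edge are plain dicts; not part of the original notes):

    import networkx as nx
    G = nx.MultiDiGraph()
    G.add_node('s1', kind='strand')
    G.add_edge('s1', 's2', key='h1', weight=1.0)   # explicit edge key
    G['s1']['s2']          # {'h1': {'weight': 1.0}}
    G.node['s1']           # {'kind': 'strand'}
    G.edges(keys=True)     # [('s1', 's2', 'h1')]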
| scholer/nascent | notes/Networkx_notes.py | Python | agpl-3.0 | 538 |
from dependencies.dependency import ClassSecurityInfo
from lims import bikaMessageFactory as _, logger
from lims.idserver import renameAfterCreation
from lims.utils import t, tmpID, changeWorkflowState
from lims.utils import to_utf8 as _c
from lims.browser.fields import HistoryAwareReferenceField
from lims.config import PROJECTNAME
from lims.content.bikaschema import BikaSchema
from lims.interfaces import IWorksheet
from lims.permissions import EditWorksheet, ManageWorksheets
from lims.workflow import doActionFor
from lims.workflow import skip
from dependencies.dependency import DateTime
from operator import itemgetter
from dependencies.dependency import indexer
from dependencies.dependency import REFERENCE_CATALOG
from dependencies.dependency import *
from dependencies.dependency import HoldingReference
from dependencies.dependency import HistoryAwareMixin
from dependencies.dependency import RecordsField
from dependencies.dependency import getToolByName
from dependencies.dependency import safe_unicode, _createObjectByType
from dependencies.dependency import implements
@indexer(IWorksheet)
def Priority(instance):
priority = instance.getPriority()
if priority:
return priority.getSortKey()
schema = BikaSchema.copy() + Schema((
HistoryAwareReferenceField('WorksheetTemplate',
allowed_types=('WorksheetTemplate',),
relationship='WorksheetAnalysisTemplate',
),
ComputedField('WorksheetTemplateTitle',
searchable=True,
expression="context.getWorksheetTemplate() and context.getWorksheetTemplate().Title() or ''",
widget=ComputedWidget(
visible=False,
),
),
RecordsField('Layout',
required=1,
subfields=('position', 'type', 'container_uid', 'analysis_uid'),
subfield_types={'position': 'int'},
),
# all layout info lives in Layout; Analyses is used for back references.
ReferenceField('Analyses',
required=1,
multiValued=1,
allowed_types=('Analysis', 'DuplicateAnalysis', 'ReferenceAnalysis', 'RejectAnalysis'),
relationship = 'WorksheetAnalysis',
),
StringField('Analyst',
searchable = True,
),
# TODO Remove. Instruments must be assigned directly to each analysis.
ReferenceField('Instrument',
required = 0,
allowed_types = ('Instrument',),
relationship = 'WorksheetInstrument',
referenceClass = HoldingReference,
),
TextField('Remarks',
searchable = True,
default_content_type = 'text/plain',
allowed_content_types= ('text/plain', ),
default_output_type="text/plain",
widget = TextAreaWidget(
macro="bika_widgets/remarks",
label=_("Remarks"),
append_only=True,
),
),
),
)
schema['id'].required = 0
schema['id'].widget.visible = False
schema['title'].required = 0
schema['title'].widget.visible = {'edit': 'hidden', 'view': 'invisible'}
class Worksheet(BaseFolder, HistoryAwareMixin):
security = ClassSecurityInfo()
implements(IWorksheet)
displayContentsTab = False
schema = schema
_at_rename_after_creation = True
def _renameAfterCreation(self, check_auto_id=False):
from lims.idserver import renameAfterCreation
renameAfterCreation(self)
def Title(self):
return safe_unicode(self.getId()).encode('utf-8')
def getFolderContents(self, contentFilter):
# The bika_listing machine passes contentFilter to all
# contentsMethod methods. We ignore it.
return list(self.getAnalyses())
security.declareProtected(EditWorksheet, 'addAnalysis')
def addAnalysis(self, analysis, position=None):
"""- add the analysis to self.Analyses().
- position is overruled if a slot for this analysis' parent exists
- if position is None, next available pos is used.
"""
workflow = getToolByName(self, 'portal_workflow')
analysis_uid = analysis.UID()
parent_uid = analysis.aq_parent.UID()
analyses = self.getAnalyses()
layout = self.getLayout()
# check if this analysis is already in the layout
if analysis_uid in [l['analysis_uid'] for l in layout]:
return
# If the ws has an instrument assigned for which the analysis
# is allowed, set it
instr = self.getInstrument()
if instr and analysis.isInstrumentAllowed(instr):
# Set the method assigned to the selected instrument
analysis.setMethod(instr.getMethod())
analysis.setInstrument(instr)
self.setAnalyses(analyses + [analysis, ])
# if our parent has a position, use that one.
if analysis.aq_parent.UID() in [slot['container_uid'] for slot in layout]:
position = [int(slot['position']) for slot in layout if
slot['container_uid'] == analysis.aq_parent.UID()][0]
else:
# prefer supplied position parameter
if not position:
used_positions = [0, ] + [int(slot['position']) for slot in layout]
position = [pos for pos in range(1, max(used_positions) + 2)
if pos not in used_positions][0]
self.setLayout(layout + [{'position': position,
'type': 'a',
'container_uid': parent_uid,
'analysis_uid': analysis.UID()}, ])
allowed_transitions = [t['id'] for t in workflow.getTransitionsFor(analysis)]
if 'assign' in allowed_transitions:
workflow.doActionFor(analysis, 'assign')
# If a dependency of DryMatter service is added here, we need to
# make sure that the dry matter analysis itself is also
# present. Otherwise WS calculations refer to the DB version
# of the DM analysis, which is out of sync with the form.
dms = self.bika_setup.getDryMatterService()
if dms:
dmk = dms.getKeyword()
deps = analysis.getDependents()
# if dry matter service in my dependents:
if dmk in [a.getService().getKeyword() for a in deps]:
# get dry matter analysis from AR
dma = analysis.aq_parent.getAnalyses(getKeyword=dmk,
full_objects=True)[0]
# add it.
if dma not in self.getAnalyses():
self.addAnalysis(dma)
security.declareProtected(EditWorksheet, 'removeAnalysis')
def removeAnalysis(self, analysis):
""" delete an analyses from the worksheet and un-assign it
"""
workflow = getToolByName(self, 'portal_workflow')
# overwrite saved context UID for event subscriber
self.REQUEST['context_uid'] = self.UID()
workflow.doActionFor(analysis, 'unassign')
# Note: subscriber might unassign the AR and/or promote the worksheet
# remove analysis from context.Analyses *after* unassign,
# (doActionFor requires worksheet in analysis.getBackReferences)
Analyses = self.getAnalyses()
if analysis in Analyses:
Analyses.remove(analysis)
self.setAnalyses(Analyses)
layout = [slot for slot in self.getLayout() if slot['analysis_uid'] != analysis.UID()]
self.setLayout(layout)
if analysis.portal_type == "DuplicateAnalysis":
self._delObject(analysis.id)
def addReferences(self, position, reference, service_uids):
""" Add reference analyses to reference, and add to worksheet layout
"""
workflow = getToolByName(self, 'portal_workflow')
rc = getToolByName(self, REFERENCE_CATALOG)
layout = self.getLayout()
wst = self.getWorksheetTemplate()
wstlayout = wst and wst.getLayout() or []
ref_type = reference.getBlank() and 'b' or 'c'
ref_uid = reference.UID()
if position == 'new':
highest_existing_position = len(wstlayout)
for pos in [int(slot['position']) for slot in layout]:
if pos > highest_existing_position:
highest_existing_position = pos
position = highest_existing_position + 1
postfix = 1
for refa in reference.getReferenceAnalyses():
grid = refa.getReferenceAnalysesGroupID()
try:
cand = int(grid.split('-')[2])
if cand >= postfix:
postfix = cand + 1
except:
pass
postfix = str(postfix).zfill(int(3))
refgid = '%s-%s' % (reference.id, postfix)
for service_uid in service_uids:
# services with dependents don't belong in references
service = rc.lookupObject(service_uid)
calc = service.getCalculation()
if calc and calc.getDependentServices():
continue
ref_uid = reference.addReferenceAnalysis(service_uid, ref_type)
ref_analysis = rc.lookupObject(ref_uid)
# Set ReferenceAnalysesGroupID (same id for the analyses from
# the same Reference Sample and same Worksheet)
# https://github.com/bikalabs/Bika-LIMS/issues/931
ref_analysis.setReferenceAnalysesGroupID(refgid)
ref_analysis.reindexObject(idxs=["getReferenceAnalysesGroupID"])
# copy the interimfields
if calc:
ref_analysis.setInterimFields(calc.getInterimFields())
self.setLayout(
self.getLayout() + [{'position': position,
'type': ref_type,
'container_uid': reference.UID(),
'analysis_uid': ref_analysis.UID()}])
self.setAnalyses(
self.getAnalyses() + [ref_analysis, ])
workflow.doActionFor(ref_analysis, 'assign')
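# Illustrative group id (hypothetical sample id, not part of the original
# module): for a reference sample with id 'RS-0001' whose existing reference
# analyses carry the group id 'RS-0001-001', the postfix computed above becomes
# 2 and the new analyses are grouped under 'RS-0001-002' (zero-padded to three
# digits).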
security.declareProtected(EditWorksheet, 'addDuplicateAnalyses')
def addDuplicateAnalyses(self, src_slot, dest_slot):
""" add duplicate analyses to worksheet
"""
rc = getToolByName(self, REFERENCE_CATALOG)
workflow = getToolByName(self, 'portal_workflow')
layout = self.getLayout()
wst = self.getWorksheetTemplate()
wstlayout = wst and wst.getLayout() or []
src_ar = [slot['container_uid'] for slot in layout if
slot['position'] == src_slot]
if src_ar:
src_ar = src_ar[0]
if not dest_slot or dest_slot == 'new':
highest_existing_position = len(wstlayout)
for pos in [int(slot['position']) for slot in layout]:
if pos > highest_existing_position:
highest_existing_position = pos
dest_slot = highest_existing_position + 1
src_analyses = [rc.lookupObject(slot['analysis_uid'])
for slot in layout if
int(slot['position']) == int(src_slot)]
dest_analyses = [rc.lookupObject(slot['analysis_uid']).getAnalysis().UID()
for slot in layout if
int(slot['position']) == int(dest_slot)]
refgid = None
for analysis in src_analyses:
if analysis.UID() in dest_analyses:
continue
# services with dependents don't belong in duplicates
service = analysis.getService()
calc = service.getCalculation()
if calc and calc.getDependentServices():
continue
service = analysis.getService()
_id = self._findUniqueId(service.getKeyword())
duplicate = _createObjectByType("DuplicateAnalysis", self, _id)
duplicate.setAnalysis(analysis)
# Set ReferenceAnalysesGroupID (same id for the analyses from
# the same Reference Sample and same Worksheet)
# https://github.com/bikalabs/Bika-LIMS/issues/931
if not refgid and not analysis.portal_type == 'ReferenceAnalysis':
part = analysis.getSamplePartition().id
dups = [an.getReferenceAnalysesGroupID()
for an in self.getAnalyses()
if an.portal_type == 'DuplicateAnalysis'
and an.getSamplePartition().id == part]
dups = list(set(dups))
postfix = dups and len(dups) + 1 or 1
postfix = str(postfix).zfill(int(2))
refgid = '%s-D%s' % (part, postfix)
duplicate.setReferenceAnalysesGroupID(refgid)
duplicate.reindexObject(idxs=["getReferenceAnalysesGroupID"])
duplicate.processForm()
if calc:
duplicate.setInterimFields(calc.getInterimFields())
self.setLayout(
self.getLayout() + [{'position': dest_slot,
'type': 'd',
'container_uid': analysis.aq_parent.UID(),
'analysis_uid': duplicate.UID()}, ]
)
self.setAnalyses(self.getAnalyses() + [duplicate, ])
workflow.doActionFor(duplicate, 'assign')
# In case there are more than one analyses for an 'analysis_uid'
# https://jira.bikalabs.com/browse/LIMS-1745
break
def applyWorksheetTemplate(self, wst):
""" Add analyses to worksheet according to wst's layout.
Will not overwrite slots which are filled already.
If the selected template has an instrument assigned, it will
only be applied to those analyses for which the instrument
is allowed
"""
rc = getToolByName(self, REFERENCE_CATALOG)
bac = getToolByName(self, "bika_analysis_catalog")
bc = getToolByName(self, 'bika_catalog')
layout = self.getLayout()
wstlayout = wst.getLayout()
services = wst.getService()
wst_service_uids = [s.UID() for s in services]
analyses = bac(portal_type='Analysis',
getServiceUID=wst_service_uids,
review_state='sample_received',
worksheetanalysis_review_state='unassigned',
cancellation_state = 'active')
sortedans = []
for an in analyses:
sortedans.append({'uid': an.UID,
'duedate': an.getObject().getDueDate() or (DateTime() + 365),
'brain': an});
sortedans.sort(key=itemgetter('duedate'), reverse=False)
# collect analyses from the first X ARs.
ar_analyses = {} # ar_uid : [analyses]
ars = [] # for sorting
wst_slots = [row['pos'] for row in wstlayout if row['type'] == 'a']
ws_slots = [row['position'] for row in layout if row['type'] == 'a']
nr_slots = len(wst_slots) - len(ws_slots)
instr = self.getInstrument() if self.getInstrument() else wst.getInstrument()
for analysis in sortedans:
analysis = analysis['brain']
if instr and analysis.getObject().isInstrumentAllowed(instr) == False:
# Exclude those analyses for which the ws selected
# instrument is not allowed
continue
ar = analysis.getRequestID
if ar in ar_analyses:
ar_analyses[ar].append(analysis.getObject())
else:
if len(ar_analyses.keys()) < nr_slots:
ars.append(ar)
ar_analyses[ar] = [analysis.getObject(), ]
positions = [pos for pos in wst_slots if pos not in ws_slots]
for ar in ars:
for analysis in ar_analyses[ar]:
self.addAnalysis(analysis, position=positions[ars.index(ar)])
# find best matching reference samples for Blanks and Controls
for t in ('b', 'c'):
form_key = t == 'b' and 'blank_ref' or 'control_ref'
ws_slots = [row['position'] for row in layout if row['type'] == t]
for row in [r for r in wstlayout if
r['type'] == t and r['pos'] not in ws_slots]:
reference_definition_uid = row[form_key]
samples = bc(portal_type='ReferenceSample',
review_state='current',
inactive_state='active',
getReferenceDefinitionUID=reference_definition_uid)
if not samples:
break
samples = [s.getObject() for s in samples]
if t == 'b':
samples = [s for s in samples if s.getBlank()]
else:
samples = [s for s in samples if not s.getBlank()]
complete_reference_found = False
references = {}
for reference in samples:
reference_uid = reference.UID()
references[reference_uid] = {}
references[reference_uid]['services'] = []
references[reference_uid]['count'] = 0
specs = reference.getResultsRangeDict()
for service_uid in wst_service_uids:
if service_uid in specs:
references[reference_uid]['services'].append(service_uid)
references[reference_uid]['count'] += 1
if references[reference_uid]['count'] == len(wst_service_uids):
complete_reference_found = True
break
if complete_reference_found:
supported_uids = wst_service_uids
self.addReferences(int(row['pos']),
reference,
supported_uids)
else:
# find the most complete reference sample instead
reference_keys = references.keys()
no_of_services = 0
reference = None
for key in reference_keys:
if references[key]['count'] > no_of_services:
no_of_services = references[key]['count']
reference = key
if reference:
reference = rc.lookupObject(reference)
supported_uids = [s.UID() for s in reference.getServices()
if s.UID() in wst_service_uids]
self.addReferences(int(row['pos']),
reference,
supported_uids)
# fill duplicate positions
layout = self.getLayout()
ws_slots = [row['position'] for row in layout if row['type'] == 'd']
for row in [r for r in wstlayout if
r['type'] == 'd' and r['pos'] not in ws_slots]:
dest_pos = int(row['pos'])
src_pos = int(row['dup'])
if src_pos in [int(slot['position']) for slot in layout]:
self.addDuplicateAnalyses(src_pos, dest_pos)
# Apply the wst instrument to all analyses and ws
if instr:
self.setInstrument(instr, True)
def exportAnalyses(self, REQUEST=None, RESPONSE=None):
""" Export analyses from this worksheet """
import bika.lims.InstrumentExport as InstrumentExport
instrument = REQUEST.form['getInstrument']
try:
func = getattr(InstrumentExport, "%s_export" % instrument)
except:
return
func(self, REQUEST, RESPONSE)
return
security.declarePublic('getWorksheetServices')
def getWorksheetServices(self):
""" get list of analysis services present on this worksheet
"""
services = []
for analysis in self.getAnalyses():
service = analysis.getService()
if service not in services:
services.append(service)
return services
security.declareProtected(EditWorksheet, 'resequenceWorksheet')
def resequenceWorksheet(self, REQUEST=None, RESPONSE=None):
""" Reset the sequence of analyses in the worksheet """
""" sequence is [{'pos': , 'type': , 'uid', 'key'},] """
old_seq = self.getLayout()
new_dict = {}
new_seq = []
other_dict = {}
for seq in old_seq:
if seq['key'] == '':
if seq['pos'] not in other_dict:
other_dict[seq['pos']] = []
other_dict[seq['pos']].append(seq)
continue
if seq['key'] not in new_dict:
new_dict[seq['key']] = []
analyses = new_dict[seq['key']]
analyses.append(seq)
new_dict[seq['key']] = analyses
new_keys = sorted(new_dict.keys())
rc = getToolByName(self, REFERENCE_CATALOG)
seqno = 1
for key in new_keys:
analyses = {}
if len(new_dict[key]) == 1:
new_dict[key][0]['pos'] = seqno
new_seq.append(new_dict[key][0])
else:
for item in new_dict[key]:
item['pos'] = seqno
analysis = rc.lookupObject(item['uid'])
service = analysis.Title()
analyses[service] = item
a_keys = sorted(analyses.keys())
for a_key in a_keys:
new_seq.append(analyses[a_key])
seqno += 1
other_keys = other_dict.keys()
other_keys.sort()
for other_key in other_keys:
for item in other_dict[other_key]:
item['pos'] = seqno
new_seq.append(item)
seqno += 1
self.setLayout(new_seq)
RESPONSE.redirect('%s/manage_results' % self.absolute_url())
security.declarePublic('current_date')
def current_date(self):
""" return current date """
return DateTime()
def setInstrument(self, instrument, override_analyses=False):
""" Sets the specified instrument to the Analysis from the
Worksheet. Only sets the instrument if the Analysis
allows it, according to its Analysis Service and Method.
If an analysis already has an instrument assigned, it won't
be overridden.
The Analyses that don't allow the instrument specified will
not be modified.
Returns the number of analyses affected
"""
analyses = [an for an in self.getAnalyses()
if (not an.getInstrument() or override_analyses)
and an.isInstrumentAllowed(instrument)]
total = 0
for an in analyses:
# An analysis can be performed using different Methods.
# A method can be supported by more than one Instrument,
# but not every instrument supports every method.
# We must also set the instrument's method; otherwise the
# WS manage-results view would display the analysis' default
# method and, in the picklist, only the instruments for that
# default method.
meth = instrument.getMethod()
if an.isMethodAllowed(meth):
an.setMethod(meth)
success = an.setInstrument(instrument)
if success is True:
total += 1
self.getField('Instrument').set(self, instrument)
return total
def workflow_script_submit(self):
# Don't cascade. Shouldn't be submitting WSs directly for now,
# except edge cases where all analyses are already submitted,
# but self was held back until an analyst was assigned.
workflow = getToolByName(self, 'portal_workflow')
self.reindexObject(idxs=["review_state", ])
can_attach = True
for a in self.getAnalyses():
if workflow.getInfoFor(a, 'review_state') in \
('to_be_sampled', 'to_be_preserved', 'sample_due',
'sample_received', 'attachment_due', 'assigned',):
# Note: referenceanalyses and duplicateanalyses can still
# have review_state = "assigned".
can_attach = False
break
if can_attach:
doActionFor(self, 'attach')
def workflow_script_attach(self):
if skip(self, "attach"):
return
self.reindexObject(idxs=["review_state", ])
# Don't cascade. Shouldn't be attaching WSs for now (if ever).
return
def workflow_script_retract(self):
if skip(self, "retract"):
return
workflow = getToolByName(self, 'portal_workflow')
self.reindexObject(idxs=["review_state", ])
if not "retract all analyses" in self.REQUEST['workflow_skiplist']:
# retract all analyses in this self.
# (NB: don't retract if it's verified)
analyses = self.getAnalyses()
for analysis in analyses:
state = workflow.getInfoFor(analysis, 'review_state', '')
if state not in ('attachment_due', 'to_be_verified',):
continue
doActionFor(analysis, 'retract')
def workflow_script_verify(self):
if skip(self, "verify"):
return
workflow = getToolByName(self, 'portal_workflow')
self.reindexObject(idxs=["review_state", ])
if not "verify all analyses" in self.REQUEST['workflow_skiplist']:
# verify all analyses in this self.
analyses = self.getAnalyses()
for analysis in analyses:
state = workflow.getInfoFor(analysis, 'review_state', '')
if state != 'to_be_verified':
continue
doActionFor(analysis, "verify")
def workflow_script_reject(self):
"""Copy real analyses to RejectAnalysis, with link to real
create a new worksheet, with the original analyses, and new
duplicates and references to match the rejected
worksheet.
"""
if skip(self, "reject"):
return
utils = getToolByName(self, 'plone_utils')
workflow = self.portal_workflow
def copy_src_fields_to_dst(src, dst):
# These will be ignored when copying field values between analyses
ignore_fields = ['UID',
'id',
'title',
'allowDiscussion',
'subject',
'description',
'location',
'contributors',
'creators',
'effectiveDate',
'expirationDate',
'language',
'rights',
'creation_date',
'modification_date',
'Layout', # ws
'Analyses', # ws
]
fields = src.Schema().fields()
for field in fields:
fieldname = field.getName()
if fieldname in ignore_fields:
continue
getter = getattr(src, 'get'+fieldname,
src.Schema().getField(fieldname).getAccessor(src))
setter = getattr(dst, 'set'+fieldname,
dst.Schema().getField(fieldname).getMutator(dst))
if getter is None or setter is None:
# ComputedField
continue
setter(getter())
analysis_positions = {}
for item in self.getLayout():
analysis_positions[item['analysis_uid']] = item['position']
old_layout = []
new_layout = []
# New worksheet
worksheets = self.aq_parent
new_ws = _createObjectByType('Worksheet', worksheets, tmpID())
new_ws.unmarkCreationFlag()
new_ws_id = renameAfterCreation(new_ws)
copy_src_fields_to_dst(self, new_ws)
new_ws.edit(
Number = new_ws_id,
Remarks = self.getRemarks()
)
# Objects are being created inside other contexts, but we want their
# workflow handlers to be aware of which worksheet this is occurring in.
# We save the worksheet in request['context_uid'].
# We reset it again below.... be very sure that this is set to the
# UID of the containing worksheet before invoking any transitions on
# analyses.
self.REQUEST['context_uid'] = new_ws.UID()
# loop all analyses
analyses = self.getAnalyses()
new_ws_analyses = []
old_ws_analyses = []
for analysis in analyses:
# Skip published or verified analyses
review_state = workflow.getInfoFor(analysis, 'review_state', '')
            if review_state in ['published', 'verified', 'retracted']:
                # 'position' must be looked up here too, otherwise it is
                # undefined (or stale) when the first analyses in the loop
                # are already published/verified/retracted.
                position = analysis_positions[analysis.UID()]
                old_ws_analyses.append(analysis.UID())
                old_layout.append({'position': position,
'type':'a',
'analysis_uid':analysis.UID(),
'container_uid':analysis.aq_parent.UID()})
continue
# Normal analyses:
# - Create matching RejectAnalysis inside old WS
# - Link analysis to new WS in same position
# - Copy all field values
# - Clear analysis result, and set Retested flag
if analysis.portal_type == 'Analysis':
reject = _createObjectByType('RejectAnalysis', self, tmpID())
reject.unmarkCreationFlag()
reject_id = renameAfterCreation(reject)
copy_src_fields_to_dst(analysis, reject)
reject.setAnalysis(analysis)
reject.reindexObject()
analysis.edit(
Result = None,
Retested = True,
)
analysis.reindexObject()
position = analysis_positions[analysis.UID()]
old_ws_analyses.append(reject.UID())
old_layout.append({'position': position,
'type':'r',
'analysis_uid':reject.UID(),
'container_uid':self.UID()})
new_ws_analyses.append(analysis.UID())
new_layout.append({'position': position,
'type':'a',
'analysis_uid':analysis.UID(),
'container_uid':analysis.aq_parent.UID()})
# Reference analyses
# - Create a new reference analysis in the new worksheet
# - Transition the original analysis to 'rejected' state
if analysis.portal_type == 'ReferenceAnalysis':
service_uid = analysis.getService().UID()
reference = analysis.aq_parent
reference_type = analysis.getReferenceType()
new_analysis_uid = reference.addReferenceAnalysis(service_uid,
reference_type)
position = analysis_positions[analysis.UID()]
old_ws_analyses.append(analysis.UID())
old_layout.append({'position': position,
'type':reference_type,
'analysis_uid':analysis.UID(),
'container_uid':reference.UID()})
new_ws_analyses.append(new_analysis_uid)
new_layout.append({'position': position,
'type':reference_type,
'analysis_uid':new_analysis_uid,
'container_uid':reference.UID()})
workflow.doActionFor(analysis, 'reject')
new_reference = reference.uid_catalog(UID=new_analysis_uid)[0].getObject()
workflow.doActionFor(new_reference, 'assign')
analysis.reindexObject()
# Duplicate analyses
# - Create a new duplicate inside the new worksheet
# - Transition the original analysis to 'rejected' state
if analysis.portal_type == 'DuplicateAnalysis':
src_analysis = analysis.getAnalysis()
ar = src_analysis.aq_parent
service = src_analysis.getService()
duplicate_id = new_ws.generateUniqueId('DuplicateAnalysis')
new_duplicate = _createObjectByType('DuplicateAnalysis',
new_ws, duplicate_id)
new_duplicate.unmarkCreationFlag()
copy_src_fields_to_dst(analysis, new_duplicate)
workflow.doActionFor(new_duplicate, 'assign')
new_duplicate.reindexObject()
position = analysis_positions[analysis.UID()]
old_ws_analyses.append(analysis.UID())
old_layout.append({'position': position,
'type':'d',
'analysis_uid':analysis.UID(),
'container_uid':self.UID()})
new_ws_analyses.append(new_duplicate.UID())
new_layout.append({'position': position,
'type':'d',
'analysis_uid':new_duplicate.UID(),
'container_uid':new_ws.UID()})
workflow.doActionFor(analysis, 'reject')
analysis.reindexObject()
new_ws.setAnalyses(new_ws_analyses)
new_ws.setLayout(new_layout)
new_ws.replaces_rejected_worksheet = self.UID()
for analysis in new_ws.getAnalyses():
review_state = workflow.getInfoFor(analysis, 'review_state', '')
if review_state == 'to_be_verified':
changeWorkflowState(analysis, "bika_analysis_workflow", "sample_received")
self.REQUEST['context_uid'] = self.UID()
self.setLayout(old_layout)
self.setAnalyses(old_ws_analyses)
self.replaced_by = new_ws.UID()
def checkUserManage(self):
""" Checks if the current user has granted access to this worksheet
and if has also privileges for managing it.
"""
granted = False
can_access = self.checkUserAccess()
if can_access == True:
pm = getToolByName(self, 'portal_membership')
edit_allowed = pm.checkPermission(EditWorksheet, self)
if edit_allowed:
# Check if the current user is the WS's current analyst
member = pm.getAuthenticatedMember()
analyst = self.getAnalyst().strip()
if analyst != _c(member.getId()):
# Has management privileges?
if pm.checkPermission(ManageWorksheets, self):
granted = True
else:
granted = True
return granted
def checkUserAccess(self):
""" Checks if the current user has granted access to this worksheet.
Returns False if the user has no access, otherwise returns True
"""
# Deny access to foreign analysts
allowed = True
pm = getToolByName(self, "portal_membership")
member = pm.getAuthenticatedMember()
analyst = self.getAnalyst().strip()
if analyst != _c(member.getId()):
roles = member.getRoles()
restrict = 'Manager' not in roles \
and 'LabManager' not in roles \
and 'LabClerk' not in roles \
and 'RegulatoryInspector' not in roles \
and self.bika_setup.getRestrictWorksheetUsersAccess()
allowed = not restrict
return allowed
    def setAnalyst(self, analyst):
for analysis in self.getAnalyses():
analysis.setAnalyst(analyst)
self.Schema().getField('Analyst').set(self, analyst)
security.declarePublic('getPriority')
def getPriority(self):
""" get highest priority from all analyses
"""
analyses = self.getAnalyses()
priorities = []
for analysis in analyses:
if not hasattr(analysis, 'getPriority'):
continue
if analysis.getPriority():
priorities.append(analysis.getPriority())
        priorities = sorted(priorities, key=itemgetter('sortKey'))
if priorities:
return priorities[-1]
registerType(Worksheet, PROJECTNAME)
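
# Editorial sketch (not part of the original module): a standalone, simplified
# illustration of the layout entries handled by getLayout()/setLayout() and of
# the grouping-by-key renumbering performed in resequenceWorksheet() above.
# The dict values are made-up sample data; real entries come from the catalog.
if __name__ == '__main__':
    layout = [
        {'pos': 3, 'type': 'a', 'uid': 'uid-2', 'key': 'AP-0002'},
        {'pos': 1, 'type': 'a', 'uid': 'uid-1', 'key': 'AP-0001'},
        {'pos': 2, 'type': 'b', 'uid': 'uid-3', 'key': ''},
    ]
    keyed = {}
    others = []
    for entry in layout:
        if entry['key']:
            keyed.setdefault(entry['key'], []).append(entry)
        else:
            others.append(entry)
    seqno = 1
    for key in sorted(keyed):
        for entry in keyed[key]:
            entry['pos'] = seqno   # entries sharing a key share a position
        seqno += 1
    for entry in others:           # unkeyed entries go at the end
        entry['pos'] = seqno
        seqno += 1
    print(sorted(layout, key=lambda entry: entry['pos']))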
| sciCloud/OLiMS | lims/content/worksheet.py | Python | agpl-3.0 | 37,347 |
"""A moving-window filter for smoothing the signals within certain time interval."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin
import numpy as np
@gin.configurable
class TimeBasedMovingWindowFilter:
"""A moving-window filter for smoothing the signals within certain time interval."""
def __init__(
self,
filter_window: float = 0.1,
):
"""Initializes the class.
Args:
filter_window: The filtering window (in time) used to smooth the input
signal.
"""
self._filter_window = filter_window
self.reset()
def reset(self):
self._timestamp_buffer = []
self._value_buffer = []
def calculate_average(self, new_value, timestamp):
"""Compute the filtered signals based on the time-based moving window."""
self._timestamp_buffer.append(timestamp)
self._value_buffer.append(new_value)
while len(self._value_buffer) > 1:
if self._timestamp_buffer[
0] < timestamp - self._filter_window:
self._timestamp_buffer.pop(0)
self._value_buffer.pop(0)
else:
break
return np.mean(self._value_buffer, axis=0)
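
# Editorial usage sketch (not part of the original module): shows how the
# filter above smooths a signal over a 0.1 s window. The sample values and
# timestamps below are made up for illustration.
if __name__ == "__main__":
  f = TimeBasedMovingWindowFilter(filter_window=0.1)
  for t, value in [(0.00, 1.0), (0.02, 3.0), (0.04, 2.0), (0.20, 10.0)]:
    # Samples older than t - 0.1 s are dropped before averaging.
    print((t, f.calculate_average(value, t)))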
| nrz/ylikuutio | external/bullet3/examples/pybullet/gym/pybullet_envs/minitaur/agents/baseline_controller/time_based_moving_window_filter.py | Python | agpl-3.0 | 1,200 |
# -*- coding: utf-8 -*-
# Copyright 2016 LasLabs Inc.
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).
from odoo.exceptions import UserError
class ConnectionFailedError(UserError):
pass
class ConnectionSuccessError(UserError):
pass
| thinkopensolutions/server-tools | base_external_dbsource/exceptions.py | Python | agpl-3.0 | 260 |
"""This file implements the gym environment of minitaur.
"""
import math
import time
import inspect
import os
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
import pybullet
from pybullet_utils import bullet_client as bc
import pybullet_data
from pybullet_envs.minitaur.envs import minitaur
from pybullet_envs.minitaur.envs import minitaur_derpy
from pybullet_envs.minitaur.envs import minitaur_logging
from pybullet_envs.minitaur.envs import minitaur_logging_pb2
from pybullet_envs.minitaur.envs import minitaur_rainbow_dash
from pybullet_envs.minitaur.envs import motor
from pkg_resources import parse_version
NUM_MOTORS = 8
MOTOR_ANGLE_OBSERVATION_INDEX = 0
MOTOR_VELOCITY_OBSERVATION_INDEX = MOTOR_ANGLE_OBSERVATION_INDEX + NUM_MOTORS
MOTOR_TORQUE_OBSERVATION_INDEX = MOTOR_VELOCITY_OBSERVATION_INDEX + NUM_MOTORS
BASE_ORIENTATION_OBSERVATION_INDEX = MOTOR_TORQUE_OBSERVATION_INDEX + NUM_MOTORS
ACTION_EPS = 0.01
OBSERVATION_EPS = 0.01
RENDER_HEIGHT = 360
RENDER_WIDTH = 480
SENSOR_NOISE_STDDEV = minitaur.SENSOR_NOISE_STDDEV
DEFAULT_URDF_VERSION = "default"
DERPY_V0_URDF_VERSION = "derpy_v0"
RAINBOW_DASH_V0_URDF_VERSION = "rainbow_dash_v0"
NUM_SIMULATION_ITERATION_STEPS = 300
MINIATUR_URDF_VERSION_MAP = {
DEFAULT_URDF_VERSION: minitaur.Minitaur,
DERPY_V0_URDF_VERSION: minitaur_derpy.MinitaurDerpy,
RAINBOW_DASH_V0_URDF_VERSION: minitaur_rainbow_dash.MinitaurRainbowDash,
}
def convert_to_list(obj):
try:
iter(obj)
return obj
except TypeError:
return [obj]
class MinitaurGymEnv(gym.Env):
"""The gym environment for the minitaur.
It simulates the locomotion of a minitaur, a quadruped robot. The state space
include the angles, velocities and torques for all the motors and the action
space is the desired motor angle for each motor. The reward function is based
on how far the minitaur walks in 1000 steps and penalizes the energy
expenditure.
"""
metadata = {"render.modes": ["human", "rgb_array"], "video.frames_per_second": 100}
def __init__(self,
urdf_root=pybullet_data.getDataPath(),
urdf_version=None,
distance_weight=1.0,
energy_weight=0.005,
shake_weight=0.0,
drift_weight=0.0,
distance_limit=float("inf"),
observation_noise_stdev=SENSOR_NOISE_STDDEV,
self_collision_enabled=True,
motor_velocity_limit=np.inf,
pd_control_enabled=False,
leg_model_enabled=True,
accurate_motor_model_enabled=False,
remove_default_joint_damping=False,
motor_kp=1.0,
motor_kd=0.02,
control_latency=0.0,
pd_latency=0.0,
torque_control_enabled=False,
motor_overheat_protection=False,
hard_reset=True,
on_rack=False,
render=False,
num_steps_to_log=1000,
action_repeat=1,
control_time_step=None,
env_randomizer=None,
forward_reward_cap=float("inf"),
reflection=True,
log_path=None):
"""Initialize the minitaur gym environment.
Args:
urdf_root: The path to the urdf data folder.
urdf_version: [DEFAULT_URDF_VERSION, DERPY_V0_URDF_VERSION,
RAINBOW_DASH_V0_URDF_VERSION] are allowable
versions. If None, DEFAULT_URDF_VERSION is used. DERPY_V0_URDF_VERSION
is the result of first pass system identification for derpy.
We will have a different URDF and related Minitaur class each time we
perform system identification. While the majority of the code of the
class remains the same, some code changes (e.g. the constraint location
might change). __init__() will choose the right Minitaur class from
different minitaur modules based on
urdf_version.
distance_weight: The weight of the distance term in the reward.
energy_weight: The weight of the energy term in the reward.
shake_weight: The weight of the vertical shakiness term in the reward.
drift_weight: The weight of the sideways drift term in the reward.
distance_limit: The maximum distance to terminate the episode.
observation_noise_stdev: The standard deviation of observation noise.
self_collision_enabled: Whether to enable self collision in the sim.
motor_velocity_limit: The velocity limit of each motor.
pd_control_enabled: Whether to use PD controller for each motor.
leg_model_enabled: Whether to use a leg motor to reparameterize the action
space.
accurate_motor_model_enabled: Whether to use the accurate DC motor model.
remove_default_joint_damping: Whether to remove the default joint damping.
motor_kp: proportional gain for the accurate motor model.
motor_kd: derivative gain for the accurate motor model.
      control_latency: The latency (in seconds) between when an observation is
        made and when that observation is reported back to the controller
        (e.g. the neural network policy).
pd_latency: latency of the PD controller loop. PD calculates PWM based on
the motor angle and velocity. The latency measures the time between when
the motor angle and velocity are observed on the microcontroller and
when the true state happens on the motor. It is typically (0.001-
0.002s).
torque_control_enabled: Whether to use the torque control, if set to
False, pose control will be used.
motor_overheat_protection: Whether to shutdown the motor that has exerted
large torque (OVERHEAT_SHUTDOWN_TORQUE) for an extended amount of time
(OVERHEAT_SHUTDOWN_TIME). See ApplyAction() in minitaur.py for more
details.
      hard_reset: Whether to wipe the simulation and load everything when reset
        is called. If set to False, reset just places the minitaur back at the
        start position and sets its pose to the initial configuration.
      on_rack: Whether to place the minitaur on a rack. This is only used to
        debug the walking gait. In this mode, the minitaur's base is hung in
        midair so that its walking gait is easier to visualize.
render: Whether to render the simulation.
num_steps_to_log: The max number of control steps in one episode that will
be logged. If the number of steps is more than num_steps_to_log, the
environment will still be running, but only first num_steps_to_log will
be recorded in logging.
      action_repeat: The number of simulation steps for which each action is
        repeated.
control_time_step: The time step between two successive control signals.
env_randomizer: An instance (or a list) of EnvRandomizer(s). An
        EnvRandomizer may randomize the physical properties of the minitaur,
        change the terrain during reset(), or add perturbation forces during
        step().
forward_reward_cap: The maximum value that forward reward is capped at.
Disabled (Inf) by default.
log_path: The path to write out logs. For the details of logging, refer to
minitaur_logging.proto.
Raises:
ValueError: If the urdf_version is not supported.
"""
# Set up logging.
self._log_path = log_path
self.logging = minitaur_logging.MinitaurLogging(log_path)
# PD control needs smaller time step for stability.
if control_time_step is not None:
self.control_time_step = control_time_step
self._action_repeat = action_repeat
self._time_step = control_time_step / action_repeat
else:
# Default values for time step and action repeat
if accurate_motor_model_enabled or pd_control_enabled:
self._time_step = 0.002
self._action_repeat = 5
else:
self._time_step = 0.01
self._action_repeat = 1
self.control_time_step = self._time_step * self._action_repeat
# TODO(b/73829334): Fix the value of self._num_bullet_solver_iterations.
self._num_bullet_solver_iterations = int(NUM_SIMULATION_ITERATION_STEPS / self._action_repeat)
self._urdf_root = urdf_root
self._self_collision_enabled = self_collision_enabled
self._motor_velocity_limit = motor_velocity_limit
self._observation = []
self._true_observation = []
self._objectives = []
self._objective_weights = [distance_weight, energy_weight, drift_weight, shake_weight]
self._env_step_counter = 0
self._num_steps_to_log = num_steps_to_log
self._is_render = render
self._last_base_position = [0, 0, 0]
self._distance_weight = distance_weight
self._energy_weight = energy_weight
self._drift_weight = drift_weight
self._shake_weight = shake_weight
self._distance_limit = distance_limit
self._observation_noise_stdev = observation_noise_stdev
self._action_bound = 1
self._pd_control_enabled = pd_control_enabled
self._leg_model_enabled = leg_model_enabled
self._accurate_motor_model_enabled = accurate_motor_model_enabled
self._remove_default_joint_damping = remove_default_joint_damping
self._motor_kp = motor_kp
self._motor_kd = motor_kd
self._torque_control_enabled = torque_control_enabled
self._motor_overheat_protection = motor_overheat_protection
self._on_rack = on_rack
self._cam_dist = 1.0
self._cam_yaw = 0
self._cam_pitch = -30
self._forward_reward_cap = forward_reward_cap
self._hard_reset = True
self._last_frame_time = 0.0
self._control_latency = control_latency
self._pd_latency = pd_latency
self._urdf_version = urdf_version
self._ground_id = None
self._reflection = reflection
self._env_randomizers = convert_to_list(env_randomizer) if env_randomizer else []
self._episode_proto = minitaur_logging_pb2.MinitaurEpisode()
if self._is_render:
self._pybullet_client = bc.BulletClient(connection_mode=pybullet.GUI)
else:
self._pybullet_client = bc.BulletClient()
if self._urdf_version is None:
self._urdf_version = DEFAULT_URDF_VERSION
self._pybullet_client.setPhysicsEngineParameter(enableConeFriction=0)
self.seed()
self.reset()
observation_high = (self._get_observation_upper_bound() + OBSERVATION_EPS)
observation_low = (self._get_observation_lower_bound() - OBSERVATION_EPS)
action_dim = NUM_MOTORS
action_high = np.array([self._action_bound] * action_dim)
self.action_space = spaces.Box(-action_high, action_high)
self.observation_space = spaces.Box(observation_low, observation_high)
self.viewer = None
    self._hard_reset = hard_reset  # This assignment needs to be after reset()
def close(self):
if self._env_step_counter > 0:
self.logging.save_episode(self._episode_proto)
self.minitaur.Terminate()
def add_env_randomizer(self, env_randomizer):
self._env_randomizers.append(env_randomizer)
def reset(self, initial_motor_angles=None, reset_duration=1.0):
self._pybullet_client.configureDebugVisualizer(self._pybullet_client.COV_ENABLE_RENDERING, 0)
if self._env_step_counter > 0:
self.logging.save_episode(self._episode_proto)
self._episode_proto = minitaur_logging_pb2.MinitaurEpisode()
minitaur_logging.preallocate_episode_proto(self._episode_proto, self._num_steps_to_log)
if self._hard_reset:
self._pybullet_client.resetSimulation()
self._pybullet_client.setPhysicsEngineParameter(
numSolverIterations=int(self._num_bullet_solver_iterations))
self._pybullet_client.setTimeStep(self._time_step)
self._ground_id = self._pybullet_client.loadURDF("%s/plane.urdf" % self._urdf_root)
if (self._reflection):
self._pybullet_client.changeVisualShape(self._ground_id, -1, rgbaColor=[1, 1, 1, 0.8])
self._pybullet_client.configureDebugVisualizer(
self._pybullet_client.COV_ENABLE_PLANAR_REFLECTION, self._ground_id)
self._pybullet_client.setGravity(0, 0, -10)
acc_motor = self._accurate_motor_model_enabled
motor_protect = self._motor_overheat_protection
if self._urdf_version not in MINIATUR_URDF_VERSION_MAP:
raise ValueError("%s is not a supported urdf_version." % self._urdf_version)
else:
self.minitaur = (MINIATUR_URDF_VERSION_MAP[self._urdf_version](
pybullet_client=self._pybullet_client,
action_repeat=self._action_repeat,
urdf_root=self._urdf_root,
time_step=self._time_step,
self_collision_enabled=self._self_collision_enabled,
motor_velocity_limit=self._motor_velocity_limit,
pd_control_enabled=self._pd_control_enabled,
accurate_motor_model_enabled=acc_motor,
remove_default_joint_damping=self._remove_default_joint_damping,
motor_kp=self._motor_kp,
motor_kd=self._motor_kd,
control_latency=self._control_latency,
pd_latency=self._pd_latency,
observation_noise_stdev=self._observation_noise_stdev,
torque_control_enabled=self._torque_control_enabled,
motor_overheat_protection=motor_protect,
on_rack=self._on_rack))
self.minitaur.Reset(reload_urdf=False,
default_motor_angles=initial_motor_angles,
reset_time=reset_duration)
# Loop over all env randomizers.
for env_randomizer in self._env_randomizers:
env_randomizer.randomize_env(self)
self._pybullet_client.setPhysicsEngineParameter(enableConeFriction=0)
self._env_step_counter = 0
self._last_base_position = [0, 0, 0]
self._objectives = []
self._pybullet_client.resetDebugVisualizerCamera(self._cam_dist, self._cam_yaw,
self._cam_pitch, [0, 0, 0])
self._pybullet_client.configureDebugVisualizer(self._pybullet_client.COV_ENABLE_RENDERING, 1)
return self._get_observation()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def _transform_action_to_motor_command(self, action):
if self._leg_model_enabled:
for i, action_component in enumerate(action):
if not (-self._action_bound - ACTION_EPS <= action_component <=
self._action_bound + ACTION_EPS):
raise ValueError("{}th action {} out of bounds.".format(i, action_component))
action = self.minitaur.ConvertFromLegModel(action)
return action
def step(self, action):
"""Step forward the simulation, given the action.
Args:
action: A list of desired motor angles for eight motors.
Returns:
observations: The angles, velocities and torques of all motors.
reward: The reward for the current state-action pair.
done: Whether the episode has ended.
info: A dictionary that stores diagnostic information.
Raises:
ValueError: The action dimension is not the same as the number of motors.
ValueError: The magnitude of actions is out of bounds.
"""
self._last_base_position = self.minitaur.GetBasePosition()
if self._is_render:
# Sleep, otherwise the computation takes less time than real time,
# which will make the visualization like a fast-forward video.
time_spent = time.time() - self._last_frame_time
self._last_frame_time = time.time()
time_to_sleep = self.control_time_step - time_spent
if time_to_sleep > 0:
time.sleep(time_to_sleep)
base_pos = self.minitaur.GetBasePosition()
# Keep the previous orientation of the camera set by the user.
[yaw, pitch, dist] = self._pybullet_client.getDebugVisualizerCamera()[8:11]
self._pybullet_client.resetDebugVisualizerCamera(dist, yaw, pitch, base_pos)
for env_randomizer in self._env_randomizers:
env_randomizer.randomize_step(self)
action = self._transform_action_to_motor_command(action)
self.minitaur.Step(action)
reward = self._reward()
done = self._termination()
if self._log_path is not None:
minitaur_logging.update_episode_proto(self._episode_proto, self.minitaur, action,
self._env_step_counter)
self._env_step_counter += 1
if done:
self.minitaur.Terminate()
return np.array(self._get_observation()), reward, done, {}
def render(self, mode="rgb_array", close=False):
if mode != "rgb_array":
return np.array([])
base_pos = self.minitaur.GetBasePosition()
view_matrix = self._pybullet_client.computeViewMatrixFromYawPitchRoll(
cameraTargetPosition=base_pos,
distance=self._cam_dist,
yaw=self._cam_yaw,
pitch=self._cam_pitch,
roll=0,
upAxisIndex=2)
proj_matrix = self._pybullet_client.computeProjectionMatrixFOV(fov=60,
aspect=float(RENDER_WIDTH) /
RENDER_HEIGHT,
nearVal=0.1,
farVal=100.0)
(_, _, px, _, _) = self._pybullet_client.getCameraImage(
width=RENDER_WIDTH,
height=RENDER_HEIGHT,
renderer=self._pybullet_client.ER_BULLET_HARDWARE_OPENGL,
viewMatrix=view_matrix,
projectionMatrix=proj_matrix)
rgb_array = np.array(px)
rgb_array = rgb_array[:, :, :3]
return rgb_array
def get_minitaur_motor_angles(self):
"""Get the minitaur's motor angles.
Returns:
A numpy array of motor angles.
"""
return np.array(self._observation[MOTOR_ANGLE_OBSERVATION_INDEX:MOTOR_ANGLE_OBSERVATION_INDEX +
NUM_MOTORS])
def get_minitaur_motor_velocities(self):
"""Get the minitaur's motor velocities.
Returns:
A numpy array of motor velocities.
"""
return np.array(
self._observation[MOTOR_VELOCITY_OBSERVATION_INDEX:MOTOR_VELOCITY_OBSERVATION_INDEX +
NUM_MOTORS])
def get_minitaur_motor_torques(self):
"""Get the minitaur's motor torques.
Returns:
A numpy array of motor torques.
"""
return np.array(
self._observation[MOTOR_TORQUE_OBSERVATION_INDEX:MOTOR_TORQUE_OBSERVATION_INDEX +
NUM_MOTORS])
def get_minitaur_base_orientation(self):
"""Get the minitaur's base orientation, represented by a quaternion.
Returns:
A numpy array of minitaur's orientation.
"""
return np.array(self._observation[BASE_ORIENTATION_OBSERVATION_INDEX:])
def is_fallen(self):
"""Decide whether the minitaur has fallen.
    If the angle between the up direction of the base and that of the world is
    too large (their dot product is smaller than 0.85) or the base is very low
    on the ground (the height is smaller than 0.13 meter), the minitaur is
    considered fallen.
Returns:
Boolean value that indicates whether the minitaur has fallen.
"""
orientation = self.minitaur.GetBaseOrientation()
rot_mat = self._pybullet_client.getMatrixFromQuaternion(orientation)
local_up = rot_mat[6:]
pos = self.minitaur.GetBasePosition()
return (np.dot(np.asarray([0, 0, 1]), np.asarray(local_up)) < 0.85 or pos[2] < 0.13)
def _termination(self):
position = self.minitaur.GetBasePosition()
distance = math.sqrt(position[0]**2 + position[1]**2)
return self.is_fallen() or distance > self._distance_limit
def _reward(self):
current_base_position = self.minitaur.GetBasePosition()
forward_reward = current_base_position[0] - self._last_base_position[0]
# Cap the forward reward if a cap is set.
forward_reward = min(forward_reward, self._forward_reward_cap)
# Penalty for sideways translation.
drift_reward = -abs(current_base_position[1] - self._last_base_position[1])
# Penalty for sideways rotation of the body.
orientation = self.minitaur.GetBaseOrientation()
rot_matrix = pybullet.getMatrixFromQuaternion(orientation)
local_up_vec = rot_matrix[6:]
shake_reward = -abs(np.dot(np.asarray([1, 1, 0]), np.asarray(local_up_vec)))
energy_reward = -np.abs(
np.dot(self.minitaur.GetMotorTorques(),
self.minitaur.GetMotorVelocities())) * self._time_step
objectives = [forward_reward, energy_reward, drift_reward, shake_reward]
weighted_objectives = [o * w for o, w in zip(objectives, self._objective_weights)]
reward = sum(weighted_objectives)
self._objectives.append(objectives)
return reward
def get_objectives(self):
return self._objectives
@property
def objective_weights(self):
"""Accessor for the weights for all the objectives.
Returns:
List of floating points that corresponds to weights for the objectives in
the order that objectives are stored.
"""
return self._objective_weights
def _get_observation(self):
"""Get observation of this environment, including noise and latency.
The minitaur class maintains a history of true observations. Based on the
latency, this function will find the observation at the right time,
interpolate if necessary. Then Gaussian noise is added to this observation
based on self.observation_noise_stdev.
Returns:
The noisy observation with latency.
"""
observation = []
observation.extend(self.minitaur.GetMotorAngles().tolist())
observation.extend(self.minitaur.GetMotorVelocities().tolist())
observation.extend(self.minitaur.GetMotorTorques().tolist())
observation.extend(list(self.minitaur.GetBaseOrientation()))
self._observation = observation
return self._observation
def _get_true_observation(self):
"""Get the observations of this environment.
It includes the angles, velocities, torques and the orientation of the base.
Returns:
The observation list. observation[0:8] are motor angles. observation[8:16]
are motor velocities, observation[16:24] are motor torques.
observation[24:28] is the orientation of the base, in quaternion form.
"""
observation = []
observation.extend(self.minitaur.GetTrueMotorAngles().tolist())
observation.extend(self.minitaur.GetTrueMotorVelocities().tolist())
observation.extend(self.minitaur.GetTrueMotorTorques().tolist())
observation.extend(list(self.minitaur.GetTrueBaseOrientation()))
self._true_observation = observation
return self._true_observation
def _get_observation_upper_bound(self):
"""Get the upper bound of the observation.
Returns:
The upper bound of an observation. See GetObservation() for the details
of each element of an observation.
"""
upper_bound = np.zeros(self._get_observation_dimension())
num_motors = self.minitaur.num_motors
upper_bound[0:num_motors] = math.pi # Joint angle.
upper_bound[num_motors:2 * num_motors] = (motor.MOTOR_SPEED_LIMIT) # Joint velocity.
upper_bound[2 * num_motors:3 * num_motors] = (motor.OBSERVED_TORQUE_LIMIT) # Joint torque.
upper_bound[3 * num_motors:] = 1.0 # Quaternion of base orientation.
return upper_bound
def _get_observation_lower_bound(self):
"""Get the lower bound of the observation."""
return -self._get_observation_upper_bound()
def _get_observation_dimension(self):
"""Get the length of the observation list.
Returns:
The length of the observation list.
"""
return len(self._get_observation())
if parse_version(gym.__version__) < parse_version('0.9.6'):
_render = render
_reset = reset
_seed = seed
_step = step
def set_time_step(self, control_step, simulation_step=0.001):
"""Sets the time step of the environment.
Args:
control_step: The time period (in seconds) between two adjacent control
actions are applied.
simulation_step: The simulation time step in PyBullet. By default, the
simulation step is 0.001s, which is a good trade-off between simulation
speed and accuracy.
Raises:
ValueError: If the control step is smaller than the simulation step.
"""
if control_step < simulation_step:
raise ValueError("Control step should be larger than or equal to simulation step.")
self.control_time_step = control_step
self._time_step = simulation_step
self._action_repeat = int(round(control_step / simulation_step))
    # Keep this an int, as in __init__, since it is passed to the solver.
    self._num_bullet_solver_iterations = int(NUM_SIMULATION_ITERATION_STEPS / self._action_repeat)
self._pybullet_client.setPhysicsEngineParameter(
numSolverIterations=self._num_bullet_solver_iterations)
self._pybullet_client.setTimeStep(self._time_step)
self.minitaur.SetTimeSteps(action_repeat=self._action_repeat, simulation_step=self._time_step)
@property
def pybullet_client(self):
return self._pybullet_client
@property
def ground_id(self):
return self._ground_id
@ground_id.setter
def ground_id(self, new_ground_id):
self._ground_id = new_ground_id
@property
def env_step_counter(self):
return self._env_step_counter
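
# Editorial usage sketch (not part of the original module): a minimal rollout
# with random actions. It assumes pybullet and pybullet_data are installed;
# the number of steps is arbitrary.
if __name__ == "__main__":
  env = MinitaurGymEnv(render=False)
  observation = env.reset()
  for _ in range(10):
    action = env.action_space.sample()
    observation, reward, done, _ = env.step(action)
    if done:
      observation = env.reset()
  env.close()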
| nrz/ylikuutio | external/bullet3/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_gym_env.py | Python | agpl-3.0 | 25,350 |
# encoding: utf-8
from .OpeningHours import ParseException, OpeningHours
| ypid/pyopening_hours | pyopening_hours/__init__.py | Python | agpl-3.0 | 74 |
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
from collections import defaultdict
import os
import pickle
import time
import imp
from cerbero.config import CONFIG_DIR, Platform, Architecture, Distro,\
DistroVersion, License
from cerbero.build.build import BuildType
from cerbero.build.source import SourceType
from cerbero.errors import FatalError, RecipeNotFoundError, InvalidRecipeError
from cerbero.utils import _, shell, parse_file
from cerbero.utils import messages as m
from cerbero.build import recipe as crecipe
COOKBOOK_NAME = 'cookbook'
COOKBOOK_FILE = os.path.join(CONFIG_DIR, COOKBOOK_NAME)
class RecipeStatus (object):
'''
Stores the current build status of a L{cerbero.recipe.Recipe}
@ivar steps: list of steps currently done
@type steps: list
    @ivar needs_build: whether the recipe needs to be built or not
        True when all steps were successful
@type needs_build: bool
@ivar mtime: modification time of the recipe file, used to reset the
state when the recipe was modified
@type mtime: float
@ivar filepath: recipe's file path
@type filepath: str
@ivar built_version: string with the last version built
@type built_version: str
@ivar file_hash: hash of the file with the recipe description
@type file_hash: int
'''
    def __init__(self, filepath, steps=None, needs_build=True,
                 mtime=None, built_version=None, file_hash=0):
        # Avoid mutable/call-time defaults being shared across instances
        self.steps = steps if steps is not None else []
        self.needs_build = needs_build
        self.mtime = mtime if mtime is not None else time.time()
self.filepath = filepath
self.built_version = built_version
self.file_hash = file_hash
def touch(self):
''' Touches the recipe updating its modification time '''
self.mtime = time.time()
def __repr__(self):
return "Steps: %r Needs Build: %r" % (self.steps, self.needs_build)
class CookBook (object):
'''
    Stores a list of recipes and their build status, saving its state to a
    cache file
    @ivar recipes: dictionary with the available L{cerbero.recipe.Recipe}s
@type recipes: dict
@ivar status: dictionary with the L{cerbero.cookbook.RecipeStatus}
@type status: dict
'''
RECIPE_EXT = '.recipe'
def __init__(self, config, load=True):
self.set_config(config)
self.recipes = {} # recipe_name -> recipe
self._mtimes = {}
if not load:
return
self._restore_cache()
if not os.path.exists(config.recipes_dir):
raise FatalError(_("Recipes dir %s not found") %
config.recipes_dir)
self.update()
def set_config(self, config):
'''
Set the configuration used
@param config: configuration used
@type config: L{cerbero.config.Config}
'''
self._config = config
def get_config(self):
'''
Gets the configuration used
@return: current configuration
@rtype: L{cerbero.config.Config}
'''
return self._config
def set_status(self, status):
'''
Sets the recipes status
@param status: the recipes status
@rtype: dict
'''
self.status = status
def update(self):
'''
Reloads the recipes list and updates the cookbook
'''
self._load_recipes()
self.save()
def get_recipes_list(self):
'''
Gets the list of recipes
@return: list of recipes
@rtype: list
'''
recipes = self.recipes.values()
recipes.sort(key=lambda x: x.name)
return recipes
def add_recipe(self, recipe):
'''
Adds a new recipe to the cookbook
@param recipe: the recipe to add
@type recipe: L{cerbero.build.cookbook.Recipe}
'''
self.recipes[recipe.name] = recipe
def get_recipe(self, name):
'''
Gets a recipe from its name
@param name: name of the recipe
@type name: str
'''
if name not in self.recipes:
raise RecipeNotFoundError(name)
return self.recipes[name]
def update_step_status(self, recipe_name, step):
'''
Updates the status of a recipe's step
@param recipe_name: name of the recipe
@type recipe: str
@param step: name of the step
@type step: str
'''
status = self._recipe_status(recipe_name)
status.steps.append(step)
status.touch()
self.status[recipe_name] = status
self.save()
def update_build_status(self, recipe_name, built_version):
'''
Updates the recipe's build status
@param recipe_name: name of the recipe
@type recipe_name: str
        @param built_version: built version or None to reset it
@type built_version: str
'''
status = self._recipe_status(recipe_name)
        status.needs_build = built_version is None
status.built_version = built_version
status.touch()
self.status[recipe_name] = status
self.save()
    def recipe_built_version(self, recipe_name):
        '''
        Get the last built version of a recipe from the build status
@param recipe_name: name of the recipe
@type recipe_name: str
'''
try:
return self._recipe_status(recipe_name).built_version
except:
return None
def step_done(self, recipe_name, step):
'''
        Whether the given step is done or not
        @param recipe_name: name of the recipe
        @type recipe_name: str
        @param step: name of the step
        @type step: str
'''
return step in self._recipe_status(recipe_name).steps
def reset_recipe_status(self, recipe_name):
'''
Resets the build status of a recipe
@param recipe_name: name of the recipe
@type recipe_name: str
'''
if recipe_name in self.status:
del self.status[recipe_name]
self.save()
def recipe_needs_build(self, recipe_name):
'''
Whether a recipe needs to be build or not
@param recipe_name: name of the recipe
@type recipe_name: str
@return: True if the recipe needs to be build
@rtype: bool
'''
return self._recipe_status(recipe_name).needs_build
def list_recipe_deps(self, recipe_name):
'''
List the dependencies that needs to be built in the correct build
order for a recipe
@param recipe_name: name of the recipe
@type recipe_name: str
@return: list of L{cerbero.recipe.Recipe}
@rtype: list
'''
recipe = self.get_recipe(recipe_name)
return self._find_deps(recipe, {}, [])
def list_recipe_reverse_deps(self, recipe_name):
'''
List the dependencies that depends on this recipe
@param recipe_name: name of the recipe
@type recipe_name: str
@return: list of reverse dependencies L{cerbero.recipe.Recipe}
@rtype: list
'''
recipe = self.get_recipe(recipe_name)
return [r for r in self.recipes.values() if recipe.name in r.deps]
def _runtime_deps (self):
return [x.name for x in self.recipes.values() if x.runtime_dep]
def _cache_file(self, config):
if config.cache_file is not None:
return os.path.join(CONFIG_DIR, config.cache_file)
else:
return COOKBOOK_FILE
def _restore_cache(self):
try:
with open(self._cache_file(self.get_config()), 'rb') as f:
self.status = pickle.load(f)
except Exception:
self.status = {}
m.warning(_("Could not recover status"))
def save(self):
try:
cache_file = self._cache_file(self.get_config())
if not os.path.exists(os.path.dirname(cache_file)):
os.makedirs(os.path.dirname(cache_file))
with open(cache_file, 'wb') as f:
pickle.dump(self.status, f)
except IOError, ex:
m.warning(_("Could not cache the CookBook: %s") % ex)
def _find_deps(self, recipe, state={}, ordered=[]):
if state.get(recipe, 'clean') == 'processed':
return
if state.get(recipe, 'clean') == 'in-progress':
raise FatalError(_("Dependency Cycle"))
state[recipe] = 'in-progress'
recipe_deps = recipe.list_deps()
if not recipe.runtime_dep:
recipe_deps = self._runtime_deps () + recipe_deps
for recipe_name in recipe_deps:
try:
recipedep = self.get_recipe(recipe_name)
except RecipeNotFoundError, e:
raise FatalError(_("Recipe %s has a unknown dependency %s"
% (recipe.name, recipe_name)))
self._find_deps(recipedep, state, ordered)
state[recipe] = 'processed'
ordered.append(recipe)
return ordered
def _recipe_status(self, recipe_name):
recipe = self.get_recipe(recipe_name)
if recipe_name not in self.status:
filepath = None
if hasattr(recipe, '__file__'):
filepath = recipe.__file__
self.status[recipe_name] = RecipeStatus(filepath, steps=[],
file_hash=shell.file_hash(filepath))
return self.status[recipe_name]
def _load_recipes(self):
self.recipes = {}
recipes = defaultdict(dict)
recipes_repos = self._config.get_recipes_repos()
for reponame, (repodir, priority) in recipes_repos.iteritems():
recipes[int(priority)].update(self._load_recipes_from_dir(repodir))
        # Add recipes by ascending priority
for key in sorted(recipes.keys()):
self.recipes.update(recipes[key])
# Check for updates in the recipe file to reset the status
for recipe in self.recipes.values():
if recipe.name not in self.status:
continue
st = self.status[recipe.name]
# filepath attribute was added afterwards
if not hasattr(st, 'filepath') or not getattr(st, 'filepath'):
st.filepath = recipe.__file__
rmtime = os.path.getmtime(recipe.__file__)
if rmtime > st.mtime:
# The mtime is different, check the file hash now
                # Use getattr as file_hash was added later
saved_hash = getattr(st, 'file_hash', 0)
if os.path.exists (st.filepath):
current_hash = shell.file_hash(st.filepath)
else:
current_hash = None
if saved_hash == current_hash:
# Update the status with the mtime
st.touch()
else:
self.reset_recipe_status(recipe.name)
def _load_recipes_from_dir(self, repo):
recipes = {}
recipes_files = shell.find_files('*%s' % self.RECIPE_EXT, repo)
recipes_files.extend(shell.find_files('*/*%s' % self.RECIPE_EXT, repo))
try:
custom = None
m_path = os.path.join(repo, 'custom.py')
if os.path.exists(m_path):
custom = imp.load_source('custom', m_path)
except Exception:
custom = None
        # The custom.py module loaded above from the recipes dir can contain
        # private classes to extend cerbero's recipes and reuse them in our
        # private repository
        for f in recipes_files:
            try:
                recipe = self._load_recipe_from_file(f, custom)
            except RecipeNotFoundError:
                m.warning(_("Could not find a valid recipe in %s") % f)
                continue
if recipe is None:
continue
recipes[recipe.name] = recipe
return recipes
def _load_recipe_from_file(self, filepath, custom=None):
mod_name, file_ext = os.path.splitext(os.path.split(filepath)[-1])
if self._config.target_arch == Architecture.UNIVERSAL:
if self._config.target_platform in [Platform.IOS, Platform.DARWIN]:
recipe = crecipe.UniversalFlatRecipe(self._config)
else:
recipe = crecipe.UniversalRecipe(self._config)
for c in self._config.arch_config.keys():
try:
d = {'Platform': Platform, 'Architecture': Architecture,
'BuildType': BuildType, 'SourceType': SourceType,
'Distro': Distro, 'DistroVersion': DistroVersion,
'License': License, 'recipe': crecipe, 'os': os,
'BuildSteps': crecipe.BuildSteps,
'InvalidRecipeError': InvalidRecipeError,
'FatalError': FatalError,
'custom': custom, '_': _, 'shell': shell}
parse_file(filepath, d)
conf = self._config.arch_config[c]
if self._config.target_arch == Architecture.UNIVERSAL:
if self._config.target_platform not in [Platform.IOS,
Platform.DARWIN]:
conf.prefix = os.path.join(self._config.prefix, c)
r = d['Recipe'](conf)
r.__file__ = os.path.abspath(filepath)
self._config.arch_config[c].do_setup_env()
r.prepare()
if self._config.target_arch == Architecture.UNIVERSAL:
recipe.add_recipe(r)
else:
return r
except InvalidRecipeError:
pass
except Exception, ex:
m.warning("Error loading recipe in file %s %s" %
(filepath, ex))
if self._config.target_arch == Architecture.UNIVERSAL:
if not recipe.is_empty():
return recipe
return None
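
# Editorial sketch (not part of the original module): a standalone, simplified
# illustration of the depth-first dependency resolution performed by
# CookBook._find_deps() above, using a plain name -> deps dict instead of real
# Recipe objects. The recipe names below are made up.
if __name__ == '__main__':
    deps = {'gstreamer': ['glib'], 'glib': ['libffi'], 'libffi': [],
            'gst-plugins-base': ['gstreamer', 'glib']}

    def find_deps(name, state, ordered):
        if state.get(name) == 'processed':
            return ordered
        if state.get(name) == 'in-progress':
            raise Exception('Dependency Cycle')
        state[name] = 'in-progress'
        for dep in deps[name]:
            find_deps(dep, state, ordered)
        state[name] = 'processed'
        ordered.append(name)
        return ordered

    # Prints the build order: libffi, glib, gstreamer, gst-plugins-base
    print(find_deps('gst-plugins-base', {}, []))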
| freedesktop-unofficial-mirror/gstreamer-sdk__cerbero | cerbero/build/cookbook.py | Python | lgpl-2.1 | 14,881 |
"""
Implementation of the XDG Menu Specification
http://standards.freedesktop.org/menu-spec/
Example code:
from xdg.Menu import parse, Menu, MenuEntry
def print_menu(menu, tab=0):
for submenu in menu.Entries:
if isinstance(submenu, Menu):
print ("\t" * tab) + unicode(submenu)
print_menu(submenu, tab+1)
elif isinstance(submenu, MenuEntry):
print ("\t" * tab) + unicode(submenu.DesktopEntry)
print_menu(parse())
"""
import os
import locale
import subprocess
import ast
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
from xdg.BaseDirectory import xdg_data_dirs, xdg_config_dirs
from xdg.DesktopEntry import DesktopEntry
from xdg.Exceptions import ParsingError
from xdg.util import PY3
import xdg.Locale
import xdg.Config
def _strxfrm(s):
"""Wrapper around locale.strxfrm that accepts unicode strings on Python 2.
See Python bug #2481.
"""
if (not PY3) and isinstance(s, unicode):
s = s.encode('utf-8')
return locale.strxfrm(s)
DELETED = "Deleted"
NO_DISPLAY = "NoDisplay"
HIDDEN = "Hidden"
EMPTY = "Empty"
NOT_SHOW_IN = "NotShowIn"
NO_EXEC = "NoExec"
class Menu:
"""Menu containing sub menus under menu.Entries
Contains both Menu and MenuEntry items.
"""
def __init__(self):
# Public stuff
self.Name = ""
self.Directory = None
self.Entries = []
self.Doc = ""
self.Filename = ""
self.Depth = 0
self.Parent = None
self.NotInXml = False
# Can be True, False, DELETED, NO_DISPLAY, HIDDEN, EMPTY or NOT_SHOW_IN
self.Show = True
self.Visible = 0
# Private stuff, only needed for parsing
self.AppDirs = []
self.DefaultLayout = None
self.Deleted = None
self.Directories = []
self.DirectoryDirs = []
self.Layout = None
self.MenuEntries = []
self.Moves = []
self.OnlyUnallocated = None
self.Rules = []
self.Submenus = []
def __str__(self):
return self.Name
def __add__(self, other):
for dir in other.AppDirs:
self.AppDirs.append(dir)
for dir in other.DirectoryDirs:
self.DirectoryDirs.append(dir)
for directory in other.Directories:
self.Directories.append(directory)
if other.Deleted is not None:
self.Deleted = other.Deleted
if other.OnlyUnallocated is not None:
self.OnlyUnallocated = other.OnlyUnallocated
if other.Layout:
self.Layout = other.Layout
if other.DefaultLayout:
self.DefaultLayout = other.DefaultLayout
for rule in other.Rules:
self.Rules.append(rule)
for move in other.Moves:
self.Moves.append(move)
for submenu in other.Submenus:
self.addSubmenu(submenu)
return self
# FIXME: Performance: cache getName()
def __cmp__(self, other):
return locale.strcoll(self.getName(), other.getName())
def _key(self):
"""Key function for locale-aware sorting."""
return _strxfrm(self.getName())
def __lt__(self, other):
try:
other = other._key()
except AttributeError:
pass
return self._key() < other
def __eq__(self, other):
try:
return self.Name == unicode(other)
except NameError: # unicode() becomes str() in Python 3
return self.Name == str(other)
""" PUBLIC STUFF """
def getEntries(self, show_hidden=False):
"""Interator for a list of Entries visible to the user."""
for entry in self.Entries:
if show_hidden:
yield entry
elif entry.Show is True:
yield entry
    # FIXME: Add searchEntry/searchMenu function
# search for name/comment/genericname/desktopfileid
# return multiple items
def getMenuEntry(self, desktopfileid, deep=False):
"""Searches for a MenuEntry with a given DesktopFileID."""
for menuentry in self.MenuEntries:
if menuentry.DesktopFileID == desktopfileid:
return menuentry
        if deep:
            for submenu in self.Submenus:
                # Return the first match found in any submenu
                menuentry = submenu.getMenuEntry(desktopfileid, deep)
                if menuentry:
                    return menuentry
def getMenu(self, path):
"""Searches for a Menu with a given path."""
array = path.split("/", 1)
for submenu in self.Submenus:
if submenu.Name == array[0]:
if len(array) > 1:
return submenu.getMenu(array[1])
else:
return submenu
def getPath(self, org=False, toplevel=False):
"""Returns this menu's path in the menu structure."""
parent = self
names = []
while 1:
if org:
names.append(parent.Name)
else:
names.append(parent.getName())
if parent.Depth > 0:
parent = parent.Parent
else:
break
names.reverse()
path = ""
if not toplevel:
names.pop(0)
for name in names:
path = os.path.join(path, name)
return path
def getName(self):
"""Returns the menu's localised name."""
try:
return self.Directory.DesktopEntry.getName()
except AttributeError:
return self.Name
def getGenericName(self):
"""Returns the menu's generic name."""
try:
return self.Directory.DesktopEntry.getGenericName()
except AttributeError:
return ""
def getComment(self):
"""Returns the menu's comment text."""
try:
return self.Directory.DesktopEntry.getComment()
except AttributeError:
return ""
def getIcon(self):
"""Returns the menu's icon, filename or simple name"""
try:
return self.Directory.DesktopEntry.getIcon()
except AttributeError:
return ""
def sort(self):
self.Entries = []
self.Visible = 0
for submenu in self.Submenus:
submenu.sort()
_submenus = set()
_entries = set()
for order in self.Layout.order:
if order[0] == "Filename":
_entries.add(order[1])
elif order[0] == "Menuname":
_submenus.add(order[1])
for order in self.Layout.order:
if order[0] == "Separator":
separator = Separator(self)
if len(self.Entries) > 0 and isinstance(self.Entries[-1], Separator):
separator.Show = False
self.Entries.append(separator)
elif order[0] == "Filename":
menuentry = self.getMenuEntry(order[1])
if menuentry:
self.Entries.append(menuentry)
elif order[0] == "Menuname":
submenu = self.getMenu(order[1])
if submenu:
if submenu.Layout.inline:
self.merge_inline(submenu)
else:
self.Entries.append(submenu)
elif order[0] == "Merge":
if order[1] == "files" or order[1] == "all":
self.MenuEntries.sort()
for menuentry in self.MenuEntries:
if menuentry.DesktopFileID not in _entries:
self.Entries.append(menuentry)
elif order[1] == "menus" or order[1] == "all":
self.Submenus.sort()
for submenu in self.Submenus:
if submenu.Name not in _submenus:
if submenu.Layout.inline:
self.merge_inline(submenu)
else:
self.Entries.append(submenu)
# getHidden / NoDisplay / OnlyShowIn / NotOnlyShowIn / Deleted / NoExec
for entry in self.Entries:
entry.Show = True
self.Visible += 1
if isinstance(entry, Menu):
if entry.Deleted is True:
entry.Show = DELETED
self.Visible -= 1
elif isinstance(entry.Directory, MenuEntry):
if entry.Directory.DesktopEntry.getNoDisplay():
entry.Show = NO_DISPLAY
self.Visible -= 1
elif entry.Directory.DesktopEntry.getHidden():
entry.Show = HIDDEN
self.Visible -= 1
elif isinstance(entry, MenuEntry):
if entry.DesktopEntry.getNoDisplay():
entry.Show = NO_DISPLAY
self.Visible -= 1
elif entry.DesktopEntry.getHidden():
entry.Show = HIDDEN
self.Visible -= 1
elif entry.DesktopEntry.getTryExec() and not entry.DesktopEntry.findTryExec():
entry.Show = NO_EXEC
self.Visible -= 1
elif xdg.Config.windowmanager:
if (entry.DesktopEntry.OnlyShowIn != [] and (
xdg.Config.windowmanager not in entry.DesktopEntry.OnlyShowIn
)
) or (
xdg.Config.windowmanager in entry.DesktopEntry.NotShowIn
):
entry.Show = NOT_SHOW_IN
self.Visible -= 1
elif isinstance(entry, Separator):
self.Visible -= 1
# remove separators at the beginning and at the end
if len(self.Entries) > 0:
if isinstance(self.Entries[0], Separator):
self.Entries[0].Show = False
if len(self.Entries) > 1:
if isinstance(self.Entries[-1], Separator):
self.Entries[-1].Show = False
# show_empty tag
for entry in self.Entries[:]:
if isinstance(entry, Menu) and not entry.Layout.show_empty and entry.Visible == 0:
entry.Show = EMPTY
self.Visible -= 1
if entry.NotInXml is True:
self.Entries.remove(entry)
""" PRIVATE STUFF """
def addSubmenu(self, newmenu):
for submenu in self.Submenus:
if submenu == newmenu:
submenu += newmenu
break
else:
self.Submenus.append(newmenu)
newmenu.Parent = self
newmenu.Depth = self.Depth + 1
# inline tags
def merge_inline(self, submenu):
"""Appends a submenu's entries to this menu
See the <Menuname> section of the spec about the "inline" attribute
"""
if len(submenu.Entries) == 1 and submenu.Layout.inline_alias:
menuentry = submenu.Entries[0]
menuentry.DesktopEntry.set("Name", submenu.getName(), locale=True)
menuentry.DesktopEntry.set("GenericName", submenu.getGenericName(), locale=True)
menuentry.DesktopEntry.set("Comment", submenu.getComment(), locale=True)
self.Entries.append(menuentry)
elif len(submenu.Entries) <= submenu.Layout.inline_limit or submenu.Layout.inline_limit == 0:
if submenu.Layout.inline_header:
header = Header(submenu.getName(), submenu.getGenericName(), submenu.getComment())
self.Entries.append(header)
for entry in submenu.Entries:
self.Entries.append(entry)
else:
self.Entries.append(submenu)
class Move:
"A move operation"
def __init__(self, old="", new=""):
self.Old = old
self.New = new
def __cmp__(self, other):
return cmp(self.Old, other.Old)
class Layout:
"Menu Layout class"
def __init__(self, show_empty=False, inline=False, inline_limit=4,
inline_header=True, inline_alias=False):
self.show_empty = show_empty
self.inline = inline
self.inline_limit = inline_limit
self.inline_header = inline_header
self.inline_alias = inline_alias
self._order = []
self._default_order = [
['Merge', 'menus'],
['Merge', 'files']
]
@property
def order(self):
return self._order if self._order else self._default_order
@order.setter
def order(self, order):
self._order = order
class Rule:
"""Include / Exclude Rules Class"""
TYPE_INCLUDE, TYPE_EXCLUDE = 0, 1
@classmethod
def fromFilename(cls, type, filename):
tree = ast.Expression(
body=ast.Compare(
left=ast.Str(filename),
ops=[ast.Eq()],
comparators=[ast.Attribute(
value=ast.Name(id='menuentry', ctx=ast.Load()),
attr='DesktopFileID',
ctx=ast.Load()
)]
),
lineno=1, col_offset=0
)
ast.fix_missing_locations(tree)
rule = Rule(type, tree)
return rule
def __init__(self, type, expression):
# Type is TYPE_INCLUDE or TYPE_EXCLUDE
self.Type = type
# expression is ast.Expression
self.expression = expression
self.code = compile(self.expression, '<compiled-menu-rule>', 'eval')
def __str__(self):
return ast.dump(self.expression)
def apply(self, menuentries, run):
for menuentry in menuentries:
if run == 2 and (menuentry.MatchedInclude is True or
menuentry.Allocated is True):
continue
if eval(self.code):
if self.Type is Rule.TYPE_INCLUDE:
menuentry.Add = True
menuentry.MatchedInclude = True
else:
menuentry.Add = False
return menuentries
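
# Editorial sketch (not part of the original module): shows how Rule compiles
# an <Include><Filename> match into an AST expression and applies it to menu
# entries. The _FakeEntry class and the .desktop names are hypothetical
# stand-ins for real MenuEntry objects.
if __name__ == '__main__':
    class _FakeEntry(object):
        def __init__(self, desktop_file_id):
            self.DesktopFileID = desktop_file_id
            self.Add = False
            self.MatchedInclude = False
            self.Allocated = False

    rule = Rule.fromFilename(Rule.TYPE_INCLUDE, 'firefox.desktop')
    entries = [_FakeEntry('firefox.desktop'), _FakeEntry('gimp.desktop')]
    rule.apply(entries, run=1)
    # Expected: firefox.desktop marked Add=True, gimp.desktop left Add=False
    print([(e.DesktopFileID, e.Add) for e in entries])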
class MenuEntry:
"Wrapper for 'Menu Style' Desktop Entries"
TYPE_USER = "User"
TYPE_SYSTEM = "System"
TYPE_BOTH = "Both"
def __init__(self, filename, dir="", prefix=""):
# Create entry
self.DesktopEntry = DesktopEntry(os.path.join(dir, filename))
self.setAttributes(filename, dir, prefix)
        # Can be True, False, DELETED, HIDDEN, EMPTY, NOT_SHOW_IN or NO_EXEC
self.Show = True
# Semi-Private
self.Original = None
self.Parents = []
# Private Stuff
self.Allocated = False
self.Add = False
self.MatchedInclude = False
# Caching
self.Categories = self.DesktopEntry.getCategories()
def save(self):
"""Save any changes to the desktop entry."""
if self.DesktopEntry.tainted:
self.DesktopEntry.write()
def getDir(self):
"""Return the directory containing the desktop entry file."""
return self.DesktopEntry.filename.replace(self.Filename, '')
def getType(self):
"""Return the type of MenuEntry, System/User/Both"""
if not xdg.Config.root_mode:
if self.Original:
return self.TYPE_BOTH
elif xdg_data_dirs[0] in self.DesktopEntry.filename:
return self.TYPE_USER
else:
return self.TYPE_SYSTEM
else:
return self.TYPE_USER
def setAttributes(self, filename, dir="", prefix=""):
self.Filename = filename
self.Prefix = prefix
self.DesktopFileID = os.path.join(prefix, filename).replace("/", "-")
if not os.path.isabs(self.DesktopEntry.filename):
self.__setFilename()
def updateAttributes(self):
if self.getType() == self.TYPE_SYSTEM:
self.Original = MenuEntry(self.Filename, self.getDir(), self.Prefix)
self.__setFilename()
def __setFilename(self):
if not xdg.Config.root_mode:
path = xdg_data_dirs[0]
else:
path = xdg_data_dirs[1]
if self.DesktopEntry.getType() == "Application":
dir_ = os.path.join(path, "applications")
else:
dir_ = os.path.join(path, "desktop-directories")
self.DesktopEntry.filename = os.path.join(dir_, self.Filename)
def __cmp__(self, other):
return locale.strcoll(self.DesktopEntry.getName(), other.DesktopEntry.getName())
def _key(self):
"""Key function for locale-aware sorting."""
return _strxfrm(self.DesktopEntry.getName())
def __lt__(self, other):
try:
other = other._key()
except AttributeError:
pass
return self._key() < other
def __eq__(self, other):
        return self.DesktopFileID == str(other)
def __repr__(self):
return self.DesktopFileID
class Separator:
"Just a dummy class for Separators"
def __init__(self, parent):
self.Parent = parent
self.Show = True
class Header:
"Class for Inline Headers"
def __init__(self, name, generic_name, comment):
self.Name = name
self.GenericName = generic_name
self.Comment = comment
def __str__(self):
return self.Name
TYPE_DIR, TYPE_FILE = 0, 1
def _check_file_path(value, filename, type):
path = os.path.dirname(filename)
if not os.path.isabs(value):
value = os.path.join(path, value)
value = os.path.abspath(value)
if not os.path.exists(value):
return False
if type == TYPE_DIR and os.path.isdir(value):
return value
if type == TYPE_FILE and os.path.isfile(value):
return value
return False
def _get_menu_file_path(filename):
dirs = list(xdg_config_dirs)
if xdg.Config.root_mode is True:
dirs.pop(0)
for d in dirs:
menuname = os.path.join(d, "menus", filename)
if os.path.isfile(menuname):
return menuname
def _to_bool(value):
if isinstance(value, bool):
return value
return value.lower() == "true"
# remove duplicate entries from a list
def _dedupe(_list):
_set = {}
_list.reverse()
_list = [_set.setdefault(e, e) for e in _list if e not in _set]
_list.reverse()
return _list
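# Editor's note (illustrative): because the list is reversed before filtering,
# _dedupe keeps the last occurrence of each element while preserving the order
# of those occurrences, e.g. _dedupe(["a", "b", "a", "c"]) returns ["b", "a", "c"].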
class XMLMenuBuilder(object):
def __init__(self, debug=False):
self.debug = debug
def parse(self, filename=None):
"""Load an applications.menu file.
filename : str, optional
            The default is ``$XDG_CONFIG_DIRS/menus/${XDG_MENU_PREFIX}applications.menu``.
"""
# convert to absolute path
if filename and not os.path.isabs(filename):
filename = _get_menu_file_path(filename)
# use default if no filename given
if not filename:
candidate = os.environ.get('XDG_MENU_PREFIX', '') + "applications.menu"
filename = _get_menu_file_path(candidate)
if not filename:
raise ParsingError('File not found', "/etc/xdg/menus/%s" % candidate)
# check if it is a .menu file
if not filename.endswith(".menu"):
raise ParsingError('Not a .menu file', filename)
# create xml parser
try:
tree = etree.parse(filename)
except:
raise ParsingError('Not a valid .menu file', filename)
# parse menufile
self._merged_files = set()
self._directory_dirs = set()
self.cache = MenuEntryCache()
menu = self.parse_menu(tree.getroot(), filename)
menu.tree = tree
menu.filename = filename
self.handle_moves(menu)
self.post_parse(menu)
# generate the menu
self.generate_not_only_allocated(menu)
self.generate_only_allocated(menu)
# and finally sort
menu.sort()
return menu
def parse_menu(self, node, filename):
menu = Menu()
self.parse_node(node, filename, menu)
return menu
def parse_node(self, node, filename, parent=None):
num_children = len(node)
for child in node:
tag, text = child.tag, child.text
text = text.strip() if text else None
if tag == 'Menu':
menu = self.parse_menu(child, filename)
parent.addSubmenu(menu)
elif tag == 'AppDir' and text:
self.parse_app_dir(text, filename, parent)
elif tag == 'DefaultAppDirs':
self.parse_default_app_dir(filename, parent)
elif tag == 'DirectoryDir' and text:
self.parse_directory_dir(text, filename, parent)
elif tag == 'DefaultDirectoryDirs':
self.parse_default_directory_dir(filename, parent)
elif tag == 'Name' and text:
parent.Name = text
elif tag == 'Directory' and text:
parent.Directories.append(text)
elif tag == 'OnlyUnallocated':
parent.OnlyUnallocated = True
elif tag == 'NotOnlyUnallocated':
parent.OnlyUnallocated = False
elif tag == 'Deleted':
parent.Deleted = True
elif tag == 'NotDeleted':
parent.Deleted = False
elif tag == 'Include' or tag == 'Exclude':
parent.Rules.append(self.parse_rule(child))
elif tag == 'MergeFile':
if child.attrib.get("type", None) == "parent":
self.parse_merge_file("applications.menu", child, filename, parent)
elif text:
self.parse_merge_file(text, child, filename, parent)
elif tag == 'MergeDir' and text:
self.parse_merge_dir(text, child, filename, parent)
elif tag == 'DefaultMergeDirs':
self.parse_default_merge_dirs(child, filename, parent)
elif tag == 'Move':
parent.Moves.append(self.parse_move(child))
elif tag == 'Layout':
if num_children > 1:
parent.Layout = self.parse_layout(child)
elif tag == 'DefaultLayout':
if num_children > 1:
parent.DefaultLayout = self.parse_layout(child)
elif tag == 'LegacyDir' and text:
self.parse_legacy_dir(text, child.attrib.get("prefix", ""), filename, parent)
elif tag == 'KDELegacyDirs':
self.parse_kde_legacy_dirs(filename, parent)
def parse_layout(self, node):
layout = Layout(
show_empty=_to_bool(node.attrib.get("show_empty", False)),
inline=_to_bool(node.attrib.get("inline", False)),
inline_limit=int(node.attrib.get("inline_limit", 4)),
inline_header=_to_bool(node.attrib.get("inline_header", True)),
inline_alias=_to_bool(node.attrib.get("inline_alias", False))
)
for child in node:
tag, text = child.tag, child.text
text = text.strip() if text else None
if tag == "Menuname" and text:
layout.order.append([
"Menuname",
text,
_to_bool(child.attrib.get("show_empty", False)),
_to_bool(child.attrib.get("inline", False)),
int(child.attrib.get("inline_limit", 4)),
_to_bool(child.attrib.get("inline_header", True)),
_to_bool(child.attrib.get("inline_alias", False))
])
elif tag == "Separator":
layout.order.append(['Separator'])
elif tag == "Filename" and text:
layout.order.append(["Filename", text])
elif tag == "Merge":
layout.order.append([
"Merge",
child.attrib.get("type", "all")
])
return layout
def parse_move(self, node):
old, new = "", ""
for child in node:
tag, text = child.tag, child.text
text = text.strip() if text else None
if tag == "Old" and text:
old = text
elif tag == "New" and text:
new = text
return Move(old, new)
# ---------- <Rule> parsing
def parse_rule(self, node):
type = Rule.TYPE_INCLUDE if node.tag == 'Include' else Rule.TYPE_EXCLUDE
tree = ast.Expression(lineno=1, col_offset=0)
expr = self.parse_bool_op(node, ast.Or())
if expr:
tree.body = expr
else:
tree.body = ast.Name('False', ast.Load())
ast.fix_missing_locations(tree)
return Rule(type, tree)
def parse_bool_op(self, node, operator):
values = []
for child in node:
rule = self.parse_rule_node(child)
if rule:
values.append(rule)
num_values = len(values)
if num_values > 1:
return ast.BoolOp(operator, values)
elif num_values == 1:
return values[0]
return None
def parse_rule_node(self, node):
tag = node.tag
if tag == 'Or':
return self.parse_bool_op(node, ast.Or())
elif tag == 'And':
return self.parse_bool_op(node, ast.And())
elif tag == 'Not':
expr = self.parse_bool_op(node, ast.Or())
return ast.UnaryOp(ast.Not(), expr) if expr else None
elif tag == 'All':
return ast.Name('True', ast.Load())
elif tag == 'Category':
category = node.text
return ast.Compare(
left=ast.Str(category),
ops=[ast.In()],
comparators=[ast.Attribute(
value=ast.Name(id='menuentry', ctx=ast.Load()),
attr='Categories',
ctx=ast.Load()
)]
)
elif tag == 'Filename':
filename = node.text
return ast.Compare(
left=ast.Str(filename),
ops=[ast.Eq()],
comparators=[ast.Attribute(
value=ast.Name(id='menuentry', ctx=ast.Load()),
attr='DesktopFileID',
ctx=ast.Load()
)]
)
# ---------- App/Directory Dir Stuff
def parse_app_dir(self, value, filename, parent):
value = _check_file_path(value, filename, TYPE_DIR)
if value:
parent.AppDirs.append(value)
def parse_default_app_dir(self, filename, parent):
for d in reversed(xdg_data_dirs):
self.parse_app_dir(os.path.join(d, "applications"), filename, parent)
def parse_directory_dir(self, value, filename, parent):
value = _check_file_path(value, filename, TYPE_DIR)
if value:
parent.DirectoryDirs.append(value)
def parse_default_directory_dir(self, filename, parent):
for d in reversed(xdg_data_dirs):
self.parse_directory_dir(os.path.join(d, "desktop-directories"), filename, parent)
# ---------- Merge Stuff
def parse_merge_file(self, value, child, filename, parent):
if child.attrib.get("type", None) == "parent":
for d in xdg_config_dirs:
rel_file = filename.replace(d, "").strip("/")
if rel_file != filename:
for p in xdg_config_dirs:
if d == p:
continue
if os.path.isfile(os.path.join(p, rel_file)):
self.merge_file(os.path.join(p, rel_file), child, parent)
break
else:
value = _check_file_path(value, filename, TYPE_FILE)
if value:
self.merge_file(value, child, parent)
def parse_merge_dir(self, value, child, filename, parent):
value = _check_file_path(value, filename, TYPE_DIR)
if value:
for item in os.listdir(value):
try:
if item.endswith(".menu"):
self.merge_file(os.path.join(value, item), child, parent)
except UnicodeDecodeError:
continue
def parse_default_merge_dirs(self, child, filename, parent):
basename = os.path.splitext(os.path.basename(filename))[0]
for d in reversed(xdg_config_dirs):
self.parse_merge_dir(os.path.join(d, "menus", basename + "-merged"), child, filename, parent)
def merge_file(self, filename, child, parent):
# check for infinite loops
if filename in self._merged_files:
if self.debug:
raise ParsingError('Infinite MergeFile loop detected', filename)
else:
return
self._merged_files.add(filename)
# load file
try:
tree = etree.parse(filename)
except IOError:
if self.debug:
raise ParsingError('File not found', filename)
else:
return
except:
if self.debug:
raise ParsingError('Not a valid .menu file', filename)
else:
return
root = tree.getroot()
self.parse_node(root, filename, parent)
# ---------- Legacy Dir Stuff
def parse_legacy_dir(self, dir_, prefix, filename, parent):
m = self.merge_legacy_dir(dir_, prefix, filename, parent)
if m:
parent += m
def merge_legacy_dir(self, dir_, prefix, filename, parent):
dir_ = _check_file_path(dir_, filename, TYPE_DIR)
if dir_ and dir_ not in self._directory_dirs:
self._directory_dirs.add(dir_)
m = Menu()
m.AppDirs.append(dir_)
m.DirectoryDirs.append(dir_)
m.Name = os.path.basename(dir_)
m.NotInXml = True
for item in os.listdir(dir_):
try:
if item == ".directory":
m.Directories.append(item)
elif os.path.isdir(os.path.join(dir_, item)):
m.addSubmenu(self.merge_legacy_dir(
os.path.join(dir_, item),
prefix,
filename,
parent
))
except UnicodeDecodeError:
continue
self.cache.add_menu_entries([dir_], prefix, True)
menuentries = self.cache.get_menu_entries([dir_], False)
for menuentry in menuentries:
categories = menuentry.Categories
if len(categories) == 0:
r = Rule.fromFilename(Rule.TYPE_INCLUDE, menuentry.DesktopFileID)
m.Rules.append(r)
if not dir_ in parent.AppDirs:
categories.append("Legacy")
menuentry.Categories = categories
return m
def parse_kde_legacy_dirs(self, filename, parent):
try:
proc = subprocess.Popen(
['kde-config', '--path', 'apps'],
stdout=subprocess.PIPE,
universal_newlines=True
)
output = proc.communicate()[0].splitlines()
except OSError:
# If kde-config doesn't exist, ignore this.
return
try:
for dir_ in output[0].split(":"):
self.parse_legacy_dir(dir_, "kde", filename, parent)
except IndexError:
pass
def post_parse(self, menu):
# unallocated / deleted
if menu.Deleted is None:
menu.Deleted = False
if menu.OnlyUnallocated is None:
menu.OnlyUnallocated = False
# Layout Tags
if not menu.Layout or not menu.DefaultLayout:
if menu.DefaultLayout:
menu.Layout = menu.DefaultLayout
elif menu.Layout:
if menu.Depth > 0:
menu.DefaultLayout = menu.Parent.DefaultLayout
else:
menu.DefaultLayout = Layout()
else:
if menu.Depth > 0:
menu.Layout = menu.Parent.DefaultLayout
menu.DefaultLayout = menu.Parent.DefaultLayout
else:
menu.Layout = Layout()
menu.DefaultLayout = Layout()
# add parent's app/directory dirs
if menu.Depth > 0:
menu.AppDirs = menu.Parent.AppDirs + menu.AppDirs
menu.DirectoryDirs = menu.Parent.DirectoryDirs + menu.DirectoryDirs
# remove duplicates
menu.Directories = _dedupe(menu.Directories)
menu.DirectoryDirs = _dedupe(menu.DirectoryDirs)
menu.AppDirs = _dedupe(menu.AppDirs)
# go recursive through all menus
for submenu in menu.Submenus:
self.post_parse(submenu)
# reverse so handling is easier
menu.Directories.reverse()
menu.DirectoryDirs.reverse()
menu.AppDirs.reverse()
# get the valid .directory file out of the list
for directory in menu.Directories:
for dir in menu.DirectoryDirs:
if os.path.isfile(os.path.join(dir, directory)):
menuentry = MenuEntry(directory, dir)
if not menu.Directory:
menu.Directory = menuentry
elif menuentry.Type == MenuEntry.TYPE_SYSTEM:
if menu.Directory.Type == MenuEntry.TYPE_USER:
menu.Directory.Original = menuentry
if menu.Directory:
break
# Finally generate the menu
def generate_not_only_allocated(self, menu):
for submenu in menu.Submenus:
self.generate_not_only_allocated(submenu)
if menu.OnlyUnallocated is False:
self.cache.add_menu_entries(menu.AppDirs)
menuentries = []
for rule in menu.Rules:
menuentries = rule.apply(self.cache.get_menu_entries(menu.AppDirs), 1)
for menuentry in menuentries:
if menuentry.Add is True:
menuentry.Parents.append(menu)
menuentry.Add = False
menuentry.Allocated = True
menu.MenuEntries.append(menuentry)
def generate_only_allocated(self, menu):
for submenu in menu.Submenus:
self.generate_only_allocated(submenu)
if menu.OnlyUnallocated is True:
self.cache.add_menu_entries(menu.AppDirs)
menuentries = []
for rule in menu.Rules:
menuentries = rule.apply(self.cache.get_menu_entries(menu.AppDirs), 2)
for menuentry in menuentries:
if menuentry.Add is True:
menuentry.Parents.append(menu)
# menuentry.Add = False
# menuentry.Allocated = True
menu.MenuEntries.append(menuentry)
def handle_moves(self, menu):
for submenu in menu.Submenus:
self.handle_moves(submenu)
# parse move operations
for move in menu.Moves:
move_from_menu = menu.getMenu(move.Old)
if move_from_menu:
# FIXME: this is assigned, but never used...
move_to_menu = menu.getMenu(move.New)
menus = move.New.split("/")
oldparent = None
while len(menus) > 0:
if not oldparent:
oldparent = menu
newmenu = oldparent.getMenu(menus[0])
if not newmenu:
newmenu = Menu()
newmenu.Name = menus[0]
if len(menus) > 1:
newmenu.NotInXml = True
oldparent.addSubmenu(newmenu)
oldparent = newmenu
menus.pop(0)
newmenu += move_from_menu
move_from_menu.Parent.Submenus.remove(move_from_menu)
class MenuEntryCache:
"Class to cache Desktop Entries"
def __init__(self):
self.cacheEntries = {}
self.cacheEntries['legacy'] = []
self.cache = {}
def add_menu_entries(self, dirs, prefix="", legacy=False):
for dir_ in dirs:
if not dir_ in self.cacheEntries:
self.cacheEntries[dir_] = []
self.__addFiles(dir_, "", prefix, legacy)
def __addFiles(self, dir_, subdir, prefix, legacy):
for item in os.listdir(os.path.join(dir_, subdir)):
if item.endswith(".desktop"):
try:
menuentry = MenuEntry(os.path.join(subdir, item), dir_, prefix)
except ParsingError:
continue
self.cacheEntries[dir_].append(menuentry)
if legacy:
self.cacheEntries['legacy'].append(menuentry)
elif os.path.isdir(os.path.join(dir_, subdir, item)) and not legacy:
self.__addFiles(dir_, os.path.join(subdir, item), prefix, legacy)
def get_menu_entries(self, dirs, legacy=True):
entries = []
ids = set()
# handle legacy items
appdirs = dirs[:]
if legacy:
appdirs.append("legacy")
# cache the results again
key = "".join(appdirs)
try:
return self.cache[key]
except KeyError:
pass
for dir_ in appdirs:
for menuentry in self.cacheEntries[dir_]:
try:
if menuentry.DesktopFileID not in ids:
ids.add(menuentry.DesktopFileID)
entries.append(menuentry)
elif menuentry.getType() == MenuEntry.TYPE_SYSTEM:
# FIXME: This is only 99% correct, but still...
idx = entries.index(menuentry)
entry = entries[idx]
if entry.getType() == MenuEntry.TYPE_USER:
entry.Original = menuentry
except UnicodeDecodeError:
continue
self.cache[key] = entries
return entries
def parse(filename=None, debug=False):
"""Helper function.
Equivalent to calling xdg.Menu.XMLMenuBuilder().parse(filename)
"""
return XMLMenuBuilder(debug).parse(filename)
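# Illustrative usage sketch (editor's addition; assumes the Menu class defined
# earlier in this module exposes getEntries()/getPath(), and that menu files
# are installed on the system):
#
#     menu = parse()  # parses ${XDG_MENU_PREFIX}applications.menu
#     for entry in menu.getEntries():
#         if isinstance(entry, MenuEntry):
#             print(entry.DesktopFileID)
#         elif isinstance(entry, Menu):
#             print(entry.getPath())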
| 0312birdzhang/pyxdg | xdg/Menu.py | Python | lgpl-2.1 | 38,726 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RoctracerDev(CMakePackage):
"""ROC-tracer library: Runtimes Generic Callback/Activity APIs.
    The goal of the implementation is to provide a generic, runtime-independent
    profiler to trace API calls and asynchronous activity."""
homepage = "https://github.com/ROCm-Developer-Tools/roctracer"
git = "https://github.com/ROCm-Developer-Tools/roctracer.git"
url = "https://github.com/ROCm-Developer-Tools/roctracer/archive/rocm-4.5.0.tar.gz"
maintainers = ['srekolam', 'arjun-raj-kuppala']
version('4.5.0', sha256='83dcd8987e129b14da0fe74e24ce8d027333f8fedc9247a402d3683765983296')
version('4.3.1', sha256='88ada5f256a570792d1326a305663e94cf2c3b0cbd99f7e745326923882dafd2')
version('4.3.0', sha256='c3d9f408df8d4dc0e9c0026217b8c684f68e775da80b215fecb3cd24419ee6d3')
version('4.2.0', sha256='62a9c0cb1ba50b1c39a0636c886ac86e75a1a71cbf5fec05801517ceb0e67a37')
version('4.1.0', sha256='5d93de4e92895b6eb5f9d098f5dbd182d33923bd9b2ab69cf5a1abbf91d70695', deprecated=True)
version('4.0.0', sha256='f47859a46173228b597c463eda850b870e810534af5efd5f2a746067ef04edee', deprecated=True)
version('3.10.0', sha256='ac4a1d059fc34377e906071fd0e56f5434a7e0e4ded9db8faf9217a115239dec', deprecated=True)
version('3.9.0', sha256='0678f9faf45058b16923948c66d77ba2c072283c975d167899caef969169b292', deprecated=True)
version('3.8.0', sha256='5154a84ce7568cd5dba756e9508c34ae9fc62f4b0b5731f93c2ad68b21537ed1', deprecated=True)
version('3.7.0', sha256='6fa5b771e990f09c242237ab334b9f01039ec7d54ccde993e719c5d6577d1518', deprecated=True)
version('3.5.0', sha256='7af5326c9ca695642b4265232ec12864a61fd6b6056aa7c4ecd9e19c817f209e', deprecated=True)
variant('build_type', default='Release', values=("Release", "Debug", "RelWithDebInfo"), description='CMake build type')
depends_on('cmake@3:', type='build')
depends_on('python@:2', type='build', when='@:4.1.0')
depends_on('python@3:', type='build', when='@4.2.0:')
depends_on('py-cppheaderparser', type='build')
for ver in ['3.5.0', '3.7.0', '3.8.0', '3.9.0', '3.10.0', '4.0.0', '4.1.0',
'4.2.0', '4.3.0', '4.3.1', '4.5.0']:
depends_on('hsakmt-roct@' + ver, when='@' + ver)
depends_on('hsa-rocr-dev@' + ver, when='@' + ver)
depends_on('rocminfo@' + ver, when='@' + ver)
depends_on('hip@' + ver, when='@' + ver)
for ver in ['4.2.0', '4.3.0', '4.3.1', '4.5.0']:
depends_on('rocprofiler-dev@' + ver, when='@' + ver)
def setup_build_environment(self, build_env):
spec = self.spec
build_env.set("HIP_PATH", spec['hip'].prefix)
def patch(self):
filter_file('${CMAKE_PREFIX_PATH}/hsa',
'${HSA_RUNTIME_INC_PATH}', 'src/CMakeLists.txt',
string=True)
kwargs = {'ignore_absent': False, 'backup': False, 'string': False}
with working_dir('script'):
match = '^#!/usr/bin/python[23]'
python = self.spec['python'].command.path
substitute = "#!{python}".format(python=python)
if self.spec.satisfies('@:4.3.2'):
files = [
'check_trace.py', 'gen_ostream_ops.py', 'hsaap.py', 'kfdap.py'
]
else:
files = [
'check_trace.py', 'gen_ostream_ops.py', 'hsaap.py'
]
filter_file(match, substitute, *files, **kwargs)
def cmake_args(self):
args = ['-DHIP_VDI=1',
'-DCMAKE_MODULE_PATH={0}/cmake_modules'.format(
self.stage.source_path),
'-DHSA_RUNTIME_HSA_INC_PATH={0}/include'.format(
self.spec['hsa-rocr-dev'].prefix)
]
return args
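# Illustrative only (editor's note): with Spack available, this package is
# typically installed with a spec such as
#
#     spack install roctracer-dev@4.5.0
#
# which pulls in the matching hsakmt-roct, hsa-rocr-dev, rocminfo and hip
# dependencies declared above.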
| LLNL/spack | var/spack/repos/builtin/packages/roctracer-dev/package.py | Python | lgpl-2.1 | 4,005 |
#!/usr/bin/python
import gpod
import sys
if len(sys.argv) > 1:
db = gpod.Database(sys.argv[1])
else:
db = gpod.Database()
print db
for track in db[4:20]:
print track
print track['title']
for pl in db.Playlists:
print pl
for track in pl:
print " ", track
| neuschaefer/libgpod | bindings/python/examples/play_with_ipod_api.py | Python | lgpl-2.1 | 292 |
def itemTemplate():
return ['object/tangible/wearables/armor/padded/shared_armor_padded_s01_helmet.iff']
def customItemName():
return 'Padded Armor Helmet'
def customItemStackCount():
return 1
def customizationAttributes():
return []
def customizationValues():
return []
def itemStats():
stats = ['armor_efficiency_kinetic','5000','6800']
stats += ['armor_efficiency_energy','3000','4800']
stats += ['special_protection_heat','4000','5800']
stats += ['special_protection_cold','4000','5800']
stats += ['special_protection_acid','4000','5800']
stats += ['special_protection_electricity','4000','5800']
return stats
| agry/NGECore2 | scripts/loot/lootItems/armor/padded/padded_armor_helmet.py | Python | lgpl-3.0 | 644 |
# coding:utf-8
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
#from django.contrib import admin
# admin.autodiscover()
autourl = ['filemanage',
# (r'^test/$','views.test.test'),
]
urlpatterns = patterns(*tuple(autourl))
| sdgdsffdsfff/yunwei | filemanager/urls.py | Python | lgpl-3.0 | 295 |
class interface(object):
# Copyright 2012 Loris Corazza, Sakis Christakidis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
specs={}
specsGui={}
| schristakidis/p2ner | p2ner/components/output/nulloutput/nulloutput/interface.py | Python | apache-2.0 | 682 |
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Internal client library for making calls directly to the servers rather than
through the proxy.
"""
import os
import socket
from httplib import HTTPException
from time import time
from eventlet import sleep, Timeout
from swift.common.bufferedhttp import http_connect
from swiftclient import ClientException, json_loads
from swift.common.utils import FileLikeIter
from swift.common.ondisk import normalize_timestamp
from swift.common.http import HTTP_NO_CONTENT, HTTP_INSUFFICIENT_STORAGE, \
is_success, is_server_error
from swift.common.swob import HeaderKeyDict
from swift.common.utils import quote
def _get_direct_account_container(path, stype, node, part,
account, marker=None, limit=None,
prefix=None, delimiter=None, conn_timeout=5,
response_timeout=15):
"""Base class for get direct account and container.
    Do not use directly; use direct_get_account or
    direct_get_container instead.
"""
qs = 'format=json'
if marker:
qs += '&marker=%s' % quote(marker)
if limit:
qs += '&limit=%d' % limit
if prefix:
qs += '&prefix=%s' % quote(prefix)
if delimiter:
qs += '&delimiter=%s' % quote(delimiter)
with Timeout(conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'], part,
'GET', path, query_string=qs,
headers=gen_headers())
with Timeout(response_timeout):
resp = conn.getresponse()
if not is_success(resp.status):
resp.read()
raise ClientException(
                '%s server %s:%s direct GET %s gave status %s' %
(stype, node['ip'], node['port'],
repr('/%s/%s%s' % (node['device'], part, path)),
resp.status),
http_host=node['ip'], http_port=node['port'],
http_device=node['device'], http_status=resp.status,
http_reason=resp.reason)
resp_headers = {}
for header, value in resp.getheaders():
resp_headers[header.lower()] = value
if resp.status == HTTP_NO_CONTENT:
resp.read()
return resp_headers, []
return resp_headers, json_loads(resp.read())
def gen_headers(hdrs_in=None, add_ts=False):
hdrs_out = HeaderKeyDict(hdrs_in) if hdrs_in else HeaderKeyDict()
if add_ts:
hdrs_out['X-Timestamp'] = normalize_timestamp(time())
hdrs_out['User-Agent'] = 'direct-client %s' % os.getpid()
return hdrs_out
def direct_get_account(node, part, account, marker=None, limit=None,
prefix=None, delimiter=None, conn_timeout=5,
response_timeout=15):
"""
Get listings directly from the account server.
:param node: node dictionary from the ring
:param part: partition the account is on
:param account: account name
:param marker: marker query
:param limit: query limit
:param prefix: prefix query
    :param delimiter: delimiter for the query
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:returns: a tuple of (response headers, a list of containers) The response
headers will be a dict and all header names will be lowercase.
"""
    path = '/' + account
    return _get_direct_account_container(path, "Account", node, part,
                                         account, marker=marker,
                                         limit=limit, prefix=prefix,
                                         delimiter=delimiter,
                                         conn_timeout=conn_timeout,
                                         response_timeout=response_timeout)
def direct_head_container(node, part, account, container, conn_timeout=5,
response_timeout=15):
"""
Request container information directly from the container server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:returns: a dict containing the response's headers (all header names will
be lowercase)
"""
path = '/%s/%s' % (account, container)
with Timeout(conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'], part,
'HEAD', path, headers=gen_headers())
with Timeout(response_timeout):
resp = conn.getresponse()
resp.read()
if not is_success(resp.status):
raise ClientException(
'Container server %s:%s direct HEAD %s gave status %s' %
(node['ip'], node['port'],
repr('/%s/%s%s' % (node['device'], part, path)),
resp.status),
http_host=node['ip'], http_port=node['port'],
http_device=node['device'], http_status=resp.status,
http_reason=resp.reason)
resp_headers = {}
for header, value in resp.getheaders():
resp_headers[header.lower()] = value
return resp_headers
def direct_get_container(node, part, account, container, marker=None,
limit=None, prefix=None, delimiter=None,
conn_timeout=5, response_timeout=15):
"""
Get container listings directly from the container server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param marker: marker query
:param limit: query limit
:param prefix: prefix query
    :param delimiter: delimiter for the query
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:returns: a tuple of (response headers, a list of objects) The response
headers will be a dict and all header names will be lowercase.
"""
    path = '/%s/%s' % (account, container)
    return _get_direct_account_container(path, "Container", node,
                                         part, account, marker=marker,
                                         limit=limit, prefix=prefix,
                                         delimiter=delimiter,
                                         conn_timeout=conn_timeout,
                                         response_timeout=response_timeout)
def direct_delete_container(node, part, account, container, conn_timeout=5,
response_timeout=15, headers=None):
if headers is None:
headers = {}
path = '/%s/%s' % (account, container)
with Timeout(conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'], part,
'DELETE', path,
headers=gen_headers(headers, True))
with Timeout(response_timeout):
resp = conn.getresponse()
resp.read()
if not is_success(resp.status):
raise ClientException(
'Container server %s:%s direct DELETE %s gave status %s' %
(node['ip'], node['port'],
repr('/%s/%s%s' % (node['device'], part, path)), resp.status),
http_host=node['ip'], http_port=node['port'],
http_device=node['device'], http_status=resp.status,
http_reason=resp.reason)
def direct_head_object(node, part, account, container, obj, conn_timeout=5,
response_timeout=15):
"""
Request object information directly from the object server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param obj: object name
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:returns: a dict containing the response's headers (all header names will
be lowercase)
"""
path = '/%s/%s/%s' % (account, container, obj)
with Timeout(conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'], part,
'HEAD', path, headers=gen_headers())
with Timeout(response_timeout):
resp = conn.getresponse()
resp.read()
if not is_success(resp.status):
raise ClientException(
'Object server %s:%s direct HEAD %s gave status %s' %
(node['ip'], node['port'],
repr('/%s/%s%s' % (node['device'], part, path)),
resp.status),
http_host=node['ip'], http_port=node['port'],
http_device=node['device'], http_status=resp.status,
http_reason=resp.reason)
resp_headers = {}
for header, value in resp.getheaders():
resp_headers[header.lower()] = value
return resp_headers
def direct_get_object(node, part, account, container, obj, conn_timeout=5,
response_timeout=15, resp_chunk_size=None, headers=None):
"""
Get object directly from the object server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param obj: object name
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:param resp_chunk_size: if defined, chunk size of data to read.
:param headers: dict to be passed into HTTPConnection headers
:returns: a tuple of (response headers, the object's contents) The response
headers will be a dict and all header names will be lowercase.
"""
if headers is None:
headers = {}
path = '/%s/%s/%s' % (account, container, obj)
with Timeout(conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'], part,
'GET', path, headers=gen_headers(headers))
with Timeout(response_timeout):
resp = conn.getresponse()
if not is_success(resp.status):
resp.read()
raise ClientException(
'Object server %s:%s direct GET %s gave status %s' %
(node['ip'], node['port'],
repr('/%s/%s%s' % (node['device'], part, path)), resp.status),
http_host=node['ip'], http_port=node['port'],
http_device=node['device'], http_status=resp.status,
http_reason=resp.reason)
if resp_chunk_size:
def _object_body():
buf = resp.read(resp_chunk_size)
while buf:
yield buf
buf = resp.read(resp_chunk_size)
object_body = _object_body()
else:
object_body = resp.read()
resp_headers = {}
for header, value in resp.getheaders():
resp_headers[header.lower()] = value
return resp_headers, object_body
def direct_put_object(node, part, account, container, name, contents,
content_length=None, etag=None, content_type=None,
headers=None, conn_timeout=5, response_timeout=15,
chunk_size=65535):
"""
Put object directly from the object server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param name: object name
:param contents: an iterable or string to read object data from
:param content_length: value to send as content-length header
:param etag: etag of contents
:param content_type: value to send as content-type header
:param headers: additional headers to include in the request
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:param chunk_size: if defined, chunk size of data to send.
:returns: etag from the server response
"""
path = '/%s/%s/%s' % (account, container, name)
if headers is None:
headers = {}
if etag:
headers['ETag'] = etag.strip('"')
if content_length is not None:
headers['Content-Length'] = str(content_length)
else:
for n, v in headers.iteritems():
if n.lower() == 'content-length':
content_length = int(v)
if content_type is not None:
headers['Content-Type'] = content_type
else:
headers['Content-Type'] = 'application/octet-stream'
if not contents:
headers['Content-Length'] = '0'
if isinstance(contents, basestring):
contents = [contents]
    # In case the caller wants to insert an object with a specific age (timestamp)
add_ts = 'X-Timestamp' not in headers
if content_length is None:
headers['Transfer-Encoding'] = 'chunked'
with Timeout(conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'], part,
'PUT', path, headers=gen_headers(headers, add_ts))
contents_f = FileLikeIter(contents)
if content_length is None:
chunk = contents_f.read(chunk_size)
while chunk:
conn.send('%x\r\n%s\r\n' % (len(chunk), chunk))
chunk = contents_f.read(chunk_size)
conn.send('0\r\n\r\n')
else:
left = content_length
while left > 0:
size = chunk_size
if size > left:
size = left
chunk = contents_f.read(size)
if not chunk:
break
conn.send(chunk)
left -= len(chunk)
with Timeout(response_timeout):
resp = conn.getresponse()
resp.read()
if not is_success(resp.status):
raise ClientException(
'Object server %s:%s direct PUT %s gave status %s' %
(node['ip'], node['port'],
repr('/%s/%s%s' % (node['device'], part, path)),
resp.status),
http_host=node['ip'], http_port=node['port'],
http_device=node['device'], http_status=resp.status,
http_reason=resp.reason)
return resp.getheader('etag').strip('"')
def direct_post_object(node, part, account, container, name, headers,
conn_timeout=5, response_timeout=15):
"""
Direct update to object metadata on object server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param name: object name
:param headers: headers to store as metadata
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:raises ClientException: HTTP POST request failed
"""
path = '/%s/%s/%s' % (account, container, name)
with Timeout(conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'], part,
'POST', path, headers=gen_headers(headers, True))
with Timeout(response_timeout):
resp = conn.getresponse()
resp.read()
if not is_success(resp.status):
raise ClientException(
'Object server %s:%s direct POST %s gave status %s' %
(node['ip'], node['port'],
repr('/%s/%s%s' % (node['device'], part, path)),
resp.status),
http_host=node['ip'], http_port=node['port'],
http_device=node['device'], http_status=resp.status,
http_reason=resp.reason)
def direct_delete_object(node, part, account, container, obj,
conn_timeout=5, response_timeout=15, headers=None):
"""
Delete object directly from the object server.
:param node: node dictionary from the ring
:param part: partition the container is on
:param account: account name
:param container: container name
:param obj: object name
:param conn_timeout: timeout in seconds for establishing the connection
:param response_timeout: timeout in seconds for getting the response
:returns: response from server
"""
if headers is None:
headers = {}
path = '/%s/%s/%s' % (account, container, obj)
with Timeout(conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'], part,
'DELETE', path, headers=gen_headers(headers, True))
with Timeout(response_timeout):
resp = conn.getresponse()
resp.read()
if not is_success(resp.status):
raise ClientException(
'Object server %s:%s direct DELETE %s gave status %s' %
(node['ip'], node['port'],
repr('/%s/%s%s' % (node['device'], part, path)),
resp.status),
http_host=node['ip'], http_port=node['port'],
http_device=node['device'], http_status=resp.status,
http_reason=resp.reason)
def retry(func, *args, **kwargs):
"""
Helper function to retry a given function a number of times.
:param func: callable to be called
:param retries: number of retries
:param error_log: logger for errors
:param args: arguments to send to func
    :param kwargs: keyword arguments to send to func (if retries or
                   error_log are sent, they will be deleted from kwargs
                   before sending on to func)
    :returns: result of func
"""
retries = 5
if 'retries' in kwargs:
retries = kwargs['retries']
del kwargs['retries']
error_log = None
if 'error_log' in kwargs:
error_log = kwargs['error_log']
del kwargs['error_log']
attempts = 0
backoff = 1
while attempts <= retries:
attempts += 1
try:
return attempts, func(*args, **kwargs)
except (socket.error, HTTPException, Timeout) as err:
if error_log:
error_log(err)
if attempts > retries:
raise
except ClientException as err:
if error_log:
error_log(err)
if attempts > retries or not is_server_error(err.http_status) or \
err.http_status == HTTP_INSUFFICIENT_STORAGE:
raise
sleep(backoff)
backoff *= 2
    # Shouldn't actually get down here, but just in case.
    if args and 'ip' in args[0]:
        raise ClientException('Too many retries',
                              http_host=args[0]['ip'],
                              http_port=args[0]['port'],
                              http_device=args[0]['device'])
    else:
        raise ClientException('Too many retries')
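# Illustrative usage sketch (editor's addition; the node dict and partition
# below are placeholders, not real ring data):
#
#     node = {'ip': '127.0.0.1', 'port': 6001, 'device': 'sdb1'}
#     attempts, headers = retry(direct_head_container, node, 0,
#                               'AUTH_test', 'mycontainer', retries=3)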
| citrix-openstack-build/swift | swift/common/direct_client.py | Python | apache-2.0 | 19,446 |
# Copyright 2019 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
def header(input_api):
"""Returns the expected license header regexp for this project."""
current_year = int(input_api.time.strftime('%Y'))
allowed_years = (str(s) for s in reversed(range(2011, current_year + 1)))
years_re = '(' + '|'.join(allowed_years) + ')'
license_header = (
r'.*? Copyright %(year)s The LUCI Authors\. '
r'All rights reserved\.\n'
r'.*? Use of this source code is governed under the Apache License, '
r'Version 2\.0\n'
r'.*? that can be found in the LICENSE file\.(?: \*/)?\n'
) % {
'year': years_re,
}
return license_header
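# For illustration (editor's note): with a year inside the allowed range, a
# file header like this module's own first three lines matches the regexp
# returned above:
#
#   # Copyright 2019 The LUCI Authors. All rights reserved.
#   # Use of this source code is governed under the Apache License, Version 2.0
#   # that can be found in the LICENSE file.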
def CommonChecks(input_api, output_api):
return input_api.canned_checks.PanProjectChecks(
input_api, output_api, license_header=header(input_api),
excluded_paths=[
r'.+_pb2\.py',
],
)
def CheckChangeOnUpload(input_api, output_api):
return CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
results = CommonChecks(input_api, output_api)
# Explicitly run these independently because they update files on disk and are
# called implicitly with the other tests. The vpython check is nominally
# locked with a file lock, but updating the protos, etc. of recipes.py is not.
recipes_py = input_api.os_path.join(
input_api.PresubmitLocalPath(), 'recipes.py')
run_first = (
input_api.canned_checks.CheckVPythonSpec(input_api, output_api) + [
input_api.Command(
'Compile recipe protos',
['python', recipes_py, 'fetch'],
{},
output_api.PresubmitError,
),
])
for cmd in run_first:
result = input_api.thread_pool.CallCommand(cmd)
if result:
results.append(result)
# Now run all the unit tests except run_test in parallel and then run run_test
# separately. The reason is that run_test depends on the wall clock on the
# host and if the host gets busy, the tests are likely to be flaky.
results.extend(input_api.RunTests(
input_api.canned_checks.GetUnitTestsInDirectory(
input_api, output_api, 'unittests',
files_to_check=[r'.+_test\.py'],
files_to_skip=[r'run_test\.py'],
)
))
results.extend(input_api.RunTests(
input_api.canned_checks.GetUnitTestsInDirectory(
input_api, output_api, 'unittests',
files_to_check=[r'run_test\.py'],
)
))
return results
| luci/recipes-py | PRESUBMIT.py | Python | apache-2.0 | 2,563 |
# -*- coding: utf-8 -*-
#
# Copyright 2013, Qunar OPSDEV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: zhen.pei <[email protected]>
# Author: Jianing Yang <[email protected]>
#
from qg.core import gettextutils
gettextutils.install('testing', lazy=True)
from oslo_config import cfg
from testtools import TestCase
import os
CONF = cfg.CONF
class TestGettext(TestCase):
def setUp(self):
        # TODO(jianingy): automatically set the environment variable
        # TESTING_LOCALEDIR; the test cases use a Chinese locale
localedir = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', 'locale'))
cmd = ("msgfmt -o %s/zh_CN/LC_MESSAGES/testing.mo "
"%s/zh_CN/LC_MESSAGES/testing.po") % (localedir, localedir)
os.system(cmd)
os.environ['TESTING_LOCALEDIR'] = localedir
os.environ['LC_ALL'] = 'zh_CN.UTF-8'
CONF.set_default('domain', 'testing', 'i18n')
super(TestGettext, self).setUp()
def test_gettext_without_translation(self):
self.assertEqual(_('Hello'), 'Hello')
def test_gettext_with_translation(self):
self.assertEqual(_('Hello, world'), u'世界你好')
| shadow4125/qg.core | tests/unit/test_gettextutils.py | Python | apache-2.0 | 1,757 |
# -*- coding: utf-8 -*-
# Copyright 2013 UNED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
from HTMLParser import HTMLParser
from optparse import make_option
import StringIO
from zipfile import ZipFile, ZipInfo
from django.core.management.base import BaseCommand, CommandError
from moocng.badges.models import Award
from moocng.courses.models import Course
class Command(BaseCommand):
    help = ("create a zip bundle of CSV files with assigned badges per course")
option_list = BaseCommand.option_list + (
make_option('-f', '--filename',
action='store',
dest='filename',
default="",
help="Filename.zip to save the csv files"),
)
def error(self, message):
self.stderr.write("%s\n" % message.encode("ascii", "replace"))
def message(self, message):
self.stdout.write("%s\n" % message.encode("ascii", "replace"))
def handle(self, *args, **options):
if not options["filename"]:
raise CommandError("-f filename.zip is required")
courses = Course.objects.all()
if not courses:
raise CommandError("Courses not found")
if options["filename"].endswith(".zip"):
self.filename = options["filename"]
else:
self.filename = "%s.zip" % options["filename"]
h = HTMLParser()
zip = ZipFile(self.filename, mode="w")
awards_file = StringIO.StringIO()
awards_csv = csv.writer(awards_file, quoting=csv.QUOTE_ALL)
headers = ["Course", "Badge", "Number of awards"]
awards_csv.writerow(headers)
for course in courses:
            self.message("Calculating awards for course %s" % course.slug)
awards_counter = 0
badge_name = u''
if not course.completion_badge is None:
awards_counter = Award.objects.filter(badge=course.completion_badge).count()
badge_name = h.unescape(course.completion_badge.title.encode("ascii", "ignore"))
row = []
row.append(h.unescape(course.name.encode("ascii", "ignore")))
row.append(badge_name)
row.append(awards_counter)
awards_csv.writerow(row)
awards_file.seek(0)
awards_fileinfo = ZipInfo("awards.csv")
zip.writestr(awards_fileinfo, awards_file.read())
awards_file.close()
zip.close()
self.message("Created %s file" % self.filename)
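# Illustrative only (editor's note): as a Django management command this is
# normally invoked by module name, e.g.
#
#     python manage.py csv_awards_by_course -f awards.zip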
| OpenMOOC/moocng | moocng/courses/management/commands/csv_awards_by_course.py | Python | apache-2.0 | 3,018 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow import models
import internal_unit_testing
import pytest
@pytest.fixture(autouse=True, scope="function")
def set_variables(airflow_database):
models.Variable.set("bucket_path", "gs://example_bucket")
models.Variable.set("project_id", "example-project")
models.Variable.set("gce_zone", "us-central1-f")
yield
models.Variable.delete('bucket_path')
models.Variable.delete('project_id')
models.Variable.delete('gce_zone')
def test_dag_import():
"""Test that the DAG file can be successfully imported.
This tests that the DAG can be parsed, but does not run it in an Airflow
environment. This is a recommended confidence check by the official Airflow
docs: https://airflow.incubator.apache.org/tutorial.html#testing
"""
from . import dataflowtemplateoperator_tutorial as module
internal_unit_testing.assert_has_valid_dag(module)
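# Editor's note (illustrative): this module is normally collected by pytest
# together with the shared fixtures provided elsewhere in the repository, e.g.
#
#     pytest composer/workflows/dataflowtemplateoperator_tutorial_test.py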
| GoogleCloudPlatform/python-docs-samples | composer/workflows/dataflowtemplateoperator_tutorial_test.py | Python | apache-2.0 | 1,477 |
import datetime
import logging
import multiprocessing
import os
import shutil
from mimetypes import guess_type
from typing import Any, Dict, Iterable, List, Optional, Tuple
import orjson
from bs4 import BeautifulSoup
from django.conf import settings
from django.core.cache import cache
from django.db import connection
from django.utils.timezone import now as timezone_now
from psycopg2.extras import execute_values
from psycopg2.sql import SQL, Identifier
from analytics.models import RealmCount, StreamCount, UserCount
from zerver.lib.actions import (
UserMessageLite,
bulk_insert_ums,
do_change_avatar_fields,
do_change_realm_plan_type,
)
from zerver.lib.avatar_hash import user_avatar_path_from_ids
from zerver.lib.bulk_create import bulk_create_users, bulk_set_users_or_streams_recipient_fields
from zerver.lib.export import DATE_FIELDS, Field, Path, Record, TableData, TableName
from zerver.lib.markdown import markdown_convert
from zerver.lib.markdown import version as markdown_version
from zerver.lib.message import get_last_message_id
from zerver.lib.server_initialization import create_internal_realm, server_initialized
from zerver.lib.streams import render_stream_description
from zerver.lib.timestamp import datetime_to_timestamp
from zerver.lib.upload import BadImageError, get_bucket, sanitize_name, upload_backend
from zerver.lib.utils import generate_api_key, process_list_in_batches
from zerver.models import (
AlertWord,
Attachment,
BotConfigData,
BotStorageData,
Client,
CustomProfileField,
CustomProfileFieldValue,
DefaultStream,
GroupGroupMembership,
Huddle,
Message,
MutedUser,
Reaction,
Realm,
RealmAuditLog,
RealmDomain,
RealmEmoji,
RealmFilter,
RealmPlayground,
RealmUserDefault,
Recipient,
Service,
Stream,
Subscription,
UserActivity,
UserActivityInterval,
UserGroup,
UserGroupMembership,
UserHotspot,
UserMessage,
UserPresence,
UserProfile,
UserStatus,
UserTopic,
get_huddle_hash,
get_realm,
get_system_bot,
get_user_profile_by_id,
)
realm_tables = [
("zerver_defaultstream", DefaultStream, "defaultstream"),
("zerver_realmemoji", RealmEmoji, "realmemoji"),
("zerver_realmdomain", RealmDomain, "realmdomain"),
("zerver_realmfilter", RealmFilter, "realmfilter"),
("zerver_realmplayground", RealmPlayground, "realmplayground"),
] # List[Tuple[TableName, Any, str]]
# ID_MAP is a dictionary that maps table names to dictionaries
# that map old ids to new ids. We use this in
# re_map_foreign_keys and other places.
#
# We explicitly initialize ID_MAP with the tables that support
# id re-mapping.
#
# Code reviewers: give these tables extra scrutiny, as we need to
# make sure to reload related tables AFTER we re-map the ids.
ID_MAP: Dict[str, Dict[int, int]] = {
"alertword": {},
"client": {},
"user_profile": {},
"huddle": {},
"realm": {},
"stream": {},
"recipient": {},
"subscription": {},
"defaultstream": {},
"reaction": {},
"realmemoji": {},
"realmdomain": {},
"realmfilter": {},
"realmplayground": {},
"message": {},
"user_presence": {},
"userstatus": {},
"useractivity": {},
"useractivityinterval": {},
"usermessage": {},
"customprofilefield": {},
"customprofilefieldvalue": {},
"attachment": {},
"realmauditlog": {},
"recipient_to_huddle_map": {},
"userhotspot": {},
"usertopic": {},
"muteduser": {},
"service": {},
"usergroup": {},
"usergroupmembership": {},
"groupgroupmembership": {},
"botstoragedata": {},
"botconfigdata": {},
"analytics_realmcount": {},
"analytics_streamcount": {},
"analytics_usercount": {},
"realmuserdefault": {},
}
id_map_to_list: Dict[str, Dict[int, List[int]]] = {
"huddle_to_user_list": {},
}
path_maps: Dict[str, Dict[str, str]] = {
"attachment_path": {},
}
def update_id_map(table: TableName, old_id: int, new_id: int) -> None:
if table not in ID_MAP:
raise Exception(
f"""
Table {table} is not initialized in ID_MAP, which could
mean that we have not thought through circular
dependencies.
"""
)
ID_MAP[table][old_id] = new_id
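# Editor's illustration (hypothetical values): after
#
#     update_id_map("user_profile", old_id=17, new_id=204)
#
# later calls to re_map_foreign_keys(...) for fields that reference
# user_profile will rewrite 17 -> 204.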
def fix_datetime_fields(data: TableData, table: TableName) -> None:
for item in data[table]:
for field_name in DATE_FIELDS[table]:
if item[field_name] is not None:
item[field_name] = datetime.datetime.fromtimestamp(
item[field_name], tz=datetime.timezone.utc
)
def fix_upload_links(data: TableData, message_table: TableName) -> None:
"""
Because the URLs for uploaded files encode the realm ID of the
organization being imported (which is only determined at import
time), we need to rewrite the URLs of links to uploaded files
during the import process.
"""
for message in data[message_table]:
if message["has_attachment"] is True:
for key, value in path_maps["attachment_path"].items():
if key in message["content"]:
message["content"] = message["content"].replace(key, value)
if message["rendered_content"]:
message["rendered_content"] = message["rendered_content"].replace(
key, value
)
def create_subscription_events(data: TableData, realm_id: int) -> None:
"""
When the export data doesn't contain the table `zerver_realmauditlog`,
this function creates RealmAuditLog objects for `subscription_created`
type event for all the existing Stream subscriptions.
This is needed for all the export tools which do not include the
table `zerver_realmauditlog` (Slack, Gitter, etc.) because the appropriate
data about when a user was subscribed is not exported by the third-party
service.
"""
all_subscription_logs = []
event_last_message_id = get_last_message_id()
event_time = timezone_now()
recipient_id_to_stream_id = {
d["id"]: d["type_id"] for d in data["zerver_recipient"] if d["type"] == Recipient.STREAM
}
for sub in data["zerver_subscription"]:
recipient_id = sub["recipient_id"]
stream_id = recipient_id_to_stream_id.get(recipient_id)
if stream_id is None:
continue
user_id = sub["user_profile_id"]
all_subscription_logs.append(
RealmAuditLog(
realm_id=realm_id,
acting_user_id=user_id,
modified_user_id=user_id,
modified_stream_id=stream_id,
event_last_message_id=event_last_message_id,
event_time=event_time,
event_type=RealmAuditLog.SUBSCRIPTION_CREATED,
)
)
RealmAuditLog.objects.bulk_create(all_subscription_logs)
def fix_service_tokens(data: TableData, table: TableName) -> None:
"""
The tokens in the services are created by 'generate_api_key'.
As the tokens are unique, they should be re-created for the imports.
"""
for item in data[table]:
item["token"] = generate_api_key()
def process_huddle_hash(data: TableData, table: TableName) -> None:
"""
Build new huddle hashes with the updated ids of the users
"""
for huddle in data[table]:
user_id_list = id_map_to_list["huddle_to_user_list"][huddle["id"]]
huddle["huddle_hash"] = get_huddle_hash(user_id_list)
def get_huddles_from_subscription(data: TableData, table: TableName) -> None:
"""
    Extract the IDs of the user_profiles involved in a huddle from the subscription object.
This helps to generate a unique huddle hash from the updated user_profile ids
"""
id_map_to_list["huddle_to_user_list"] = {
value: [] for value in ID_MAP["recipient_to_huddle_map"].values()
}
for subscription in data[table]:
if subscription["recipient"] in ID_MAP["recipient_to_huddle_map"]:
huddle_id = ID_MAP["recipient_to_huddle_map"][subscription["recipient"]]
id_map_to_list["huddle_to_user_list"][huddle_id].append(subscription["user_profile_id"])
def fix_customprofilefield(data: TableData) -> None:
"""
In CustomProfileField with 'field_type' like 'USER', the IDs need to be
re-mapped.
"""
field_type_USER_id_list = []
for item in data["zerver_customprofilefield"]:
if item["field_type"] == CustomProfileField.USER:
field_type_USER_id_list.append(item["id"])
for item in data["zerver_customprofilefieldvalue"]:
if item["field_id"] in field_type_USER_id_list:
old_user_id_list = orjson.loads(item["value"])
new_id_list = re_map_foreign_keys_many_to_many_internal(
table="zerver_customprofilefieldvalue",
field_name="value",
related_table="user_profile",
old_id_list=old_user_id_list,
)
item["value"] = orjson.dumps(new_id_list).decode()
def fix_message_rendered_content(
realm: Realm, sender_map: Dict[int, Record], messages: List[Record]
) -> None:
"""
This function sets the rendered_content of all the messages
after the messages have been imported from a non-Zulip platform.
"""
for message in messages:
if message["rendered_content"] is not None:
# For Zulip->Zulip imports, we use the original rendered
# Markdown; this avoids issues where e.g. a mention can no
# longer render properly because a user has changed their
# name.
#
# However, we still need to update the data-user-id and
# similar values stored on mentions, stream mentions, and
# similar syntax in the rendered HTML.
soup = BeautifulSoup(message["rendered_content"], "html.parser")
user_mentions = soup.findAll("span", {"class": "user-mention"})
if len(user_mentions) != 0:
user_id_map = ID_MAP["user_profile"]
for mention in user_mentions:
if not mention.has_attr("data-user-id"):
# Legacy mentions don't have a data-user-id
# field; we should just import them
# unmodified.
continue
if mention["data-user-id"] == "*":
# No rewriting is required for wildcard mentions
continue
old_user_id = int(mention["data-user-id"])
if old_user_id in user_id_map:
mention["data-user-id"] = str(user_id_map[old_user_id])
message["rendered_content"] = str(soup)
stream_mentions = soup.findAll("a", {"class": "stream"})
if len(stream_mentions) != 0:
stream_id_map = ID_MAP["stream"]
for mention in stream_mentions:
old_stream_id = int(mention["data-stream-id"])
if old_stream_id in stream_id_map:
mention["data-stream-id"] = str(stream_id_map[old_stream_id])
message["rendered_content"] = str(soup)
user_group_mentions = soup.findAll("span", {"class": "user-group-mention"})
if len(user_group_mentions) != 0:
user_group_id_map = ID_MAP["usergroup"]
for mention in user_group_mentions:
old_user_group_id = int(mention["data-user-group-id"])
if old_user_group_id in user_group_id_map:
mention["data-user-group-id"] = str(user_group_id_map[old_user_group_id])
message["rendered_content"] = str(soup)
continue
try:
content = message["content"]
sender_id = message["sender_id"]
sender = sender_map[sender_id]
sent_by_bot = sender["is_bot"]
translate_emoticons = sender["translate_emoticons"]
# We don't handle alert words on import from third-party
# platforms, since they generally don't have an "alert
# words" type feature, and notifications aren't important anyway.
realm_alert_words_automaton = None
rendered_content = markdown_convert(
content=content,
realm_alert_words_automaton=realm_alert_words_automaton,
message_realm=realm,
sent_by_bot=sent_by_bot,
translate_emoticons=translate_emoticons,
).rendered_content
message["rendered_content"] = rendered_content
message["rendered_content_version"] = markdown_version
except Exception:
# This generally happens with two possible causes:
# * rendering Markdown throwing an uncaught exception
# * rendering Markdown failing with the exception being
            #   caught in Markdown (which then returns None, causing the
# rendered_content assert above to fire).
logging.warning(
"Error in Markdown rendering for message ID %s; continuing", message["id"]
)
def current_table_ids(data: TableData, table: TableName) -> List[int]:
"""
Returns the ids present in the current table
"""
id_list = []
for item in data[table]:
id_list.append(item["id"])
return id_list
def idseq(model_class: Any) -> str:
if model_class == RealmDomain:
return "zerver_realmalias_id_seq"
elif model_class == BotStorageData:
return "zerver_botuserstatedata_id_seq"
elif model_class == BotConfigData:
return "zerver_botuserconfigdata_id_seq"
elif model_class == UserTopic:
# The database table for this model was renamed from `mutedtopic` to
# `usertopic`, but the name of the sequence object remained the same.
return "zerver_mutedtopic_id_seq"
return f"{model_class._meta.db_table}_id_seq"
def allocate_ids(model_class: Any, count: int) -> List[int]:
"""
Increases the sequence number for a given table by the amount of objects being
imported into that table. Hence, this gives a reserved range of IDs to import the
converted Slack objects into the tables.
"""
conn = connection.cursor()
sequence = idseq(model_class)
conn.execute("select nextval(%s) from generate_series(1, %s)", [sequence, count])
query = conn.fetchall() # Each element in the result is a tuple like (5,)
conn.close()
# convert List[Tuple[int]] to List[int]
return [item[0] for item in query]
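# Illustrative sketch only: the hypothetical helper below is not used by the
# importer; it just shows how allocate_ids() is normally paired with
# update_id_map(), as in update_model_ids() and update_message_foreign_keys().
def _example_reserve_and_map_ids(model: Any, related_table: TableName, old_ids: List[int]) -> None:
    # Reserve len(old_ids) fresh primary keys from the model's id sequence.
    new_ids = allocate_ids(model, len(old_ids))
    # Record the old -> new mapping so later re_map_foreign_keys() calls
    # can rewrite references consistently.
    for old_id, new_id in zip(old_ids, new_ids):
        update_id_map(table=related_table, old_id=old_id, new_id=new_id)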
def convert_to_id_fields(data: TableData, table: TableName, field_name: Field) -> None:
"""
When Django gives us dict objects via model_to_dict, the foreign
key fields are `foo`, but we want `foo_id` for the bulk insert.
    This function handles the simple case where we only need to rename
    the field. For cases where we need to munge ids in the
database, see re_map_foreign_keys.
"""
for item in data[table]:
item[field_name + "_id"] = item[field_name]
del item[field_name]
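# Illustrative sketch only (hypothetical table name): demonstrates the rename
# performed by convert_to_id_fields on a single example row.
def _example_convert_to_id_fields() -> TableData:
    data: TableData = {"example_table": [{"id": 7, "realm": 3}]}
    convert_to_id_fields(data, "example_table", "realm")
    # data is now {"example_table": [{"id": 7, "realm_id": 3}]}
    return data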
def re_map_foreign_keys(
data: TableData,
table: TableName,
field_name: Field,
related_table: TableName,
verbose: bool = False,
id_field: bool = False,
recipient_field: bool = False,
) -> None:
"""
    This is a wrapper function for all the realm data tables.
    Only avatar and attachment records are passed directly to the internal
    function, because their data formats differ: realm data tables use
    TableData, while avatar and attachment records use List[Record].
"""
# See comments in bulk_import_user_message_data.
assert "usermessage" not in related_table
re_map_foreign_keys_internal(
data[table],
table,
field_name,
related_table,
verbose,
id_field,
recipient_field,
)
def re_map_foreign_keys_internal(
data_table: List[Record],
table: TableName,
field_name: Field,
related_table: TableName,
verbose: bool = False,
id_field: bool = False,
recipient_field: bool = False,
) -> None:
"""
We occasionally need to assign new ids to rows during the
import/export process, to accommodate things like existing rows
already being in tables. See bulk_import_client for more context.
The tricky part is making sure that foreign key references
are in sync with the new ids, and this fixer function does
the re-mapping. (It also appends `_id` to the field.)
"""
lookup_table = ID_MAP[related_table]
for item in data_table:
old_id = item[field_name]
if recipient_field:
if related_table == "stream" and item["type"] == 2:
pass
elif related_table == "user_profile" and item["type"] == 1:
pass
elif related_table == "huddle" and item["type"] == 3:
                # Save the recipient id alongside the huddle id, so that we can
                # later extract the user_profile ids involved in a huddle from
                # the subscription objects; see get_huddles_from_subscription.
ID_MAP["recipient_to_huddle_map"][item["id"]] = lookup_table[old_id]
else:
continue
old_id = item[field_name]
if old_id in lookup_table:
new_id = lookup_table[old_id]
if verbose:
logging.info(
"Remapping %s %s from %s to %s", table, field_name + "_id", old_id, new_id
)
else:
new_id = old_id
if not id_field:
item[field_name + "_id"] = new_id
del item[field_name]
else:
item[field_name] = new_id
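# Worked example (hypothetical ids): with ID_MAP["realm"] == {1: 42}, a row
# {"id": 10, "realm": 1} processed with field_name="realm" becomes
# {"id": 10, "realm_id": 42}; rows whose old id is missing from the lookup
# table keep their original value, and with id_field=True the key is left
# as-is instead of having "_id" appended.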
def re_map_realm_emoji_codes(data: TableData, *, table_name: str) -> None:
"""
Some tables, including Reaction and UserStatus, contain a form of
foreign key reference to the RealmEmoji table in the form of
`str(realm_emoji.id)` when `reaction_type="realm_emoji"`.
See the block comment for emoji_code in the AbstractEmoji
definition for more details.
"""
realm_emoji_dct = {}
for row in data["zerver_realmemoji"]:
realm_emoji_dct[row["id"]] = row
for row in data[table_name]:
if row["reaction_type"] == Reaction.REALM_EMOJI:
old_realm_emoji_id = int(row["emoji_code"])
            # Fail hard if we don't have a mapping for this realm emoji id.
new_realm_emoji_id = ID_MAP["realmemoji"][old_realm_emoji_id]
# This is a very important sanity check.
realm_emoji_row = realm_emoji_dct[new_realm_emoji_id]
assert realm_emoji_row["name"] == row["emoji_name"]
# Now update emoji_code to the new id.
row["emoji_code"] = str(new_realm_emoji_id)
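# Example (hypothetical ids): a reaction row with emoji_code "15" and
# ID_MAP["realmemoji"] == {15: 73} ends up with emoji_code "73"; the name
# assertion above guards against the remapped id pointing at a different emoji.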
def re_map_foreign_keys_many_to_many(
data: TableData,
table: TableName,
field_name: Field,
related_table: TableName,
verbose: bool = False,
) -> None:
"""
We need to assign new ids to rows during the import/export
process.
The tricky part is making sure that foreign key references
are in sync with the new ids, and this wrapper function does
the re-mapping only for ManyToMany fields.
"""
for item in data[table]:
old_id_list = item[field_name]
new_id_list = re_map_foreign_keys_many_to_many_internal(
table, field_name, related_table, old_id_list, verbose
)
item[field_name] = new_id_list
del item[field_name]
def re_map_foreign_keys_many_to_many_internal(
table: TableName,
field_name: Field,
related_table: TableName,
old_id_list: List[int],
verbose: bool = False,
) -> List[int]:
"""
This is an internal function for tables with ManyToMany fields,
which takes the old ID list of the ManyToMany relation and returns the
new updated ID list.
"""
lookup_table = ID_MAP[related_table]
new_id_list = []
for old_id in old_id_list:
if old_id in lookup_table:
new_id = lookup_table[old_id]
if verbose:
logging.info(
"Remapping %s %s from %s to %s", table, field_name + "_id", old_id, new_id
)
else:
new_id = old_id
new_id_list.append(new_id)
return new_id_list
def fix_bitfield_keys(data: TableData, table: TableName, field_name: Field) -> None:
for item in data[table]:
item[field_name] = item[field_name + "_mask"]
del item[field_name + "_mask"]
def fix_realm_authentication_bitfield(data: TableData, table: TableName, field_name: Field) -> None:
    """
    Fix up the authentication_methods bitfield, converting the exported list
    of (backend, enabled) pairs into the integer value stored in the database.
    """
for item in data[table]:
values_as_bitstring = "".join("1" if field[1] else "0" for field in item[field_name])
values_as_int = int(values_as_bitstring, 2)
item[field_name] = values_as_int
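# Illustrative sketch only (hypothetical helper, not used elsewhere): the same
# conversion as above, applied to a single exported authentication_methods value.
def _example_auth_methods_to_int(pairs: List[Tuple[str, bool]]) -> int:
    # e.g. [("Email", True), ("GitHub", False), ("Google", True)] -> "101" -> 5
    return int("".join("1" if enabled else "0" for _, enabled in pairs), 2)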
def remove_denormalized_recipient_column_from_data(data: TableData) -> None:
"""
    The recipient column shouldn't be imported; we'll set the correct values
    when the Recipient table gets imported.
"""
for stream_dict in data["zerver_stream"]:
if "recipient" in stream_dict:
del stream_dict["recipient"]
for user_profile_dict in data["zerver_userprofile"]:
if "recipient" in user_profile_dict:
del user_profile_dict["recipient"]
for huddle_dict in data["zerver_huddle"]:
if "recipient" in huddle_dict:
del huddle_dict["recipient"]
def get_db_table(model_class: Any) -> str:
"""E.g. (RealmDomain -> 'zerver_realmdomain')"""
return model_class._meta.db_table
def update_model_ids(model: Any, data: TableData, related_table: TableName) -> None:
table = get_db_table(model)
# Important: remapping usermessage rows is
# not only unnecessary, it's expensive and can cause
# memory errors. We don't even use ids from ID_MAP.
assert "usermessage" not in table
old_id_list = current_table_ids(data, table)
allocated_id_list = allocate_ids(model, len(data[table]))
for item in range(len(data[table])):
update_id_map(related_table, old_id_list[item], allocated_id_list[item])
re_map_foreign_keys(data, table, "id", related_table=related_table, id_field=True)
def bulk_import_user_message_data(data: TableData, dump_file_id: int) -> None:
model = UserMessage
table = "zerver_usermessage"
lst = data[table]
# IMPORTANT NOTE: We do not use any primary id
# data from either the import itself or ID_MAP.
# We let the DB itself generate ids. Note that
# no tables use user_message.id as a foreign key,
# so we can safely avoid all re-mapping complexity.
def process_batch(items: List[Dict[str, Any]]) -> None:
ums = [
UserMessageLite(
user_profile_id=item["user_profile_id"],
message_id=item["message_id"],
flags=item["flags"],
)
for item in items
]
bulk_insert_ums(ums)
chunk_size = 10000
process_list_in_batches(
lst=lst,
chunk_size=chunk_size,
process_batch=process_batch,
)
logging.info("Successfully imported %s from %s[%s].", model, table, dump_file_id)
def bulk_import_model(data: TableData, model: Any, dump_file_id: Optional[str] = None) -> None:
table = get_db_table(model)
    # TODO: deprecate dump_file_id
model.objects.bulk_create(model(**item) for item in data[table])
if dump_file_id is None:
logging.info("Successfully imported %s from %s.", model, table)
else:
logging.info("Successfully imported %s from %s[%s].", model, table, dump_file_id)
# Client is a table shared by multiple realms, so in order to
# correctly import multiple realms into the same server, we need to
# check whether a Client object already exists, and remap all Client
# IDs to the values in the new DB.
def bulk_import_client(data: TableData, model: Any, table: TableName) -> None:
for item in data[table]:
try:
client = Client.objects.get(name=item["name"])
except Client.DoesNotExist:
client = Client.objects.create(name=item["name"])
update_id_map(table="client", old_id=item["id"], new_id=client.id)
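# Example (hypothetical ids): if the export contains Client(id=4, name="website")
# and this server already has Client(id=1, name="website"), the loop above maps
# old id 4 to new id 1 via update_id_map, so later re_map_foreign_keys calls on
# tables like zerver_useractivity point at the existing Client row.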
def fix_subscriptions_is_user_active_column(
data: TableData, user_profiles: List[UserProfile]
) -> None:
table = get_db_table(Subscription)
user_id_to_active_status = {user.id: user.is_active for user in user_profiles}
for sub in data[table]:
sub["is_user_active"] = user_id_to_active_status[sub["user_profile_id"]]
def process_avatars(record: Dict[str, Any]) -> None:
# We need to re-import upload_backend here, because in the
# import-export unit tests, the Zulip settings are overridden for
# specific tests to control the choice of upload backend, and this
# reimport ensures that we use the right choice for the current
# test. Outside the test suite, settings never change after the
# server is started, so this import will have no effect in production.
from zerver.lib.upload import upload_backend
if record["s3_path"].endswith(".original"):
user_profile = get_user_profile_by_id(record["user_profile_id"])
if settings.LOCAL_UPLOADS_DIR is not None:
avatar_path = user_avatar_path_from_ids(user_profile.id, record["realm_id"])
medium_file_path = (
os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", avatar_path) + "-medium.png"
)
if os.path.exists(medium_file_path):
# We remove the image here primarily to deal with
# issues when running the import script multiple
# times in development (where one might reuse the
# same realm ID from a previous iteration).
os.remove(medium_file_path)
try:
upload_backend.ensure_avatar_image(user_profile=user_profile, is_medium=True)
if record.get("importer_should_thumbnail"):
upload_backend.ensure_avatar_image(user_profile=user_profile)
except BadImageError:
logging.warning(
"Could not thumbnail avatar image for user %s; ignoring",
user_profile.id,
)
# Delete the record of the avatar to avoid 404s.
do_change_avatar_fields(
user_profile, UserProfile.AVATAR_FROM_GRAVATAR, acting_user=None
)
def import_uploads(
realm: Realm,
import_dir: Path,
processes: int,
processing_avatars: bool = False,
processing_emojis: bool = False,
processing_realm_icons: bool = False,
) -> None:
if processing_avatars and processing_emojis:
raise AssertionError("Cannot import avatars and emojis at the same time!")
if processing_avatars:
logging.info("Importing avatars")
elif processing_emojis:
logging.info("Importing emojis")
elif processing_realm_icons:
logging.info("Importing realm icons and logos")
else:
logging.info("Importing uploaded files")
records_filename = os.path.join(import_dir, "records.json")
with open(records_filename, "rb") as records_file:
records: List[Dict[str, Any]] = orjson.loads(records_file.read())
timestamp = datetime_to_timestamp(timezone_now())
re_map_foreign_keys_internal(
records, "records", "realm_id", related_table="realm", id_field=True
)
if not processing_emojis and not processing_realm_icons:
re_map_foreign_keys_internal(
records, "records", "user_profile_id", related_table="user_profile", id_field=True
)
s3_uploads = settings.LOCAL_UPLOADS_DIR is None
if s3_uploads:
if processing_avatars or processing_emojis or processing_realm_icons:
bucket_name = settings.S3_AVATAR_BUCKET
else:
bucket_name = settings.S3_AUTH_UPLOADS_BUCKET
bucket = get_bucket(bucket_name)
count = 0
for record in records:
count += 1
if count % 1000 == 0:
logging.info("Processed %s/%s uploads", count, len(records))
if processing_avatars:
# For avatars, we need to rehash the user ID with the
# new server's avatar salt
relative_path = user_avatar_path_from_ids(record["user_profile_id"], record["realm_id"])
if record["s3_path"].endswith(".original"):
relative_path += ".original"
else:
# TODO: This really should be unconditional. However,
# until we fix the S3 upload backend to use the .png
# path suffix for its normal avatar URLs, we need to
# only do this for the LOCAL_UPLOADS_DIR backend.
if not s3_uploads:
relative_path += ".png"
elif processing_emojis:
            # For emojis, we follow the same path format as 'upload_emoji_image'.
relative_path = RealmEmoji.PATH_ID_TEMPLATE.format(
realm_id=record["realm_id"], emoji_file_name=record["file_name"]
)
record["last_modified"] = timestamp
elif processing_realm_icons:
icon_name = os.path.basename(record["path"])
relative_path = os.path.join(str(record["realm_id"]), "realm", icon_name)
record["last_modified"] = timestamp
else:
# This relative_path is basically the new location of the file,
# which will later be copied from its original location as
# specified in record["s3_path"].
relative_path = upload_backend.generate_message_upload_path(
str(record["realm_id"]), sanitize_name(os.path.basename(record["path"]))
)
path_maps["attachment_path"][record["s3_path"]] = relative_path
if s3_uploads:
key = bucket.Object(relative_path)
metadata = {}
if processing_emojis and "user_profile_id" not in record:
# Exported custom emoji from tools like Slack don't have
# the data for what user uploaded them in `user_profile_id`.
pass
elif processing_realm_icons and "user_profile_id" not in record:
# Exported realm icons and logos from local export don't have
# the value of user_profile_id in the associated record.
pass
else:
user_profile_id = int(record["user_profile_id"])
# Support email gateway bot and other cross-realm messages
if user_profile_id in ID_MAP["user_profile"]:
logging.info("Uploaded by ID mapped user: %s!", user_profile_id)
user_profile_id = ID_MAP["user_profile"][user_profile_id]
user_profile = get_user_profile_by_id(user_profile_id)
metadata["user_profile_id"] = str(user_profile.id)
if "last_modified" in record:
metadata["orig_last_modified"] = str(record["last_modified"])
metadata["realm_id"] = str(record["realm_id"])
# Zulip exports will always have a content-type, but third-party exports might not.
content_type = record.get("content_type")
if content_type is None:
content_type = guess_type(record["s3_path"])[0]
if content_type is None:
# This is the default for unknown data. Note that
# for `.original` files, this is the value we'll
# set; that is OK, because those are never served
# directly anyway.
content_type = "application/octet-stream"
key.upload_file(
Filename=os.path.join(import_dir, record["path"]),
ExtraArgs={"ContentType": content_type, "Metadata": metadata},
)
else:
assert settings.LOCAL_UPLOADS_DIR is not None
if processing_avatars or processing_emojis or processing_realm_icons:
file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", relative_path)
else:
file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "files", relative_path)
orig_file_path = os.path.join(import_dir, record["path"])
os.makedirs(os.path.dirname(file_path), exist_ok=True)
shutil.copy(orig_file_path, file_path)
if processing_avatars:
# Ensure that we have medium-size avatar images for every
# avatar. TODO: This implementation is hacky, both in that it
# does get_user_profile_by_id for each user, and in that it
# might be better to require the export to just have these.
if processes == 1:
for record in records:
process_avatars(record)
else:
connection.close()
cache._cache.disconnect_all()
with multiprocessing.Pool(processes) as p:
for out in p.imap_unordered(process_avatars, records):
pass
# Importing data suffers from a difficult ordering problem because of
# models that reference each other circularly. Here is a correct order.
#
# * Client [no deps]
# * Realm [-notifications_stream]
# * Stream [only depends on realm]
# * Realm's notifications_stream
# * Now can do all realm_tables
# * UserProfile, in order by ID to avoid bot loop issues
# * Huddle
# * Recipient
# * Subscription
# * Message
# * UserMessage
#
# Because the Python object => JSON conversion process is not fully
# faithful, we have to use a set of fixers (e.g. on DateTime objects
# and foreign keys) to do the import correctly.
def do_import_realm(import_dir: Path, subdomain: str, processes: int = 1) -> Realm:
logging.info("Importing realm dump %s", import_dir)
if not os.path.exists(import_dir):
raise Exception("Missing import directory!")
realm_data_filename = os.path.join(import_dir, "realm.json")
if not os.path.exists(realm_data_filename):
raise Exception("Missing realm.json file!")
if not server_initialized():
create_internal_realm()
logging.info("Importing realm data from %s", realm_data_filename)
with open(realm_data_filename, "rb") as f:
data = orjson.loads(f.read())
remove_denormalized_recipient_column_from_data(data)
sort_by_date = data.get("sort_by_date", False)
bulk_import_client(data, Client, "zerver_client")
# We don't import the Stream model yet, since it depends on Realm,
# which isn't imported yet. But we need the Stream model IDs for
# notifications_stream.
update_model_ids(Stream, data, "stream")
re_map_foreign_keys(data, "zerver_realm", "notifications_stream", related_table="stream")
re_map_foreign_keys(data, "zerver_realm", "signup_notifications_stream", related_table="stream")
fix_datetime_fields(data, "zerver_realm")
# Fix realm subdomain information
data["zerver_realm"][0]["string_id"] = subdomain
data["zerver_realm"][0]["name"] = subdomain
fix_realm_authentication_bitfield(data, "zerver_realm", "authentication_methods")
update_model_ids(Realm, data, "realm")
realm = Realm(**data["zerver_realm"][0])
if realm.notifications_stream_id is not None:
notifications_stream_id: Optional[int] = int(realm.notifications_stream_id)
else:
notifications_stream_id = None
realm.notifications_stream_id = None
if realm.signup_notifications_stream_id is not None:
signup_notifications_stream_id: Optional[int] = int(realm.signup_notifications_stream_id)
else:
signup_notifications_stream_id = None
realm.signup_notifications_stream_id = None
realm.save()
# Email tokens will automatically be randomly generated when the
# Stream objects are created by Django.
fix_datetime_fields(data, "zerver_stream")
re_map_foreign_keys(data, "zerver_stream", "realm", related_table="realm")
# Handle rendering of stream descriptions for import from non-Zulip
for stream in data["zerver_stream"]:
stream["rendered_description"] = render_stream_description(stream["description"])
bulk_import_model(data, Stream)
realm.notifications_stream_id = notifications_stream_id
realm.signup_notifications_stream_id = signup_notifications_stream_id
realm.save()
# Remap the user IDs for notification_bot and friends to their
# appropriate IDs on this server
internal_realm = get_realm(settings.SYSTEM_BOT_REALM)
for item in data["zerver_userprofile_crossrealm"]:
logging.info(
"Adding to ID map: %s %s",
item["id"],
get_system_bot(item["email"], internal_realm.id).id,
)
new_user_id = get_system_bot(item["email"], internal_realm.id).id
update_id_map(table="user_profile", old_id=item["id"], new_id=new_user_id)
new_recipient_id = Recipient.objects.get(type=Recipient.PERSONAL, type_id=new_user_id).id
update_id_map(table="recipient", old_id=item["recipient_id"], new_id=new_recipient_id)
# Merge in zerver_userprofile_mirrordummy
data["zerver_userprofile"] = data["zerver_userprofile"] + data["zerver_userprofile_mirrordummy"]
del data["zerver_userprofile_mirrordummy"]
data["zerver_userprofile"].sort(key=lambda r: r["id"])
    # Needed so that we can remap the UserProfile.last_active_message_id foreign key below.
update_message_foreign_keys(import_dir=import_dir, sort_by_date=sort_by_date)
fix_datetime_fields(data, "zerver_userprofile")
update_model_ids(UserProfile, data, "user_profile")
re_map_foreign_keys(data, "zerver_userprofile", "realm", related_table="realm")
re_map_foreign_keys(data, "zerver_userprofile", "bot_owner", related_table="user_profile")
re_map_foreign_keys(
data, "zerver_userprofile", "default_sending_stream", related_table="stream"
)
re_map_foreign_keys(
data, "zerver_userprofile", "default_events_register_stream", related_table="stream"
)
re_map_foreign_keys(
data, "zerver_userprofile", "last_active_message_id", related_table="message", id_field=True
)
for user_profile_dict in data["zerver_userprofile"]:
user_profile_dict["password"] = None
user_profile_dict["api_key"] = generate_api_key()
# Since Zulip doesn't use these permissions, drop them
del user_profile_dict["user_permissions"]
del user_profile_dict["groups"]
# The short_name field is obsolete in Zulip, but it's
# convenient for third party exports to populate it.
if "short_name" in user_profile_dict:
del user_profile_dict["short_name"]
user_profiles = [UserProfile(**item) for item in data["zerver_userprofile"]]
for user_profile in user_profiles:
user_profile.set_unusable_password()
UserProfile.objects.bulk_create(user_profiles)
re_map_foreign_keys(data, "zerver_defaultstream", "stream", related_table="stream")
re_map_foreign_keys(data, "zerver_realmemoji", "author", related_table="user_profile")
for (table, model, related_table) in realm_tables:
re_map_foreign_keys(data, table, "realm", related_table="realm")
update_model_ids(model, data, related_table)
bulk_import_model(data, model)
    # Ensure each RealmEmoji has .author set to a reasonable default, if the
    # value wasn't provided in the import data.
first_user_profile = (
UserProfile.objects.filter(realm=realm, is_active=True, role=UserProfile.ROLE_REALM_OWNER)
.order_by("id")
.first()
)
for realm_emoji in RealmEmoji.objects.filter(realm=realm):
if realm_emoji.author_id is None:
realm_emoji.author_id = first_user_profile.id
realm_emoji.save(update_fields=["author_id"])
if "zerver_huddle" in data:
update_model_ids(Huddle, data, "huddle")
# We don't import Huddle yet, since we don't have the data to
# compute huddle hashes until we've imported some of the
# tables below.
# TODO: double-check this.
re_map_foreign_keys(
data,
"zerver_recipient",
"type_id",
related_table="stream",
recipient_field=True,
id_field=True,
)
re_map_foreign_keys(
data,
"zerver_recipient",
"type_id",
related_table="user_profile",
recipient_field=True,
id_field=True,
)
re_map_foreign_keys(
data,
"zerver_recipient",
"type_id",
related_table="huddle",
recipient_field=True,
id_field=True,
)
update_model_ids(Recipient, data, "recipient")
bulk_import_model(data, Recipient)
bulk_set_users_or_streams_recipient_fields(Stream, Stream.objects.filter(realm=realm))
bulk_set_users_or_streams_recipient_fields(UserProfile, UserProfile.objects.filter(realm=realm))
re_map_foreign_keys(data, "zerver_subscription", "user_profile", related_table="user_profile")
get_huddles_from_subscription(data, "zerver_subscription")
re_map_foreign_keys(data, "zerver_subscription", "recipient", related_table="recipient")
update_model_ids(Subscription, data, "subscription")
fix_subscriptions_is_user_active_column(data, user_profiles)
bulk_import_model(data, Subscription)
if "zerver_realmauditlog" in data:
fix_datetime_fields(data, "zerver_realmauditlog")
re_map_foreign_keys(data, "zerver_realmauditlog", "realm", related_table="realm")
re_map_foreign_keys(
data, "zerver_realmauditlog", "modified_user", related_table="user_profile"
)
re_map_foreign_keys(
data, "zerver_realmauditlog", "acting_user", related_table="user_profile"
)
re_map_foreign_keys(data, "zerver_realmauditlog", "modified_stream", related_table="stream")
update_model_ids(RealmAuditLog, data, related_table="realmauditlog")
bulk_import_model(data, RealmAuditLog)
else:
logging.info("about to call create_subscription_events")
create_subscription_events(
data=data,
realm_id=realm.id,
)
logging.info("done with create_subscription_events")
# Ensure the invariant that there's always a realm-creation audit
# log event, even if the export was generated by an export tool
# that does not create RealmAuditLog events.
if not RealmAuditLog.objects.filter(
realm=realm, event_type=RealmAuditLog.REALM_CREATED
).exists():
RealmAuditLog.objects.create(
realm=realm,
event_type=RealmAuditLog.REALM_CREATED,
event_time=realm.date_created,
# Mark these as backfilled, since they weren't created
# when the realm was actually created, and thus do not
# have the creating user associated with them.
backfilled=True,
)
if "zerver_huddle" in data:
process_huddle_hash(data, "zerver_huddle")
bulk_import_model(data, Huddle)
for huddle in Huddle.objects.filter(recipient_id=None):
recipient = Recipient.objects.get(type=Recipient.HUDDLE, type_id=huddle.id)
huddle.recipient = recipient
huddle.save(update_fields=["recipient"])
if "zerver_alertword" in data:
re_map_foreign_keys(data, "zerver_alertword", "user_profile", related_table="user_profile")
re_map_foreign_keys(data, "zerver_alertword", "realm", related_table="realm")
update_model_ids(AlertWord, data, "alertword")
bulk_import_model(data, AlertWord)
if "zerver_userhotspot" in data:
fix_datetime_fields(data, "zerver_userhotspot")
re_map_foreign_keys(data, "zerver_userhotspot", "user", related_table="user_profile")
update_model_ids(UserHotspot, data, "userhotspot")
bulk_import_model(data, UserHotspot)
if "zerver_usertopic" in data:
fix_datetime_fields(data, "zerver_usertopic")
re_map_foreign_keys(data, "zerver_usertopic", "user_profile", related_table="user_profile")
re_map_foreign_keys(data, "zerver_usertopic", "stream", related_table="stream")
re_map_foreign_keys(data, "zerver_usertopic", "recipient", related_table="recipient")
update_model_ids(UserTopic, data, "usertopic")
bulk_import_model(data, UserTopic)
if "zerver_muteduser" in data:
fix_datetime_fields(data, "zerver_muteduser")
re_map_foreign_keys(data, "zerver_muteduser", "user_profile", related_table="user_profile")
re_map_foreign_keys(data, "zerver_muteduser", "muted_user", related_table="user_profile")
update_model_ids(MutedUser, data, "muteduser")
bulk_import_model(data, MutedUser)
if "zerver_service" in data:
re_map_foreign_keys(data, "zerver_service", "user_profile", related_table="user_profile")
fix_service_tokens(data, "zerver_service")
update_model_ids(Service, data, "service")
bulk_import_model(data, Service)
if "zerver_usergroup" in data:
re_map_foreign_keys(data, "zerver_usergroup", "realm", related_table="realm")
re_map_foreign_keys_many_to_many(
data, "zerver_usergroup", "direct_members", related_table="user_profile"
)
re_map_foreign_keys_many_to_many(
data, "zerver_usergroup", "direct_subgroups", related_table="usergroup"
)
update_model_ids(UserGroup, data, "usergroup")
bulk_import_model(data, UserGroup)
re_map_foreign_keys(
data, "zerver_usergroupmembership", "user_group", related_table="usergroup"
)
re_map_foreign_keys(
data, "zerver_usergroupmembership", "user_profile", related_table="user_profile"
)
update_model_ids(UserGroupMembership, data, "usergroupmembership")
bulk_import_model(data, UserGroupMembership)
re_map_foreign_keys(
data, "zerver_groupgroupmembership", "supergroup", related_table="usergroup"
)
re_map_foreign_keys(
data, "zerver_groupgroupmembership", "subgroup", related_table="usergroup"
)
update_model_ids(GroupGroupMembership, data, "groupgroupmembership")
bulk_import_model(data, GroupGroupMembership)
if "zerver_botstoragedata" in data:
re_map_foreign_keys(
data, "zerver_botstoragedata", "bot_profile", related_table="user_profile"
)
update_model_ids(BotStorageData, data, "botstoragedata")
bulk_import_model(data, BotStorageData)
if "zerver_botconfigdata" in data:
re_map_foreign_keys(
data, "zerver_botconfigdata", "bot_profile", related_table="user_profile"
)
update_model_ids(BotConfigData, data, "botconfigdata")
bulk_import_model(data, BotConfigData)
if "zerver_realmuserdefault" in data:
re_map_foreign_keys(data, "zerver_realmuserdefault", "realm", related_table="realm")
update_model_ids(RealmUserDefault, data, "realmuserdefault")
bulk_import_model(data, RealmUserDefault)
# Create RealmUserDefault table with default values if not created
# already from the import data; this can happen when importing
# data from another product.
if not RealmUserDefault.objects.filter(realm=realm).exists():
RealmUserDefault.objects.create(realm=realm)
fix_datetime_fields(data, "zerver_userpresence")
re_map_foreign_keys(data, "zerver_userpresence", "user_profile", related_table="user_profile")
re_map_foreign_keys(data, "zerver_userpresence", "client", related_table="client")
re_map_foreign_keys(data, "zerver_userpresence", "realm", related_table="realm")
update_model_ids(UserPresence, data, "user_presence")
bulk_import_model(data, UserPresence)
fix_datetime_fields(data, "zerver_useractivity")
re_map_foreign_keys(data, "zerver_useractivity", "user_profile", related_table="user_profile")
re_map_foreign_keys(data, "zerver_useractivity", "client", related_table="client")
update_model_ids(UserActivity, data, "useractivity")
bulk_import_model(data, UserActivity)
fix_datetime_fields(data, "zerver_useractivityinterval")
re_map_foreign_keys(
data, "zerver_useractivityinterval", "user_profile", related_table="user_profile"
)
update_model_ids(UserActivityInterval, data, "useractivityinterval")
bulk_import_model(data, UserActivityInterval)
re_map_foreign_keys(data, "zerver_customprofilefield", "realm", related_table="realm")
update_model_ids(CustomProfileField, data, related_table="customprofilefield")
bulk_import_model(data, CustomProfileField)
re_map_foreign_keys(
data, "zerver_customprofilefieldvalue", "user_profile", related_table="user_profile"
)
re_map_foreign_keys(
data, "zerver_customprofilefieldvalue", "field", related_table="customprofilefield"
)
fix_customprofilefield(data)
update_model_ids(CustomProfileFieldValue, data, related_table="customprofilefieldvalue")
bulk_import_model(data, CustomProfileFieldValue)
# Import uploaded files and avatars
import_uploads(realm, os.path.join(import_dir, "avatars"), processes, processing_avatars=True)
import_uploads(realm, os.path.join(import_dir, "uploads"), processes)
    # We need this check because the emoji files are only present for data
    # converted by the Slack importer; for a Zulip export, this directory
    # doesn't exist.
if os.path.exists(os.path.join(import_dir, "emoji")):
import_uploads(realm, os.path.join(import_dir, "emoji"), processes, processing_emojis=True)
if os.path.exists(os.path.join(import_dir, "realm_icons")):
import_uploads(
realm, os.path.join(import_dir, "realm_icons"), processes, processing_realm_icons=True
)
sender_map = {user["id"]: user for user in data["zerver_userprofile"]}
# Import zerver_message and zerver_usermessage
import_message_data(realm=realm, sender_map=sender_map, import_dir=import_dir)
re_map_foreign_keys(data, "zerver_reaction", "message", related_table="message")
re_map_foreign_keys(data, "zerver_reaction", "user_profile", related_table="user_profile")
re_map_realm_emoji_codes(data, table_name="zerver_reaction")
update_model_ids(Reaction, data, "reaction")
bulk_import_model(data, Reaction)
# Similarly, we need to recalculate the first_message_id for stream objects.
for stream in Stream.objects.filter(realm=realm):
recipient = Recipient.objects.get(type=Recipient.STREAM, type_id=stream.id)
first_message = Message.objects.filter(recipient=recipient).first()
if first_message is None:
stream.first_message_id = None
else:
stream.first_message_id = first_message.id
stream.save(update_fields=["first_message_id"])
if "zerver_userstatus" in data:
fix_datetime_fields(data, "zerver_userstatus")
re_map_foreign_keys(data, "zerver_userstatus", "user_profile", related_table="user_profile")
re_map_foreign_keys(data, "zerver_userstatus", "client", related_table="client")
update_model_ids(UserStatus, data, "userstatus")
re_map_realm_emoji_codes(data, table_name="zerver_userstatus")
bulk_import_model(data, UserStatus)
# Do attachments AFTER message data is loaded.
# TODO: de-dup how we read these json files.
fn = os.path.join(import_dir, "attachment.json")
if not os.path.exists(fn):
raise Exception("Missing attachment.json file!")
logging.info("Importing attachment data from %s", fn)
with open(fn, "rb") as f:
data = orjson.loads(f.read())
import_attachments(data)
# Import the analytics file.
import_analytics_data(realm=realm, import_dir=import_dir)
if settings.BILLING_ENABLED:
do_change_realm_plan_type(realm, Realm.PLAN_TYPE_LIMITED, acting_user=None)
else:
do_change_realm_plan_type(realm, Realm.PLAN_TYPE_SELF_HOSTED, acting_user=None)
return realm
# create_users and do_import_system_bots differ from their equivalents
# in zerver/lib/server_initialization.py because here we first check
# whether each bot already exists, and only create users for the bots
# that don't.
def do_import_system_bots(realm: Any) -> None:
internal_bots = [
(bot["name"], bot["email_template"] % (settings.INTERNAL_BOT_DOMAIN,))
for bot in settings.INTERNAL_BOTS
]
create_users(realm, internal_bots, bot_type=UserProfile.DEFAULT_BOT)
print("Finished importing system bots.")
def create_users(
realm: Realm, name_list: Iterable[Tuple[str, str]], bot_type: Optional[int] = None
) -> None:
user_set = set()
for full_name, email in name_list:
if not UserProfile.objects.filter(email=email):
user_set.add((email, full_name, True))
bulk_create_users(realm, user_set, bot_type)
def update_message_foreign_keys(import_dir: Path, sort_by_date: bool) -> None:
old_id_list = get_incoming_message_ids(
import_dir=import_dir,
sort_by_date=sort_by_date,
)
count = len(old_id_list)
new_id_list = allocate_ids(model_class=Message, count=count)
for old_id, new_id in zip(old_id_list, new_id_list):
update_id_map(
table="message",
old_id=old_id,
new_id=new_id,
)
    # We don't touch user_message keys here; that happens later when
    # we actually read the files a second time to get the real data.
def get_incoming_message_ids(import_dir: Path, sort_by_date: bool) -> List[int]:
"""
This function reads in our entire collection of message
ids, which can be millions of integers for some installations.
And then we sort the list. This is necessary to ensure
that the sort order of incoming ids matches the sort order
of date_sent, which isn't always guaranteed by our
utilities that convert third party chat data. We also
need to move our ids to a new range if we're dealing
with a server that has data for other realms.
"""
if sort_by_date:
tups: List[Tuple[int, int]] = []
else:
message_ids: List[int] = []
dump_file_id = 1
while True:
message_filename = os.path.join(import_dir, f"messages-{dump_file_id:06}.json")
if not os.path.exists(message_filename):
break
with open(message_filename, "rb") as f:
data = orjson.loads(f.read())
# Aggressively free up memory.
del data["zerver_usermessage"]
for row in data["zerver_message"]:
# We truncate date_sent to int to theoretically
# save memory and speed up the sort. For
# Zulip-to-Zulip imports, the
# message_id will generally be a good tiebreaker.
# If we occasionally mis-order the ids for two
# messages from the same second, it's not the
# end of the world, as it's likely those messages
            # arrived at the original server in somewhat
# arbitrary order.
message_id = row["id"]
if sort_by_date:
date_sent = int(row["date_sent"])
tup = (date_sent, message_id)
tups.append(tup)
else:
message_ids.append(message_id)
dump_file_id += 1
if sort_by_date:
tups.sort()
message_ids = [tup[1] for tup in tups]
return message_ids
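# For example (hypothetical rows), with sort_by_date=True two messages with
# date_sent timestamps 1609459200 and 1609459201 and original ids 17 and 4
# produce the tuples (1609459200, 17) and (1609459201, 4); sorting the tuples
# returns the ids as [17, 4], i.e. ordered by send time rather than by the
# exporter's id order.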
def import_message_data(realm: Realm, sender_map: Dict[int, Record], import_dir: Path) -> None:
dump_file_id = 1
while True:
message_filename = os.path.join(import_dir, f"messages-{dump_file_id:06}.json")
if not os.path.exists(message_filename):
break
with open(message_filename, "rb") as f:
data = orjson.loads(f.read())
logging.info("Importing message dump %s", message_filename)
re_map_foreign_keys(data, "zerver_message", "sender", related_table="user_profile")
re_map_foreign_keys(data, "zerver_message", "recipient", related_table="recipient")
re_map_foreign_keys(data, "zerver_message", "sending_client", related_table="client")
fix_datetime_fields(data, "zerver_message")
        # Rewrite message content to point at the updated attachment URLs.
fix_upload_links(data, "zerver_message")
# We already create mappings for zerver_message ids
# in update_message_foreign_keys(), so here we simply
# apply them.
message_id_map = ID_MAP["message"]
for row in data["zerver_message"]:
row["id"] = message_id_map[row["id"]]
for row in data["zerver_usermessage"]:
assert row["message"] in message_id_map
fix_message_rendered_content(
realm=realm,
sender_map=sender_map,
messages=data["zerver_message"],
)
logging.info("Successfully rendered Markdown for message batch")
# A LOT HAPPENS HERE.
# This is where we actually import the message data.
bulk_import_model(data, Message)
# Due to the structure of these message chunks, we're
# guaranteed to have already imported all the Message objects
# for this batch of UserMessage objects.
re_map_foreign_keys(data, "zerver_usermessage", "message", related_table="message")
re_map_foreign_keys(
data, "zerver_usermessage", "user_profile", related_table="user_profile"
)
fix_bitfield_keys(data, "zerver_usermessage", "flags")
bulk_import_user_message_data(data, dump_file_id)
dump_file_id += 1
def import_attachments(data: TableData) -> None:
# Clean up the data in zerver_attachment that is not
# relevant to our many-to-many import.
fix_datetime_fields(data, "zerver_attachment")
re_map_foreign_keys(data, "zerver_attachment", "owner", related_table="user_profile")
re_map_foreign_keys(data, "zerver_attachment", "realm", related_table="realm")
# Configure ourselves. Django models many-to-many (m2m)
# relations asymmetrically. The parent here refers to the
    # Model that has the ManyToManyField. It is assumed here that
    # the child models have already been loaded, but we are in turn
    # responsible for loading the parents and the m2m rows.
parent_model = Attachment
parent_db_table_name = "zerver_attachment"
parent_singular = "attachment"
child_singular = "message"
child_plural = "messages"
m2m_table_name = "zerver_attachment_messages"
parent_id = "attachment_id"
child_id = "message_id"
update_model_ids(parent_model, data, "attachment")
# We don't bulk_import_model yet, because we need to first compute
# the many-to-many for this table.
# First, build our list of many-to-many (m2m) rows.
# We do this in a slightly convoluted way to anticipate
# a future where we may need to call re_map_foreign_keys.
m2m_rows: List[Record] = []
for parent_row in data[parent_db_table_name]:
for fk_id in parent_row[child_plural]:
m2m_row: Record = {}
m2m_row[parent_singular] = parent_row["id"]
m2m_row[child_singular] = ID_MAP["message"][fk_id]
m2m_rows.append(m2m_row)
# Create our table data for insert.
m2m_data: TableData = {m2m_table_name: m2m_rows}
convert_to_id_fields(m2m_data, m2m_table_name, parent_singular)
convert_to_id_fields(m2m_data, m2m_table_name, child_singular)
m2m_rows = m2m_data[m2m_table_name]
# Next, delete out our child data from the parent rows.
for parent_row in data[parent_db_table_name]:
del parent_row[child_plural]
# Update 'path_id' for the attachments
for attachment in data[parent_db_table_name]:
attachment["path_id"] = path_maps["attachment_path"][attachment["path_id"]]
# Next, load the parent rows.
bulk_import_model(data, parent_model)
# Now, go back to our m2m rows.
# TODO: Do this the kosher Django way. We may find a
# better way to do this in Django 1.9 particularly.
with connection.cursor() as cursor:
sql_template = SQL(
"""
INSERT INTO {m2m_table_name} ({parent_id}, {child_id}) VALUES %s
"""
).format(
m2m_table_name=Identifier(m2m_table_name),
parent_id=Identifier(parent_id),
child_id=Identifier(child_id),
)
tups = [(row[parent_id], row[child_id]) for row in m2m_rows]
execute_values(cursor.cursor, sql_template, tups)
logging.info("Successfully imported M2M table %s", m2m_table_name)
def import_analytics_data(realm: Realm, import_dir: Path) -> None:
analytics_filename = os.path.join(import_dir, "analytics.json")
if not os.path.exists(analytics_filename):
return
logging.info("Importing analytics data from %s", analytics_filename)
with open(analytics_filename, "rb") as f:
data = orjson.loads(f.read())
# Process the data through the fixer functions.
fix_datetime_fields(data, "analytics_realmcount")
re_map_foreign_keys(data, "analytics_realmcount", "realm", related_table="realm")
update_model_ids(RealmCount, data, "analytics_realmcount")
bulk_import_model(data, RealmCount)
fix_datetime_fields(data, "analytics_usercount")
re_map_foreign_keys(data, "analytics_usercount", "realm", related_table="realm")
re_map_foreign_keys(data, "analytics_usercount", "user", related_table="user_profile")
update_model_ids(UserCount, data, "analytics_usercount")
bulk_import_model(data, UserCount)
fix_datetime_fields(data, "analytics_streamcount")
re_map_foreign_keys(data, "analytics_streamcount", "realm", related_table="realm")
re_map_foreign_keys(data, "analytics_streamcount", "stream", related_table="stream")
update_model_ids(StreamCount, data, "analytics_streamcount")
bulk_import_model(data, StreamCount)
| zulip/zulip | zerver/lib/import_realm.py | Python | apache-2.0 | 62,551 |
# ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
import os
import re
import six
from lxml.etree import (
ElementTree,
Element,
ParseError,
XML,
XMLSyntaxError,
tostring,
XMLParser as LXMLParser,
)
# ============= local library imports ==========================
# xml tokenizer pattern
xml = re.compile(r"<([/?!]?\w+)|&(#?\w+);|([^<>&'\"=\s]+)|(\s+)|(.)")
def scan(txt, target):
def gettoken(space=0, scan=xml.scanner(txt).match):
try:
while 1:
m = scan()
code = m.lastindex
text = m.group(m.lastindex)
if not space or code != 4:
return code, text
except AttributeError:
raise EOFError
try:
while 1:
# cc, tt = gettoken()
yield gettoken()
except EOFError:
pass
except SyntaxError as v:
raise
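# Illustrative sketch only (hypothetical helper, not used elsewhere): collects
# the (code, text) pairs produced by scan() for a small document, e.g.
# _example_scan_tokens("<a>hi</a>") returns
# [(1, "a"), (5, ">"), (3, "hi"), (1, "/a"), (5, ">")], where code is the
# index of the regex group that matched (1=tag, 2=entity, 3=text run,
# 4=whitespace, 5=any other single character).
def _example_scan_tokens(txt):
    return list(scan(txt, None))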
def pprint_xml(txt):
line = []
lines = []
indent = " "
stack = []
skip_next = False
for c, t in scan(txt, None):
# print c, t, len(t)
# print t, ord(t[-1]), ord('\n')
# if t.endswith('\n'):
# continue
t = t.rstrip()
# if not t:
# continue
# t = t.strip()
# print c, t, line, stack
if skip_next:
skip_next = False
continue
if c == 1:
if t.startswith("/"):
stack.pop()
line.append("<{}>".format(t))
lines.append("{}{}".format(indent * len(stack), "".join(line).strip()))
line = []
skip_next = True
continue
else:
lines.append(
"{}{}".format(indent * (len(stack) - 1), "".join(line).strip())
)
line = []
if not t.startswith("?xml"):
stack.append(t)
line.append("<{}".format(t))
# if not line and c == 1:
# line.append('<{}'.format(t))
# continue
else:
if c == 4:
t = " "
line.append(t)
if line:
lines.append("".join(line).strip())
# print '-------------------'
# for li in lines:
# print li
# lines[0]=lines[0].lstrip()
return "\n".join([li for li in lines if li.strip()])
def indent(elem, level=0):
i = "\n" + level * " "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for ei in elem:
indent(ei, level + 1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
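# Usage sketch (illustrative only): indent() mutates a tree in place so that
# serializing it afterwards produces human readable output.
def _example_indent_usage():
    root = Element("root")
    root.append(Element("child"))
    indent(root)
    # tostring(root) now contains newlines and two-space indentation
    return tostring(root)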
class XMLParser(object):
_root = None
path = None
_syntax_error = None
def __init__(self, path=None, *args, **kw):
if path:
self.path = path
if path.endswith(".xml"):
try:
self._parse_file(path)
except ParseError as e:
from pyface.message_dialog import warning
warning(None, str(e))
else:
self._root = Element("root")
def _parse_file(self, p):
txt = None
if isinstance(p, (str, six.text_type)):
txt = ""
if os.path.isfile(p):
with open(p, "rb") as rfile:
txt = rfile.read()
if txt is None:
txt = p.read()
try:
self._root = XML(txt, parser=LXMLParser(remove_blank_text=True))
return True
except XMLSyntaxError as e:
            print("Syntax error parsing {}: {}".format(p, e))
            self._syntax_error = str(e)
def load(self, rfile):
return self._parse_file(rfile)
def add(self, tag, value, root=None, **kw):
if root is None:
root = self._root
elem = self.new_element(tag, value, **kw)
root.append(elem)
return elem
def new_element(self, tag, value, **kw):
e = Element(tag, attrib=kw)
if value not in ("", None):
e.text = str(value)
return e
def get_root(self):
return self._root
def get_tree(self):
return ElementTree(self._root)
def save(self, p=None, pretty_print=True):
if p is None:
p = self.path
if p and os.path.isdir(os.path.dirname(p)):
indent(self._root)
tree = self.get_tree()
tree.write(p, xml_declaration=True, method="xml", pretty_print=pretty_print)
def tostring(self, pretty_print=True):
tree = self.get_tree()
if tree:
return tostring(tree, pretty_print=pretty_print)
def get_elements(self, name=None):
root = self.get_root()
path = "//{}".format(name)
return root.xpath(path)
# return self._get_elements(None, True, name)
def _get_elements(self, group, element, name):
if group is None:
group = self.get_root()
return [v if element else v.text.strip() for v in group.findall(name)]
# class XMLParser2(object):
# '''
# wrapper for ElementTree
# '''
# _tree = None
#
# def __init__(self, path=None, *args, **kw):
# self._tree = ElementTree()
# if path:
# self._path = path
# try:
# self._parse_file(path)
# except ParseError, e:
# warning(None, str(e))
#
# def load(self, fp):
# '''
# path or file-like object
# '''
# return self._parse_file(fp)
#
# def _parse_file(self, p):
# self._tree.parse(p)
#
# def get_tree(self):
# return self._tree
#
# def save(self, p=None):
# if p is None:
# p = self._path
#
# if p and os.path.isdir(os.path.dirname(p)):
# # self.indent(self._tree.getroot())
# self._tree.write(p, pretty_print=True)
#
# # def indent(self, elem, level=0):
# # i = '\n' + level * ' '
# # if len(elem):
# # if not elem.text or not elem.text.strip():
# # elem.text = i + ' '
# # if not elem.tail or not elem.tail.strip():
# # elem.tail = i
# # for elem in elem:
# # self.indent(elem, level + 1)
# # if not elem.tail or not elem.tail.strip():
# # elem.tail = i
# # else:
# # if level and (not elem.tail or not elem.tail.strip()):
# # elem.tail = i
#
# def add_element(self, tag, value, root, **kw):
# if root is None:
# root = self._tree.getroot()
# elem = self.new_element(tag, value, **kw)
# root.append(elem)
# return elem
#
# def new_element(self, tag, value, **kw):
# e = Element(tag, attrib=kw)
# # if value:
# # e.text = value
# return e
# ============= EOF ====================================
| USGSDenverPychron/pychron | pychron/core/xml/xml_parser.py | Python | apache-2.0 | 8,717 |
# -*- coding: utf-8 -*-
import mock
import pytest
from urlparse import urlparse
from api.base.settings.defaults import API_BASE
from framework.auth.core import Auth
from osf.models import NodeLog
from osf.models.licenses import NodeLicense
from osf_tests.factories import (
NodeFactory,
ProjectFactory,
RegistrationFactory,
AuthUserFactory,
CollectionFactory,
CommentFactory,
NodeLicenseRecordFactory,
PrivateLinkFactory,
PreprintFactory,
IdentifierFactory,
)
from rest_framework import exceptions
from tests.base import fake
from tests.utils import assert_items_equal, assert_latest_log, assert_latest_log_not
from website.views import find_bookmark_collection
from website.util import permissions
from website.util.sanitize import strip_html
@pytest.fixture()
def user():
return AuthUserFactory()
@pytest.mark.django_db
class TestNodeDetail:
@pytest.fixture()
def user_two(self):
return AuthUserFactory()
@pytest.fixture()
def project_public(self, user):
return ProjectFactory(
title='Project One',
is_public=True,
creator=user)
@pytest.fixture()
def project_private(self, user):
return ProjectFactory(
title='Project Two',
is_public=False,
creator=user)
@pytest.fixture()
def component_public(self, user, project_public):
return NodeFactory(parent=project_public, creator=user, is_public=True)
@pytest.fixture()
def url_public(self, project_public):
return '/{}nodes/{}/'.format(API_BASE, project_public._id)
@pytest.fixture()
def url_private(self, project_private):
return '/{}nodes/{}/'.format(API_BASE, project_private._id)
@pytest.fixture()
def url_component_public(self, component_public):
return '/{}nodes/{}/'.format(API_BASE, component_public._id)
@pytest.fixture()
def permissions_read(self):
return ['read']
@pytest.fixture()
def permissions_write(self):
return ['read', 'write']
@pytest.fixture()
def permissions_admin(self):
return ['read', 'admin', 'write']
def test_return_project_details(
self, app, user, user_two, project_public,
project_private, url_public, url_private,
permissions_read, permissions_admin):
# test_return_public_project_details_logged_out
res = app.get(url_public)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == project_public.title
assert res.json['data']['attributes']['description'] == project_public.description
assert res.json['data']['attributes']['category'] == project_public.category
assert_items_equal(
res.json['data']['attributes']['current_user_permissions'],
permissions_read)
# test_return_public_project_details_contributor_logged_in
res = app.get(url_public, auth=user.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == project_public.title
assert res.json['data']['attributes']['description'] == project_public.description
assert res.json['data']['attributes']['category'] == project_public.category
assert_items_equal(
res.json['data']['attributes']['current_user_permissions'],
permissions_admin)
# test_return_public_project_details_non_contributor_logged_in
res = app.get(url_public, auth=user_two.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == project_public.title
assert res.json['data']['attributes']['description'] == project_public.description
assert res.json['data']['attributes']['category'] == project_public.category
assert_items_equal(
res.json['data']['attributes']['current_user_permissions'],
permissions_read)
# test_return_private_project_details_logged_in_admin_contributor
res = app.get(url_private, auth=user.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == project_private.title
assert res.json['data']['attributes']['description'] == project_private.description
assert res.json['data']['attributes']['category'] == project_private.category
assert_items_equal(
res.json['data']['attributes']['current_user_permissions'],
permissions_admin)
# test_return_private_project_details_logged_out
res = app.get(url_private, expect_errors=True)
assert res.status_code == 401
assert 'detail' in res.json['errors'][0]
# test_return_private_project_details_logged_in_non_contributor
res = app.get(url_private, auth=user_two.auth, expect_errors=True)
assert res.status_code == 403
assert 'detail' in res.json['errors'][0]
def test_return_private_project_details_logged_in_write_contributor(
self, app, user, user_two, project_private, url_private, permissions_write):
project_private.add_contributor(
contributor=user_two, auth=Auth(user), save=True)
res = app.get(url_private, auth=user_two.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == project_private.title
assert res.json['data']['attributes']['description'] == project_private.description
assert res.json['data']['attributes']['category'] == project_private.category
assert_items_equal(
res.json['data']['attributes']['current_user_permissions'],
permissions_write)
def test_top_level_project_has_no_parent(self, app, url_public):
res = app.get(url_public)
assert res.status_code == 200
assert 'parent' not in res.json['data']['relationships']
assert 'id' in res.json['data']
assert res.content_type == 'application/vnd.api+json'
def test_child_project_has_parent(
self, app, user, project_public, url_public):
public_component = NodeFactory(
parent=project_public, creator=user, is_public=True)
public_component_url = '/{}nodes/{}/'.format(
API_BASE, public_component._id)
res = app.get(public_component_url)
assert res.status_code == 200
url = res.json['data']['relationships']['parent']['links']['related']['href']
assert urlparse(url).path == url_public
def test_node_has(self, app, url_public):
# test_node_has_children_link
res = app.get(url_public)
url = res.json['data']['relationships']['children']['links']['related']['href']
expected_url = '{}children/'.format(url_public)
assert urlparse(url).path == expected_url
# test_node_has_contributors_link
res = app.get(url_public)
url = res.json['data']['relationships']['contributors']['links']['related']['href']
expected_url = '{}contributors/'.format(url_public)
assert urlparse(url).path == expected_url
# test_node_has_node_links_link
res = app.get(url_public)
url = res.json['data']['relationships']['node_links']['links']['related']['href']
expected_url = '{}node_links/'.format(url_public)
assert urlparse(url).path == expected_url
# test_node_has_registrations_link
res = app.get(url_public)
url = res.json['data']['relationships']['registrations']['links']['related']['href']
expected_url = '{}registrations/'.format(url_public)
assert urlparse(url).path == expected_url
# test_node_has_files_link
res = app.get(url_public)
url = res.json['data']['relationships']['files']['links']['related']['href']
expected_url = '{}files/'.format(url_public)
assert urlparse(url).path == expected_url
def test_node_has_comments_link(
self, app, user, project_public, url_public):
CommentFactory(node=project_public, user=user)
res = app.get(url_public)
assert res.status_code == 200
assert 'comments' in res.json['data']['relationships'].keys()
url = res.json['data']['relationships']['comments']['links']['related']['href']
res = app.get(url)
assert res.status_code == 200
assert res.json['data'][0]['type'] == 'comments'
def test_node_comments_link_query_params_formatted(
self, app, user, project_public, project_private, url_private):
CommentFactory(node=project_public, user=user)
project_private_link = PrivateLinkFactory(anonymous=False)
project_private_link.nodes.add(project_private)
project_private_link.save()
res = app.get(url_private, auth=user.auth)
url = res.json['data']['relationships']['comments']['links']['related']['href']
assert project_private_link.key not in url
res = app.get(
'{}?view_only={}'.format(
url_private,
project_private_link.key))
url = res.json['data']['relationships']['comments']['links']['related']['href']
assert project_private_link.key in url
def test_node_has_correct_unread_comments_count(
self, app, user, project_public, url_public):
contributor = AuthUserFactory()
project_public.add_contributor(
contributor=contributor, auth=Auth(user), save=True)
CommentFactory(
node=project_public,
user=contributor,
page='node')
res = app.get(
'{}?related_counts=True'.format(url_public),
auth=user.auth)
unread = res.json['data']['relationships']['comments']['links']['related']['meta']['unread']
unread_comments_node = unread['node']
assert unread_comments_node == 1
def test_node_properties(self, app, url_public):
res = app.get(url_public)
assert res.json['data']['attributes']['public'] is True
assert res.json['data']['attributes']['registration'] is False
assert res.json['data']['attributes']['collection'] is False
assert res.json['data']['attributes']['tags'] == []
def test_requesting_folder_returns_error(self, app, user):
folder = CollectionFactory(creator=user)
res = app.get(
'/{}nodes/{}/'.format(API_BASE, folder._id),
auth=user.auth,
expect_errors=True
)
assert res.status_code == 404
def test_cannot_return_registrations_at_node_detail_endpoint(
self, app, user, project_public):
registration = RegistrationFactory(
project=project_public, creator=user)
res = app.get('/{}nodes/{}/'.format(
API_BASE, registration._id),
auth=user.auth, expect_errors=True)
assert res.status_code == 404
def test_cannot_return_folder_at_node_detail_endpoint(self, app, user):
folder = CollectionFactory(creator=user)
res = app.get(
'/{}nodes/{}/'.format(API_BASE, folder._id),
auth=user.auth, expect_errors=True)
assert res.status_code == 404
@pytest.mark.django_db
class NodeCRUDTestCase:
@pytest.fixture()
def user_two(self):
return AuthUserFactory()
@pytest.fixture()
def title(self):
return 'Cool Project'
@pytest.fixture()
def title_new(self):
return 'Super Cool Project'
@pytest.fixture()
def description(self):
return 'A Properly Cool Project'
@pytest.fixture()
def description_new(self):
return 'An even cooler project'
@pytest.fixture()
def category(self):
return 'data'
@pytest.fixture()
def category_new(self):
return 'project'
@pytest.fixture()
def project_public(self, user, title, description, category):
return ProjectFactory(
title=title,
description=description,
category=category,
is_public=True,
creator=user
)
@pytest.fixture()
def project_private(self, user, title, description, category):
return ProjectFactory(
title=title,
description=description,
category=category,
is_public=False,
creator=user
)
@pytest.fixture()
def url_public(self, project_public):
return '/{}nodes/{}/'.format(API_BASE, project_public._id)
@pytest.fixture()
def url_private(self, project_private):
return '/{}nodes/{}/'.format(API_BASE, project_private._id)
@pytest.fixture()
def url_fake(self):
return '/{}nodes/{}/'.format(API_BASE, '12345')
@pytest.fixture()
def make_node_payload(self):
def payload(node, attributes):
return {
'data': {
'id': node._id,
'type': 'nodes',
'attributes': attributes,
}
}
return payload
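    # Illustrative sketch (editorial, not part of the original tests):
    #   make_node_payload(node, {'public': False})
    # returns the minimal JSON:API envelope the update tests below send, i.e.
    #   {'data': {'id': node._id, 'type': 'nodes',
    #             'attributes': {'public': False}}}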
@pytest.mark.django_db
class TestNodeUpdate(NodeCRUDTestCase):
def test_node_update_invalid_data(self, app, user, url_public):
res = app.put_json_api(
url_public, 'Incorrect data',
auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == exceptions.ParseError.default_detail
res = app.put_json_api(
url_public, ['Incorrect data'],
auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == exceptions.ParseError.default_detail
def test_cannot_make_project_public_if_non_contributor(
self, app, project_private, url_private, make_node_payload):
with assert_latest_log_not(NodeLog.MADE_PUBLIC, project_private):
non_contrib = AuthUserFactory()
res = app.patch_json(
url_private,
make_node_payload(project_private, {'public': True}),
auth=non_contrib.auth, expect_errors=True
)
assert res.status_code == 403
def test_cannot_make_project_public_if_non_admin_contributor(
self, app, project_private, url_private, make_node_payload):
non_admin = AuthUserFactory()
project_private.add_contributor(
non_admin,
permissions=(permissions.READ, permissions.WRITE),
auth=Auth(project_private.creator)
)
project_private.save()
res = app.patch_json(
url_private,
make_node_payload(project_private, {'public': True}),
auth=non_admin.auth, expect_errors=True
)
assert res.status_code == 403
project_private.reload()
assert not project_private.is_public
def test_can_make_project_public_if_admin_contributor(
self, app, project_private, url_private, make_node_payload):
with assert_latest_log(NodeLog.MADE_PUBLIC, project_private):
admin_user = AuthUserFactory()
project_private.add_contributor(
admin_user,
permissions=(permissions.READ,
permissions.WRITE,
permissions.ADMIN),
auth=Auth(project_private.creator))
project_private.save()
res = app.patch_json_api(
url_private,
make_node_payload(project_private, {'public': True}),
                auth=admin_user.auth  # admin_user was granted ADMIN above
)
assert res.status_code == 200
project_private.reload()
assert project_private.is_public
def test_update_errors(
self, app, user, user_two, title_new, description_new,
category_new, project_public, project_private,
url_public, url_private):
# test_update_project_properties_not_nested
res = app.put_json_api(url_public, {
'id': project_public._id,
'type': 'nodes',
'title': title_new,
'description': description_new,
'category': category_new,
'public': True,
}, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Request must include /data.'
assert res.json['errors'][0]['source']['pointer'] == '/data'
# test_update_invalid_id
res = app.put_json_api(url_public, {
'data': {
'id': '12345',
'type': 'nodes',
'attributes': {
'title': title_new,
'description': description_new,
'category': category_new,
'public': True
}
}
}, auth=user.auth, expect_errors=True)
assert res.status_code == 409
# test_update_invalid_type
res = app.put_json_api(url_public, {
'data': {
'id': project_public._id,
'type': 'node',
'attributes': {
'title': title_new,
'description': description_new,
'category': category_new,
'public': True
}
}
}, auth=user.auth, expect_errors=True)
assert res.status_code == 409
# test_update_no_id
res = app.put_json_api(url_public, {
'data': {
'type': 'nodes',
'attributes': {
'title': title_new,
'description': description_new,
'category': category_new,
'public': True
}
}
}, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'This field may not be null.'
assert res.json['errors'][0]['source']['pointer'] == '/data/id'
# test_update_no_type
res = app.put_json_api(url_public, {
'data': {
'id': project_public._id,
'attributes': {
'title': title_new,
'description': description_new,
'category': category_new,
'public': True
}
}
}, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'This field may not be null.'
assert res.json['errors'][0]['source']['pointer'] == '/data/type'
# test_update_public_project_logged_out
res = app.put_json_api(url_public, {
'data': {
'id': project_public._id,
'type': 'nodes',
'attributes': {
'title': title_new,
'description': description_new,
'category': category_new,
'public': True
}
}
}, expect_errors=True)
assert res.status_code == 401
assert 'detail' in res.json['errors'][0]
# test_update_project_invalid_title
project = {
'data': {
'type': 'nodes',
'id': project_public._id,
'attributes': {
'title': 'A' * 201,
'category': 'project',
}
}
}
res = app.put_json_api(
url_public, project,
auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Title cannot exceed 200 characters.'
# test_update_public_project_logged_in_but_unauthorized
res = app.put_json_api(url_public, {
'data': {
'id': project_private._id,
'type': 'nodes',
'attributes': {
'title': title_new,
'description': description_new,
'category': category_new,
'public': True
}
}
}, auth=user_two.auth, expect_errors=True)
assert res.status_code == 403
assert 'detail' in res.json['errors'][0]
# test_update_private_project_logged_out
res = app.put_json_api(url_private, {
'data': {
'id': project_private._id,
'type': 'nodes',
'attributes': {
'title': title_new,
'description': description_new,
'category': category_new,
'public': False
}
}
}, expect_errors=True)
assert res.status_code == 401
assert 'detail' in res.json['errors'][0]
# test_update_private_project_logged_in_non_contributor
res = app.put_json_api(url_private, {
'data': {
'id': project_private._id,
'type': 'nodes',
'attributes': {
'title': title_new,
'description': description_new,
'category': category_new,
'public': False
}
}
}, auth=user_two.auth, expect_errors=True)
assert res.status_code == 403
assert 'detail' in res.json['errors'][0]
def test_update_public_project_logged_in(
self, app, user, title_new, description_new,
category_new, project_public, url_public):
with assert_latest_log(NodeLog.UPDATED_FIELDS, project_public):
res = app.put_json_api(url_public, {
'data': {
'id': project_public._id,
'type': 'nodes',
'attributes': {
'title': title_new,
'description': description_new,
'category': category_new,
'public': True
}
}
}, auth=user.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == title_new
assert res.json['data']['attributes']['description'] == description_new
assert res.json['data']['attributes']['category'] == category_new
def test_cannot_update_a_registration(self, app, user, project_public):
registration = RegistrationFactory(
project=project_public, creator=user)
original_title = registration.title
original_description = registration.description
url = '/{}nodes/{}/'.format(API_BASE, registration._id)
res = app.put_json_api(url, {
'data': {
'id': registration._id,
'type': 'nodes',
'attributes': {
'title': fake.catch_phrase(),
'description': fake.bs(),
'category': 'hypothesis',
'public': True
}
}
}, auth=user.auth, expect_errors=True)
registration.reload()
assert res.status_code == 404
assert registration.title == original_title
assert registration.description == original_description
def test_update_private_project_logged_in_contributor(
self, app, user, title_new, description_new,
category_new, project_private, url_private):
with assert_latest_log(NodeLog.UPDATED_FIELDS, project_private):
res = app.put_json_api(url_private, {
'data': {
'id': project_private._id,
'type': 'nodes',
'attributes': {
'title': title_new,
'description': description_new,
'category': category_new,
'public': False
}
}
}, auth=user.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == title_new
assert res.json['data']['attributes']['description'] == description_new
assert res.json['data']['attributes']['category'] == category_new
def test_update_project_sanitizes_html_properly(
self, app, user, category_new, project_public, url_public):
with assert_latest_log(NodeLog.UPDATED_FIELDS, project_public):
"""Post request should update resource, and any HTML in fields should be stripped"""
new_title = '<strong>Super</strong> Cool Project'
new_description = 'An <script>alert("even cooler")</script> project'
res = app.put_json_api(url_public, {
'data': {
'id': project_public._id,
'type': 'nodes',
'attributes': {
'title': new_title,
'description': new_description,
'category': category_new,
'public': True,
}
}
}, auth=user.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == strip_html(
new_title)
assert res.json['data']['attributes']['description'] == strip_html(
new_description)
def test_partial_update_project_updates_project_correctly_and_sanitizes_html(
self, app, user, description, category, project_public, url_public):
with assert_latest_log(NodeLog.EDITED_TITLE, project_public):
new_title = 'An <script>alert("even cooler")</script> project'
res = app.patch_json_api(url_public, {
'data': {
'id': project_public._id,
'type': 'nodes',
'attributes': {
'title': new_title
}
}
}, auth=user.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
res = app.get(url_public)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == strip_html(
new_title)
assert res.json['data']['attributes']['description'] == description
assert res.json['data']['attributes']['category'] == category
def test_partial_update_public_project_logged_in(
self, app, user, title_new, description,
category, project_public, url_public):
with assert_latest_log(NodeLog.EDITED_TITLE, project_public):
res = app.patch_json_api(url_public, {
'data': {
'id': project_public._id,
'type': 'nodes',
'attributes': {
'title': title_new,
}
}
}, auth=user.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == title_new
assert res.json['data']['attributes']['description'] == description
assert res.json['data']['attributes']['category'] == category
def test_write_to_public_field_non_contrib_forbidden(
self, app, user_two, project_public, url_public):
# Test non-contrib writing to public field
res = app.patch_json_api(url_public, {
'data': {
'attributes': {
'public': False},
'id': project_public._id,
'type': 'nodes'
}
}, auth=user_two.auth, expect_errors=True)
assert res.status_code == 403
assert 'detail' in res.json['errors'][0]
def test_partial_update_errors(
self, app, user, user_two, title_new,
project_public, project_private,
url_public, url_private):
# test_partial_update_public_project_logged_out
res = app.patch_json_api(url_public, {
'data': {
'id': project_public._id,
'type': 'nodes',
'attributes': {
'title': title_new
}
}
}, expect_errors=True)
assert res.status_code == 401
assert 'detail' in res.json['errors'][0]
# test_partial_update_public_project_logged_in_but_unauthorized
# Public resource, logged in, unauthorized
res = app.patch_json_api(url_public, {
'data': {
'attributes': {
'title': title_new},
'id': project_public._id,
'type': 'nodes',
}
}, auth=user_two.auth, expect_errors=True)
assert res.status_code == 403
assert 'detail' in res.json['errors'][0]
# test_partial_update_private_project_logged_out
res = app.patch_json_api(url_private, {
'data': {
'id': project_private._id,
'type': 'nodes',
'attributes': {
'title': title_new
}
}
}, expect_errors=True)
assert res.status_code == 401
assert 'detail' in res.json['errors'][0]
# test_partial_update_private_project_logged_in_non_contributor
res = app.patch_json_api(url_private, {
'data': {
'attributes': {
'title': title_new},
'id': project_private._id,
'type': 'nodes',
}
}, auth=user_two.auth, expect_errors=True)
assert res.status_code == 403
assert 'detail' in res.json['errors'][0]
# test_partial_update_invalid_id
res = app.patch_json_api(url_public, {
'data': {
'id': '12345',
'type': 'nodes',
'attributes': {
'title': title_new,
}
}
}, auth=user.auth, expect_errors=True)
assert res.status_code == 409
# test_partial_update_invalid_type
res = app.patch_json_api(url_public, {
'data': {
'id': project_public._id,
'type': 'node',
'attributes': {
'title': title_new,
}
}
}, auth=user.auth, expect_errors=True)
assert res.status_code == 409
# test_partial_update_no_id
res = app.patch_json_api(url_public, {
'data': {
'type': 'nodes',
'attributes': {
'title': title_new,
}
}
}, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'This field may not be null.'
assert res.json['errors'][0]['source']['pointer'] == '/data/id'
# test_partial_update_no_type
res = app.patch_json_api(url_public, {
'data': {
'id': project_public._id,
'attributes': {
'title': title_new,
}
}
}, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'This field may not be null.'
assert res.json['errors'][0]['source']['pointer'] == '/data/type'
# Nothing will be updated here
# test_partial_update_project_properties_not_nested
res = app.patch_json_api(url_public, {
'data': {
'id': project_public._id,
'type': 'nodes',
'title': title_new,
}
}, auth=user.auth, expect_errors=True)
assert res.status_code == 400
def test_partial_update_private_project_logged_in_contributor(
self, app, user, title_new, description, category, project_private, url_private):
with assert_latest_log(NodeLog.EDITED_TITLE, project_private):
res = app.patch_json_api(url_private, {
'data': {
'attributes': {
'title': title_new},
'id': project_private._id,
'type': 'nodes',
}
}, auth=user.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == title_new
assert res.json['data']['attributes']['description'] == description
assert res.json['data']['attributes']['category'] == category
def test_multiple_patch_requests_with_same_category_generates_one_log(
self, app, user, project_private, url_private, make_node_payload):
project_private.category = 'project'
project_private.save()
new_category = 'data'
payload = make_node_payload(
project_private,
attributes={'category': new_category})
original_n_logs = project_private.logs.count()
res = app.patch_json_api(url_private, payload, auth=user.auth)
assert res.status_code == 200
project_private.reload()
assert project_private.category == new_category
assert project_private.logs.count() == original_n_logs + 1 # sanity check
app.patch_json_api(url_private, payload, auth=user.auth)
project_private.reload()
assert project_private.category == new_category
assert project_private.logs.count() == original_n_logs + 1
def test_public_project_with_publicly_editable_wiki_turns_private(
self, app, user, project_public, url_public, make_node_payload):
wiki = project_public.get_addon('wiki')
wiki.set_editing(permissions=True, auth=Auth(user=user), log=True)
res = app.patch_json_api(
url_public,
make_node_payload(project_public, {'public': False}),
            auth=user.auth  # user is the project creator/admin
)
assert res.status_code == 200
@mock.patch('website.identifiers.tasks.update_ezid_metadata_on_change.s')
def test_set_node_private_updates_ezid(
self, mock_update_ezid_metadata, app, user, project_public,
url_public, make_node_payload):
IdentifierFactory(referent=project_public, category='doi')
res = app.patch_json_api(
url_public,
make_node_payload(
project_public,
{'public': False}),
auth=user.auth)
assert res.status_code == 200
project_public.reload()
assert not project_public.is_public
mock_update_ezid_metadata.assert_called_with(
project_public._id, status='unavailable')
@mock.patch('website.preprints.tasks.update_ezid_metadata_on_change')
def test_set_node_with_preprint_private_updates_ezid(
self, mock_update_ezid_metadata, app, user,
project_public, url_public, make_node_payload):
target_object = PreprintFactory(project=project_public)
res = app.patch_json_api(
url_public,
make_node_payload(
project_public,
{'public': False}),
auth=user.auth)
assert res.status_code == 200
project_public.reload()
assert not project_public.is_public
mock_update_ezid_metadata.assert_called_with(
target_object._id, status='unavailable')
@pytest.mark.django_db
class TestNodeDelete(NodeCRUDTestCase):
def test_deletes_node_errors(
self, app, user, user_two, project_public,
project_private, url_public, url_private,
url_fake):
# test_deletes_public_node_logged_out
res = app.delete(url_public, expect_errors=True)
assert res.status_code == 401
assert 'detail' in res.json['errors'][0]
# test_deletes_public_node_fails_if_unauthorized
res = app.delete_json_api(
url_public,
auth=user_two.auth,
expect_errors=True)
project_public.reload()
assert res.status_code == 403
assert project_public.is_deleted is False
assert 'detail' in res.json['errors'][0]
# test_deletes_private_node_logged_out
res = app.delete(url_private, expect_errors=True)
assert res.status_code == 401
assert 'detail' in res.json['errors'][0]
# test_deletes_private_node_logged_in_non_contributor
res = app.delete(url_private, auth=user_two.auth, expect_errors=True)
project_private.reload()
assert res.status_code == 403
assert project_private.is_deleted is False
assert 'detail' in res.json['errors'][0]
# test_deletes_invalid_node
res = app.delete(url_fake, auth=user.auth, expect_errors=True)
assert res.status_code == 404
assert 'detail' in res.json['errors'][0]
def test_deletes_private_node_logged_in_read_only_contributor(
self, app, user_two, project_private, url_private):
project_private.add_contributor(
user_two, permissions=[permissions.READ])
project_private.save()
res = app.delete(url_private, auth=user_two.auth, expect_errors=True)
project_private.reload()
assert res.status_code == 403
assert project_private.is_deleted is False
assert 'detail' in res.json['errors'][0]
def test_delete_project_with_component_returns_error(self, app, user):
project = ProjectFactory(creator=user)
NodeFactory(parent=project, creator=user)
# Return a 400 because component must be deleted before deleting the
# parent
res = app.delete_json_api(
'/{}nodes/{}/'.format(API_BASE, project._id),
auth=user.auth,
expect_errors=True
)
assert res.status_code == 400
errors = res.json['errors']
assert len(errors) == 1
assert (
errors[0]['detail'] ==
'Any child components must be deleted prior to deleting this project.')
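        # Editorial note: the API does not cascade-delete; a client would first
        # have to delete each child node (presumably via the same
        # '/{}nodes/{}/'-style detail endpoint) before deleting the parent.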
def test_delete_bookmark_collection_returns_error(self, app, user):
bookmark_collection = find_bookmark_collection(user)
res = app.delete_json_api(
'/{}nodes/{}/'.format(API_BASE, bookmark_collection._id),
auth=user.auth,
expect_errors=True
)
# Bookmark collections are collections, so a 404 is returned
assert res.status_code == 404
@mock.patch('website.identifiers.tasks.update_ezid_metadata_on_change.s')
def test_delete_node_with_preprint_calls_preprint_update_status(
self, mock_update_ezid_metadata_on_change, app, user,
project_public, url_public):
PreprintFactory(project=project_public)
app.delete_json_api(url_public, auth=user.auth, expect_errors=True)
project_public.reload()
assert mock_update_ezid_metadata_on_change.called
@mock.patch('website.identifiers.tasks.update_ezid_metadata_on_change.s')
def test_delete_node_with_identifier_calls_preprint_update_status(
self, mock_update_ezid_metadata_on_change, app, user,
project_public, url_public):
IdentifierFactory(referent=project_public, category='doi')
app.delete_json_api(url_public, auth=user.auth, expect_errors=True)
project_public.reload()
assert mock_update_ezid_metadata_on_change.called
def test_deletes_public_node_succeeds_as_owner(
self, app, user, project_public, url_public):
with assert_latest_log(NodeLog.PROJECT_DELETED, project_public):
res = app.delete_json_api(
url_public, auth=user.auth, expect_errors=True)
project_public.reload()
assert res.status_code == 204
assert project_public.is_deleted is True
def test_requesting_deleted_returns_410(
self, app, project_public, url_public):
project_public.is_deleted = True
project_public.save()
res = app.get(url_public, expect_errors=True)
assert res.status_code == 410
assert 'detail' in res.json['errors'][0]
def test_deletes_private_node_logged_in_contributor(
self, app, user, project_private, url_private):
with assert_latest_log(NodeLog.PROJECT_DELETED, project_private):
res = app.delete(url_private, auth=user.auth, expect_errors=True)
project_private.reload()
assert res.status_code == 204
assert project_private.is_deleted is True
@pytest.mark.django_db
class TestReturnDeletedNode:
@pytest.fixture()
def project_public_deleted(self, user):
return ProjectFactory(
is_deleted=True,
creator=user,
title='This public project has been deleted',
category='project',
is_public=True
)
@pytest.fixture()
def project_private_deleted(self, user):
return ProjectFactory(
is_deleted=True,
creator=user,
title='This private project has been deleted',
category='project',
is_public=False
)
@pytest.fixture()
def title_new(self):
return 'This deleted node has been edited'
@pytest.fixture()
def url_project_public_deleted(self, project_public_deleted):
return '/{}nodes/{}/'.format(API_BASE, project_public_deleted._id)
@pytest.fixture()
def url_project_private_deleted(self, project_private_deleted):
return '/{}nodes/{}/'.format(API_BASE, project_private_deleted._id)
def test_return_deleted_node(
self, app, user, title_new, project_public_deleted,
project_private_deleted, url_project_public_deleted,
url_project_private_deleted):
# test_return_deleted_public_node
res = app.get(url_project_public_deleted, expect_errors=True)
assert res.status_code == 410
# test_return_deleted_private_node
res = app.get(
url_project_private_deleted,
auth=user.auth,
expect_errors=True)
assert res.status_code == 410
# test_edit_deleted_public_node
res = app.put_json_api(
url_project_public_deleted,
params={
'title': title_new,
'node_id': project_public_deleted._id,
'category': project_public_deleted.category
},
auth=user.auth, expect_errors=True)
assert res.status_code == 410
# test_edit_deleted_private_node
res = app.put_json_api(
url_project_private_deleted,
params={
'title': title_new,
'node_id': project_private_deleted._id,
'category': project_private_deleted.category
},
auth=user.auth, expect_errors=True)
assert res.status_code == 410
# test_delete_deleted_public_node
res = app.delete(
url_project_public_deleted,
auth=user.auth,
expect_errors=True)
assert res.status_code == 410
# test_delete_deleted_private_node
res = app.delete(
url_project_private_deleted,
auth=user.auth,
expect_errors=True)
assert res.status_code == 410
@pytest.mark.django_db
class TestNodeTags:
@pytest.fixture()
def user_admin(self):
return AuthUserFactory()
@pytest.fixture()
def user_non_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def user_read_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def project_public(self, user, user_admin):
project_public = ProjectFactory(
title='Project One', is_public=True, creator=user)
project_public.add_contributor(
user_admin,
permissions=permissions.CREATOR_PERMISSIONS,
save=True)
project_public.add_contributor(
user, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS, save=True)
return project_public
@pytest.fixture()
def project_private(self, user, user_admin):
project_private = ProjectFactory(
title='Project Two', is_public=False, creator=user)
project_private.add_contributor(
user_admin, permissions=permissions.CREATOR_PERMISSIONS, save=True)
project_private.add_contributor(
user, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS, save=True)
return project_private
@pytest.fixture()
def url_public(self, project_public):
return '/{}nodes/{}/'.format(API_BASE, project_public._id)
@pytest.fixture()
def url_private(self, project_private):
return '/{}nodes/{}/'.format(API_BASE, project_private._id)
@pytest.fixture()
def payload_public(self, project_public):
return {
'data': {
'id': project_public._id,
'type': 'nodes',
'attributes': {
'tags': ['new-tag']
}
}
}
@pytest.fixture()
def payload_private(self, project_private):
return {
'data': {
'id': project_private._id,
'type': 'nodes',
'attributes': {
'tags': ['new-tag']
}
}
}
def test_public_project_starts_with_no_tags(self, app, url_public):
res = app.get(url_public)
assert res.status_code == 200
assert len(res.json['data']['attributes']['tags']) == 0
def test_node_detail_does_not_expose_system_tags(
self, app, project_public, url_public):
project_public.add_system_tag('systag', save=True)
res = app.get(url_public)
assert res.status_code == 200
assert len(res.json['data']['attributes']['tags']) == 0
def test_contributor_can_add_tag_to_public_project(
self, app, user, project_public, payload_public, url_public):
with assert_latest_log(NodeLog.TAG_ADDED, project_public):
res = app.patch_json_api(
url_public,
payload_public,
auth=user.auth,
expect_errors=True)
assert res.status_code == 200
# Ensure data is correct from the PATCH response
assert len(res.json['data']['attributes']['tags']) == 1
assert res.json['data']['attributes']['tags'][0] == 'new-tag'
# Ensure data is correct in the database
project_public.reload()
assert project_public.tags.count() == 1
assert project_public.tags.first()._id == 'new-tag'
# Ensure data is correct when GETting the resource again
reload_res = app.get(url_public)
assert len(reload_res.json['data']['attributes']['tags']) == 1
assert reload_res.json['data']['attributes']['tags'][0] == 'new-tag'
def test_contributor_can_add_tag_to_private_project(
self, app, user, project_private, payload_private, url_private):
with assert_latest_log(NodeLog.TAG_ADDED, project_private):
res = app.patch_json_api(
url_private, payload_private, auth=user.auth)
assert res.status_code == 200
# Ensure data is correct from the PATCH response
assert len(res.json['data']['attributes']['tags']) == 1
assert res.json['data']['attributes']['tags'][0] == 'new-tag'
# Ensure data is correct in the database
project_private.reload()
assert project_private.tags.count() == 1
assert project_private.tags.first()._id == 'new-tag'
# Ensure data is correct when GETting the resource again
reload_res = app.get(url_private, auth=user.auth)
assert len(reload_res.json['data']['attributes']['tags']) == 1
assert reload_res.json['data']['attributes']['tags'][0] == 'new-tag'
def test_partial_update_project_does_not_clear_tags(
self, app, user_admin, project_private, payload_private, url_private):
res = app.patch_json_api(
url_private,
payload_private,
auth=user_admin.auth)
assert res.status_code == 200
assert len(res.json['data']['attributes']['tags']) == 1
new_payload = {
'data': {
'id': project_private._id,
'type': 'nodes',
'attributes': {
'public': True
}
}
}
res = app.patch_json_api(
url_private,
new_payload,
auth=user_admin.auth)
assert res.status_code == 200
assert len(res.json['data']['attributes']['tags']) == 1
new_payload['data']['attributes']['public'] = False
res = app.patch_json_api(
url_private,
new_payload,
auth=user_admin.auth)
assert res.status_code == 200
assert len(res.json['data']['attributes']['tags']) == 1
def test_add_tag_to_project_errors(
self, app, user_non_contrib, user_read_contrib,
payload_public, payload_private,
url_public, url_private):
# test_non_authenticated_user_cannot_add_tag_to_public_project
res = app.patch_json_api(
url_public, payload_public,
expect_errors=True, auth=None)
assert res.status_code == 401
# test_non_authenticated_user_cannot_add_tag_to_private_project
res = app.patch_json_api(
url_private, payload_private,
expect_errors=True, auth=None)
assert res.status_code == 401
# test_non_contributor_cannot_add_tag_to_public_project
res = app.patch_json_api(
url_public, payload_public,
expect_errors=True, auth=user_non_contrib.auth)
assert res.status_code == 403
# test_non_contributor_cannot_add_tag_to_private_project
res = app.patch_json_api(
url_private, payload_private,
expect_errors=True, auth=user_non_contrib.auth)
assert res.status_code == 403
# test_read_only_contributor_cannot_add_tag_to_public_project
res = app.patch_json_api(
url_public, payload_public,
expect_errors=True,
auth=user_read_contrib.auth)
assert res.status_code == 403
# test_read_only_contributor_cannot_add_tag_to_private_project
res = app.patch_json_api(
url_private, payload_private,
expect_errors=True,
auth=user_read_contrib.auth)
assert res.status_code == 403
def test_tags_add_and_remove_properly(
self, app, user, project_private,
payload_private, url_private):
with assert_latest_log(NodeLog.TAG_ADDED, project_private):
res = app.patch_json_api(
url_private, payload_private, auth=user.auth)
assert res.status_code == 200
# Ensure adding tag data is correct from the PATCH response
assert len(res.json['data']['attributes']['tags']) == 1
assert res.json['data']['attributes']['tags'][0] == 'new-tag'
with assert_latest_log(NodeLog.TAG_REMOVED, project_private), assert_latest_log(NodeLog.TAG_ADDED, project_private, 1):
# Ensure removing and adding tag data is correct from the PATCH
# response
res = app.patch_json_api(
url_private,
{
'data': {
'id': project_private._id,
'type': 'nodes',
'attributes': {'tags': ['newer-tag']}
}
}, auth=user.auth)
assert res.status_code == 200
assert len(res.json['data']['attributes']['tags']) == 1
assert res.json['data']['attributes']['tags'][0] == 'newer-tag'
with assert_latest_log(NodeLog.TAG_REMOVED, project_private):
# Ensure removing tag data is correct from the PATCH response
res = app.patch_json_api(
url_private,
{
'data': {
'id': project_private._id,
'type': 'nodes',
'attributes': {'tags': []}
}
}, auth=user.auth)
assert res.status_code == 200
assert len(res.json['data']['attributes']['tags']) == 0
def test_tags_post_object_instead_of_list(self, user, app):
url = '/{}nodes/'.format(API_BASE)
payload = {'data': {
'type': 'nodes',
'attributes': {
'title': 'new title',
'category': 'project',
'tags': {'foo': 'bar'}
}
}}
res = app.post_json_api(
url, payload, auth=user.auth,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Expected a list of items but got type "dict".'
def test_tags_patch_object_instead_of_list(
self, app, user, payload_public, url_public):
payload_public['data']['attributes']['tags'] = {'foo': 'bar'}
res = app.patch_json_api(
url_public, payload_public,
auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Expected a list of items but got type "dict".'
@pytest.mark.django_db
class TestNodeLicense:
@pytest.fixture()
def user_admin(self):
return AuthUserFactory()
@pytest.fixture()
def user_two(self):
return AuthUserFactory()
@pytest.fixture()
def user_read_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def license_name(self):
return 'MIT License'
@pytest.fixture()
def node_license(self, license_name):
return NodeLicense.objects.filter(name=license_name).first()
@pytest.fixture()
def year(self):
return '2105'
@pytest.fixture()
def copyright_holders(self):
return ['Foo', 'Bar']
@pytest.fixture()
def project_public(
self, user, user_admin, node_license,
year, copyright_holders):
project_public = ProjectFactory(
title='Project One', is_public=True, creator=user)
project_public.add_contributor(
user_admin,
permissions=permissions.CREATOR_PERMISSIONS,
save=True)
project_public.add_contributor(
user, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS, save=True)
project_public.node_license = NodeLicenseRecordFactory(
node_license=node_license,
year=year,
copyright_holders=copyright_holders
)
project_public.save()
return project_public
@pytest.fixture()
def project_private(
self, user, user_admin, node_license,
year, copyright_holders):
project_private = ProjectFactory(
title='Project Two', is_public=False, creator=user)
project_private.add_contributor(
user_admin, permissions=permissions.CREATOR_PERMISSIONS, save=True)
project_private.add_contributor(
user, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS, save=True)
project_private.node_license = NodeLicenseRecordFactory(
node_license=node_license,
year=year,
copyright_holders=copyright_holders
)
project_private.save()
return project_private
@pytest.fixture()
def url_public(self, project_public):
return '/{}nodes/{}/'.format(API_BASE, project_public._id)
@pytest.fixture()
def url_private(self, project_private):
return '/{}nodes/{}/'.format(API_BASE, project_private._id)
def test_node_has(
self, app, user, node_license, project_public,
project_private, url_private, url_public):
# test_public_node_has_node_license
res = app.get(url_public)
assert project_public.node_license.year == res.json[
'data']['attributes']['node_license']['year']
# test_public_node_has_license_relationship
res = app.get(url_public)
expected_license_url = '/{}licenses/{}'.format(
API_BASE, node_license._id)
actual_license_url = res.json['data']['relationships']['license']['links']['related']['href']
assert expected_license_url in actual_license_url
# test_private_node_has_node_license
res = app.get(url_private, auth=user.auth)
assert project_private.node_license.year == res.json[
'data']['attributes']['node_license']['year']
# test_private_node_has_license_relationship
res = app.get(url_private, auth=user.auth)
expected_license_url = '/{}licenses/{}'.format(
API_BASE, node_license._id)
actual_license_url = res.json['data']['relationships']['license']['links']['related']['href']
assert expected_license_url in actual_license_url
def test_component_return_parent_license_if_no_license(
self, app, user, node_license, project_public):
node = NodeFactory(parent=project_public, creator=user)
node.save()
node_url = '/{}nodes/{}/'.format(API_BASE, node._id)
res = app.get(node_url, auth=user.auth)
assert not node.node_license
assert project_public.node_license.year == \
res.json['data']['attributes']['node_license']['year']
actual_license_url = res.json['data']['relationships']['license']['links']['related']['href']
expected_license_url = '/{}licenses/{}'.format(
API_BASE, node_license._id)
assert expected_license_url in actual_license_url
@pytest.mark.django_db
class TestNodeUpdateLicense:
@pytest.fixture()
def user_admin_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def user_write_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def user_read_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def user_non_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def node(self, user_admin_contrib, user_write_contrib, user_read_contrib):
node = NodeFactory(creator=user_admin_contrib)
node.add_contributor(user_write_contrib, auth=Auth(user_admin_contrib))
node.add_contributor(
user_read_contrib,
auth=Auth(user_admin_contrib),
permissions=['read'])
node.save()
return node
@pytest.fixture()
def license_cc0(self):
return NodeLicense.objects.filter(name='CC0 1.0 Universal').first()
@pytest.fixture()
def license_mit(self):
return NodeLicense.objects.filter(name='MIT License').first()
@pytest.fixture()
def license_no(self):
return NodeLicense.objects.get(name='No license')
@pytest.fixture()
def url_node(self, node):
return '/{}nodes/{}/'.format(API_BASE, node._id)
@pytest.fixture()
def make_payload(self):
def payload(
node_id, license_id=None, license_year=None,
copyright_holders=None):
attributes = {}
if license_year and copyright_holders:
attributes = {
'node_license': {
'year': license_year,
'copyright_holders': copyright_holders
}
}
elif license_year:
attributes = {
'node_license': {
'year': license_year
}
}
elif copyright_holders:
attributes = {
'node_license': {
'copyright_holders': copyright_holders
}
}
return {
'data': {
'type': 'nodes',
'id': node_id,
'attributes': attributes,
'relationships': {
'license': {
'data': {
'type': 'licenses',
'id': license_id
}
}
}
}
} if license_id else {
'data': {
'type': 'nodes',
'id': node_id,
'attributes': attributes
}
}
return payload
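    # Illustrative sketch (editorial, not in the original file): with both a
    # license_id and attributes supplied, make_payload produces roughly
    #   {'data': {'type': 'nodes', 'id': node_id,
    #             'attributes': {'node_license': {'year': ...,
    #                                             'copyright_holders': [...]}},
    #             'relationships': {'license': {'data': {'type': 'licenses',
    #                                                    'id': license_id}}}}}
    # i.e. the license itself travels as a JSON:API relationship while year and
    # copyright holders travel as ordinary attributes.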
@pytest.fixture()
def make_request(self, app):
def request(url, data, auth=None, expect_errors=False):
return app.patch_json_api(
url, data, auth=auth, expect_errors=expect_errors)
return request
def test_admin_update_license_with_invalid_id(
self, user_admin_contrib, node, make_payload,
make_request, url_node):
data = make_payload(
node_id=node._id,
license_id='thisisafakelicenseid'
)
assert node.node_license is None
res = make_request(
url_node, data,
auth=user_admin_contrib.auth,
expect_errors=True)
assert res.status_code == 404
assert res.json['errors'][0]['detail'] == 'Unable to find specified license.'
node.reload()
assert node.node_license is None
def test_admin_can_update_license(
self, user_admin_contrib, node,
make_payload, make_request,
license_cc0, url_node):
data = make_payload(
node_id=node._id,
license_id=license_cc0._id
)
assert node.node_license is None
res = make_request(url_node, data, auth=user_admin_contrib.auth)
assert res.status_code == 200
node.reload()
assert node.node_license.node_license == license_cc0
assert node.node_license.year is None
assert node.node_license.copyright_holders == []
def test_admin_can_update_license_record(
self, user_admin_contrib, node,
make_payload, make_request,
license_no, url_node):
data = make_payload(
node_id=node._id,
license_id=license_no._id,
license_year='2015',
copyright_holders=['Mr. Monument', 'Princess OSF']
)
assert node.node_license is None
res = make_request(url_node, data, auth=user_admin_contrib.auth)
assert res.status_code == 200
node.reload()
assert node.node_license.node_license == license_no
assert node.node_license.year == '2015'
assert node.node_license.copyright_holders == [
'Mr. Monument', 'Princess OSF']
def test_cannot_update(
self, user_write_contrib, user_read_contrib,
user_non_contrib, node, make_payload,
make_request, license_cc0, url_node):
# def test_rw_contributor_cannot_update_license(self):
data = make_payload(
node_id=node._id,
license_id=license_cc0._id
)
res = make_request(
url_node, data,
auth=user_write_contrib.auth,
expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
# def test_read_contributor_cannot_update_license(self):
data = make_payload(
node_id=node._id,
license_id=license_cc0._id
)
res = make_request(
url_node, data,
auth=user_read_contrib.auth,
expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
# def test_non_contributor_cannot_update_license(self):
data = make_payload(
node_id=node._id,
license_id=license_cc0._id
)
res = make_request(
url_node, data,
auth=user_non_contrib.auth,
expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
# def test_unauthenticated_user_cannot_update_license(self):
data = make_payload(
node_id=node._id,
license_id=license_cc0._id
)
res = make_request(url_node, data, expect_errors=True)
assert res.status_code == 401
assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail
def test_update_node_with_existing_license_year_attribute_only(
self, user_admin_contrib, node, make_payload,
make_request, license_no, url_node):
node.set_node_license(
{
'id': license_no.license_id,
'year': '2014',
'copyrightHolders': ['Reason', 'Mr. E']
},
Auth(user_admin_contrib),
)
node.save()
assert node.node_license.node_license == license_no
assert node.node_license.year == '2014'
assert node.node_license.copyright_holders == ['Reason', 'Mr. E']
data = make_payload(
node_id=node._id,
license_year='2015'
)
res = make_request(url_node, data, auth=user_admin_contrib.auth)
assert res.status_code == 200
node.node_license.reload()
assert node.node_license.node_license == license_no
assert node.node_license.year == '2015'
assert node.node_license.copyright_holders == ['Reason', 'Mr. E']
def test_update_node_with_existing_license_copyright_holders_attribute_only(
self, user_admin_contrib, node, make_payload, make_request, license_no, url_node):
node.set_node_license(
{
'id': license_no.license_id,
'year': '2014',
'copyrightHolders': ['Reason', 'Mr. E']
},
Auth(user_admin_contrib),
)
node.save()
assert node.node_license.node_license == license_no
assert node.node_license.year == '2014'
assert node.node_license.copyright_holders == ['Reason', 'Mr. E']
data = make_payload(
node_id=node._id,
copyright_holders=['Mr. Monument', 'Princess OSF']
)
res = make_request(url_node, data, auth=user_admin_contrib.auth)
assert res.status_code == 200
node.node_license.reload()
assert node.node_license.node_license == license_no
assert node.node_license.year == '2014'
assert node.node_license.copyright_holders == [
'Mr. Monument', 'Princess OSF']
def test_update_node_with_existing_license_relationship_only(
self, user_admin_contrib, node, make_payload,
make_request, license_cc0, license_no, url_node):
node.set_node_license(
{
'id': license_no.license_id,
'year': '2014',
'copyrightHolders': ['Reason', 'Mr. E']
},
Auth(user_admin_contrib),
)
node.save()
assert node.node_license.node_license == license_no
assert node.node_license.year == '2014'
assert node.node_license.copyright_holders == ['Reason', 'Mr. E']
data = make_payload(
node_id=node._id,
license_id=license_cc0._id
)
res = make_request(url_node, data, auth=user_admin_contrib.auth)
assert res.status_code == 200
node.node_license.reload()
assert node.node_license.node_license == license_cc0
assert node.node_license.year == '2014'
assert node.node_license.copyright_holders == ['Reason', 'Mr. E']
def test_update_node_with_existing_license_relationship_and_attributes(
self, user_admin_contrib, node, make_payload, make_request,
license_no, license_cc0, url_node):
node.set_node_license(
{
'id': license_no.license_id,
'year': '2014',
'copyrightHolders': ['Reason', 'Mr. E']
},
Auth(user_admin_contrib),
save=True
)
assert node.node_license.node_license == license_no
assert node.node_license.year == '2014'
assert node.node_license.copyright_holders == ['Reason', 'Mr. E']
data = make_payload(
node_id=node._id,
license_id=license_cc0._id,
license_year='2015',
copyright_holders=['Mr. Monument', 'Princess OSF']
)
res = make_request(url_node, data, auth=user_admin_contrib.auth)
assert res.status_code == 200
node.node_license.reload()
assert node.node_license.node_license == license_cc0
assert node.node_license.year == '2015'
assert node.node_license.copyright_holders == [
'Mr. Monument', 'Princess OSF']
def test_update_node_license_without_required_year_in_payload(
self, user_admin_contrib, node, make_payload,
make_request, license_no, url_node):
data = make_payload(
node_id=node._id,
license_id=license_no._id,
copyright_holders=['Rick', 'Morty']
)
res = make_request(
url_node, data,
auth=user_admin_contrib.auth,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'year must be specified for this license'
def test_update_node_license_without_required_copyright_holders_in_payload_(
self, user_admin_contrib, node, make_payload, make_request, license_no, url_node):
data = make_payload(
node_id=node._id,
license_id=license_no._id,
license_year='1994'
)
res = make_request(
url_node, data,
auth=user_admin_contrib.auth,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'copyrightHolders must be specified for this license'
def test_update_node_license_adds_log(
self, user_admin_contrib, node, make_payload,
make_request, license_cc0, url_node):
data = make_payload(
node_id=node._id,
license_id=license_cc0._id
)
logs_before_update = node.logs.count()
res = make_request(url_node, data, auth=user_admin_contrib.auth)
assert res.status_code == 200
node.reload()
logs_after_update = node.logs.count()
assert logs_before_update != logs_after_update
assert node.logs.latest().action == 'license_changed'
def test_update_node_license_without_change_does_not_add_log(
self, user_admin_contrib, node, make_payload,
make_request, license_no, url_node):
node.set_node_license(
{
'id': license_no.license_id,
'year': '2015',
'copyrightHolders': ['Kim', 'Kanye']
},
auth=Auth(user_admin_contrib),
save=True
)
before_num_logs = node.logs.count()
before_update_log = node.logs.latest()
data = make_payload(
node_id=node._id,
license_id=license_no._id,
license_year='2015',
copyright_holders=['Kanye', 'Kim']
)
res = make_request(url_node, data, auth=user_admin_contrib.auth)
node.reload()
after_num_logs = node.logs.count()
after_update_log = node.logs.latest()
assert res.status_code == 200
assert before_num_logs == after_num_logs
assert before_update_log._id == after_update_log._id
| leb2dg/osf.io | api_tests/nodes/views/test_node_detail.py | Python | apache-2.0 | 73,296 |
#!/usr/bin/env python2.6
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the add switch command."""
import unittest
import os
import socket
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
SW_HOSTNAME = "utpgsw0.aqd-unittest.ms.com"
class TestVlan(TestBrokerCommand):
def getswip(self):
return self.net.tor_net[10].usable[0]
def test_001_addvlan714(self):
command = ["add_vlan", "--vlan=714", "--name=user_714",
"--vlan_type=user"]
self.noouttest(command)
command = "show vlan --vlan 714"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Vlan: 714", command)
self.matchoutput(out, "Name: user_714", command)
def test_001_addutpgsw(self):
ip = self.getswip()
self.dsdb_expect_add(SW_HOSTNAME, ip, "xge49",
ip.mac)
command = ["add", "switch", "--type", "tor",
"--switch", SW_HOSTNAME, "--rack", "ut3",
"--model", "rs g8000", "--interface", "xge49",
"--mac", ip.mac, "--ip", ip]
self.ignoreoutputtest(command)
self.dsdb_verify()
def test_010_pollutpgsw(self):
command = ["poll", "switch", "--vlan", "--switch",
SW_HOSTNAME]
err = self.statustest(command)
self.matchoutput(err, "Using jump host nyaqd1.ms.com from service "
"instance poll_helper/unittest to run CheckNet for "
"switch utpgsw0.aqd-unittest.ms.com.", command)
self.matchoutput(err, "vlan 5 is not defined in AQ. Please use "
"add_vlan to add it.", command)
# Adding vlan 5 as unknown will suppress poll_switch vlan warning.
def test_012_addvlan5(self):
command = ["add_vlan", "--vlan=5", "--name=user_5",
"--vlan_type=unknown"]
self.noouttest(command)
command = "show vlan --vlan 5"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Vlan: 5", command)
self.matchoutput(out, "Name: user_5", command)
def test_012_pollutpgsw(self):
command = ["poll", "switch", "--vlan", "--switch",
SW_HOSTNAME]
err = self.statustest(command)
self.matchoutput(err, "Using jump host nyaqd1.ms.com from service "
"instance poll_helper/unittest to run CheckNet for "
"switch utpgsw0.aqd-unittest.ms.com.", command)
self.matchclean(err, "vlan 5 is not defined in AQ. Please use "
"add_vlan to add it.", command)
def test_015_searchswbyvlan(self):
command = ["search_switch", "--vlan=714",
"--format=csv"]
out = self.commandtest(command)
ip = self.getswip()
self.matchoutput(out,
"utpgsw0.aqd-unittest.ms.com,%s,tor,ut3,ut,bnt,"
"rs g8000,,xge49,%s" % (ip, ip.mac), command)
self.matchclean(out,
"ut3gd1r01.aqd-unittest.ms.com,4.2.5.8,bor,ut3,ut,hp,"
"uttorswitch,SNgd1r01,,", command)
def test_020_faildelvlan(self):
command = ["del_vlan", "--vlan=714"]
errOut = self.badrequesttest(command)
self.matchoutput(errOut,
"VlanInfo 714 is still in use and cannot be "
"deleted.", command)
# Unknown vlans have no dependencies, can be deleted.
def test_025_delvlan(self):
command = ["del_vlan", "--vlan=5"]
self.noouttest(command)
command = ["show_vlan", "--vlan=5"]
self.notfoundtest(command)
def test_030_delutpgsw(self):
self.dsdb_expect_delete(self.getswip())
command = "del switch --switch %s" % SW_HOSTNAME
self.noouttest(command.split(" "))
plenary = os.path.join(self.config.get("broker", "plenarydir"),
"switchdata", "%s.tpl" % SW_HOSTNAME)
self.failIf(os.path.exists(plenary),
"Plenary file '%s' still exists" % plenary)
self.dsdb_verify()
def test_040_delvlan(self):
command = ["del_vlan", "--vlan=714"]
self.noouttest(command)
command = ["show_vlan", "--vlan=714"]
self.notfoundtest(command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestVlan)
unittest.TextTestRunner(verbosity=2).run(suite)
| stdweird/aquilon | tests/broker/test_vlan.py | Python | apache-2.0 | 5,203 |
# -*- coding: utf-8 -*-
#
# QOpenScienceFramework documentation build configuration file, created by
# sphinx-quickstart on Thu May 12 11:05:40 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from recommonmark.parser import CommonMarkParser
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the version from QOpenScienceFramework
sys.path.insert(0, os.path.abspath("../.."))
import QOpenScienceFramework
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.autosummary',
'numpydoc'
]
numpydoc_show_class_members = True
numpydoc_show_inherited_class_members = False
numpydoc_class_members_toctree = False
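# Editorial note: the three numpydoc_* flags above mean class members are
# summarized on the class page itself, inherited members are hidden, and no
# per-member toctree entries are generated.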
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# Markdown support
# The suffix of source filenames.
source_suffix = ['.rst', '.md']
source_parsers = {
'.md': CommonMarkParser,
}
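# With this mapping, any .md file referenced from a toctree is parsed by
# recommonmark's CommonMarkParser alongside the .rst sources. (This reflects
# the recommonmark-era setup; newer Sphinx versions would use MyST instead.)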
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'QOpenScienceFramework'
copyright = u'2016, Daniel Schreij'
author = u'Daniel Schreij'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = QOpenScienceFramework.__version__
# The full version, including alpha/beta/rc tags.
release = QOpenScienceFramework.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'monokai'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = u'QOpenScienceFramework v1.1.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' users can customize the `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'QOpenScienceFrameworkdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'QOpenScienceFramework.tex', u'QOpenScienceFramework Documentation',
u'Daniel Schreij', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'qopenscienceframework', u'QOpenScienceFramework Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'QOpenScienceFramework', u'QOpenScienceFramework Documentation',
author, 'QOpenScienceFramework', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
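# Note (illustrative, not part of the original config): newer Sphinx releases
# prefer the named mapping form, e.g.
#   intersphinx_mapping = {'python': ('https://docs.python.org/3', None)}
# which resolves the same inventory but lets several targets coexist by name.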
| dschreij/osf-api-python-toolkit | docs/source/conf.py | Python | apache-2.0 | 10,111 |
"""
mbed CMSIS-DAP debugger
Copyright (c) 2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import conversion
| molejar/pyOCD | pyOCD/utility/__init__.py | Python | apache-2.0 | 619 |
from collections import namedtuple
from copy import copy, deepcopy
from datetime import datetime, timedelta
from textwrap import dedent
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from xray import Variable, Dataset, DataArray
from xray.core import indexing
from xray.core.variable import (Coordinate, as_variable, _as_compatible_data)
from xray.core.indexing import (NumpyIndexingAdapter, PandasIndexAdapter,
LazilyIndexedArray)
from xray.core.pycompat import PY3, OrderedDict
from . import TestCase, source_ndarray
class VariableSubclassTestCases(object):
def test_properties(self):
data = 0.5 * np.arange(10)
v = self.cls(['time'], data, {'foo': 'bar'})
self.assertEqual(v.dims, ('time',))
self.assertArrayEqual(v.values, data)
self.assertEqual(v.dtype, float)
self.assertEqual(v.shape, (10,))
self.assertEqual(v.size, 10)
self.assertEqual(v.nbytes, 80)
self.assertEqual(v.ndim, 1)
self.assertEqual(len(v), 10)
self.assertEqual(v.attrs, {'foo': u'bar'})
def test_attrs(self):
v = self.cls(['time'], 0.5 * np.arange(10))
self.assertEqual(v.attrs, {})
attrs = {'foo': 'bar'}
v.attrs = attrs
self.assertEqual(v.attrs, attrs)
self.assertIsInstance(v.attrs, OrderedDict)
v.attrs['foo'] = 'baz'
self.assertEqual(v.attrs['foo'], 'baz')
def test_getitem_dict(self):
v = self.cls(['x'], np.random.randn(5))
actual = v[{'x': 0}]
expected = v[0]
self.assertVariableIdentical(expected, actual)
def assertIndexedLikeNDArray(self, variable, expected_value0,
expected_dtype=None):
"""Given a 1-dimensional variable, verify that the variable is indexed
like a numpy.ndarray.
"""
self.assertEqual(variable[0].shape, ())
self.assertEqual(variable[0].ndim, 0)
self.assertEqual(variable[0].size, 1)
# test identity
self.assertTrue(variable.equals(variable.copy()))
self.assertTrue(variable.identical(variable.copy()))
# check value is equal for both ndarray and Variable
self.assertEqual(variable.values[0], expected_value0)
self.assertEqual(variable[0].values, expected_value0)
# check type or dtype is consistent for both ndarray and Variable
if expected_dtype is None:
# check output type instead of array dtype
self.assertEqual(type(variable.values[0]), type(expected_value0))
self.assertEqual(type(variable[0].values), type(expected_value0))
else:
self.assertEqual(variable.values[0].dtype, expected_dtype)
self.assertEqual(variable[0].values.dtype, expected_dtype)
def test_index_0d_int(self):
for value, dtype in [(0, np.int_),
(np.int32(0), np.int32)]:
x = self.cls(['x'], [value])
self.assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_float(self):
for value, dtype in [(0.5, np.float_),
(np.float32(0.5), np.float32)]:
x = self.cls(['x'], [value])
self.assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_string(self):
for value, dtype in [('foo', np.dtype('U3' if PY3 else 'S3')),
(u'foo', np.dtype('U3'))]:
x = self.cls(['x'], [value])
self.assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_datetime(self):
d = datetime(2000, 1, 1)
x = self.cls(['x'], [d])
self.assertIndexedLikeNDArray(x, np.datetime64(d))
x = self.cls(['x'], [np.datetime64(d)])
self.assertIndexedLikeNDArray(x, np.datetime64(d), 'datetime64[ns]')
x = self.cls(['x'], pd.DatetimeIndex([d]))
self.assertIndexedLikeNDArray(x, np.datetime64(d), 'datetime64[ns]')
def test_index_0d_timedelta64(self):
td = timedelta(hours=1)
x = self.cls(['x'], [np.timedelta64(td)])
self.assertIndexedLikeNDArray(x, np.timedelta64(td), 'timedelta64[ns]')
x = self.cls(['x'], pd.to_timedelta([td]))
self.assertIndexedLikeNDArray(x, np.timedelta64(td), 'timedelta64[ns]')
def test_index_0d_not_a_time(self):
d = np.datetime64('NaT')
x = self.cls(['x'], [d])
self.assertIndexedLikeNDArray(x, d, None)
def test_index_0d_object(self):
class HashableItemWrapper(object):
def __init__(self, item):
self.item = item
def __eq__(self, other):
return self.item == other.item
def __hash__(self):
return hash(self.item)
def __repr__(self):
return '%s(item=%r)' % (type(self).__name__, self.item)
item = HashableItemWrapper((1, 2, 3))
x = self.cls('x', [item])
self.assertIndexedLikeNDArray(x, item)
def test_index_and_concat_datetime(self):
# regression test for #125
date_range = pd.date_range('2011-09-01', periods=10)
for dates in [date_range, date_range.values,
date_range.to_pydatetime()]:
expected = self.cls('t', dates)
for times in [[expected[i] for i in range(10)],
[expected[i:(i + 1)] for i in range(10)],
[expected[[i]] for i in range(10)]]:
actual = Variable.concat(times, 't')
self.assertEqual(expected.dtype, actual.dtype)
self.assertArrayEqual(expected, actual)
def test_0d_time_data(self):
# regression test for #105
x = self.cls('time', pd.date_range('2000-01-01', periods=5))
expected = np.datetime64('2000-01-01T00Z', 'ns')
self.assertEqual(x[0].values, expected)
def test_datetime64_conversion(self):
times = pd.date_range('2000-01-01', periods=3)
for values, preserve_source in [
(times, False),
(times.values, True),
(times.values.astype('datetime64[s]'), False),
(times.to_pydatetime(), False),
]:
v = self.cls(['t'], values)
self.assertEqual(v.dtype, np.dtype('datetime64[ns]'))
self.assertArrayEqual(v.values, times.values)
self.assertEqual(v.values.dtype, np.dtype('datetime64[ns]'))
same_source = source_ndarray(v.values) is source_ndarray(values)
if preserve_source and self.cls is Variable:
self.assertTrue(same_source)
else:
self.assertFalse(same_source)
def test_timedelta64_conversion(self):
times = pd.timedelta_range(start=0, periods=3)
for values, preserve_source in [
(times, False),
(times.values, True),
(times.values.astype('timedelta64[s]'), False),
(times.to_pytimedelta(), False),
]:
v = self.cls(['t'], values)
self.assertEqual(v.dtype, np.dtype('timedelta64[ns]'))
self.assertArrayEqual(v.values, times.values)
self.assertEqual(v.values.dtype, np.dtype('timedelta64[ns]'))
same_source = source_ndarray(v.values) is source_ndarray(values)
if preserve_source and self.cls is Variable:
self.assertTrue(same_source)
else:
self.assertFalse(same_source)
def test_object_conversion(self):
data = np.arange(5).astype(str).astype(object)
actual = self.cls('x', data)
self.assertEqual(actual.dtype, data.dtype)
def test_pandas_data(self):
v = self.cls(['x'], pd.Series([0, 1, 2], index=[3, 2, 1]))
self.assertVariableIdentical(v, v[[0, 1, 2]])
v = self.cls(['x'], pd.Index([0, 1, 2]))
self.assertEqual(v[0].values, v.values[0])
def test_1d_math(self):
x = 1.0 * np.arange(5)
y = np.ones(5)
v = self.cls(['x'], x)
# unary ops
self.assertVariableIdentical(v, +v)
self.assertVariableIdentical(v, abs(v))
self.assertArrayEqual((-v).values, -x)
        # binary ops with numbers
self.assertVariableIdentical(v, v + 0)
self.assertVariableIdentical(v, 0 + v)
self.assertVariableIdentical(v, v * 1)
self.assertArrayEqual((v > 2).values, x > 2)
self.assertArrayEqual((0 == v).values, 0 == x)
self.assertArrayEqual((v - 1).values, x - 1)
self.assertArrayEqual((1 - v).values, 1 - x)
# binary ops with numpy arrays
self.assertArrayEqual((v * x).values, x ** 2)
self.assertArrayEqual((x * v).values, x ** 2)
self.assertArrayEqual(v - y, v - 1)
self.assertArrayEqual(y - v, 1 - v)
# verify attributes are dropped
v2 = self.cls(['x'], x, {'units': 'meters'})
self.assertVariableIdentical(v, +v2)
# binary ops with all variables
self.assertArrayEqual(v + v, 2 * v)
w = self.cls(['x'], y, {'foo': 'bar'})
self.assertVariableIdentical(v + w, self.cls(['x'], x + y))
self.assertArrayEqual((v * w).values, x * y)
# something complicated
self.assertArrayEqual((v ** 2 * w - 1 + x).values, x ** 2 * y - 1 + x)
# make sure dtype is preserved (for Index objects)
self.assertEqual(float, (+v).dtype)
self.assertEqual(float, (+v).values.dtype)
self.assertEqual(float, (0 + v).dtype)
self.assertEqual(float, (0 + v).values.dtype)
# check types of returned data
self.assertIsInstance(+v, Variable)
self.assertNotIsInstance(+v, Coordinate)
self.assertIsInstance(0 + v, Variable)
self.assertNotIsInstance(0 + v, Coordinate)
def test_1d_reduce(self):
x = np.arange(5)
v = self.cls(['x'], x)
actual = v.sum()
expected = Variable((), 10)
self.assertVariableIdentical(expected, actual)
self.assertIs(type(actual), Variable)
def test_array_interface(self):
x = np.arange(5)
v = self.cls(['x'], x)
self.assertArrayEqual(np.asarray(v), x)
# test patched in methods
self.assertArrayEqual(v.astype(float), x.astype(float))
self.assertVariableIdentical(v.argsort(), v)
self.assertVariableIdentical(v.clip(2, 3), self.cls('x', x.clip(2, 3)))
# test ufuncs
self.assertVariableIdentical(np.sin(v), self.cls(['x'], np.sin(x)))
self.assertIsInstance(np.sin(v), Variable)
self.assertNotIsInstance(np.sin(v), Coordinate)
def example_1d_objects(self):
for data in [range(3),
0.5 * np.arange(3),
0.5 * np.arange(3, dtype=np.float32),
pd.date_range('2000-01-01', periods=3),
np.array(['a', 'b', 'c'], dtype=object)]:
yield (self.cls('x', data), data)
def test___array__(self):
for v, data in self.example_1d_objects():
self.assertArrayEqual(v.values, np.asarray(data))
self.assertArrayEqual(np.asarray(v), np.asarray(data))
self.assertEqual(v[0].values, np.asarray(data)[0])
self.assertEqual(np.asarray(v[0]), np.asarray(data)[0])
def test_equals_all_dtypes(self):
for v, _ in self.example_1d_objects():
v2 = v.copy()
self.assertTrue(v.equals(v2))
self.assertTrue(v.identical(v2))
self.assertTrue(v[0].equals(v2[0]))
self.assertTrue(v[0].identical(v2[0]))
self.assertTrue(v[:2].equals(v2[:2]))
self.assertTrue(v[:2].identical(v2[:2]))
def test_eq_all_dtypes(self):
# ensure that we don't choke on comparisons for which numpy returns
# scalars
expected = self.cls('x', 3 * [False])
for v, _ in self.example_1d_objects():
actual = 'z' == v
self.assertVariableIdentical(expected, actual)
actual = ~('z' != v)
self.assertVariableIdentical(expected, actual)
def test_concat(self):
x = np.arange(5)
y = np.arange(5, 10)
v = self.cls(['a'], x)
w = self.cls(['a'], y)
self.assertVariableIdentical(Variable(['b', 'a'], np.array([x, y])),
Variable.concat([v, w], 'b'))
self.assertVariableIdentical(Variable(['b', 'a'], np.array([x, y])),
Variable.concat((v, w), 'b'))
self.assertVariableIdentical(Variable(['b', 'a'], np.array([x, y])),
Variable.concat((v, w), 'b'))
with self.assertRaisesRegexp(ValueError, 'inconsistent dimensions'):
Variable.concat([v, Variable(['c'], y)], 'b')
# test indexers
actual = Variable.concat([v, w], indexers=[range(0, 10, 2), range(1, 10, 2)], dim='a')
expected = Variable('a', np.array([x, y]).ravel(order='F'))
self.assertVariableIdentical(expected, actual)
# test concatenating along a dimension
v = Variable(['time', 'x'], np.random.random((10, 8)))
self.assertVariableIdentical(v, Variable.concat([v[:5], v[5:]], 'time'))
self.assertVariableIdentical(v, Variable.concat([v[:5], v[5:6], v[6:]], 'time'))
self.assertVariableIdentical(v, Variable.concat([v[:1], v[1:]], 'time'))
# test dimension order
self.assertVariableIdentical(v, Variable.concat([v[:, :5], v[:, 5:]], 'x'))
with self.assertRaisesRegexp(ValueError, 'same number of dimensions'):
Variable.concat([v[:, 0], v[:, 1:]], 'x')
def test_concat_attrs(self):
# different or conflicting attributes should be removed
v = self.cls('a', np.arange(5), {'foo': 'bar'})
w = self.cls('a', np.ones(5))
expected = self.cls('a', np.concatenate([np.arange(5), np.ones(5)]))
self.assertVariableIdentical(expected, Variable.concat([v, w], 'a'))
w.attrs['foo'] = 2
self.assertVariableIdentical(expected, Variable.concat([v, w], 'a'))
w.attrs['foo'] = 'bar'
expected.attrs['foo'] = 'bar'
self.assertVariableIdentical(expected, Variable.concat([v, w], 'a'))
def test_concat_fixed_len_str(self):
# regression test for #217
for kind in ['S', 'U']:
x = self.cls('animal', np.array(['horse'], dtype=kind))
y = self.cls('animal', np.array(['aardvark'], dtype=kind))
actual = Variable.concat([x, y], 'animal')
expected = Variable(
'animal', np.array(['horse', 'aardvark'], dtype=kind))
self.assertVariableEqual(expected, actual)
def test_concat_number_strings(self):
# regression test for #305
a = self.cls('x', ['0', '1', '2'])
b = self.cls('x', ['3', '4'])
actual = Variable.concat([a, b], dim='x')
expected = Variable('x', np.arange(5).astype(str).astype(object))
self.assertVariableIdentical(expected, actual)
self.assertEqual(expected.dtype, object)
self.assertEqual(type(expected.values[0]), str)
def test_copy(self):
v = self.cls('x', 0.5 * np.arange(10), {'foo': 'bar'})
for deep in [True, False]:
w = v.copy(deep=deep)
self.assertIs(type(v), type(w))
self.assertVariableIdentical(v, w)
self.assertEqual(v.dtype, w.dtype)
if self.cls is Variable:
if deep:
self.assertIsNot(source_ndarray(v.values),
source_ndarray(w.values))
else:
self.assertIs(source_ndarray(v.values),
source_ndarray(w.values))
self.assertVariableIdentical(v, copy(v))
class TestVariable(TestCase, VariableSubclassTestCases):
cls = staticmethod(Variable)
def setUp(self):
self.d = np.random.random((10, 3)).astype(np.float64)
def test_data_and_values(self):
v = Variable(['time', 'x'], self.d)
self.assertArrayEqual(v.data, self.d)
self.assertArrayEqual(v.values, self.d)
self.assertIs(source_ndarray(v.values), self.d)
with self.assertRaises(ValueError):
# wrong size
v.values = np.random.random(5)
d2 = np.random.random((10, 3))
v.values = d2
self.assertIs(source_ndarray(v.values), d2)
d3 = np.random.random((10, 3))
v.data = d3
self.assertIs(source_ndarray(v.data), d3)
def test_numpy_same_methods(self):
v = Variable([], np.float32(0.0))
self.assertEqual(v.item(), 0)
self.assertIs(type(v.item()), float)
v = Coordinate('x', np.arange(5))
self.assertEqual(2, v.searchsorted(2))
def test_datetime64_conversion_scalar(self):
expected = np.datetime64('2000-01-01T00:00:00Z', 'ns')
for values in [
np.datetime64('2000-01-01T00Z'),
pd.Timestamp('2000-01-01T00'),
datetime(2000, 1, 1),
]:
v = Variable([], values)
self.assertEqual(v.dtype, np.dtype('datetime64[ns]'))
self.assertEqual(v.values, expected)
self.assertEqual(v.values.dtype, np.dtype('datetime64[ns]'))
def test_timedelta64_conversion_scalar(self):
expected = np.timedelta64(24 * 60 * 60 * 10 ** 9, 'ns')
for values in [
np.timedelta64(1, 'D'),
pd.Timedelta('1 day'),
timedelta(days=1),
]:
v = Variable([], values)
self.assertEqual(v.dtype, np.dtype('timedelta64[ns]'))
self.assertEqual(v.values, expected)
self.assertEqual(v.values.dtype, np.dtype('timedelta64[ns]'))
def test_0d_str(self):
v = Variable([], u'foo')
self.assertEqual(v.dtype, np.dtype('U3'))
self.assertEqual(v.values, 'foo')
v = Variable([], np.string_('foo'))
self.assertEqual(v.dtype, np.dtype('S3'))
self.assertEqual(v.values, bytes('foo', 'ascii') if PY3 else 'foo')
def test_0d_datetime(self):
v = Variable([], pd.Timestamp('2000-01-01'))
self.assertEqual(v.dtype, np.dtype('datetime64[ns]'))
self.assertEqual(v.values, np.datetime64('2000-01-01T00Z', 'ns'))
def test_0d_timedelta(self):
for td in [pd.to_timedelta('1s'), np.timedelta64(1, 's')]:
v = Variable([], td)
self.assertEqual(v.dtype, np.dtype('timedelta64[ns]'))
self.assertEqual(v.values, np.timedelta64(10 ** 9, 'ns'))
def test_equals_and_identical(self):
d = np.random.rand(10, 3)
d[0, 0] = np.nan
v1 = Variable(('dim1', 'dim2'), data=d,
attrs={'att1': 3, 'att2': [1, 2, 3]})
v2 = Variable(('dim1', 'dim2'), data=d,
attrs={'att1': 3, 'att2': [1, 2, 3]})
self.assertTrue(v1.equals(v2))
self.assertTrue(v1.identical(v2))
v3 = Variable(('dim1', 'dim3'), data=d)
self.assertFalse(v1.equals(v3))
v4 = Variable(('dim1', 'dim2'), data=d)
self.assertTrue(v1.equals(v4))
self.assertFalse(v1.identical(v4))
v5 = deepcopy(v1)
v5.values[:] = np.random.rand(10, 3)
self.assertFalse(v1.equals(v5))
self.assertFalse(v1.equals(None))
self.assertFalse(v1.equals(d))
self.assertFalse(v1.identical(None))
self.assertFalse(v1.identical(d))
def test_broadcast_equals(self):
v1 = Variable((), np.nan)
v2 = Variable(('x'), [np.nan, np.nan])
self.assertTrue(v1.broadcast_equals(v2))
self.assertFalse(v1.equals(v2))
self.assertFalse(v1.identical(v2))
v3 = Variable(('x'), [np.nan])
self.assertTrue(v1.broadcast_equals(v3))
self.assertFalse(v1.equals(v3))
self.assertFalse(v1.identical(v3))
self.assertFalse(v1.broadcast_equals(None))
v4 = Variable(('x'), [np.nan] * 3)
self.assertFalse(v2.broadcast_equals(v4))
def test_as_variable(self):
data = np.arange(10)
expected = Variable('x', data)
self.assertVariableIdentical(expected, as_variable(expected))
ds = Dataset({'x': expected})
self.assertVariableIdentical(expected, as_variable(ds['x']))
self.assertNotIsInstance(ds['x'], Variable)
self.assertIsInstance(as_variable(ds['x']), Variable)
self.assertIsInstance(as_variable(ds['x'], strict=False), DataArray)
FakeVariable = namedtuple('FakeVariable', 'values dims')
fake_xarray = FakeVariable(expected.values, expected.dims)
self.assertVariableIdentical(expected, as_variable(fake_xarray))
xarray_tuple = (expected.dims, expected.values)
self.assertVariableIdentical(expected, as_variable(xarray_tuple))
with self.assertRaisesRegexp(TypeError, 'cannot convert arg'):
as_variable(tuple(data))
with self.assertRaisesRegexp(TypeError, 'cannot infer .+ dimensions'):
as_variable(data)
actual = as_variable(data, key='x')
self.assertVariableIdentical(expected, actual)
actual = as_variable(0)
expected = Variable([], 0)
self.assertVariableIdentical(expected, actual)
def test_repr(self):
v = Variable(['time', 'x'], [[1, 2, 3], [4, 5, 6]], {'foo': 'bar'})
expected = dedent("""
<xray.Variable (time: 2, x: 3)>
array([[1, 2, 3],
[4, 5, 6]])
Attributes:
foo: bar
""").strip()
self.assertEqual(expected, repr(v))
def test_repr_lazy_data(self):
v = Variable('x', LazilyIndexedArray(np.arange(2e5)))
self.assertIn('200000 values with dtype', repr(v))
self.assertIsInstance(v._data, LazilyIndexedArray)
def test_items(self):
data = np.random.random((10, 11))
v = Variable(['x', 'y'], data)
# test slicing
self.assertVariableIdentical(v, v[:])
self.assertVariableIdentical(v, v[...])
self.assertVariableIdentical(Variable(['y'], data[0]), v[0])
self.assertVariableIdentical(Variable(['x'], data[:, 0]), v[:, 0])
self.assertVariableIdentical(Variable(['x', 'y'], data[:3, :2]),
v[:3, :2])
# test array indexing
x = Variable(['x'], np.arange(10))
y = Variable(['y'], np.arange(11))
self.assertVariableIdentical(v, v[x.values])
self.assertVariableIdentical(v, v[x])
self.assertVariableIdentical(v[:3], v[x < 3])
self.assertVariableIdentical(v[:, 3:], v[:, y >= 3])
self.assertVariableIdentical(v[:3, 3:], v[x < 3, y >= 3])
self.assertVariableIdentical(v[:3, :2], v[x[:3], y[:2]])
self.assertVariableIdentical(v[:3, :2], v[range(3), range(2)])
# test iteration
for n, item in enumerate(v):
self.assertVariableIdentical(Variable(['y'], data[n]), item)
with self.assertRaisesRegexp(TypeError, 'iteration over a 0-d'):
iter(Variable([], 0))
# test setting
v.values[:] = 0
self.assertTrue(np.all(v.values == 0))
# test orthogonal setting
v[range(10), range(11)] = 1
self.assertArrayEqual(v.values, np.ones((10, 11)))
def test_isel(self):
v = Variable(['time', 'x'], self.d)
self.assertVariableIdentical(v.isel(time=slice(None)), v)
self.assertVariableIdentical(v.isel(time=0), v[0])
self.assertVariableIdentical(v.isel(time=slice(0, 3)), v[:3])
self.assertVariableIdentical(v.isel(x=0), v[:, 0])
with self.assertRaisesRegexp(ValueError, 'do not exist'):
v.isel(not_a_dim=0)
def test_index_0d_numpy_string(self):
        # regression test to verify our workaround for indexing 0d strings
v = Variable([], np.string_('asdf'))
self.assertVariableIdentical(v[()], v)
def test_transpose(self):
v = Variable(['time', 'x'], self.d)
v2 = Variable(['x', 'time'], self.d.T)
self.assertVariableIdentical(v, v2.transpose())
self.assertVariableIdentical(v.transpose(), v.T)
x = np.random.randn(2, 3, 4, 5)
w = Variable(['a', 'b', 'c', 'd'], x)
w2 = Variable(['d', 'b', 'c', 'a'], np.einsum('abcd->dbca', x))
self.assertEqual(w2.shape, (5, 3, 4, 2))
self.assertVariableIdentical(w2, w.transpose('d', 'b', 'c', 'a'))
self.assertVariableIdentical(w, w2.transpose('a', 'b', 'c', 'd'))
w3 = Variable(['b', 'c', 'd', 'a'], np.einsum('abcd->bcda', x))
self.assertVariableIdentical(w, w3.transpose('a', 'b', 'c', 'd'))
def test_squeeze(self):
v = Variable(['x', 'y'], [[1]])
self.assertVariableIdentical(Variable([], 1), v.squeeze())
self.assertVariableIdentical(Variable(['y'], [1]), v.squeeze('x'))
self.assertVariableIdentical(Variable(['y'], [1]), v.squeeze(['x']))
self.assertVariableIdentical(Variable(['x'], [1]), v.squeeze('y'))
self.assertVariableIdentical(Variable([], 1), v.squeeze(['x', 'y']))
v = Variable(['x', 'y'], [[1, 2]])
self.assertVariableIdentical(Variable(['y'], [1, 2]), v.squeeze())
self.assertVariableIdentical(Variable(['y'], [1, 2]), v.squeeze('x'))
with self.assertRaisesRegexp(ValueError, 'cannot select a dimension'):
v.squeeze('y')
def test_get_axis_num(self):
v = Variable(['x', 'y', 'z'], np.random.randn(2, 3, 4))
self.assertEqual(v.get_axis_num('x'), 0)
self.assertEqual(v.get_axis_num(['x']), (0,))
self.assertEqual(v.get_axis_num(['x', 'y']), (0, 1))
self.assertEqual(v.get_axis_num(['z', 'y', 'x']), (2, 1, 0))
with self.assertRaisesRegexp(ValueError, 'not found in array dim'):
v.get_axis_num('foobar')
def test_expand_dims(self):
v = Variable(['x'], [0, 1])
actual = v.expand_dims(['x', 'y'])
expected = Variable(['x', 'y'], [[0], [1]])
self.assertVariableIdentical(actual, expected)
actual = v.expand_dims(['y', 'x'])
self.assertVariableIdentical(actual, expected.T)
actual = v.expand_dims(OrderedDict([('x', 2), ('y', 2)]))
expected = Variable(['x', 'y'], [[0, 0], [1, 1]])
self.assertVariableIdentical(actual, expected)
v = Variable(['foo'], [0, 1])
actual = v.expand_dims('foo')
expected = v
self.assertVariableIdentical(actual, expected)
with self.assertRaisesRegexp(ValueError, 'must be a superset'):
v.expand_dims(['z'])
def test_broadcasting_math(self):
x = np.random.randn(2, 3)
v = Variable(['a', 'b'], x)
# 1d to 2d broadcasting
self.assertVariableIdentical(
v * v,
Variable(['a', 'b'], np.einsum('ab,ab->ab', x, x)))
self.assertVariableIdentical(
v * v[0],
Variable(['a', 'b'], np.einsum('ab,b->ab', x, x[0])))
self.assertVariableIdentical(
v[0] * v,
Variable(['b', 'a'], np.einsum('b,ab->ba', x[0], x)))
self.assertVariableIdentical(
v[0] * v[:, 0],
Variable(['b', 'a'], np.einsum('b,a->ba', x[0], x[:, 0])))
# higher dim broadcasting
y = np.random.randn(3, 4, 5)
w = Variable(['b', 'c', 'd'], y)
self.assertVariableIdentical(
v * w, Variable(['a', 'b', 'c', 'd'],
np.einsum('ab,bcd->abcd', x, y)))
self.assertVariableIdentical(
w * v, Variable(['b', 'c', 'd', 'a'],
np.einsum('bcd,ab->bcda', y, x)))
self.assertVariableIdentical(
v * w[0], Variable(['a', 'b', 'c', 'd'],
np.einsum('ab,cd->abcd', x, y[0])))
def test_broadcasting_failures(self):
a = Variable(['x'], np.arange(10))
b = Variable(['x'], np.arange(5))
c = Variable(['x', 'x'], np.arange(100).reshape(10, 10))
with self.assertRaisesRegexp(ValueError, 'mismatched lengths'):
a + b
with self.assertRaisesRegexp(ValueError, 'duplicate dimensions'):
a + c
def test_inplace_math(self):
x = np.arange(5)
v = Variable(['x'], x)
v2 = v
v2 += 1
self.assertIs(v, v2)
# since we provided an ndarray for data, it is also modified in-place
self.assertIs(source_ndarray(v.values), x)
self.assertArrayEqual(v.values, np.arange(5) + 1)
with self.assertRaisesRegexp(ValueError, 'dimensions cannot change'):
v += Variable('y', np.arange(5))
def test_reduce(self):
v = Variable(['x', 'y'], self.d, {'ignored': 'attributes'})
self.assertVariableIdentical(v.reduce(np.std, 'x'),
Variable(['y'], self.d.std(axis=0)))
self.assertVariableIdentical(v.reduce(np.std, axis=0),
v.reduce(np.std, dim='x'))
self.assertVariableIdentical(v.reduce(np.std, ['y', 'x']),
Variable([], self.d.std(axis=(0, 1))))
self.assertVariableIdentical(v.reduce(np.std),
Variable([], self.d.std()))
self.assertVariableIdentical(
v.reduce(np.mean, 'x').reduce(np.std, 'y'),
Variable([], self.d.mean(axis=0).std()))
self.assertVariableIdentical(v.mean('x'), v.reduce(np.mean, 'x'))
with self.assertRaisesRegexp(ValueError, 'cannot supply both'):
v.mean(dim='x', axis=0)
def test_reduce_funcs(self):
v = Variable('x', np.array([1, np.nan, 2, 3]))
self.assertVariableIdentical(v.mean(), Variable([], 2))
self.assertVariableIdentical(v.mean(skipna=True), Variable([], 2))
self.assertVariableIdentical(v.mean(skipna=False), Variable([], np.nan))
self.assertVariableIdentical(np.mean(v), Variable([], 2))
self.assertVariableIdentical(v.prod(), Variable([], 6))
self.assertVariableIdentical(v.var(), Variable([], 2.0 / 3))
if LooseVersion(np.__version__) < '1.9':
with self.assertRaises(NotImplementedError):
v.median()
else:
self.assertVariableIdentical(v.median(), Variable([], 2))
v = Variable('x', [True, False, False])
self.assertVariableIdentical(v.any(), Variable([], True))
self.assertVariableIdentical(v.all(dim='x'), Variable([], False))
v = Variable('t', pd.date_range('2000-01-01', periods=3))
with self.assertRaises(NotImplementedError):
v.max(skipna=True)
self.assertVariableIdentical(
v.max(), Variable([], pd.Timestamp('2000-01-03')))
def test_reduce_keep_attrs(self):
_attrs = {'units': 'test', 'long_name': 'testing'}
v = Variable(['x', 'y'], self.d, _attrs)
# Test dropped attrs
vm = v.mean()
self.assertEqual(len(vm.attrs), 0)
self.assertEqual(vm.attrs, OrderedDict())
# Test kept attrs
vm = v.mean(keep_attrs=True)
self.assertEqual(len(vm.attrs), len(_attrs))
self.assertEqual(vm.attrs, _attrs)
def test_count(self):
expected = Variable([], 3)
actual = Variable(['x'], [1, 2, 3, np.nan]).count()
self.assertVariableIdentical(expected, actual)
v = Variable(['x'], np.array(['1', '2', '3', np.nan], dtype=object))
actual = v.count()
self.assertVariableIdentical(expected, actual)
actual = Variable(['x'], [True, False, True]).count()
self.assertVariableIdentical(expected, actual)
self.assertEqual(actual.dtype, int)
expected = Variable(['x'], [2, 3])
actual = Variable(['x', 'y'], [[1, 0, np.nan], [1, 1, 1]]).count('y')
self.assertVariableIdentical(expected, actual)
class TestCoordinate(TestCase, VariableSubclassTestCases):
cls = staticmethod(Coordinate)
def test_init(self):
with self.assertRaisesRegexp(ValueError, 'must be 1-dimensional'):
Coordinate((), 0)
def test_to_index(self):
data = 0.5 * np.arange(10)
v = Coordinate(['time'], data, {'foo': 'bar'})
self.assertTrue(pd.Index(data, name='time').identical(v.to_index()))
def test_data(self):
x = Coordinate('x', np.arange(3.0))
# data should be initially saved as an ndarray
self.assertIs(type(x._data), np.ndarray)
self.assertEqual(float, x.dtype)
self.assertArrayEqual(np.arange(3), x)
self.assertEqual(float, x.values.dtype)
# after inspecting x.values, the Coordinate value will be saved as an Index
self.assertIsInstance(x._data, PandasIndexAdapter)
with self.assertRaisesRegexp(TypeError, 'cannot be modified'):
x[:] = 0
def test_name(self):
coord = Coordinate('x', [10.0])
self.assertEqual(coord.name, 'x')
with self.assertRaises(AttributeError):
coord.name = 'y'
class TestAsCompatibleData(TestCase):
def test_unchanged_types(self):
types = (np.asarray, PandasIndexAdapter, indexing.LazilyIndexedArray)
for t in types:
for data in [np.arange(3),
pd.date_range('2000-01-01', periods=3),
pd.date_range('2000-01-01', periods=3).values]:
x = t(data)
self.assertIs(source_ndarray(x),
source_ndarray(_as_compatible_data(x)))
def test_converted_types(self):
for input_array in [[[0, 1, 2]], pd.DataFrame([[0, 1, 2]])]:
actual = _as_compatible_data(input_array)
self.assertArrayEqual(np.asarray(input_array), actual)
self.assertEqual(np.ndarray, type(actual))
self.assertEqual(np.asarray(input_array).dtype, actual.dtype)
def test_masked_array(self):
original = np.ma.MaskedArray(np.arange(5))
expected = np.arange(5)
actual = _as_compatible_data(original)
self.assertArrayEqual(expected, actual)
self.assertEqual(np.dtype(int), actual.dtype)
original = np.ma.MaskedArray(np.arange(5), mask=4 * [False] + [True])
expected = np.arange(5.0)
expected[-1] = np.nan
actual = _as_compatible_data(original)
self.assertArrayEqual(expected, actual)
self.assertEqual(np.dtype(float), actual.dtype)
def test_datetime(self):
expected = np.datetime64('2000-01-01T00Z')
actual = _as_compatible_data(expected)
self.assertEqual(expected, actual)
self.assertEqual(np.ndarray, type(actual))
self.assertEqual(np.dtype('datetime64[ns]'), actual.dtype)
expected = np.array([np.datetime64('2000-01-01T00Z')])
actual = _as_compatible_data(expected)
self.assertEqual(np.asarray(expected), actual)
self.assertEqual(np.ndarray, type(actual))
self.assertEqual(np.dtype('datetime64[ns]'), actual.dtype)
expected = np.array([np.datetime64('2000-01-01T00Z', 'ns')])
actual = _as_compatible_data(expected)
self.assertEqual(np.asarray(expected), actual)
self.assertEqual(np.ndarray, type(actual))
self.assertEqual(np.dtype('datetime64[ns]'), actual.dtype)
self.assertIs(expected, source_ndarray(np.asarray(actual)))
expected = np.datetime64('2000-01-01T00Z', 'ns')
actual = _as_compatible_data(datetime(2000, 1, 1))
self.assertEqual(np.asarray(expected), actual)
self.assertEqual(np.ndarray, type(actual))
self.assertEqual(np.dtype('datetime64[ns]'), actual.dtype)
| clarkfitzg/xray | xray/test/test_variable.py | Python | apache-2.0 | 35,943 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core import exceptions
from telemetry.core import util
from telemetry import decorators
from telemetry.internal.actions import page_action
from telemetry.page import action_runner as action_runner_module
from telemetry.testing import tab_test_case
from telemetry.timeline import model
from telemetry.timeline import tracing_category_filter
from telemetry.timeline import tracing_options
from telemetry.web_perf import timeline_interaction_record as tir_module
util.AddDirToPythonPath(util.GetTelemetryDir(), 'third_party', 'mock')
import mock
class ActionRunnerInteractionTest(tab_test_case.TabTestCase):
def GetInteractionRecords(self, trace_data):
timeline_model = model.TimelineModel(trace_data)
renderer_thread = timeline_model.GetRendererThreadFromTabId(self._tab.id)
return [
tir_module.TimelineInteractionRecord.FromAsyncEvent(e)
for e in renderer_thread.async_slices
if tir_module.IsTimelineInteractionRecord(e.name)
]
def VerifyIssuingInteractionRecords(self, **interaction_kwargs):
action_runner = action_runner_module.ActionRunner(self._tab,
skip_waits=True)
self.Navigate('interaction_enabled_page.html')
action_runner.Wait(1)
options = tracing_options.TracingOptions()
options.enable_chrome_trace = True
self._browser.platform.tracing_controller.Start(
options, tracing_category_filter.CreateNoOverheadFilter())
with action_runner.CreateInteraction('InteractionName',
**interaction_kwargs):
pass
trace_data = self._browser.platform.tracing_controller.Stop()
records = self.GetInteractionRecords(trace_data)
self.assertEqual(
1, len(records),
'Failed to issue the interaction record on the tracing timeline.'
' Trace data:\n%s' % repr(trace_data._raw_data))
self.assertEqual('InteractionName', records[0].label)
for attribute_name in interaction_kwargs:
self.assertTrue(getattr(records[0], attribute_name))
# Test disabled for android: crbug.com/437057
@decorators.Disabled('android', 'chromeos')
def testIssuingMultipleMeasurementInteractionRecords(self):
self.VerifyIssuingInteractionRecords(repeatable=True)
class ActionRunnerTest(tab_test_case.TabTestCase):
def testExecuteJavaScript(self):
action_runner = action_runner_module.ActionRunner(self._tab,
skip_waits=True)
self.Navigate('blank.html')
action_runner.ExecuteJavaScript('var testing = 42;')
self.assertEqual(42, self._tab.EvaluateJavaScript('testing'))
def testWaitForNavigate(self):
self.Navigate('page_with_link.html')
action_runner = action_runner_module.ActionRunner(self._tab,
skip_waits=True)
action_runner.ClickElement('#clickme')
action_runner.WaitForNavigate()
self.assertTrue(self._tab.EvaluateJavaScript(
'document.readyState == "interactive" || '
'document.readyState == "complete"'))
self.assertEqual(
self._tab.EvaluateJavaScript('document.location.pathname;'),
'/blank.html')
def testWait(self):
action_runner = action_runner_module.ActionRunner(self._tab)
self.Navigate('blank.html')
action_runner.ExecuteJavaScript(
'window.setTimeout(function() { window.testing = 101; }, 50);')
action_runner.Wait(0.1)
self.assertEqual(101, self._tab.EvaluateJavaScript('window.testing'))
action_runner.ExecuteJavaScript(
'window.setTimeout(function() { window.testing = 102; }, 100);')
action_runner.Wait(0.2)
self.assertEqual(102, self._tab.EvaluateJavaScript('window.testing'))
def testWaitForJavaScriptCondition(self):
action_runner = action_runner_module.ActionRunner(self._tab,
skip_waits=True)
self.Navigate('blank.html')
action_runner.ExecuteJavaScript('window.testing = 219;')
action_runner.WaitForJavaScriptCondition(
'window.testing == 219', timeout_in_seconds=0.1)
action_runner.ExecuteJavaScript(
'window.setTimeout(function() { window.testing = 220; }, 50);')
action_runner.WaitForJavaScriptCondition(
'window.testing == 220', timeout_in_seconds=0.1)
self.assertEqual(220, self._tab.EvaluateJavaScript('window.testing'))
def testWaitForElement(self):
action_runner = action_runner_module.ActionRunner(self._tab,
skip_waits=True)
self.Navigate('blank.html')
action_runner.ExecuteJavaScript(
'(function() {'
' var el = document.createElement("div");'
' el.id = "test1";'
' el.textContent = "foo";'
' document.body.appendChild(el);'
'})()')
action_runner.WaitForElement('#test1', timeout_in_seconds=0.1)
action_runner.WaitForElement(text='foo', timeout_in_seconds=0.1)
action_runner.WaitForElement(
element_function='document.getElementById("test1")')
action_runner.ExecuteJavaScript(
'window.setTimeout(function() {'
' var el = document.createElement("div");'
' el.id = "test2";'
' document.body.appendChild(el);'
'}, 50)')
action_runner.WaitForElement('#test2', timeout_in_seconds=0.1)
action_runner.ExecuteJavaScript(
'window.setTimeout(function() {'
' document.getElementById("test2").textContent = "bar";'
'}, 50)')
action_runner.WaitForElement(text='bar', timeout_in_seconds=0.1)
action_runner.ExecuteJavaScript(
'window.setTimeout(function() {'
' var el = document.createElement("div");'
' el.id = "test3";'
' document.body.appendChild(el);'
'}, 50)')
action_runner.WaitForElement(
element_function='document.getElementById("test3")')
def testWaitForElementWithWrongText(self):
action_runner = action_runner_module.ActionRunner(self._tab,
skip_waits=True)
self.Navigate('blank.html')
action_runner.ExecuteJavaScript(
'(function() {'
' var el = document.createElement("div");'
' el.id = "test1";'
' el.textContent = "foo";'
' document.body.appendChild(el);'
'})()')
action_runner.WaitForElement('#test1', timeout_in_seconds=0.2)
def WaitForElement():
action_runner.WaitForElement(text='oo', timeout_in_seconds=0.2)
self.assertRaises(exceptions.TimeoutException, WaitForElement)
def testClickElement(self):
self.Navigate('page_with_clickables.html')
action_runner = action_runner_module.ActionRunner(self._tab,
skip_waits=True)
action_runner.ExecuteJavaScript('valueSettableByTest = 1;')
action_runner.ClickElement('#test')
self.assertEqual(1, action_runner.EvaluateJavaScript('valueToTest'))
action_runner.ExecuteJavaScript('valueSettableByTest = 2;')
action_runner.ClickElement(text='Click/tap me')
self.assertEqual(2, action_runner.EvaluateJavaScript('valueToTest'))
action_runner.ExecuteJavaScript('valueSettableByTest = 3;')
action_runner.ClickElement(
element_function='document.body.firstElementChild;')
self.assertEqual(3, action_runner.EvaluateJavaScript('valueToTest'))
def WillFail():
action_runner.ClickElement('#notfound')
self.assertRaises(exceptions.EvaluateException, WillFail)
@decorators.Disabled('android', 'debug', # crbug.com/437068
'chromeos') # crbug.com/483212
def testTapElement(self):
self.Navigate('page_with_clickables.html')
action_runner = action_runner_module.ActionRunner(self._tab,
skip_waits=True)
action_runner.ExecuteJavaScript('valueSettableByTest = 1;')
action_runner.TapElement('#test')
self.assertEqual(1, action_runner.EvaluateJavaScript('valueToTest'))
action_runner.ExecuteJavaScript('valueSettableByTest = 2;')
action_runner.TapElement(text='Click/tap me')
self.assertEqual(2, action_runner.EvaluateJavaScript('valueToTest'))
action_runner.ExecuteJavaScript('valueSettableByTest = 3;')
action_runner.TapElement(
element_function='document.body.firstElementChild')
self.assertEqual(3, action_runner.EvaluateJavaScript('valueToTest'))
def WillFail():
action_runner.TapElement('#notfound')
self.assertRaises(exceptions.EvaluateException, WillFail)
@decorators.Disabled('android', # crbug.com/437065.
'chromeos') # crbug.com/483212.
def testScroll(self):
if not page_action.IsGestureSourceTypeSupported(
self._tab, 'touch'):
return
self.Navigate('page_with_swipeables.html')
action_runner = action_runner_module.ActionRunner(self._tab,
skip_waits=True)
action_runner.ScrollElement(
selector='#left-right', direction='right', left_start_ratio=0.9)
self.assertTrue(action_runner.EvaluateJavaScript(
'document.querySelector("#left-right").scrollLeft') > 75)
action_runner.ScrollElement(
selector='#top-bottom', direction='down', top_start_ratio=0.9)
self.assertTrue(action_runner.EvaluateJavaScript(
'document.querySelector("#top-bottom").scrollTop') > 75)
action_runner.ScrollPage(direction='right', left_start_ratio=0.9,
distance=100)
self.assertTrue(action_runner.EvaluateJavaScript(
'document.body.scrollLeft') > 75)
@decorators.Disabled('android', # crbug.com/437065.
'chromeos') # crbug.com/483212.
def testSwipe(self):
if not page_action.IsGestureSourceTypeSupported(
self._tab, 'touch'):
return
self.Navigate('page_with_swipeables.html')
action_runner = action_runner_module.ActionRunner(self._tab,
skip_waits=True)
action_runner.SwipeElement(
selector='#left-right', direction='left', left_start_ratio=0.9)
self.assertTrue(action_runner.EvaluateJavaScript(
'document.querySelector("#left-right").scrollLeft') > 75)
action_runner.SwipeElement(
selector='#top-bottom', direction='up', top_start_ratio=0.9)
self.assertTrue(action_runner.EvaluateJavaScript(
'document.querySelector("#top-bottom").scrollTop') > 75)
action_runner.SwipePage(direction='left', left_start_ratio=0.9)
self.assertTrue(action_runner.EvaluateJavaScript(
'document.body.scrollLeft') > 75)
class InteractionTest(unittest.TestCase):
def setUp(self):
self.mock_action_runner = mock.Mock(action_runner_module.ActionRunner)
def testIssuingInteractionRecordCommand(self):
with action_runner_module.Interaction(
self.mock_action_runner, label='ABC', flags=[]):
pass
expected_calls = [
mock.call.ExecuteJavaScript('console.time("Interaction.ABC");'),
mock.call.ExecuteJavaScript('console.timeEnd("Interaction.ABC");')]
self.assertEqual(expected_calls, self.mock_action_runner.mock_calls)
def testExceptionRaisedInWithInteraction(self):
class FooException(Exception):
pass
# Test that the Foo exception raised in the with block is propagated to the
# caller.
with self.assertRaises(FooException):
with action_runner_module.Interaction(
self.mock_action_runner, label='ABC', flags=[]):
raise FooException()
    # Test that the closing console.timeEnd(...) isn't called because an
    # exception was raised.
expected_calls = [
mock.call.ExecuteJavaScript('console.time("Interaction.ABC");')]
self.assertEqual(expected_calls, self.mock_action_runner.mock_calls)
| googlearchive/big-rig | app/src/thirdparty/telemetry/internal/actions/action_runner_unittest.py | Python | apache-2.0 | 12,077 |
import hashlib
from waterbutler import settings
config = settings.child('SERVER_CONFIG')
ADDRESS = config.get('ADDRESS', 'localhost')
PORT = config.get('PORT', 7777)
DOMAIN = config.get('DOMAIN', "http://localhost:7777")
DEBUG = config.get_bool('DEBUG', True)
SSL_CERT_FILE = config.get_nullable('SSL_CERT_FILE', None)
SSL_KEY_FILE = config.get_nullable('SSL_KEY_FILE', None)
XHEADERS = config.get_bool('XHEADERS', False)
CORS_ALLOW_ORIGIN = config.get('CORS_ALLOW_ORIGIN', '*')
CHUNK_SIZE = int(config.get('CHUNK_SIZE', 65536)) # 64KB
MAX_BODY_SIZE = int(config.get('MAX_BODY_SIZE', int(4.9 * (1024 ** 3)))) # 4.9 GB
AUTH_HANDLERS = config.get('AUTH_HANDLERS', [
'osf',
])
HMAC_ALGORITHM = getattr(hashlib, config.get('HMAC_ALGORITHM', 'sha256'))
HMAC_SECRET = config.get('HMAC_SECRET')
if not settings.DEBUG:
assert HMAC_SECRET, 'HMAC_SECRET must be specified when not in debug mode'
HMAC_SECRET = (HMAC_SECRET or 'changeme').encode('utf-8')
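# Illustrative only (not waterbutler's actual auth flow): the two HMAC settings
# above are enough to compute a keyed digest over a request body with the
# standard library, e.g.
#   import hmac
#   digest = hmac.new(HMAC_SECRET, payload_bytes, HMAC_ALGORITHM).hexdigest()
# where `payload_bytes` is a hypothetical bytes payload chosen by the caller.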
# Configs for WB API Rate-limiting with Redis
ENABLE_RATE_LIMITING = config.get_bool('ENABLE_RATE_LIMITING', False)
REDIS_HOST = config.get('REDIS_HOST', '192.168.168.167')
REDIS_PORT = config.get('REDIS_PORT', '6379')
REDIS_PASSWORD = config.get('REDIS_PASSWORD', None)
# Number of seconds until the redis key expires
RATE_LIMITING_FIXED_WINDOW_SIZE = int(config.get('RATE_LIMITING_FIXED_WINDOW_SIZE', 3600))
# Number of requests permitted while the redis key is active
RATE_LIMITING_FIXED_WINDOW_LIMIT = int(config.get('RATE_LIMITING_FIXED_WINDOW_LIMIT', 3600))
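# Illustrative sketch only; not how waterbutler itself enforces the limit.
# The two settings above describe a fixed-window limiter: a redis counter that
# lives for RATE_LIMITING_FIXED_WINDOW_SIZE seconds and may grow to at most
# RATE_LIMITING_FIXED_WINDOW_LIMIT before further requests are rejected. The
# function below is a minimal sketch; the key scheme and function name are
# hypothetical, and `redis_client` is assumed to be a redis.Redis instance.
def _is_rate_limited(redis_client, remote_addr):
    key = 'rate-limit::{}'.format(remote_addr)  # hypothetical key layout
    count = redis_client.incr(key)  # atomic increment; creates the key at 1
    if count == 1:
        # First request of this window: start the expiry countdown.
        redis_client.expire(key, RATE_LIMITING_FIXED_WINDOW_SIZE)
    return count > RATE_LIMITING_FIXED_WINDOW_LIMIT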
| CenterForOpenScience/waterbutler | waterbutler/server/settings.py | Python | apache-2.0 | 1,532 |
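# Job handlers for the 0-orchestrator `tcp` service template: `install` opens
# the configured nft port on the parent node, `drop` closes it again, and
# `monitor` re-runs `install` as long as the service status is "opened". Each
# handler resolves the node from the AYS service tree and authenticates with a
# JWT taken from the repository configuration.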
def install(job):
from zeroos.orchestrator.sal.Node import Node
from zeroos.orchestrator.configuration import get_jwt_token
service = job.service
job.context['token'] = get_jwt_token(job.service.aysrepo)
node = Node.from_ays(service.parent, job.context['token'])
if node.client.nft.rule_exists(service.model.data.port):
return
node.client.nft.open_port(service.model.data.port)
service.model.data.status = "opened"
service.saveAll()
def drop(job):
from zeroos.orchestrator.sal.Node import Node
from zeroos.orchestrator.configuration import get_jwt_token
service = job.service
job.context['token'] = get_jwt_token(job.service.aysrepo)
node = Node.from_ays(service.parent, job.context['token'])
if not node.client.nft.rule_exists(service.model.data.port):
return
node.client.nft.drop_port(service.model.data.port)
service.model.data.status = "dropped"
service.saveAll()
def monitor(job):
from zeroos.orchestrator.configuration import get_jwt_token
service = job.service
if service.model.data.status != "opened":
return
job.context['token'] = get_jwt_token(service.aysrepo)
install(job)
| zero-os/0-orchestrator | templates/tcp/actions.py | Python | apache-2.0 | 1,208 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import base64
import json
import os
import sys
import time
from marionette import MarionetteTestCase
from marionette import Marionette
from marionette import MarionetteTouchMixin
from marionette.errors import NoSuchElementException
from marionette.errors import ElementNotVisibleException
from marionette.errors import TimeoutException
import mozdevice
class LockScreen(object):
def __init__(self, marionette):
self.marionette = marionette
js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', "gaia_lock_screen.js"))
self.marionette.import_script(js)
@property
def is_locked(self):
return self.marionette.execute_script('window.wrappedJSObject.LockScreen.locked')
def lock(self):
result = self.marionette.execute_async_script('GaiaLockScreen.lock()')
assert result, 'Unable to lock screen'
def unlock(self):
result = self.marionette.execute_async_script('GaiaLockScreen.unlock()')
assert result, 'Unable to unlock screen'
class GaiaApp(object):
def __init__(self, origin=None, name=None, frame=None, src=None):
self.frame = frame
self.frame_id = frame
self.src = src
self.name = name
self.origin = origin
class GaiaApps(object):
def __init__(self, marionette):
self.marionette = marionette
js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', "gaia_apps.js"))
self.marionette.import_script(js)
def get_permission(self, app_name, permission_name):
return self.marionette.execute_async_script("return GaiaApps.getPermission('%s', '%s')" % (app_name, permission_name))
def set_permission(self, app_name, permission_name, value):
return self.marionette.execute_async_script("return GaiaApps.setPermission('%s', '%s', '%s')" %
(app_name, permission_name, value))
def launch(self, name, switch_to_frame=True, url=None):
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script("GaiaApps.launchWithName('%s')" % name)
assert result, "Failed to launch app with name '%s'" % name
app = GaiaApp(frame=result.get('frame'),
src=result.get('src'),
name=result.get('name'),
origin=result.get('origin'))
if app.frame_id is None:
raise Exception("App failed to launch; there is no app frame")
if switch_to_frame:
self.switch_to_frame(app.frame_id, url)
return app
def uninstall(self, name):
self.marionette.switch_to_frame()
self.marionette.execute_async_script("GaiaApps.uninstallWithName('%s')" % name)
def kill(self, app):
self.marionette.switch_to_frame()
js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', "gaia_apps.js"))
self.marionette.import_script(js)
result = self.marionette.execute_async_script("GaiaApps.kill('%s');" % app.origin)
assert result, "Failed to kill app with name '%s'" % app.name
def kill_all(self):
self.marionette.switch_to_frame()
js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', "gaia_apps.js"))
self.marionette.import_script(js)
self.marionette.execute_async_script("GaiaApps.killAll()")
def runningApps(self):
return self.marionette.execute_script("return GaiaApps.getRunningApps()")
def switch_to_frame(self, app_frame, url=None, timeout=30):
self.marionette.switch_to_frame(app_frame)
start = time.time()
if not url:
def check(now):
return "about:blank" not in now
else:
def check(now):
return url in now
while (time.time() - start < timeout):
if check(self.marionette.get_url()):
return
time.sleep(2)
raise TimeoutException('Could not switch to app frame %s in time' % app_frame)
class GaiaData(object):
def __init__(self, marionette):
self.marionette = marionette
js = os.path.abspath(os.path.join(__file__, os.path.pardir, 'atoms', "gaia_data_layer.js"))
self.marionette.import_script(js)
self.marionette.set_search_timeout(10000)
def set_time(self, date_number):
self.marionette.set_context(self.marionette.CONTEXT_CHROME)
self.marionette.execute_script("window.navigator.mozTime.set(%s);" % date_number)
self.marionette.set_context(self.marionette.CONTEXT_CONTENT)
def insert_contact(self, contact):
self.marionette.execute_script("GaiaDataLayer.insertContact(%s)" % contact.json())
def remove_contact(self, contact):
self.marionette.execute_script("GaiaDataLayer.findAndRemoveContact(%s)" % contact.json())
def get_setting(self, name):
self.marionette.switch_to_frame()
return self.marionette.execute_async_script('return GaiaDataLayer.getSetting("%s")' % name)
@property
def all_settings(self):
return self.get_setting('*')
def set_setting(self, name, value):
import json
value = json.dumps(value)
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script('return GaiaDataLayer.setSetting("%s", %s)' % (name, value))
assert result, "Unable to change setting with name '%s' to '%s'" % (name, value)
def set_volume(self, value):
self.set_setting('audio.volume.master', value)
def enable_cell_data(self):
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script("return GaiaDataLayer.enableCellData()")
assert result, 'Unable to enable cell data'
def disable_cell_data(self):
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script("return GaiaDataLayer.disableCellData()")
assert result, 'Unable to disable cell data'
def enable_cell_roaming(self):
self.set_setting('ril.data.roaming_enabled', True)
def disable_cell_roaming(self):
self.set_setting('ril.data.roaming_enabled', False)
def enable_wifi(self):
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script("return GaiaDataLayer.enableWiFi()")
assert result, 'Unable to enable WiFi'
def disable_wifi(self):
self.marionette.switch_to_frame()
result = self.marionette.execute_async_script("return GaiaDataLayer.disableWiFi()")
assert result, 'Unable to disable WiFi'
def connect_to_wifi(self, network):
result = self.marionette.execute_async_script("return GaiaDataLayer.connectToWiFi(%s)" % json.dumps(network))
assert result, 'Unable to connect to WiFi network'
def forget_all_networks(self):
self.marionette.execute_async_script('return GaiaDataLayer.forgetAllNetworks()')
def is_wifi_connected(self, network):
return self.marionette.execute_script("return GaiaDataLayer.isWiFiConnected(%s)" % json.dumps(network))
@property
def known_networks(self):
return self.marionette.execute_async_script('return GaiaDataLayer.getKnownNetworks()')
@property
def active_telephony_state(self):
        # Return the state of the currently active call only, or None if there is no active call.
return self.marionette.execute_script("return GaiaDataLayer.getMozTelephonyState()")
@property
def is_antenna_available(self):
return self.marionette.execute_script('return window.navigator.mozFMRadio.antennaAvailable')
@property
def is_fm_radio_enabled(self):
return self.marionette.execute_script('return window.navigator.mozFMRadio.enabled')
@property
def fm_radio_frequency(self):
return self.marionette.execute_script('return window.navigator.mozFMRadio.frequency')
@property
def media_files(self):
return self.marionette.execute_async_script('return GaiaDataLayer.getAllMediaFiles();')
class GaiaTestCase(MarionetteTestCase):
def setUp(self):
MarionetteTestCase.setUp(self)
self.marionette.__class__ = type('Marionette', (Marionette, MarionetteTouchMixin), {})
self.marionette.setup_touch()
# the emulator can be really slow!
self.marionette.set_script_timeout(60000)
self.marionette.set_search_timeout(10000)
self.lockscreen = LockScreen(self.marionette)
self.apps = GaiaApps(self.marionette)
self.data_layer = GaiaData(self.marionette)
self.keyboard = Keyboard(self.marionette)
# wifi is true if testvars includes wifi details and wifi manager is defined
self.wifi = self.testvars and \
'wifi' in self.testvars and \
self.marionette.execute_script('return window.navigator.mozWifiManager !== undefined')
self.cleanUp()
@property
def is_android_build(self):
return 'Android' in self.marionette.session_capabilities['platform']
@property
def device_manager(self):
if not self.is_android_build:
raise Exception('Device manager is only available for devices.')
if hasattr(self, '_device_manager') and self._device_manager:
return self._device_manager
else:
dm_type = os.environ.get('DM_TRANS', 'adb')
if dm_type == 'adb':
self._device_manager = mozdevice.DeviceManagerADB()
elif dm_type == 'sut':
host = os.environ.get('TEST_DEVICE')
if not host:
raise Exception('Must specify host with SUT!')
self._device_manager = mozdevice.DeviceManagerSUT(host=host)
else:
raise Exception('Unknown device manager type: %s' % dm_type)
return self._device_manager
def cleanUp(self):
# remove media
if self.is_android_build and self.data_layer.media_files:
for filename in self.data_layer.media_files:
self.device_manager.removeFile('/'.join(['sdcard', filename]))
# unlock
self.lockscreen.unlock()
# kill any open apps
self.apps.kill_all()
# disable sound completely
self.data_layer.set_volume(0)
if self.wifi:
# forget any known networks
self.data_layer.enable_wifi()
self.data_layer.forget_all_networks()
self.data_layer.disable_wifi()
# reset to home screen
self.marionette.execute_script("window.wrappedJSObject.dispatchEvent(new Event('home'));")
def push_resource(self, filename, destination=''):
local = os.path.abspath(os.path.join(os.path.dirname(__file__), 'resources', filename))
remote = '/'.join(['sdcard', destination, filename])
self.device_manager.mkDirs(remote)
self.device_manager.pushFile(local, remote)
def wait_for_element_present(self, by, locator, timeout=10):
timeout = float(timeout) + time.time()
while time.time() < timeout:
time.sleep(0.5)
try:
return self.marionette.find_element(by, locator)
except NoSuchElementException:
pass
else:
raise TimeoutException(
'Element %s not found before timeout' % locator)
def wait_for_element_not_present(self, by, locator, timeout=10):
timeout = float(timeout) + time.time()
while time.time() < timeout:
time.sleep(0.5)
try:
self.marionette.find_element(by, locator)
except NoSuchElementException:
break
else:
raise TimeoutException(
'Element %s still present after timeout' % locator)
def wait_for_element_displayed(self, by, locator, timeout=10):
timeout = float(timeout) + time.time()
while time.time() < timeout:
time.sleep(0.5)
try:
if self.marionette.find_element(by, locator).is_displayed():
break
except NoSuchElementException:
pass
else:
raise TimeoutException(
'Element %s not visible before timeout' % locator)
def wait_for_element_not_displayed(self, by, locator, timeout=10):
timeout = float(timeout) + time.time()
while time.time() < timeout:
time.sleep(0.5)
try:
if not self.marionette.find_element(by, locator).is_displayed():
break
except NoSuchElementException:
break
else:
raise TimeoutException(
'Element %s still visible after timeout' % locator)
def wait_for_condition(self, method, timeout=10,
message="Condition timed out"):
"""Calls the method provided with the driver as an argument until the \
return value is not False."""
end_time = time.time() + timeout
while time.time() < end_time:
try:
value = method(self.marionette)
if value:
return value
except NoSuchElementException:
pass
time.sleep(0.5)
else:
raise TimeoutException(message)
def is_element_present(self, by, locator):
try:
self.marionette.find_element(by, locator)
return True
        except Exception:
return False
def tearDown(self):
if any(sys.exc_info()):
# test has failed, gather debug
test_class, test_name = self.marionette.test_name.split()[-1].split('.')
xml_output = self.testvars.get('xml_output', None)
debug_path = os.path.join(xml_output and os.path.dirname(xml_output) or 'debug', test_class)
if not os.path.exists(debug_path):
os.makedirs(debug_path)
# screenshot
with open(os.path.join(debug_path, '%s_screenshot.png' % test_name), 'w') as f:
# TODO: Bug 818287 - Screenshots include data URL prefix
screenshot = self.marionette.screenshot()[22:]
f.write(base64.decodestring(screenshot))
self.lockscreen = None
self.apps = None
self.data_layer = None
MarionetteTestCase.tearDown(self)
class Keyboard(object):
_upper_case_key = '20'
_numeric_sign_key = '-2'
_alpha_key = '-1'
_alt_key = '18'
# Keyboard app
_keyboard_frame_locator = ('css selector', '#keyboard-frame iframe')
_button_locator = ('css selector', 'button.keyboard-key[data-keycode="%s"]')
def __init__(self, marionette):
self.marionette = marionette
def _switch_to_keyboard(self):
self.marionette.switch_to_frame()
keybframe = self.marionette.find_element(*self._keyboard_frame_locator)
self.marionette.switch_to_frame(keybframe, focus=False)
def _key_locator(self, val):
if len(val) == 1:
val = ord(val)
return (self._button_locator[0], self._button_locator[1] % val)
def _press(self, val):
self.marionette.find_element(*self._key_locator(val)).click()
def is_element_present(self, by, locator):
try:
self.marionette.set_search_timeout(500)
self.marionette.find_element(by, locator)
return True
        except Exception:
return False
finally:
# set the search timeout to the default value
self.marionette.set_search_timeout(10000)
def send(self, string):
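        # Type the string one character at a time, switching keyboard layouts
        # as needed: shift for uppercase letters, the numeric/sign layout for
        # digits and common symbols, and the alt layout for remaining symbols.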
self._switch_to_keyboard()
for val in string:
if val.isalnum():
if val.islower():
self._press(val)
elif val.isupper():
self._press(self._upper_case_key)
self._press(val)
elif val.isdigit():
self._press(self._numeric_sign_key)
self._press(val)
self._press(self._alpha_key)
else:
self._press(self._numeric_sign_key)
if self.is_element_present(*self._key_locator(val)):
self._press(val)
else:
self._press(self._alt_key)
if self.is_element_present(*self._key_locator(val)):
self._press(val)
else:
assert False, 'Key %s not found on the keyboard' % val
self._press(self._alpha_key)
self.marionette.switch_to_frame()
| sergecodd/FireFox-OS | B2G/gaia/tests/python/gaiatest/gaia_test.py | Python | apache-2.0 | 16,942 |
import mock
import unittest
import uuid
from vnc_api import vnc_api
from svc_monitor.vrouter_instance_manager import VRouterInstanceManager
class DBObjMatcher(object):
"""
Object for assert_called_with to check if db object is created properly
"""
def __init__(self, prefix):
self.prefix = prefix
def _has_field(self, name, ob):
return (self.prefix + name) in ob
def __eq__(self, other):
if not(self._has_field("name", other)
and self._has_field("uuid", other)
and self._has_field("state", other)
and self._has_field("vrouter", other)):
return False
if other[self.prefix + "vrouter"] == "None":
return False
return True
class VRouterInstanceManagerTest(unittest.TestCase):
VM_UUID = str(uuid.uuid4())
VR_UUID = str(uuid.uuid4())
DB_PREFIX = "test"
MOCKED_VR_BACK_REF = [{
"uuid": VR_UUID
}]
def setUp(self):
mocked_vnc = mock.MagicMock()
mocked_vm_ob = mock.MagicMock()
mocked_vm_ob.get_virtual_router_back_refs\
.return_value = self.MOCKED_VR_BACK_REF
mocked_vm_ob.uuid = self.VM_UUID
mocked_vnc.virtual_machine_read.return_value = mocked_vm_ob
self.mocked_vm_ob = mocked_vm_ob
mocked_db = mock.MagicMock()
mocked_db.get_vm_db_prefix.return_value = self.DB_PREFIX
self.vrouter_manager = VRouterInstanceManager(
db=mocked_db, logger=mock.MagicMock(),
vnc_lib=mocked_vnc, vrouter_scheduler=mock.MagicMock(),
nova_client=mock.MagicMock())
def test_create(self):
st_obj = vnc_api.ServiceTemplate(name="test-template")
svc_properties = vnc_api.ServiceTemplateType()
svc_properties.set_service_virtualization_type('vrouter-instance')
svc_properties.set_image_name('test')
svc_properties.set_ordered_interfaces(True)
if_list = [['management', False], ['left', False], ['right', False]]
for itf in if_list:
if_type = vnc_api.ServiceTemplateInterfaceType(shared_ip=itf[1])
if_type.set_service_interface_type(itf[0])
svc_properties.add_interface_type(if_type)
svc_properties.set_vrouter_instance_type("docker")
st_obj.set_service_template_properties(svc_properties)
si_obj = vnc_api.ServiceInstance("test2")
si_prop = vnc_api.ServiceInstanceType(
left_virtual_network="left", right_virtual_network="right",
management_virtual_network="management")
si_prop.set_interface_list(
[vnc_api.ServiceInstanceInterfaceType(virtual_network="left"),
vnc_api.ServiceInstanceInterfaceType(virtual_network="right"),
vnc_api.ServiceInstanceInterfaceType(
virtual_network="management")])
si_prop.set_virtual_router_id(uuid.uuid4())
si_obj.set_service_instance_properties(si_prop)
si_obj.set_service_template(st_obj)
si_obj.uuid = str(uuid.uuid4())
st_obj.uuid = str(uuid.uuid4())
self.vrouter_manager.create_service(st_obj, si_obj)
self.vrouter_manager.db.service_instance_insert.assert_called_with(
si_obj.get_fq_name_str(), DBObjMatcher(self.DB_PREFIX)
)
def test_delete(self):
mocked_vr = mock.MagicMock()
mocked_vr.uuid = self.VR_UUID
self.vrouter_manager._vnc_lib.virtual_router_read.\
return_value = mocked_vr
self.vrouter_manager.delete_service(mock.MagicMock(), self.VM_UUID)
self.vrouter_manager._vnc_lib.virtual_machine_delete\
.assert_called_with(id=self.VM_UUID)
mocked_vr.del_virtual_machine.assert_called_with(
self.mocked_vm_ob)
| cloudwatt/contrail-controller | src/config/svc-monitor/svc_monitor/tests/test_vrouter_instance_manager.py | Python | apache-2.0 | 3,797 |
"""
This file implements a brew resolver for Galaxy requirements. In order for Galaxy
to pick up on recursively defined and versioned brew dependencies, recipes should
be installed using the experimental `brew-vinstall` external command.
More information here:
https://github.com/jmchilton/brew-tests
https://github.com/Homebrew/homebrew-science/issues/1191
This is still an experimental module and there will almost certainly be backward
incompatible changes coming.
"""
from .resolver_mixins import UsesHomebrewMixin
from ..resolvers import DependencyResolver, INDETERMINATE_DEPENDENCY
# TODO: Implement prefer version linked...
PREFER_VERSION_LINKED = 'linked'
PREFER_VERSION_LATEST = 'latest'
UNKNOWN_PREFER_VERSION_MESSAGE_TEMPLATE = "HomebrewDependencyResolver prefer_version must be %s"
UNKNOWN_PREFER_VERSION_MESSAGE = UNKNOWN_PREFER_VERSION_MESSAGE_TEMPLATE % (PREFER_VERSION_LATEST)
DEFAULT_PREFER_VERSION = PREFER_VERSION_LATEST
class HomebrewDependencyResolver(DependencyResolver, UsesHomebrewMixin):
resolver_type = "homebrew"
def __init__(self, dependency_manager, **kwds):
self.versionless = _string_as_bool(kwds.get('versionless', 'false'))
self.prefer_version = kwds.get('prefer_version', None)
if self.prefer_version is None:
self.prefer_version = DEFAULT_PREFER_VERSION
if self.versionless and self.prefer_version not in [PREFER_VERSION_LATEST]:
raise Exception(UNKNOWN_PREFER_VERSION_MESSAGE)
self._init_homebrew(**kwds)
def resolve(self, name, version, type, **kwds):
if type != "package":
return INDETERMINATE_DEPENDENCY
if version is None or self.versionless:
return self._find_dep_default(name, version)
else:
return self._find_dep_versioned(name, version)
def _string_as_bool(value):
    return str(value).lower() == "true"
__all__ = ['HomebrewDependencyResolver']
| ssorgatem/pulsar | galaxy/tools/deps/resolvers/homebrew.py | Python | apache-2.0 | 1,947 |
import networkx as nx
import re
import json
from learning.PageManager import PageManager
class TreeListLearner(object):
def __init__(self):
self.__minEdgeWeight = 2
self.__DEBUG = False
"""
pageRepresentation is the invisible/visible data structure
only_consider_tag lets you filter to just one tag type, like DIV
"""
def prefix_tree(self, pageRepresentation, only_consider_tag=None):
ptree = {}
path_to_visible_texts = {}
path_to_first_invis_tokens = {}
for tupl in pageRepresentation:
invisible_token_string = tupl['invisible_token_buffer_before'].replace("> <", "><")
invisible_tokens = re.findall("(<.+?>)", invisible_token_string)
if only_consider_tag is not None:
invisible_tokens = [a for a in invisible_tokens if a.startswith("<" + only_consider_tag)]
path_string = ''.join(invisible_tokens)
if path_string not in path_to_visible_texts:
path_to_visible_texts[path_string] = []
path_to_visible_texts[path_string].append(tupl['visible_token_buffer'])
if path_string not in path_to_first_invis_tokens:
path_to_first_invis_tokens[path_string] = []
path_to_first_invis_tokens[path_string].append(tupl['first_invis_token'])
invisible_tokens.append('VISIBLE') # BC we are going to reverse and make this root
# first, we want to process right to left...
invisible_tokens.reverse()
for depth in range(len(invisible_tokens)):
if depth not in ptree:
ptree[depth] = {}
if depth == 0:
if 'VISIBLE' not in ptree[depth]:
ptree[depth]['VISIBLE'] = {'count': 9999999, 'parent': ''}
else:
node = invisible_tokens[depth]
if node not in ptree[depth]:
ptree[depth][node] = {}
ptree[depth][node] = {'count': 1, 'parent': invisible_tokens[depth - 1]}
else:
ptree[depth][node]['count'] += 1
return ptree, path_to_visible_texts, path_to_first_invis_tokens
def prefix_tree_to_paths(self, prefix_tree):
# basically go through the prefix tree, turn each path into a rule and see the visible text that follows it
# turn paths in the tree into results by looking at the visible text that follows each path
# go from leaf to root
G = nx.DiGraph()
for i in prefix_tree.keys():
if i == 0:
continue
else:
if i == 1:
for node in prefix_tree[i]:
G.add_edge('VISIBLE', str(i) + "||" + node, weight=prefix_tree[i][node]['count'], label=prefix_tree[i][node]['count'])
else:
for node in prefix_tree[i]:
G.add_edge(str(i - 1) + "||" + prefix_tree[i][node]['parent'], str(i) + "||" + node,
weight=prefix_tree[i][node]['count'], label=prefix_tree[i][node]['count'])
leaves = [x for x in G.nodes_iter() if G.out_degree(x) == 0] # nodes with no out degree are leaves
# note we have some disconnected trees, so there might not be a path... but...
paths = []
for leaf in leaves:
has_path = nx.has_path(G, 'VISIBLE', leaf)
if has_path:
short_path = nx.shortest_path(G, 'VISIBLE', leaf)
# leading divs
leading_tags = [a for a in short_path if a != 'VISIBLE']
leading_tags.reverse()
paths.append(leading_tags)
        # first, create the path sets... note, any path set would share the same first token
path_sets = {} # key: first token of path, value: list of members
for pth in paths:
pth.reverse()
first_tok = pth[0]
if first_tok not in path_sets:
path_sets[first_tok] = []
path_sets[first_tok].append(pth)
        # now, see if the path set is "valid." A valid path set is one where at least one member is
        # valid (e.g., the path from root to leaf has all of its edges occur at least __minEdgeWeight times)
paths_to_keep = []
for path_set_identifier in path_sets.keys():
good_path_parts = [] # for holding paths where edges occur at least number of times we want
for p in path_sets[path_set_identifier]:
edge_data = [G.get_edge_data(p[i], p[i+1]) for i in range(len(p)) if i < len(p) - 1]
tok_with_edge_data = zip(p, edge_data)
keepers = [tupl[0] for tupl in tok_with_edge_data if tupl[1]['weight'] >= self.__minEdgeWeight]
# TODO: If you are missing the first (last?) token, then it means you are breaking from VISIBLE...
                # which is why you end up with lists that are just one node and don't actually extract anything
good_path_parts.append(keepers)
# now, find the intersection of the guys in good path parts, this will be our final path
final_keeper = []
for i in range(len(good_path_parts)):
if i == 0:
final_keeper = good_path_parts[i]
else:
final_keeper = [z for z in good_path_parts[i] if z in final_keeper]
final_keeper.reverse() # reverse it back to what it looked like before
if len(final_keeper) > 0:
paths_to_keep.append(final_keeper)
# finally, clean the tags
cleaned_tags = []
for pth in paths_to_keep:
cleaned_tags.append([a.split("||")[-1] for a in pth])
#nx.drawing.nx_pydot.write_dot(G, 'test.dot')
return cleaned_tags
"""
Given the rows we extract, separate them into clusters where you have overlapping rows or not.
This is the first step to finding interleaving...
Once we find the interleaving, we merge them in (via common parts of the paths), and create
the lists.
From that, we make markup and that's what we give back
Note: we need the page_manager only to find the end token of the last row's Row HTML
"""
def creat_row_markup(self, row_json, all_page_tokens, page_manager):
markup = {}
earliest_latest_row_locations = {}
for path in row_json: # the path defines the row...
earliest = -1
latest = -1
for i in range(len(row_json[path]['rows'])):
row = row_json[path]['rows'][i]
loc = row['starting_token_location']
if earliest == -1: # first run through
earliest = loc
latest = loc
continue
if loc < earliest:
earliest = loc
if loc > latest:
latest = loc
earliest_latest_row_locations[path] = (earliest, latest)
overlaps = []
for pth in earliest_latest_row_locations:
begin = earliest_latest_row_locations[pth][0]
end = earliest_latest_row_locations[pth][1]
if begin == -1 or end == -1: # ill defined locations
continue
if len(overlaps) == 0: # first guy...
overlaps.append([pth])
continue
overlap_clust = -1
for clust_id in range(len(overlaps)):
cluster = overlaps[clust_id]
for cpath in cluster: # could probably just find min and max of cluster and check w/ that, but easier for now...
p_begin = earliest_latest_row_locations[cpath][0]
p_end = earliest_latest_row_locations[cpath][1]
# now, see if there is not overlap...
if p_end < begin or p_begin > end:
continue
overlap_clust = clust_id
if overlap_clust == -1:
overlaps.append([pth])
else:
overlaps[overlap_clust].append(pth)
table_paths = []
for clust in overlaps:
if self.__DEBUG:
print "===oo00 CLUSTER 00oo==="
print clust
path_for_start = ""
            # the left-most, largest row set marks the beginning, so use that one as the anchor
row_start_location = 99999999999
# first, find the member with the most rows
max_rows = max([len(row_json[member]['rows']) for member in clust])
            # Ok, so the HTML between rows could have been messed up before because we didn't know that these were
# overlapping lists. For instance, the first row could be alone and now it's merged, so let's remake
# the html between...
for member in clust:
num_rows = len(row_json[member]['rows'])
if self.__DEBUG:
print "\t--> (%d, %d): %d" % (earliest_latest_row_locations[member][0], earliest_latest_row_locations[member][1], num_rows)
print "\t\t PATH: "+member
print '\n'.join(["\t\t\t"+str(b['starting_token_location'])+" "+b['visible_text']+": "+b['html_between_row'] for b in row_json[member]['rows']])
if num_rows == max_rows:
if earliest_latest_row_locations[member][0] < row_start_location:
row_start_location = earliest_latest_row_locations[member][0]
path_for_start = member
if self.__DEBUG:
print ">> Row starts at: %d (%s) " % (row_start_location, path_for_start)
table_paths.append(path_for_start)
if self.__DEBUG:
print '== TABLE PATHS =='
print '\n'.join(table_paths)
# for each table path, we need to sort the members, and then assign their inner HTML values. Note that
# these might be empty (for first row, etc.) in which case we fill it in. But if it's there, then keep it...
        # so we turn each table path into a little regex, and starting from each token, find the next one, and use the
        # stuff between as the row's extracted value.
        # they also need to be sorted because we need to assign the correct sequence number to each
for table_path in table_paths:
# make the structure that we want...
by_location = {} # makes it easy to sort by location, etc.
for row in row_json[table_path]['rows']:
by_location[row['starting_token_location']] = row
if len(by_location) < 2:
continue
ordered_row_indexes = sorted(by_location.keys())
extract_sequences = []
ending_row_locations = [] # the token location for the end of each row...
table_path_regex = '+?'.join([tp for tp in table_path])
# Three cases for what your extracted value could be:
# 1 - Normal case: it's the html_between_row value
        # 2 - You are a first or optional row, so your html_between_row is empty (because you might have been
# on a path by yourself). So, we find it as the html between you and the next guy in this combined list
# 3 - The last row. For this, we guess what the end looks like by looking at all of the HTML tags
# for the html_between_row for the guy preceding it, and then find those tags from the start of the
# last row, to the end of the HTML page
for idx in range(len(ordered_row_indexes)):
ordered_row_idx = ordered_row_indexes[idx]
ext_seq = ''
if by_location[ordered_row_idx]['html_between_row'] == '' and idx < len(ordered_row_indexes) - 1:
# can get the HTML as the text between this guy and the next
next_start_token = ordered_row_indexes[idx+1] - 1
sub_page = all_page_tokens.getTokensAsString(ordered_row_idx, next_start_token,
whitespace=True)
ext_seq = sub_page
else:
ext_seq = by_location[ordered_row_idx]['html_between_row']
if idx < len(ordered_row_indexes) - 1: # We don't know where the last guy ends, so we don't have this.
extract_sequences.append(ext_seq)
ending_row_locations.append(ordered_row_indexes[idx+1] - 1)
if idx == len(ordered_row_indexes) - 1: # last guy, so use the end_it regex and find from this guy
# initially was doing longest common substring for all prev rows, but you really just need
                # the last one, I think. Otherwise if you are mixing in optional/first-row you get weirdness...
found_end_loc = self.slot_to_end_token_loc(''.join(extract_sequences[-1]), all_page_tokens,
ordered_row_idx,
page_manager)
seen_etags = [s for s in re.findall("<[a-z]+", ''.join(extract_sequences[-1]))]
# now, jump to the next HTML token we see, after the occurrence of these guys...
rest_of_page = all_page_tokens.getTokensAsString(ordered_row_idx, len(all_page_tokens) - 1,
whitespace=True)
found_match = re.search('.+?'.join(seen_etags), rest_of_page)
if found_match:
found = found_match.end()
else:
found = len(all_page_tokens) - 1
# now, find the next HTML tag from this point, and add that into the extract
# TODO: get this last token in there...
slot = rest_of_page[0:found]
extract_sequences.append(slot)
ending_row_locations.append(found_end_loc)
# now, add this markup in
markup[table_path] = {'sequence': []}
for i in range(len(extract_sequences)):
extract = extract_sequences[i]
seq_number = i+1
#start_tok_loc = by_location[ordered_row_indexes[i]]['starting_token_location']
start_tok_loc = self.slot_to_start_loc(table_path, extract, page_manager)
end_tok_loc = ending_row_locations[i]
if start_tok_loc and end_tok_loc:
markup_value = all_page_tokens.getTokensAsString(start_tok_loc, end_tok_loc, whitespace=True)
markup[table_path]['sequence'].append({'extract': markup_value, 'sequence_number': seq_number,
'starting_token_location': start_tok_loc,
'ending_token_location': end_tok_loc})
return markup
    # TODO: This could have errors because of its reliance on regex
def slot_to_start_loc(self, rule, row_html, page_manager):
rule_regex = rule.replace("><", ">.*?<")
# print "ROW: %s" % row_html
# print "RULE: %s" % rule_regex
found_match = re.search(rule_regex, row_html)
if found_match:
found = found_match.end()
possible_locs = page_manager.getPossibleLocations(page_manager.getPageIds()[0], row_html[found:])
best_loc = possible_locs[0] # now we've turned this slot into a location
return best_loc[0]
return None
# TODO: This could have errors... lots of regex stuff...
def slot_to_end_token_loc(self, extraction, all_page_tokens, starting_token_location, page_manager):
seen_etags = [s for s in re.findall("<[a-z]+", extraction)]
# now, jump to the next HTML token we see, after the occurrence of these guys...
rest_of_page = all_page_tokens.getTokensAsString(starting_token_location, len(all_page_tokens) - 1,
whitespace=True)
found_match = re.search('.*?'.join(seen_etags), rest_of_page)
if found_match:
found = found_match.end()
else:
return None
# now, find the next HTML tag from this point, and add that into the extract
# TODO: get this last token in there...
slot = rest_of_page[0:found]
# we know this is the slot for the only page in the page manager passed in...
possible_locs = page_manager.getPossibleLocations(page_manager.getPageIds()[0], slot)
best_loc = possible_locs[0] # now we've turned this slot into a location
if best_loc[0] == starting_token_location:
return best_loc[-1]
else:
raise Exception("Could not locate the correct end token")
# def remove_html(self, value):
# processor = RemoveHtml(value)
# value = processor.post_process()
# processor = RemoveExtraSpaces(value)
# value = processor.post_process()
# return value
# def longest_common_substring(self, s1, s2):
# m = [[0] * (1 + len(s2)) for i in xrange(1 + len(s1))]
# longest, x_longest = 0, 0
# for x in xrange(1, 1 + len(s1)):
# for y in xrange(1, 1 + len(s2)):
# if s1[x - 1] == s2[y - 1]:
# m[x][y] = m[x - 1][y - 1] + 1
# if m[x][y] > longest:
# longest = m[x][y]
# x_longest = x
# else:
# m[x][y] = 0
# return s1[x_longest - longest: x_longest]
"""
    @param pages: A hash where key is the page name, and value is the raw page content
"""
def learn_list_extractors(self, pages):
page_mgr = PageManager() #write_debug_files=True)
markup = {}
for page in pages:
page_content = pages[page]
page_mgr.addPage(page, page_content)
content_list_markup = self.lists_on_single_page(page_content)
markup[page] = content_list_markup
# print '--- MARKUP ---'
# print json.dumps(markup)
page_mgr.learnStripes(markups=markup)
rules = page_mgr.learnRulesFromMarkup(markup)
# now, for each markup rule, learn a little page manager
sublist_page_managers = {}
for page in markup:
for rule_name in markup[page]:
if rule_name not in sublist_page_managers:
sublist_page_managers[rule_name] = PageManager()
for rid in range(len(markup[page][rule_name]['sequence'])):
row = markup[page][rule_name]['sequence'][rid]
sublist_page_managers[rule_name].addPage(page+"html%d" % rid, row['extract'])
sublist_sub_rules = {}
for sublist in sublist_page_managers:
sublist_page_managers[sublist].learnStripes()
sub_rules = sublist_page_managers[sublist].learnAllRules(in_list = True)
sublist_sub_rules[sublist] = sub_rules # This should match a rule name in the rules...
count = 1
for rule in rules.rules:
# print "== RULE INFO =="
# print str(rule.name)
rule.set_sub_rules(sublist_sub_rules[rule.name])
list_name = '_div_list'+format(count, '04')
for page_id in markup:
if rule.name in markup[page_id]:
markup[page_id][list_name] = markup[page_id].pop(rule.name)
rule.name = list_name
# print str(json.dumps(rule.toJson()))
# print "==============="
#
# print rules.toJson()
return rules, markup
def lists_on_single_page(self, content):
pg = PageManager()
pg.addPage("zzz", content)
triples = pg.getVisibleTokenStructure()
(ptree, paths_to_vis_text, path_to_invis_toks) = self.prefix_tree(triples, only_consider_tag='div')
potential_lists = self.prefix_tree_to_paths(ptree)
if self.__DEBUG:
print '.... POTENTIAL LISTS ARE ....'
print '\n'.join([''.join(p) for p in potential_lists])
print '.... OK!....'
all_tokens_list = pg.getPage("zzz").tokens
# Now, let's get our lists
lists = {}
for i in range(len(potential_lists)):
pot_list = potential_lists[i]
as_path = ''.join(pot_list)
if self.__DEBUG:
print "PATH: %s" % as_path
lists[as_path] = {
'rows': []
}
# if as_path in paths_to_vis_text:
for path_to_vis in paths_to_vis_text:
if path_to_vis.find(as_path) > -1:
vis_texts = [a for a in paths_to_vis_text[path_to_vis]]
invis_toks = [t for t in path_to_invis_toks[path_to_vis]]
for idx in range(len(vis_texts)):
if self.__DEBUG:
print "%s ==> %s" % (vis_texts[idx], str(invis_toks[idx].token_location))
html_between_row = ''
if (idx+1) < len(vis_texts):
begin = invis_toks[idx].token_location
end = invis_toks[idx+1].token_location - 1
html_between_row = all_tokens_list.getTokensAsString(begin, end, whitespace=True)
lists[as_path]['rows'].append({
'visible_text': vis_texts[idx],
'starting_token_location': invis_toks[idx].token_location,
'html_between_row': html_between_row
})
as_json_str = json.dumps(lists)
if self.__DEBUG:
print "--------"
print as_json_str
print "--------"
# # do it as an extraction instead?
# item_rule_begin = Landmark.escape_regex_string('<html')
# item_rule_end = Landmark.escape_regex_string('/html>')
#
# begin_iter_rule = '.+?'.join([Landmark.escape_regex_string(a) for a in pot_list])
#
# # figure out: for each tag in the rule, add it's end tag (keep track of tag type)
# # NOTE: for now, this assumes that the HTML is well formed
# end_it = '.+?'.join(['</div>' for i in range(len(pot_list))])
#
# end_iter_rule = end_it
#
# # include end-regex: included in the stuff that's extracted.
# # Solve for the case where you only see part of the stuff
# rule = IterationRule(str(i) + "_pathListRule", item_rule_begin, item_rule_end,
# begin_iter_rule, end_iter_rule, removehtml=True)
# extraction = rule.apply(content)
#
# print "**PATH: "+''.join(pot_list)
# as_json_str = json.dumps(extraction)
#
# for seq in extraction['sequence']:
# print "\t"+seq['extract']
# TODO: do this here????
# TODO: big drop down the path should be considered... not just if hte path occurs twice
# TODO: fix bugs
markup = self.creat_row_markup(lists, all_tokens_list, pg)
if self.__DEBUG:
print "list markup"
json.dumps(markup)
return markup
| usc-isi-i2/landmark-extraction | src/learning/TreeListLearner.py | Python | apache-2.0 | 23,788 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras text vectorization preprocessing layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import operator
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine.base_preprocessing_layer import Combiner
from tensorflow.python.keras.engine.base_preprocessing_layer import CombinerPreprocessingLayer
from tensorflow.python.keras.layers.preprocessing import categorical_encoding
from tensorflow.python.keras.layers.preprocessing import index_lookup
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_string_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_functional_ops
from tensorflow.python.ops.ragged import ragged_string_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import keras_export
LOWER_AND_STRIP_PUNCTUATION = "lower_and_strip_punctuation"
SPLIT_ON_WHITESPACE = "whitespace"
TFIDF = categorical_encoding.TFIDF
INT = categorical_encoding.INT
BINARY = categorical_encoding.BINARY
COUNT = categorical_encoding.COUNT
# This is an explicit regex of all the tokens that will be stripped if
# LOWER_AND_STRIP_PUNCTUATION is set. If an application requires other
# stripping, a Callable should be passed into the 'standardize' arg.
DEFAULT_STRIP_REGEX = r'[!"#$%&()\*\+,-\./:;<=>?@\[\\\]^_`{|}~\']'
# The string tokens in the extracted vocabulary
_VOCAB_NAME = "vocab"
# The inverse-document-frequency weights
_IDF_NAME = "idf"
# The IDF data for the OOV token
_OOV_IDF_NAME = "oov_idf"
# The string tokens in the full vocabulary
_ACCUMULATOR_VOCAB_NAME = "vocab"
# The total counts of each token in the vocabulary
_ACCUMULATOR_COUNTS_NAME = "counts"
# The number of documents / examples that each token appears in.
_ACCUMULATOR_DOCUMENT_COUNTS = "document_counts"
# The total number of documents / examples in the dataset.
_ACCUMULATOR_NUM_DOCUMENTS = "num_documents"
@keras_export(
"keras.layers.experimental.preprocessing.TextVectorization", v1=[])
class TextVectorization(CombinerPreprocessingLayer):
"""Text vectorization layer.
This layer has basic options for managing text in a Keras model. It
transforms a batch of strings (one sample = one string) into either a list of
token indices (one sample = 1D tensor of integer token indices) or a dense
representation (one sample = 1D tensor of float values representing data about
the sample's tokens).
If desired, the user can call this layer's adapt() method on a dataset.
When this layer is adapted, it will analyze the dataset, determine the
frequency of individual string values, and create a 'vocabulary' from them.
This vocabulary can have unlimited size or be capped, depending on the
configuration options for this layer; if there are more unique values in the
input than the maximum vocabulary size, the most frequent terms will be used
to create the vocabulary.
The processing of each sample contains the following steps:
1) standardize each sample (usually lowercasing + punctuation stripping)
2) split each sample into substrings (usually words)
3) recombine substrings into tokens (usually ngrams)
4) index tokens (associate a unique int value with each token)
5) transform each sample using this index, either into a vector of ints or
a dense float vector.
Some notes on passing Callables to customize splitting and normalization for
this layer:
1) Any callable can be passed to this Layer, but if you want to serialize
this object you should only pass functions that are registered Keras
serializables (see `tf.keras.utils.register_keras_serializable` for more
details).
2) When using a custom callable for `standardize`, the data received
by the callable will be exactly as passed to this layer. The callable
should return a tensor of the same shape as the input.
3) When using a custom callable for `split`, the data received by the
callable will have the 1st dimension squeezed out - instead of
`[["string to split"], ["another string to split"]]`, the Callable will
see `["string to split", "another string to split"]`. The callable should
return a Tensor with the first dimension containing the split tokens -
in this example, we should see something like `[["string", "to", "split],
["another", "string", "to", "split"]]`. This makes the callable site
natively compatible with `tf.strings.split()`.
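  For example (illustrative only, not part of this layer's API), a custom
  `split` callable compatible with the contract above could be:
  ```
  def split_on_commas(input_tensor):
    # `input_tensor` already has its 1st dimension squeezed out.
    return tf.strings.split(input_tensor, sep=",")
  ```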
Attributes:
max_tokens: The maximum size of the vocabulary for this layer. If None,
there is no cap on the size of the vocabulary.
standardize: Optional specification for standardization to apply to the
input text. Values can be None (no standardization),
'lower_and_strip_punctuation' (lowercase and remove punctuation) or a
Callable. Default is 'lower_and_strip_punctuation'.
split: Optional specification for splitting the input text. Values can be
None (no splitting), 'whitespace' (split on ASCII whitespace), or a
Callable. The default is 'whitespace'.
ngrams: Optional specification for ngrams to create from the possibly-split
input text. Values can be None, an integer or tuple of integers; passing
an integer will create ngrams up to that integer, and passing a tuple of
integers will create ngrams for the specified values in the tuple. Passing
None means that no ngrams will be created.
output_mode: Optional specification for the output of the layer. Values can
be "int", "binary", "count" or "tf-idf", configuring the layer as follows:
"int": Outputs integer indices, one integer index per split string
token.
"binary": Outputs a single int array per batch, of either vocab_size or
max_tokens size, containing 1s in all elements where the token mapped
to that index exists at least once in the batch item.
"count": As "binary", but the int array contains a count of the number
of times the token at that index appeared in the batch item.
"tf-idf": As "binary", but the TF-IDF algorithm is applied to find the
value in each token slot.
output_sequence_length: Only valid in INT mode. If set, the output will have
its time dimension padded or truncated to exactly `output_sequence_length`
values, resulting in a tensor of shape [batch_size,
output_sequence_length] regardless of how many tokens resulted from the
splitting step. Defaults to None.
pad_to_max_tokens: Only valid in "binary", "count", and "tf-idf" modes. If
True, the output will have its feature axis padded to `max_tokens` even if
the number of unique tokens in the vocabulary is less than max_tokens,
resulting in a tensor of shape [batch_size, max_tokens] regardless of
vocabulary size. Defaults to True.
Example:
This example instantiates a TextVectorization layer that lowercases text,
splits on whitespace, strips punctuation, and outputs integer vocab indices.
```
max_features = 5000 # Maximum vocab size.
max_len = 40 # Sequence length to pad the outputs to.
# Create the layer.
vectorize_layer = text_vectorization.TextVectorization(
max_tokens=max_features,
output_mode='int',
output_sequence_length=max_len)
# Now that the vocab layer has been created, call `adapt` on the text-only
# dataset to create the vocabulary. You don't have to batch, but for large
# datasets this means we're not keeping spare copies of the dataset in memory.
vectorize_layer.adapt(text_dataset.batch(64))
# Create the model that uses the vectorize text layer
model = tf.keras.models.Sequential()
# Start by creating an explicit input layer. It needs to have a shape of (1,)
# (because we need to guarantee that there is exactly one string input per
# batch), and the dtype needs to be 'string'.
model.add(tf.keras.Input(shape=(1,), dtype=tf.string))
# The first layer in our model is the vectorization layer. After this layer,
# we have a tensor of shape (batch_size, max_len) containing vocab indices.
model.add(vectorize_layer)
# Next, we add a layer to map those vocab indices into a space of
# dimensionality 'embedding_dims'. Note that we're using max_features+1 here,
# since there's an OOV token that gets added to the vocabulary in
# vectorize_layer.
model.add(tf.keras.layers.Embedding(max_features+1, embedding_dims))
# At this point, you have embedded float data representing your tokens, and
# can add whatever other layers you need to create your model.
```
"""
# TODO(momernick): Add an examples section to the docstring.
def __init__(self,
max_tokens=None,
standardize=LOWER_AND_STRIP_PUNCTUATION,
split=SPLIT_ON_WHITESPACE,
ngrams=None,
output_mode=INT,
output_sequence_length=None,
pad_to_max_tokens=True,
**kwargs):
# This layer only applies to string processing, and so should only have
# a dtype of 'string'.
if "dtype" in kwargs and kwargs["dtype"] != dtypes.string:
raise ValueError("TextVectorization may only have a dtype of string.")
elif "dtype" not in kwargs:
kwargs["dtype"] = dtypes.string
# 'standardize' must be one of (None, LOWER_AND_STRIP_PUNCTUATION, callable)
layer_utils.validate_string_arg(
standardize,
allowable_strings=(LOWER_AND_STRIP_PUNCTUATION),
layer_name="TextVectorization",
arg_name="standardize",
allow_none=True,
allow_callables=True)
# 'split' must be one of (None, SPLIT_ON_WHITESPACE, callable)
layer_utils.validate_string_arg(
split,
allowable_strings=(SPLIT_ON_WHITESPACE),
layer_name="TextVectorization",
arg_name="split",
allow_none=True,
allow_callables=True)
# 'output_mode' must be one of (None, INT, COUNT, BINARY, TFIDF)
layer_utils.validate_string_arg(
output_mode,
allowable_strings=(INT, COUNT, BINARY, TFIDF),
layer_name="TextVectorization",
arg_name="output_mode",
allow_none=True)
# 'ngrams' must be one of (None, int, tuple(int))
if not (ngrams is None or
isinstance(ngrams, int) or
isinstance(ngrams, tuple) and
all(isinstance(item, int) for item in ngrams)):
raise ValueError(("`ngrams` must be None, an integer, or a tuple of "
"integers. Got %s") % (ngrams,))
# 'output_sequence_length' must be one of (None, int) and is only
# set if output_mode is INT.
if (output_mode == INT and not (isinstance(output_sequence_length, int) or
(output_sequence_length is None))):
raise ValueError("`output_sequence_length` must be either None or an "
"integer when `output_mode` is 'int'. "
"Got %s" % output_sequence_length)
if output_mode != INT and output_sequence_length is not None:
raise ValueError("`output_sequence_length` must not be set if "
"`output_mode` is not 'int'.")
# If max_tokens is set, the value must be greater than 1 - otherwise we
# are creating a 0-element vocab, which doesn't make sense.
if max_tokens is not None and max_tokens < 1:
raise ValueError("max_tokens must be > 1.")
self._max_tokens = max_tokens
# In INT mode, we have two reserved values (PAD and OOV). However, non-INT
# modes don't have a PAD value, so we only need to reserve one value.
self._reserved_values = 2 if output_mode == INT else 1
# In INT mode, the zero value is reserved for padding (per Keras standard
# padding approaches). In non-INT modes, there is no padding so we can set
# the OOV value to zero instead of one.
self._oov_value = 1 if output_mode == INT else 0
# We always reduce the max token number by 1 to account for the OOV token
# if it is set. Keras' use of the reserved number 0 for padding tokens,
# if the output is in INT mode, does not really count as a 'token' for
# vocabulary purposes, so we only reduce vocab size by 1 here.
self._max_vocab_size = max_tokens - 1 if max_tokens is not None else None
self._standardize = standardize
self._split = split
self._ngrams_arg = ngrams
if isinstance(ngrams, int):
self._ngrams = tuple(range(1, ngrams + 1))
else:
self._ngrams = ngrams
self._output_mode = output_mode
self._output_sequence_length = output_sequence_length
self._pad_to_max = pad_to_max_tokens
self._vocab_size = 0
self._called = False
super(TextVectorization, self).__init__(
combiner=_TextVectorizationCombiner(
self._max_vocab_size, compute_idf=output_mode == TFIDF),
**kwargs)
self._supports_ragged_inputs = True
reserve_zero = output_mode in [None, INT]
self._index_lookup_layer = self._get_index_lookup_class()(
max_tokens=max_tokens, reserve_zero=reserve_zero, dtype=dtypes.string)
# If this layer is configured for string or integer output, we do not
# create a vectorization layer (as the output is not vectorized).
if self._output_mode in [None, INT]:
return
if max_tokens is not None and self._pad_to_max:
max_elements = max_tokens
else:
max_elements = None
self._vectorize_layer = self._get_vectorization_class()(
max_tokens=max_elements, output_mode=self._output_mode)
# These are V1/V2 shim points. There are V1 implementations in the V1 class.
def _get_vectorization_class(self):
return categorical_encoding.CategoricalEncoding
def _get_table_data(self):
keys, values = self._table.export()
return (keys.numpy(), values.numpy())
def _get_index_lookup_class(self):
return index_lookup.IndexLookup
def _to_numpy(self, preprocessed_data):
"""Converts preprocessed inputs into numpy arrays."""
if isinstance(preprocessed_data, np.ndarray):
return preprocessed_data
return np.array(preprocessed_data.to_list())
# End of V1/V2 shim points.
def _assert_same_type(self, expected_type, values, value_name):
if dtypes.as_dtype(expected_type) != dtypes.as_dtype(values.dtype):
raise RuntimeError("Expected %s type %s, got %s" %
(value_name, expected_type, values.dtype))
def _convert_to_ndarray(self, x):
return np.array(x) if isinstance(x, (list, tuple)) else x
def compute_output_shape(self, input_shape):
if self._output_mode != INT:
return tensor_shape.TensorShape([input_shape[0], self._max_tokens])
if self._output_mode == INT and self._split is None:
return input_shape
if self._output_mode == INT and self._split is not None:
input_shape = list(input_shape)
input_shape[1] = self._output_sequence_length
return tensor_shape.TensorShape(input_shape)
def compute_output_signature(self, input_spec):
output_shape = self.compute_output_shape(input_spec.shape.as_list())
output_dtype = K.floatx() if self._output_mode == TFIDF else dtypes.int64
return tensor_spec.TensorSpec(shape=output_shape, dtype=output_dtype)
def adapt(self, data, reset_state=True):
"""Fits the state of the preprocessing layer to the dataset.
Overrides the default adapt method to apply relevant preprocessing to the
inputs before passing to the combiner.
Arguments:
data: The data to train on. It can be passed either as a tf.data Dataset,
or as a numpy array.
reset_state: Optional argument specifying whether to clear the state of
the layer at the start of the call to `adapt`. This must be True for
this layer, which does not support repeated calls to `adapt`.
"""
if not reset_state:
raise ValueError("TextVectorization does not support streaming adapts.")
# Build the layer explicitly with the original data shape instead of relying
# on an implicit call to `build` in the base layer's `adapt`, since
# preprocessing changes the input shape.
if isinstance(data, np.ndarray):
if data.ndim == 1:
data = np.expand_dims(data, axis=-1)
self.build(data.shape)
preprocessed_inputs = self._to_numpy(self._preprocess(data))
elif isinstance(data, dataset_ops.DatasetV2):
# TODO(momernick): Replace this with a more V2-friendly API.
shape = dataset_ops.get_legacy_output_shapes(data)
if not isinstance(shape, tensor_shape.TensorShape):
raise ValueError("The dataset passed to 'adapt' must contain a single "
"tensor value.")
if shape.rank == 1:
data = data.map(lambda tensor: array_ops.expand_dims(tensor, -1))
self.build(dataset_ops.get_legacy_output_shapes(data))
preprocessed_inputs = data.map(self._preprocess)
else:
raise ValueError(
"adapt() requires a Dataset or a Numpy array as input, got {}".format(
type(data)))
super(TextVectorization, self).adapt(preprocessed_inputs, reset_state)
def get_vocabulary(self):
return self._index_lookup_layer.get_vocabulary()
def get_config(self):
config = {
"max_tokens": self._max_tokens,
"standardize": self._standardize,
"split": self._split,
"ngrams": self._ngrams_arg,
"output_mode": self._output_mode,
"output_sequence_length": self._output_sequence_length,
"pad_to_max_tokens": self._pad_to_max,
}
base_config = super(TextVectorization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def count_params(self):
# This method counts the number of scalars in the weights of this layer.
# Since this layer doesn't have any /actual/ weights (in that there's
# nothing in this layer that can be trained - we only use the weight
# abstraction for ease of saving!) we return 0.
return 0
def set_vocabulary(self,
vocab,
df_data=None,
oov_df_value=None,
append=False):
"""Sets vocabulary (and optionally document frequency) data for this layer.
This method sets the vocabulary and DF data for this layer directly, instead
of analyzing a dataset through 'adapt'. It should be used whenever the vocab
(and optionally document frequency) information is already known. If
vocabulary data is already present in the layer, this method will either
replace it, if 'append' is set to False, or append to it (if 'append' is set
to True).
Arguments:
vocab: An array of string tokens.
df_data: An array of document frequency data. Only necessary if the layer
output_mode is TFIDF.
oov_df_value: The document frequency of the OOV token. Only necessary if
output_mode is TFIDF. OOV data is optional when appending additional
data in TFIDF mode; if an OOV value is supplied it will overwrite the
existing OOV value.
append: Whether to overwrite or append any existing vocabulary data.
Raises:
ValueError: If there are too many inputs, the inputs do not match, or
input data is missing.
RuntimeError: If the vocabulary cannot be set when this function is
called. This happens when "binary", "count", and "tfidf" modes,
if "pad_to_max_tokens" is False and the layer itself has already been
called.
"""
if self._output_mode != TFIDF and df_data is not None:
raise ValueError("df_data should only be set if output_mode is TFIDF. "
"output_mode is %s." % self._output_mode)
if (self._output_mode in [BINARY, COUNT, TFIDF] and self._called and
not self._pad_to_max):
raise RuntimeError(("When using TextVectorization in {mode} mode and "
"pad_to_max_tokens is False, the vocabulary cannot "
"be changed after the layer is "
"called.").format(mode=self._output_mode))
current_table_size = self._index_lookup_layer.vocab_size()
self._index_lookup_layer.set_vocabulary(vocab, append)
# When doing raw or integer output, we don't have a Vectorize layer to
# manage. In this case, we can return directly.
if self._output_mode in [None, INT]:
return
if not self._pad_to_max or self._max_tokens is None:
num_tokens = self._index_lookup_layer.vocab_size() + self._reserved_values
self._vectorize_layer.set_num_elements(num_tokens)
# We're only _really_ appending if the table_size is nonzero. This is
# important for some sanity checks in tfidf mode (specifically, checking if
# oov_df_value is set or not) and handling existing tfidf weight data.
append = append if current_table_size > 0 else False
if self._output_mode == TFIDF:
if df_data is None:
raise ValueError("df_data must be set if output_mode is TFIDF")
if len(vocab) != len(df_data):
raise ValueError("df_data must be the same length as vocab. "
"len(df_data) is %s, len(vocab) is %s" %
(len(vocab), len(df_data)))
if not append and oov_df_value is None:
raise ValueError("You must pass an oov_df_value the first time "
"'set_vocabulary' is called when output_mode is "
"TFIDF.")
df_data = self._convert_to_ndarray(df_data)
if append:
# The existing IDF data is stored in a Keras weight, so we can get it
# by calling K.get_value() on the weight object. Take the first
# table_size+1 values in case we're padding the weight with zeros
existing_df_data = K.get_value(
self._vectorize_layer.tf_idf_weights)[:current_table_size + 1]
df_data = np.append(existing_df_data, df_data, axis=0)
# If we are appending and need to replace the OOV DF value, we can
# assign it over the existing OOV DF value at index 0 of the (already-
# concatenated) DF value array.
if oov_df_value is not None:
df_data[0] = oov_df_value
else:
# If we are not appending (that is, we have only new data) we need to
        # insert the OOV value at the front of the array. (This is an append to
# the head, not a replacement of the zeroth value.)
if not isinstance(oov_df_value, np.ndarray):
oov_df_value = np.array([oov_df_value])
df_data = np.insert(df_data, 0, oov_df_value)
self._vectorize_layer.set_tfidf_data(df_data)
def build(self, input_shape):
    # We have to use 'and not ==' here, because comparing input_shape[1] to 1
    # (with either != or ==) can result in None for undefined shape axes. If
    # using 'and !=', this causes the expression to evaluate to False instead
    # of True if the shape is undefined; the expression needs to evaluate to
    # True in that case.
if self._split is not None and not input_shape[1] == 1: # pylint: disable=g-comparison-negation
raise RuntimeError(
"When using TextVectorization to tokenize strings, the first "
"dimension of the input array must be 1, got shape "
"{}".format(input_shape))
super(TextVectorization, self).build(input_shape)
def _set_state_variables(self, updates):
if not self.built:
raise RuntimeError("_set_state_variables() must be called after build().")
if self._output_mode == TFIDF:
self.set_vocabulary(updates[_VOCAB_NAME], updates[_IDF_NAME],
updates[_OOV_IDF_NAME])
else:
self.set_vocabulary(updates[_VOCAB_NAME])
def _preprocess(self, inputs):
if self._standardize == LOWER_AND_STRIP_PUNCTUATION:
if ragged_tensor.is_ragged(inputs):
lowercase_inputs = ragged_functional_ops.map_flat_values(
gen_string_ops.string_lower, inputs)
# Depending on configuration, we may never touch the non-data tensor
# in the ragged inputs tensor. If that is the case, and this is the
# only layer in the keras model, running it will throw an error.
# To get around this, we wrap the result in an identity.
lowercase_inputs = array_ops.identity(lowercase_inputs)
else:
lowercase_inputs = gen_string_ops.string_lower(inputs)
inputs = string_ops.regex_replace(lowercase_inputs, DEFAULT_STRIP_REGEX,
"")
elif callable(self._standardize):
inputs = self._standardize(inputs)
elif self._standardize is not None:
raise ValueError(("%s is not a supported standardization. "
"TextVectorization supports the following options "
"for `standardize`: None, "
"'lower_and_strip_punctuation', or a "
"Callable.") % self._standardize)
if self._split is not None:
# If we are splitting, we validate that the 1st axis is of dimension 1 and
# so can be squeezed out. We do this here instead of after splitting for
# performance reasons - it's more expensive to squeeze a ragged tensor.
inputs = array_ops.squeeze(inputs, axis=1)
if self._split == SPLIT_ON_WHITESPACE:
# This treats multiple whitespaces as one whitespace, and strips leading
# and trailing whitespace.
inputs = ragged_string_ops.string_split_v2(inputs)
elif callable(self._split):
inputs = self._split(inputs)
else:
raise ValueError(
("%s is not a supported splitting."
"TextVectorization supports the following options "
"for `split`: None, 'whitespace', or a Callable.") % self._split)
# Note that 'inputs' here can be either ragged or dense depending on the
# configuration choices for this Layer. The strings.ngrams op, however, does
# support both ragged and dense inputs.
if self._ngrams is not None:
inputs = ragged_string_ops.ngrams(
inputs, ngram_width=self._ngrams, separator=" ")
return inputs
def call(self, inputs):
self._called = True
inputs = self._preprocess(inputs)
# If we're not doing any output processing, return right away.
if self._output_mode is None:
return inputs
indexed_data = self._index_lookup_layer(inputs)
if self._output_mode == INT:
# Once we have the dense tensor, we can return it if we weren't given a
# fixed output sequence length. If we were, though, we have to dynamically
# choose whether to pad or trim it based on each tensor.
# We need to convert to dense if we have a ragged tensor.
if ragged_tensor.is_ragged(indexed_data):
dense_data = indexed_data.to_tensor(default_value=0)
else:
dense_data = indexed_data
if self._output_sequence_length is None:
dense_data.set_shape(tensor_shape.TensorShape((None, None)))
return dense_data
else:
sequence_len = K.shape(dense_data)[1]
pad_amt = self._output_sequence_length - sequence_len
pad_fn = lambda: array_ops.pad(dense_data, [[0, 0], [0, pad_amt]])
slice_fn = lambda: dense_data[:, :self._output_sequence_length]
output_tensor = control_flow_ops.cond(
sequence_len < self._output_sequence_length,
true_fn=pad_fn,
false_fn=slice_fn)
output_tensor.set_shape(
tensor_shape.TensorShape((None, self._output_sequence_length)))
return output_tensor
# If we're not returning integers here, we rely on the vectorization layer
# to create the output.
return self._vectorize_layer(indexed_data)
class _TextVectorizationAccumulator(
collections.namedtuple("_TextVectorizationAccumulator",
["count_dict", "per_doc_count_dict", "metadata"])):
pass
# A note on this combiner: This contains functionality that will be extracted
# into the Vectorization and IndexLookup combiner objects. At that point,
# TextVectorization can become a PreprocessingStage instead of a Layer and
# this combiner can be retired. Until then, we leave this as is instead of
# attempting a refactor of what will soon be deleted.
class _TextVectorizationCombiner(Combiner):
"""Combiner for the TextVectorization preprocessing layer.
This class encapsulates the logic for computing a vocabulary based on the
frequency of each token.
Attributes:
vocab_size: (Optional) If set, only the top `vocab_size` tokens (based on
frequency across the dataset) are retained in the vocabulary. If None, or
set to a value greater than the total number of distinct tokens in the
dataset, all tokens are retained.
compute_idf: (Optional) If set, the inverse document frequency will be
computed for each value.
"""
def __init__(self, vocab_size=None, compute_idf=False):
self._vocab_size = vocab_size
self._compute_idf = compute_idf
self._input_dtype = dtypes.string
def compute(self, values, accumulator=None):
"""Compute a step in this computation, returning a new accumulator."""
if dtypes.as_dtype(self._input_dtype) != dtypes.as_dtype(values.dtype):
raise RuntimeError("Expected input type %s, got %s" %
(self._input_dtype, values.dtype))
if ragged_tensor.is_ragged(values):
values = values.to_list()
if isinstance(values, ops.EagerTensor):
values = values.numpy()
if isinstance(values, np.ndarray):
values = values.tolist()
if accumulator is None:
accumulator = self._create_accumulator()
# TODO(momernick): Benchmark improvements to this algorithm.
for document in values:
current_doc_id = accumulator.metadata[0]
for token in document:
accumulator.count_dict[token] += 1
if self._compute_idf:
doc_count = accumulator.per_doc_count_dict[token]
if doc_count["last_doc_id"] != current_doc_id:
doc_count["count"] += 1
doc_count["last_doc_id"] = current_doc_id
accumulator.metadata[0] += 1
return accumulator
def merge(self, accumulators):
"""Merge several accumulators to a single accumulator."""
if not accumulators:
return accumulators
base_accumulator = accumulators[0]
for accumulator in accumulators[1:]:
base_accumulator.metadata[0] += accumulator.metadata[0]
for token, value in accumulator.count_dict.items():
base_accumulator.count_dict[token] += value
if self._compute_idf:
for token, value in accumulator.per_doc_count_dict.items():
# Any newly created token counts in 'base_accumulator''s
# per_doc_count_dict will have a last_doc_id of -1. This is always
# less than the next doc id (which are strictly positive), so any
# future occurrences are guaranteed to be counted.
base_accumulator.per_doc_count_dict[token]["count"] += value["count"]
return base_accumulator
def _inverse_document_frequency(self, document_counts, num_documents):
"""Compute the inverse-document-frequency (IDF) component of TFIDF.
Uses the default weighting scheme described in
https://en.wikipedia.org/wiki/Tf%E2%80%93idf.
Args:
document_counts: An array of the # of documents each token appears in.
num_documents: An int representing the total number of documents
Returns:
An array of "inverse document frequency" weights.
"""
return np.log(1 + num_documents / (1 + np.array(document_counts)))
def extract(self, accumulator):
"""Convert an accumulator into a dict of output values.
Args:
accumulator: An accumulator aggregating over the full dataset.
Returns:
A dict of:
"vocab": A list of the retained items in the vocabulary.
"idf": The inverse-document-frequency for each item in vocab.
idf[vocab_idx] is the IDF value for the corresponding vocab item.
"oov_idf": The inverse-document-frequency for the OOV token.
"""
if self._compute_idf:
vocab_counts, document_counts, num_documents = accumulator
else:
vocab_counts, _, _ = accumulator
sorted_counts = sorted(
vocab_counts.items(), key=operator.itemgetter(1, 0), reverse=True)
vocab_data = (
sorted_counts[:self._vocab_size] if self._vocab_size else sorted_counts)
vocab = [data[0] for data in vocab_data]
if self._compute_idf:
doc_counts = [document_counts[token]["count"] for token in vocab]
idf = self._inverse_document_frequency(doc_counts, num_documents[0])
oov_idf = np.array([np.log(1 + num_documents[0])])
return {_VOCAB_NAME: vocab, _IDF_NAME: idf, _OOV_IDF_NAME: oov_idf}
else:
return {_VOCAB_NAME: vocab}
def restore(self, output):
"""Create an accumulator based on 'output'."""
raise NotImplementedError(
"TextVectorization does not restore or support streaming updates.")
def serialize(self, accumulator):
"""Serialize an accumulator for a remote call."""
output_dict = {}
output_dict["metadata"] = accumulator.metadata
output_dict["vocab"] = list(accumulator.count_dict.keys())
output_dict["vocab_counts"] = list(accumulator.count_dict.values())
if self._compute_idf:
output_dict["idf_vocab"] = list(accumulator.per_doc_count_dict.keys())
output_dict["idf_counts"] = [
counter["count"]
for counter in accumulator.per_doc_count_dict.values()
]
return compat.as_bytes(json.dumps(output_dict))
def deserialize(self, encoded_accumulator):
"""Deserialize an accumulator received from 'serialize()'."""
accumulator_dict = json.loads(compat.as_text(encoded_accumulator))
accumulator = self._create_accumulator()
accumulator.metadata[0] = accumulator_dict["metadata"][0]
count_dict = dict(
zip(accumulator_dict["vocab"], accumulator_dict["vocab_counts"]))
accumulator.count_dict.update(count_dict)
if self._compute_idf:
create_dict = lambda x: {"count": x, "last_doc_id": -1}
idf_count_dicts = [
create_dict(count) for count in accumulator_dict["idf_counts"]
]
idf_dict = dict(zip(accumulator_dict["idf_vocab"], idf_count_dicts))
accumulator.per_doc_count_dict.update(idf_dict)
return accumulator
def _create_accumulator(self):
"""Accumulate a sorted array of vocab tokens and corresponding counts."""
count_dict = collections.defaultdict(int)
if self._compute_idf:
create_default_dict = lambda: {"count": 0, "last_doc_id": -1}
per_doc_count_dict = collections.defaultdict(create_default_dict)
else:
per_doc_count_dict = None
metadata = [0]
return _TextVectorizationAccumulator(count_dict, per_doc_count_dict,
metadata)
| xzturn/tensorflow | tensorflow/python/keras/layers/preprocessing/text_vectorization.py | Python | apache-2.0 | 36,215 |
from django.conf.urls import url, include
from . import views
urlpatterns = [
    url(r'^order/address', views.addressPage, name='addressPage1'),
    url(r'^order_with_prescription/address', views.addressPage, name='addressPage2'),
    url(r'^order_with_prescription/specify', views.choicePage, name='choicePage'),
    url(r'^order_with_prescription/placed', views.orderPlaced, name='orderPlaced1'),
    url(r'^order_with_prescription', views.prescriptionPage, name='prescriptionPage'),
    url(r'^order/placed', views.orderPlaced, name='orderPlaced2')
] | mpiplani/Online-Pharmacy | online_pharmacy/order/urls.py | Python | apache-2.0 | 547 |
import sys
from setuptools import setup
from setuptools import find_packages
version = '0.5.0.dev0'
install_requires = [
'letsencrypt=={0}'.format(version),
'letsencrypt-apache=={0}'.format(version),
'docker-py',
'requests',
'zope.interface',
]
if sys.version_info < (2, 7):
install_requires.append('mock<1.1.0')
else:
install_requires.append('mock')
if sys.version_info < (2, 7, 9):
    # For secure SSL connection with Python 2.7 (InsecurePlatformWarning)
install_requires.append('ndg-httpsclient')
install_requires.append('pyasn1')
docs_extras = [
'repoze.sphinx.autointerface',
'Sphinx>=1.0', # autodoc_member_order = 'bysource', autodoc_default_flags
'sphinx_rtd_theme',
]
setup(
name='letsencrypt-compatibility-test',
version=version,
description="Compatibility tests for Let's Encrypt client",
url='https://github.com/letsencrypt/letsencrypt',
author="Let's Encrypt Project",
author_email='[email protected]',
license='Apache License 2.0',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Security',
],
packages=find_packages(),
include_package_data=True,
install_requires=install_requires,
extras_require={
'docs': docs_extras,
},
entry_points={
'console_scripts': [
'letsencrypt-compatibility-test = letsencrypt_compatibility_test.test_driver:main',
],
},
)
| TheBoegl/letsencrypt | letsencrypt-compatibility-test/setup.py | Python | apache-2.0 | 1,792 |
# -*- coding: utf-8 -*-
'''
Management of iptables
======================
This is an iptables-specific module designed to manage Linux firewalls. It is
expected that this state module, and other system-specific firewall states, may
at some point be deprecated in favor of a more generic ``firewall`` state.
.. code-block:: yaml
httpd:
iptables.append:
- table: filter
- chain: INPUT
- jump: ACCEPT
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- sport: 1025:65535
- save: True
httpd:
iptables.append:
- table: filter
- chain: INPUT
- jump: ACCEPT
- match:
- state
- comment
- comment: "Allow HTTP"
- connstate: NEW
- dport: 80
- proto: tcp
- sport: 1025:65535
- save: True
httpd:
iptables.append:
- table: filter
- chain: INPUT
- jump: ACCEPT
- match:
- state
- comment
- comment: "Allow HTTP"
- connstate: NEW
- source: '127.0.0.1'
- dport: 80
- proto: tcp
- sport: 1025:65535
- save: True
.. Invert Rule
httpd:
iptables.append:
- table: filter
- chain: INPUT
- jump: ACCEPT
- match:
- state
- comment
- comment: "Allow HTTP"
- connstate: NEW
- source: '! 127.0.0.1'
- dport: 80
- proto: tcp
- sport: 1025:65535
- save: True
httpd:
iptables.append:
- table: filter
- chain: INPUT
- jump: ACCEPT
- match:
- state
- comment
- comment: "Allow HTTP"
- connstate: NEW
- source: 'not 127.0.0.1'
- dport: 80
- proto: tcp
- sport: 1025:65535
- save: True
httpd:
iptables.append:
- table: filter
- family: ipv6
- chain: INPUT
- jump: ACCEPT
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- sport: 1025:65535
- save: True
httpd:
iptables.append:
- table: filter
- family: ipv4
- chain: INPUT
- jump: ACCEPT
- match: state
- connstate: NEW
- dports:
- 80
- 443
- proto: tcp
- sport: 1025:65535
- save: True
httpd:
iptables.insert:
- position: 1
- table: filter
- chain: INPUT
- jump: ACCEPT
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- sport: 1025:65535
- save: True
httpd:
iptables.insert:
- position: 1
- table: filter
- family: ipv6
- chain: INPUT
- jump: ACCEPT
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- sport: 1025:65535
- save: True
httpd:
iptables.delete:
- table: filter
- chain: INPUT
- jump: ACCEPT
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- sport: 1025:65535
- save: True
httpd:
iptables.delete:
- position: 1
- table: filter
- chain: INPUT
- jump: ACCEPT
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- sport: 1025:65535
- save: True
httpd:
iptables.delete:
- table: filter
- family: ipv6
- chain: INPUT
- jump: ACCEPT
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- sport: 1025:65535
- save: True
default to accept:
iptables.set_policy:
- chain: INPUT
- policy: ACCEPT
.. note::
Various functions of the ``iptables`` module use the ``--check`` option. If
the version of ``iptables`` on the target system does not include this
option, an alternate version of this check will be performed using the
output of iptables-save. This may have unintended consequences on legacy
releases of ``iptables``.
'''
from __future__ import absolute_import
# Import salt libs
import salt.utils
from salt.state import STATE_INTERNAL_KEYWORDS as _STATE_INTERNAL_KEYWORDS
def __virtual__():
'''
    Only load if the iptables module is available in __salt__
'''
return 'iptables.version' in __salt__
def chain_present(name, table='filter', family='ipv4'):
'''
.. versionadded:: 2014.1.0
    Verify that the chain exists.
name
A user-defined chain name.
table
        The table that owns the chain.
family
Networking family, either ipv4 or ipv6
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
chain_check = __salt__['iptables.check_chain'](table, name, family)
if chain_check is True:
ret['result'] = True
        ret['comment'] = ('iptables {0} chain already exists in {1} table for {2}'
.format(name, table, family))
return ret
if __opts__['test']:
ret['comment'] = 'iptables {0} chain in {1} table needs to be set for {2}'.format(
name,
table,
family)
return ret
command = __salt__['iptables.new_chain'](table, name, family)
if command is True:
ret['changes'] = {'locale': name}
ret['result'] = True
        ret['comment'] = ('iptables {0} chain in {1} table created successfully for {2}'
.format(name, table, family))
return ret
else:
ret['result'] = False
ret['comment'] = 'Failed to create {0} chain in {1} table: {2} for {3}'.format(
name,
table,
command.strip(),
family
)
return ret
def chain_absent(name, table='filter', family='ipv4'):
'''
.. versionadded:: 2014.1.0
Verify the chain is absent.
table
The table to remove the chain from
family
Networking family, either ipv4 or ipv6
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
chain_check = __salt__['iptables.check_chain'](table, name, family)
if not chain_check:
ret['result'] = True
ret['comment'] = ('iptables {0} chain is already absent in {1} table for {2}'
.format(name, table, family))
return ret
if __opts__['test']:
ret['comment'] = 'iptables {0} chain in {1} table needs to be removed {2}'.format(
name,
table,
family)
return ret
flush_chain = __salt__['iptables.flush'](table, name, family)
if not flush_chain:
command = __salt__['iptables.delete_chain'](table, name, family)
if command is True:
ret['changes'] = {'locale': name}
ret['result'] = True
            ret['comment'] = ('iptables {0} chain in {1} table deleted successfully for {2}'
.format(name, table, family))
else:
ret['result'] = False
ret['comment'] = ('Failed to delete {0} chain in {1} table: {2} for {3}'
.format(name, table, command.strip(), family))
else:
ret['result'] = False
ret['comment'] = 'Failed to flush {0} chain in {1} table: {2} for {3}'.format(
name,
table,
flush_chain.strip(),
family
)
return ret
def append(name, table='filter', family='ipv4', **kwargs):
'''
.. versionadded:: 0.17.0
Add a rule to the end of the specified chain.
name
A user-defined name to call this rule by in another part of a state or
formula. This should not be an actual rule.
table
The table that owns the chain which should be modified
family
Network family, ipv4 or ipv6.
All other arguments are passed in with the same name as the long option
that would normally be used for iptables, with one exception: ``--state`` is
specified as `connstate` instead of `state` (not to be confused with
`ctstate`).
    Jump options that don't take arguments should be passed in with an empty
string.
'''
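    # Illustration only (assumed typical kwargs, not part of the original
    # module): a call like
    #   append('allow-http', chain='INPUT', jump='ACCEPT', proto='tcp',
    #          dport=80, match='state', connstate='NEW')
    # is expected to have iptables.build_rule produce roughly
    #   '-A INPUT -p tcp --dport 80 -m state --state NEW -j ACCEPT'
    # before it is checked, appended and optionally saved below.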
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
if 'rules' in kwargs:
ret['changes']['locale'] = []
comments = []
save = False
for rule in kwargs['rules']:
if 'rules' in rule:
del rule['rules']
if '__agg__' in rule:
del rule['__agg__']
if 'save' in rule and rule['save']:
save = True
if rule['save'] is not True:
save_file = rule['save']
else:
save_file = True
rule['save'] = False
_ret = append(**rule)
if 'locale' in _ret['changes']:
ret['changes']['locale'].append(_ret['changes']['locale'])
comments.append(_ret['comment'])
ret['result'] = _ret['result']
if save:
if save_file is True:
save_file = None
__salt__['iptables.save'](save_file, family=family)
if not ret['changes']['locale']:
del ret['changes']['locale']
ret['comment'] = '\n'.join(comments)
return ret
for ignore in _STATE_INTERNAL_KEYWORDS:
if ignore in kwargs:
del kwargs[ignore]
kwargs['name'] = name
kwargs['table'] = table
rule = __salt__['iptables.build_rule'](family=family, **kwargs)
command = __salt__['iptables.build_rule'](full='True', family=family, command='A', **kwargs)
if __salt__['iptables.check'](table,
kwargs['chain'],
rule,
family) is True:
ret['result'] = True
ret['comment'] = 'iptables rule for {0} already set ({1}) for {2}'.format(
name,
command.strip(),
family)
if 'save' in kwargs and kwargs['save']:
if kwargs['save'] is not True:
filename = kwargs['save']
else:
filename = None
saved_rules = __salt__['iptables.get_saved_rules'](family=family)
_rules = __salt__['iptables.get_rules'](family=family)
__rules = []
for table in _rules:
for chain in _rules[table]:
__rules.append(_rules[table][chain].get('rules'))
__saved_rules = []
for table in saved_rules:
for chain in saved_rules[table]:
__saved_rules.append(saved_rules[table][chain].get('rules'))
# Only save if rules in memory are different than saved rules
if __rules != __saved_rules:
out = __salt__['iptables.save'](filename, family=family)
ret['comment'] += ('\nSaved iptables rule {0} for {1}\n'
'{2}\n{3}').format(name, family, command.strip(), out)
return ret
if __opts__['test']:
ret['comment'] = 'iptables rule for {0} needs to be set ({1}) for {2}'.format(
name,
command.strip(),
family)
return ret
if __salt__['iptables.append'](table, kwargs['chain'], rule, family):
ret['changes'] = {'locale': name}
ret['result'] = True
ret['comment'] = 'Set iptables rule for {0} to: {1} for {2}'.format(
name,
command.strip(),
family)
if 'save' in kwargs:
if kwargs['save']:
if kwargs['save'] is not True:
filename = kwargs['save']
else:
filename = None
out = __salt__['iptables.save'](filename, family=family)
ret['comment'] = ('Set and saved iptables rule {0} for {1}\n'
'{2}\n{3}').format(name, family, command.strip(), out)
return ret
else:
ret['result'] = False
ret['comment'] = ('Failed to set iptables rule for {0}.\n'
'Attempted rule was {1} for {2}').format(
name,
command.strip(), family)
return ret
def insert(name, table='filter', family='ipv4', **kwargs):
'''
.. versionadded:: 2014.1.0
Insert a rule into a chain
name
A user-defined name to call this rule by in another part of a state or
formula. This should not be an actual rule.
table
The table that owns the chain that should be modified
family
Networking family, either ipv4 or ipv6
position
The numerical representation of where the rule should be inserted into
the chain. Note that ``-1`` is not a supported position value.
All other arguments are passed in with the same name as the long option
that would normally be used for iptables, with one exception: ``--state`` is
specified as `connstate` instead of `state` (not to be confused with
`ctstate`).
    Jump options that don't take arguments should be passed in with an empty
string.
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
if 'rules' in kwargs:
ret['changes']['locale'] = []
comments = []
save = False
for rule in kwargs['rules']:
if 'rules' in rule:
del rule['rules']
if '__agg__' in rule:
del rule['__agg__']
if 'save' in rule and rule['save']:
save = True
if rule['save'] is not True:
save_file = rule['save']
else:
save_file = True
rule['save'] = False
_ret = insert(**rule)
if 'locale' in _ret['changes']:
ret['changes']['locale'].append(_ret['changes']['locale'])
comments.append(_ret['comment'])
ret['result'] = _ret['result']
if save:
if save_file is True:
save_file = None
__salt__['iptables.save'](save_file, family=family)
if not ret['changes']['locale']:
del ret['changes']['locale']
ret['comment'] = '\n'.join(comments)
return ret
for ignore in _STATE_INTERNAL_KEYWORDS:
if ignore in kwargs:
del kwargs[ignore]
kwargs['name'] = name
kwargs['table'] = table
rule = __salt__['iptables.build_rule'](family=family, **kwargs)
command = __salt__['iptables.build_rule'](full=True, family=family, command='I', **kwargs)
if __salt__['iptables.check'](table,
kwargs['chain'],
rule,
family) is True:
ret['result'] = True
ret['comment'] = 'iptables rule for {0} already set for {1} ({2})'.format(
name,
family,
command.strip())
if 'save' in kwargs and kwargs['save']:
if kwargs['save'] is not True:
filename = kwargs['save']
else:
filename = None
saved_rules = __salt__['iptables.get_saved_rules'](family=family)
_rules = __salt__['iptables.get_rules'](family=family)
__rules = []
for table in _rules:
for chain in _rules[table]:
__rules.append(_rules[table][chain].get('rules'))
__saved_rules = []
for table in saved_rules:
for chain in saved_rules[table]:
__saved_rules.append(saved_rules[table][chain].get('rules'))
# Only save if rules in memory are different than saved rules
if __rules != __saved_rules:
out = __salt__['iptables.save'](filename, family=family)
ret['comment'] += ('\nSaved iptables rule {0} for {1}\n'
'{2}\n{3}').format(name, family, command.strip(), out)
return ret
if __opts__['test']:
ret['comment'] = 'iptables rule for {0} needs to be set for {1} ({2})'.format(
name,
family,
command.strip())
return ret
if not __salt__['iptables.insert'](table, kwargs['chain'], kwargs['position'], rule, family):
ret['changes'] = {'locale': name}
ret['result'] = True
ret['comment'] = 'Set iptables rule for {0} to: {1} for {2}'.format(
name,
command.strip(),
family)
if 'save' in kwargs:
if kwargs['save']:
out = __salt__['iptables.save'](filename=None, family=family)
ret['comment'] = ('Set and saved iptables rule {0} for {1}\n'
'{2}\n{3}').format(name, family, command.strip(), out)
return ret
else:
ret['result'] = False
ret['comment'] = ('Failed to set iptables rule for {0}.\n'
'Attempted rule was {1}').format(
name,
command.strip())
return ret
def delete(name, table='filter', family='ipv4', **kwargs):
'''
.. versionadded:: 2014.1.0
    Delete a rule from a chain
name
A user-defined name to call this rule by in another part of a state or
formula. This should not be an actual rule.
table
The table that owns the chain that should be modified
family
Networking family, either ipv4 or ipv6
All other arguments are passed in with the same name as the long option
that would normally be used for iptables, with one exception: ``--state`` is
specified as `connstate` instead of `state` (not to be confused with
`ctstate`).
    Jump options that don't take arguments should be passed in with an empty
string.
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
if 'rules' in kwargs:
ret['changes']['locale'] = []
comments = []
save = False
for rule in kwargs['rules']:
if 'rules' in rule:
del rule['rules']
if '__agg__' in rule:
del rule['__agg__']
            if 'save' in rule and rule['save']:
                save = True
if rule['save'] is not True:
save_file = rule['save']
else:
save_file = True
rule['save'] = False
_ret = delete(**rule)
if 'locale' in _ret['changes']:
ret['changes']['locale'].append(_ret['changes']['locale'])
comments.append(_ret['comment'])
ret['result'] = _ret['result']
if save:
if save_file is True:
save_file = None
__salt__['iptables.save'](save_file, family=family)
if not ret['changes']['locale']:
del ret['changes']['locale']
ret['comment'] = '\n'.join(comments)
return ret
for ignore in _STATE_INTERNAL_KEYWORDS:
if ignore in kwargs:
del kwargs[ignore]
kwargs['name'] = name
kwargs['table'] = table
rule = __salt__['iptables.build_rule'](family=family, **kwargs)
command = __salt__['iptables.build_rule'](full=True, family=family, command='D', **kwargs)
if not __salt__['iptables.check'](table,
kwargs['chain'],
rule,
family) is True:
if 'position' not in kwargs:
ret['result'] = True
ret['comment'] = 'iptables rule for {0} already absent for {1} ({2})'.format(
name,
family,
command.strip())
return ret
if __opts__['test']:
ret['comment'] = 'iptables rule for {0} needs to be deleted for {1} ({2})'.format(
name,
family,
command.strip())
return ret
if 'position' in kwargs:
result = __salt__['iptables.delete'](
table,
kwargs['chain'],
family=family,
position=kwargs['position'])
else:
result = __salt__['iptables.delete'](
table,
kwargs['chain'],
family=family,
rule=rule)
if not result:
ret['changes'] = {'locale': name}
ret['result'] = True
ret['comment'] = 'Delete iptables rule for {0} {1}'.format(
name,
command.strip())
if 'save' in kwargs:
if kwargs['save']:
out = __salt__['iptables.save'](filename=None, family=family)
ret['comment'] = ('Deleted and saved iptables rule {0} for {1}\n'
'{2}\n{3}').format(name, family, command.strip(), out)
return ret
else:
ret['result'] = False
ret['comment'] = ('Failed to delete iptables rule for {0}.\n'
'Attempted rule was {1}').format(
name,
command.strip())
return ret
def set_policy(name, table='filter', family='ipv4', **kwargs):
'''
.. versionadded:: 2014.1.0
Sets the default policy for iptables firewall tables
table
The table that owns the chain that should be modified
family
Networking family, either ipv4 or ipv6
policy
The requested table policy
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
for ignore in _STATE_INTERNAL_KEYWORDS:
if ignore in kwargs:
del kwargs[ignore]
if __salt__['iptables.get_policy'](
table,
kwargs['chain'],
family) == kwargs['policy']:
ret['result'] = True
ret['comment'] = ('iptables default policy for chain {0} on table {1} for {2} already set to {3}'
.format(kwargs['chain'], table, family, kwargs['policy']))
return ret
if __opts__['test']:
ret['comment'] = 'iptables default policy for chain {0} on table {1} for {2} needs to be set to {3}'.format(
kwargs['chain'],
table,
family,
kwargs['policy']
)
return ret
if not __salt__['iptables.set_policy'](
table,
kwargs['chain'],
kwargs['policy'],
family):
ret['changes'] = {'locale': name}
ret['result'] = True
ret['comment'] = 'Set default policy for {0} to {1} family {2}'.format(
kwargs['chain'],
kwargs['policy'],
family
)
if 'save' in kwargs:
if kwargs['save']:
__salt__['iptables.save'](filename=None, family=family)
ret['comment'] = 'Set and saved default policy for {0} to {1} family {2}'.format(
kwargs['chain'],
kwargs['policy'],
family
)
return ret
else:
ret['result'] = False
ret['comment'] = 'Failed to set iptables default policy'
return ret
def flush(name, table='filter', family='ipv4', **kwargs):
'''
.. versionadded:: 2014.1.0
Flush current iptables state
table
The table that owns the chain that should be modified
family
Networking family, either ipv4 or ipv6
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
for ignore in _STATE_INTERNAL_KEYWORDS:
if ignore in kwargs:
del kwargs[ignore]
if 'chain' not in kwargs:
kwargs['chain'] = ''
if __opts__['test']:
        ret['comment'] = 'iptables rules in {0} table {1} chain {2} family needs to be flushed'.format(
            table,
            kwargs['chain'],
            family)
return ret
if not __salt__['iptables.flush'](table, kwargs['chain'], family):
ret['changes'] = {'locale': name}
ret['result'] = True
ret['comment'] = 'Flush iptables rules in {0} table {1} chain {2} family'.format(
table,
kwargs['chain'],
family
)
return ret
else:
ret['result'] = False
ret['comment'] = 'Failed to flush iptables rules'
return ret
def mod_aggregate(low, chunks, running):
'''
The mod_aggregate function which looks up all rules in the available
low chunks and merges them into a single rules ref in the present low data
'''
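    # Sketch of the intended effect (illustrative): two separate iptables.append
    # chunks for the same function get collected into this low chunk as
    # low['rules'] = [chunk_a, chunk_b], and each collected chunk is tagged
    # with '__agg__' so later mod_aggregate calls do not merge it again.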
rules = []
agg_enabled = [
'append',
'insert',
]
if low.get('fun') not in agg_enabled:
return low
for chunk in chunks:
tag = salt.utils.gen_state_tag(chunk)
if tag in running:
# Already ran the iptables state, skip aggregation
continue
if chunk.get('state') == 'iptables':
if '__agg__' in chunk:
continue
# Check for the same function
if chunk.get('fun') != low.get('fun'):
continue
if chunk not in rules:
rules.append(chunk)
chunk['__agg__'] = True
if rules:
if 'rules' in low:
low['rules'].extend(rules)
else:
low['rules'] = rules
return low
| stephane-martin/salt-debian-packaging | salt-2016.3.3/salt/states/iptables.py | Python | apache-2.0 | 25,947 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pages', '0009_auto_20160704_0023'),
]
operations = [
migrations.RemoveField(
model_name='slide',
name='slide_show',
),
migrations.RemoveField(
model_name='panel',
name='slide_show',
),
migrations.DeleteModel(
name='Slide',
),
migrations.DeleteModel(
name='SlideShow',
),
]
| thanos/mykonosbiennale.org | pages/migrations/0010_auto_20160704_0030.py | Python | apache-2.0 | 601 |
"""s6 services management.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import errno
import os
import logging
import six
from treadmill import fs
from .. import _utils
from .. import _service_base
_LOGGER = logging.getLogger(__name__)
class BundleService(_service_base.Service):
"""s6 rc bundle service.
"""
__slots__ = (
'_contents',
)
_TYPE = _service_base.ServiceType.Bundle
def __init__(self, directory, name, contents=None):
super(BundleService, self).__init__(directory, name)
self._contents = contents
@property
def type(self):
return self._TYPE
@property
def _contents_file(self):
return os.path.join(self._dir, 'contents')
@property
def contents(self):
"""Gets the contents of the bundle.
"""
if self._contents is None:
self._contents = _utils.set_list_read(self._contents_file)
return self._contents
def write(self):
"""Write down the service definition.
"""
super(BundleService, self).write()
# Mandatory settings
if not self._contents and not os.path.exists(self._contents_file):
raise ValueError('Invalid Bundle: No content')
if self._contents is not None:
if not self._contents:
raise ValueError('Invalid Bundle: empty')
_utils.set_list_write(self._contents_file, self._contents)
@six.add_metaclass(abc.ABCMeta)
class _AtomicService(_service_base.Service):
"""Abstract base class for all atomic services (per s6-rc definition).
"""
__slots__ = (
'_dependencies',
'_timeout_up',
'_timeout_down',
'_env',
)
def __init__(self, directory, name,
timeout_up=None, timeout_down=None,
dependencies=None, environ=None):
super(_AtomicService, self).__init__(directory, name)
self._dependencies = dependencies
self._timeout_up = timeout_up
self._timeout_down = timeout_down
self._env = environ
@property
def data_dir(self):
"""Returns the data directory for the services.
:returns ``str``:
Full path to the service data directory.
"""
return os.path.join(self._dir, 'data')
@property
def env_dir(self):
"""Returns the environ directory for the services.
:returns ``str``:
Full path to the service environ directory.
"""
return os.path.join(self._dir, 'env')
@property
def environ(self):
"""Returns the environ dictionary for the services.
:returns ``dict``:
Service environ dictionary.
"""
if self._env is None:
self._env = _utils.environ_dir_read(self.env_dir)
return self._env
@environ.setter
def environ(self, new_environ):
self._env = new_environ
@property
def _dependencies_file(self):
return os.path.join(self._dir, 'dependencies')
@property
def dependencies(self):
"""Returns the dependencies set for the services.
:returns ``set``:
Service dependencies set.
"""
if self._dependencies is None:
self._dependencies = _utils.set_list_read(self._dependencies_file)
return self._dependencies
@dependencies.setter
def dependencies(self, new_deps):
self._dependencies = set(new_deps)
@property
def timeout_up(self):
"""Returns amount of milliseconds to wait for the service to come up.
:returns ``int``:
Amount of milliseconds to wait. 0 means infinitely.
"""
if self._timeout_up is None:
self._timeout_up = _utils.value_read(
os.path.join(self._dir, 'timeout-up'),
default=0
)
return self._timeout_up
@property
def timeout_down(self):
"""Returns amount of milliseconds to wait for the service to come down.
:returns ``int``:
Amount of milliseconds to wait. 0 means infinitely.
"""
if self._timeout_down is None:
self._timeout_down = _utils.value_read(
os.path.join(self._dir, 'timeout-down'),
default=0
)
return self._timeout_down
@abc.abstractmethod
def write(self):
"""Write down the service definition.
"""
super(_AtomicService, self).write()
# We only write dependencies/environ if we have new ones.
fs.mkdir_safe(self.env_dir)
fs.mkdir_safe(self.data_dir)
if self._dependencies is not None:
_utils.set_list_write(self._dependencies_file, self._dependencies)
if self._env is not None:
_utils.environ_dir_write(self.env_dir, self._env)
if self._timeout_up is not None:
_utils.value_write(
os.path.join(self._dir, 'timeout-up'),
self._timeout_up
)
if self._timeout_down is not None:
_utils.value_write(
os.path.join(self._dir, 'timeout-down'),
self._timeout_down
)
class LongrunService(_AtomicService):
"""s6 long running service.
"""
__slots__ = (
'_consumer_for',
'_default_down',
'_finish_script',
'_log_run_script',
'_notification_fd',
'_pipeline_name',
'_producer_for',
'_run_script',
'_timeout_finish',
)
_TYPE = _service_base.ServiceType.LongRun
def __init__(self, directory, name,
run_script=None, finish_script=None, notification_fd=None,
log_run_script=None, timeout_finish=None, default_down=None,
pipeline_name=None, producer_for=None, consumer_for=None,
dependencies=None, environ=None):
super(LongrunService, self).__init__(
directory,
name,
dependencies=dependencies,
environ=environ
)
if producer_for and log_run_script:
raise ValueError('Invalid LongRun service options: producer/log')
self._consumer_for = consumer_for
self._default_down = default_down
self._finish_script = finish_script
self._log_run_script = log_run_script
self._notification_fd = notification_fd
self._pipeline_name = pipeline_name
self._producer_for = producer_for
self._run_script = run_script
self._timeout_finish = timeout_finish
@property
def type(self):
return self._TYPE
@property
def logger_dir(self):
"""Returns the logger directory for the services.
:returns ``str``:
Full path to the service log directory.
"""
return os.path.join(self._dir, 'log')
@property
def notification_fd(self):
"""s6 "really up" notification fd.
"""
if self._notification_fd is None:
self._notification_fd = _utils.value_read(
os.path.join(self._dir, 'notification-fd'),
default=-1
)
return self._notification_fd
@notification_fd.setter
def notification_fd(self, new_notification_fd):
self._notification_fd = new_notification_fd
@property
def default_down(self):
"""Is the default service state set to down?
"""
if self._default_down is None:
self._default_down = os.path.exists(
os.path.join(self._dir, 'down')
)
return self._default_down
@default_down.setter
def default_down(self, default_down):
self._default_down = bool(default_down)
@property
def _run_file(self):
return os.path.join(self._dir, 'run')
@property
def _finish_file(self):
return os.path.join(self._dir, 'finish')
@property
def _log_run_file(self):
return os.path.join(self.logger_dir, 'run')
@property
def run_script(self):
"""Service run script.
"""
if self._run_script is None:
self._run_script = _utils.script_read(self._run_file)
return self._run_script
@run_script.setter
def run_script(self, new_script):
self._run_script = new_script
@property
def finish_script(self):
"""Service finish script.
"""
if self._finish_script is None:
try:
self._finish_script = _utils.script_read(self._finish_file)
except IOError as err:
                if err.errno != errno.ENOENT:
raise
return self._finish_script
@finish_script.setter
def finish_script(self, new_script):
self._finish_script = new_script
@property
def log_run_script(self):
"""Service log run script.
"""
if self._log_run_script is None:
try:
self._log_run_script = _utils.script_read(self._log_run_file)
except IOError as err:
                if err.errno != errno.ENOENT:
raise
return self._log_run_script
@log_run_script.setter
def log_run_script(self, new_script):
self._log_run_script = new_script
@property
def timeout_finish(self):
"""Returns amount of milliseconds to wait for the finish script to
complete.
:returns ``int``:
Amount of milliseconds to wait. 0 means infinitely. Default 5000.
"""
if self._timeout_finish is None:
self._timeout_finish = _utils.value_read(
os.path.join(self._dir, 'timeout-finish'),
default=5000
)
return self._timeout_finish
@timeout_finish.setter
def timeout_finish(self, timeout_finish):
"""Service finish script timeout.
"""
if timeout_finish is not None:
if isinstance(timeout_finish, six.integer_types):
self._timeout_finish = timeout_finish
else:
self._timeout_finish = int(timeout_finish, 10)
@property
def _pipeline_name_file(self):
return os.path.join(self._dir, 'pipeline-name')
@property
def pipeline_name(self):
"""Gets the name of the pipeline.
"""
if self._pipeline_name is None:
self._pipeline_name = _utils.data_read(self._pipeline_name_file)
return self._pipeline_name
@pipeline_name.setter
def pipeline_name(self, new_name):
self._pipeline_name = new_name
@property
def _producer_for_file(self):
return os.path.join(self._dir, 'producer-for')
@property
def producer_for(self):
"""Gets which services this service is a producer for.
"""
if self._producer_for is None:
self._producer_for = _utils.data_read(self._producer_for_file)
return self._producer_for
@producer_for.setter
def producer_for(self, new_name):
"""Sets the producer for another service.
"""
self._producer_for = new_name
@property
def _consumer_for_file(self):
return os.path.join(self._dir, 'consumer-for')
@property
def consumer_for(self):
"""Gets which services this service is a consumer for.
"""
if self._consumer_for is None:
self._consumer_for = _utils.data_read(self._consumer_for_file)
return self._consumer_for
@consumer_for.setter
def consumer_for(self, new_name):
"""Sets which services this service is a consumer for.
"""
self._consumer_for = new_name
def write(self):
"""Write down the service definition.
"""
        # Disable R0912: Too many branches
# pylint: disable=R0912
super(LongrunService, self).write()
# Mandatory settings
if self._run_script is None and not os.path.exists(self._run_file):
            raise ValueError('Invalid LongRun service: no run script')
if self._run_script is not None:
_utils.script_write(self._run_file, self._run_script)
# Handle the case where the run script is a generator
if not isinstance(self._run_script, six.string_types):
self._run_script = None
# Optional settings
if self._finish_script is not None:
_utils.script_write(self._finish_file, self._finish_script)
# Handle the case where the finish script is a generator
if not isinstance(self._finish_script, six.string_types):
self._finish_script = None
if self._log_run_script is not None:
# Create the log dir on the spot
fs.mkdir_safe(os.path.dirname(self._log_run_file))
_utils.script_write(self._log_run_file, self._log_run_script)
# Handle the case where the run script is a generator
if not isinstance(self._log_run_script, six.string_types):
self._log_run_script = None
if self._default_down:
_utils.data_write(
os.path.join(self._dir, 'down'),
None
)
else:
fs.rm_safe(os.path.join(self._dir, 'down'))
if self._timeout_finish is not None:
_utils.value_write(
os.path.join(self._dir, 'timeout-finish'),
self._timeout_finish
)
if self._notification_fd is not None:
_utils.value_write(
os.path.join(self._dir, 'notification-fd'),
self._notification_fd
)
if self._pipeline_name is not None:
_utils.data_write(self._pipeline_name_file, self._pipeline_name)
if self._producer_for is not None:
_utils.data_write(self._producer_for_file, self._producer_for)
if self._consumer_for is not None:
_utils.data_write(self._consumer_for_file, self._consumer_for)
class OneshotService(_AtomicService):
"""Represents a s6 rc one-shot service which is only ever executed once.
"""
__slots__ = (
'_up',
'_down',
)
# TODO: timeout-up/timeout-down
_TYPE = _service_base.ServiceType.Oneshot
def __init__(self, directory, name=None,
up_script=None, down_script=None,
dependencies=None, environ=None):
super(OneshotService, self).__init__(
directory,
name,
dependencies=dependencies,
environ=environ
)
self._up = up_script
self._down = down_script
@property
def type(self):
return self._TYPE
@property
def _up_file(self):
return os.path.join(self._dir, 'up')
@property
def _down_file(self):
return os.path.join(self._dir, 'down')
@property
def up(self):
"""Gets the one shot service up file.
"""
if self._up is None:
self._up = _utils.script_read(self._up_file)
return self._up
@up.setter
def up(self, new_script):
"""Sets the one-shot service up file.
"""
self._up = new_script
@property
def down(self):
"""Gets the one-shot service down file.
"""
if self._down is None:
self._down = _utils.script_read(self._down_file)
return self._down
@down.setter
def down(self, new_script):
"""Sets the one-shot service down file.
"""
self._down = new_script
def write(self):
"""Write down the service definition.
"""
super(OneshotService, self).write()
# Mandatory settings
if not self._up and not os.path.exists(self._up_file):
            raise ValueError('Invalid Oneshot service: no up script')
if self._up is not None:
_utils.script_write(self._up_file, self._up)
if not isinstance(self._up_file, six.string_types):
self._up_file = None
# Optional settings
if self._down is not None:
_utils.script_write(self._down_file, self._down)
if not isinstance(self._down_file, six.string_types):
self._down_file = None
def create_service(svc_basedir, svc_name, svc_type, **kwargs):
"""Factory function instantiating a new service object from parameters.
:param ``str`` svc_basedir:
Base directory where to create the service.
:param ``str`` svc_name:
Name of the new service.
:param ``_service_base.ServiceType`` svc_type:
Type for the new service.
:param ``dict`` kw_args:
Additional argument passed to the constructor of the new service.
:returns ``Service``:
New instance of the service
"""
cls = {
_service_base.ServiceType.Bundle: BundleService,
_service_base.ServiceType.LongRun: LongrunService,
_service_base.ServiceType.Oneshot: OneshotService,
}.get(svc_type, None)
if cls is None:
_LOGGER.critical('No implementation for service type %r', svc_type)
cls = LongrunService
return cls(svc_basedir, svc_name, **kwargs)
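# Example usage (hypothetical paths and names, for illustration only):
#
#     svc = create_service('/var/run/s6/services', 'sleeper',
#                          _service_base.ServiceType.LongRun,
#                          run_script='#!/bin/sh\nexec sleep 1000\n')
#     svc.write()
#
# write() is then expected to lay down the run script plus the env/ and data/
# directories for the service.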
__all__ = (
'BundleService',
'LongrunService',
'OneshotService',
'create_service',
)
| Morgan-Stanley/treadmill | lib/python/treadmill/supervisor/s6/services.py | Python | apache-2.0 | 17,491 |
"""Implement test server."""
import logging
import socket
from time import sleep
from leicacam.cam import tuples_as_bytes
CAM_REPLY = [
[
(
"relpath",
"subfolder/exp1/CAM1/slide--S00/chamber--U00--V00/field--X01--Y01"
"/image--L0000--S00--U00--V00--J15--E04--O01"
"--X01--Y01--T0000--Z00--C00.ome",
)
],
[
(
"relpath",
"subfolder/exp1/CAM1/slide--S00/chamber--U00--V00/field--X02--Y02"
"/image--L0000--S00--U00--V00--J15--E02--O01"
"--X02--Y02--T0000--Z00--C31.ome",
)
],
]
def image_event(data):
"""Send a reply about saved image."""
if "startcamscan" in data.decode():
return tuples_as_bytes(CAM_REPLY.pop())
return None
class EchoServer:
"""Test server."""
def __init__(self, server_address):
"""Set up server."""
self.logger = logging.getLogger("EchoServer")
self.logger.debug("Setting up server")
self.server_address = server_address
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.setup()
def setup(self):
"""Bind and listen to incoming connections."""
self.sock.bind(self.server_address)
self.sock.listen(1)
def handle(self):
"""Handle incoming connections."""
# pylint: disable=no-member
self.logger.debug("Serve incoming connections")
conn, addr = self.sock.accept()
self.logger.debug("Connected by %s", addr)
try:
self.logger.debug("Send welcome")
conn.sendall("Welcome...".encode("utf-8"))
while True:
data = conn.recv(1024)
if not data:
self.logger.debug("No data, closing")
break
self.send(conn, data)
reply = image_event(data)
if not reply:
continue
sleep(0.2)
self.send(conn, reply)
except OSError as exc:
self.logger.error(exc)
finally:
self.logger.debug("Closing connection to %s", addr)
conn.close()
self.sock.shutdown(socket.SHUT_WR)
self.sock.close()
def send(self, conn, data):
"""Send data."""
self.logger.debug("Sending: %s", data)
conn.sendall(data + b"\n")
def run(self):
"""Run server."""
try:
self.handle()
except OSError as exc:
self.logger.error("Error on socket: %s", exc)
self.logger.debug("Server close")
self.sock.close()
def stop(self):
"""Stop server."""
try:
self.logger.debug("Server shutdown")
self.sock.shutdown(socket.SHUT_WR)
self.logger.debug("Server close")
self.sock.close()
except OSError:
self.logger.error("Error shutting down server socket")
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG, format="%(name)s: %(message)s")
ADDRESS = ("localhost", 8895)
SERVER = EchoServer(ADDRESS)
try:
SERVER.run()
except KeyboardInterrupt:
SERVER.stop()
| CellProfiling/cam_acq | tests/test_server.py | Python | apache-2.0 | 3,254 |
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import os
import tempfile
import json
import unittest
import shutil
from openstack_plugin_common import Config
from test_utils.utils import get_task
class TestOpenstackNovaNetManagerBlueprint(unittest.TestCase):
def test_openstack_configuration_copy_to_manager(self):
script_path = os.path.join(
os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
'scripts',
'configure.py')
task = get_task(script_path,
'_copy_openstack_configuration_to_manager')
config_output_file_path = tempfile.mkstemp()[1]
def mock_put(file_path, *args, **kwargs):
shutil.copyfile(file_path, config_output_file_path)
task.func_globals['fabric'].api.put = mock_put
inputs_config = {
'username': 'inputs-username',
'region': 'inputs-region'
}
file_config = {
'username': 'file-username',
'password': 'file-password',
'auth_url': 'file-auth-url'
}
conf_file_path = tempfile.mkstemp()[1]
os.environ[Config.OPENSTACK_CONFIG_PATH_ENV_VAR] = conf_file_path
with open(conf_file_path, 'w') as f:
json.dump(file_config, f)
os.environ['OS_USERNAME'] = 'envar-username'
os.environ['OS_PASSWORD'] = 'envar-password'
os.environ['OS_TENANT_NAME'] = 'envar-tenant-name'
task(inputs_config)
with open(config_output_file_path) as f:
config = json.load(f)
self.assertEquals('inputs-username', config.get('username'))
self.assertEquals('inputs-region', config.get('region'))
self.assertEquals('file-password', config.get('password'))
self.assertEquals('file-auth-url', config.get('auth_url'))
self.assertEquals('envar-tenant-name', config.get('tenant_name'))
| szpotona/cloudify-manager-blueprints | openstack-nova-net/tests/test_openstack_nova_net_blueprint.py | Python | apache-2.0 | 2,514 |
#! /bin/env python
def mainFunc():
xml_file = open('new.xml', 'r')
out_file = open('newnew.xml', 'w')
for i, line in enumerate(xml_file.readlines()):
print 'processing line %d' % i
line = line.replace(';', '')
line = line.replace('>', '')
line = line.replace('<', '')
line = line.replace('&', '')
out_file.write(line)
if __name__ == '__main__':
mainFunc() | igemsoftware/HFUT-China_2015 | processXML.py | Python | apache-2.0 | 425 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from decorator_helper import prog_scope
import unittest
import paddle.fluid as fluid
import numpy as np
import paddle
import warnings
class TestBackwardInferVarDataTypeShape(unittest.TestCase):
def test_backward_infer_var_data_type_shape(self):
paddle.enable_static()
program = fluid.default_main_program()
dy = program.global_block().create_var(
name="Tmp@GRAD", shape=[1, 1], dtype=np.float32, persistable=True)
# invoke warning
fluid.backward._infer_var_data_type_shape_("Tmp@GRAD",
program.global_block())
res = False
with warnings.catch_warnings():
res = True
self.assertTrue(res)
if __name__ == '__main__':
unittest.main()
| luotao1/Paddle | python/paddle/fluid/tests/unittests/test_backward_infer_var_data_type_shape.py | Python | apache-2.0 | 1,432 |
i = 40 - 3
for j in range(3, 12, 2):
print(j)
i = i + 1
print(i)
| almarklein/wasmfun | simplepy/example1.py | Python | bsd-2-clause | 74 |
# doc-export: Icons2
"""
This example demonstrates the use of icons in Flexx.
When run as a script, Icons1 is used, passing icon and title to the application.
In the examples section of the docs, Icons2 is used, which sets icon and title
in the init(). Click "open in new tab" to see the effect.
"""
import os
import flexx
from flexx import flx
# todo: support icons in widgets like Button, TabWidget, etc.
# todo: support fontawesome icons
fname = os.path.join(os.path.dirname(flexx.__file__), 'resources', 'flexx.ico')
black_png = ('data:image/png;base64,'
'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAIUlEQVR42mNgY'
'GD4TyEeTAacOHGCKDxqwKgBtDVgaGYmAD/v6XAYiQl7AAAAAElFTkSuQmCC')
class Icons1(flx.Widget):
def init(self):
flx.Button(text='Not much to see here ...')
class Icons2(flx.Widget):
def init(self):
self.set_title('Icon demo')
self.set_icon(black_png)
flx.Button(text='Not much to see here ...')
if __name__ == '__main__':
# Select application icon. Can be a url, a relative url to a shared asset,
# a base64 encoded image, or a local filename. Note that the local filename
    # works for setting the application icon in a desktop-like app, but not for
# a web app. File types can be ico or png.
# << Uncomment any of the lines below >>
# icon = None # use default
# icon = 'https://assets-cdn.github.com/favicon.ico'
# icon = flx.assets.add_shared_data('ico.icon', open(fname, 'rb').read())
icon = black_png
# icon = fname
m = flx.App(Icons1, title='Icon demo', icon=icon).launch('app')
flx.start()
| zoofIO/flexx | flexxamples/howtos/icons.py | Python | bsd-2-clause | 1,647 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from apiclient import errors
from base import GDBase
import logging
logger = logging.getLogger()
logger.setLevel(logging.ERROR)
from pprint import pprint
from auth import GDAuth
permission_resource_properties = {
"role": ["owner", "reader", "writer"],
"type": ["user", "group", "domain", "anyone"]}
help_permission_text = [(j + ": " + ', '.join(permission_resource_properties[j]))
for j in permission_resource_properties.keys()]
class GDPerm:
def __init__(self, file_id, action):
# base
auth = GDAuth()
creds = auth.get_credentials()
if creds is None:
raise Exception("Failed to retrieve credentials")
self.http = auth.get_authorized_http()
base = GDBase()
self.service = base.get_drive_service(self.http)
self.root = base.get_root()
self.file_id = base.get_id_from_url(file_id)
self.action = action['name']
self.param = action['param']
def run(self):
try:
result = getattr(self, self.action)()
except Exception as e:
logger.error(e)
raise
return result
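    # Illustration (hypothetical values): the action dict is expected to look
    # like {'name': 'insert', 'param': ['user', 'reader', '[email protected]']},
    # where run() dispatches on action['name'] to one of the methods below and
    # 'param' supplies the permission type, role and grantee address.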
def insert(self):
new_permission = {
'type': self.param[0],
'role': self.param[1],
'value': self.param[2],
}
try:
return self.service.permissions().insert(
fileId=self.file_id, body=new_permission).execute()
except errors.HttpError as error:
logger.error('An error occurred: %s' % error)
return None
def update(self):
new_permission = {
'type': self.param[1],
'role': self.param[2],
'value': self.param[3],
}
try:
return self.service.permissions().update(
fileId=self.file_id,
permissionId=self.param[0],
body=new_permission).execute()
except errors.HttpError as error:
logger.error('An error occurred: %s' % error)
return None
def list(self):
try:
permissions = self.service.permissions().list(fileId=self.file_id).execute()
logger.debug(permissions)
return permissions.get('items', [])
except errors.HttpError as error:
logger.error('An error occurred: %s' % error)
return None
def get(self):
try:
permissions = self.service.permissions().get(
fileId=self.file_id, permissionId=self.param).execute()
return permissions
except errors.HttpError as error:
logger.error('An error occurred: %s' % error)
return None
def delete(self):
try:
permissions = self.service.permissions().delete(
fileId=self.file_id, permissionId=self.param).execute()
return permissions
except errors.HttpError as error:
logger.error('An error occurred: %s' % error)
return None
def get_by_user(self):
permissions = self.list()
user_email = self.param.lower()
for p in permissions:
if "emailAddress" in p:
perm_email = p["emailAddress"].lower()
if user_email == perm_email:
return p
return None
| tienfuc/gdcmdtools | gdcmdtools/perm.py | Python | bsd-2-clause | 3,399 |
from functools import partial
from collections import deque
from llvmlite import ir
from numba.core.datamodel.registry import register_default
from numba.core import types, cgutils
from numba.np import numpy_support
class DataModel(object):
"""
    DataModel describes how a frontend (FE) type is represented in the LLVM IR
    in different contexts.
    Contexts are:
    - value: representation inside a function body. May be stored on the
    stack. The representation here is flexible.
    - data: representation used when storing into containers (e.g. arrays).
    - argument: representation used for function arguments. All composite
    types are flattened into multiple primitive types.
    - return: representation used for the return value.
    Throughout the compiler pipeline, an LLVM value is usually passed around
    in the "value" representation. All "as_"-prefixed methods convert from
    the "value" representation; all "from_"-prefixed methods convert to the
    "value" representation.
"""
def __init__(self, dmm, fe_type):
self._dmm = dmm
self._fe_type = fe_type
@property
def fe_type(self):
return self._fe_type
def get_value_type(self):
raise NotImplementedError(self)
def get_data_type(self):
return self.get_value_type()
def get_argument_type(self):
"""Return a LLVM type or nested tuple of LLVM type
"""
return self.get_value_type()
def get_return_type(self):
return self.get_value_type()
def as_data(self, builder, value):
raise NotImplementedError(self)
def as_argument(self, builder, value):
"""
Takes one LLVM value
Return a LLVM value or nested tuple of LLVM value
"""
raise NotImplementedError(self)
def as_return(self, builder, value):
raise NotImplementedError(self)
def from_data(self, builder, value):
raise NotImplementedError(self)
def from_argument(self, builder, value):
"""
Takes a LLVM value or nested tuple of LLVM value
Returns one LLVM value
"""
raise NotImplementedError(self)
def from_return(self, builder, value):
raise NotImplementedError(self)
def load_from_data_pointer(self, builder, ptr, align=None):
"""
Load value from a pointer to data.
This is the default implementation, sufficient for most purposes.
"""
return self.from_data(builder, builder.load(ptr, align=align))
def traverse(self, builder):
"""
Traverse contained members.
        Returns an iterable of contained (type, getter) pairs.
        Each getter is a one-argument function accepting an LLVM value.
"""
return []
def traverse_models(self):
"""
Recursively list all models involved in this model.
"""
return [self._dmm[t] for t in self.traverse_types()]
def traverse_types(self):
"""
Recursively list all frontend types involved in this model.
"""
types = [self._fe_type]
queue = deque([self])
while len(queue) > 0:
dm = queue.popleft()
for i_dm in dm.inner_models():
if i_dm._fe_type not in types:
queue.append(i_dm)
types.append(i_dm._fe_type)
return types
def inner_models(self):
"""
List all *inner* models.
"""
return []
def get_nrt_meminfo(self, builder, value):
"""
Returns the MemInfo object or None if it is not tracked.
It is only defined for types.meminfo_pointer
"""
return None
def has_nrt_meminfo(self):
return False
def contains_nrt_meminfo(self):
"""
Recursively check all contained types for need for NRT meminfo.
"""
return any(model.has_nrt_meminfo() for model in self.traverse_models())
def _compared_fields(self):
return (type(self), self._fe_type)
def __hash__(self):
return hash(tuple(self._compared_fields()))
def __eq__(self, other):
if type(self) is type(other):
return self._compared_fields() == other._compared_fields()
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
@register_default(types.Omitted)
class OmittedArgDataModel(DataModel):
"""
A data model for omitted arguments. Only the "argument" representation
is defined, other representations raise a NotImplementedError.
"""
# Omitted arguments are using a dummy value type
def get_value_type(self):
return ir.LiteralStructType([])
# Omitted arguments don't produce any LLVM function argument.
def get_argument_type(self):
return ()
def as_argument(self, builder, val):
return ()
def from_argument(self, builder, val):
assert val == (), val
return None
@register_default(types.Boolean)
@register_default(types.BooleanLiteral)
class BooleanModel(DataModel):
_bit_type = ir.IntType(1)
_byte_type = ir.IntType(8)
def get_value_type(self):
return self._bit_type
def get_data_type(self):
return self._byte_type
def get_return_type(self):
return self.get_data_type()
def get_argument_type(self):
return self.get_data_type()
def as_data(self, builder, value):
return builder.zext(value, self.get_data_type())
def as_argument(self, builder, value):
return self.as_data(builder, value)
def as_return(self, builder, value):
return self.as_data(builder, value)
def from_data(self, builder, value):
ty = self.get_value_type()
resalloca = cgutils.alloca_once(builder, ty)
cond = builder.icmp_unsigned('==', value, value.type(0))
with builder.if_else(cond) as (then, otherwise):
with then:
builder.store(ty(0), resalloca)
with otherwise:
builder.store(ty(1), resalloca)
return builder.load(resalloca)
def from_argument(self, builder, value):
return self.from_data(builder, value)
def from_return(self, builder, value):
return self.from_data(builder, value)
class PrimitiveModel(DataModel):
"""A primitive type can be represented natively in the target in all
usage contexts.
"""
def __init__(self, dmm, fe_type, be_type):
super(PrimitiveModel, self).__init__(dmm, fe_type)
self.be_type = be_type
def get_value_type(self):
return self.be_type
def as_data(self, builder, value):
return value
def as_argument(self, builder, value):
return value
def as_return(self, builder, value):
return value
def from_data(self, builder, value):
return value
def from_argument(self, builder, value):
return value
def from_return(self, builder, value):
return value
class ProxyModel(DataModel):
"""
Helper class for models which delegate to another model.
"""
def get_value_type(self):
return self._proxied_model.get_value_type()
def get_data_type(self):
return self._proxied_model.get_data_type()
def get_return_type(self):
return self._proxied_model.get_return_type()
def get_argument_type(self):
return self._proxied_model.get_argument_type()
def as_data(self, builder, value):
return self._proxied_model.as_data(builder, value)
def as_argument(self, builder, value):
return self._proxied_model.as_argument(builder, value)
def as_return(self, builder, value):
return self._proxied_model.as_return(builder, value)
def from_data(self, builder, value):
return self._proxied_model.from_data(builder, value)
def from_argument(self, builder, value):
return self._proxied_model.from_argument(builder, value)
def from_return(self, builder, value):
return self._proxied_model.from_return(builder, value)
@register_default(types.EnumMember)
@register_default(types.IntEnumMember)
class EnumModel(ProxyModel):
"""
Enum members are represented exactly like their values.
"""
def __init__(self, dmm, fe_type):
super(EnumModel, self).__init__(dmm, fe_type)
self._proxied_model = dmm.lookup(fe_type.dtype)
@register_default(types.Opaque)
@register_default(types.PyObject)
@register_default(types.RawPointer)
@register_default(types.NoneType)
@register_default(types.StringLiteral)
@register_default(types.EllipsisType)
@register_default(types.Function)
@register_default(types.Type)
@register_default(types.Object)
@register_default(types.Module)
@register_default(types.Phantom)
@register_default(types.ContextManager)
@register_default(types.Dispatcher)
@register_default(types.ObjModeDispatcher)
@register_default(types.ExceptionClass)
@register_default(types.Dummy)
@register_default(types.ExceptionInstance)
@register_default(types.ExternalFunction)
@register_default(types.EnumClass)
@register_default(types.IntEnumClass)
@register_default(types.NumberClass)
@register_default(types.TypeRef)
@register_default(types.NamedTupleClass)
@register_default(types.DType)
@register_default(types.RecursiveCall)
@register_default(types.MakeFunctionLiteral)
@register_default(types.Poison)
class OpaqueModel(PrimitiveModel):
"""
Passed as opaque pointers
"""
_ptr_type = ir.IntType(8).as_pointer()
def __init__(self, dmm, fe_type):
be_type = self._ptr_type
super(OpaqueModel, self).__init__(dmm, fe_type, be_type)
@register_default(types.MemInfoPointer)
class MemInfoModel(OpaqueModel):
def inner_models(self):
return [self._dmm.lookup(self._fe_type.dtype)]
def has_nrt_meminfo(self):
return True
def get_nrt_meminfo(self, builder, value):
return value
@register_default(types.Integer)
@register_default(types.IntegerLiteral)
class IntegerModel(PrimitiveModel):
def __init__(self, dmm, fe_type):
be_type = ir.IntType(fe_type.bitwidth)
super(IntegerModel, self).__init__(dmm, fe_type, be_type)
@register_default(types.Float)
class FloatModel(PrimitiveModel):
def __init__(self, dmm, fe_type):
if fe_type == types.float32:
be_type = ir.FloatType()
elif fe_type == types.float64:
be_type = ir.DoubleType()
else:
raise NotImplementedError(fe_type)
super(FloatModel, self).__init__(dmm, fe_type, be_type)
@register_default(types.CPointer)
class PointerModel(PrimitiveModel):
def __init__(self, dmm, fe_type):
self._pointee_model = dmm.lookup(fe_type.dtype)
self._pointee_be_type = self._pointee_model.get_data_type()
be_type = self._pointee_be_type.as_pointer()
super(PointerModel, self).__init__(dmm, fe_type, be_type)
@register_default(types.EphemeralPointer)
class EphemeralPointerModel(PointerModel):
def get_data_type(self):
return self._pointee_be_type
def as_data(self, builder, value):
value = builder.load(value)
return self._pointee_model.as_data(builder, value)
def from_data(self, builder, value):
raise NotImplementedError("use load_from_data_pointer() instead")
def load_from_data_pointer(self, builder, ptr, align=None):
return builder.bitcast(ptr, self.get_value_type())
@register_default(types.EphemeralArray)
class EphemeralArrayModel(PointerModel):
def __init__(self, dmm, fe_type):
super(EphemeralArrayModel, self).__init__(dmm, fe_type)
self._data_type = ir.ArrayType(self._pointee_be_type,
self._fe_type.count)
def get_data_type(self):
return self._data_type
def as_data(self, builder, value):
values = [builder.load(cgutils.gep_inbounds(builder, value, i))
for i in range(self._fe_type.count)]
return cgutils.pack_array(builder, values)
def from_data(self, builder, value):
raise NotImplementedError("use load_from_data_pointer() instead")
def load_from_data_pointer(self, builder, ptr, align=None):
return builder.bitcast(ptr, self.get_value_type())
@register_default(types.ExternalFunctionPointer)
class ExternalFuncPointerModel(PrimitiveModel):
def __init__(self, dmm, fe_type):
sig = fe_type.sig
# Since the function is non-Numba, there is no adaptation
# of arguments and return value, hence get_value_type().
retty = dmm.lookup(sig.return_type).get_value_type()
args = [dmm.lookup(t).get_value_type() for t in sig.args]
be_type = ir.PointerType(ir.FunctionType(retty, args))
super(ExternalFuncPointerModel, self).__init__(dmm, fe_type, be_type)
@register_default(types.UniTuple)
@register_default(types.NamedUniTuple)
@register_default(types.StarArgUniTuple)
class UniTupleModel(DataModel):
def __init__(self, dmm, fe_type):
super(UniTupleModel, self).__init__(dmm, fe_type)
self._elem_model = dmm.lookup(fe_type.dtype)
self._count = len(fe_type)
self._value_type = ir.ArrayType(self._elem_model.get_value_type(),
self._count)
self._data_type = ir.ArrayType(self._elem_model.get_data_type(),
self._count)
def get_value_type(self):
return self._value_type
def get_data_type(self):
return self._data_type
def get_return_type(self):
return self.get_value_type()
def get_argument_type(self):
return (self._elem_model.get_argument_type(),) * self._count
def as_argument(self, builder, value):
out = []
for i in range(self._count):
v = builder.extract_value(value, [i])
v = self._elem_model.as_argument(builder, v)
out.append(v)
return out
def from_argument(self, builder, value):
out = ir.Constant(self.get_value_type(), ir.Undefined)
for i, v in enumerate(value):
v = self._elem_model.from_argument(builder, v)
out = builder.insert_value(out, v, [i])
return out
def as_data(self, builder, value):
out = ir.Constant(self.get_data_type(), ir.Undefined)
for i in range(self._count):
val = builder.extract_value(value, [i])
dval = self._elem_model.as_data(builder, val)
out = builder.insert_value(out, dval, [i])
return out
def from_data(self, builder, value):
out = ir.Constant(self.get_value_type(), ir.Undefined)
for i in range(self._count):
val = builder.extract_value(value, [i])
dval = self._elem_model.from_data(builder, val)
out = builder.insert_value(out, dval, [i])
return out
def as_return(self, builder, value):
return value
def from_return(self, builder, value):
return value
def traverse(self, builder):
def getter(i, value):
return builder.extract_value(value, i)
return [(self._fe_type.dtype, partial(getter, i))
for i in range(self._count)]
def inner_models(self):
return [self._elem_model]
class CompositeModel(DataModel):
"""Any model that is composed of multiple other models should subclass from
this.
"""
pass
class StructModel(CompositeModel):
_value_type = None
_data_type = None
def __init__(self, dmm, fe_type, members):
super(StructModel, self).__init__(dmm, fe_type)
if members:
self._fields, self._members = zip(*members)
else:
self._fields = self._members = ()
self._models = tuple([self._dmm.lookup(t) for t in self._members])
def get_member_fe_type(self, name):
"""
StructModel-specific: get the Numba type of the field named *name*.
"""
pos = self.get_field_position(name)
return self._members[pos]
def get_value_type(self):
if self._value_type is None:
self._value_type = ir.LiteralStructType([t.get_value_type()
for t in self._models])
return self._value_type
def get_data_type(self):
if self._data_type is None:
self._data_type = ir.LiteralStructType([t.get_data_type()
for t in self._models])
return self._data_type
def get_argument_type(self):
return tuple([t.get_argument_type() for t in self._models])
def get_return_type(self):
return self.get_data_type()
def _as(self, methname, builder, value):
extracted = []
for i, dm in enumerate(self._models):
extracted.append(getattr(dm, methname)(builder,
self.get(builder, value, i)))
return tuple(extracted)
def _from(self, methname, builder, value):
struct = ir.Constant(self.get_value_type(), ir.Undefined)
for i, (dm, val) in enumerate(zip(self._models, value)):
v = getattr(dm, methname)(builder, val)
struct = self.set(builder, struct, v, i)
return struct
def as_data(self, builder, value):
"""
Converts the LLVM struct in `value` into a representation suited for
storing into arrays.
Note
----
Current implementation rarely changes how types are represented for
"value" and "data". This is usually a pointless rebuild of the
immutable LLVM struct value. Luckily, LLVM optimization removes all
redundancy.
        Sample use case: structures nested with pointers to other structures
        can be serialized into a flat representation when storing into an
        array.
"""
elems = self._as("as_data", builder, value)
struct = ir.Constant(self.get_data_type(), ir.Undefined)
for i, el in enumerate(elems):
struct = builder.insert_value(struct, el, [i])
return struct
def from_data(self, builder, value):
"""
Convert from "data" representation back into "value" representation.
Usually invoked when loading from array.
See notes in `as_data()`
"""
vals = [builder.extract_value(value, [i])
for i in range(len(self._members))]
return self._from("from_data", builder, vals)
def load_from_data_pointer(self, builder, ptr, align=None):
values = []
for i, model in enumerate(self._models):
elem_ptr = cgutils.gep_inbounds(builder, ptr, 0, i)
val = model.load_from_data_pointer(builder, elem_ptr, align)
values.append(val)
struct = ir.Constant(self.get_value_type(), ir.Undefined)
for i, val in enumerate(values):
struct = self.set(builder, struct, val, i)
return struct
def as_argument(self, builder, value):
return self._as("as_argument", builder, value)
def from_argument(self, builder, value):
return self._from("from_argument", builder, value)
def as_return(self, builder, value):
elems = self._as("as_data", builder, value)
struct = ir.Constant(self.get_data_type(), ir.Undefined)
for i, el in enumerate(elems):
struct = builder.insert_value(struct, el, [i])
return struct
def from_return(self, builder, value):
vals = [builder.extract_value(value, [i])
for i in range(len(self._members))]
return self._from("from_data", builder, vals)
def get(self, builder, val, pos):
"""Get a field at the given position or the fieldname
Args
----
builder:
LLVM IRBuilder
        val:
            LLVM struct value to read the field from
pos: int or str
field index or field name
Returns
-------
Extracted value
"""
if isinstance(pos, str):
pos = self.get_field_position(pos)
return builder.extract_value(val, [pos],
name="extracted." + self._fields[pos])
def set(self, builder, stval, val, pos):
"""Set a field at the given position or the fieldname
Args
----
builder:
LLVM IRBuilder
stval:
LLVM struct value
val:
value to be inserted
pos: int or str
field index or field name
Returns
-------
A new LLVM struct with the value inserted
"""
if isinstance(pos, str):
pos = self.get_field_position(pos)
return builder.insert_value(stval, val, [pos],
name="inserted." + self._fields[pos])
def get_field_position(self, field):
try:
return self._fields.index(field)
except ValueError:
raise KeyError("%s does not have a field named %r"
% (self.__class__.__name__, field))
@property
def field_count(self):
return len(self._fields)
def get_type(self, pos):
"""Get the frontend type (numba type) of a field given the position
or the fieldname
Args
----
pos: int or str
field index or field name
"""
if isinstance(pos, str):
pos = self.get_field_position(pos)
return self._members[pos]
def get_model(self, pos):
"""
Get the datamodel of a field given the position or the fieldname.
Args
----
pos: int or str
field index or field name
"""
return self._models[pos]
def traverse(self, builder):
def getter(k, value):
if value.type != self.get_value_type():
args = self.get_value_type(), value.type
raise TypeError("expecting {0} but got {1}".format(*args))
return self.get(builder, value, k)
return [(self.get_type(k), partial(getter, k)) for k in self._fields]
def inner_models(self):
return self._models
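# ---------------------------------------------------------------------------
# Illustrative note (not part of the original module): for a StructModel whose
# members are [('real', ...), ('imag', ...)] -- e.g. the ComplexModel below --
# ``model.get(builder, val, 'imag')`` resolves the field name through
# get_field_position() and is equivalent to ``model.get(builder, val, 1)``,
# lowering to an LLVM extractvalue instruction.
# ---------------------------------------------------------------------------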
@register_default(types.Complex)
class ComplexModel(StructModel):
_element_type = NotImplemented
def __init__(self, dmm, fe_type):
members = [
('real', fe_type.underlying_float),
('imag', fe_type.underlying_float),
]
super(ComplexModel, self).__init__(dmm, fe_type, members)
@register_default(types.LiteralList)
@register_default(types.LiteralStrKeyDict)
@register_default(types.Tuple)
@register_default(types.NamedTuple)
@register_default(types.StarArgTuple)
class TupleModel(StructModel):
def __init__(self, dmm, fe_type):
members = [('f' + str(i), t) for i, t in enumerate(fe_type)]
super(TupleModel, self).__init__(dmm, fe_type, members)
@register_default(types.UnionType)
class UnionModel(StructModel):
def __init__(self, dmm, fe_type):
members = [
('tag', types.uintp),
# XXX: it should really be a MemInfoPointer(types.voidptr)
('payload', types.Tuple.from_types(fe_type.types)),
]
super(UnionModel, self).__init__(dmm, fe_type, members)
@register_default(types.Pair)
class PairModel(StructModel):
def __init__(self, dmm, fe_type):
members = [('first', fe_type.first_type),
('second', fe_type.second_type)]
super(PairModel, self).__init__(dmm, fe_type, members)
@register_default(types.ListPayload)
class ListPayloadModel(StructModel):
def __init__(self, dmm, fe_type):
# The fields are mutable but the payload is always manipulated
# by reference. This scheme allows mutations of an array to
# be seen by its iterators.
members = [
('size', types.intp),
('allocated', types.intp),
            # This member is only used for reflected lists
('dirty', types.boolean),
# Actually an inlined var-sized array
('data', fe_type.container.dtype),
]
super(ListPayloadModel, self).__init__(dmm, fe_type, members)
@register_default(types.List)
class ListModel(StructModel):
def __init__(self, dmm, fe_type):
payload_type = types.ListPayload(fe_type)
members = [
# The meminfo data points to a ListPayload
('meminfo', types.MemInfoPointer(payload_type)),
            # This member is only used for reflected lists
('parent', types.pyobject),
]
super(ListModel, self).__init__(dmm, fe_type, members)
@register_default(types.ListIter)
class ListIterModel(StructModel):
def __init__(self, dmm, fe_type):
payload_type = types.ListPayload(fe_type.container)
members = [
# The meminfo data points to a ListPayload (shared with the
# original list object)
('meminfo', types.MemInfoPointer(payload_type)),
('index', types.EphemeralPointer(types.intp)),
]
super(ListIterModel, self).__init__(dmm, fe_type, members)
@register_default(types.SetEntry)
class SetEntryModel(StructModel):
def __init__(self, dmm, fe_type):
dtype = fe_type.set_type.dtype
members = [
# -1 = empty, -2 = deleted
('hash', types.intp),
('key', dtype),
]
super(SetEntryModel, self).__init__(dmm, fe_type, members)
@register_default(types.SetPayload)
class SetPayloadModel(StructModel):
def __init__(self, dmm, fe_type):
entry_type = types.SetEntry(fe_type.container)
members = [
# Number of active + deleted entries
('fill', types.intp),
# Number of active entries
('used', types.intp),
# Allocated size - 1 (size being a power of 2)
('mask', types.intp),
# Search finger
('finger', types.intp),
            # This member is only used for reflected sets
('dirty', types.boolean),
# Actually an inlined var-sized array
('entries', entry_type),
]
super(SetPayloadModel, self).__init__(dmm, fe_type, members)
@register_default(types.Set)
class SetModel(StructModel):
def __init__(self, dmm, fe_type):
payload_type = types.SetPayload(fe_type)
members = [
# The meminfo data points to a SetPayload
('meminfo', types.MemInfoPointer(payload_type)),
            # This member is only used for reflected sets
('parent', types.pyobject),
]
super(SetModel, self).__init__(dmm, fe_type, members)
@register_default(types.SetIter)
class SetIterModel(StructModel):
def __init__(self, dmm, fe_type):
payload_type = types.SetPayload(fe_type.container)
members = [
# The meminfo data points to a SetPayload (shared with the
# original set object)
('meminfo', types.MemInfoPointer(payload_type)),
# The index into the entries table
('index', types.EphemeralPointer(types.intp)),
]
super(SetIterModel, self).__init__(dmm, fe_type, members)
@register_default(types.Array)
@register_default(types.Buffer)
@register_default(types.ByteArray)
@register_default(types.Bytes)
@register_default(types.MemoryView)
@register_default(types.PyArray)
class ArrayModel(StructModel):
def __init__(self, dmm, fe_type):
ndim = fe_type.ndim
members = [
('meminfo', types.MemInfoPointer(fe_type.dtype)),
('parent', types.pyobject),
('nitems', types.intp),
('itemsize', types.intp),
('data', types.CPointer(fe_type.dtype)),
('shape', types.UniTuple(types.intp, ndim)),
('strides', types.UniTuple(types.intp, ndim)),
]
super(ArrayModel, self).__init__(dmm, fe_type, members)
@register_default(types.ArrayFlags)
class ArrayFlagsModel(StructModel):
def __init__(self, dmm, fe_type):
members = [
('parent', fe_type.array_type),
]
super(ArrayFlagsModel, self).__init__(dmm, fe_type, members)
@register_default(types.NestedArray)
class NestedArrayModel(ArrayModel):
def __init__(self, dmm, fe_type):
self._be_type = dmm.lookup(fe_type.dtype).get_data_type()
super(NestedArrayModel, self).__init__(dmm, fe_type)
@register_default(types.Optional)
class OptionalModel(StructModel):
def __init__(self, dmm, fe_type):
members = [
('data', fe_type.type),
('valid', types.boolean),
]
self._value_model = dmm.lookup(fe_type.type)
super(OptionalModel, self).__init__(dmm, fe_type, members)
def get_return_type(self):
return self._value_model.get_return_type()
def as_return(self, builder, value):
raise NotImplementedError
def from_return(self, builder, value):
return self._value_model.from_return(builder, value)
def traverse(self, builder):
def get_data(value):
valid = get_valid(value)
data = self.get(builder, value, "data")
return builder.select(valid, data, ir.Constant(data.type, None))
def get_valid(value):
return self.get(builder, value, "valid")
return [(self.get_type("data"), get_data),
(self.get_type("valid"), get_valid)]
@register_default(types.Record)
class RecordModel(CompositeModel):
def __init__(self, dmm, fe_type):
super(RecordModel, self).__init__(dmm, fe_type)
self._models = [self._dmm.lookup(t) for _, t in fe_type.members]
self._be_type = ir.ArrayType(ir.IntType(8), fe_type.size)
self._be_ptr_type = self._be_type.as_pointer()
def get_value_type(self):
"""Passed around as reference to underlying data
"""
return self._be_ptr_type
def get_argument_type(self):
return self._be_ptr_type
def get_return_type(self):
return self._be_ptr_type
def get_data_type(self):
return self._be_type
def as_data(self, builder, value):
return builder.load(value)
def from_data(self, builder, value):
raise NotImplementedError("use load_from_data_pointer() instead")
def as_argument(self, builder, value):
return value
def from_argument(self, builder, value):
return value
def as_return(self, builder, value):
return value
def from_return(self, builder, value):
return value
def load_from_data_pointer(self, builder, ptr, align=None):
return builder.bitcast(ptr, self.get_value_type())
@register_default(types.UnicodeCharSeq)
class UnicodeCharSeq(DataModel):
def __init__(self, dmm, fe_type):
super(UnicodeCharSeq, self).__init__(dmm, fe_type)
charty = ir.IntType(numpy_support.sizeof_unicode_char * 8)
self._be_type = ir.ArrayType(charty, fe_type.count)
def get_value_type(self):
return self._be_type
def get_data_type(self):
return self._be_type
def as_data(self, builder, value):
return value
def from_data(self, builder, value):
return value
def as_return(self, builder, value):
return value
def from_return(self, builder, value):
return value
def as_argument(self, builder, value):
return value
def from_argument(self, builder, value):
return value
@register_default(types.CharSeq)
class CharSeq(DataModel):
def __init__(self, dmm, fe_type):
super(CharSeq, self).__init__(dmm, fe_type)
charty = ir.IntType(8)
self._be_type = ir.ArrayType(charty, fe_type.count)
def get_value_type(self):
return self._be_type
def get_data_type(self):
return self._be_type
def as_data(self, builder, value):
return value
def from_data(self, builder, value):
return value
def as_return(self, builder, value):
return value
def from_return(self, builder, value):
return value
def as_argument(self, builder, value):
return value
def from_argument(self, builder, value):
return value
class CContiguousFlatIter(StructModel):
def __init__(self, dmm, fe_type, need_indices):
assert fe_type.array_type.layout == 'C'
array_type = fe_type.array_type
dtype = array_type.dtype
ndim = array_type.ndim
members = [('array', array_type),
('stride', types.intp),
('index', types.EphemeralPointer(types.intp)),
]
if need_indices:
# For ndenumerate()
members.append(('indices', types.EphemeralArray(types.intp, ndim)))
super(CContiguousFlatIter, self).__init__(dmm, fe_type, members)
class FlatIter(StructModel):
def __init__(self, dmm, fe_type):
array_type = fe_type.array_type
dtype = array_type.dtype
ndim = array_type.ndim
members = [('array', array_type),
('pointers', types.EphemeralArray(types.CPointer(dtype), ndim)),
('indices', types.EphemeralArray(types.intp, ndim)),
('exhausted', types.EphemeralPointer(types.boolean)),
]
super(FlatIter, self).__init__(dmm, fe_type, members)
@register_default(types.UniTupleIter)
class UniTupleIter(StructModel):
def __init__(self, dmm, fe_type):
members = [('index', types.EphemeralPointer(types.intp)),
('tuple', fe_type.container,)]
super(UniTupleIter, self).__init__(dmm, fe_type, members)
@register_default(types.misc.SliceLiteral)
@register_default(types.SliceType)
class SliceModel(StructModel):
def __init__(self, dmm, fe_type):
members = [('start', types.intp),
('stop', types.intp),
('step', types.intp),
]
super(SliceModel, self).__init__(dmm, fe_type, members)
@register_default(types.NPDatetime)
@register_default(types.NPTimedelta)
class NPDatetimeModel(PrimitiveModel):
def __init__(self, dmm, fe_type):
be_type = ir.IntType(64)
super(NPDatetimeModel, self).__init__(dmm, fe_type, be_type)
@register_default(types.ArrayIterator)
class ArrayIterator(StructModel):
def __init__(self, dmm, fe_type):
# We use an unsigned index to avoid the cost of negative index tests.
members = [('index', types.EphemeralPointer(types.uintp)),
('array', fe_type.array_type)]
super(ArrayIterator, self).__init__(dmm, fe_type, members)
@register_default(types.EnumerateType)
class EnumerateType(StructModel):
def __init__(self, dmm, fe_type):
members = [('count', types.EphemeralPointer(types.intp)),
('iter', fe_type.source_type)]
super(EnumerateType, self).__init__(dmm, fe_type, members)
@register_default(types.ZipType)
class ZipType(StructModel):
def __init__(self, dmm, fe_type):
members = [('iter%d' % i, source_type.iterator_type)
for i, source_type in enumerate(fe_type.source_types)]
super(ZipType, self).__init__(dmm, fe_type, members)
@register_default(types.RangeIteratorType)
class RangeIteratorType(StructModel):
def __init__(self, dmm, fe_type):
int_type = fe_type.yield_type
members = [('iter', types.EphemeralPointer(int_type)),
('stop', int_type),
('step', int_type),
('count', types.EphemeralPointer(int_type))]
super(RangeIteratorType, self).__init__(dmm, fe_type, members)
@register_default(types.Generator)
class GeneratorModel(CompositeModel):
def __init__(self, dmm, fe_type):
super(GeneratorModel, self).__init__(dmm, fe_type)
# XXX Fold this in DataPacker?
self._arg_models = [self._dmm.lookup(t) for t in fe_type.arg_types
if not isinstance(t, types.Omitted)]
self._state_models = [self._dmm.lookup(t) for t in fe_type.state_types]
self._args_be_type = ir.LiteralStructType(
[t.get_data_type() for t in self._arg_models])
self._state_be_type = ir.LiteralStructType(
[t.get_data_type() for t in self._state_models])
# The whole generator closure
self._be_type = ir.LiteralStructType(
[self._dmm.lookup(types.int32).get_value_type(),
self._args_be_type, self._state_be_type])
self._be_ptr_type = self._be_type.as_pointer()
def get_value_type(self):
"""
The generator closure is passed around as a reference.
"""
return self._be_ptr_type
def get_argument_type(self):
return self._be_ptr_type
def get_return_type(self):
return self._be_type
def get_data_type(self):
return self._be_type
def as_argument(self, builder, value):
return value
def from_argument(self, builder, value):
return value
def as_return(self, builder, value):
return self.as_data(builder, value)
def from_return(self, builder, value):
return self.from_data(builder, value)
def as_data(self, builder, value):
return builder.load(value)
def from_data(self, builder, value):
stack = cgutils.alloca_once(builder, value.type)
builder.store(value, stack)
return stack
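# ---------------------------------------------------------------------------
# Illustrative note (sketch, not part of the original module): the generator
# closure built above is an LLVM struct laid out roughly as
#   { i32 resume_index, { packed argument data }, { packed state data } }
# and is passed around by pointer (see get_value_type / get_argument_type),
# while as_data()/from_data() load and spill the whole struct.
# ---------------------------------------------------------------------------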
@register_default(types.ArrayCTypes)
class ArrayCTypesModel(StructModel):
def __init__(self, dmm, fe_type):
# ndim = fe_type.ndim
members = [('data', types.CPointer(fe_type.dtype)),
('meminfo', types.MemInfoPointer(fe_type.dtype))]
super(ArrayCTypesModel, self).__init__(dmm, fe_type, members)
@register_default(types.RangeType)
class RangeModel(StructModel):
def __init__(self, dmm, fe_type):
int_type = fe_type.iterator_type.yield_type
members = [('start', int_type),
('stop', int_type),
('step', int_type)]
super(RangeModel, self).__init__(dmm, fe_type, members)
# =============================================================================
@register_default(types.NumpyNdIndexType)
class NdIndexModel(StructModel):
def __init__(self, dmm, fe_type):
ndim = fe_type.ndim
members = [('shape', types.UniTuple(types.intp, ndim)),
('indices', types.EphemeralArray(types.intp, ndim)),
('exhausted', types.EphemeralPointer(types.boolean)),
]
super(NdIndexModel, self).__init__(dmm, fe_type, members)
@register_default(types.NumpyFlatType)
def handle_numpy_flat_type(dmm, ty):
if ty.array_type.layout == 'C':
return CContiguousFlatIter(dmm, ty, need_indices=False)
else:
return FlatIter(dmm, ty)
@register_default(types.NumpyNdEnumerateType)
def handle_numpy_ndenumerate_type(dmm, ty):
if ty.array_type.layout == 'C':
return CContiguousFlatIter(dmm, ty, need_indices=True)
else:
return FlatIter(dmm, ty)
@register_default(types.BoundFunction)
def handle_bound_function(dmm, ty):
# The same as the underlying type
return dmm[ty.this]
@register_default(types.NumpyNdIterType)
class NdIter(StructModel):
def __init__(self, dmm, fe_type):
array_types = fe_type.arrays
ndim = fe_type.ndim
shape_len = ndim if fe_type.need_shaped_indexing else 1
members = [('exhausted', types.EphemeralPointer(types.boolean)),
('arrays', types.Tuple(array_types)),
# The iterator's main shape and indices
('shape', types.UniTuple(types.intp, shape_len)),
('indices', types.EphemeralArray(types.intp, shape_len)),
]
# Indexing state for the various sub-iterators
# XXX use a tuple instead?
for i, sub in enumerate(fe_type.indexers):
kind, start_dim, end_dim, _ = sub
member_name = 'index%d' % i
if kind == 'flat':
# A single index into the flattened array
members.append((member_name, types.EphemeralPointer(types.intp)))
elif kind in ('scalar', 'indexed', '0d'):
# Nothing required
pass
else:
assert 0
# Slots holding values of the scalar args
# XXX use a tuple instead?
for i, ty in enumerate(fe_type.arrays):
if not isinstance(ty, types.Array):
member_name = 'scalar%d' % i
members.append((member_name, types.EphemeralPointer(ty)))
super(NdIter, self).__init__(dmm, fe_type, members)
@register_default(types.DeferredType)
class DeferredStructModel(CompositeModel):
def __init__(self, dmm, fe_type):
super(DeferredStructModel, self).__init__(dmm, fe_type)
self.typename = "deferred.{0}".format(id(fe_type))
self.actual_fe_type = fe_type.get()
def get_value_type(self):
return ir.global_context.get_identified_type(self.typename + '.value')
def get_data_type(self):
return ir.global_context.get_identified_type(self.typename + '.data')
def get_argument_type(self):
return self._actual_model.get_argument_type()
def as_argument(self, builder, value):
inner = self.get(builder, value)
return self._actual_model.as_argument(builder, inner)
def from_argument(self, builder, value):
res = self._actual_model.from_argument(builder, value)
return self.set(builder, self.make_uninitialized(), res)
def from_data(self, builder, value):
self._define()
elem = self.get(builder, value)
value = self._actual_model.from_data(builder, elem)
out = self.make_uninitialized()
return self.set(builder, out, value)
def as_data(self, builder, value):
self._define()
elem = self.get(builder, value)
value = self._actual_model.as_data(builder, elem)
out = self.make_uninitialized(kind='data')
return self.set(builder, out, value)
def from_return(self, builder, value):
return value
def as_return(self, builder, value):
return value
def get(self, builder, value):
return builder.extract_value(value, [0])
def set(self, builder, value, content):
return builder.insert_value(value, content, [0])
def make_uninitialized(self, kind='value'):
self._define()
if kind == 'value':
ty = self.get_value_type()
else:
ty = self.get_data_type()
return ir.Constant(ty, ir.Undefined)
def _define(self):
valty = self.get_value_type()
self._define_value_type(valty)
datty = self.get_data_type()
self._define_data_type(datty)
def _define_value_type(self, value_type):
if value_type.is_opaque:
value_type.set_body(self._actual_model.get_value_type())
def _define_data_type(self, data_type):
if data_type.is_opaque:
data_type.set_body(self._actual_model.get_data_type())
@property
def _actual_model(self):
return self._dmm.lookup(self.actual_fe_type)
def traverse(self, builder):
return [(self.actual_fe_type,
lambda value: builder.extract_value(value, [0]))]
@register_default(types.StructRefPayload)
class StructPayloadModel(StructModel):
"""Model for the payload of a mutable struct
"""
def __init__(self, dmm, fe_typ):
members = tuple(fe_typ.field_dict.items())
super().__init__(dmm, fe_typ, members)
class StructRefModel(StructModel):
"""Model for a mutable struct.
A reference to the payload
"""
def __init__(self, dmm, fe_typ):
dtype = fe_typ.get_data_type()
members = [
("meminfo", types.MemInfoPointer(dtype)),
]
super().__init__(dmm, fe_typ, members)
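# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how a new data model
# is typically registered for a front-end type.  ``types.IntervalType`` below
# is an assumed placeholder type used only for illustration.
#
#   @register_default(types.IntervalType)
#   class IntervalModel(StructModel):
#       def __init__(self, dmm, fe_type):
#           members = [('lo', types.float64),   # lower bound of the interval
#                      ('hi', types.float64)]   # upper bound of the interval
#           super().__init__(dmm, fe_type, members)
# ---------------------------------------------------------------------------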
| IntelLabs/numba | numba/core/datamodel/models.py | Python | bsd-2-clause | 44,245 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import tempfile
import os
import unittest
from telemetry import benchmark
from telemetry.core import bitmap
from telemetry.core import util
# This is a simple base64 encoded 2x2 PNG which contains, in order, a single
# Red, Yellow, Blue, and Green pixel.
test_png = """
iVBORw0KGgoAAAANSUhEUgAAAAIAAAACCAIAAAD91
JpzAAAAAXNSR0IArs4c6QAAAAlwSFlzAAALEwAACx
MBAJqcGAAAABZJREFUCNdj/M/AwPCfgYGB4T/DfwY
AHAAD/iOWZXsAAAAASUVORK5CYII=
"""
test_png_path = os.path.join(util.GetUnittestDataDir(), 'test_png.png')
test_png_2_path = os.path.join(util.GetUnittestDataDir(), 'test_png_2.png')
class HistogramDistanceTest(unittest.TestCase):
def testNoData(self):
hist1 = []
hist2 = []
self.assertRaises(
ValueError, lambda: bitmap.HistogramDistance(hist1, hist2))
hist1 = [0, 0, 0]
hist2 = [0, 0, 0]
self.assertRaises(
ValueError, lambda: bitmap.HistogramDistance(hist1, hist2))
def testWrongSizes(self):
hist1 = [1]
hist2 = [1, 0]
self.assertRaises(
ValueError, lambda: bitmap.HistogramDistance(hist1, hist2))
def testNoDistance(self):
hist1 = [2, 4, 1, 8, 0, -1]
hist2 = [2, 4, 1, 8, 0, -1]
self.assertEqual(bitmap.HistogramDistance(hist1, hist2), 0)
def testNormalizeCounts(self):
hist1 = [0, 0, 1, 0, 0]
hist2 = [0, 0, 0, 0, 7]
self.assertEqual(bitmap.HistogramDistance(hist1, hist2), 2)
self.assertEqual(bitmap.HistogramDistance(hist2, hist1), 2)
def testDistance(self):
hist1 = [2, 0, 1, 3, 4]
hist2 = [3, 1, 2, 4, 0]
self.assertEqual(bitmap.HistogramDistance(hist1, hist2), 1)
self.assertEqual(bitmap.HistogramDistance(hist2, hist1), 1)
hist1 = [0, 1, 3, 1]
hist2 = [2, 2, 1, 0]
self.assertEqual(bitmap.HistogramDistance(hist1, hist2), 1.2)
self.assertEqual(bitmap.HistogramDistance(hist2, hist1), 1.2)
class BitmapTest(unittest.TestCase):
# pylint: disable=C0324
def testReadFromBase64Png(self):
bmp = bitmap.Bitmap.FromBase64Png(test_png)
self.assertEquals(2, bmp.width)
self.assertEquals(2, bmp.height)
bmp.GetPixelColor(0, 0).AssertIsRGB(255, 0, 0)
bmp.GetPixelColor(1, 1).AssertIsRGB(0, 255, 0)
bmp.GetPixelColor(0, 1).AssertIsRGB(0, 0, 255)
bmp.GetPixelColor(1, 0).AssertIsRGB(255, 255, 0)
def testReadFromPngFile(self):
file_bmp = bitmap.Bitmap.FromPngFile(test_png_path)
self.assertEquals(2, file_bmp.width)
self.assertEquals(2, file_bmp.height)
file_bmp.GetPixelColor(0, 0).AssertIsRGB(255, 0, 0)
file_bmp.GetPixelColor(1, 1).AssertIsRGB(0, 255, 0)
file_bmp.GetPixelColor(0, 1).AssertIsRGB(0, 0, 255)
file_bmp.GetPixelColor(1, 0).AssertIsRGB(255, 255, 0)
def testWritePngToPngFile(self):
orig = bitmap.Bitmap.FromPngFile(test_png_path)
temp_file = tempfile.NamedTemporaryFile().name
orig.WritePngFile(temp_file)
new_file = bitmap.Bitmap.FromPngFile(temp_file)
self.assertTrue(orig.IsEqual(new_file))
@benchmark.Disabled
def testWriteCroppedBmpToPngFile(self):
pixels = [255,0,0, 255,255,0, 0,0,0,
255,255,0, 0,255,0, 0,0,0]
orig = bitmap.Bitmap(3, 3, 2, pixels)
orig.Crop(0, 0, 2, 2)
temp_file = tempfile.NamedTemporaryFile().name
orig.WritePngFile(temp_file)
new_file = bitmap.Bitmap.FromPngFile(temp_file)
self.assertTrue(orig.IsEqual(new_file))
def testIsEqual(self):
bmp = bitmap.Bitmap.FromBase64Png(test_png)
file_bmp = bitmap.Bitmap.FromPngFile(test_png_path)
self.assertTrue(bmp.IsEqual(file_bmp))
def testDiff(self):
file_bmp = bitmap.Bitmap.FromPngFile(test_png_path)
file_bmp_2 = bitmap.Bitmap.FromPngFile(test_png_2_path)
diff_bmp = file_bmp.Diff(file_bmp)
self.assertEquals(2, diff_bmp.width)
self.assertEquals(2, diff_bmp.height)
diff_bmp.GetPixelColor(0, 0).AssertIsRGB(0, 0, 0)
diff_bmp.GetPixelColor(1, 1).AssertIsRGB(0, 0, 0)
diff_bmp.GetPixelColor(0, 1).AssertIsRGB(0, 0, 0)
diff_bmp.GetPixelColor(1, 0).AssertIsRGB(0, 0, 0)
diff_bmp = file_bmp.Diff(file_bmp_2)
self.assertEquals(3, diff_bmp.width)
self.assertEquals(3, diff_bmp.height)
diff_bmp.GetPixelColor(0, 0).AssertIsRGB(0, 255, 255)
diff_bmp.GetPixelColor(1, 1).AssertIsRGB(255, 0, 255)
diff_bmp.GetPixelColor(0, 1).AssertIsRGB(255, 255, 0)
diff_bmp.GetPixelColor(1, 0).AssertIsRGB(0, 0, 255)
diff_bmp.GetPixelColor(0, 2).AssertIsRGB(255, 255, 255)
diff_bmp.GetPixelColor(1, 2).AssertIsRGB(255, 255, 255)
diff_bmp.GetPixelColor(2, 0).AssertIsRGB(255, 255, 255)
diff_bmp.GetPixelColor(2, 1).AssertIsRGB(255, 255, 255)
diff_bmp.GetPixelColor(2, 2).AssertIsRGB(255, 255, 255)
@benchmark.Disabled
def testGetBoundingBox(self):
pixels = [0,0,0, 0,0,0, 0,0,0, 0,0,0,
0,0,0, 1,0,0, 1,0,0, 0,0,0,
0,0,0, 0,0,0, 0,0,0, 0,0,0]
bmp = bitmap.Bitmap(3, 4, 3, pixels)
box, count = bmp.GetBoundingBox(bitmap.RgbaColor(1, 0, 0))
self.assertEquals(box, (1, 1, 2, 1))
self.assertEquals(count, 2)
box, count = bmp.GetBoundingBox(bitmap.RgbaColor(0, 1, 0))
self.assertEquals(box, None)
self.assertEquals(count, 0)
@benchmark.Disabled
def testCrop(self):
pixels = [0,0,0, 1,0,0, 2,0,0, 3,0,0,
0,1,0, 1,1,0, 2,1,0, 3,1,0,
0,2,0, 1,2,0, 2,2,0, 3,2,0]
bmp = bitmap.Bitmap(3, 4, 3, pixels)
bmp.Crop(1, 2, 2, 1)
self.assertEquals(bmp.width, 2)
self.assertEquals(bmp.height, 1)
bmp.GetPixelColor(0, 0).AssertIsRGB(1, 2, 0)
bmp.GetPixelColor(1, 0).AssertIsRGB(2, 2, 0)
self.assertEquals(bmp.pixels, bytearray([1,2,0, 2,2,0]))
@benchmark.Disabled
def testHistogram(self):
pixels = [1,2,3, 1,2,3, 1,2,3, 1,2,3,
1,2,3, 8,7,6, 5,4,6, 1,2,3,
1,2,3, 8,7,6, 5,4,6, 1,2,3]
bmp = bitmap.Bitmap(3, 4, 3, pixels)
bmp.Crop(1, 1, 2, 2)
histogram = bmp.ColorHistogram()
for i in xrange(3):
self.assertEquals(sum(histogram[i]), bmp.width * bmp.height)
self.assertEquals(histogram.r[1], 0)
self.assertEquals(histogram.r[5], 2)
self.assertEquals(histogram.r[8], 2)
self.assertEquals(histogram.g[2], 0)
self.assertEquals(histogram.g[4], 2)
self.assertEquals(histogram.g[7], 2)
self.assertEquals(histogram.b[3], 0)
self.assertEquals(histogram.b[6], 4)
@benchmark.Disabled
def testHistogramIgnoreColor(self):
pixels = [1,2,3, 1,2,3, 1,2,3, 1,2,3,
1,2,3, 8,7,6, 5,4,6, 1,2,3,
1,2,3, 8,7,6, 5,4,6, 1,2,3]
bmp = bitmap.Bitmap(3, 4, 3, pixels)
histogram = bmp.ColorHistogram(ignore_color=bitmap.RgbaColor(1, 2, 3))
self.assertEquals(histogram.r[1], 0)
self.assertEquals(histogram.r[5], 2)
self.assertEquals(histogram.r[8], 2)
self.assertEquals(histogram.g[2], 0)
self.assertEquals(histogram.g[4], 2)
self.assertEquals(histogram.g[7], 2)
self.assertEquals(histogram.b[3], 0)
self.assertEquals(histogram.b[6], 4)
@benchmark.Disabled
def testHistogramIgnoreColorTolerance(self):
pixels = [1,2,3, 4,5,6,
7,8,9, 8,7,6]
bmp = bitmap.Bitmap(3, 2, 2, pixels)
histogram = bmp.ColorHistogram(ignore_color=bitmap.RgbaColor(0, 1, 2),
tolerance=1)
self.assertEquals(histogram.r[1], 0)
self.assertEquals(histogram.r[4], 1)
self.assertEquals(histogram.r[7], 1)
self.assertEquals(histogram.r[8], 1)
self.assertEquals(histogram.g[2], 0)
self.assertEquals(histogram.g[5], 1)
self.assertEquals(histogram.g[7], 1)
self.assertEquals(histogram.g[8], 1)
self.assertEquals(histogram.b[3], 0)
self.assertEquals(histogram.b[6], 2)
self.assertEquals(histogram.b[9], 1)
@benchmark.Disabled
def testHistogramDistanceIgnoreColor(self):
pixels = [1,2,3, 1,2,3,
1,2,3, 1,2,3]
bmp = bitmap.Bitmap(3, 2, 2, pixels)
hist1 = bmp.ColorHistogram(ignore_color=bitmap.RgbaColor(1, 2, 3))
hist2 = bmp.ColorHistogram()
self.assertEquals(hist1.Distance(hist2), 0)
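# ---------------------------------------------------------------------------
# Illustrative note (not part of the original module): these unit tests are
# normally picked up by Telemetry's own test runner, but, assuming
# tools/telemetry is on PYTHONPATH, the module can also be exercised directly
# with the standard library runner, e.g.:
#
#   python -m unittest telemetry.core.bitmap_unittest
# ---------------------------------------------------------------------------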
| chromium2014/src | tools/telemetry/telemetry/core/bitmap_unittest.py | Python | bsd-3-clause | 8,155 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
MAST Utils
==========
Miscellaneous functions used throughout the MAST module.
"""
import numpy as np
import requests
import json
from urllib import parse
import astropy.coordinates as coord
from ..version import version
from ..exceptions import ResolverError, InvalidQueryError
from ..utils import commons
from . import conf
__all__ = []
def parse_type(dbtype):
"""
Takes a data type as returned by a database call and regularizes it into a
triplet of the form (human readable datatype, python datatype, default value).
Parameters
----------
dbtype : str
A data type, as returned by a database call (ex. 'char').
Returns
-------
response : tuple
Regularized type tuple of the form (human readable datatype, python datatype, default value).
For example:
_parse_type("short")
('integer', np.int64, -999)
"""
dbtype = dbtype.lower()
return {
'char': ('string', str, ""),
'string': ('string', str, ""),
'datetime': ('string', str, ""), # TODO: handle datetimes correctly
'date': ('string', str, ""), # TODO: handle datetimes correctly
'double': ('float', np.float64, np.nan),
'float': ('float', np.float64, np.nan),
'decimal': ('float', np.float64, np.nan),
'int': ('integer', np.int64, -999),
'short': ('integer', np.int64, -999),
'long': ('integer', np.int64, -999),
'number': ('integer', np.int64, -999),
'boolean': ('boolean', bool, None),
'binary': ('boolean', bool, None),
'unsignedbyte': ('byte', np.ubyte, -999)
}.get(dbtype, (dbtype, dbtype, dbtype))
def _simple_request(url, params):
"""
    Light wrapper around an HTTP GET request, kept as a separate helper mainly
    to make monkey-patched testing easier and more effective.
"""
session = requests.session()
headers = {"User-Agent": "astroquery/{} {}".format(version, session.headers['User-Agent']),
"Content-type": "application/x-www-form-urlencoded",
"Accept": "text/plain"}
response = requests.get(url, params=params, headers=headers)
response.raise_for_status()
return response
def resolve_object(objectname):
"""
Resolves an object name to a position on the sky.
Parameters
----------
objectname : str
Name of astronomical object to resolve.
Returns
-------
response : `~astropy.coordinates.SkyCoord`
The sky position of the given object.
"""
request_args = {"service": "Mast.Name.Lookup",
"params": {'input': objectname, 'format': 'json'}}
request_string = 'request={}'.format(parse.quote(json.dumps(request_args)))
response = _simple_request("{}/api/v0/invoke".format(conf.server), request_string)
result = response.json()
if len(result['resolvedCoordinate']) == 0:
raise ResolverError("Could not resolve {} to a sky position.".format(objectname))
ra = result['resolvedCoordinate'][0]['ra']
dec = result['resolvedCoordinate'][0]['decl']
coordinates = coord.SkyCoord(ra, dec, unit="deg")
return coordinates
def parse_input_location(coordinates=None, objectname=None):
"""
Convenience function to parse user input of coordinates and objectname.
Parameters
----------
coordinates : str or `astropy.coordinates` object, optional
The target around which to search. It may be specified as a
string or as the appropriate `astropy.coordinates` object.
One and only one of coordinates and objectname must be supplied.
objectname : str, optional
The target around which to search, by name (objectname="M104")
or TIC ID (objectname="TIC 141914082").
One and only one of coordinates and objectname must be supplied.
Returns
-------
response : `~astropy.coordinates.SkyCoord`
The given coordinates, or object's location as an `~astropy.coordinates.SkyCoord` object.
"""
# Checking for valid input
if objectname and coordinates:
raise InvalidQueryError("Only one of objectname and coordinates may be specified.")
if not (objectname or coordinates):
raise InvalidQueryError("One of objectname and coordinates must be specified.")
if objectname:
obj_coord = resolve_object(objectname)
if coordinates:
obj_coord = commons.parse_coordinates(coordinates)
return obj_coord
def mast_relative_path(mast_uri):
"""
Given a MAST dataURI, return the associated relative path.
Parameters
----------
mast_uri : str
The MAST uri.
Returns
-------
response : str
The associated relative path.
"""
response = _simple_request("https://mast.stsci.edu/api/v0.1/path_lookup/",
{"uri": mast_uri})
result = response.json()
uri_result = result.get(mast_uri)
return uri_result["path"]
| imbasimba/astroquery | astroquery/mast/utils.py | Python | bsd-3-clause | 5,016 |
# Generated by Django 2.2.5 on 2020-08-03 10:37
import django.core.files.storage
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import uuid
class Migration(migrations.Migration):
dependencies = [
('jobs', '0002_dataprovider_attribute_class'),
('tasks', '0005_dataprovidertaskrecord_preview'),
]
operations = [
migrations.AddField(
model_name='dataprovidertaskrecord',
name='provider',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='task_record_providers', to='jobs.DataProvider'),
),
migrations.AlterField(
model_name='exportrun',
name='deleted',
field=models.BooleanField(db_index=True, default=False),
),
migrations.CreateModel(
name='RunZipFile',
fields=[
('created_at', models.DateTimeField(default=django.utils.timezone.now, editable=False)),
('updated_at', models.DateTimeField(default=django.utils.timezone.now, editable=False)),
('started_at', models.DateTimeField(default=django.utils.timezone.now, editable=False)),
('finished_at', models.DateTimeField(editable=False, null=True)),
('id', models.AutoField(editable=False, primary_key=True, serialize=False)),
('uid', models.UUIDField(db_index=True, default=uuid.uuid4, editable=False, unique=True)),
('data_provider_task_records', models.ManyToManyField(to='tasks.DataProviderTaskRecord')),
('downloadable_file', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='tasks.FileProducingTaskResult')),
('run', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='zip_files', to='tasks.ExportRun')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ExportRunFile',
fields=[
('created_at', models.DateTimeField(default=django.utils.timezone.now, editable=False)),
('updated_at', models.DateTimeField(default=django.utils.timezone.now, editable=False)),
('id', models.AutoField(editable=False, primary_key=True, serialize=False)),
('uid', models.UUIDField(db_index=True, default=uuid.uuid4, editable=False, unique=True)),
('file', models.FileField(storage=django.core.files.storage.FileSystemStorage(base_url='/export_run_files/', location='/var/lib/eventkit/exports_stage/export_run_files'), upload_to='', verbose_name='File')),
('directory', models.CharField(blank=True, help_text='An optional directory name to store the file in.', max_length=100, null=True)),
('provider', models.ForeignKey(blank=True, help_text='An optional data provider to associate the file with.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='file_provider', to='jobs.DataProvider')),
],
options={
'abstract': False,
},
),
]
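# Illustrative note (assumes a standard Django project layout for
# eventkit-cloud): this migration is applied with
#   python manage.py migrate tasks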
| venicegeo/eventkit-cloud | eventkit_cloud/tasks/migrations/0006_auto_20200803_1037.py | Python | bsd-3-clause | 3,280 |
import numpy as np
from numpy import diag, inf
from numpy import copy, dot
from numpy.linalg import norm
class ExceededMaxIterationsError(Exception):
def __init__(self, msg, matrix=[], iteration=[], ds=[]):
self.msg = msg
self.matrix = matrix
self.iteration = iteration
self.ds = ds
def __str__(self):
return repr(self.msg)
def nearcorr(A, tol=[], flag=0, max_iterations=100, n_pos_eig=0,
weights=None, verbose=False,
except_on_too_many_iterations=True):
"""
    X = nearcorr(A, tol=[], flag=0, max_iterations=100, n_pos_eig=0,
                 weights=None, verbose=False, except_on_too_many_iterations=True)
Finds the nearest correlation matrix to the symmetric matrix A.
ARGUMENTS
~~~~~~~~~
A is a symmetric numpy array or a ExceededMaxIterationsError object
tol is a convergence tolerance, which defaults to 16*EPS.
If using flag == 1, tol must be a size 2 tuple, with first component
the convergence tolerance and second component a tolerance
for defining "sufficiently positive" eigenvalues.
flag = 0: solve using full eigendecomposition (EIG).
flag = 1: treat as "highly non-positive definite A" and solve
using partial eigendecomposition (EIGS). CURRENTLY NOT IMPLEMENTED
max_iterations is the maximum number of iterations (default 100,
but may need to be increased).
n_pos_eig (optional) is the known number of positive eigenvalues
of A. CURRENTLY NOT IMPLEMENTED
weights is an optional vector defining a diagonal weight matrix diag(W).
verbose = True for display of intermediate output.
CURRENTLY NOT IMPLEMENTED
    except_on_too_many_iterations = True to raise an exception when
number of iterations exceeds max_iterations
except_on_too_many_iterations = False to silently return the best result
found after max_iterations number of iterations
ABOUT
~~~~~~
This is a Python port by Michael Croucher, November 2014
Thanks to Vedran Sego for many useful comments and suggestions.
Original MATLAB code by N. J. Higham, 13/6/01, updated 30/1/13.
Reference: N. J. Higham, Computing the nearest correlation
matrix---A problem from finance. IMA J. Numer. Anal.,
22(3):329-343, 2002.
"""
# If input is an ExceededMaxIterationsError object this
# is a restart computation
if (isinstance(A, ExceededMaxIterationsError)):
ds = copy(A.ds)
A = copy(A.matrix)
else:
ds = np.zeros(np.shape(A))
eps = np.spacing(1)
if not np.all((np.transpose(A) == A)):
raise ValueError('Input Matrix is not symmetric')
if not tol:
tol = eps * np.shape(A)[0] * np.array([1, 1])
if weights is None:
weights = np.ones(np.shape(A)[0])
X = copy(A)
Y = copy(A)
rel_diffY = inf
rel_diffX = inf
rel_diffXY = inf
Whalf = np.sqrt(np.outer(weights, weights))
iteration = 0
while max(rel_diffX, rel_diffY, rel_diffXY) > tol[0]:
iteration += 1
if iteration > max_iterations:
if except_on_too_many_iterations:
if max_iterations == 1:
message = "No solution found in "\
+ str(max_iterations) + " iteration"
else:
message = "No solution found in "\
+ str(max_iterations) + " iterations"
raise ExceededMaxIterationsError(message, X, iteration, ds)
else:
# exceptOnTooManyIterations is false so just silently
# return the result even though it has not converged
return X
Xold = copy(X)
R = X - ds
R_wtd = Whalf*R
if flag == 0:
X = proj_spd(R_wtd)
elif flag == 1:
raise NotImplementedError("Setting 'flag' to 1 is currently\
not implemented.")
X = X / Whalf
ds = X - R
Yold = copy(Y)
Y = copy(X)
np.fill_diagonal(Y, 1)
normY = norm(Y, 'fro')
rel_diffX = norm(X - Xold, 'fro') / norm(X, 'fro')
rel_diffY = norm(Y - Yold, 'fro') / normY
rel_diffXY = norm(Y - X, 'fro') / normY
X = copy(Y)
return X
def proj_spd(A):
# NOTE: the input matrix is assumed to be symmetric
d, v = np.linalg.eigh(A)
A = (v * np.maximum(d, 0)).dot(v.T)
A = (A + A.T) / 2
return(A)
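# ---------------------------------------------------------------------------
# Illustrative usage sketch (the symmetric input below is just an example, not
# taken from the reference; it should converge well within the default
# iteration limit):
#
#   >>> A = np.array([[ 2., -1.,  0.],
#   ...               [-1.,  2., -1.],
#   ...               [ 0., -1.,  2.]])
#   >>> X = nearcorr(A)
#   >>> np.allclose(np.diag(X), 1.0)   # result has a unit diagonal
#   True
# ---------------------------------------------------------------------------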
| mikecroucher/nearest_correlation | nearest_correlation.py | Python | bsd-3-clause | 4,572 |
#!/usr/bin/env python
from tools.load import LoadMatrix
lm=LoadMatrix()
traindat = lm.load_numbers('../data/fm_train_real.dat')
testdat = lm.load_numbers('../data/fm_test_real.dat')
parameter_list = [[traindat,testdat,1.1],[traindat,testdat,1.2]]
def kernel_sparse_gaussian (fm_train_real=traindat,fm_test_real=testdat,width=1.1 ):
from shogun import SparseRealFeatures
import shogun as sg
feats_train=SparseRealFeatures(fm_train_real)
feats_test=SparseRealFeatures(fm_test_real)
kernel=sg.create_kernel("GaussianKernel", width=width)
kernel.init(feats_train, feats_train,)
km_train=kernel.get_kernel_matrix()
kernel.init(feats_train, feats_test)
km_test=kernel.get_kernel_matrix()
return km_train,km_test,kernel
if __name__=='__main__':
print('SparseGaussian')
kernel_sparse_gaussian (*parameter_list[0])
| shogun-toolbox/shogun | examples/undocumented/python/kernel_sparse_gaussian.py | Python | bsd-3-clause | 824 |
# $Id: __init__.py 5618 2008-07-28 08:37:32Z strank $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
This package contains Docutils Writer modules.
"""
__docformat__ = 'reStructuredText'
import os.path
import docutils
from docutils import languages, Component
from docutils.transforms import universal
class Writer(Component):
"""
Abstract base class for docutils Writers.
Each writer module or package must export a subclass also called 'Writer'.
Each writer must support all standard node types listed in
`docutils.nodes.node_class_names`.
The `write()` method is the main entry point.
"""
component_type = 'writer'
config_section = 'writers'
def get_transforms(self):
return Component.get_transforms(self) + [
universal.Messages,
universal.FilterMessages,
universal.StripClassesAndElements,]
document = None
"""The document to write (Docutils doctree); set by `write`."""
output = None
"""Final translated form of `document` (Unicode string for text, binary
string for other forms); set by `translate`."""
language = None
"""Language module for the document; set by `write`."""
destination = None
"""`docutils.io` Output object; where to write the document.
Set by `write`."""
def __init__(self):
# Currently only used by HTML writer for output fragments:
self.parts = {}
"""Mapping of document part names to fragments of `self.output`.
Values are Unicode strings; encoding is up to the client. The 'whole'
key should contain the entire document output.
"""
def write(self, document, destination):
"""
Process a document into its final form.
Translate `document` (a Docutils document tree) into the Writer's
native format, and write it out to its `destination` (a
`docutils.io.Output` subclass object).
Normally not overridden or extended in subclasses.
"""
self.document = document
self.language = languages.get_language(
document.settings.language_code)
self.destination = destination
self.translate()
output = self.destination.write(self.output)
return output
def translate(self):
"""
Do final translation of `self.document` into `self.output`. Called
from `write`. Override in subclasses.
Usually done with a `docutils.nodes.NodeVisitor` subclass, in
combination with a call to `docutils.nodes.Node.walk()` or
`docutils.nodes.Node.walkabout()`. The ``NodeVisitor`` subclass must
support all standard elements (listed in
`docutils.nodes.node_class_names`) and possibly non-standard elements
used by the current Reader as well.
"""
raise NotImplementedError('subclass must override this method')
def assemble_parts(self):
"""Assemble the `self.parts` dictionary. Extend in subclasses."""
self.parts['whole'] = self.output
self.parts['encoding'] = self.document.settings.output_encoding
self.parts['version'] = docutils.__version__
class UnfilteredWriter(Writer):
"""
    A writer that passes the document tree on unchanged (e.g. a
    serializer).
Documents written by UnfilteredWriters are typically reused at a
later date using a subclass of `readers.ReReader`.
"""
def get_transforms(self):
# Do not add any transforms. When the document is reused
# later, the then-used writer will add the appropriate
# transforms.
return Component.get_transforms(self)
_writer_aliases = {
'html': 'html4css1',
'latex': 'latex2e',
'pprint': 'pseudoxml',
'pformat': 'pseudoxml',
'pdf': 'rlpdf',
'xml': 'docutils_xml',
's5': 's5_html'}
def get_writer_class(writer_name):
"""Return the Writer class from the `writer_name` module."""
writer_name = writer_name.lower()
if writer_name in _writer_aliases:
writer_name = _writer_aliases[writer_name]
module = __import__(writer_name, globals(), locals())
return module.Writer
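# ---------------------------------------------------------------------------
# Illustrative usage sketch (assumes a standard, Python 2-era Docutils
# installation where the 'pseudoxml' writer module is importable):
#
#   >>> from docutils import writers
#   >>> writer_class = writers.get_writer_class('pformat')  # alias -> pseudoxml
#   >>> writer = writer_class()
# ---------------------------------------------------------------------------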
| spreeker/democracygame | external_apps/docutils-snapshot/docutils/writers/__init__.py | Python | bsd-3-clause | 4,258 |
import numpy as np
import paths
import pyregion
from astropy import coordinates
from astropy import units as u
from astropy import table
from astropy.table import Table,Column
import latex_info
from latex_info import latexdict, exp_to_tex, format_float
tbl = Table.read(paths.tpath('PPV_H2CO_Temperature.ipac'), format='ascii.ipac')
def make_column(colname, errcolname, outcolname, unit, formatstr="${0:0.2f}\pm{1:0.2f}$"):
data = [formatstr.format(d,e) for d,e in zip(tbl[colname], tbl[errcolname])]
return Column(data=data, name=outcolname, unit=unit)
def make_column_asymm(colname, errlowcolname, errhighcolname, outcolname, unit, formatstr="${0:0.2f}^{{+{1:0.2f}}}_{{-{2:0.2f}}}$"):
data = [formatstr.format(d,el,eh) for d,el,eh in zip(tbl[colname],
tbl[colname]-tbl[errlowcolname],
tbl[errhighcolname]-tbl[colname])]
return Column(data=data, name=outcolname, unit=unit)
columns = {'_idx': 'Source ID',
'DespoticTem': '$T_{gas, turb}$',
'logh2column': 'log($n(H_2)$)',
}
#'spline_h2coratio321303': '$R_1$',
#'espline_h2coratio321303': '$\sigma(R_1)$',}
# Note: overrides the format_float imported from latex_info above.
def format_float(st):
return exp_to_tex("{0:0.3g}".format(st))
def format_int(st):
return ("{0:d}".format(int(st)))
formats = {'DespoticTem': format_int,
'logh2column': format_float,
'v_rms': format_float,
'v_cen': format_float,
'$\sigma_v$': format_float,
'$v_{lsr}$': format_float,
'Max $T_B(3_{0,3})$': format_float,
#'$T_{gas}$': format_float,
}
outtbl = Table([tbl['_idx'],
make_column('ratio321303', 'eratio321303', '$R_1$', None, "${0:0.3f}\pm{1:0.3f}$"),
Column(tbl['v_rms']/1e3, name='$\sigma_v$', unit=u.km/u.s),
Column(tbl['v_cen']/1e3, name='$v_{lsr}$', unit=u.km/u.s),
Column(tbl['Smax303'], name='Max $T_B(3_{0,3})$', unit=u.K),
#make_column('spline_ampH2CO', 'espline_ampH2CO', '$T_B(H_2CO)$', u.K),
#make_column('logh2column', 'elogh2column', 'log($n(H_2)$)', u.cm**-2),
tbl['logh2column'],
make_column_asymm('temperature_chi2', 'tmin1sig_chi2', 'tmax1sig_chi2', '$T_{gas}$', u.K),
Column(data=tbl['DespoticTem'], name='DespoticTem', unit=u.K),
]
)
for old, new in columns.items():
outtbl.rename_column(old, new)
if old in formats:
formats[new] = formats[old]
latexdict['header_start'] = '\label{tab:dendroregions}'
latexdict['caption'] = '\\formaldehyde Parameters and Fit Properties for dendrogram-selected clumps'
latexdict['tablefoot'] = ('\par\n')
#latexdict['col_align'] = 'lllrr'
#latexdict['tabletype'] = 'longtable'
#latexdict['tabulartype'] = 'longtable'
outtbl.write(paths.tpath('dendro_props.tex'), format='ascii.latex',
latexdict=latexdict,
formats=formats,
)
outtbl[::10].write(paths.tpath('dendro_props_excerpt.tex'), format='ascii.latex',
latexdict=latexdict,
formats=formats,
)
| adamginsburg/APEX_CMZ_H2CO | analysis/texify_dendro_table.py | Python | bsd-3-clause | 3,226 |
import sys
import pytest
from numpy.testing import (
assert_, assert_array_equal, assert_raises,
)
import numpy as np
from numpy import random
class TestRegression:
def test_VonMises_range(self):
# Make sure generated random variables are in [-pi, pi].
# Regression test for ticket #986.
for mu in np.linspace(-7., 7., 5):
r = random.vonmises(mu, 1, 50)
assert_(np.all(r > -np.pi) and np.all(r <= np.pi))
def test_hypergeometric_range(self):
# Test for ticket #921
assert_(np.all(random.hypergeometric(3, 18, 11, size=10) < 4))
assert_(np.all(random.hypergeometric(18, 3, 11, size=10) > 0))
# Test for ticket #5623
args = [
(2**20 - 2, 2**20 - 2, 2**20 - 2), # Check for 32-bit systems
]
is_64bits = sys.maxsize > 2**32
if is_64bits and sys.platform != 'win32':
# Check for 64-bit systems
args.append((2**40 - 2, 2**40 - 2, 2**40 - 2))
for arg in args:
assert_(random.hypergeometric(*arg) > 0)
def test_logseries_convergence(self):
# Test for ticket #923
N = 1000
random.seed(0)
rvsn = random.logseries(0.8, size=N)
# these two frequency counts should be close to theoretical
# numbers with this large sample
# theoretical large N result is 0.49706795
freq = np.sum(rvsn == 1) / N
msg = f'Frequency was {freq:f}, should be > 0.45'
assert_(freq > 0.45, msg)
# theoretical large N result is 0.19882718
freq = np.sum(rvsn == 2) / N
msg = f'Frequency was {freq:f}, should be < 0.23'
assert_(freq < 0.23, msg)
def test_shuffle_mixed_dimension(self):
# Test for trac ticket #2074
for t in [[1, 2, 3, None],
[(1, 1), (2, 2), (3, 3), None],
[1, (2, 2), (3, 3), None],
[(1, 1), 2, 3, None]]:
random.seed(12345)
shuffled = list(t)
random.shuffle(shuffled)
expected = np.array([t[0], t[3], t[1], t[2]], dtype=object)
assert_array_equal(np.array(shuffled, dtype=object), expected)
def test_call_within_randomstate(self):
# Check that custom RandomState does not call into global state
m = random.RandomState()
res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3])
for i in range(3):
random.seed(i)
m.seed(4321)
# If m.state is not honored, the result will change
assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res)
def test_multivariate_normal_size_types(self):
# Test for multivariate_normal issue with 'size' argument.
# Check that the multivariate_normal size argument can be a
# numpy integer.
random.multivariate_normal([0], [[0]], size=1)
random.multivariate_normal([0], [[0]], size=np.int_(1))
random.multivariate_normal([0], [[0]], size=np.int64(1))
def test_beta_small_parameters(self):
# Test that beta with small a and b parameters does not produce
# NaNs due to roundoff errors causing 0 / 0, gh-5851
random.seed(1234567890)
x = random.beta(0.0001, 0.0001, size=100)
assert_(not np.any(np.isnan(x)), 'Nans in random.beta')
def test_choice_sum_of_probs_tolerance(self):
# The sum of probs should be 1.0 with some tolerance.
# For low precision dtypes the tolerance was too tight.
# See numpy github issue 6123.
random.seed(1234)
a = [1, 2, 3]
counts = [4, 4, 2]
for dt in np.float16, np.float32, np.float64:
probs = np.array(counts, dtype=dt) / sum(counts)
c = random.choice(a, p=probs)
assert_(c in a)
assert_raises(ValueError, random.choice, a, p=probs*0.9)
def test_shuffle_of_array_of_different_length_strings(self):
# Test that permuting an array of different length strings
# will not cause a segfault on garbage collection
# Tests gh-7710
random.seed(1234)
a = np.array(['a', 'a' * 1000])
for _ in range(100):
random.shuffle(a)
# Force Garbage Collection - should not segfault.
import gc
gc.collect()
def test_shuffle_of_array_of_objects(self):
# Test that permuting an array of objects will not cause
# a segfault on garbage collection.
# See gh-7719
random.seed(1234)
a = np.array([np.arange(1), np.arange(4)], dtype=object)
for _ in range(1000):
random.shuffle(a)
# Force Garbage Collection - should not segfault.
import gc
gc.collect()
def test_permutation_subclass(self):
class N(np.ndarray):
pass
random.seed(1)
orig = np.arange(3).view(N)
perm = random.permutation(orig)
assert_array_equal(perm, np.array([0, 2, 1]))
assert_array_equal(orig, np.arange(3).view(N))
class M:
a = np.arange(5)
def __array__(self):
return self.a
random.seed(1)
m = M()
perm = random.permutation(m)
assert_array_equal(perm, np.array([2, 1, 4, 0, 3]))
assert_array_equal(m.__array__(), np.arange(5))
def test_warns_byteorder(self):
# GH 13159
other_byteord_dt = '<i4' if sys.byteorder == 'big' else '>i4'
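        # '<i4' is little-endian and '>i4' big-endian int32; pick whichever
        # byte order is non-native on this platform.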
with pytest.deprecated_call(match='non-native byteorder is not'):
random.randint(0, 200, size=10, dtype=other_byteord_dt)
def test_named_argument_initialization(self):
# GH 13669
rs1 = np.random.RandomState(123456789)
rs2 = np.random.RandomState(seed=123456789)
assert rs1.randint(0, 100) == rs2.randint(0, 100)
    def test_choice_return_dtype(self):
# GH 9867
c = np.random.choice(10, p=[.1]*10, size=2)
assert c.dtype == np.dtype(int)
c = np.random.choice(10, p=[.1]*10, replace=False, size=2)
assert c.dtype == np.dtype(int)
c = np.random.choice(10, size=2)
assert c.dtype == np.dtype(int)
c = np.random.choice(10, replace=False, size=2)
assert c.dtype == np.dtype(int)
@pytest.mark.skipif(np.iinfo('l').max < 2**32,
reason='Cannot test with 32-bit C long')
def test_randint_117(self):
# GH 14189
random.seed(0)
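        # (These appear to be the first ten raw 32-bit MT19937 outputs for
        # seed 0; randint with a 2**32 bound should consume one raw draw
        # per value.)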
expected = np.array([2357136044, 2546248239, 3071714933, 3626093760,
2588848963, 3684848379, 2340255427, 3638918503,
1819583497, 2678185683], dtype='int64')
actual = random.randint(2**32, size=10)
assert_array_equal(actual, expected)
def test_p_zero_stream(self):
# Regression test for gh-14522. Ensure that future versions
# generate the same variates as version 1.16.
np.random.seed(12345)
assert_array_equal(random.binomial(1, [0, 0.25, 0.5, 0.75, 1]),
[0, 0, 0, 1, 1])
def test_n_zero_stream(self):
# Regression test for gh-14522. Ensure that future versions
# generate the same variates as version 1.16.
np.random.seed(8675309)
expected = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[3, 4, 2, 3, 3, 1, 5, 3, 1, 3]])
assert_array_equal(random.binomial([[0], [10]], 0.25, size=(2, 10)),
expected)
| simongibbons/numpy | numpy/random/tests/test_randomstate_regression.py | Python | bsd-3-clause | 7,541 |
#!/usr/bin/python
# Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
import idlnode
import idlparser
import logging.config
import sys
import unittest
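# Unit tests for the idlnode AST wrappers, exercised against the WebIDL,
# WebKit and FremontCut grammars defined in idlparser.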
class IDLNodeTestCase(unittest.TestCase):
def _run_test(self, syntax, content, expected):
"""Utility run tests and prints extra contextual information.
Args:
syntax -- IDL grammar to use (either idlparser.WEBKIT_SYNTAX,
WEBIDL_SYNTAX or FREMONTCUT_SYNTAX). If None, will run
multiple tests, each with a different syntax.
content -- input text for the parser.
expected -- expected parse result.
"""
if syntax is None:
self._run_test(idlparser.WEBIDL_SYNTAX, content, expected)
self._run_test(idlparser.WEBKIT_SYNTAX, content, expected)
self._run_test(idlparser.FREMONTCUT_SYNTAX, content, expected)
return
actual = None
error = None
ast = None
parseResult = None
try:
parser = idlparser.IDLParser(syntax)
ast = parser.parse(content)
node = idlnode.IDLFile(ast)
actual = node.to_dict() if node else None
        except SyntaxError as e:
            error = e
if actual == expected:
return
else:
msg = '''
SYNTAX : %s
CONTENT :
%s
EXPECTED:
%s
ACTUAL :
%s
ERROR : %s
AST :
%s
''' % (syntax, content, expected, actual, error, ast)
self.fail(msg)
def test_empty_module(self):
self._run_test(None, 'module TestModule {};',
{'modules': [{
'id': 'TestModule'
}]})
def test_empty_interface(self):
self._run_test(
None, 'module TestModule { interface Interface1 {}; };', {
'modules': [{
'interfaces': [{
'javascript_binding_name': 'Interface1',
'doc_js_name': 'Interface1',
'id': 'Interface1'
}],
'id':
'TestModule'
}]
})
def test_gcc_preprocessor(self):
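        # The WebKit grammar is expected to tolerate C-preprocessor
        # conditionals (#if/#endif) around module declarations.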
self._run_test(idlparser.WEBKIT_SYNTAX,
'#if 1\nmodule TestModule {};\n#endif\n',
{'modules': [{
'id': 'TestModule'
}]})
def test_extended_attributes(self):
self._run_test(
idlparser.WEBKIT_SYNTAX,
'module M { interface [ExAt1, ExAt2] I {};};', {
'modules': [{
'interfaces': [{
'javascript_binding_name': 'I',
'doc_js_name': 'I',
'ext_attrs': {
'ExAt1': None,
'ExAt2': None
},
'id': 'I'
}],
'id':
'M'
}]
})
def test_implements_statement(self):
self._run_test(
idlparser.WEBIDL_SYNTAX, 'module M { X implements Y; };', {
'modules': [{
'implementsStatements': [{
'implementor': {
'id': 'X'
},
'implemented': {
'id': 'Y'
}
}],
'id':
'M'
}]
})
def test_attributes(self):
self._run_test(
idlparser.WEBIDL_SYNTAX, '''interface I {
attribute long a1;
readonly attribute DOMString a2;
attribute any a3;
};''', {
'interfaces': [{
'javascript_binding_name':
'I',
'attributes': [{
'type': {
'id': 'long'
},
'id': 'a1',
'doc_js_interface_name': 'I'
},
{
'type': {
'id': 'DOMString'
},
'is_read_only': True,
'id': 'a2',
'doc_js_interface_name': 'I'
},
{
'type': {
'id': 'any'
},
'id': 'a3',
'doc_js_interface_name': 'I'
}],
'id':
'I',
'doc_js_name':
'I'
}]
})
def test_operations(self):
self._run_test(
idlparser.WEBIDL_SYNTAX, '''interface I {
[ExAttr] t1 op1();
t2 op2(in int arg1, in long arg2);
            getter any item(in long index);
            stringifier name();
        };''', {
'interfaces': [{
'operations':
[{
'doc_js_interface_name': 'I',
'type': {
'id': 't1'
},
'ext_attrs': {
'ExAttr': None
},
'id': 'op1'
},
{
'doc_js_interface_name':
'I',
'type': {
'id': 't2'
},
'id':
'op2',
'arguments': [{
'type': {
'id': 'int'
},
'id': 'arg1'
}, {
'type': {
'id': 'long'
},
'id': 'arg2'
}]
},
{
'specials': ['getter'],
'doc_js_interface_name': 'I',
'type': {
'id': 'any'
},
'id': 'item',
'arguments': [{
'type': {
'id': 'long'
},
'id': 'index'
}]
},
{
'is_stringifier': True,
'type': {
'id': 'name'
},
'doc_js_interface_name': 'I'
}],
'javascript_binding_name':
'I',
'id':
'I',
'doc_js_name':
'I'
}]
})
def test_constants(self):
self._run_test(
None, '''interface I {
const long c1 = 0;
const long c2 = 1;
const long c3 = 0x01;
const long c4 = 10;
const boolean b1 = false;
const boolean b2 = true;
};''', {
'interfaces': [{
'javascript_binding_name':
'I',
'doc_js_name':
'I',
'id':
'I',
'constants': [{
'type': {
'id': 'long'
},
'id': 'c1',
'value': '0',
'doc_js_interface_name': 'I'
},
{
'type': {
'id': 'long'
},
'id': 'c2',
'value': '1',
'doc_js_interface_name': 'I'
},
{
'type': {
'id': 'long'
},
'id': 'c3',
'value': '0x01',
'doc_js_interface_name': 'I'
},
{
'type': {
'id': 'long'
},
'id': 'c4',
'value': '10',
'doc_js_interface_name': 'I'
},
{
'type': {
'id': 'boolean'
},
'id': 'b1',
'value': 'false',
'doc_js_interface_name': 'I'
},
{
'type': {
'id': 'boolean'
},
'id': 'b2',
'value': 'true',
'doc_js_interface_name': 'I'
}]
}]
})
def test_annotations(self):
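        # FremontCut annotations take the form @Name, @Name() or
        # @Name(arg, key=value) and may precede interfaces and their members.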
self._run_test(
idlparser.FREMONTCUT_SYNTAX,
'@Ano1 @Ano2() @Ano3(x=1) @Ano4(x,y=2) interface I {};', {
'interfaces': [{
'javascript_binding_name': 'I',
'doc_js_name': 'I',
'id': 'I',
'annotations': {
'Ano4': {
'y': '2',
'x': None
},
'Ano1': {},
'Ano2': {},
'Ano3': {
'x': '1'
}
}
}]
})
self._run_test(
idlparser.FREMONTCUT_SYNTAX, '''interface I : @Ano1 J {
@Ano2 attribute int someAttr;
@Ano3 void someOp();
@Ano3 const int someConst = 0;
};''', {
'interfaces': [{
'operations': [{
'annotations': {
'Ano3': {}
},
'type': {
'id': 'void'
},
'id': 'someOp',
'doc_js_interface_name': 'I'
}],
'javascript_binding_name':
'I',
'parents': [{
'type': {
'id': 'J'
},
'annotations': {
'Ano1': {}
}
}],
'attributes': [{
'annotations': {
'Ano2': {}
},
'type': {
'id': 'int'
},
'id': 'someAttr',
'doc_js_interface_name': 'I'
}],
'doc_js_name':
'I',
'id':
'I',
'constants': [{
'annotations': {
'Ano3': {}
},
'type': {
'id': 'int'
},
'id': 'someConst',
'value': '0',
'doc_js_interface_name': 'I'
}]
}]
})
def test_inheritance(self):
self._run_test(
None,
'interface Shape {}; interface Rectangle : Shape {}; interface Square : Rectangle, Shape {};',
{
'interfaces': [{
'javascript_binding_name': 'Shape',
'doc_js_name': 'Shape',
'id': 'Shape'
},
{
'javascript_binding_name': 'Rectangle',
'doc_js_name': 'Rectangle',
'parents': [{
'type': {
'id': 'Shape'
}
}],
'id': 'Rectangle'
},
{
'javascript_binding_name':
'Square',
'doc_js_name':
'Square',
'parents': [{
'type': {
'id': 'Rectangle'
}
}, {
'type': {
'id': 'Shape'
}
}],
'id':
'Square'
}]
})
if __name__ == "__main__":
logging.config.fileConfig("logging.conf")
if __name__ == '__main__':
unittest.main()
| dartino/dart-sdk | tools/dom/scripts/idlnode_test.py | Python | bsd-3-clause | 14,699 |