| repo_name | path | language | license | size |
---|---|---|---|---|
"""Document management blueprint."""
from __future__ import annotations
from flask import g
from abilian.i18n import _l
from abilian.sbe.apps.communities.blueprint import Blueprint
from abilian.sbe.apps.communities.security import is_manager
from abilian.sbe.apps.documents.actions import register_actions
from abilian.web.action import Endpoint
from abilian.web.nav import BreadcrumbItem
__all__ = ["blueprint"]
blueprint = Blueprint(
"documents", __name__, url_prefix="/docs", template_folder="../templates"
)
route = blueprint.route
blueprint.record_once(register_actions)
@blueprint.url_value_preprocessor
def init_document_values(endpoint: str, values: dict[str, int]):
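# Runs for every request routed to this blueprint: it records the active
# navigation tab and whether the current user manages the community on
# flask.g, and appends a "Documents" breadcrumb pointing at the community's
# document index.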
g.current_tab = "documents"
g.is_manager = is_manager()
g.breadcrumb.append(
BreadcrumbItem(
label=_l("Documents"),
url=Endpoint("documents.index", community_id=g.community.slug),
)
)
| abilian/abilian-sbe | src/abilian/sbe/apps/documents/views/views.py | Python | lgpl-2.1 | 926 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os.path
import re
import shutil
import llnl.util.tty as tty
import llnl.util.lang
import spack.compiler
import spack.compilers.clang
import spack.paths
import spack.stage
import spack.util.executable
import spack.version
class AppleClang(spack.compilers.clang.Clang):
openmp_flag = "-Xpreprocessor -fopenmp"
@classmethod
@llnl.util.lang.memoized
def extract_version_from_output(cls, output):
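# Illustrative examples (not an exhaustive list) of compiler output this
# matches, and the version that gets extracted:
#   "Apple LLVM version 10.0.0 (clang-1000.11.45.5)"  -> "10.0.0"
#   "Apple clang version 12.0.5 (clang-1205.0.22.9)"  -> "12.0.5"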
ver = 'unknown'
match = re.search(
# Apple's LLVM compiler has its own versions, so suffix them.
r'^Apple (?:LLVM|clang) version ([^ )]+)',
output,
# Multi-line, since 'Apple clang' may not be on the first line
# in particular, when run as gcc, it seems to output
# "Configured with: --prefix=..." as the first line
re.M,
)
if match:
ver = match.group(match.lastindex)
return ver
@property
def cxx11_flag(self):
# Adapted from CMake's AppleClang-CXX rules
# Spack's AppleClang detection only valid from Xcode >= 4.6
if self.version < spack.version.ver('4.0.0'):
raise spack.compiler.UnsupportedCompilerFlag(
self, "the C++11 standard", "cxx11_flag", "Xcode < 4.0.0"
)
return "-std=c++11"
@property
def cxx14_flag(self):
# Adapted from CMake's rules for AppleClang
if self.version < spack.version.ver('5.1.0'):
raise spack.compiler.UnsupportedCompilerFlag(
self, "the C++14 standard", "cxx14_flag", "Xcode < 5.1.0"
)
elif self.version < spack.version.ver('6.1.0'):
return "-std=c++1y"
return "-std=c++14"
@property
def cxx17_flag(self):
# Adapted from CMake's rules for AppleClang
if self.version < spack.version.ver('6.1.0'):
raise spack.compiler.UnsupportedCompilerFlag(
self, "the C++17 standard", "cxx17_flag", "Xcode < 6.1.0"
)
return "-std=c++1z"
def setup_custom_environment(self, pkg, env):
"""Set the DEVELOPER_DIR environment for the Xcode toolchain.
On macOS, not all buildsystems support querying CC and CXX for the
compilers to use and instead query the Xcode toolchain for what
compiler to run. This side-steps the spack wrappers. In order to inject
spack into this setup, we need to copy (a subset of) Xcode.app and
replace the compiler executables with symlinks to the spack wrapper.
Currently, the stage is used to store the Xcode.app copies. We then set
the 'DEVELOPER_DIR' environment variable to cause the xcrun and
related tools to use this Xcode.app.
"""
super(AppleClang, self).setup_custom_environment(pkg, env)
if not pkg.use_xcode:
# if we do it for all packages, we get into big trouble with MPI:
# filter_compilers(self) will use mockup XCode compilers on macOS
# with Clang. Those point to Spack's compiler wrappers and
# consequently render MPI non-functional outside of Spack.
return
# Use special XCode versions of compiler wrappers when using XCode
# Overwrites build_environment's setting of SPACK_CC and SPACK_CXX
xcrun = spack.util.executable.Executable('xcrun')
xcode_clang = xcrun('-f', 'clang', output=str).strip()
xcode_clangpp = xcrun('-f', 'clang++', output=str).strip()
env.set('SPACK_CC', xcode_clang, force=True)
env.set('SPACK_CXX', xcode_clangpp, force=True)
xcode_select = spack.util.executable.Executable('xcode-select')
# Get the path of the active developer directory
real_root = xcode_select('--print-path', output=str).strip()
# The path name can be used to determine whether the full Xcode suite
# or just the command-line tools are installed
if real_root.endswith('Developer'):
# The full Xcode suite is installed
pass
else:
if real_root.endswith('CommandLineTools'):
# Only the command-line tools are installed
msg = 'It appears that you have the Xcode command-line tools '
msg += 'but not the full Xcode suite installed.\n'
else:
# Xcode is not installed
msg = 'It appears that you do not have Xcode installed.\n'
msg += 'In order to use Spack to build the requested application, '
msg += 'you need the full Xcode suite. It can be installed '
msg += 'through the App Store. Make sure you launch the '
msg += 'application and accept the license agreement.\n'
raise OSError(msg)
real_root = os.path.dirname(os.path.dirname(real_root))
developer_root = os.path.join(spack.stage.get_stage_root(),
'xcode-select',
self.name,
str(self.version))
xcode_link = os.path.join(developer_root, 'Xcode.app')
if not os.path.exists(developer_root):
tty.warn('Copying Xcode from %s to %s in order to add spack '
'wrappers to it. Please do not interrupt.'
% (real_root, developer_root))
# We need to make a new Xcode.app instance, but with symlinks to
# the spack wrappers for the compilers it ships. This is necessary
# because some projects insist on just asking xcrun and related
# tools where the compiler runs. These tools are very hard to trick
# as they do realpath and end up ignoring the symlinks in a
# "softer" tree of nothing but symlinks in the right places.
shutil.copytree(
real_root, developer_root, symlinks=True,
ignore=shutil.ignore_patterns(
'AppleTV*.platform', 'Watch*.platform', 'iPhone*.platform',
'Documentation', 'swift*'
))
real_dirs = [
'Toolchains/XcodeDefault.xctoolchain/usr/bin',
'usr/bin',
]
bins = ['c++', 'c89', 'c99', 'cc', 'clang', 'clang++', 'cpp']
for real_dir in real_dirs:
dev_dir = os.path.join(developer_root,
'Contents',
'Developer',
real_dir)
for fname in os.listdir(dev_dir):
if fname in bins:
os.unlink(os.path.join(dev_dir, fname))
os.symlink(
os.path.join(spack.paths.build_env_path, 'cc'),
os.path.join(dev_dir, fname))
os.symlink(developer_root, xcode_link)
env.set('DEVELOPER_DIR', xcode_link)
| rspavel/spack | lib/spack/spack/compilers/apple_clang.py | Python | lgpl-2.1 | 7,173 |
# Copyright (C) Collabora Limited 2017,2019
# Author: Guillaume Tucker <[email protected]>
# Author: dcz-collabora <[email protected]>
#
# Copyright (C) Linaro Limited 2015,2016,2017,2018,2019
# Author: Matt Hart <[email protected]>
# Author: Milo Casagrande <[email protected]>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Functions to import builds/defconfigs."""
try:
import simplejson as json
except ImportError:
import json
try:
from os import walk
except ImportError:
from scandir import walk
import bson
import datetime
import io
import os
import pymongo.errors
import re
import redis
import types
import models
import models.build as mbuild
import models.job as mjob
import utils
import utils.database.redisdb as redisdb
import utils.db
import utils.errors
ERR_ADD = utils.errors.add_error
ERR_UPDATE = utils.errors.update_errors
# Regex to extract the kernel version.
# Should match strings that begin with:
# 4.1-1234-g12345
# 4.1.14-rc8-1234-g12345
# The 'rc*' pattern is part of the kernel version.
# TODO: add patches count extraction as well.
KERNEL_VERSION_MATCH = re.compile(r"^(?P<version>\d+\.{1}\d+(?:\.{1}\d+)?)")
KERNEL_RC_VERSION_MATCH = re.compile(
r"^(?P<version>\d+\.{1}\d+(?:\.{1}\d+)?-{1}rc\d*)")
def _search_prev_build_doc(build_doc, database):
"""Search for a similar defconfig document in the database.
Search for an already imported defconfig/build document in the database
and return its object ID and creation date. This is done to make sure
we do not create duplicate documents when re-importing the same data or
updating it.
:param build_doc: The new defconfig document.
:param database: The db connection.
:return The previous doc ID and its creation date, or None.
"""
doc_id = None
c_date = None
if build_doc and database:
spec = {
models.ARCHITECTURE_KEY: build_doc.arch,
models.DEFCONFIG_FULL_KEY: build_doc.defconfig_full,
models.DEFCONFIG_KEY: build_doc.defconfig,
models.GIT_BRANCH_KEY: build_doc.git_branch,
models.JOB_KEY: build_doc.job,
models.KERNEL_KEY: build_doc.kernel,
models.BUILD_ENVIRONMENT_KEY: build_doc.build_environment
}
collection = database[models.BUILD_COLLECTION]
prev_doc_count = collection.count_documents(spec, limit=2)
if prev_doc_count > 0:
if prev_doc_count == 1:
prev_doc = utils.db.find_one2(collection, spec)
doc_id = prev_doc.get(models.ID_KEY)
c_date = prev_doc.get(models.CREATED_KEY)
else:
utils.LOG.warn(
"Found multiple defconfig docs matching: {}".format(spec))
utils.LOG.error(
"Cannot keep old document ID, don't know which one to "
"use!")
return doc_id, c_date
class BuildError(Exception):
def __init__(self, code, *args, **kwargs):
self.code = code
self.from_exc = kwargs.pop('from_exc', None)
super(BuildError, self).__init__(*args, **kwargs)
def _update_job_doc(job_doc, job_id, status, build_doc, database):
"""Update the JobDocument with values from a BuildDocument.
:param job_doc: The job document to update.
:type job_doc: JobDocument
:param job_id: The ID of the job document.
:param status: The job status value.
:type status: string
:param build_doc: A BuildDocument object.
:type build_doc: BuildDocument
:param database: The database connection.
"""
to_update = False
ret_val = 201
if (job_id and job_doc.id != job_id):
job_doc.id = job_id
to_update = True
if job_doc.status != status:
job_doc.status = status
to_update = True
no_git = all([
not job_doc.git_url,
not job_doc.git_commit,
not job_doc.git_describe,
not job_doc.git_describe_v
])
no_compiler = all([
not job_doc.compiler,
not job_doc.compiler_version,
not job_doc.compiler_version_ext,
not job_doc.compiler_version_full,
not job_doc.cross_compile
])
if (build_doc and no_git and no_compiler):
# Kind of a hack:
# We want to store some metadata at the job document level as well,
# like git tree, git commit...
# Since, at the moment, we do not have the metadata file at the job
# level we need to pick one from the build documents, and extract some
# values.
if isinstance(build_doc, mbuild.BuildDocument):
if (build_doc.job == job_doc.job and
build_doc.kernel == job_doc.kernel):
job_doc.git_commit = build_doc.git_commit
job_doc.git_describe = build_doc.git_describe
job_doc.git_describe_v = build_doc.git_describe_v
job_doc.kernel_version = build_doc.kernel_version
job_doc.git_url = build_doc.git_url
job_doc.compiler = build_doc.compiler
job_doc.compiler_version = build_doc.compiler_version
job_doc.compiler_version_ext = build_doc.compiler_version_ext
job_doc.compiler_version_full = build_doc.compiler_version_full
job_doc.cross_compile = build_doc.cross_compile
to_update = True
if to_update:
ret_val, _ = utils.db.save(database, job_doc)
return ret_val
def _get_or_create_job(meta, database, db_options):
"""Get or create a job in the database.
:param meta: The build metadata (including the revision data).
:type meta: dict
:param database: The mongodb database connection.
:param db_options: The database connection options.
:type db_options: dict
:return a 3-tuple: return value, job document and job ID.
"""
ret_val = 201
job_doc = None
job_id = None
rev = meta["bmeta"]["revision"]
tree, descr, branch = (rev[key] for key in ["tree", "describe", "branch"])
redis_conn = redisdb.get_db_connection(db_options)
# We might be importing builds in parallel through multi-processes. Keep a
# lock here when looking for a job or we might end up with multiple job
# creations.
# ToDo: rename Job as Revision since that's what it really is
lock_key = "build-import-{}-{}-{}".format(tree, descr, branch)
with redis.lock.Lock(redis_conn, lock_key, timeout=5):
p_doc = utils.db.find_one2(
database[models.JOB_COLLECTION],
{
models.JOB_KEY: tree,
models.KERNEL_KEY: descr,
models.GIT_BRANCH_KEY: branch,
})
if p_doc:
job_doc = mjob.JobDocument.from_json(p_doc)
job_id = job_doc.id
else:
job_doc = mjob.JobDocument(tree, descr, branch)
job_doc.status = models.BUILD_STATUS
job_doc.created_on = datetime.datetime.now(tz=bson.tz_util.utc)
ret_val, job_id = utils.db.save(database, job_doc)
job_doc.id = job_id
return ret_val, job_doc, job_id
def _get_build(meta, database):
"""Make a BuildDocument object and return it"""
bmeta, steps, artifacts = (meta[key] for key in [
"bmeta", "steps", "artifacts"
])
env, kernel, rev, build = (bmeta[key] for key in [
"environment", "kernel", "revision", "build"
])
doc = mbuild.BuildDocument(
rev["tree"],
rev["describe"],
kernel["defconfig"],
rev["branch"],
env["name"],
defconfig_full=kernel["defconfig_full"]
)
# Required fields
doc.arch = env["arch"]
doc.git_commit = rev["commit"]
doc.git_describe = rev["describe"]
doc.status = build["status"]
doc.git_url = rev["url"]
doc.file_server_resource = kernel["publish_path"]
doc.compiler_version_full = env["compiler_version_full"]
doc.compiler_version_ext = doc.compiler_version_full # ToDo: deprecate
# Optional fields
uname = env.get("platform", {}).get("uname")
if uname and len(uname) == 6 and not uname[5]:
uname[5] = list(steps[0]['cpus'])[0]
doc.build_platform = uname or []
doc.build_time = build.get("duration")
doc.compiler = env.get("compiler")
doc.compiler_version = env.get("compiler_version")
doc.cross_compile = env.get("cross_compile")
doc.git_describe_v = rev.get("describe_verbose")
doc.text_offset = kernel.get("text_offset")
doc.vmlinux_bss_size = kernel.get("vmlinux_bss_size")
doc.vmlinux_data_size = kernel.get("vmlinux_data_size")
doc.vmlinux_file_size = kernel.get("vmlinux_file_size")
doc.vmlinux_text_size = kernel.get("vmlinux_text_size")
# Artifacts fields
def _find_artifacts(artifacts, step, key=None, artifact_type=None):
data = artifacts.get(step)
found = list()
if data:
for entry in data:
if (key and entry.get("key") != key) or \
(artifact_type and entry.get("type") != artifact_type):
continue
found.append(entry)
return found
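# For example, with illustrative data of the shape this helper expects:
#   artifacts = {"kernel": [{"key": "image", "type": "file", "path": "zImage"}]}
# _find_artifacts(artifacts, "kernel", key="image") returns that single entry;
# entries failing any requested "key"/"type" filter are skipped.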
kernel_config = _find_artifacts(artifacts, 'config', 'config')
doc.kernel_config = kernel_config[0]['path'] if kernel_config else None
doc.kconfig_fragments = [
entry['path'] for entry in
_find_artifacts(artifacts, 'config', 'fragment')
]
kernel_images = _find_artifacts(artifacts, 'kernel', 'image')
doc.kernel_image = kernel_images[0]['path'] if kernel_images else None
system_map = _find_artifacts(artifacts, 'kernel', 'system_map')
doc.system_map = system_map[0]['path'] if system_map else None
modules = _find_artifacts(artifacts, 'modules', artifact_type='tarball')
doc.modules = modules[0]['path'] if modules else None
dtbs = _find_artifacts(artifacts, 'dtbs', artifact_type='directory')
doc.dtb_dir = 'dtbs' if dtbs else None
doc.dtb_dir_data = dtbs[0]['contents'] if dtbs else []
# Build log
log_artifacts = [
_find_artifacts(artifacts, step, 'log')
for step in ['kernel', 'modules']
]
doc.kernel_build_logs = [log[0]['path'] for log in log_artifacts if log]
doc.build_log = 'logs'
doc.errors = 0
doc.warnings = 0
# Constant fields
# FIXME: set in bmeta.json
doc.version = "1.1"
doc.build_type = "kernel"
# Unused fields
# FIXME: delete or make use of them if they're justified
doc.file_server_url = None
doc.kernel_image_size = None
doc.modules_size = None
doc.modules_dir = None
doc.kernel_version = None
return doc
def import_single_build(meta, db_options, base_path=utils.BASE_PATH):
"""Import a single build from the file system.
:param meta: The build metadata containing the necessary data.
:type meta: dictionary
:param db_options: The database connection options.
:type db_options: dictionary
:param base_path: The base path on the file system where to look for.
:type base_path: string
:return The build id, job id and errors
"""
build_id = None
job_id = None
database = utils.db.get_db_connection(db_options)
ret_val, job_doc, job_id = _get_or_create_job(meta, database, db_options)
if ret_val != 201:
return None, None, {500: ["Failed to create job document"]}
build_doc = _get_build(meta, database)
build_doc.job_id = job_doc.id
doc_id, c_date = _search_prev_build_doc(build_doc, database)
build_doc.id = doc_id
build_doc.created_on = c_date or datetime.datetime.now(tz=bson.tz_util.utc)
ret_val = _update_job_doc(
job_doc, job_id, job_doc.status, build_doc, database)
if ret_val != 201:
return None, None, {500: ["Failed to update job document"]}
ret_val, build_id = utils.db.save(database, build_doc)
if ret_val != 201:
return None, None, {500: ["Failed to save build document"]}
return build_id, job_id, {}
| kernelci/kernelci-backend | app/utils/build/__init__.py | Python | lgpl-2.1 | 12,705 |
# coding: utf-8
# Copyright (C) 2014 by Ronnie Sahlberg <[email protected]>
# Copyright (C) 2015 by Markus Rosjat <[email protected]>
# SPDX-FileCopyrightText: 2014 The python-scsi Authors
#
# SPDX-License-Identifier: LGPL-2.1-or-later
import unittest
from pyscsi.pyscsi import scsi_enum_inquiry as INQUIRY
from pyscsi.pyscsi.scsi_cdb_inquiry import Inquiry
from pyscsi.pyscsi.scsi_enum_command import sbc
from pyscsi.utils.converter import scsi_int_to_ba
from .mock_device import MockDevice, MockSCSI
class MockInquiryStandard(MockDevice):
def execute(self, cmd):
cmd.datain[0] = 0x25 # QUAL:1 TYPE:5
cmd.datain[1] = 0x80 # RMB:1
cmd.datain[2] = 0x07 # VERSION:7
cmd.datain[3] = 0x23 # NORMACA:1 HISUP:0 RDF:3
cmd.datain[4] = 0x40 # ADDITIONAL LENGTH:64
cmd.datain[5] = 0xb9 # SCCS:1 ACC:0 TGPS:3 3PC:1 PROTECT:1
cmd.datain[6] = 0x71 # ENCSERV:1 VS:1 MULTIP:1 ADDR16:1
cmd.datain[7] = 0x33 # WBUS16:1 SYNC:1 CMDQUE:1 VS2:1
# t10 vendor id
cmd.datain[8:16] = bytearray(ord(c) for c in ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'])
# product id
cmd.datain[16:32] = bytearray(ord(c) for c in ['i', 'i', 'i', 'i', 'i', 'i', 'i', 'i',
'j', 'j', 'j', 'j', 'j', 'j', 'j', 'j'])
# product revision level
cmd.datain[32:36] = bytearray(ord(c) for c in ['r', 'e', 'v', 'n'])
cmd.datain[56] = 0x09 # CLOCKING:2 QAS:0 IUS:1
class MockLBP(MockDevice):
def execute(self, cmd):
cmd.datain[0] = 0x00 # QUAL:0 TYPE:0
cmd.datain[1] = 0xb2 # logical block provisioning
cmd.datain[2] = 0x00 #
cmd.datain[3] = 0x04 # page length == 4
cmd.datain[4] = 0x12 # threshold exponent
cmd.datain[5] = 0xe7 # LBPU:1 LBPWS:1 LBPWS10:1 LBPRZ:1 ANC_SUP:1 DP:1
cmd.datain[6] = 0x02 # Provisioning Type:2
cmd.datain[7] = 0x00 #
class MockUSN(MockDevice):
def execute(self, cmd):
cmd.datain[0] = 0x00 # QUAL:0 TYPE:0
cmd.datain[1] = 0x80 # unit serial number
cmd.datain[2] = 0x00 #
cmd.datain[3] = 0x04 # page length == 4
cmd.datain[4:8] = "ABCD".encode()
class MockDevId(MockDevice):
def execute(self, cmd):
cmd.datain[0] = 0x00 # QUAL:0 TYPE:0
cmd.datain[1] = 0x83 # device identifier
cmd.datain[2] = 0x00
cmd.datain[3] = 0x00
pos = 4
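# Each designation descriptor below is a 4-byte header followed by the
# designator itself: byte 0 packs the protocol identifier (high nibble) and
# code set (low nibble), byte 1 packs PIV / association / designator type,
# and byte 3 holds the designator length -- the same fields the assertions
# in UnmarshallInquiryTest.test_main() decode and check.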
# Designation Descriptor: T10_VENDOR_ID
t10 = bytearray(8)
t10[0] = ord('T')
t10[1] = ord('e')
t10[2] = ord('s')
t10[3] = ord('t')
t10[4] = ord(' ')
t10[5] = ord('T')
t10[6] = ord('1')
t10[7] = ord('0')
dd = bytearray(4)
dd += t10
dd[0] = 0x52 # iSCSI, ASCII
dd[1] = 0xa1 # AssociatedWithTargetDevice, T10_VENDOR_ID
dd[3] = len(t10)
cmd.datain[pos:pos + len(dd)] = dd
pos += len(dd)
# Designation Descriptor: EUI-64, 8 byte version
eui = bytearray(8)
# IEEE company identifier
eui[0] = 0x11
eui[1] = 0x22
eui[2] = 0x33
# vendor specific
eui[3] = ord('a')
eui[4] = ord('b')
eui[5] = ord('c')
eui[6] = ord('d')
eui[7] = ord('e')
dd = bytearray(4)
dd += eui
dd[0] = 0x01 # BINARY
dd[1] = 0x22 # AssociatedWithTargetDevice, EUI-64
dd[2:4] = scsi_int_to_ba(len(t10), 2)
cmd.datain[pos:pos + len(dd)] = dd
pos += len(dd)
cmd.datain[2:4] = scsi_int_to_ba(pos - 4, 2)
class MockReferrals(MockDevice):
def execute(self, cmd):
cmd.datain[0] = 0x00 # QUAL:0 TYPE:0
cmd.datain[1] = 0xb3 # referrals
cmd.datain[2] = 0x00 #
cmd.datain[3] = 0x0c # page length: 12
cmd.datain[11] = 23
cmd.datain[15] = 37
class MockExtendedInquiry(MockDevice):
def execute(self, cmd):
cmd.datain[0] = 0x00 # QUAL:0 TYPE:0
cmd.datain[1] = 0x86 # extended inquiry
cmd.datain[2] = 0x00 #
cmd.datain[3] = 0x3c # page length: 60
cmd.datain[4] = 0x57 # activate microcode:1 spt:2 grd_chk:1
# app_chk:1 ref_chk:1
cmd.datain[5] = 0x33 # uask_sup:1 group_sup:1 prior_sup:0 headsup:0
# ordsup:1 simpsup:1
cmd.datain[6] = 0x05 # wu_sup:0 crd_sup:1 nv_sup:0 v_sup:1
cmd.datain[7] = 0x11 # p_i_i_sup:1 luiclr:1
cmd.datain[8] = 0x11 # r_sup:1 cbcs:1
cmd.datain[9] = 0x03 # multi...:3
cmd.datain[11] = 0x0f # extended...:15
cmd.datain[12] = 0xe0 # poa_sup:1 hra_sup:1 vsa_sup:1
cmd.datain[13] = 0x05 # maximum...:5
class UnmarshallInquiryTest(unittest.TestCase):
def test_main(self):
with MockSCSI(MockInquiryStandard(sbc)) as s:
cmd = s.inquiry()
i = cmd.result
self.assertEqual(i['peripheral_qualifier'], 1)
self.assertEqual(i['peripheral_device_type'], 5)
self.assertEqual(i['rmb'], 1)
self.assertEqual(i['version'], 7)
self.assertEqual(i['normaca'], 1)
self.assertEqual(i['hisup'], 0)
self.assertEqual(i['response_data_format'], 3)
self.assertEqual(i['additional_length'], 64)
self.assertEqual(i['sccs'], 1)
self.assertEqual(i['acc'], 0)
self.assertEqual(i['tpgs'], 3)
self.assertEqual(i['3pc'], 1)
self.assertEqual(i['protect'], 1)
self.assertEqual(i['encserv'], 1)
self.assertEqual(i['vs'], 1)
self.assertEqual(i['multip'], 1)
self.assertEqual(i['addr16'], 1)
self.assertEqual(i['wbus16'], 1)
self.assertEqual(i['sync'], 1)
self.assertEqual(i['cmdque'], 1)
self.assertEqual(i['vs2'], 1)
self.assertEqual(i['clocking'], 2)
self.assertEqual(i['qas'], 0)
self.assertEqual(i['ius'], 1)
self.assertEqual(i['t10_vendor_identification'].decode("utf-8"), 'abcdefgh')
self.assertEqual(i['product_identification'].decode("utf-8"), 'iiiiiiiijjjjjjjj')
self.assertEqual(i['product_revision_level'].decode("utf-8"), 'revn')
d = Inquiry.unmarshall_datain(Inquiry.marshall_datain(i))
self.assertEqual(d, i)
with MockSCSI(MockLBP(sbc)) as s:
cmd = s.inquiry(evpd=1, page_code=INQUIRY.VPD.LOGICAL_BLOCK_PROVISIONING)
i = cmd.result
self.assertEqual(i['peripheral_qualifier'], 0)
self.assertEqual(i['peripheral_device_type'], 0)
self.assertEqual(i['threshold_exponent'], 0x12)
self.assertEqual(i['lbpu'], 1)
self.assertEqual(i['lpbws'], 1)
self.assertEqual(i['lbpws10'], 1)
self.assertEqual(i['lbprz'], 1)
self.assertEqual(i['anc_sup'], 1)
self.assertEqual(i['dp'], 1)
self.assertEqual(i['provisioning_type'], INQUIRY.PROVISIONING_TYPE.THIN_PROVISIONED)
d = Inquiry.unmarshall_datain(Inquiry.marshall_datain(i), evpd=1)
self.assertEqual(d, i)
with MockSCSI(MockUSN(sbc)) as s:
cmd = s.inquiry(evpd=1, page_code=INQUIRY.VPD.UNIT_SERIAL_NUMBER)
i = cmd.result
self.assertEqual(i['peripheral_qualifier'], 0)
self.assertEqual(i['peripheral_device_type'], 0)
self.assertEqual(i['unit_serial_number'].decode("utf-8"), "ABCD")
d = Inquiry.unmarshall_datain(Inquiry.marshall_datain(i), evpd=1)
self.assertEqual(d, i)
with MockSCSI(MockReferrals(sbc)) as s:
cmd = s.inquiry(evpd=1, page_code=INQUIRY.VPD.REFERRALS)
i = cmd.result
self.assertEqual(i['peripheral_qualifier'], 0)
self.assertEqual(i['peripheral_device_type'], 0)
self.assertEqual(i['user_data_segment_size'], 23)
self.assertEqual(i['user_data_segment_multiplier'], 37)
d = Inquiry.unmarshall_datain(Inquiry.marshall_datain(i), evpd=1)
self.assertEqual(d, i)
with MockSCSI(MockExtendedInquiry(sbc)) as s:
cmd = s.inquiry(evpd=1, page_code=INQUIRY.VPD.EXTENDED_INQUIRY_DATA)
i = cmd.result
self.assertEqual(i['peripheral_qualifier'], 0)
self.assertEqual(i['peripheral_device_type'], 0)
self.assertEqual(i['activate_microcode'], 1)
self.assertEqual(i['spt'], 2)
self.assertEqual(i['grd_chk'], 1)
self.assertEqual(i['app_chk'], 1)
self.assertEqual(i['ref_chk'], 1)
self.assertEqual(i['uask_sup'], 1)
self.assertEqual(i['group_sup'], 1)
self.assertEqual(i['prior_sup'], 0)
self.assertEqual(i['headsup'], 0)
self.assertEqual(i['ordsup'], 1)
self.assertEqual(i['simpsup'], 1)
self.assertEqual(i['wu_sup'], 0)
self.assertEqual(i['crd_sup'], 1)
self.assertEqual(i['nv_sup'], 0)
self.assertEqual(i['v_sup'], 1)
self.assertEqual(i['p_i_i_sup'], 1)
self.assertEqual(i['luiclr'], 1)
self.assertEqual(i['r_sup'], 1)
self.assertEqual(i['cbcs'], 1)
self.assertEqual(i['multi_it_nexus_microcode_download'], 3)
self.assertEqual(i['extended_self_test_completion_minutes'], 15)
self.assertEqual(i['poa_sup'], 1)
self.assertEqual(i['hra_sup'], 1)
self.assertEqual(i['vsa_sup'], 1)
self.assertEqual(i['maximum_supported_sense_data_length'], 5)
d = Inquiry.unmarshall_datain(Inquiry.marshall_datain(i), evpd=1)
self.assertEqual(d, i)
s.device = MockDevId(sbc)
cmd = s.inquiry(evpd=1, page_code=INQUIRY.VPD.DEVICE_IDENTIFICATION)
i = cmd.result
self.assertEqual(i['peripheral_qualifier'], 0)
self.assertEqual(i['peripheral_device_type'], 0)
dd = i['designator_descriptors']
self.assertEqual(len(dd), 2)
# T10 designation descriptor
self.assertEqual(dd[0]['association'], 2)
self.assertEqual(dd[0]['code_set'], 2)
self.assertEqual(dd[0]['designator_length'], 8)
self.assertEqual(dd[0]['designator_type'], 1)
self.assertEqual(dd[0]['piv'], 1)
self.assertEqual(dd[0]['protocol_identifier'], 5)
self.assertEqual(dd[0]['designator']['t10_vendor_id'].decode("utf-8"), 'Test T10')
self.assertEqual(dd[0]['designator']['vendor_specific_id'].decode("utf-8"), '')
# EUI-64 designation descriptor
self.assertEqual(dd[1]['association'], 2)
self.assertEqual(dd[1]['code_set'], 1)
self.assertEqual(dd[1]['designator_length'], 8)
self.assertEqual(dd[1]['designator_type'], 2)
self.assertEqual(dd[1]['piv'], 0)
self.assertFalse(hasattr(dd[1], 'protocol_identifier'))
self.assertEqual(dd[1]['designator']['ieee_company_id'], 0x112233)
self.assertEqual(dd[1]['designator']['vendor_specific_extension_id'].decode("utf-8"), 'abcde')
d = Inquiry.unmarshall_datain(Inquiry.marshall_datain(i), evpd=1)
self.assertEqual(d, i)
| rosjat/python-scsi | tests/test_unmarshall_inquiry.py | Python | lgpl-2.1 | 11,441 |
# -*- coding: utf-8 -*-
"""
irc/server.py
Copyright © 2009 Ferry Boender
Copyright © 2012 Jason R. Coombs
This server has basic support for:
* Connecting
* Channels
* Nicknames
* Public/private messages
It is MISSING support for notably:
* Server linking
* Modes (user and channel)
* Proper error reporting
* Basically everything else
It is mostly useful as a testing tool or perhaps for building something like a
private proxy on. Do NOT use it in any kind of production code or anything that
will ever be connected to by the public.
"""
#
# Very simple hacky ugly IRC server.
#
# Todo:
# - Encode format for each message and reply with events.codes['needmoreparams']
# - starting server when already started doesn't work properly. PID file is not changed, no error message is displayed.
# - Delete channel if last user leaves.
# - [ERROR] <socket.error instance at 0x7f9f203dfb90> (better error msg required)
# - Empty channels are left behind
# - No Op assigned when new channel is created.
# - User can /join multiple times (doesn't add more to channel, does say 'joined')
# - PING timeouts
# - Allow all numerical commands.
# - Users can send commands to channels they are not in (PART)
# Not Todo (Won't be supported)
# - Server linking.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import print_function, absolute_import
import argparse
import logging
import socket
import select
import re
from . import client
from . import _py2_compat
from . import logging as log_util
from . import events
from . import buffer
SRV_WELCOME = "Welcome to %s v%s, the ugliest IRC server in the world." % (
__name__, client.VERSION)
log = logging.getLogger(__name__)
class IRCError(Exception):
"""
Exception thrown by IRC command handlers to notify client of a server/client error.
"""
def __init__(self, code, value):
self.code = code
self.value = value
def __str__(self):
return repr(self.value)
@classmethod
def from_name(cls, name, value):
return cls(events.codes[name], value)
class IRCChannel(object):
"""
Object representing an IRC channel.
"""
def __init__(self, name, topic='No topic'):
self.name = name
self.topic_by = 'Unknown'
self.topic = topic
self.clients = set()
class IRCClient(_py2_compat.socketserver.BaseRequestHandler):
"""
IRC client connect and command handling. Client connection is handled by
the `handle` method which sets up a two-way communication with the client.
It then handles commands sent by the client by dispatching them to the
handle_ methods.
"""
class Disconnect(BaseException): pass
def __init__(self, request, client_address, server):
self.user = None
self.host = client_address # Client's hostname / ip.
self.realname = None # Client's real name
self.nick = None # Client's currently registered nickname
self.send_queue = [] # Messages to send to client (strings)
self.channels = {} # Channels the client is in
_py2_compat.socketserver.BaseRequestHandler.__init__(self, request,
client_address, server)
def handle(self):
log.info('Client connected: %s', self.client_ident())
self.buffer = buffer.LineBuffer()
try:
while True:
self._handle_one()
except self.Disconnect:
self.request.close()
def _handle_one(self):
"""
Handle one read/write cycle.
"""
ready_to_read, ready_to_write, in_error = select.select(
[self.request], [self.request], [self.request], 0.1)
if in_error:
raise self.Disconnect()
# Write any commands to the client
while self.send_queue and ready_to_write:
msg = self.send_queue.pop(0)
self._send(msg)
# See if the client has any commands for us.
if ready_to_read:
self._handle_incoming()
def _handle_incoming(self):
try:
data = self.request.recv(1024)
except Exception:
raise self.Disconnect()
if not data:
raise self.Disconnect()
self.buffer.feed(data)
for line in self.buffer:
self._handle_line(line)
def _handle_line(self, line):
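# For example, the line "PRIVMSG #chan :hello" is dispatched to
# self.handle_privmsg with params "#chan :hello"; a command with no
# matching handle_* method raises an 'unknowncommand' IRCError.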
try:
log.debug('from %s: %s' % (self.client_ident(), line))
command, sep, params = line.partition(' ')
handler = getattr(self, 'handle_%s' % command.lower(), None)
if not handler:
log.info('No handler for command: %s. '
'Full line: %s' % (command, line))
raise IRCError.from_name('unknowncommand',
'%s :Unknown command' % command)
response = handler(params)
except AttributeError as e:
log.error(_py2_compat.str(e))
raise
except IRCError as e:
response = ':%s %s %s' % (self.server.servername, e.code, e.value)
log.error(response)
except Exception as e:
response = ':%s ERROR %r' % (self.server.servername, e)
log.error(response)
raise
if response:
self._send(response)
def _send(self, msg):
log.debug('to %s: %s', self.client_ident(), msg)
self.request.send(msg + '\r\n')
def handle_nick(self, params):
"""
Handle the initial setting of the user's nickname and nick changes.
"""
nick = params
# Valid nickname?
if re.search('[^a-zA-Z0-9\-\[\]\'`^{}_]', nick):
raise IRCError.from_name('erroneusnickname', ':%s' % nick)
if self.server.clients.get(nick, None) == self:
# Already registered to user
return
if nick in self.server.clients:
# Someone else is using the nick
raise IRCError.from_name('nicknameinuse', 'NICK :%s' % (nick))
if not self.nick:
# New connection and nick is available; register and send welcome
# and MOTD.
self.nick = nick
self.server.clients[nick] = self
response = ':%s %s %s :%s' % (self.server.servername,
events.codes['welcome'], self.nick, SRV_WELCOME)
self.send_queue.append(response)
response = ':%s 376 %s :End of MOTD command.' % (
self.server.servername, self.nick)
self.send_queue.append(response)
return
# Nick is available. Change the nick.
message = ':%s NICK :%s' % (self.client_ident(), nick)
self.server.clients.pop(self.nick)
self.nick = nick
self.server.clients[self.nick] = self
# Send a notification of the nick change to all the clients in the
# channels the client is in.
for channel in self.channels.values():
self._send_to_others(message, channel)
# Send a notification of the nick change to the client itself
return message
def handle_user(self, params):
"""
Handle the USER command which identifies the user to the server.
"""
params = params.split(' ', 3)
if len(params) != 4:
raise IRCError.from_name('needmoreparams',
'USER :Not enough parameters')
user, mode, unused, realname = params
self.user = user
self.mode = mode
self.realname = realname
return ''
def handle_ping(self, params):
"""
Handle client PING requests to keep the connection alive.
"""
response = ':%s PONG :%s' % (self.server.servername, self.server.servername)
return response
def handle_join(self, params):
"""
Handle the JOINing of a user to a channel. Valid channel names start
with a # and consist of a-z, A-Z, 0-9 and/or '_'.
"""
channel_names = params.split(' ', 1)[0] # Ignore keys
for channel_name in channel_names.split(','):
r_channel_name = channel_name.strip()
# Valid channel name?
if not re.match('^#([a-zA-Z0-9_])+$', r_channel_name):
raise IRCError.from_name('nosuchchannel',
'%s :No such channel' % r_channel_name)
# Add user to the channel (create new channel if not exists)
channel = self.server.channels.setdefault(r_channel_name, IRCChannel(r_channel_name))
channel.clients.add(self)
# Add channel to user's channel list
self.channels[channel.name] = channel
# Send the topic
response_join = ':%s TOPIC %s :%s' % (channel.topic_by, channel.name, channel.topic)
self.send_queue.append(response_join)
# Send join message to everybody in the channel, including yourself and
# send user list of the channel back to the user.
response_join = ':%s JOIN :%s' % (self.client_ident(), r_channel_name)
for client in channel.clients:
client.send_queue.append(response_join)
nicks = [client.nick for client in channel.clients]
response_userlist = ':%s 353 %s = %s :%s' % (self.server.servername, self.nick, channel.name, ' '.join(nicks))
self.send_queue.append(response_userlist)
response = ':%s 366 %s %s :End of /NAMES list' % (self.server.servername, self.nick, channel.name)
self.send_queue.append(response)
def handle_privmsg(self, params):
"""
Handle sending a private message to a user or channel.
"""
target, sep, msg = params.partition(' ')
if not msg:
raise IRCError.from_name('needmoreparams',
'PRIVMSG :Not enough parameters')
message = ':%s PRIVMSG %s %s' % (self.client_ident(), target, msg)
if target.startswith('#') or target.startswith('$'):
# Message to channel. Check if the channel exists.
channel = self.server.channels.get(target)
if not channel:
raise IRCError.from_name('nosuchnick', 'PRIVMSG :%s' % target)
if not channel.name in self.channels:
# The user isn't in the channel.
raise IRCError.from_name('cannotsendtochan',
'%s :Cannot send to channel' % channel.name)
self._send_to_others(message, channel)
else:
# Message to user
client = self.server.clients.get(target, None)
if not client:
raise IRCError.from_name('nosuchnick', 'PRIVMSG :%s' % target)
client.send_queue.append(message)
def _send_to_others(self, message, channel):
"""
Send the message to all clients in the specified channel except for
self.
"""
other_clients = [client for client in channel.clients
if not client == self]
for client in other_clients:
client.send_queue.append(message)
def handle_topic(self, params):
"""
Handle a topic command.
"""
channel_name, sep, topic = params.partition(' ')
channel = self.server.channels.get(channel_name)
if not channel:
raise IRCError.from_name('nosuchnick', 'PRIVMSG :%s' % channel_name)
if not channel.name in self.channels:
# The user isn't in the channel.
raise IRCError.from_name('cannotsendtochan',
'%s :Cannot send to channel' % channel.name)
if topic:
channel.topic = topic.lstrip(':')
channel.topic_by = self.nick
message = ':%s TOPIC %s :%s' % (self.client_ident(), channel_name,
channel.topic)
return message
def handle_part(self, params):
"""
Handle a client parting from channel(s).
"""
for pchannel in params.split(','):
if pchannel.strip() in self.server.channels:
# Send message to all clients in all channels user is in, and
# remove the user from the channels.
channel = self.server.channels.get(pchannel.strip())
response = ':%s PART :%s' % (self.client_ident(), pchannel)
if channel:
for client in channel.clients:
client.send_queue.append(response)
channel.clients.remove(self)
self.channels.pop(pchannel)
else:
response = ':%s 403 %s :%s' % (self.server.servername, pchannel, pchannel)
self.send_queue.append(response)
def handle_quit(self, params):
"""
Handle the client breaking off the connection with a QUIT command.
"""
response = ':%s QUIT :%s' % (self.client_ident(), params.lstrip(':'))
# Send quit message to all clients in all channels user is in, and
# remove the user from the channels.
for channel in self.channels.values():
for client in channel.clients:
client.send_queue.append(response)
channel.clients.remove(self)
def handle_dump(self, params):
"""
Dump internal server information for debugging purposes.
"""
print("Clients:", self.server.clients)
for client in self.server.clients.values():
print(" ", client)
for channel in client.channels.values():
print(" ", channel.name)
print("Channels:", self.server.channels)
for channel in self.server.channels.values():
print(" ", channel.name, channel)
for client in channel.clients:
print(" ", client.nick, client)
def client_ident(self):
"""
Return the client identifier as included in many command replies.
"""
return client.NickMask.from_params(self.nick, self.user,
self.server.servername)
def finish(self):
"""
The client connection is finished. Do some cleanup to ensure that the
client doesn't linger around in any channel or the client list, in case
the client didn't properly close the connection with PART and QUIT.
"""
log.info('Client disconnected: %s', self.client_ident())
response = ':%s QUIT :EOF from client' % self.client_ident()
for channel in self.channels.values():
if self in channel.clients:
# Client is gone without properly QUITing or PARTing this
# channel.
for client in channel.clients:
client.send_queue.append(response)
channel.clients.remove(self)
self.server.clients.pop(self.nick)
log.info('Connection finished: %s', self.client_ident())
def __repr__(self):
"""
Return a user-readable description of the client
"""
return '<%s %s!%s@%s (%s)>' % (
self.__class__.__name__,
self.nick,
self.user,
self.host[0],
self.realname,
)
class IRCServer(_py2_compat.socketserver.ThreadingMixIn,
_py2_compat.socketserver.TCPServer):
daemon_threads = True
allow_reuse_address = True
channels = {}
"Existing channels (IRCChannel instances) by channel name"
clients = {}
"Connected clients (IRCClient instances) by nick name"
def __init__(self, *args, **kwargs):
self.servername = 'localhost'
self.channels = {}
self.clients = {}
_py2_compat.socketserver.TCPServer.__init__(self, *args, **kwargs)
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--address", dest="listen_address",
default='127.0.0.1', help="IP on which to listen")
parser.add_argument("-p", "--port", dest="listen_port", default=6667,
type=int, help="Port on which to listen")
log_util.add_arguments(parser)
return parser.parse_args()
def main():
options = get_args()
log_util.setup(options)
log.info("Starting irc.server")
#
# Start server
#
try:
bind_address = options.listen_address, options.listen_port
ircserver = IRCServer(bind_address, IRCClient)
log.info('Listening on {listen_address}:{listen_port}'.format(
**vars(options)))
ircserver.serve_forever()
except socket.error as e:
log.error(repr(e))
raise SystemExit(-2)
if __name__ == "__main__":
main()
| sim0629/irc | irc/server.py | Python | lgpl-2.1 | 17,734 |
########################################################################
#
# University of Southampton IT Innovation Centre, 2011
#
# Copyright in this library belongs to the University of Southampton
# University Road, Highfield, Southampton, UK, SO17 1BJ
#
# This software may not be used, sold, licensed, transferred, copied
# or reproduced in whole or in part in any manner or form or in or
# on any media by any person other than in accordance with the terms
# of the Licence Agreement supplied with the software, or otherwise
# without the prior written consent of the copyright owners.
#
# This software is distributed WITHOUT ANY WARRANTY, without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE, except where stated in the Licence Agreement supplied with
# the software.
#
# Created By : Mark McArdle
# Created Date : 2011-03-25
# Created for Project : PrestoPrime
#
########################################################################
import os.path
from celery.task import task
from celery.task.sets import subtask
from django.core.files import File
import logging
import subprocess
import string
import shutil
import pycurl
import tempfile
import re
import sys
class Storage:
def __init__(self):
self.contents = []
def store(self, buf):
self.contents.append(buf)
def __str__(self):
return ", ".join(self.contents)
@task
def copyfromurl(inputs,outputs,options={},callbacks=[]):
url = options["url"]
logging.info(url)
tfile = tempfile.NamedTemporaryFile('wb',delete=False)
retrieved_headers = Storage()
f = open(tfile.name,'w')
c = pycurl.Curl()
c.setopt(c.URL, str(url))
c.setopt(pycurl.FOLLOWLOCATION, 1)
c.setopt(c.WRITEFUNCTION, f.write)
c.setopt(c.HEADERFUNCTION, retrieved_headers.store)
c.perform()
status = c.getinfo(c.HTTP_CODE)
c.close()
f.close()
logging.debug(retrieved_headers)
filename = "Imported File"
for header in retrieved_headers.contents:
if header.lower().startswith("content-disposition"):
filename = re.match(".*filename=(?P<filename>.*)", header).group('filename')
if status >= 400:
logging.warn("Copy from URL %s returned error status code '%s'" % (url, status))
return { "message" : "Copy from url failed error_code '%s'" % status }
else:
mfileid = inputs[0]
from dataservice.models import MFile
mfile = MFile.objects.get(id=mfileid)
filename = mfile.service.get_unique_name(filename)
mfile.update_mfile(filename, file=File(open(tfile.name, 'r')))
mfile.save()
for callback in callbacks:
subtask(callback).delay()
return { "message" : "Copy from url was successful"}
# Blender Command Line API
#
# Render a Picture
# blender -b file.blend -o //file -F JPEG -x 1 -f 1
#
# Render a movie
# blender -b file.blend -x 1 -o //file -F MOVIE -s 003 -e 005 -a
#
# Render a Series
# blender -b file.blend -x 1 -o //file -F "PNG" -s ss -e ee -a
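# For example (with an illustrative scene path), calling render_blender with
# options={"frame": 3, "format": "PNG", "fname": "image"} builds the command:
#   blender -b scene.blend -x 1 -o <outputdir>/image.#### -F PNG -s 0003 -e 0003 -a
# and then moves <outputdir>/image.0003.png onto the requested output path.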
@task
def render_blender(inputs,outputs,options={},callbacks=[]):
padding = 4
frame = options["frame"]
if options.has_key("fname"):
fname = options["fname"]
else:
fname="image"
if options.has_key("format"):
format = options["format"]
else:
format="PNG"
mfileid = inputs[0]
from dataservice.models import MFile
mf = MFile.objects.get(id=mfileid)
inputfile = mf.file.path
outputfile = outputs[0]
logging.info("Processing render job %s frame: %s " % (inputfile,frame))
if not os.path.exists(inputfile):
logging.info("Scene %s does not exist" % inputfile)
return False
[outputdir,ffff]= os.path.split(outputfile)
hashes = "#" * padding
outputformat = "%s/%s.%s" % (outputdir,fname,hashes)
ss= string.zfill(str(frame), padding)
args = ["blender","-b",inputfile,"-x","1","-o",outputformat,"-F",format.upper(),"-s",ss,"-e",ss,"-a"]
logging.info(args)
n = str(frame).zfill(padding)
resultfile = os.path.join(outputdir,"%s.%s.%s"%(fname,n,format.lower()))
ret = subprocess.call(args)
if resultfile != outputfile:
logging.debug("result file %s is not outputfile %s ... Moving" % (resultfile, outputfile))
shutil.move(resultfile, outputfile)
for callback in callbacks:
subtask(callback).delay()
return ret
| mmcardle/MServe | django-mserve/jobservice/tasks.py | Python | lgpl-2.1 | 4,498 |
#!/usr/bin/env python
"""
setup.py file for augeas
"""
import os
prefix = os.environ.get("prefix", "/usr")
from distutils.core import setup
setup (name = 'python-augeas',
version = '0.3.0',
author = "Harald Hoyer",
author_email = "[email protected]",
description = """Python bindings for Augeas""",
py_modules = [ "augeas" ],
url = "http://augeas.net/",
)
| giraldeau/python-augeas | setup.py | Python | lgpl-2.1 | 420 |
# -*- python -*-
# Copyright (C) 2009-2013 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
pythondir = '/Users/igrokhotkov/projects/esp8266/esptools/crosstool-NG/builds/xtensa-lx106-elf/share/gcc-4.8.2/python'
libdir = '/Users/igrokhotkov/projects/esp8266/esptools/crosstool-NG/builds/xtensa-lx106-elf/xtensa-lx106-elf/lib'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
# Update module path. We want to find the relative path from libdir
# to pythondir, and then we want to apply that relative path to the
# directory holding the objfile with which this file is associated.
# This preserves relocatability of the gcc tree.
# Do a simple normalization that removes duplicate separators.
pythondir = os.path.normpath (pythondir)
libdir = os.path.normpath (libdir)
prefix = os.path.commonprefix ([libdir, pythondir])
# In some bizarre configuration we might have found a match in the
# middle of a directory name.
if prefix[-1] != '/':
prefix = os.path.dirname (prefix) + '/'
# Strip off the prefix.
pythondir = pythondir[len (prefix):]
libdir = libdir[len (prefix):]
# Compute the ".."s needed to get from libdir to the prefix.
dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
objfile = gdb.current_objfile ().filename
dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
if not dir_ in sys.path:
sys.path.insert(0, dir_)
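# Worked example with the paths above: the common prefix is
# ".../builds/xtensa-lx106-elf/", so pythondir is reduced to
# "share/gcc-4.8.2/python" and libdir to "xtensa-lx106-elf/lib" (two path
# components, hence dotdots == "../../"), giving
# dir_ = <objfile dir>/../../share/gcc-4.8.2/python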
# Load the pretty-printers.
from libstdcxx.v6.printers import register_libstdcxx_printers
register_libstdcxx_printers (gdb.current_objfile ())
| toddtreece/esp8266-Arduino | tools/macosx/xtensa-lx106-elf/xtensa-lx106-elf/sysroot/lib/libstdc++.a-gdb.py | Python | lgpl-2.1 | 2,463 |
from configparser import RawConfigParser
import os
import re
from subprocess import call, check_call, Popen, PIPE, STDOUT
import sys
from errors import Errors
from general import (
exists_and_is_directory, shellquote, print_stderr
)
from githelpers import has_objects_and_refs
class OptionFrom:
'''enum-like values to indicate the source of different options, used in
directory_to_backup_from, git_directory_from and branch_from'''
COMMAND_LINE = 1
CONFIGURATION_FILE = 2
DEFAULT_VALUE = 3
string_versions = { COMMAND_LINE : "command line",
CONFIGURATION_FILE : "configuration file",
DEFAULT_VALUE : "default value" }
class GibSetup:
def __init__(self, command_line_options):
self.configuration_file = '.gib.conf'
self.directory_to_backup = None
self.directory_to_backup_from = None
self.git_directory = None
self.git_directory_from = None
self.branch = None
self.branch_from = None
if command_line_options.directory:
self.directory_to_backup = command_line_options.directory
self.directory_to_backup_from = OptionFrom.COMMAND_LINE
else:
if 'HOME' not in os.environ:
# Then we can't use HOME as default directory:
print_stderr("The HOME environment variable was not set")
sys.exit(Errors.STRANGE_ENVIRONMENT)
self.directory_to_backup = os.environ['HOME']
self.directory_to_backup_from = OptionFrom.DEFAULT_VALUE
# We need to make sure that this is an absolute path before
# changing directory:
self.directory_to_backup = os.path.abspath(self.directory_to_backup)
if not exists_and_is_directory(self.directory_to_backup):
sys.exit(Errors.DIRECTORY_TO_BACKUP_MISSING)
# Now we know the directory that we're backing up, try to load the
# config file:
configuration = RawConfigParser()
configuration.read(os.path.join(self.directory_to_backup,
self.configuration_file))
# Now set the git directory:
if command_line_options.git_directory:
self.git_directory = command_line_options.git_directory
self.git_directory_from = OptionFrom.COMMAND_LINE
elif configuration.has_option('repository','git_directory'):
self.git_directory = configuration.get(
'repository','git_directory'
)
self.git_directory_from = OptionFrom.CONFIGURATION_FILE
else:
self.git_directory = os.path.join(self.directory_to_backup,'.git')
self.git_directory_from = OptionFrom.DEFAULT_VALUE
if not os.path.isabs(self.git_directory):
print_stderr("The git directory must be an absolute path.")
sys.exit(Errors.GIT_DIRECTORY_RELATIVE)
# And finally the branch:
if command_line_options.branch:
self.branch = command_line_options.branch
self.branch_from = OptionFrom.COMMAND_LINE
elif configuration.has_option('repository','branch'):
self.branch = configuration.get('repository','branch')
self.branch_from = OptionFrom.CONFIGURATION_FILE
else:
self.branch = 'master'
self.branch_from = OptionFrom.DEFAULT_VALUE
# Check that the git_directory ends in '.git':
if not re.search('\.git/*$',self.git_directory):
message = "The git directory ({}) did not end in '.git'"
print_stderr(message.format(self.git_directory))
sys.exit(Errors.BAD_GIT_DIRECTORY)
# Also check that it actually exists:
if not os.path.exists(self.git_directory):
message = "The git directory '{}' does not exist."
print_stderr(message.format(self.git_directory))
sys.exit(Errors.GIT_DIRECTORY_MISSING)
def get_directory_to_backup(self):
return self.directory_to_backup
def get_git_directory(self):
return self.git_directory
def get_file_list_directory(self):
return os.path.join(
self.get_git_directory(),
'file-lists'
)
def get_branch(self):
return self.branch
def print_settings(self):
print_stderr('''Settings for backup:
backing up the directory {} (set from the {})
... to the branch "{}" (set from the {})
... in the git repository {} (set from the {})'''.format(
self.directory_to_backup,
OptionFrom.string_versions[self.directory_to_backup_from],
self.branch,
OptionFrom.string_versions[self.branch_from],
self.git_directory,
OptionFrom.string_versions[self.git_directory_from]),
)
def get_invocation(self):
'''Return an invocation that would run the script with options
that will set directory_to_backup, git_directory and branch as on
this invocation. After init has been called, we can just specify
the directory to backup, since the configuration file .gib.conf in
that directory will store the git_directory and the branch. If
the directory to backup is just the current user's home directory,
then that doesn't need to be specified either.'''
invocation = sys.argv[0]
if self.directory_to_backup != os.environ['HOME']:
invocation += " " + "--directory="
invocation += shellquote(self.directory_to_backup)
return invocation
def git(self,rest_of_command):
'''Create a list (suitable for passing to subprocess.call or
subprocess.check_call) which runs a git command with the correct
git directory and work tree'''
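# For example, self.git(["status"]) evaluates to
#   ["git", "--git-dir=<git_directory>", "--work-tree=<directory_to_backup>", "status"]
# which can be passed straight to subprocess.call or check_call.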
return [ "git",
"--git-dir="+self.git_directory,
"--work-tree="+self.directory_to_backup ] + rest_of_command
def git_for_shell(self):
'''Returns a string with shell-safe invocation of git which can be used
in calls that are subject to shell interpretation.'''
command = "git --git-dir="+shellquote(self.git_directory)
command += " --work-tree="+shellquote(self.directory_to_backup)
return command
def git_initialized(self):
'''Returns True if it seems as if the git directory has already
been initialized, and returns False otherwise'''
return has_objects_and_refs(self.git_directory)
def abort_if_not_initialized(self):
'''Check that the git repository exists and exit otherwise'''
if not self.git_initialized():
message = "You don't seem to have initialized {} for backup."
print_stderr(message.format(self.directory_to_backup))
message = "Please use '{} init' to initialize it"
print_stderr(message.format(self.get_invocation()))
sys.exit(Errors.REPOSITORY_NOT_INITIALIZED)
def check_ref(self,ref):
'''Returns True if a ref can be resolved to a commit and False
otherwise.'''
return 0 == call(
self.git(["rev-parse","--verify",ref]),
stdout=open('/dev/null','w'),
stderr=STDOUT
)
def check_tree(self,tree):
'''Returns True if 'tree' can be understood as a tree, e.g. with
"git ls-tree" or false otherwise'''
with open('/dev/null','w') as null:
return 0 == call(
self.git(["ls-tree",tree]),
stdout=null,
stderr=STDOUT
)
def set_HEAD_to(self,ref):
'''Update head to point to a particular branch, without touching
the index or the working tree'''
check_call(
self.git(["symbolic-ref","HEAD","refs/heads/{}".format(ref)])
)
def currently_on_correct_branch(self):
'''Return True if HEAD currently points to 'self.branch', and
return False otherwise.'''
p = Popen(self.git(["symbolic-ref","HEAD"]),stdout=PIPE)
c = p.communicate()
if 0 != p.returncode:
print_stderr("Finding what HEAD points to failed")
sys.exit(Errors.FINDING_HEAD)
result = c[0].decode().strip()
if self.branch == result:
return True
elif ("refs/heads/"+self.branch) == result:
return True
else:
return False
def switch_to_correct_branch(self):
self.set_HEAD_to(self.branch)
self.abort_unless_HEAD_exists()
# Also reset the index to match HEAD. Otherwise things go
# horribly wrong when switching from backing up one computer to
# another, since the index is still that from the first one.
msg = "Now working on a new branch, so resetting the index to match..."
print_stderr(msg)
check_call(self.git(["read-tree","HEAD"]))
def config_value(self,key):
'''Retrieve the git config value for "key", or return
None if it is not defined'''
p = Popen(self.git(["config",key]),stdout=PIPE)
c = p.communicate()
if 0 == p.returncode:
# Then check that the option is right:
return c[0].decode().strip()
else:
return None
def set_config_value(self,key,value):
check_call(self.git(["config",key,value]))
def unset_config_value(self,key):
call(self.git(["config","--unset",key]))
def abort_unless_particular_config(self,key,required_value):
'''Unless the git config has "required_value" set for "key", exit.'''
current_value = self.config_value(key)
if current_value:
if current_value != required_value:
message = "The current value for {} is {}, should be: {}"
print_stderr(message.format(
key,
current_value,
required_value
))
sys.exit(Errors.GIT_CONFIG_ERROR)
else:
message = "The {} config option was not set, setting to {}"
print_stderr(message.format(key,required_value))
self.set_config_value(key,required_value)
def abort_unless_no_auto_gc(self):
'''Exit unless git config has gc.auto set to "0"'''
self.abort_unless_particular_config("gc.auto","0")
def abort_unless_HEAD_exists(self):
if not self.check_ref("HEAD"):
message = '''The branch you are trying to back up to does not exist.
(Perhaps you haven't run "{} init")'''
print_stderr(message.format(self.get_invocation()))
sys.exit(Errors.NO_SUCH_BRANCH)
| mhl/gib | gitsetup.py | Python | lgpl-2.1 | 10,749 |
#!/usr/bin/python3
#
# examples/xfixes-selection-notify.py -- demonstrate the XFIXES extension
# SelectionNotify event.
#
# Copyright (C) 2019
# Tony Crisci <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
# Python 2/3 compatibility.
from __future__ import print_function
import sys
import os
import time
# Change path so we find Xlib
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from Xlib.display import Display
from Xlib.ext import xfixes
def main(argv):
    if len(argv) != 2:
        sys.exit('usage: {0} SELECTION\n\n'
                 'SELECTION is typically PRIMARY, SECONDARY or CLIPBOARD.\n'
                 .format(argv[0]))
display = Display()
    sel_name = argv[1]
sel_atom = display.get_atom(sel_name)
if not display.has_extension('XFIXES'):
if display.query_extension('XFIXES') is None:
print('XFIXES extension not supported', file=sys.stderr)
return 1
xfixes_version = display.xfixes_query_version()
print('Found XFIXES version %s.%s' % (
xfixes_version.major_version,
xfixes_version.minor_version,
), file=sys.stderr)
screen = display.screen()
mask = xfixes.XFixesSetSelectionOwnerNotifyMask | \
xfixes.XFixesSelectionWindowDestroyNotifyMask | \
xfixes.XFixesSelectionClientCloseNotifyMask
display.xfixes_select_selection_input(screen.root, sel_atom, mask)
while True:
e = display.next_event()
print(e)
if (e.type, e.sub_code) == display.extension_event.SetSelectionOwnerNotify:
print('SetSelectionOwner: owner=0x{0:08x}'.format(e.owner.id))
elif (e.type, e.sub_code) == display.extension_event.SelectionWindowDestroyNotify:
print('SelectionWindowDestroy: owner=0x{0:08x}'.format(e.owner.id))
elif (e.type, e.sub_code) == display.extension_event.SelectionClientCloseNotify:
print('SelectionClientClose: owner=0x{0:08x}'.format(e.owner.id))
if __name__ == '__main__':
sys.exit(main(sys.argv))
| python-xlib/python-xlib | examples/xfixes-selection-notify.py | Python | lgpl-2.1 | 2,764 |
from sys import argv
import logging
from MonkeyScraper import MonkeyScraper
LOG_FILENAME = 'MonkeyScraper.log'
logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG)
def main(username, password, survey_url):
"""
Creates a MonkeyScraper, logs in, and scrapes the survey at the provided url
:param username: str: surveymonkey username
:param password: str: surveymonkey password
:param survey_url: str: the "analyze" page url for your survey
:return:
"""
# scraper = MonkeyScraper()
# scraper.init()
# scraper.log_in(username=username, password=password)
# scraper.scrape(survey_url)
# scraper.log_out()
# scraper.close()
with MonkeyScraper(username=username, password=password) as scraper:
survey = scraper.scrape(survey_url)
if __name__ == '__main__':
main(*argv[1:]) | cacampbell/MonkeyScraper | monkey_scraper.py | Python | lgpl-3.0 | 850 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Test suite for language_check."""
from __future__ import unicode_literals
import unittest
import warnings
from collections import namedtuple
import language_check
class TestLanguageTool(unittest.TestCase):
CheckTest = namedtuple('CheckTest', ('text', 'matches'))
Match = namedtuple('Match', ('fromy', 'fromx', 'ruleId'))
check_tests = {
'en': [
CheckTest(
('Paste your own text here... or check this text too see '
'a few of the problems that that LanguageTool can detect. '
'Did you notice that their is no spelcheckin included?'),
[
Match(0, 47, 'TOO_TO'),
Match(0, 132, 'THEIR_IS'),
]
),
],
'fr': [
CheckTest(
('Se texte est un exemple pour pour vous montrer '
'le fonctionnement de LanguageTool. '
'notez que LanguageTool ne comporte pas '
'de correcteur orthographique.'),
[
Match(0, 0, 'SE_CE'),
Match(0, 3, 'TE_NV'),
Match(0, 24, 'FRENCH_WORD_REPEAT_RULE'),
Match(0, 82, 'UPPERCASE_SENTENCE_START'),
]
),
CheckTest(
'je me rappelle de tout sans aucun soucis!',
[
Match(0, 0, 'UPPERCASE_SENTENCE_START'),
Match(0, 6, 'RAPPELER_DE'),
Match(0, 28, 'ACCORD_NOMBRE'),
Match(0, 34, 'FRENCH_WHITESPACE'),
]
),
],
}
correct_tests = {
'en-US': {
'that would of been to impressive.':
'That would have been too impressive.',
},
'fr': {
'il monte en haut si il veut.':
'Il monte s’il veut.',
},
}
def test_check(self):
lang_check = language_check.LanguageTool()
for language, tests in self.check_tests.items():
try:
lang_check.language = language
except ValueError:
version = language_check.get_version()
warnings.warn(
'LanguageTool {} doesn’t support language {!r}'
.format(version, language)
)
for text, expected_matches in tests:
matches = lang_check.check(text)
for expected_match in expected_matches:
for match in matches:
if (
(match.fromy, match.fromx, match.ruleId) ==
(expected_match.fromy, expected_match.fromx,
expected_match.ruleId)
):
break
else:
raise IndexError(
'can’t find {!r}'.format(expected_match))
def test_correct(self):
lang_check = language_check.LanguageTool()
for language, tests in self.correct_tests.items():
try:
lang_check.language = language
except ValueError:
version = language_check.get_version()
warnings.warn(
'LanguageTool {} doesn’t support language {!r}'
.format(version, language)
)
for text, result in tests.items():
self.assertEqual(lang_check.correct(text), result)
def test_languages(self):
self.assertIn('en', language_check.get_languages())
def test_version(self):
self.assertTrue(language_check.get_version())
def test_get_build_date(self):
self.assertTrue(language_check.get_build_date())
def test_get_directory(self):
path = language_check.get_directory()
language_check.set_directory(path)
self.assertEqual(path, language_check.get_directory())
def test_disable_spellcheck(self):
sentence_with_misspelling = 'This is baad.'
lang_check = language_check.LanguageTool()
self.assertTrue(lang_check.check(sentence_with_misspelling))
lang_check.disable_spellchecking()
self.assertFalse(lang_check.check(sentence_with_misspelling))
lang_check.enable_spellchecking()
self.assertTrue(lang_check.check(sentence_with_misspelling))
def test_README_with_unicode(self):
tool = language_check.LanguageTool('en-US')
text = ('A sentence with a error in the '
'Hitchhiker’s Guide tot he Galaxy')
matches = tool.check(text)
self.assertEqual(len(matches), 2)
self.assertEqual((matches[0].fromy, matches[0].fromx),
(0, 16))
self.assertEqual((matches[0].ruleId, matches[0].replacements),
('EN_A_VS_AN', ['an']))
self.assertEqual((matches[1].fromy, matches[1].fromx),
(0, 50))
self.assertEqual((matches[1].ruleId, matches[1].replacements),
('TOT_HE', ['to the']))
corrected = language_check.correct(text, matches)
self.assertEqual(corrected, 'A sentence with an error in the '
'Hitchhiker’s Guide to the Galaxy')
if __name__ == '__main__':
unittest.main()
| myint/language-check | test.py | Python | lgpl-3.0 | 5,483 |
###################################################################################
#
# Copyright (c) 2017-2019 MuK IT GmbH.
#
# This file is part of MuK Web Editor Utils
# (see https://mukit.at).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###################################################################################
| muk-it/muk_web | muk_web_editor_utils/__init__.py | Python | lgpl-3.0 | 979 |
# -*- coding: utf-8 -*-
# 2016-10-20T16:00+08:00
import fnmatch
import glob
import itertools
import os
import re
import subprocess
import sys
import fileutil
# Match version numbers of these formats:
# 1.2.3
# 1.2.3.4
version_number_re = r'([0-9]+(?:\.[0-9]+){2,3})'
# Match version numbers of this format:
# 1.2.3
incomplete_version_number_re = r'^[0-9]+(?:\.[0-9]+){2}$'
# Match a line, in Changelog.txt, which contains a valid version number
version_line_re = r'^### v{0}.*$'.format(version_number_re)
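# Illustrative behaviour (hypothetical changelog line, not from the original project):
#   re.match(version_line_re, '### v1.2.3 2016-10-20') matches, and group(1) == '1.2.3'
#   canonicalize_version_number('1.2.3') (defined below) then yields '1.2.3.0'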
def get_topmost_version_line(changelog_file):
with open(changelog_file, 'r', encoding=fileutil.get_file_encoding(changelog_file, 'utf-8')) as fp:
return next(filter(lambda line: re.match(version_line_re, line), fp.readlines()))
def get_version_number(version_line):
match_res = re.match(version_line_re, version_line)
assert match_res, 'Invalid version line'
if match_res:
return match_res.groups()[0]
# 1.2.3 -> 1.2.3.0
def canonicalize_version_number(version_number):
assert re.match(r'^{0}$'.format(version_number_re), version_number), 'Invalid version number format(neither x.x.x nor x.x.x.x)'
if re.match(incomplete_version_number_re, version_number):
version_number += '.0'
return version_number
def perror(*args, **kwargs):
sys.stderr.write(*args, **kwargs)
sys.exit(1)
def quote_path(path):
if path.startswith('"') and path.endswith('"'):
return path
return '"{0}"'.format(path)
def is_dll_or_exe(file):
assert os.path.isfile(file)
return fnmatch.fnmatch(file, '*.dll') or fnmatch.fnmatch(file, '*.exe')
def _get_full_path(candidate_path, file_name):
if candidate_path is None:
candidate_path = ''
if os.path.isfile(candidate_path):
return candidate_path
elif os.path.isdir(candidate_path):
return os.path.join(candidate_path, file_name)
else:
return os.path.join(os.path.dirname(sys.argv[0]), file_name)
def _iterate_module_files_legacy(module_path):
assert os.path.isdir(module_path)
yield from filter(is_dll_or_exe,
map(lambda item: os.path.join(module_path, item),
os.listdir(module_path)))
def _iterate_module_files_new(module_path):
assert os.path.isdir(module_path)
yield from filter(is_dll_or_exe,
filter(os.path.isfile,
map(lambda item_name: os.path.join(module_path, item_name),
map(lambda item: item.name,
os.scandir(module_path)))))
# `os.scandir` is new in Python 3.5, and Python 3.5 needs Windows Vista or higher.
if sys.version_info >= (3, 5):
iterate_module_files_v1 = _iterate_module_files_new
run_subprocess = subprocess.run
else:
iterate_module_files_v1 = _iterate_module_files_legacy
run_subprocess = subprocess.call
_module_patterns = '*.dll', '*.exe'
def iterate_module_files_v2(module_path):
assert os.path.isdir(module_path)
for pattern in _module_patterns:
pattern = os.path.join(module_path, pattern)
yield from glob.iglob(pattern)
def iterate_module_files_v3(module_path):
assert os.path.isdir(module_path)
yield from itertools.chain.from_iterable(
glob.iglob(pattern) for pattern in map(lambda pattern: os.path.join(module_path, pattern), _module_patterns))
def main():
"""
Usage:
SetPEVersion.py (--module-path=<PATH>) [--changelog=FILE] [--stampver=FILE] [--debug]
SetPEVersion.py -h | --help
SetPEVersion.py -v | --version
Options:
-c FILE --changelog=FILE Specify the full path of "Changelog.txt"
-s FILE --stampver=FILE Specify the full path of "StampVer.exe"
-m PATH --module-path=PATH Specify a single module file(DLL or EXE) or a directory that contains module files
-d --debug Show more messages for debug purpose
-h --help Show this help message
-v --version Show version message
"""
import docopt
import pprint
args = docopt.docopt(main.__doc__, version='SetPEVersion v0.1.0')
changelog = _get_full_path(args['--changelog'], 'Changelog.txt')
stampver = _get_full_path(args['--stampver'], 'StampVer.exe')
if not os.path.isfile(changelog):
perror('Changelog file not found at "{0}".'.format(changelog))
if not os.path.isfile(stampver):
perror('StampVer.exe not found at "{0}".'.format(changelog))
modules = []
if args['--module-path']:
if os.path.isfile(args['--module-path']):
modules.append(args['--module-path'])
elif os.path.isdir(args['--module-path']):
modules.extend(iterate_module_files_v3(args['--module-path']))
else:
perror('Invalid module path "{0}": Neither an existing file nor an existing directory.'.format(args['--module-path']))
else:
perror('"--module-path" option is required.')
# Get the topmost line which contains a valid version number from Changelog.txt
topmost_version_line = get_topmost_version_line(changelog)
version_number = canonicalize_version_number(get_version_number(topmost_version_line))
if args['--debug']:
print('-' * 79)
print(args)
print(changelog)
print(stampver)
print(version_number)
pprint.pprint(modules)
print('-' * 79)
for module in modules:
        # The commented-out call below does not work for `StampVer.exe`,
#cmd_args = (stampver, '-k', '-f"{0}"'.format(version_number), '-p"{0}"'.format(version_number), module)
#subprocess.run(cmd_args)
#
# so I have to quote those arguments all by myself
cmd_args = ' '.join((quote_path(stampver), '-k', '-f"{0}"'.format(version_number), '-p"{0}"'.format(version_number), quote_path(module)))
run_subprocess(cmd_args)
if __name__ == '__main__':
main()
# References:
# Ongoing-Study/cpp/msvc_cmdline_args/msvc_cmdline_args.cpp
# [Python glob multiple filetypes](http://stackoverflow.com/questions/4568580/python-glob-multiple-filetypes)
# https://github.com/telegramdesktop/tdesktop/blob/dev/Telegram/build/set_version.py
| myd7349/Ongoing-Study | python/SetPEVersion.py | Python | lgpl-3.0 | 6,242 |
import sys
import os
default_variant = 'PySide'
env_api = os.environ.get('QT_API', 'pyqt')
if '--pyside' in sys.argv:
variant = 'PySide'
elif '--pyqt4' in sys.argv:
variant = 'PyQt4'
elif env_api == 'pyside':
variant = 'PySide'
elif env_api == 'pyqt':
variant = 'PyQt4'
else:
variant = default_variant
if variant == 'PySide':
from PySide import QtGui, QtCore
# This will be passed on to new versions of matplotlib
os.environ['QT_API'] = 'pyside'
def QtLoadUI(uifile):
from PySide import QtUiTools
loader = QtUiTools.QUiLoader()
uif = QtCore.QFile(uifile)
uif.open(QtCore.QFile.ReadOnly)
result = loader.load(uif)
uif.close()
return result
elif variant == 'PyQt4':
import sip
api2_classes = [
        'QDate', 'QDateTime', 'QString', 'QTextStream',
'QTime', 'QUrl', 'QVariant',
]
for cl in api2_classes:
sip.setapi(cl, 2)
from PyQt4 import QtGui, QtCore
QtCore.Signal = QtCore.pyqtSignal
QtCore.QString = str
os.environ['QT_API'] = 'pyqt'
def QtLoadUI(uifile):
from PyQt4 import uic
return uic.loadUi(uifile)
else:
raise ImportError("Python Variant not specified")
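# Illustrative usage from client code (assumption, not part of this module):
#   from QtVariant import QtGui, QtCore, QtLoadUI
#   widget = QtLoadUI('main_window.ui')  # 'main_window.ui' is a hypothetical file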
__all__ = [QtGui, QtCore, QtLoadUI, variant] | s910324/Sloth | Untitled Folder/QtVariant.py | Python | lgpl-3.0 | 1,292 |
import _metagam3d
from _metagam3d import AxisAlignment, AlignmentType
from metagam3d.channels import blocking
from metagam3d.scripts import m3d_expr
from concurrence import Tasklet
class LoadError(Exception):
pass
class Object(_metagam3d.Object):
def __init__(self, objid):
_metagam3d.Object.__init__(self, objid)
self._params = {}
def param(self, paramid):
"Get parameter object for given parameter id"
try:
return self._params[paramid]
except KeyError:
pass
param = ObjectParam(self, paramid)
self._params[paramid] = param
return param
def load(self, filename, flags=0):
"Load and return new subobject from file"
objid = _metagam3d._loadObject(filename, self.id, flags)
if objid is None:
raise LoadError("Error loading %s" % filename)
return Object(objid)
def createText(self, axisAlignment=AxisAlignment.XY_PLANE, alignment=AlignmentType.CENTER_CENTER):
"Create text object"
return Object(_metagam3d._createText(self.id, axisAlignment, alignment))
def getParam(self, paramid, t):
return self.param(paramid).getValue(t)
def setParam(self, paramid, val):
if type(val) is not _metagam3d.DynamicValue:
if type(val) is not _metagam3d.Variant:
val = _metagam3d.Variant(val)
val = _metagam3d.DynamicValue(val)
self.param(paramid).setValue(val)
def setParam3(self, paramid, x, y, z):
self.setParam(paramid, _metagam3d.Vec3d(x, y, z))
def setParamExpr(self, paramid, expr, till=None):
self.param(paramid).setValue(m3d_expr(expr, till))
def assignMaterial(self, geodeName, ambient=0, diffuse=0, specular=0, emission=0, shininess=0):
_metagam3d._assignMaterial(self.id, geodeName, ambient, diffuse, specular, emission, shininess)
def createConsole(self, cols=80, rows=25, fontSize=1.0):
return Console(_metagam3d._createConsole(self.id, cols, rows, fontSize))
def createLine(self):
return Object(_metagam3d._createLine(self.id))
def destroyAfter(self, t):
Tasklet.new(self._destroyAfter)(t)
def _destroyAfter(self, t):
Tasklet.sleep(t)
self.destroy()
class Console(Object):
def println(self, elements):
line = _metagam3d.ConsoleLine()
for el in elements:
line.add(_metagam3d.ConsoleLineElement(el[0], el[1]))
_metagam3d._printConsole(self.id, line)
class ObjectParam(_metagam3d.ObjectParam):
def __init__(self, obj, paramid):
_metagam3d.ObjectParam.__init__(self, obj.id, paramid)
self._obj = obj
@property
def obj(self):
return self._obj
def load(filename, flags=0):
"Load root level object from file"
objid = _metagam3d._loadObject(filename, 0, flags)
if objid is None:
raise LoadError("Error loading %s" % filename)
return Object(objid)
def createText(axisAlignment=AxisAlignment.XY_PLANE, alignment=AlignmentType.CENTER_CENTER):
"Create text object"
return Object(_metagam3d._createText(0, axisAlignment, alignment))
def createConsole(cols=80, rows=25, fontSize=1.0):
return Console(_metagam3d._createConsole(0, cols, rows, fontSize))
def createLine():
return Object(_metagam3d._createLine(0))
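# Illustrative usage sketch (assumption: file name and parameter ids below are hypothetical):
#   scene = load('models/scene.osg')
#   label = scene.createText()
#   label.setParam(0, 'Hello')
#   label.destroyAfter(5.0)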
| JoyTeam/metagam3d | python/metagam3d/objects.py | Python | lgpl-3.0 | 3,360 |
#
# Author: Martin Sandve Alnes
# Date: 2008-10-03
#
from ufl import (Coefficient, TestFunction, TrialFunction, VectorElement, dot,
dx, grad, triangle)
element = VectorElement("Lagrange", triangle, 1)
u = TrialFunction(element)
v = TestFunction(element)
w = Coefficient(element)
a = dot(dot(w, grad(u)), v) * dx
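# The form above is the explicit convection term (w . grad(u)) . v integrated
# over the domain: w is a given convecting velocity, u the trial function and
# v the test function, so the form is linear in both u and v.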
| FEniCS/ufl | demo/ExplicitConvection.py | Python | lgpl-3.0 | 332 |
#!/usr/bin/python
import subprocess, sys, signal, logging, time, RPi.GPIO as GPIO
FLOATSW_HIGH_WL = 26 # high water level float switch
WATER_VALVE = 10 # GPIO port for the Water Electo valve, High by default after boot
VALVE_CHGSTATE_TIMER = 25 # Electro valve needs roughly 20 seconds to switch from open to close and vice versa
logger = None
def Setup():
global logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.FileHandler('/var/log/rodi.log')
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(message)s',"%Y-%m-%d %H:%M:%S")
handler.setFormatter(formatter)
logger.addHandler(handler)
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(WATER_VALVE, GPIO.OUT)
GPIO.setup(FLOATSW_HIGH_WL, GPIO.IN, pull_up_down=GPIO.PUD_UP) #, initial = GPIO.HIGH)
if not sys.stdout.isatty():
sys.stderr = open('/var/log/rodi_stderr.log', 'a')
sys.stdout = open('/var/log/rodi_stdout.log', 'a')
def Alert(message):
global logger
logger.info(message) # log the event
print(message)
logger.handlers[0].flush()
def Close_valve():
GPIO.output(WATER_VALVE, False)
Alert("Closing the RO/DI valve")
def Open_valve():
if GPIO.input(WATER_VALVE) == True:
Alert("RO/DI Valve already opened")
sys.exit(5)
else:
Alert("Opening the RO/DI valve")
GPIO.output(WATER_VALVE, True)
time.sleep(VALVE_CHGSTATE_TIMER)
def Refilling():
if GPIO.input(WATER_VALVE) == True:
return True
else:
return False
class GracefulKiller:
kill_now = False
def __init__(self):
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
def exit_gracefully(self,signum, frame):
self.kill_now = True
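# Typical invocations (assumption, based on the argument handling below):
#   ./rodi.py 120    -> open the RO/DI valve for 120 seconds (plus valve travel time)
#   ./rodi.py close  -> force the valve closed and exit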
if not len(sys.argv) > 1:
print("You must provide one numerical argument to this function (duration in seconds). Exiting.")
sys.exit(1)
if sys.argv[1] != "close" and sys.argv[1] != "stop" and not sys.argv[1].isdigit():
print("Value is neither 'close', 'stop' or a refill duration expressed in seconds")
sys.exit(1)
i = 0
killer = GracefulKiller()
Setup()
if sys.argv[1] == "close" or sys.argv[1] == "stop":
Close_valve()
if str.count(subprocess.check_output(["ps", "aux"]), "rodi") > 1:
Alert("Warning, we were called while another instance of rodi.py was already in Memory")
sys.exit(1)
if GPIO.input(FLOATSW_HIGH_WL) == 0:
Alert("Water level in sump already high, refilling would be dangerous, exiting")
if GPIO.input(WATER_VALVE) == True:
Alert("RO/DI Valve already opened while high water in the sump, closing.")
Close_valve()
sys.exit(3)
if sys.argv[1].isdigit():
Alert("Not already refilling, sump water level normal, proceeding.")
Alert("Refilling for " + sys.argv[1] + " seconds")
try:
Open_valve()
while i<VALVE_CHGSTATE_TIMER+int(sys.argv[1]):
time.sleep(1)
i=i+1
if GPIO.input(FLOATSW_HIGH_WL) == 0:
Alert("Water level in sump is now high, stopping the refill")
Close_valve()
sys.exit(3)
break
if killer.kill_now:
Alert("Caught a Sigterm, Sigkill or CTRL+C, exiting.")
Close_valve()
sys.exit(2)
break
Alert("Refill done, exiting.")
Close_valve()
sys.exit(0)
except (RuntimeError, IOError):
Alert("Caught an exception, exiting.")
Close_valve()
sys.exit(4)
# Exit code :
# 5 : already refilling or cannot create lock file
# 4 : Caught an exception
# 3 : water is high either at start or during the refill
# 2 : a sigkill, sigterm or keyboard CTRL+C signal was received
# 1 : incorrect parameter received
# 0 : all went fine
| aquamonitor/Aquamonitor | rodi.py | Python | lgpl-3.0 | 4,109 |
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#
# This file is part of the E-Cell System
#
# Copyright (C) 1996-2016 Keio University
# Copyright (C) 2008-2016 RIKEN
# Copyright (C) 2005-2009 The Molecular Sciences Institute
#
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#
#
# E-Cell System is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# E-Cell System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with E-Cell System -- see the file COPYING.
# If not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#END_HEADER
try:
import gnomecanvas
except:
import gnome.canvas as gnomecanvas
from ecell.ui.model_editor.Constants import *
from ecell.ui.model_editor.Utils import *
from ecell.ui.model_editor.ResizeableText import *
class ComplexLine:
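    """Canvas view for a connection line: builds gnomecanvas items (lines,
    Bpaths, text) from the parent object's shape descriptor list and forwards
    mouse events back to that parent object."""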
def __init__( self, anObject, aCanvas ):
self.theCanvas = aCanvas
self.parentObject = anObject
self.graphUtils = self.parentObject.getGraphUtils()
self.shapeMap = {}
self.lastmousex = 0
self.lastmousey = 0
self.buttonpressed = False
self.firstdrag=False
def show ( self ):
self.theRoot = self.parentObject.theCanvas.getRoot()
self.shapeDescriptorList = self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).getDescriptorList()
self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).reCalculate()
self.__sortByZOrder( self.shapeDescriptorList )
self.isSelected = False
for aKey in self.shapeDescriptorList.keys():
aDescriptor = self.shapeDescriptorList[aKey]
if aDescriptor[SD_TYPE] == CV_TEXT:
self.createText( aDescriptor )
elif aDescriptor[SD_TYPE] == CV_LINE:
self.createLine( aDescriptor )
elif aDescriptor[SD_TYPE] == CV_BPATH:
self.createBpath( aDescriptor )
self.isSelected = False
def repaint ( self ):
self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).reCalculate()
self.shapeDescriptorList = self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).getDescriptorList()
self.__sortByZOrder( self.shapeDescriptorList )
for aKey in self.shapeDescriptorList.keys():
aDescriptor = self.shapeDescriptorList[aKey]
if aDescriptor[SD_TYPE] == CV_TEXT:
self.redrawText( aDescriptor )
elif aDescriptor[SD_TYPE] == CV_LINE:
self.redrawLine( aDescriptor )
elif aDescriptor[SD_TYPE] == CV_BPATH:
self.redrawBpath( aDescriptor )
def reName( self ):
self.shapeDescriptorList = self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).getDescriptorList()
self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).renameLabel( self.parentObject.getProperty( CO_NAME ) )
aDescriptor = self.shapeDescriptorList["textbox"]
self.renameText( aDescriptor )
def delete( self ):
for aShapeName in self.shapeMap.keys():
self.shapeMap[ aShapeName ].destroy()
def selected( self ):
self.isSelected = True
def unselected( self ):
self.isSelected = False
def outlineColorChanged( self ):
self.fillColorChanged()
def fillColorChanged( self ):
# find shapes with outline color
anRGB = copyValue( self.parentObject.getProperty( OB_FILL_COLOR ) )
if self.isSelected:
for i in range(0,3):
anRGB[i] = 32768 + anRGB[i]
for aKey in self.shapeDescriptorList.keys():
aDescriptor = self.shapeDescriptorList[aKey]
if aDescriptor[ SD_COLOR ] == SD_FILL:
aColor = self.graphUtils.getGdkColorByRGB( anRGB )
if aDescriptor[SD_TYPE] in CV_LINE:
self.changeLineColor( aDescriptor[ SD_NAME ] , aColor )
elif aDescriptor[SD_TYPE] in CV_BPATH:
self.changeLineColorB( aDescriptor[ SD_NAME ] , aColor )
def createBpath(self, aDescriptor):
aSpecific= aDescriptor[SD_SPECIFIC]
# get pathdef
pathdef= aSpecific[BPATH_PATHDEF]
pd = gnomecanvas.path_def_new(pathdef)
aGdkColor = self.getGdkColor( aDescriptor )
#cheCk: 1starg > the Bpath, 2ndarg > Bpath width(def 3), 3rdarg > Color of Bpath(def black)
bpath = self.theRoot.add(gnomecanvas.CanvasBpath, width_units=3,
outline_color_gdk = aGdkColor)
bpath.set_bpath(pd)
self.addHandlers( bpath, aDescriptor[ SD_NAME ] )
self.shapeMap[ aDescriptor[ SD_NAME ] ] = bpath
#cheCk: createLine is in charge of the Simple Line, displaying it width, colour ..blabla..
#regardless of whether it is the arrowheads or the middle stuffs (MS), it creates all
#but, if the MS is a bpath (eg. curvedLineSD) it will overwrite the middle line, I THINK OLI
def createLine( self, aDescriptor ):
lineSpec = aDescriptor[SD_SPECIFIC]
( X1, X2, Y1, Y2 ) = [lineSpec[0], lineSpec[2], lineSpec[1], lineSpec[3] ]
aGdkColor = self.getGdkColor( aDescriptor )
firstArrow = lineSpec[4]
secondArrow = lineSpec[5]
aLine = self.theRoot.add( gnomecanvas.CanvasLine,points=[X1,Y1,X2,Y2], width_units=lineSpec[ 6 ], fill_color_gdk = aGdkColor, first_arrowhead = firstArrow, last_arrowhead = secondArrow,arrow_shape_a=5, arrow_shape_b=5, arrow_shape_c=5 )
self.addHandlers( aLine, aDescriptor[ SD_NAME ] )
self.shapeMap[ aDescriptor[ SD_NAME ] ] = aLine
def changeLineColor ( self, shapeName, aColor ):
aShape = self.shapeMap[ shapeName ]
aShape.set_property('fill_color_gdk', aColor )
def changeLineColorB ( self, shapeName, aColor ):
aShape = self.shapeMap[ shapeName ]
aShape.set_property('outline_color_gdk', aColor )
def createText( self, aDescriptor ):
textSpec = aDescriptor[SD_SPECIFIC]
(X1, Y1) = ( textSpec[TEXT_ABSX], textSpec[TEXT_ABSY] )
aGdkColor = self.getGdkColor( aDescriptor )
aText = ResizeableText( self.theRoot, self.theCanvas, X1, Y1, aGdkColor, textSpec[TEXT_TEXT], gtk.ANCHOR_NW )
self.addHandlers( aText, aDescriptor[ SD_NAME ] )
self.shapeMap[ aDescriptor[ SD_NAME ] ] = aText
def redrawLine( self, aDescriptor ):
aShape = self.shapeMap[ aDescriptor[ SD_NAME ] ]
aSpecific = aDescriptor[ SD_SPECIFIC ]
x1 = aSpecific[0]
y1 = aSpecific[1]
x2 = aSpecific[2]
y2 = aSpecific[3]
hasFirstArrow = aSpecific[4]
hasLastArrow = aSpecific[5]
aShape.set_property( 'points', (x1, y1, x2, y2) )
aShape.set_property('first_arrowhead', hasFirstArrow )
aShape.set_property('last_arrowhead', hasLastArrow )
def redrawBpath( self, aDescriptor ):
aShape = self.shapeMap[ aDescriptor[ SD_NAME ] ]
pathdef = aDescriptor[ SD_SPECIFIC ][BPATH_PATHDEF]
pd=gnomecanvas.path_def_new(pathdef)
aShape.set_bpath(pd)
def redrawText( self, aDescriptor ):
aShape = self.shapeMap[ aDescriptor[ SD_NAME ] ]
aSpecific = aDescriptor[ SD_SPECIFIC ]
x = aSpecific[TEXT_ABSX]
y = aSpecific[TEXT_ABSY]
aShape.set_property( 'x', x )
aShape.set_property( 'y', y )
def renameText (self, aDescriptor ):
aShape = self.shapeMap[ aDescriptor[ SD_NAME ] ]
aSpecific = aDescriptor[ SD_SPECIFIC ]
label = aSpecific[ TEXT_TEXT ]
aShape.set_property( 'text', label )
def getGdkColor( self, aDescriptor ):
aColorType = aDescriptor[ SD_COLOR ]
if aColorType == SD_FILL:
queryProp = OB_FILL_COLOR
elif aColorType == CV_TEXT:
queryProp = OB_TEXT_COLOR
anRGBColor = self.parentObject.getProperty( queryProp )
return self.graphUtils.getGdkColorByRGB( anRGBColor )
def __sortByZOrder ( self, desclist ):
keys = desclist.keys()
fn = lambda x, y: ( x[SD_Z] < y[SD_Z] ) - ( y[SD_Z] < x[SD_Z] )
keys.sort(fn)
def leftClick( self, shapeName, x, y, shift_pressed = False ):
# usually select
self.parentObject.doSelect( shift_pressed )
if self.getShapeDescriptor(shapeName)[SD_FUNCTION] == SD_ARROWHEAD:
self.changeCursor( shapeName, x, y, True )
def rightClick ( self, shapeName, x, y, anEvent, shift ):
# usually show menu
if not self.parentObject.isSelected:
self.parentObject.doSelect( shift )
self.parentObject.showMenu( anEvent)
def getFirstDrag(self):
return self.firstdrag
def setFirstDrag(self,aValue):
self.firstdrag=aValue
def mouseDrag( self, shapeName, deltax, deltay, origx, origy ):
# decide whether resize or move or draw arrow
if self.getShapeDescriptor(shapeName)[SD_FUNCTION] == SD_MOVINGLINE:
'''
if shapeName == SHAPE_TYPE_MULTIBCURVE_LINE:
self.parentObject.getArrowType(SHAPE_TYPE_MULTIBCURVE_LINE)
#Accessing BPATH_DEF now, the coords like above
bpathDefcheCk = self.parentObject.theSD.theDescriptorList[SHAPE_TYPE_MULTIBCURVE_LINE][SD_SPECIFIC][BPATH_PATHDEF]
self.parentObject.thePropertyMap[CO_CONTROL_POINTS] = bpathDefcheCk
bpathDefcheCk[1][1] = deltax
bpathDefcheCk[1][2] = deltay
bpathDefcheCk[1][3] = deltax
bpathDefcheCk[1][4] = deltay
bpathDefcheCk[2][1] = deltax
bpathDefcheCk[2][2] = deltay
bpathDefcheCk[2][3] = deltax
bpathDefcheCk[2][4] = deltay
#bpathDefcheCk[2][1,2,3,4] = [deltax,deltay,deltax,deltay]
'''
elif self.getShapeDescriptor(shapeName)[SD_FUNCTION] == SD_ARROWHEAD:
if not self.firstdrag:
self.firstdrag=True
self.parentObject.arrowheadDragged( shapeName,deltax, deltay, origx, origy)
def checkConnection( self ):
self.parentObject.checkConnection()
def doubleClick( self, shapeName ):
self.parentObject.popupEditor()
def getShapeDescriptor( self, shapeName ):
return self.parentObject.getProperty( OB_SHAPEDESCRIPTORLIST ).getDescriptor( shapeName )
def addHandlers( self, canvasObject, aName ):
canvasObject.connect('event', self.rect_event, aName )
def releaseButton( self, shapeName, x, y ):
self.changeCursor( shapeName, x, y, False )
self.parentObject.mouseReleased( shapeName,x, y)
def mouseEntered( self, shapeName, x, y ):
self.changeCursor( shapeName, x, y )
def changeCursor( self, shapeName, x, y, buttonpressed = False):
aFunction = self.getShapeDescriptor(shapeName)[SD_FUNCTION]
aCursorType = self.parentObject.getCursorType( aFunction, x, y , buttonpressed)
self.theCanvas.setCursor( aCursorType )
def rect_event( self, *args ):
event = args[1]
item = args[0]
shapeName = args[2]
if event.type == gtk.gdk.BUTTON_PRESS:
            if (event.state & gtk.gdk.SHIFT_MASK) == gtk.gdk.SHIFT_MASK:
shift_press = True
else:
shift_press = False
if event.button == 1:
self.lastmousex = event.x
self.lastmousey = event.y
self.buttonpressed = True
self.leftClick( shapeName, event.x, event.y, shift_press )
elif event.button == 3:
self.rightClick(shapeName, event.x, event.y, event, shift_press )
elif event.type == gtk.gdk.BUTTON_RELEASE:
if event.button == 1:
self.buttonpressed = False
self.releaseButton(shapeName, event.x, event.y )
elif event.type == gtk.gdk.MOTION_NOTIFY:
            self.buttonpressed = (event.state & gtk.gdk.BUTTON1_MASK) > 0
if not self.buttonpressed:
return
oldx = self.lastmousex
oldy = self.lastmousey
deltax = event.x - oldx
deltay = event.y - oldy
self.lastmousex = event.x
self.lastmousey = event.y
self.mouseDrag( shapeName, deltax, deltay, oldx, oldy )
elif event.type == gtk.gdk._2BUTTON_PRESS:
if event.button == 1:
self.doubleClick( shapeName )
elif event.type == gtk.gdk.ENTER_NOTIFY:
self.mouseEntered( shapeName, event.x, event.y )
| ecell/ecell3 | ecell/frontend/model-editor/ecell/ui/model_editor/ComplexLine.py | Python | lgpl-3.0 | 13,350 |
#!/usr/bin/env python
# Copyright (C) 2011-2014 Swift Navigation Inc.
# Contact: Fergus Noble <[email protected]>
#
# This source is subject to the license found in the file 'LICENSE' which must
# be be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
import base64
import struct
__version__ = "0.23"
SBP_PREAMBLE = 0x55
crc16_tab = [0x0000,0x1021,0x2042,0x3063,0x4084,0x50a5,0x60c6,0x70e7,
0x8108,0x9129,0xa14a,0xb16b,0xc18c,0xd1ad,0xe1ce,0xf1ef,
0x1231,0x0210,0x3273,0x2252,0x52b5,0x4294,0x72f7,0x62d6,
0x9339,0x8318,0xb37b,0xa35a,0xd3bd,0xc39c,0xf3ff,0xe3de,
0x2462,0x3443,0x0420,0x1401,0x64e6,0x74c7,0x44a4,0x5485,
0xa56a,0xb54b,0x8528,0x9509,0xe5ee,0xf5cf,0xc5ac,0xd58d,
0x3653,0x2672,0x1611,0x0630,0x76d7,0x66f6,0x5695,0x46b4,
0xb75b,0xa77a,0x9719,0x8738,0xf7df,0xe7fe,0xd79d,0xc7bc,
0x48c4,0x58e5,0x6886,0x78a7,0x0840,0x1861,0x2802,0x3823,
0xc9cc,0xd9ed,0xe98e,0xf9af,0x8948,0x9969,0xa90a,0xb92b,
0x5af5,0x4ad4,0x7ab7,0x6a96,0x1a71,0x0a50,0x3a33,0x2a12,
0xdbfd,0xcbdc,0xfbbf,0xeb9e,0x9b79,0x8b58,0xbb3b,0xab1a,
0x6ca6,0x7c87,0x4ce4,0x5cc5,0x2c22,0x3c03,0x0c60,0x1c41,
0xedae,0xfd8f,0xcdec,0xddcd,0xad2a,0xbd0b,0x8d68,0x9d49,
0x7e97,0x6eb6,0x5ed5,0x4ef4,0x3e13,0x2e32,0x1e51,0x0e70,
0xff9f,0xefbe,0xdfdd,0xcffc,0xbf1b,0xaf3a,0x9f59,0x8f78,
0x9188,0x81a9,0xb1ca,0xa1eb,0xd10c,0xc12d,0xf14e,0xe16f,
0x1080,0x00a1,0x30c2,0x20e3,0x5004,0x4025,0x7046,0x6067,
0x83b9,0x9398,0xa3fb,0xb3da,0xc33d,0xd31c,0xe37f,0xf35e,
0x02b1,0x1290,0x22f3,0x32d2,0x4235,0x5214,0x6277,0x7256,
0xb5ea,0xa5cb,0x95a8,0x8589,0xf56e,0xe54f,0xd52c,0xc50d,
0x34e2,0x24c3,0x14a0,0x0481,0x7466,0x6447,0x5424,0x4405,
0xa7db,0xb7fa,0x8799,0x97b8,0xe75f,0xf77e,0xc71d,0xd73c,
0x26d3,0x36f2,0x0691,0x16b0,0x6657,0x7676,0x4615,0x5634,
0xd94c,0xc96d,0xf90e,0xe92f,0x99c8,0x89e9,0xb98a,0xa9ab,
0x5844,0x4865,0x7806,0x6827,0x18c0,0x08e1,0x3882,0x28a3,
0xcb7d,0xdb5c,0xeb3f,0xfb1e,0x8bf9,0x9bd8,0xabbb,0xbb9a,
0x4a75,0x5a54,0x6a37,0x7a16,0x0af1,0x1ad0,0x2ab3,0x3a92,
0xfd2e,0xed0f,0xdd6c,0xcd4d,0xbdaa,0xad8b,0x9de8,0x8dc9,
0x7c26,0x6c07,0x5c64,0x4c45,0x3ca2,0x2c83,0x1ce0,0x0cc1,
0xef1f,0xff3e,0xcf5d,0xdf7c,0xaf9b,0xbfba,0x8fd9,0x9ff8,
0x6e17,0x7e36,0x4e55,0x5e74,0x2e93,0x3eb2,0x0ed1,0x1ef0]
def crc16(s, crc=0):
"""CRC16 implementation acording to CCITT standards.
"""
for ch in s:
crc = ((crc<<8)&0xFFFF) ^ crc16_tab[ ((crc>>8)&0xFF) ^ (ord(ch)&0xFF) ]
crc &= 0xFFFF
return crc
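# Minimal self-check sketch (assumption: Python 2 string semantics, matching the
# ord()-based crc16() above). The CRC covers everything after the preamble byte,
# which is exactly how SBP.pack() appends its checksum further down.
if __name__ == '__main__':
    _frame = struct.pack('<BHHB', SBP_PREAMBLE, 0x0055, 0x0042, 3) + 'abc'
    print('CRC16 over frame (minus preamble): 0x%04X' % crc16(_frame[1:]))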
class SBP(object):
"""Swift Binary Protocol container.
"""
def __init__(self, msg_type=None, sender=None,
length=None, payload=None, crc=None):
self.preamble = SBP_PREAMBLE
self.msg_type = msg_type
self.sender = sender
self.length = length
self.payload = payload
self.crc = crc
def __eq__(self, other):
return self.__dict__ == other.__dict__
def pack(self):
"""Pack to framed binary message.
"""
framed_msg = struct.pack('<BHHB',
self.preamble,
self.msg_type,
self.sender,
len(self.payload))
framed_msg += self.payload
crc = crc16(framed_msg[1:], 0)
framed_msg += struct.pack('<H', crc)
return framed_msg
def __repr__(self):
p = (self.preamble, self.msg_type, self.sender, self.length,
self.payload, self.crc)
fmt = "<SBP (preamble=0x%X, msg_type=0x%X, sender=%s, length=%d, payload=%s, crc=0x%X)>"
return fmt % p
@staticmethod
def from_json_dict(data):
msg_type = data['msg_type']
sender = data['sender']
length = data['length']
payload = base64.standard_b64decode(data['payload'])
crc = data['crc']
return SBP(msg_type, sender, length, payload, crc)
def to_json_dict(self):
return {'preamble': self.preamble,
'msg_type': self.msg_type,
'sender': self.sender,
'length': self.length,
'payload': base64.standard_b64encode(self.payload),
'crc': self.crc}
| fnoble/libsbp | python/sbp/__init__.py | Python | lgpl-3.0 | 4,591 |
#Initialisation
from time import sleep
from NaoCommunication import *
nao=NaoControle(Nao())
# 1 - Describe the result of this piece of code
# ...
for a in range(16):
if a%2==0:
nao.reglerCouleur(a,a*15,50,50)
else :
nao.reglerCouleur(a,255,0,0)
sleep(0.1)
for a in range(15,-1,-1):
nao.eteindreLed(a)
sleep(0.1)
# 2 - Describe the result of this second piece of code
# ...
for a in range(15,-1,-1):
nao.allumerLed(a)
sleep(0.1)
for a in range(0,16,1):
nao.eteindreLed(a)
sleep(0.1)
# 3 - Using the previous examples, write code that alternately lights
# the two LEDs for 1 second each, over a total of 10 seconds.
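# One possible answer (assumption: "the two LEDs" means LED indices 0 and 1):
# 5 cycles of 2 seconds each = 10 seconds in total.
for a in range(5):
	nao.allumerLed(0)
	sleep(1)
	nao.eteindreLed(0)
	nao.allumerLed(1)
	sleep(1)
	nao.eteindreLed(1)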
| AdrienVR/NaoSimulator | TPINFO/Partie3/exercice1.py | Python | lgpl-3.0 | 659 |
#!/usr/bin/env python
# Copyright (C) 2015 Swift Navigation Inc.
# Contact: Ian Horn <[email protected]>
#
# This source is subject to the license found in the file 'LICENSE' which must
# be be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
from gnss_analysis.runner import run as single_run
import pandas as pd
import numpy as np
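# Example command line (hypothetical file names and baseline, for illustration):
#   python agg_run.py run.hdf5 results.hdf5 1.0 2.0 3.0 --NED -k table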
def main():
import argparse
parser = argparse.ArgumentParser(description='RTK Filter SITL tests.')
parser.add_argument('infile', help='Specify the HDF5 file to use for input.')
parser.add_argument('outfile', help='Specify the HDF5 file to output into.')
parser.add_argument('baselineX', help='The baseline north component.')
parser.add_argument('baselineY', help='The baseline east component.')
parser.add_argument('baselineZ', help='The baseline down component.')
parser.add_argument('--NED', action='store_true')
parser.add_argument('-k', '--key',
default='table', nargs=1,
help='The key for the output table to insert into.')
parser.add_argument('-r', '--row',
default=None, nargs=1,
help='The key for the output table to insert into.')
args = parser.parse_args()
hdf5_filename_in = args.infile
hdf5_filename_out = args.outfile
baselineX = args.baselineX
baselineY = args.baselineY
baselineZ = args.baselineZ
baseline = np.array(map(float, [baselineX, baselineY, baselineZ]))
out_key = args.key
row = args.row
if row is None:
row = hdf5_filename_in
reports = single_run(hdf5_filename_in, baseline, baseline_is_NED=args.NED)
out_store = pd.HDFStore(hdf5_filename_out)
if ('/' + out_key) in out_store.keys():
out_df = out_store[out_key]
else:
out_df = pd.DataFrame()
new_cols = [col for col in reports.keys() if col not in out_df.columns]
for new_col in new_cols:
out_df[new_col] = pd.Series(np.nan * np.empty_like(out_df.index),
index=out_df.index)
out_df.loc[row] = pd.Series(reports)
out_store[out_key] = out_df
out_store.close()
if __name__ == "__main__":
main()
| imh/gnss-analysis | gnss_analysis/agg_run.py | Python | lgpl-3.0 | 2,348 |
#!/usr/bin/env python3
# -*- coding: utf-8; Mode: Python; indent-tabs-mode: nil; tab-width: 4 -*-
# vim: set fileencoding=utf-8 filetype=python syntax=python.doxygen fileformat=unix tabstop=4 expandtab :
# kate: encoding utf-8; bom off; syntax python; indent-mode python; eol unix; replace-tabs off; indent-width 4; tab-width 4; remove-trailing-space on;
"""@brief Lightweight pure-Python neural network library.
@file neuralnet.py
@package pybooster.neuralnet
@version 2019.12.23
@author Devyn Collier Johnson <[email protected]>
@copyright LGPLv3
@section DESCRIPTION
@code{.py}
from pybooster.neuralnet import NeuroCode
data = [ # The input and output of an XOR gate
([0, 0], [0]), # The first list in the tuple represents the input(s)
([0, 1], [1]), # The last list in the tuple represents the output(s)
([1, 0], [1]),
([1, 1], [0])
] # Provide sample input and expected output
net = NeuroCode(
data, # The data table created above
layers = [4, 3], # Number of nodes in each hidden layers (between input and output)
iterations = 40000, # Maximum training iterations
rate = 0.1 # Learning rate
)
net.train() # Returns (correctness, iterations)
output = net.run([1, 0]) # Execute neuralnet
net.writedump(r'xor_code.py') # Save the generated code
net.neurocode2cfile(r'neural_xor.c', r'neural_xor') # Save the generated code as plain C code
net.neurocode2javafile(r'neural_xor.java', r'neural_xor') # Save the generated code as plain Java code
net.neurocode2pythonfile(r'neural_xor.py', r'neural_xor') # Save the generated code as plain Python code
@endcode
@section LICENSE
GNU Lesser General Public License v3
Copyright (c) Devyn Collier Johnson, All rights reserved.
This software is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this software.
"""
# pylint: disable=C0103
from base64 import b64decode, b64encode
from math import exp, floor
from pickle import dumps, loads # nosec
from random import Random
from typing import Any, Dict, Generator, List, Tuple
from zlib import compress, decompress
__all__: list = [
r'flatten',
r'NeuroCode'
]
def flatten(_lst: list) -> Generator[list, None, None]:
"""Flatten list of lists."""
for _sublist in _lst:
if isinstance(_sublist, list):
for _sublist in flatten(_sublist):
yield _sublist
else:
yield _sublist
def _indent(txt: str, chars: int) -> str:
"""Indent the given code."""
result: str = r''
d: str = r' ' * chars
for line in txt.split('\n'):
result += (d + line + '\n')
return result
class NeuroCode: # pylint: disable=C0200,R0902
"""Neurocode class."""
def __init__(self, data: list, layers: list, iterations: int = 40000, rate: float = 0.2) -> None:
"""Initialize Neurocode-learning.
@param[in] data A list of lists of the input data
@param[in] layers Specify the number of hidden layers in the network and the size of each layer. For example, `layers = [3, 4]` makes two hidden layers, the first with 3 nodes and the second with 4 nodes. By default, one hidden layer is used with a size proportionate to the size of the input array
@param[in] iterations Number of times to run the training
@param[in] rate Learning rate (float less than 1.0)
"""
# Setup input data
input_size: int = len(data[0][0])
output_size: int = len(data[0][1])
# Settings
self.hidden_layers = [max(3, int(floor(input_size / 2)))] if not layers else layers
self.sizes: List[Any] = list(flatten([input_size, self.hidden_layers, output_size]))
self.iterations: int = iterations
self.rate: float = rate if rate < 1.0 else 0.4
self.io_rules: list = data
self.io_rules_len: int = len(data)
self.outputlayer: int = len(self.sizes) - 1
self.error_threshold: float = 0.0001
neural_rand = Random()
# Training State
self.deltas: List[Any] = [[]] * (self.outputlayer + 1)
self.changes: List[Any] = [[]] * (self.outputlayer + 1)
self.errors: List[Any] = [[]] * (self.outputlayer + 1)
self.outputs: List[Any] = [[]] * (self.outputlayer + 1)
self.biases: List[Any] = [[]] * (self.outputlayer + 1)
self.weights: List[Any] = [[]] * (self.outputlayer + 1)
for layer in range(self.outputlayer + 1):
_size = self.sizes[layer]
self.deltas[layer] = [0] * _size
self.errors[layer] = [0] * _size
self.outputs[layer] = [0] * _size
if layer > 0:
self.biases[layer] = [(neural_rand.random() * 0.4) - 0.2 for i in range(_size)]
self.weights[layer] = [0] * _size
self.changes[layer] = self.weights[layer]
for node in range(_size):
_prev_size = self.sizes[layer - 1]
self.weights[layer][node] = [(neural_rand.random() * 0.4) - 0.2 for j in range(_prev_size)]
self.changes[layer][node] = [0] * _prev_size
def train(self) -> Tuple[float, int]: # noqa: C901
"""Neurocode training (core function)."""
error: float = 1.0
used_iterations: int = 0
for i in range(self.iterations):
used_iterations = i
if error <= self.error_threshold: # Error Threshold
break
_sum = 0.0
for d in self.io_rules:
self.run(d[0])
self._calculate_deltas(d[1])
# Adjust Weights
for _layer in range(1, self.outputlayer + 1):
incoming = self.outputs[_layer - 1]
for _node in range(self.sizes[_layer]):
delta = self.deltas[_layer][_node]
for k in range(len(incoming)):
change = (self.rate * delta * incoming[k]) + (0.1 * self.changes[_layer][_node][k]) # 0.1 = momentum
self.changes[_layer][_node][k] = change
self.weights[_layer][_node][k] = change + self.weights[_layer][_node][k]
self.biases[_layer][_node] = self.biases[_layer][_node] + (self.rate * delta)
_errsum = 0.0
for err in self.errors[self.outputlayer]:
_errsum += err ** 2.0
_sum += _errsum / len(self.errors[self.outputlayer])
error = _sum / self.io_rules_len
return (error, used_iterations)
def run(self, _input: List[Any]) -> list:
"""Forward Propagation; Execute neuralnet."""
output = self.outputs[0] = _input # Set output state of input layer
for _layer in range(1, self.outputlayer + 1):
for _node in range(self.sizes[_layer]):
weights = self.weights[_layer][_node]
_sum = self.biases[_layer][_node]
for k in range(len(weights)):
_sum += weights[k] * _input[k]
self.outputs[_layer][_node] = 1.0 / (1.0 + exp(-_sum))
_input = self.outputs[_layer]
output = _input
return output
def _calculate_deltas(self, target: list) -> None:
"""Backward Propagation."""
layer: int = self.outputlayer
while layer >= 0:
for node in range(self.sizes[layer]):
output = self.outputs[layer][node]
if layer == self.outputlayer:
error = target[node] - output
else:
deltas = self.deltas[layer + 1]
error = 0.0
for k in range(len(deltas)):
error += (deltas[k] * self.weights[layer + 1][k][node])
self.errors[layer][node] = error
self.deltas[layer][node] = (error * output) * (1 - output)
layer -= 1
def bestof(self, generations: int = 16) -> bytes:
"""Return the best neuralnet from the given amount produced as a byte string."""
rounds: int = generations
best_result: float = 1.0 # Store the best error-rate
best_neuralnet: bytes = b''
while rounds != 0:
result = self.train()
if result[0] < best_result:
best_result = result[0]
best_neuralnet = self.dump()
rounds -= 1
return best_neuralnet
def dump(self) -> bytes:
"""Pickle neural-network and compress it using Zlib."""
return b64encode(compress(dumps(self), 9))
def writedump(self, _filename: str) -> None:
"""Pickle neural-network, compress it using Zlib, and then write it to a file."""
with open(_filename, mode=r'wt', encoding=r'utf-8') as _file:
_file.write(str(b64encode(compress(dumps(self), 9), altchars=br'-_'), encoding=r'utf-8'))
def neurocode2pythonfile(self, _filename: str, _neuroname: str) -> None:
"""Write the Neurocode to a file as Python code."""
with open(_filename, mode=r'wt', encoding=r'utf-8') as _code:
_code.write(self.to_python_function(_neuroname))
def neurocode2cfile(self, _filename: str, _neuroname: str) -> None:
"""Write the Neurocode to a file as C code."""
with open(_filename, mode=r'wt', encoding=r'utf-8') as _code:
_code.write(self.to_c_function(_neuroname))
def neurocode2javafile(self, _filename: str, _neuroname: str) -> None:
"""Write the Neurocode to a file as Java code."""
with open(_filename, mode=r'wt', encoding=r'utf-8') as _code:
_code.write(self.to_java_method(_neuroname))
@staticmethod
def load(_str: str) -> object:
"""Load the given compressed+pickled neural-network."""
return loads(decompress(b64decode(bytes(_str, encoding=r'utf-8'), altchars=br'-_')))
def to_python_function(self, fnname: str = r'nn_run', indent: int = 0) -> str:
"""Convert the neural-network to Python code."""
        fn: str = f'def {fnname}(i):\n'
for _layer in range(1, self.outputlayer + 1):
fn += ' o = [\n' if _layer < self.outputlayer else ' return [\n'
size = self.sizes[_layer]
for n in range(size):
term: str = fr'{-self.biases[_layer][n]}'
length = len(self.weights[_layer][n])
for k in range(length):
w = self.weights[_layer][n][k]
term += (r'-' if w > 0 else r'+') + fr'{abs(w)} * i[{k}]'
fn += fr' 1 / (1 + math.exp({term}))' + (',\n' if n != size - 1 else '\n')
fn += ' ]\n'
if _layer != self.outputlayer:
fn += ' i = o\n'
return _indent(fn, indent)
def to_java_method(self, fnname: str = r'nn_run', static: bool = False, scope: str = r'protected', indent: int = 4) -> str:
"""Convert the neural-network to Java code."""
        fn: str = scope + (r' static ' if static else r' ') + f'double[] {fnname}(double[] i){{\n'
fn += ' double[] o;\n'
for _layer in range(1, self.outputlayer + 1):
fn += ' o = new double[]{\n' if _layer < self.outputlayer else ' return new double[]{\n'
size = self.sizes[_layer]
for n in range(size):
term: str = fr'{-self.biases[_layer][n]}'
length = len(self.weights[_layer][n])
for k in range(length):
w = self.weights[_layer][n][k]
term += (r'-' if w > 0 else r'+') + fr'{abs(w)} * i[{k}]'
fn += fr' 1 / (1 + Math.exp({term}))' + (',\n' if n != size - 1 else '\n')
fn += ' };\n'
if _layer != self.outputlayer:
fn += ' i = o;\n'
fn += r'}'
return _indent(fn, indent)
def to_c_function(self, fnname: str = r'nn_run', indent: int = 0) -> str: # pylint: disable=R0914
"""Convert the neural-network to C code."""
terms: Dict[str, str] = {}
lterms: List[str] = []
for k in range(self.sizes[0]):
lterms.append(fr'o0_{k}')
terms[lterms[-1]] = fr'i[{k}]'
oterms: dict = {}
for _layer in range(1, self.outputlayer + 1):
for n in range(self.sizes[_layer]):
term: str = fr'{-self.biases[_layer][n]}'
for k in range(len(self.weights[_layer][n])):
w = self.weights[_layer][n][k]
term += (r'-' if w > 0 else r'+') + fr'{abs(w)} * o{_layer - 1}_{k}'
v = fr'(1.0 / (1.0 + exp({term})))'
for _str in lterms:
v = v.replace(_str, terms[_str])
lterms.append(fr'o{_layer}_{n}')
terms[lterms[-1]] = v
if _layer == self.outputlayer:
oterms[fr'o{_layer}_{n}'] = fr'o[{n}]'
del k, lterms
        fn: str = f'void {fnname}(double* i, double* o){{\n'
for _str, v in oterms.items():
fn += f' {v} = {terms[_str]};\n'
fn += '}\n'
return _indent(fn, indent)
| DevynCJohnson/Pybooster | pylib/neuralnet.py | Python | lgpl-3.0 | 13,720 |
'''
Created on 05.11.2013
@author: gena
'''
from __future__ import print_function
from PyQt4 import QtCore
from escore.plate import Plate
from escore.approximations import indexByName
class PlateRecord(object):
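    """Simple value object pairing a Plate with the base name and directory
    of the file it was loaded from."""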
def __init__(self, plate, name,path):
self.plate=plate
self.name=name
self.path=path
class PlateManager(QtCore.QObject):
'''
    PlateManager holds all plates and handles the related actions,
    such as opening, saving, closing and selecting plates.
'''
signalPlateListUpdated=QtCore.pyqtSignal(QtCore.QStringList)
signalCurrentPlateSet=QtCore.pyqtSignal(object)
signalCurrentIndexChanged=QtCore.pyqtSignal(int)
signalApproximationSelected = QtCore.pyqtSignal(int)
def __init__(self, parent=None):
super(PlateManager, self).__init__(parent)
self.plates=[]
self.currentPlateIndex = -1
self.defaultApproximationIndex=0
def getFileInfo(self,fileName):
fileInfo=QtCore.QFileInfo(fileName)
return fileInfo.baseName(), fileInfo.dir()
def openPlate(self, fileName):
plates = Plate.loadFromFile(fileName)
for number,plate in enumerate(plates):
plate.setParent(self)
if plate.approximation is None:
print("set default approximation for plate",self.defaultApproximationIndex)
plate.setApproximation(self.defaultApproximationIndex)
name,path = self.getFileInfo(fileName)
if len(plates)>1:
name+='_'+str(number+1)
plateRecord=PlateRecord(plate,name,path)
self.plates.append(plateRecord)
plate.signalApplyReference.connect(self.applyReference)
self.signalPlateListUpdated.emit(self.names())
if not self.isEmpty():
self.setCurrentPlate(0)
def setApproximation(self, index):
if self.defaultApproximationIndex==index:
return
self.defaultApproximationIndex=index
if self.currentPlateIndex >= 0 :
self.plates[self.currentPlateIndex].plate.setApproximation(index)
self.signalApproximationSelected.emit(index)
def openPlates(self, fileNameList):
for fileName in fileNameList :
self.openPlate(fileName)
def savePlateAs(self,fileName):
if self.currentPlateIndex < 0 :
return
plateRecord=self.plates[self.currentPlateIndex]
plateRecord.plate.saveToFile(fileName)
plateRecord.name,plateRecord.path = self.getFileInfo(fileName)
self.signalPlateListUpdated.emit(self.names())
def savePlateWithDefaultName(self, index):
plateRecord=self.plates[index]
fileInfo=QtCore.QFileInfo(plateRecord.path,plateRecord.name+'.csv')
plateRecord.plate.saveToFile(fileInfo.filePath())
def savePlate(self):
if self.currentPlateIndex < 0 :
return
self.savePlateWithDefaultName(self.currentPlateIndex)
def saveAllPlates(self):
for index in range(len(self.plates)):
self.savePlateWithDefaultName(index)
def removePlate(self):
if self.currentPlateIndex < 0 :
return
self.signalCurrentPlateSet.emit(None)
self.plates[self.currentPlateIndex].plate.signalApplyReference.disconnect()
del self.plates[self.currentPlateIndex]
self.signalPlateListUpdated.emit(self.names())
if not self.isEmpty():
self.setCurrentPlate(0)
def isDirty(self):
return self.plates[self.currentPlateIndex].plate.dirty
def isEmpty(self):
return self.plates == []
def names(self):
return QtCore.QStringList([QtCore.QString(record.name) for record in self.plates])
def setCurrentPlate(self, index):
if self.currentPlateIndex == index :
return
self.currentPlateIndex = index
if index >= 0:
plate = self.plates[index].plate
appindex= indexByName(plate.approximation.name)
self.defaultApproximationIndex = appindex
self.signalApproximationSelected.emit(appindex)
else :
plate = None
self.signalCurrentIndexChanged.emit(self.currentPlateIndex)
self.signalCurrentPlateSet.emit(plate)
def applyReference(self, reference):
print('Applying reference to all plates')
sender = self.sender()
for plateRecord in self.plates:
plate = plateRecord.plate
if not plate is sender:
plate.setReference(reference)
| GennadiyZakharov/elisasolver | src/escore/platemanager.py | Python | lgpl-3.0 | 4,677 |
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
#
"""\
# Modifies the pydoc module shipped with Python so that source-file links are
# generated by the member function filelink, which subclasses can override.
# See also http://bugs.python.org/issue902061
"""
#
# Copyright (C) 2009 Rene Liebscher
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, see <http://www.gnu.org/licenses/>.
#
__revision__ = "$Id: mypydoc.py,v 1.9 2009/10/07 20:52:24 rliebscher Exp $"
import sys, inspect
from string import join, split, strip
import pydoc
from pydoc import visiblename, pkgutil, getdoc, isdata
class MyHTMLDoc(pydoc.HTMLDoc):
"""Formatter class for HTML documentation."""
def filelink(self, url, path):
"""Create link to source file."""
return '<a href="file:%s">%s</a>' % (url, path)
def docmodule(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a module object."""
name = object.__name__ # ignore the passed-in name
try:
all = object.__all__
except AttributeError:
all = None
parts = split(name, '.')
links = []
for i in range(len(parts)-1):
links.append(
'<a href="%s.html"><font color="#ffffff">%s</font></a>' %
(join(parts[:i+1], '.'), parts[i]))
linkedname = join(links + parts[-1:], '.')
head = '<big><big><strong>%s</strong></big></big>' % linkedname
try:
path = inspect.getabsfile(object)
url = path
if sys.platform == 'win32':
import nturl2path
url = nturl2path.pathname2url(path)
# modified
filelink = self.filelink(url, path)
# end modified
except TypeError:
filelink = '(built-in)'
info = []
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = strip(version[11:-1])
info.append('version %s' % self.escape(version))
if hasattr(object, '__date__'):
info.append(self.escape(str(object.__date__)))
if info:
head = head + ' (%s)' % join(info, ', ')
docloc = self.getdocloc(object)
if docloc is not None:
docloc = '<br><a href="%(docloc)s">Module Docs</a>' % locals()
else:
docloc = ''
result = self.heading(
head, '#ffffff', '#7799ee',
'<a href=".">index</a><br>' + filelink + docloc)
modules = inspect.getmembers(object, inspect.ismodule)
classes, cdict = [], {}
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
(inspect.getmodule(value) or object) is object):
if visiblename(key, all):
classes.append((key, value))
cdict[key] = cdict[value] = '#' + key
for key, value in classes:
for base in value.__bases__:
key, modname = base.__name__, base.__module__
module = sys.modules.get(modname)
if modname != name and module and hasattr(module, key):
if getattr(module, key) is base:
if not key in cdict:
cdict[key] = cdict[base] = modname + '.html#' + key
funcs, fdict = [], {}
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all):
funcs.append((key, value))
fdict[key] = '#-' + key
if inspect.isfunction(value): fdict[value] = fdict[key]
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all):
data.append((key, value))
doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
doc = doc and '<tt>%s</tt>' % doc
result = result + '<p>%s</p>\n' % doc
if hasattr(object, '__path__'):
modpkgs = []
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs.append((modname, name, ispkg, 0))
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
result = result + self.bigsection(
'Package Contents', '#ffffff', '#aa55cc', contents)
elif modules:
contents = self.multicolumn(
modules, lambda (key, value), s=self: s.modulelink(value))
result = result + self.bigsection(
                'Modules', '#ffffff', '#aa55cc', contents)
if classes:
classlist = map(lambda (key, value): value, classes)
contents = [
self.formattree(inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Classes', '#ffffff', '#ee77aa', join(contents))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Functions', '#ffffff', '#eeaa77', join(contents))
if data:
contents = []
for key, value in data:
contents.append(self.document(value, key))
result = result + self.bigsection(
'Data', '#ffffff', '#55aa55', join(contents, '<br>\n'))
if hasattr(object, '__author__'):
contents = self.markup(str(object.__author__), self.preformat)
result = result + self.bigsection(
'Author', '#ffffff', '#7799ee', contents)
if hasattr(object, '__credits__'):
contents = self.markup(str(object.__credits__), self.preformat)
result = result + self.bigsection(
'Credits', '#ffffff', '#7799ee', contents)
return result
# --------------------------------------- interactive interpreter interface
pydoc.html = MyHTMLDoc()
if __name__ == '__main__': pydoc.cli()
| arruda/pyfuzzy | doc/mypydoc.py | Python | lgpl-3.0 | 7,131 |
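# A minimal sketch of the same idea on Python 3: recent versions of
# pydoc.HTMLDoc already expose a filelink() hook (the change the module above
# anticipates), so a subclass can override it and render a module page; on
# older versions the override is simply never called. The class and output
# file names below are illustrative, not part of the original script.
import pydoc
class PlainPathHTMLDoc(pydoc.HTMLDoc):
    def filelink(self, url, path):
        # Show the bare filesystem path instead of a "file:" hyperlink.
        return path
html = PlainPathHTMLDoc()
page = html.page(pydoc.describe(pydoc), html.document(pydoc, 'pydoc'))
with open('pydoc_doc.html', 'w', encoding='utf-8') as f:
    f.write(page)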
# -*- coding: utf-8 -*-
from django.contrib.sites.models import Site
from django.conf import settings
def get_site_url(request, slash=False):
domain = Site.objects.get_current().domain
protocol = 'https' if request.is_secure() else 'http'
root = "%s://%s" % (protocol, domain)
if slash:
root += '/'
return root
def absolute(request):
urls = {
'ABSOLUTE_ROOT': request.build_absolute_uri('/')[:-1],
'ABSOLUTE_ROOT_URL': request.build_absolute_uri('/'),
}
if 'django.contrib.sites' in settings.INSTALLED_APPS:
urls['SITE_ROOT'] = get_site_url(request)
urls['SITE_ROOT_URL'] = get_site_url(request, True)
    return urls
| noirbizarre/django-absolute | absolute/context_processors.py | Python | lgpl-3.0 | 703 |
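# Hypothetical settings.py excerpt showing how the context processors above are
# typically wired into Django; only the dotted path
# 'absolute.context_processors.absolute' comes from the file above, the rest is
# standard Django boilerplate. SITE_ROOT / SITE_ROOT_URL are only injected when
# 'django.contrib.sites' is installed, as the code above checks.
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.request",
                "absolute.context_processors.absolute",
            ],
        },
    },
]
# Templates can then use {{ ABSOLUTE_ROOT }}, {{ ABSOLUTE_ROOT_URL }},
# {{ SITE_ROOT }} and {{ SITE_ROOT_URL }}.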
#!/usr/bin/env python
# Copyright (c) 2014 CNRS
# Author: Steve Tonneau
#
# This file is part of hpp-rbprm-corba.
# hpp-rbprm-corba is free software: you can redistribute it
# and/or modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# hpp-manipulation-corba is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Lesser Public License for more details. You should have
# received a copy of the GNU Lesser General Public License along with
# hpp-manipulation-corba. If not, see
# <http://www.gnu.org/licenses/>.
from hpp.corbaserver.rbprm import Client as RbprmClient
from hpp.corbaserver import Client as BasicClient
import hpp.gepetto.blender.exportmotion as em
## Corba clients to the various servers
#
class CorbaClient:
"""
Container for corba clients to various interfaces.
"""
def __init__ (self):
self.basic = BasicClient ()
self.rbprm = RbprmClient ()
## Load and handle a RbprmDevice robot for rbprm planning
#
# A RbprmDevice robot is a dual representation of a robot. One robot describes the
# trunk of the robot, and a set of robots describe the range of motion of each limb of the robot.
class Builder (object):
## Constructor
def __init__ (self, load = True):
self.tf_root = "base_link"
self.rootJointType = dict()
self.client = CorbaClient ()
self.load = load
## Virtual function to load the robot model.
#
# \param urdfName urdf description of the robot trunk,
# \param urdfNameroms either a string, or an array of strings, indicating the urdf of the different roms to add.
# \param rootJointType type of root joint among ("freeflyer", "planar",
# "anchor"),
# \param meshPackageName name of the meshpackage from where the robot mesh will be loaded
# \param packageName name of the package from where the robot will be loaded
# \param urdfSuffix optional suffix for the urdf of the robot package
# \param srdfSuffix optional suffix for the srdf of the robot package
def loadModel (self, urdfName, urdfNameroms, rootJointType, meshPackageName, packageName, urdfSuffix, srdfSuffix):
if(isinstance(urdfNameroms, list)):
for urdfNamerom in urdfNameroms:
self.client.rbprm.rbprm.loadRobotRomModel(urdfNamerom, rootJointType, packageName, urdfNamerom, urdfSuffix, srdfSuffix)
else:
self.client.rbprm.rbprm.loadRobotRomModel(urdfNameroms, rootJointType, packageName, urdfNameroms, urdfSuffix, srdfSuffix)
self.client.rbprm.rbprm.loadRobotCompleteModel(urdfName, rootJointType, packageName, urdfName, urdfSuffix, srdfSuffix)
self.name = urdfName
self.displayName = urdfName
self.tf_root = "base_link"
self.rootJointType = rootJointType
self.jointNames = self.client.basic.robot.getJointNames ()
self.allJointNames = self.client.basic.robot.getAllJointNames ()
self.client.basic.robot.meshPackageName = meshPackageName
self.meshPackageName = meshPackageName
self.rankInConfiguration = dict ()
self.rankInVelocity = dict ()
self.packageName = packageName
self.urdfName = urdfName
self.urdfSuffix = urdfSuffix
self.srdfSuffix = srdfSuffix
rankInConfiguration = rankInVelocity = 0
for j in self.jointNames:
self.rankInConfiguration [j] = rankInConfiguration
rankInConfiguration += self.client.basic.robot.getJointConfigSize (j)
self.rankInVelocity [j] = rankInVelocity
rankInVelocity += self.client.basic.robot.getJointNumberDof (j)
## Init RbprmShooter
#
def initshooter (self):
return self.client.rbprm.rbprm.initshooter ()
## Sets limits on robot orientation, described according to Euler's ZYX rotation order
#
# \param bounds 6D vector with the lower and upperBound for each rotation axis in sequence
def boundSO3 (self, bounds):
return self.client.rbprm.rbprm.boundSO3 (bounds)
## Specifies a preferred affordance for a given rom.
# This constrains the planner to accept a rom configuration only if
# it collides with a surface the normal of which has these properties.
#
    # \param rom name of the rom,
# \param affordances list of affordance names
def setAffordanceFilter (self, rom, affordances):
return self.client.rbprm.rbprm.setAffordanceFilter (rom, affordances)
## Specifies a rom constraint for the planner.
# A configuration will be valid if and only if the considered rom collides
# with the environment.
#
# \param romFilter array of roms indicated by name, which determine the constraint.
def setFilter (self, romFilter):
return self.client.rbprm.rbprm.setFilter (romFilter)
## Export a computed path for blender
#
# \param problem the problem associated with the path computed for the robot
# \param stepsize increment along the path
# \param pathId if of the considered path
# \param filename name of the output file where to save the output
def exportPath (self, viewer, problem, pathId, stepsize, filename):
em.exportPath(viewer, self.client.basic.robot, problem, pathId, stepsize, filename)
## \name Degrees of freedom
# \{
## Get size of configuration
# \return size of configuration
def getConfigSize (self):
return self.client.basic.robot.getConfigSize ()
# Get size of velocity
# \return size of velocity
def getNumberDof (self):
return self.client.basic.robot.getNumberDof ()
## \}
## \name Joints
#\{
## Get joint names in the same order as in the configuration.
def getJointNames (self):
return self.client.basic.robot.getJointNames ()
## Get joint names in the same order as in the configuration.
def getAllJointNames (self):
return self.client.basic.robot.getAllJointNames ()
## Get joint position.
def getJointPosition (self, jointName):
return self.client.basic.robot.getJointPosition (jointName)
## Set static position of joint in its parent frame
def setJointPosition (self, jointName, position):
return self.client.basic.robot.setJointPosition (jointName, position)
## Get joint number degrees of freedom.
def getJointNumberDof (self, jointName):
return self.client.basic.robot.getJointNumberDof (jointName)
## Get joint number config size.
def getJointConfigSize (self, jointName):
return self.client.basic.robot.getJointConfigSize (jointName)
## set bounds for the joint
def setJointBounds (self, jointName, inJointBound):
return self.client.basic.robot.setJointBounds (jointName, inJointBound)
## Set bounds on the translation part of the freeflyer joint.
#
# Valid only if the robot has a freeflyer joint.
def setTranslationBounds (self, xmin, xmax, ymin, ymax, zmin, zmax):
self.client.basic.robot.setJointBounds \
(self.displayName + "base_joint_x", [xmin, xmax])
self.client.basic.robot.setJointBounds \
(self.displayName + "base_joint_y", [ymin, ymax])
self.client.basic.robot.setJointBounds \
(self.displayName + "base_joint_z", [zmin, zmax])
## Get link position in joint frame
#
    # Joints are oriented differently from the URDF standard, since
# rotation and uni-dimensional translation joints act around or along
# their x-axis. This method returns the position of the urdf link in
# world frame.
#
# \param jointName name of the joint
# \return position of the link in world frame.
def getLinkPosition (self, jointName):
return self.client.basic.robot.getLinkPosition (jointName)
## Get link name
#
# \param jointName name of the joint,
# \return name of the link.
def getLinkName (self, jointName):
return self.client.basic.robot.getLinkName (jointName)
## \}
## \name Access to current configuration
#\{
## Set current configuration of composite robot
#
# \param q configuration of the composite robot
def setCurrentConfig (self, q):
self.client.basic.robot.setCurrentConfig (q)
## Get current configuration of composite robot
#
# \return configuration of the composite robot
def getCurrentConfig (self):
return self.client.basic.robot.getCurrentConfig ()
## Shoot random configuration
# \return dofArray Array of degrees of freedom.
def shootRandomConfig(self):
return self.client.basic.robot.shootRandomConfig ()
## \}
## \name Bodies
# \{
## Get the list of objects attached to a joint.
# \param inJointName name of the joint.
# \return list of names of CollisionObject attached to the body.
def getJointInnerObjects (self, jointName):
return self.client.basic.robot.getJointInnerObjects (jointName)
## Get list of collision objects tested with the body attached to a joint
# \param inJointName name of the joint.
# \return list of names of CollisionObject
def getJointOuterObjects (self, jointName):
return self.client.basic.robot.getJointOuterObjects (jointName)
## Get position of robot object
# \param objectName name of the object.
# \return transformation as a hpp.Transform object
def getObjectPosition (self, objectName):
return Transform (self.client.basic.robot.getObjectPosition
(objectName))
## \brief Remove an obstacle from outer objects of a joint body
#
# \param objectName name of the object to remove,
# \param jointName name of the joint owning the body,
# \param collision whether collision with object should be computed,
# \param distance whether distance to object should be computed.
def removeObstacleFromJoint (self, objectName, jointName, collision,
distance):
return self.client.basic.obstacle.removeObstacleFromJoint \
(objectName, jointName, collision, distance)
## \}
## \name Collision checking and distance computation
# \{
## Test collision with obstacles and auto-collision.
#
# Check whether current configuration of robot is valid by calling
# CkwsDevice::collisionTest ().
# \return whether configuration is valid
# \note Deprecated. Use isConfigValid instead.
def collisionTest (self):
print "Deprecated. Use isConfigValid instead"
return self.client.basic.robot.collisionTest ()
## Check the validity of a configuration.
#
# Check whether a configuration of robot is valid.
# \param cfg a configuration
# \return whether configuration is valid
def isConfigValid (self, cfg):
return self.client.basic.robot.isConfigValid (cfg)
## Compute distances between bodies and obstacles
#
# \return list of distances,
# \return names of the objects belonging to a body
# \return names of the objects tested with inner objects,
# \return closest points on the body,
# \return closest points on the obstacles
# \note outer objects for a body can also be inner objects of another
# body.
def distancesToCollision (self):
return self.client.basic.robot.distancesToCollision ()
## \}
## \}
## \name Mass and inertia
# \{
## Get mass of robot
def getMass (self):
return self.client.basic.robot.getMass ()
## Get position of center of mass
def getCenterOfMass (self):
return self.client.basic.robot.getCenterOfMass ()
## Get Jacobian of the center of mass
def getJacobianCenterOfMass (self):
return self.client.basic.robot.getJacobianCenterOfMass ()
##\}
## Get the dimension of the extra configuration space
def getDimensionExtraConfigSpace(self):
return self.client.basic.robot.getDimensionExtraConfigSpace()
## Convert a direction vector to a quaternion (use Eigen::Quaterniond::FromTwoVectors with Z vector)
# \param u the vector director
def quaternionFromVector(self,vector):
return self.client.basic.robot.quaternionFromVector(vector)
| rlefevre1/hpp-rbprm-corba | src/hpp/corbaserver/rbprm/rbprmbuilder.py | Python | lgpl-3.0 | 12,303 |
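# The loadModel method above records, for every joint, its offset inside the
# configuration and velocity vectors. The standalone sketch below reproduces
# just that bookkeeping with made-up joint names and sizes, so the meaning of
# rankInConfiguration / rankInVelocity is visible without any CORBA server.
joints = [("base_joint_xyz", 3, 3),   # (name, config size, number of DoFs)
          ("base_joint_SO3", 4, 3),
          ("elbow", 1, 1)]
rank_in_configuration = {}
rank_in_velocity = {}
config_rank = velocity_rank = 0
for name, config_size, ndof in joints:
    rank_in_configuration[name] = config_rank
    config_rank += config_size
    rank_in_velocity[name] = velocity_rank
    velocity_rank += ndof
print(rank_in_configuration)  # {'base_joint_xyz': 0, 'base_joint_SO3': 3, 'elbow': 7}
print(rank_in_velocity)       # {'base_joint_xyz': 0, 'base_joint_SO3': 3, 'elbow': 6}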
import os
import numpy as np
import flopy
ml = flopy.modflow.Modflow.load('l2a_2k.nam', version='mf2005', verbose=True)
delx = ml.dis.delr.array
dely = ml.dis.delc.array
# get data from the lst file
f = open('l2a_2k.lst', 'r')
for line in f:
if 'LAYER # ROW # COLUMN # LAKE # INTERFACE TYPE LAKEBED LEAKANCE' in line:
break
cdata = []
for idx, line in enumerate(f):
if (len(line.strip()) < 1):
break
cdata.append(line)
f.close()
tpth = 'mf5.conn.dat'
f = open(tpth, 'w')
for c in cdata:
f.write(c)
f.close()
dir_dict = {1:'HORIZONTAL',
2:'HORIZONTAL',
3:'HORIZONTAL',
4:'HORIZONTAL',
6:'VERTICAL'}
dtype = [('k', int), ('i', int), ('j', int),
         ('lake', int), ('itype', int),
         ('bedleak', float)]
cdata = np.loadtxt(tpth, dtype=dtype)
cdata['k'] -= 1
cdata['i'] -= 1
cdata['j'] -= 1
nlakes = np.unique(cdata['lake'])
print(nlakes)
lake_cnt = {}
for lake in nlakes:
lake_cnt[lake] = 0
print(lake_cnt)
dtype2 = [('iconn', int), ('belev', float), ('telev', float),
          ('dx', float), ('width', float)]
cdata2 = np.zeros((cdata.shape[0]), dtype=dtype2)
# fill cdata2
for idx in range(cdata.shape[0]):
k = cdata['k'][idx]
i = cdata['i'][idx]
j = cdata['j'][idx]
ilak = cdata['lake'][idx]
lake_cnt[ilak] += 1
itype = cdata['itype'][idx]
cdir = dir_dict[itype]
belev = 0.
telev = 0.
if cdir == 'HORIZONTAL':
if itype == 1 or itype == 2:
dx = 0.5 * delx[j]
width = dely[i]
elif itype == 3 or itype == 4:
dx = 0.5 * dely[i]
width = delx[j]
else:
dx = 0.
width = 0.
cdata2['iconn'][idx] = lake_cnt[ilak]
cdata2['belev'][idx] = belev
cdata2['telev'][idx] = telev
cdata2['dx'][idx] = dx
cdata2['width'][idx] = width
#
tpth = 'mf6.conn.dat'
f = open(tpth, 'w')
f.write('begin lakes\n')
c = '# lakeno strt lakeconn boundname'
f.write('{}\n'.format(c))
for lake in nlakes:
f.write(' LAKE {:10d}{:10.3g}{:10d} LAKE_{:03d}\n'.format(lake, 130., lake_cnt[lake], lake))
f.write('end lakes\n\n')
f.write('begin lake_connections\n')
# header
## lakeno iconn layer row column ctype bedleak belev telev dx width
c = '# lakeno iconn layer row ' + \
'column ctype bedleak belev '+ \
'telev dx width'
f.write('{}\n'.format(c))
# data
for idx in range(cdata.shape[0]):
itype = cdata['itype'][idx]
c = ' LAKE'
c += ' {:10d}{:10d}{:10d}{:10d}{:10d}'.format(cdata['lake'][idx],
cdata2['iconn'][idx],
cdata['k'][idx]+1,
cdata['i'][idx]+1,
cdata['j'][idx]+1)
c += '{:>15s} '.format(dir_dict[itype])
c += '{:10.3g}'.format(cdata['bedleak'][idx])
c += '{:10.3g}'.format(cdata2['belev'][idx])
c += '{:10.3g}'.format(cdata2['telev'][idx])
c += '{:10.3g}'.format(cdata2['dx'][idx])
c += '{:10.3g}'.format(cdata2['width'][idx])
f.write('{}\n'.format(c))
f.write('end lake_connections\n\n')
f.close()
| jdhughes-usgs/river-eel | examples/mf6/test045_lake2tr_dev/mf2005/mf6lakeconn.py | Python | lgpl-3.0 | 3,316 |
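# Small sketch of the dx/width rule applied in the loop above: horizontal lake
# connections use half of one cell spacing as the connection length and the
# other spacing as the width, depending on the face type (itype), while
# vertical connections (itype 6) get zeros. The spacings below are made up.
delr = [250.0, 250.0, 500.0]   # column widths (ml.dis.delr above)
delc = [100.0, 100.0]          # row heights  (ml.dis.delc above)
def connection_geometry(itype, i, j):
    if itype in (1, 2):        # horizontal, itype 1 or 2
        return 0.5 * delr[j], delc[i]
    elif itype in (3, 4):      # horizontal, itype 3 or 4
        return 0.5 * delc[i], delr[j]
    return 0.0, 0.0            # itype 6: vertical
print(connection_geometry(1, i=0, j=2))   # (250.0, 100.0)
print(connection_geometry(3, i=1, j=0))   # (50.0, 250.0)
print(connection_geometry(6, i=0, j=0))   # (0.0, 0.0)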
# Copyright (C) 2015-2022 by the RBniCS authors
#
# This file is part of RBniCS.
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from rbnics.problems.base import NonlinearProblem
from rbnics.problems.elliptic import EllipticProblem
from rbnics.backends import product, sum, transpose
NonlinearEllipticProblem_Base = NonlinearProblem(EllipticProblem)
class NonlinearEllipticProblem(NonlinearEllipticProblem_Base):
# Default initialization of members
def __init__(self, V, **kwargs):
# Call to parent
NonlinearEllipticProblem_Base.__init__(self, V, **kwargs)
# Form names for nonlinear problems
self.terms = ["a", "c", "dc", "f", "s"]
self.terms_order = {"a": 2, "c": 1, "dc": 2, "f": 1, "s": 1}
class ProblemSolver(NonlinearEllipticProblem_Base.ProblemSolver):
def residual_eval(self, solution):
problem = self.problem
assembled_operator = dict()
assembled_operator["a"] = sum(product(problem.compute_theta("a"), problem.operator["a"]))
assembled_operator["c"] = sum(product(problem.compute_theta("c"), problem.operator["c"]))
assembled_operator["f"] = sum(product(problem.compute_theta("f"), problem.operator["f"]))
return assembled_operator["a"] * solution + assembled_operator["c"] - assembled_operator["f"]
def jacobian_eval(self, solution):
problem = self.problem
assembled_operator = dict()
assembled_operator["a"] = sum(product(problem.compute_theta("a"), problem.operator["a"]))
assembled_operator["dc"] = sum(product(problem.compute_theta("dc"), problem.operator["dc"]))
return assembled_operator["a"] + assembled_operator["dc"]
# Perform a truth evaluation of the output
def _compute_output(self):
self._output = transpose(self._solution) * sum(product(self.compute_theta("s"), self.operator["s"]))
| mathLab/RBniCS | rbnics/problems/nonlinear_elliptic/nonlinear_elliptic_problem.py | Python | lgpl-3.0 | 1,932 |
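# The residual/Jacobian pair above has the generic Newton form
# r(u) = A(mu) u + c(u; mu) - f(mu) and J(u) = A(mu) + dc(u; mu). The toy
# numpy sketch below (not RBniCS code) shows how such a pair drives a Newton
# solve, with a componentwise cubic nonlinearity standing in for "c"/"dc".
import numpy as np
A = np.array([[2.0, -1.0], [-1.0, 2.0]])
f = np.array([1.0, 2.0])
def residual(u):
    return A @ u + u**3 - f
def jacobian(u):
    return A + np.diag(3.0 * u**2)
u = np.zeros(2)
for _ in range(20):
    delta = np.linalg.solve(jacobian(u), -residual(u))
    u += delta
    if np.linalg.norm(delta) < 1e-12:
        break
print(u, residual(u))  # the residual is ~0 at convergence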
from __future__ import division
from __future__ import print_function
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
from PIL import ImageEnhance
import nwcsaf
import numpy as np
from satpy import Scene, find_files_and_readers
from datetime import datetime, timedelta
from copy import deepcopy
import netCDF4
import subprocess
import sys
import inspect
import logging
LOG = logging.getLogger(__name__)
LOG.setLevel(50)
#CRITICAL 50 #ERROR 40 #WARNING 30 #INFO 20 #DEBUG 10 #NOTSET 0
import matplotlib.pyplot as plt
#from satpy.utils import debug_on
#debug_on()
##import warnings
#warnings.filterwarnings("ignore")
def get_last_SEVIRI_date(RSS, delay=0, time_slot=None):
    '''
    input: RSS
               logical variable (True or False) that specifies whether to return
               (RSS=True)  the last rapid scan observation date (every 5 min) or
               (RSS=False) the last full disk observation date (every 15 min)
           (delay=INT) number of minutes to subtract before searching for the date
               (useful if the data needs a few minutes to arrive)
           (time_slot) if not given, take the current time;
               otherwise search for the SEVIRI scanning time before the given time_slot
    output:
        date structure with the date of the last SEVIRI observation
    '''
from time import gmtime
LOG.info("*** start get_last_SEVIRI_date ("+inspect.getfile(inspect.currentframe())+")")
# if rapid scan service than 5min otherwise 15
if RSS:
nmin = 5
else:
nmin = 15
if (time_slot is None):
# get the current time
gmt = gmtime()
#print ("GMT time: "+ str(gmt))
# or alternatively
# utc = datetime.utcnow()
# convert to datetime format
t0 = datetime(gmt.tm_year, gmt.tm_mon, gmt.tm_mday, gmt.tm_hour, gmt.tm_min, 0)
LOG.debug(" current time = "+str(t0))
else:
        t0 = time_slot + timedelta(seconds=nmin*60) # we subtract one scanning time later, so we can add it here
LOG.debug(" reference time = "+str(t0))
# apply delay (if it usually takes 5 min for the data to arrive, use delay 5min)
if delay != 0:
t0 -= timedelta(minutes=delay)
LOG.debug(" applying delay "+str(delay)+" min delay, time = "+ str(t0))
LOG.debug(" round by scanning time "+str(nmin)+" min, RSS = "+str(RSS))
#tm_min2 = gmt.tm_min - (gmt.tm_min % nmin)
minute1 = t0.minute - (t0.minute % nmin)
# define current date rounded by one scan time
#date1 = datetime(gmt.tm_year, gmt.tm_mon, gmt.tm_mday, gmt.tm_hour, tm_min2 , 0)
t1 = datetime(t0.year, t0.month, t0.day, t0.hour, minute1, 0)
LOG.debug(" end time of last scan: "+str(t1))
    # subtracting one scan time (as the start time of scan is returned)
t1 -= timedelta(seconds=nmin*60)
LOG.info(" start time of last scan: "+str(t1))
return t1
def rewrite_xy_axis(netCDF_file):
print("... re-place values on the x and y axis with lon/lat values in "+netCDF_file)
ds = netCDF4.Dataset(netCDF_file, 'r+')
lat = ds["latitude"][:,0]
ds["y"][:] = lat.data
ds["y"].units = 'Degrees North'
lon = ds["longitude"][0,:]
ds["x"][:] = lon.data
ds["x"].units = 'Degrees East'
ds.close()
###############################################################################################
###############################################################################################
if __name__ == '__main__':
sat='MSG4'
if len(sys.argv) == 1:
start_time = get_last_SEVIRI_date(False, delay=6)
base_dir_sat = "/data/cinesat/in/eumetcast1/"
base_dir_nwc = "/data/cinesat/in/eumetcast1/"
#base_dir_nwc = "/data/cinesat/in/safnwc_v2016/"
elif len(sys.argv) == 6:
year = int(sys.argv[1])
month = int(sys.argv[2])
day = int(sys.argv[3])
hour = int(sys.argv[4])
minute = int(sys.argv[5])
start_time = datetime(year, month, day, hour, minute)
base_dir_sat = start_time.strftime("/data/COALITION2/database/meteosat/radiance_HRIT/case-studies/%Y/%m/%d/")
#base_dir_sat = start_time.strftime("/data/COALITION2/database/meteosat/radiance_HRIT/%Y/%m/%d/")
base_dir_nwc = start_time.strftime("/data/OWARNA/hau/database/meteosat/SAFNWC/%Y/%m/%d/CT/")
else:
start_time = datetime(2020, 10, 7, 16, 0)
base_dir_sat = start_time.strftime("/data/COALITION2/database/meteosat/radiance_HRIT/%Y/%m/%d/")
base_dir_nwc = start_time.strftime("/data/COALITION2/database/meteosat/SAFNWC_v2016/%Y/%m/%d/CT/")
print("... processing time ", start_time)
show_interactively=False
save_black_white_png=False
print("")
print("")
print("*** Creating LSCL (low stratus confidence level) product")
print("")
# read MSG (full disk service) L2
#################################
print("... read "+sat+" L1.5 data")
print(" search for HRIT files in "+base_dir_sat)
files_sat = find_files_and_readers(sensor='seviri',
start_time=start_time, end_time=start_time,
base_dir=base_dir_sat,
reader='seviri_l1b_hrit')
files = deepcopy(files_sat['seviri_l1b_hrit'])
#print(" found SEVIRI files: ", files_sat)
for f in files:
if not (sat in f):
files_sat['seviri_l1b_hrit'].remove(f)
continue
if ("HRV" in f) or ("VIS006" in f) or ("VIS008" in f) or ("IR_016" in f) or ("IR_039" in f):
files_sat['seviri_l1b_hrit'].remove(f)
continue
if ("WV_062" in f) or ("WV_073" in f) or ("IR_097" in f) or ("IR_108" in f) or ("IR_134" in f):
files_sat['seviri_l1b_hrit'].remove(f)
continue
global_scene = Scene(reader="seviri_l1b_hrit", filenames=files_sat)
global_scene.load(['IR_087','IR_120'])
# read NWCSAF files
########################
print("... read "+sat+" NWCSAF CTTH")
print(" search for NWCSAF files in "+base_dir_nwc)
files_nwc = find_files_and_readers(sensor='seviri',
start_time=start_time, end_time=start_time,
base_dir=base_dir_nwc, reader='nwcsaf-geo')
print(" found NWCSAF files: ", files_nwc)
files = deepcopy(files_nwc['nwcsaf-geo'])
for f in files:
# remove files from other satellites
if not (sat in f):
files_nwc['nwcsaf-geo'].remove(f)
continue
# remove CTTH files
if ("CTTH" in f):
files_nwc['nwcsaf-geo'].remove(f)
continue
global_nwc = Scene(filenames=files_nwc)
global_nwc.load(['ct']) # "CT"
# loop over areas, resample and create products
# create netCDF file for area cosmo1
# create png file for area cosmo1_150 (50% more pixels)
############################################################
#for area in ['SeviriDisk00Cosmo',"cosmo1x150"]:
#for area in ['cosmo1', 'cosmo1eqc3km']:
for area in ['cosmo1eqc3km']:
#for area in ['cosmo1x150', 'cosmo1eqc3km']:
# resample MSG L2
##################
print("")
print("=======================")
print("resample to "+area)
local_scene = global_scene.resample(area)
# fake a new channel
print("fake a new channel")
local_scene['lscl'] = deepcopy(local_scene['IR_120'])
#local_scene['lscl'].wavelength=""
#local_scene['lscl'].standard_name="low_stratus_confidence_level"
#local_scene['lscl'].calibration="brightness_temperature_difference"
#print(local_scene['IR_120'])
#print(dir(local_scene['IR_120']))
#print(local_scene['IR_120'].standard_name)
#print(type(local_scene['IR_120'].standard_name))
#local_scene['lscl'].standard_name = "toa_brightness_temperature_difference"
#print(local_scene['lscl'])
##############################################
# calculate lscl "low stratus confidence level
# see MSc Thesis of Anna Ehrler (chapter 3.2.1 to 3.2.2)
##############################################
th_liquid_cloud = 1.8 # K
# cloud_confidence_range
ccr = 1.0 # K
local_scene['lscl'].values = (th_liquid_cloud - (local_scene['IR_120']-local_scene['IR_087']) - ccr) / (-2. * ccr)
#local_scene['lscl'].area_def = local_scene['IR_120'].area_def
# print(global_nwc)
local_nwc = global_nwc.resample(area)
# delete values for high clouds
###########################################
# !!! ONLY NWCSAF VERSION 2016 and 2018 !!!
# !!! Numbers are different for v2013
# ct:comment = "1: Cloud-free land; 2: Cloud-free sea; 3: Snow over land; 4: Sea ice; 5: Very low clouds;
# 6: Low clouds; 7: Mid-level clouds; 8: High opaque clouds; 9: Very high opaque clouds;
# 10: Fractional clouds; 11: High semitransparent thin clouds; 12: High semitransparent meanly thick clouds;
# 13: High semitransparent thick clouds; 14: High semitransparent above low or medium clouds; 15: High semitransparent above snow/ice" ;
for _ct_ in [7,8,9,10,11,12,13,14,15]:
print("replace cloud type",_ct_)
local_scene['lscl'].values = np.where(local_nwc['ct'].values==_ct_, np.nan, local_scene['lscl'].values)
if show_interactively:
fig, ax = plt.subplots(figsize=(13, 7))
pos = plt.imshow(local_scene['lscl'].values, vmin=0, vmax=1)
fig.colorbar(pos)
plt.title(start_time.strftime('low stratus confidence level, %y-%m-%d %H:%MUTC'))
plt.show()
if save_black_white_png:
local_scene.save_dataset('lscl', './lscl_'+area+'.png')
print(dir(local_scene.save_dataset))
print('display ./lscl_'+area+'.png &')
# save png file for SATLive
##############################
if area=="cosmo1x150" or area=="cosmo1":
png_file = start_time.strftime('/data/cinesat/out/MSG_lscl-'+area+'_%y%m%d%H%M.png')
from trollimage.colormap import spectral, greys, ylorrd, rdgy
imgarr = np.array(local_scene['lscl'].data)
from trollimage.image import Image as Timage
img = Timage(imgarr, mode="L")
img.colorize( rdgy.reverse() )
img.save(png_file)
# local_scene.save_dataset( 'lscl', png_file )
from pyresample.utils import load_area
swiss = load_area("/opt/users/hau/monti-pytroll/etc/areas.def", area)
from pycoast import ContourWriterAGG
cw = ContourWriterAGG('/opt/users/common/shapes')
cw.add_borders_to_file(png_file, swiss, outline="green", resolution='i', level=3, width=2)
img = Image.open(png_file)
draw = ImageDraw.Draw(img)
draw.rectangle([(0, 0), (img.size[0]*0.7, 25)], fill=(0,0,0,200))
font = ImageFont.truetype("/usr/openv/java/jre/lib/fonts/LucidaTypewriterBold.ttf", 18)
title = start_time.strftime(" "+sat[0:3]+"-"+sat[3]+', %y-%m-%d %H:%MUTC, low stratus confidence level')
draw.text( (1, 1), title, "green" , font=font) # (255,255,255)
img.save(png_file)
print("display " + png_file +" &")
if area=="cosmo1x150":
scpID="-i ~/.ssh/id_rsa_las"
scpOutputDir="las@zueub241:/srn/las/www/satellite/DATA/MSG_"+"lscl"+"-"+area+"_/"
scp_command = "/usr/bin/scp "+scpID+" "+png_file+" "+scpOutputDir+" 2>&1 &"
print(scp_command)
subprocess.call(scp_command, shell=True)
elif area=="cosmo1":
scpID="-i ~/.ssh/id_rsa_tsa"
scpOutputDir="[email protected]:/scratch/hamann/DayNightFog/"
print("... scp "+png_file+" to "+scpOutputDir)
subprocess.call("/usr/bin/scp "+scpID+" "+png_file+" "+scpOutputDir+" 2>&1 &", shell=True)
# save netCDF file for APN
##############################
if area=='cosmo1eqc3km':
netCDF_file = start_time.strftime('/data/cinesat/out/MSG_lscl-'+area+'_%y%m%d%H%M.nc')
print("... save result in: "+ netCDF_file)
print("include_lonlats=True")
local_scene.save_dataset('lscl', netCDF_file, include_lonlats=True, writer='cf',
exclude_attrs=['raw_metadata'], epoch='seconds since 1970-01-01 00:00:00') #, writer='cf'
#import netCDF4 as nc
#file_input = nc.Dataset(netCDF_file, 'r+')
#print(file_input.variables.keys())
#lonlats = local_scene['lscl'].area.get_lonlats()
#lons = file_input.createVariable('longitues', 'single', ('y', 'x'))
#lats = file_input.createVariable('latitudes', 'single', ('y', 'x'))
#lons[:] = lonlats[0][:,:]
#lats[:] = lonlats[1][:,:]
#local_scene.save_datasets(['lscl'], filename=netCDF_file, include_lonlats=True) #, writer='cf'
print("... ncview " + netCDF_file +" &")
rewrite_xy_axis(netCDF_file)
scpID="-i ~/.ssh/id_rsa_tsa"
#scpOutputDir="[email protected]:/scratch/hamann/DayNightFog/"
scpOutputDir="[email protected]:/scratch/hamann/DayNightFog_Filter-CT-7-15/"
print("... scp "+netCDF_file+" to "+scpOutputDir)
subprocess.call("/usr/bin/scp "+scpID+" "+netCDF_file+" "+scpOutputDir+" 2>&1 &", shell=True)
| meteoswiss-mdr/monti-pytroll | scripts/demo_satpy_fog.py | Python | lgpl-3.0 | 13,803 |
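# The heart of the product above is the 12.0-8.7 um brightness-temperature
# difference test. The numpy-only sketch below applies the same lscl formula
# to toy arrays and blanks the same NWC SAF cloud-type codes (7-15), without
# satpy; the numbers are invented.
import numpy as np
ir_087 = np.array([[265.0, 270.0], [280.0, 275.0]])   # 8.7 um BT [K]
ir_120 = np.array([[266.2, 272.5], [281.0, 275.4]])   # 12.0 um BT [K]
cloud_type = np.array([[5, 6], [9, 12]])               # NWC SAF 'ct' codes
th_liquid_cloud = 1.8   # K
ccr = 1.0               # K, cloud confidence range
lscl = (th_liquid_cloud - (ir_120 - ir_087) - ccr) / (-2. * ccr)
lscl = np.where(np.isin(cloud_type, range(7, 16)), np.nan, lscl)
print(lscl)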
# IfcOpenShell - IFC toolkit and geometry engine
# Copyright (C) 2021 Dion Moult <[email protected]>
#
# This file is part of IfcOpenShell.
#
# IfcOpenShell is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# IfcOpenShell is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with IfcOpenShell. If not, see <http://www.gnu.org/licenses/>.
class Usecase:
def __init__(self, file, **settings):
self.file = file
self.settings = {"ifc_class": None}
for key, value in settings.items():
self.settings[key] = value
def execute(self):
return self.file.create_entity(self.settings["ifc_class"])
| IfcOpenShell/IfcOpenShell | src/ifcopenshell-python/ifcopenshell/api/profile/add_parameterized_profile.py | Python | lgpl-3.0 | 1,101 |
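# Sketch of how the Usecase above is driven. The stub file object below only
# mimics create_entity so the calling convention is visible without the
# library installed; with ifcopenshell available one would normally go through
# ifcopenshell.api.run("profile.add_parameterized_profile", model,
# ifc_class="IfcRectangleProfileDef") instead. Assumes the Usecase class above
# is in scope.
class FakeIfcFile:
    def create_entity(self, ifc_class, **attributes):
        return {"type": ifc_class, **attributes}
profile = Usecase(FakeIfcFile(), ifc_class="IfcRectangleProfileDef").execute()
print(profile)  # {'type': 'IfcRectangleProfileDef'}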
import tornado.testing
from testexample import ExampleApp
class TestExampleApp(tornado.testing.AsyncHTTPTestCase,
tornado.testing.LogTrapTestCase):
def get_app(self):
return ExampleApp()
def test_home(self):
response = self.fetch('/')
self.assertEqual(response.code, 200)
def test_ticker(self):
response = self.fetch('/ticker')
self.assertEqual(response.code, 200)
| wmalinowski/test-example | testexample/test/test_example_app.py | Python | unlicense | 442 |
low_primes = {1,3,5,7,11,13}
low_primes.add(17) # The set now contains {1,3,5,7,11,13,17}
low_primes.update({19,23},{2,29}) # Now {1,2,3,5,7,11,13,17,19,23,29}; sets are unordered, even though small ints often display in ascending order
while low_primes:
    print(low_primes.pop()/3) # pop() removes and returns an arbitrary element; the loop keeps popping until the set is empty
| joshavenue/python_notebook | notebook2/set_1.py | Python | unlicense | 366 |
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: utf_8.py
""" Python 'utf-8' Codec
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
encode = codecs.utf_8_encode
def decode(input, errors='strict'):
return codecs.utf_8_decode(input, errors, True)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.utf_8_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
_buffer_decode = codecs.utf_8_decode
class StreamWriter(codecs.StreamWriter):
encode = codecs.utf_8_encode
class StreamReader(codecs.StreamReader):
decode = codecs.utf_8_decode
def getregentry():
    return codecs.CodecInfo(name='utf-8', encode=encode, decode=decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter)
| DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/encodings/utf_8.py | Python | unlicense | 1,084 |
#!/usr/bin/env python
#encoding=utf-8
import os
try:
f = file('blah','r')
except IOError,e:
print 'could not open file:',e
def safe_float(obj):
try:
return float(obj)
except ValueError:
pass
ccfile = None
log = file('log.txt','w+')
try:
ccfile = file('card.txt','r')
txns = ccfile.readlines()
ccfile.close()
except IOError:
log.write('no txns this month%s' % os.linesep)
finally:
log.close()
if ccfile:
ccfile.close()
| licongyu95/learning_python | core_python_programming/cap10/cap10.py | Python | unlicense | 487 |
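# A Python 3 rewrite of the card-reading snippet above (not from the book):
# with-statements close the files automatically, replacing the explicit
# close() calls and the try/finally bookkeeping. Paths are the same
# placeholder filenames used above.
import os
def read_transactions(path="card.txt", log_path="log.txt"):
    try:
        with open(path) as ccfile:
            return ccfile.readlines()
    except IOError:
        with open(log_path, "a") as log:
            log.write("no txns this month%s" % os.linesep)
        return []
txns = read_transactions()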
"""
Copyright 2017-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from streamalert.shared import ALERT_PROCESSOR_NAME
from streamalert_cli.terraform.common import infinitedict
from streamalert_cli.terraform.lambda_module import generate_lambda
def generate_alert_processor(config):
"""Generate Terraform for the Alert Processor
Args:
config (dict): The loaded config from the 'conf/' directory
Returns:
dict: Alert Processor dict to be marshaled to JSON
"""
prefix = config['global']['account']['prefix']
result = infinitedict()
# Set variables for the IAM permissions module
result['module']['alert_processor_iam'] = {
'source': './modules/tf_alert_processor_iam',
'account_id': config['global']['account']['aws_account_id'],
'region': config['global']['account']['region'],
'prefix': prefix,
'role_id': '${module.alert_processor_lambda.role_id}',
'kms_key_arn': '${aws_kms_key.streamalert_secrets.arn}',
'sse_kms_key_arn': '${aws_kms_key.server_side_encryption.arn}',
'output_lambda_functions': [
# Strip qualifiers: only the function name is needed for the IAM permissions
func.split(':')[0] for func in list(config['outputs'].get('aws-lambda', {}).values())
],
'output_s3_buckets': list(config['outputs'].get('aws-s3', {}).values()),
'output_sns_topics': list(config['outputs'].get('aws-sns', {}).values()),
'output_sqs_queues': list(config['outputs'].get('aws-sqs', {}).values())
}
# Set variables for the Lambda module
result['module']['alert_processor_lambda'] = generate_lambda(
'{}_streamalert_{}'.format(config['global']['account']['prefix'], ALERT_PROCESSOR_NAME),
'streamalert.alert_processor.main.handler',
config['lambda']['alert_processor_config'],
config,
environment={
'ALERTS_TABLE': '{}_streamalert_alerts'.format(prefix),
'AWS_ACCOUNT_ID': config['global']['account']['aws_account_id'],
'STREAMALERT_PREFIX': prefix
}
)
return result
| airbnb/streamalert | streamalert_cli/terraform/alert_processor.py | Python | apache-2.0 | 2,636 |
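# generate_alert_processor above relies on infinitedict so nested keys can be
# assigned without creating the intermediate levels first. The stand-in below
# (a self-recursive defaultdict) shows that behaviour and how the result
# marshals to the nested JSON Terraform expects; the real helper lives in
# streamalert_cli.terraform.common, this is only an illustration.
import json
from collections import defaultdict
def infinitedict_sketch():
    return defaultdict(infinitedict_sketch)
result = infinitedict_sketch()
result['module']['alert_processor_iam']['prefix'] = 'acme'
result['module']['alert_processor_iam']['region'] = 'us-east-1'
print(json.dumps(result, indent=2))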
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-22 22:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('socialnet', '0029_auto_20161121_0543'),
]
operations = [
migrations.AddField(
model_name='author',
name='displayname',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
| CMPUT404F16T06/CMPUT404Project | mysite/socialnet/migrations/0030_author_displayname.py | Python | apache-2.0 | 480 |
class Coordinates:
""" WhiteSource model for artifact's coordinates. """
def __init__(self, group_id, artifact_id, version_id):
self.groupId = group_id
self.artifactId = artifact_id
self.versionId = version_id
def create_project_coordinates(distribution):
""" Creates a 'Coordinates' instance for the user package"""
dist_name = distribution.get_name()
dist_version = distribution.get_version()
coordinates = Coordinates(group_id=None, artifact_id=dist_name, version_id=dist_version)
    return coordinates
| whitesource/python-plugin | agent/api/model/Coordinates.py | Python | apache-2.0 | 559 |
# Helper class for working with the session
class SessionHelper:
def __init__(self, app):
self.app = app
    # Log in to the site
def login(self, username, password):
wd = self.app.wd
self.app.open_home_page()
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys(username)
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys(password)
wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
    # Log out of the site
def logout(self):
wd = self.app.wd
wd.find_element_by_link_text("Logout").click()
    # Destroy the fixture after the test has finished
def destroy(self):
self.app.wd.quit()
    # Ensure the user is logged out
def ensure_logout(self):
wd = self.app.wd
if self.is_logged_in():
self.logout()
    # Check whether the user is logged in
def is_logged_in(self):
wd = self.app.wd
        # If the page contains an element with the text "Logout", the user is logged in
return len(wd.find_elements_by_link_text("Logout")) > 0
    # Check the username under which the login happened
def is_logged_in_as(self, username):
wd = self.app.wd
        # If the page contains an element whose text matches the username, the user is logged in under it
return wd.find_element_by_xpath("//div/div[1]/form/b").text == "("+username+")"
    # Ensure the expected login state during the test run
    def ensure_login(self, username, password):
        wd = self.app.wd
        # If the user is already logged in
        if self.is_logged_in():
            # And is logged in under the expected username
            if self.is_logged_in_as(username):
                # Then there is nothing to do
                return
            else:
                # Otherwise log out so that we can log in under the right user
                self.logout()
        self.login(username, password)
| kochetov-a/python_training | fixture/session.py | Python | apache-2.0 | 2,654 |
"""This test checks that Nevergrad is functional.
It also checks that it is usable with a separate scheduler.
"""
import ray
from ray.tune import run
from ray.tune.schedulers import AsyncHyperBandScheduler
from ray.tune.suggest.nevergrad import NevergradSearch
def easy_objective(config, reporter):
import time
time.sleep(0.2)
for i in range(config["iterations"]):
reporter(
timesteps_total=i,
mean_loss=(config["height"] - 14)**2 - abs(config["width"] - 3))
time.sleep(0.02)
if __name__ == "__main__":
import argparse
from nevergrad.optimization import optimizerlib
parser = argparse.ArgumentParser()
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing")
args, _ = parser.parse_known_args()
ray.init()
config = {
"num_samples": 10 if args.smoke_test else 50,
"config": {
"iterations": 100,
},
"stop": {
"timesteps_total": 100
}
}
instrumentation = 2
parameter_names = ["height", "width"]
# With nevergrad v0.2.0+ the following is also possible:
# from nevergrad import instrumentation as inst
# instrumentation = inst.Instrumentation(
# height=inst.var.Array(1).bounded(0, 200).asfloat(),
# width=inst.var.OrderedDiscrete([0, 10, 20, 30, 40, 50]))
# parameter_names = None # names are provided by the instrumentation
optimizer = optimizerlib.OnePlusOne(instrumentation)
algo = NevergradSearch(
optimizer,
parameter_names,
max_concurrent=4,
metric="mean_loss",
mode="min")
scheduler = AsyncHyperBandScheduler(metric="mean_loss", mode="min")
run(easy_objective,
name="nevergrad",
search_alg=algo,
scheduler=scheduler,
**config)
| stephanie-wang/ray | python/ray/tune/examples/nevergrad_example.py | Python | apache-2.0 | 1,858 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for the bfloat16 Python type."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import itertools
import math
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
# pylint: disable=unused-import,g-bad-import-order
from tensorflow.python.framework import dtypes
from tensorflow.python.lib.core import _pywrap_bfloat16
from tensorflow.python.platform import test
bfloat16 = _pywrap_bfloat16.TF_bfloat16_type()
def numpy_assert_allclose(a, b, **kwargs):
a = a.astype(np.float32) if a.dtype == bfloat16 else a
b = b.astype(np.float32) if b.dtype == bfloat16 else b
return np.testing.assert_allclose(a, b, **kwargs)
epsilon = float.fromhex("1.0p-7")
# Values that should round trip exactly to float and back.
FLOAT_VALUES = [
0.0, 1.0, -1, 0.5, -0.5, epsilon, 1.0 + epsilon, 1.0 - epsilon,
-1.0 - epsilon, -1.0 + epsilon, 3.5, 42.0, 255.0, 256.0,
float("inf"),
float("-inf"),
float("nan")
]
class Bfloat16Test(parameterized.TestCase):
"""Tests the non-numpy Python methods of the bfloat16 type."""
def testRoundTripToFloat(self):
for v in FLOAT_VALUES:
np.testing.assert_equal(v, float(bfloat16(v)))
def testRoundTripNumpyTypes(self):
for dtype in [np.float16, np.float32, np.float64]:
np.testing.assert_equal(-3.75, dtype(bfloat16(dtype(-3.75))))
np.testing.assert_equal(1.5, float(bfloat16(dtype(1.5))))
np.testing.assert_equal(4.5, dtype(bfloat16(np.array(4.5, dtype))))
np.testing.assert_equal(
np.array([2, 5, -1], bfloat16), bfloat16(np.array([2, 5, -1], dtype)))
def testRoundTripToInt(self):
for v in [-256, -255, -34, -2, -1, 0, 1, 2, 10, 47, 128, 255, 256, 512]:
self.assertEqual(v, int(bfloat16(v)))
# pylint: disable=g-complex-comprehension
@parameterized.named_parameters(({
"testcase_name": "_" + dtype.__name__,
"dtype": dtype
} for dtype in [bfloat16, np.float16, np.float32, np.float64]))
def testRoundTripToNumpy(self, dtype):
for v in FLOAT_VALUES:
np.testing.assert_equal(v, bfloat16(dtype(v)))
np.testing.assert_equal(v, dtype(bfloat16(dtype(v))))
np.testing.assert_equal(v, dtype(bfloat16(np.array(v, dtype))))
if dtype != bfloat16:
np.testing.assert_equal(
np.array(FLOAT_VALUES, dtype),
bfloat16(np.array(FLOAT_VALUES, dtype)).astype(dtype))
def testStr(self):
self.assertEqual("0", str(bfloat16(0.0)))
self.assertEqual("1", str(bfloat16(1.0)))
self.assertEqual("-3.5", str(bfloat16(-3.5)))
self.assertEqual("0.0078125", str(bfloat16(float.fromhex("1.0p-7"))))
self.assertEqual("inf", str(bfloat16(float("inf"))))
self.assertEqual("-inf", str(bfloat16(float("-inf"))))
self.assertEqual("nan", str(bfloat16(float("nan"))))
def testRepr(self):
self.assertEqual("0", repr(bfloat16(0)))
self.assertEqual("1", repr(bfloat16(1)))
self.assertEqual("-3.5", repr(bfloat16(-3.5)))
self.assertEqual("0.0078125", repr(bfloat16(float.fromhex("1.0p-7"))))
self.assertEqual("inf", repr(bfloat16(float("inf"))))
self.assertEqual("-inf", repr(bfloat16(float("-inf"))))
self.assertEqual("nan", repr(bfloat16(float("nan"))))
def testHash(self):
self.assertEqual(0, hash(bfloat16(0.0)))
self.assertEqual(0x3f80, hash(bfloat16(1.0)))
self.assertEqual(0x7fc0, hash(bfloat16(float("nan"))))
# Tests for Python operations
def testNegate(self):
for v in FLOAT_VALUES:
np.testing.assert_equal(-v, float(-bfloat16(v)))
def testAdd(self):
np.testing.assert_equal(0, float(bfloat16(0) + bfloat16(0)))
np.testing.assert_equal(1, float(bfloat16(1) + bfloat16(0)))
np.testing.assert_equal(0, float(bfloat16(1) + bfloat16(-1)))
np.testing.assert_equal(5.5, float(bfloat16(2) + bfloat16(3.5)))
np.testing.assert_equal(1.25, float(bfloat16(3.5) + bfloat16(-2.25)))
np.testing.assert_equal(
float("inf"), float(bfloat16(float("inf")) + bfloat16(-2.25)))
np.testing.assert_equal(
float("-inf"), float(bfloat16(float("-inf")) + bfloat16(-2.25)))
self.assertTrue(math.isnan(float(bfloat16(3.5) + bfloat16(float("nan")))))
# Test type promotion against Numpy scalar values.
self.assertEqual(np.float32, type(bfloat16(3.5) + np.float16(2.25)))
self.assertEqual(np.float32, type(np.float16(3.5) + bfloat16(2.25)))
self.assertEqual(np.float32, type(bfloat16(3.5) + np.float32(2.25)))
self.assertEqual(np.float32, type(np.float32(3.5) + bfloat16(2.25)))
self.assertEqual(np.float64, type(bfloat16(3.5) + np.float64(2.25)))
self.assertEqual(np.float64, type(np.float64(3.5) + bfloat16(2.25)))
self.assertEqual(np.float64, type(bfloat16(3.5) + float(2.25)))
self.assertEqual(np.float64, type(float(3.5) + bfloat16(2.25)))
self.assertEqual(np.float32,
type(bfloat16(3.5) + np.array(2.25, np.float32)))
self.assertEqual(np.float32,
type(np.array(3.5, np.float32) + bfloat16(2.25)))
def testSub(self):
np.testing.assert_equal(0, float(bfloat16(0) - bfloat16(0)))
np.testing.assert_equal(1, float(bfloat16(1) - bfloat16(0)))
np.testing.assert_equal(2, float(bfloat16(1) - bfloat16(-1)))
np.testing.assert_equal(-1.5, float(bfloat16(2) - bfloat16(3.5)))
np.testing.assert_equal(5.75, float(bfloat16(3.5) - bfloat16(-2.25)))
np.testing.assert_equal(
float("-inf"), float(bfloat16(-2.25) - bfloat16(float("inf"))))
np.testing.assert_equal(
float("inf"), float(bfloat16(-2.25) - bfloat16(float("-inf"))))
self.assertTrue(math.isnan(float(bfloat16(3.5) - bfloat16(float("nan")))))
def testMul(self):
np.testing.assert_equal(0, float(bfloat16(0) * bfloat16(0)))
np.testing.assert_equal(0, float(bfloat16(1) * bfloat16(0)))
np.testing.assert_equal(-1, float(bfloat16(1) * bfloat16(-1)))
np.testing.assert_equal(-7.875, float(bfloat16(3.5) * bfloat16(-2.25)))
np.testing.assert_equal(
float("-inf"), float(bfloat16(float("inf")) * bfloat16(-2.25)))
np.testing.assert_equal(
float("inf"), float(bfloat16(float("-inf")) * bfloat16(-2.25)))
self.assertTrue(math.isnan(float(bfloat16(3.5) * bfloat16(float("nan")))))
def testDiv(self):
self.assertTrue(math.isnan(float(bfloat16(0) / bfloat16(0))))
np.testing.assert_equal(float("inf"), float(bfloat16(1) / bfloat16(0)))
np.testing.assert_equal(-1, float(bfloat16(1) / bfloat16(-1)))
np.testing.assert_equal(-1.75, float(bfloat16(3.5) / bfloat16(-2)))
np.testing.assert_equal(
float("-inf"), float(bfloat16(float("inf")) / bfloat16(-2.25)))
np.testing.assert_equal(
float("inf"), float(bfloat16(float("-inf")) / bfloat16(-2.25)))
self.assertTrue(math.isnan(float(bfloat16(3.5) / bfloat16(float("nan")))))
def testLess(self):
for v in FLOAT_VALUES:
for w in FLOAT_VALUES:
self.assertEqual(v < w, bfloat16(v) < bfloat16(w))
def testLessEqual(self):
for v in FLOAT_VALUES:
for w in FLOAT_VALUES:
self.assertEqual(v <= w, bfloat16(v) <= bfloat16(w))
def testGreater(self):
for v in FLOAT_VALUES:
for w in FLOAT_VALUES:
self.assertEqual(v > w, bfloat16(v) > bfloat16(w))
def testGreaterEqual(self):
for v in FLOAT_VALUES:
for w in FLOAT_VALUES:
self.assertEqual(v >= w, bfloat16(v) >= bfloat16(w))
def testEqual(self):
for v in FLOAT_VALUES:
for w in FLOAT_VALUES:
self.assertEqual(v == w, bfloat16(v) == bfloat16(w))
def testNotEqual(self):
for v in FLOAT_VALUES:
for w in FLOAT_VALUES:
self.assertEqual(v != w, bfloat16(v) != bfloat16(w))
def testNan(self):
a = np.isnan(bfloat16(float("nan")))
self.assertTrue(a)
numpy_assert_allclose(np.array([1.0, a]), np.array([1.0, a]))
a = np.array([bfloat16(1.34375),
bfloat16(1.4375),
bfloat16(float("nan"))],
dtype=bfloat16)
b = np.array(
[bfloat16(1.3359375),
bfloat16(1.4375),
bfloat16(float("nan"))],
dtype=bfloat16)
numpy_assert_allclose(
a, b, rtol=0.1, atol=0.1, equal_nan=True, err_msg="", verbose=True)
def testSort(self):
values_to_sort = np.float32(FLOAT_VALUES)
sorted_f32 = np.sort(values_to_sort)
sorted_bf16 = np.sort(values_to_sort.astype(bfloat16))
np.testing.assert_equal(sorted_f32, np.float32(sorted_bf16))
BinaryOp = collections.namedtuple("BinaryOp", ["op"])
UNARY_UFUNCS = [
np.negative, np.positive, np.absolute, np.fabs, np.rint, np.sign,
np.conjugate, np.exp, np.exp2, np.expm1, np.log, np.log10, np.log1p,
np.log2, np.sqrt, np.square, np.cbrt, np.reciprocal, np.sin, np.cos, np.tan,
np.arcsin, np.arccos, np.arctan, np.sinh, np.cosh, np.tanh, np.arcsinh,
np.arccosh, np.arctanh, np.deg2rad, np.rad2deg, np.floor, np.ceil, np.trunc
]
BINARY_UFUNCS = [
np.add, np.subtract, np.multiply, np.divide, np.logaddexp, np.logaddexp2,
np.floor_divide, np.power, np.remainder, np.fmod, np.heaviside, np.arctan2,
np.hypot, np.maximum, np.minimum, np.fmax, np.fmin, np.copysign
]
BINARY_PREDICATE_UFUNCS = [
np.equal, np.not_equal, np.less, np.greater, np.less_equal,
np.greater_equal, np.logical_and, np.logical_or, np.logical_xor
]
class Bfloat16NumPyTest(parameterized.TestCase):
"""Tests the NumPy integration of the bfloat16 type."""
def testDtype(self):
self.assertEqual(bfloat16, np.dtype(bfloat16))
def testDeepCopyDoesNotAlterHash(self):
# For context, see https://github.com/google/jax/issues/4651. If the hash
# value of the type descriptor is not initialized correctly, a deep copy
# can change the type hash.
dtype = np.dtype(bfloat16)
h = hash(dtype)
_ = copy.deepcopy(dtype)
self.assertEqual(h, hash(dtype))
def testArray(self):
x = np.array([[1, 2, 3]], dtype=bfloat16)
self.assertEqual(bfloat16, x.dtype)
self.assertEqual("[[1 2 3]]", str(x))
np.testing.assert_equal(x, x)
numpy_assert_allclose(x, x)
self.assertTrue((x == x).all())
def testComparisons(self):
x = np.array([401408, 7, -32], dtype=np.float32)
bx = x.astype(bfloat16)
y = np.array([82432, 7, 0], dtype=np.float32)
by = y.astype(bfloat16)
np.testing.assert_equal(x == y, bx == by)
np.testing.assert_equal(x != y, bx != by)
np.testing.assert_equal(x < y, bx < by)
np.testing.assert_equal(x > y, bx > by)
np.testing.assert_equal(x <= y, bx <= by)
np.testing.assert_equal(x >= y, bx >= by)
def testEqual2(self):
a = np.array([401408], bfloat16)
b = np.array([82432], bfloat16)
self.assertFalse(a.__eq__(b))
def testCasts(self):
for dtype in [
np.float16, np.float32, np.float64, np.int8, np.int16, np.int32,
np.int64, np.complex64, np.complex128, np.uint8, np.uint16, np.uint32,
np.uint64, np.intc, np.int_, np.longlong, np.uintc, np.ulonglong
]:
x = np.array([[1, 2, 3]], dtype=dtype)
y = x.astype(bfloat16)
z = y.astype(dtype)
self.assertTrue(np.all(x == y))
self.assertEqual(bfloat16, y.dtype)
self.assertTrue(np.all(x == z))
self.assertEqual(dtype, z.dtype)
def testConformNumpyComplex(self):
for dtype in [np.complex64, np.complex128]:
x = np.array([1.1, 2.2 + 2.2j, 3.3], dtype=dtype)
y_np = x.astype(np.float32)
y_tf = x.astype(bfloat16)
numpy_assert_allclose(y_np, y_tf, atol=2e-2)
z_np = y_np.astype(dtype)
z_tf = y_tf.astype(dtype)
numpy_assert_allclose(z_np, z_tf, atol=2e-2)
def testArange(self):
np.testing.assert_equal(
np.arange(100, dtype=np.float32).astype(bfloat16),
np.arange(100, dtype=bfloat16))
np.testing.assert_equal(
np.arange(-10.5, 7.8, 0.5, dtype=np.float32).astype(bfloat16),
np.arange(-10.5, 7.8, 0.5, dtype=bfloat16))
np.testing.assert_equal(
np.arange(-0., -7., -0.25, dtype=np.float32).astype(bfloat16),
np.arange(-0., -7., -0.25, dtype=bfloat16))
np.testing.assert_equal(
np.arange(-16384., 16384., 64., dtype=np.float32).astype(bfloat16),
np.arange(-16384., 16384., 64., dtype=bfloat16))
# pylint: disable=g-complex-comprehension
@parameterized.named_parameters(({
"testcase_name": "_" + op.__name__,
"op": op
} for op in UNARY_UFUNCS))
def testUnaryUfunc(self, op):
rng = np.random.RandomState(seed=42)
x = rng.randn(3, 7, 10).astype(bfloat16)
numpy_assert_allclose(
op(x).astype(np.float32), op(x.astype(np.float32)), rtol=1e-2)
@parameterized.named_parameters(({
"testcase_name": "_" + op.__name__,
"op": op
} for op in BINARY_UFUNCS))
def testBinaryUfunc(self, op):
rng = np.random.RandomState(seed=42)
x = rng.randn(3, 7, 10).astype(bfloat16)
y = rng.randn(4, 1, 7, 10).astype(bfloat16)
numpy_assert_allclose(
op(x, y).astype(np.float32),
op(x.astype(np.float32), y.astype(np.float32)),
rtol=1e-2)
@parameterized.named_parameters(({
"testcase_name": "_" + op.__name__,
"op": op
} for op in BINARY_PREDICATE_UFUNCS))
def testBinaryPredicateUfunc(self, op):
rng = np.random.RandomState(seed=42)
x = rng.randn(3, 7).astype(bfloat16)
y = rng.randn(4, 1, 7).astype(bfloat16)
np.testing.assert_equal(
op(x, y), op(x.astype(np.float32), y.astype(np.float32)))
@parameterized.named_parameters(({
"testcase_name": "_" + op.__name__,
"op": op
} for op in [np.isfinite, np.isinf, np.isnan, np.signbit, np.logical_not]))
def testPredicateUfunc(self, op):
rng = np.random.RandomState(seed=42)
shape = (3, 7, 10)
posinf_flips = rng.rand(*shape) < 0.1
neginf_flips = rng.rand(*shape) < 0.1
nan_flips = rng.rand(*shape) < 0.1
vals = rng.randn(*shape)
vals = np.where(posinf_flips, np.inf, vals)
vals = np.where(neginf_flips, -np.inf, vals)
vals = np.where(nan_flips, np.nan, vals)
vals = vals.astype(bfloat16)
np.testing.assert_equal(op(vals), op(vals.astype(np.float32)))
def testDivmod(self):
rng = np.random.RandomState(seed=42)
x = rng.randn(3, 7).astype(bfloat16)
y = rng.randn(4, 1, 7).astype(bfloat16)
o1, o2 = np.divmod(x, y)
e1, e2 = np.divmod(x.astype(np.float32), y.astype(np.float32))
numpy_assert_allclose(o1, e1, rtol=1e-2)
numpy_assert_allclose(o2, e2, rtol=1e-2)
def testModf(self):
rng = np.random.RandomState(seed=42)
x = rng.randn(3, 7).astype(bfloat16)
o1, o2 = np.modf(x)
e1, e2 = np.modf(x.astype(np.float32))
numpy_assert_allclose(o1.astype(np.float32), e1, rtol=1e-2)
numpy_assert_allclose(o2.astype(np.float32), e2, rtol=1e-2)
def testLdexp(self):
rng = np.random.RandomState(seed=42)
x = rng.randn(3, 7).astype(bfloat16)
y = rng.randint(-50, 50, (1, 7))
numpy_assert_allclose(
np.ldexp(x, y).astype(np.float32),
np.ldexp(x.astype(np.float32), y),
rtol=1e-2,
atol=1e-6)
def testFrexp(self):
rng = np.random.RandomState(seed=42)
x = rng.randn(3, 7).astype(bfloat16)
mant1, exp1 = np.frexp(x)
mant2, exp2 = np.frexp(x.astype(np.float32))
np.testing.assert_equal(exp1, exp2)
numpy_assert_allclose(mant1, mant2, rtol=1e-2)
def testNextAfter(self):
one = np.array(1., dtype=bfloat16)
two = np.array(2., dtype=bfloat16)
zero = np.array(0., dtype=bfloat16)
nan = np.array(np.nan, dtype=bfloat16)
np.testing.assert_equal(np.nextafter(one, two) - one, epsilon)
np.testing.assert_equal(np.nextafter(one, zero) - one, -epsilon / 2)
np.testing.assert_equal(np.isnan(np.nextafter(nan, one)), True)
np.testing.assert_equal(np.isnan(np.nextafter(one, nan)), True)
np.testing.assert_equal(np.nextafter(one, one), one)
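    # bfloat16 has 8 exponent bits (minimum normal exponent -126) and 7
    # explicit mantissa bits, so its smallest subnormal magnitude is
    # 2**(-126 - 7) == 2**-133, the value encoded by the hex literal below.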
smallest_denormal = float.fromhex("1.0p-133")
np.testing.assert_equal(np.nextafter(zero, one), smallest_denormal)
np.testing.assert_equal(np.nextafter(zero, -one), -smallest_denormal)
for a, b in itertools.permutations([0., -0., nan], 2):
np.testing.assert_equal(
np.nextafter(
np.array(a, dtype=np.float32), np.array(b, dtype=np.float32)),
np.nextafter(
np.array(a, dtype=bfloat16), np.array(b, dtype=bfloat16)))
if __name__ == "__main__":
absltest.main()
| cxxgtxy/tensorflow | tensorflow/python/lib/core/bfloat16_test.py | Python | apache-2.0 | 17,140 |
# Copyright 2022 The DDSP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Library of training functions."""
import inspect
import json
import os
import time
from absl import logging
from ddsp.training import cloud
import gin
import tensorflow.compat.v2 as tf
# ---------------------- Helper Functions --------------------------------------
def get_strategy(tpu='', cluster_config=''):
"""Create a distribution strategy for running on accelerators.
For CPU, single-GPU, or multi-GPU jobs on a single machine, call this function
without args to return a MirroredStrategy.
For TPU jobs, specify an address to the `tpu` argument.
For multi-machine GPU jobs, specify a `cluster_config` argument of the cluster
configuration.
Args:
tpu: Address of the TPU. No TPU if left blank.
cluster_config: Should be specified only for multi-worker jobs.
Task specific dictionary for cluster config dict in the TF_CONFIG format.
https://www.tensorflow.org/guide/distributed_training#setting_up_tf_config_environment_variable
If passed as a string, will be parsed to a dictionary. Two components
should be specified: cluster and task. Cluster provides information about
the training cluster, which is a dict consisting of different types of
jobs such as chief and worker. Task is information about the current task.
For example: "{"cluster": {"worker": ["host1:port", "host2:port"]},
"task": {"type": "worker", "index": 0}}"
Returns:
A distribution strategy. MirroredStrategy by default. TPUStrategy if `tpu`
arg is specified. MultiWorkerMirroredStrategy if `cluster_config` arg is
specified.
"""
if tpu:
logging.info('Use TPU at %s', tpu)
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=tpu)
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.TPUStrategy(resolver)
elif cluster_config:
if not isinstance(cluster_config, dict):
cluster_config = json.loads(cluster_config)
cluster_spec = tf.train.ClusterSpec(cluster_config['cluster'])
resolver = tf.distribute.cluster_resolver.SimpleClusterResolver(
cluster_spec=cluster_spec,
task_type=cluster_config['task']['type'],
task_id=cluster_config['task']['index'],
num_accelerators={'GPU': len(tf.config.list_physical_devices('GPU'))},
rpc_layer='grpc')
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(
cluster_resolver=resolver)
else:
logging.info('Defaulting to MirroredStrategy')
strategy = tf.distribute.MirroredStrategy()
return strategy
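# Illustrative usage sketch, not part of the original module: shows how the
# returned strategy is typically paired with a strategy scope. The Sequential
# model below is only a stand-in for a real DDSP model.
def _example_get_strategy_usage():
  strategy = get_strategy()  # Defaults to MirroredStrategy on a single host.
  with strategy.scope():
    # Variables created inside the scope are mirrored across devices.
    model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
  return model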
def expand_path(file_path):
return os.path.expanduser(os.path.expandvars(file_path))
def get_latest_file(dir_path, prefix='operative_config-', suffix='.gin'):
"""Returns latest file with pattern '/dir_path/prefix[iteration]suffix'.
Args:
dir_path: Path to the directory.
prefix: Filename prefix, not including directory.
suffix: Filename suffix, including extension.
Returns:
Path to the latest file
Raises:
FileNotFoundError: If no files match the pattern
'/dir_path/prefix[int]suffix'.
"""
dir_path = expand_path(dir_path)
dir_prefix = os.path.join(dir_path, prefix)
search_pattern = dir_prefix + '*' + suffix
file_paths = tf.io.gfile.glob(search_pattern)
if not file_paths:
raise FileNotFoundError(
f'No files found matching the pattern \'{search_pattern}\'.')
try:
# Filter to get highest iteration, no negative iterations.
get_iter = lambda fp: abs(int(fp.split(dir_prefix)[-1].split(suffix)[0]))
latest_file = max(file_paths, key=get_iter)
return latest_file
except ValueError as verror:
raise FileNotFoundError(
f'Files found with pattern \'{search_pattern}\' do not match '
f'the pattern \'{dir_prefix}[iteration_number]{suffix}\'.\n\n'
f'Files found:\n{file_paths}') from verror
def get_latest_checkpoint(checkpoint_path):
"""Helper function to get path to latest checkpoint.
Args:
checkpoint_path: Path to the directory containing model checkpoints, or
to a specific checkpoint (e.g. `/path/to/model.ckpt-iteration`).
Returns:
Path to latest checkpoint.
Raises:
FileNotFoundError: If no checkpoint is found.
"""
checkpoint_path = expand_path(checkpoint_path)
is_checkpoint = tf.io.gfile.exists(checkpoint_path + '.index')
if is_checkpoint:
# Return the path if it points to a checkpoint.
return checkpoint_path
else:
# Search using 'checkpoints' file.
# Returns None if no 'checkpoints' file, or directory doesn't exist.
ckpt = tf.train.latest_checkpoint(checkpoint_path)
if ckpt:
return ckpt
else:
# Last resort, look for '/path/ckpt-[iter].index' files.
ckpt_f = get_latest_file(checkpoint_path, prefix='ckpt-', suffix='.index')
return ckpt_f.split('.index')[0]
# ---------------------------------- Gin ---------------------------------------
def get_latest_operative_config(restore_dir):
"""Finds the most recently saved operative_config in a directory.
Args:
restore_dir: Path to directory with gin operative_configs. Will also work
if passing a path to a file in that directory such as a checkpoint.
Returns:
Filepath to most recent operative config.
Raises:
FileNotFoundError: If no config is found.
"""
try:
return get_latest_file(
restore_dir, prefix='operative_config-', suffix='.gin')
except FileNotFoundError:
return get_latest_file(
os.path.dirname(restore_dir), prefix='operative_config-', suffix='.gin')
def write_gin_config(summary_writer, save_dir, step):
""""Writes gin operative_config to save_dir and tensorboard."""
config_str = gin.operative_config_str()
# Save the original config string to a file.
base_name = 'operative_config-{}'.format(step)
fname = os.path.join(save_dir, base_name + '.gin')
with tf.io.gfile.GFile(fname, 'w') as f:
f.write(config_str)
# Formatting hack copied from gin.tf.GinConfigSaverHook.
def format_for_tensorboard(line):
"""Convert a single line to markdown format."""
if not line.startswith('#'):
return ' ' + line
line = line[2:]
if line.startswith('===='):
return ''
if line.startswith('None'):
return ' # None.'
if line.endswith(':'):
return '#### ' + line
return line
# Convert config string to markdown.
md_lines = []
for line in config_str.splitlines():
md_line = format_for_tensorboard(line)
if md_line is not None:
md_lines.append(md_line)
md_config_str = '\n'.join(md_lines)
# Add to tensorboard.
with summary_writer.as_default():
text_tensor = tf.convert_to_tensor(md_config_str)
tf.summary.text(name='gin/' + base_name, data=text_tensor, step=step)
summary_writer.flush()
def gin_register_keras_layers():
"""Registers all keras layers and Sequential to be referenceable in gin."""
# Register sequential model.
gin.external_configurable(tf.keras.Sequential, 'tf.keras.Sequential')
# Register all the layers.
for k, v in inspect.getmembers(tf.keras.layers):
# Duck typing for tf.keras.layers.Layer since keras uses metaclasses.
if hasattr(v, 'variables'):
gin.external_configurable(v, f'tf.keras.layers.{k}')
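# Illustrative sketch, not part of the original module: once registered, these
# layers can be referenced from gin config files. The bindings below are
# hypothetical examples of what an operative config might contain:
#
#   tf.keras.layers.Dense.units = 256
#   tf.keras.Sequential.layers = [@tf.keras.layers.Dense()]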
# ------------------------ Training Loop ---------------------------------------
@gin.configurable
def train(data_provider,
trainer,
batch_size=32,
num_steps=1000000,
steps_per_summary=300,
steps_per_save=300,
save_dir='/tmp/ddsp',
restore_dir='/tmp/ddsp',
early_stop_loss_value=None,
report_loss_to_hypertune=False):
"""Main training loop.
Args:
data_provider: DataProvider object for training data.
trainer: Trainer object built with Model to train.
batch_size: Total batch size.
num_steps: Number of training steps.
steps_per_summary: Number of training steps per summary save.
steps_per_save: Number of training steps per checkpoint save.
save_dir: Directory where checkpoints and summaries will be saved.
If empty string, no checkpoints or summaries will be saved.
restore_dir: Directory where latest checkpoints for resuming the training
are stored. If there are no checkpoints in this directory, training will
begin anew.
    early_stop_loss_value: Early stopping threshold. Training stops once the
      total_loss drops below this value. If None, training runs for num_steps
      steps.
report_loss_to_hypertune: Report loss values to hypertune package for
hyperparameter tuning, such as on Google Cloud AI-Platform.
"""
# Get a distributed dataset iterator.
dataset = data_provider.get_batch(batch_size, shuffle=True, repeats=-1)
dataset = trainer.distribute_dataset(dataset)
dataset_iter = iter(dataset)
# Build model, easiest to just run forward pass.
trainer.build(next(dataset_iter))
# Load latest checkpoint if one exists in load directory.
try:
trainer.restore(restore_dir)
except FileNotFoundError:
logging.info('No existing checkpoint found in %s, skipping '
'checkpoint loading.', restore_dir)
if save_dir:
# Set up the summary writer and metrics.
summary_dir = os.path.join(save_dir, 'summaries', 'train')
summary_writer = tf.summary.create_file_writer(summary_dir)
# Save the gin config.
write_gin_config(summary_writer, save_dir, trainer.step.numpy())
else:
# Need to create a dummy writer, even if no save_dir is provided.
summary_writer = tf.summary.create_noop_writer()
# Train.
with summary_writer.as_default():
tick = time.time()
for iteration in range(num_steps):
step = trainer.step # Step is not iteration if restarting a model.
# Take a step.
losses = trainer.train_step(dataset_iter)
# Create training loss metrics when starting/restarting training.
if iteration == 0:
loss_names = list(losses.keys())
logging.info('Creating metrics for %s', loss_names)
avg_losses = {name: tf.keras.metrics.Mean(name=name, dtype=tf.float32)
for name in loss_names}
# Update metrics.
for k, v in losses.items():
avg_losses[k].update_state(v)
# Log the step.
log_str = 'step: {}\t'.format(int(step.numpy()))
for k, v in losses.items():
log_str += '{}: {:.2f}\t'.format(k, v)
logging.info(log_str)
# Write Summaries.
if step % steps_per_summary == 0 and save_dir:
# Speed.
steps_per_sec = steps_per_summary / (time.time() - tick)
tf.summary.scalar('steps_per_sec', steps_per_sec, step=step)
tick = time.time()
# Metrics.
for k, metric in avg_losses.items():
tf.summary.scalar('losses/{}'.format(k), metric.result(), step=step)
metric.reset_states()
# Report metrics for hyperparameter tuning if enabled.
if report_loss_to_hypertune:
cloud.report_metric_to_hypertune(losses['total_loss'], step.numpy())
      # Stop training early once the loss reaches the given value.
if (early_stop_loss_value is not None and
losses['total_loss'] <= early_stop_loss_value):
logging.info('Total loss reached early stopping value of %s',
early_stop_loss_value)
# Write a final checkpoint.
if save_dir:
trainer.save(save_dir)
summary_writer.flush()
break
# Save Model.
if step % steps_per_save == 0 and save_dir:
trainer.save(save_dir)
summary_writer.flush()
logging.info('Training Finished!')
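# Illustrative call sketch, not part of the original module: `train` is usually
# configured and invoked through gin, but a direct call looks roughly like the
# following. `TFRecordProvider`, `Trainer`, `model`, `strategy` and
# `file_pattern` are assumptions standing in for the caller's own objects:
#
#   data_provider = ddsp.training.data.TFRecordProvider(file_pattern)
#   trainer = ddsp.training.trainers.Trainer(model, strategy)
#   train(data_provider, trainer, num_steps=30000, save_dir='/tmp/ddsp')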
| magenta/ddsp | ddsp/training/train_util.py | Python | apache-2.0 | 12,295 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A binary to train CIFAR-10 using a single GPU.
Accuracy:
cifar10_train.py achieves ~86% accuracy after 100K steps (256 epochs of
data) as judged by cifar10_eval.py.
Speed: With batch_size 128.
System | Step Time (sec/batch) | Accuracy
------------------------------------------------------------------
1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours)
1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours)
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import time
import tensorflow as tf
import cifar10
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000,
"""Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
tf.app.flags.DEFINE_integer('log_frequency', 10,
"""How often to log results to the console.""")
def train():
"""Train CIFAR-10 for a number of steps."""
with tf.Graph().as_default():
global_step = tf.train.get_or_create_global_step()
# Get images and labels for CIFAR-10.
# Force input pipeline to CPU:0 to avoid operations sometimes ending up on
# GPU and resulting in a slow down.
with tf.device('/cpu:0'):
images, labels = cifar10.distorted_inputs()
# Build a Graph that computes the logits predictions from the
# inference model.
logits = cifar10.inference(images)
# Calculate loss.
loss = cifar10.loss(logits, labels)
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_op = cifar10.train(loss, global_step)
class _LoggerHook(tf.train.SessionRunHook):
"""Logs loss and runtime."""
def begin(self):
self._step = -1
self._start_time = time.time()
def before_run(self, run_context):
self._step += 1
return tf.train.SessionRunArgs(loss) # Asks for loss value.
def after_run(self, run_context, run_values):
if self._step % FLAGS.log_frequency == 0:
current_time = time.time()
duration = current_time - self._start_time
self._start_time = current_time
loss_value = run_values.results
examples_per_sec = FLAGS.log_frequency * FLAGS.batch_size / duration
sec_per_batch = float(duration / FLAGS.log_frequency)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
          print(format_str % (datetime.now(), self._step, loss_value,
                              examples_per_sec, sec_per_batch))
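    # MonitoredTrainingSession initializes variables, restores from and saves
    # checkpoints under checkpoint_dir, writes summaries, and runs the hooks
    # around each run() call; StopAtStepHook ends the loop at max_steps.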
with tf.train.MonitoredTrainingSession(
checkpoint_dir=FLAGS.train_dir,
hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
tf.train.NanTensorHook(loss),
_LoggerHook()],
config=tf.ConfigProto(
log_device_placement=FLAGS.log_device_placement)) as mon_sess:
while not mon_sess.should_stop():
mon_sess.run(train_op)
def main(argv=None): # pylint: disable=unused-argument
cifar10.maybe_download_and_extract()
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()
| jiaphuan/models | tutorials/image/cifar10/cifar10_train.py | Python | apache-2.0 | 4,491 |
# -*- coding: utf-8 -*-
"""The parsers and plugins interface classes."""
import abc
import os
from plaso.lib import errors
class BaseFileEntryFilter(object):
"""File entry filter interface."""
# pylint: disable=redundant-returns-doc
@abc.abstractmethod
def Match(self, file_entry):
"""Determines if a file entry matches the filter.
Args:
file_entry (dfvfs.FileEntry): a file entry.
Returns:
bool: True if the file entry matches the filter.
"""
class FileNameFileEntryFilter(BaseFileEntryFilter):
"""File name file entry filter."""
def __init__(self, filename):
"""Initializes a file entry filter.
Args:
filename (str): name of the file.
"""
super(FileNameFileEntryFilter, self).__init__()
self._filename = filename.lower()
def Match(self, file_entry):
"""Determines if a file entry matches the filter.
Args:
file_entry (dfvfs.FileEntry): a file entry.
Returns:
bool: True if the file entry matches the filter.
"""
if not file_entry:
return False
filename = file_entry.name.lower()
return filename == self._filename
class BaseParser(object):
"""The parser interface."""
# The name of the parser. This is the name that is used in the registration
# and used for parser/plugin selection, so this needs to be concise and unique
# for all plugins/parsers, such as 'Chrome', 'Safari' or 'UserAssist'.
NAME = 'base_parser'
# Data format supported by the parser plugin. This information is used by
# the parser manager to generate parser and plugin information.
DATA_FORMAT = ''
# List of filters that should match for the parser to be applied.
FILTERS = frozenset()
# Every derived parser class that implements plugins should define
# its own _plugin_classes dict:
# _plugin_classes = {}
# We deliberately don't define it here to make sure the plugins of
# different parser classes don't end up in the same dict.
_plugin_classes = None
def __init__(self):
"""Initializes a parser.
By default all plugins will be enabled. To only enable specific plugins
use the EnablePlugins method and pass it a list of strings containing
the names of the plugins to enable.
The default plugin, named "{self.NAME:s}_default", if it exists,
is always enabled and cannot be disabled.
"""
super(BaseParser, self).__init__()
self._default_plugin = None
self._plugins = None
self.EnablePlugins([])
@classmethod
def DeregisterPlugin(cls, plugin_class):
"""Deregisters a plugin class.
The plugin classes are identified based on their lower case name.
Args:
plugin_class (type): class of the plugin.
Raises:
KeyError: if plugin class is not set for the corresponding name.
"""
plugin_name = plugin_class.NAME.lower()
if plugin_name not in cls._plugin_classes:
raise KeyError(
'Plugin class not set for name: {0:s}.'.format(
plugin_class.NAME))
del cls._plugin_classes[plugin_name]
def EnablePlugins(self, plugin_includes):
"""Enables parser plugins.
Args:
plugin_includes (list[str]): names of the plugins to enable, where None
or an empty list represents all plugins. Note the default plugin, if
it exists, is always enabled and cannot be disabled.
"""
self._plugins = []
if not self._plugin_classes:
return
default_plugin_name = '{0:s}_default'.format(self.NAME)
for plugin_name, plugin_class in self._plugin_classes.items():
if plugin_name == default_plugin_name:
self._default_plugin = plugin_class()
continue
if plugin_includes and plugin_name not in plugin_includes:
continue
plugin_object = plugin_class()
self._plugins.append(plugin_object)
# TODO: move this to a filter.
# pylint: disable=redundant-returns-doc
@classmethod
def GetFormatSpecification(cls):
"""Retrieves the format specification.
Returns:
FormatSpecification: a format specification or None if not available.
"""
return
@classmethod
def GetPluginObjectByName(cls, plugin_name):
"""Retrieves a specific plugin object by its name.
Args:
plugin_name (str): name of the plugin.
Returns:
BasePlugin: a plugin object or None if not available.
"""
plugin_class = cls._plugin_classes.get(plugin_name, None)
if plugin_class:
return plugin_class()
return None
@classmethod
def GetPlugins(cls):
"""Retrieves the registered plugins.
Yields:
tuple[str, type]: name and class of the plugin.
"""
for plugin_name, plugin_class in cls._plugin_classes.items():
yield plugin_name, plugin_class
@classmethod
def RegisterPlugin(cls, plugin_class):
"""Registers a plugin class.
The plugin classes are identified based on their lower case name.
Args:
plugin_class (type): class of the plugin.
Raises:
KeyError: if plugin class is already set for the corresponding name.
"""
plugin_name = plugin_class.NAME.lower()
if plugin_name in cls._plugin_classes:
raise KeyError((
'Plugin class already set for name: {0:s}.').format(
plugin_class.NAME))
cls._plugin_classes[plugin_name] = plugin_class
@classmethod
def RegisterPlugins(cls, plugin_classes):
"""Registers plugin classes.
Args:
plugin_classes (list[type]): classes of plugins.
Raises:
KeyError: if plugin class is already set for the corresponding name.
"""
for plugin_class in plugin_classes:
cls.RegisterPlugin(plugin_class)
@classmethod
def SupportsPlugins(cls):
"""Determines if a parser supports plugins.
Returns:
bool: True if the parser supports plugins.
"""
return cls._plugin_classes is not None
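# Illustrative sketch, not part of the original module: a parser class that
# supports plugins defines its own registry dict and registers plugin classes
# against it. The names below are hypothetical; ExampleSQLitePlugin stands for
# a plugin class defined elsewhere.
#
# class ExampleSQLiteParser(BaseParser):
#   NAME = 'example_sqlite'
#   DATA_FORMAT = 'Example SQLite database file'
#   _plugin_classes = {}
#
# ExampleSQLiteParser.RegisterPlugin(ExampleSQLitePlugin)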
class FileEntryParser(BaseParser):
"""The file entry parser interface."""
def Parse(self, parser_mediator):
"""Parsers the file entry and extracts event objects.
Args:
parser_mediator (ParserMediator): a parser mediator.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
file_entry = parser_mediator.GetFileEntry()
if not file_entry:
raise errors.UnableToParseFile('Invalid file entry')
parser_mediator.AppendToParserChain(self)
try:
self.ParseFileEntry(parser_mediator, file_entry)
finally:
parser_mediator.PopFromParserChain()
@abc.abstractmethod
def ParseFileEntry(self, parser_mediator, file_entry):
"""Parses a file entry.
Args:
parser_mediator (ParserMediator): a parser mediator.
file_entry (dfvfs.FileEntry): a file entry to parse.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
class FileObjectParser(BaseParser):
"""The file-like object parser interface."""
# The initial file offset. Set this value to None if no initial
# file offset seek needs to be performed.
_INITIAL_FILE_OFFSET = 0
def Parse(self, parser_mediator, file_object):
"""Parses a single file-like object.
Args:
parser_mediator (ParserMediator): a parser mediator.
      file_object (dfvfs.FileIO): a file-like object to parse.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
if not file_object:
raise errors.UnableToParseFile('Invalid file object')
if self._INITIAL_FILE_OFFSET is not None:
file_object.seek(self._INITIAL_FILE_OFFSET, os.SEEK_SET)
parser_mediator.AppendToParserChain(self)
try:
self.ParseFileObject(parser_mediator, file_object)
finally:
parser_mediator.PopFromParserChain()
@abc.abstractmethod
def ParseFileObject(self, parser_mediator, file_object):
"""Parses a file-like object.
Args:
parser_mediator (ParserMediator): a parser mediator.
      file_object (dfvfs.FileIO): a file-like object to parse.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
| Onager/plaso | plaso/parsers/interface.py | Python | apache-2.0 | 8,018 |
#
# Copyright 2015-2019, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authenticates user for accessing the ISB-CGC Endpoint APIs.
#
# May be run from the command line or in scripts/ipython.
#
# The credentials file can be copied to any machine from which you want
# to access the API.
#
# 1. Command Line
# python ./isb_auth.py saves the user's credentials;
# OPTIONAL:
# -v for verbose (returns token!)
# -s FILE sets credentials file [default: ~/.isb_credentials]
# -u URL-only: for use over terminal connections;
# gives user a URL to paste into their browser,
# and asks for an auth code in return
#
# 2. Python
# import isb_auth
# isb_auth.get_credentials()
#
# # optional: to store credentials in a different location
# from oauth2client.file import Storage
# import isb_auth
# import os
#
# storage_file = os.path.join(os.path.expanduser("~"), "{USER_CREDENTIALS_FILE_NAME}")
# storage = Storage(storage_file)
# isb_auth.get_credentials(storage=storage)
#
from __future__ import print_function
from argparse import ArgumentParser
import os
from oauth2client.client import OAuth2WebServerFlow
from oauth2client import tools
from oauth2client.file import Storage
VERBOSE = False
# for native application - same as settings.INSTALLED_APP_CLIENT_ID
CLIENT_ID = '586186890913-atr969tu3lf7u574khjjplb45fgpq1bg.apps.googleusercontent.com'
# NOTE: this is NOT actually a 'secret' -- we're using the 'installed
# application' OAuth pattern here
CLIENT_SECRET = 'XeBxiK7NQ0yvAkAnRIKufkFE'
EMAIL_SCOPE = 'https://www.googleapis.com/auth/userinfo.email'
DEFAULT_STORAGE_FILE = os.path.join(os.path.expanduser("~"), '.isb_credentials')
def maybe_print(msg):
if VERBOSE:
print(msg)
def get_credentials(storage=None, oauth_flow_args=None):
    # Avoid a mutable default argument; copy the list so repeated calls (and
    # the caller's list) are not affected by the append below.
    oauth_flow_args = list(oauth_flow_args) if oauth_flow_args else []
    noweb = '--noauth_local_webserver'
    if __name__ != '__main__' and noweb not in oauth_flow_args:
        oauth_flow_args.append(noweb)
if storage is None:
storage = Storage(DEFAULT_STORAGE_FILE)
credentials = storage.get()
if not credentials or credentials.invalid:
maybe_print('credentials missing/invalid, kicking off OAuth flow')
flow = OAuth2WebServerFlow(CLIENT_ID, CLIENT_SECRET, EMAIL_SCOPE)
flow.auth_uri = flow.auth_uri.rstrip('/') + '?approval_prompt=force'
credentials = tools.run_flow(flow, storage, tools.argparser.parse_args(oauth_flow_args))
return credentials
def main():
global VERBOSE
args = parse_args()
oauth_flow_args = [args.noauth_local_webserver] if args.noauth_local_webserver else []
VERBOSE = args.verbose
maybe_print('--verbose: printing extra information')
storage = Storage(args.storage_file)
credentials = get_credentials(storage, oauth_flow_args)
maybe_print('credentials stored in ' + args.storage_file)
maybe_print('access_token: ' + credentials.access_token)
maybe_print('refresh_token: ' + credentials.refresh_token)
def parse_args():
parser = ArgumentParser()
parser.add_argument('--storage_file', '-s', default=DEFAULT_STORAGE_FILE, help='storage file to use for the credentials (default is {})'.format(DEFAULT_STORAGE_FILE))
parser.add_argument('--verbose', '-v', dest='verbose', action='store_true', help='display credentials storage location, access token, and refresh token')
parser.set_defaults(verbose=False)
parser.add_argument('--noauth_local_webserver','-u', action='store_const', const='--noauth_local_webserver')
return parser.parse_args()
if __name__ == '__main__':
    main()
 | isb-cgc/ISB-CGC-Webapp | scripts/isb_auth.py | Python | apache-2.0 | 4266 |
#!/usr/bin/env python
import rospy
from basics.msg import Complex
from random import random
rospy.init_node('message_publisher')
pub = rospy.Publisher('complex', Complex, queue_size=10)
rate = rospy.Rate(2)
while not rospy.is_shutdown():
msg = Complex()
msg.real = random()
msg.imaginary = random()
pub.publish(msg)
rate.sleep()
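# Illustrative companion sketch, not part of this script: the receiving side
# could subscribe to the same topic roughly as follows (the callback name is a
# hypothetical placeholder).
#
# def callback(msg):
#     rospy.loginfo('%f + %fi', msg.real, msg.imaginary)
#
# rospy.init_node('message_subscriber')
# rospy.Subscriber('complex', Complex, callback)
# rospy.spin()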
| osrf/rosbook | code/basics/src/message_publisher.py | Python | apache-2.0 | 344 |
# -*- coding: utf-8 -*-
import mock
import pytest
from urlparse import urlparse
from api.base.settings.defaults import API_BASE
from framework.auth.core import Auth
from osf.models import NodeLog
from osf.models.licenses import NodeLicense
from osf.utils.sanitize import strip_html
from osf.utils import permissions
from osf_tests.factories import (
NodeFactory,
ProjectFactory,
RegistrationFactory,
AuthUserFactory,
CollectionFactory,
CommentFactory,
NodeLicenseRecordFactory,
PrivateLinkFactory,
PreprintFactory,
IdentifierFactory,
InstitutionFactory,
)
from rest_framework import exceptions
from tests.base import fake
from tests.utils import assert_items_equal, assert_latest_log, assert_latest_log_not
from website.views import find_bookmark_collection
@pytest.fixture()
def user():
return AuthUserFactory()
@pytest.mark.django_db
class TestNodeDetail:
@pytest.fixture()
def user_two(self):
return AuthUserFactory()
@pytest.fixture()
def project_public(self, user):
return ProjectFactory(
title='Project One',
is_public=True,
creator=user)
@pytest.fixture()
def project_private(self, user):
return ProjectFactory(
title='Project Two',
is_public=False,
creator=user)
@pytest.fixture()
def component_public(self, user, project_public):
return NodeFactory(parent=project_public, creator=user, is_public=True)
@pytest.fixture()
def url_public(self, project_public):
return '/{}nodes/{}/'.format(API_BASE, project_public._id)
@pytest.fixture()
def url_private(self, project_private):
return '/{}nodes/{}/'.format(API_BASE, project_private._id)
@pytest.fixture()
def url_component_public(self, component_public):
return '/{}nodes/{}/'.format(API_BASE, component_public._id)
@pytest.fixture()
def permissions_read(self):
return ['read']
@pytest.fixture()
def permissions_write(self):
return ['read', 'write']
@pytest.fixture()
def permissions_admin(self):
return ['read', 'admin', 'write']
def test_return_project_details(
self, app, user, user_two, project_public,
project_private, url_public, url_private,
permissions_read, permissions_admin):
# test_return_public_project_details_logged_out
res = app.get(url_public)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == project_public.title
assert res.json['data']['attributes']['description'] == project_public.description
assert res.json['data']['attributes']['category'] == project_public.category
assert_items_equal(
res.json['data']['attributes']['current_user_permissions'],
permissions_read)
# test_return_public_project_details_contributor_logged_in
res = app.get(url_public, auth=user.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == project_public.title
assert res.json['data']['attributes']['description'] == project_public.description
assert res.json['data']['attributes']['category'] == project_public.category
assert_items_equal(
res.json['data']['attributes']['current_user_permissions'],
permissions_admin)
# test_return_public_project_details_non_contributor_logged_in
res = app.get(url_public, auth=user_two.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == project_public.title
assert res.json['data']['attributes']['description'] == project_public.description
assert res.json['data']['attributes']['category'] == project_public.category
assert_items_equal(
res.json['data']['attributes']['current_user_permissions'],
permissions_read)
# test_return_private_project_details_logged_in_admin_contributor
res = app.get(url_private, auth=user.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == project_private.title
assert res.json['data']['attributes']['description'] == project_private.description
assert res.json['data']['attributes']['category'] == project_private.category
assert_items_equal(
res.json['data']['attributes']['current_user_permissions'],
permissions_admin)
# test_return_private_project_details_logged_out
res = app.get(url_private, expect_errors=True)
assert res.status_code == 401
assert 'detail' in res.json['errors'][0]
# test_return_private_project_details_logged_in_non_contributor
res = app.get(url_private, auth=user_two.auth, expect_errors=True)
assert res.status_code == 403
assert 'detail' in res.json['errors'][0]
def test_return_private_project_details_logged_in_write_contributor(
self, app, user, user_two, project_private, url_private, permissions_write):
project_private.add_contributor(
contributor=user_two, auth=Auth(user), save=True)
res = app.get(url_private, auth=user_two.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == project_private.title
assert res.json['data']['attributes']['description'] == project_private.description
assert res.json['data']['attributes']['category'] == project_private.category
assert_items_equal(
res.json['data']['attributes']['current_user_permissions'],
permissions_write)
def test_top_level_project_has_no_parent(self, app, url_public):
res = app.get(url_public)
assert res.status_code == 200
assert 'parent' not in res.json['data']['relationships']
assert 'id' in res.json['data']
assert res.content_type == 'application/vnd.api+json'
def test_child_project_has_parent(
self, app, user, project_public, url_public):
public_component = NodeFactory(
parent=project_public, creator=user, is_public=True)
public_component_url = '/{}nodes/{}/'.format(
API_BASE, public_component._id)
res = app.get(public_component_url)
assert res.status_code == 200
url = res.json['data']['relationships']['parent']['links']['related']['href']
assert urlparse(url).path == url_public
def test_node_has(self, app, url_public):
# test_node_has_children_link
res = app.get(url_public)
url = res.json['data']['relationships']['children']['links']['related']['href']
expected_url = '{}children/'.format(url_public)
assert urlparse(url).path == expected_url
# test_node_has_contributors_link
res = app.get(url_public)
url = res.json['data']['relationships']['contributors']['links']['related']['href']
expected_url = '{}contributors/'.format(url_public)
assert urlparse(url).path == expected_url
# test_node_has_node_links_link
res = app.get(url_public)
url = res.json['data']['relationships']['node_links']['links']['related']['href']
expected_url = '{}node_links/'.format(url_public)
assert urlparse(url).path == expected_url
# test_node_has_registrations_link
res = app.get(url_public)
url = res.json['data']['relationships']['registrations']['links']['related']['href']
expected_url = '{}registrations/'.format(url_public)
assert urlparse(url).path == expected_url
# test_node_has_files_link
res = app.get(url_public)
url = res.json['data']['relationships']['files']['links']['related']['href']
expected_url = '{}files/'.format(url_public)
assert urlparse(url).path == expected_url
def test_node_has_comments_link(
self, app, user, project_public, url_public):
CommentFactory(node=project_public, user=user)
res = app.get(url_public)
assert res.status_code == 200
assert 'comments' in res.json['data']['relationships'].keys()
url = res.json['data']['relationships']['comments']['links']['related']['href']
res = app.get(url)
assert res.status_code == 200
assert res.json['data'][0]['type'] == 'comments'
def test_node_comments_link_query_params_formatted(
self, app, user, project_public, project_private, url_private):
CommentFactory(node=project_public, user=user)
project_private_link = PrivateLinkFactory(anonymous=False)
project_private_link.nodes.add(project_private)
project_private_link.save()
res = app.get(url_private, auth=user.auth)
url = res.json['data']['relationships']['comments']['links']['related']['href']
assert project_private_link.key not in url
res = app.get(
'{}?view_only={}'.format(
url_private,
project_private_link.key))
url = res.json['data']['relationships']['comments']['links']['related']['href']
assert project_private_link.key in url
def test_node_has_correct_unread_comments_count(
self, app, user, project_public, url_public):
contributor = AuthUserFactory()
project_public.add_contributor(
contributor=contributor, auth=Auth(user), save=True)
CommentFactory(
node=project_public,
user=contributor,
page='node')
res = app.get(
'{}?related_counts=True'.format(url_public),
auth=user.auth)
unread = res.json['data']['relationships']['comments']['links']['related']['meta']['unread']
unread_comments_node = unread['node']
assert unread_comments_node == 1
def test_node_properties(self, app, url_public):
res = app.get(url_public)
assert res.json['data']['attributes']['public'] is True
assert res.json['data']['attributes']['registration'] is False
assert res.json['data']['attributes']['collection'] is False
assert res.json['data']['attributes']['tags'] == []
def test_requesting_folder_returns_error(self, app, user):
folder = CollectionFactory(creator=user)
res = app.get(
'/{}nodes/{}/'.format(API_BASE, folder._id),
auth=user.auth,
expect_errors=True
)
assert res.status_code == 404
def test_cannot_return_registrations_at_node_detail_endpoint(
self, app, user, project_public):
registration = RegistrationFactory(
project=project_public, creator=user)
res = app.get('/{}nodes/{}/'.format(
API_BASE, registration._id),
auth=user.auth, expect_errors=True)
assert res.status_code == 404
def test_cannot_return_folder_at_node_detail_endpoint(self, app, user):
folder = CollectionFactory(creator=user)
res = app.get(
'/{}nodes/{}/'.format(API_BASE, folder._id),
auth=user.auth, expect_errors=True)
assert res.status_code == 404
def test_node_list_embed_identifier_link(self, app, user, project_public, url_public):
url = url_public + '?embed=identifiers'
res = app.get(url)
assert res.status_code == 200
link = res.json['data']['relationships']['identifiers']['links']['related']['href']
assert '{}identifiers/'.format(url_public) in link
@pytest.mark.django_db
class NodeCRUDTestCase:
@pytest.fixture()
def institution_one(self):
return InstitutionFactory()
@pytest.fixture()
def institution_two(self):
return InstitutionFactory()
@pytest.fixture()
def user_two(self, institution_one, institution_two):
auth_user = AuthUserFactory()
auth_user.affiliated_institutions.add(institution_one)
auth_user.affiliated_institutions.add(institution_two)
return auth_user
@pytest.fixture()
def title(self):
return 'Cool Project'
@pytest.fixture()
def title_new(self):
return 'Super Cool Project'
@pytest.fixture()
def description(self):
return 'A Properly Cool Project'
@pytest.fixture()
def description_new(self):
return 'An even cooler project'
@pytest.fixture()
def category(self):
return 'data'
@pytest.fixture()
def category_new(self):
return 'project'
@pytest.fixture()
def project_public(self, user, title, description, category):
return ProjectFactory(
title=title,
description=description,
category=category,
is_public=True,
creator=user
)
@pytest.fixture()
def project_private(self, user, title, description, category):
return ProjectFactory(
title=title,
description=description,
category=category,
is_public=False,
creator=user
)
@pytest.fixture()
def url_public(self, project_public):
return '/{}nodes/{}/'.format(API_BASE, project_public._id)
@pytest.fixture()
def url_private(self, project_private):
return '/{}nodes/{}/'.format(API_BASE, project_private._id)
@pytest.fixture()
def url_fake(self):
return '/{}nodes/{}/'.format(API_BASE, '12345')
@pytest.fixture()
def make_node_payload(self):
def payload(node, attributes, relationships=None):
payload_data = {
'data': {
'id': node._id,
'type': 'nodes',
'attributes': attributes,
}
}
if relationships:
payload_data['data']['relationships'] = relationships
return payload_data
return payload
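    # For reference: make_node_payload(node, {'public': True}) produces a JSON
    # API document of the form
    # {'data': {'id': node._id, 'type': 'nodes', 'attributes': {'public': True}}},
    # optionally extended with a 'relationships' member inside 'data'.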
@pytest.mark.django_db
class TestNodeUpdate(NodeCRUDTestCase):
def test_node_institution_update(self, app, user_two, project_private, url_private, make_node_payload,
institution_one, institution_two):
project_private.add_contributor(
user_two,
permissions=(permissions.READ, permissions.WRITE, permissions.ADMIN),
auth=Auth(project_private.creator)
)
affiliated_institutions = {
'affiliated_institutions':
{'data': [
{
'type': 'institutions',
'id': institution_one._id
},
{
'type': 'institutions',
'id': institution_two._id
},
]
}
}
payload = make_node_payload(project_private, {'public': False}, relationships=affiliated_institutions)
res = app.patch_json_api(url_private, payload, auth=user_two.auth, expect_errors=False)
assert res.status_code == 200
institutions = project_private.affiliated_institutions.all()
assert institution_one in institutions
assert institution_two in institutions
def test_node_update_invalid_data(self, app, user, url_public):
res = app.put_json_api(
url_public, 'Incorrect data',
auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == exceptions.ParseError.default_detail
res = app.put_json_api(
url_public, ['Incorrect data'],
auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == exceptions.ParseError.default_detail
def test_cannot_make_project_public_if_non_contributor(
self, app, project_private, url_private, make_node_payload):
with assert_latest_log_not(NodeLog.MADE_PUBLIC, project_private):
non_contrib = AuthUserFactory()
res = app.patch_json(
url_private,
make_node_payload(project_private, {'public': True}),
auth=non_contrib.auth, expect_errors=True
)
assert res.status_code == 403
def test_cannot_make_project_public_if_non_admin_contributor(
self, app, project_private, url_private, make_node_payload):
non_admin = AuthUserFactory()
project_private.add_contributor(
non_admin,
permissions=(permissions.READ, permissions.WRITE),
auth=Auth(project_private.creator)
)
project_private.save()
res = app.patch_json(
url_private,
make_node_payload(project_private, {'public': True}),
auth=non_admin.auth, expect_errors=True
)
assert res.status_code == 403
project_private.reload()
assert not project_private.is_public
def test_can_make_project_public_if_admin_contributor(
self, app, project_private, url_private, make_node_payload):
with assert_latest_log(NodeLog.MADE_PUBLIC, project_private):
admin_user = AuthUserFactory()
project_private.add_contributor(
admin_user,
permissions=(permissions.READ,
permissions.WRITE,
permissions.ADMIN),
auth=Auth(project_private.creator))
project_private.save()
res = app.patch_json_api(
url_private,
make_node_payload(project_private, {'public': True}),
auth=admin_user.auth # self.user is creator/admin
)
assert res.status_code == 200
project_private.reload()
assert project_private.is_public
def test_update_errors(
self, app, user, user_two, title_new, description_new,
category_new, project_public, project_private,
url_public, url_private):
# test_update_project_properties_not_nested
res = app.put_json_api(url_public, {
'id': project_public._id,
'type': 'nodes',
'title': title_new,
'description': description_new,
'category': category_new,
'public': True,
}, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Request must include /data.'
assert res.json['errors'][0]['source']['pointer'] == '/data'
# test_update_invalid_id
res = app.put_json_api(url_public, {
'data': {
'id': '12345',
'type': 'nodes',
'attributes': {
'title': title_new,
'description': description_new,
'category': category_new,
'public': True
}
}
}, auth=user.auth, expect_errors=True)
assert res.status_code == 409
# test_update_invalid_type
res = app.put_json_api(url_public, {
'data': {
'id': project_public._id,
'type': 'node',
'attributes': {
'title': title_new,
'description': description_new,
'category': category_new,
'public': True
}
}
}, auth=user.auth, expect_errors=True)
assert res.status_code == 409
# test_update_no_id
res = app.put_json_api(url_public, {
'data': {
'type': 'nodes',
'attributes': {
'title': title_new,
'description': description_new,
'category': category_new,
'public': True
}
}
}, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'This field may not be null.'
assert res.json['errors'][0]['source']['pointer'] == '/data/id'
# test_update_no_type
res = app.put_json_api(url_public, {
'data': {
'id': project_public._id,
'attributes': {
'title': title_new,
'description': description_new,
'category': category_new,
'public': True
}
}
}, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'This field may not be null.'
assert res.json['errors'][0]['source']['pointer'] == '/data/type'
# test_update_public_project_logged_out
res = app.put_json_api(url_public, {
'data': {
'id': project_public._id,
'type': 'nodes',
'attributes': {
'title': title_new,
'description': description_new,
'category': category_new,
'public': True
}
}
}, expect_errors=True)
assert res.status_code == 401
assert 'detail' in res.json['errors'][0]
# test_update_project_invalid_title
project = {
'data': {
'type': 'nodes',
'id': project_public._id,
'attributes': {
'title': 'A' * 201,
'category': 'project',
}
}
}
res = app.put_json_api(
url_public, project,
auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Title cannot exceed 200 characters.'
# test_update_public_project_logged_in_but_unauthorized
res = app.put_json_api(url_public, {
'data': {
'id': project_private._id,
'type': 'nodes',
'attributes': {
'title': title_new,
'description': description_new,
'category': category_new,
'public': True
}
}
}, auth=user_two.auth, expect_errors=True)
assert res.status_code == 403
assert 'detail' in res.json['errors'][0]
# test_update_private_project_logged_out
res = app.put_json_api(url_private, {
'data': {
'id': project_private._id,
'type': 'nodes',
'attributes': {
'title': title_new,
'description': description_new,
'category': category_new,
'public': False
}
}
}, expect_errors=True)
assert res.status_code == 401
assert 'detail' in res.json['errors'][0]
# test_update_private_project_logged_in_non_contributor
res = app.put_json_api(url_private, {
'data': {
'id': project_private._id,
'type': 'nodes',
'attributes': {
'title': title_new,
'description': description_new,
'category': category_new,
'public': False
}
}
}, auth=user_two.auth, expect_errors=True)
assert res.status_code == 403
assert 'detail' in res.json['errors'][0]
def test_update_public_project_logged_in(
self, app, user, title_new, description_new,
category_new, project_public, url_public):
with assert_latest_log(NodeLog.UPDATED_FIELDS, project_public):
res = app.put_json_api(url_public, {
'data': {
'id': project_public._id,
'type': 'nodes',
'attributes': {
'title': title_new,
'description': description_new,
'category': category_new,
'public': True
}
}
}, auth=user.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == title_new
assert res.json['data']['attributes']['description'] == description_new
assert res.json['data']['attributes']['category'] == category_new
def test_cannot_update_a_registration(self, app, user, project_public):
registration = RegistrationFactory(
project=project_public, creator=user)
original_title = registration.title
original_description = registration.description
url = '/{}nodes/{}/'.format(API_BASE, registration._id)
res = app.put_json_api(url, {
'data': {
'id': registration._id,
'type': 'nodes',
'attributes': {
'title': fake.catch_phrase(),
'description': fake.bs(),
'category': 'hypothesis',
'public': True
}
}
}, auth=user.auth, expect_errors=True)
registration.reload()
assert res.status_code == 404
assert registration.title == original_title
assert registration.description == original_description
def test_update_private_project_logged_in_contributor(
self, app, user, title_new, description_new,
category_new, project_private, url_private):
with assert_latest_log(NodeLog.UPDATED_FIELDS, project_private):
res = app.put_json_api(url_private, {
'data': {
'id': project_private._id,
'type': 'nodes',
'attributes': {
'title': title_new,
'description': description_new,
'category': category_new,
'public': False
}
}
}, auth=user.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == title_new
assert res.json['data']['attributes']['description'] == description_new
assert res.json['data']['attributes']['category'] == category_new
def test_update_project_sanitizes_html_properly(
self, app, user, category_new, project_public, url_public):
        """PUT request should update the resource, and any HTML in fields should be stripped."""
        with assert_latest_log(NodeLog.UPDATED_FIELDS, project_public):
new_title = '<strong>Super</strong> Cool Project'
new_description = 'An <script>alert("even cooler")</script> project'
res = app.put_json_api(url_public, {
'data': {
'id': project_public._id,
'type': 'nodes',
'attributes': {
'title': new_title,
'description': new_description,
'category': category_new,
'public': True,
}
}
}, auth=user.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == strip_html(
new_title)
assert res.json['data']['attributes']['description'] == strip_html(
new_description)
def test_partial_update_project_updates_project_correctly_and_sanitizes_html(
self, app, user, description, category, project_public, url_public):
with assert_latest_log(NodeLog.EDITED_TITLE, project_public):
new_title = 'An <script>alert("even cooler")</script> project'
res = app.patch_json_api(url_public, {
'data': {
'id': project_public._id,
'type': 'nodes',
'attributes': {
'title': new_title
}
}
}, auth=user.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
res = app.get(url_public)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == strip_html(
new_title)
assert res.json['data']['attributes']['description'] == description
assert res.json['data']['attributes']['category'] == category
def test_partial_update_public_project_logged_in(
self, app, user, title_new, description,
category, project_public, url_public):
with assert_latest_log(NodeLog.EDITED_TITLE, project_public):
res = app.patch_json_api(url_public, {
'data': {
'id': project_public._id,
'type': 'nodes',
'attributes': {
'title': title_new,
}
}
}, auth=user.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == title_new
assert res.json['data']['attributes']['description'] == description
assert res.json['data']['attributes']['category'] == category
def test_write_to_public_field_non_contrib_forbidden(
self, app, user_two, project_public, url_public):
# Test non-contrib writing to public field
res = app.patch_json_api(url_public, {
'data': {
'attributes': {
'public': False},
'id': project_public._id,
'type': 'nodes'
}
}, auth=user_two.auth, expect_errors=True)
assert res.status_code == 403
assert 'detail' in res.json['errors'][0]
def test_partial_update_errors(
self, app, user, user_two, title_new,
project_public, project_private,
url_public, url_private):
# test_partial_update_public_project_logged_out
res = app.patch_json_api(url_public, {
'data': {
'id': project_public._id,
'type': 'nodes',
'attributes': {
'title': title_new
}
}
}, expect_errors=True)
assert res.status_code == 401
assert 'detail' in res.json['errors'][0]
# test_partial_update_public_project_logged_in_but_unauthorized
# Public resource, logged in, unauthorized
res = app.patch_json_api(url_public, {
'data': {
'attributes': {
'title': title_new},
'id': project_public._id,
'type': 'nodes',
}
}, auth=user_two.auth, expect_errors=True)
assert res.status_code == 403
assert 'detail' in res.json['errors'][0]
# test_partial_update_private_project_logged_out
res = app.patch_json_api(url_private, {
'data': {
'id': project_private._id,
'type': 'nodes',
'attributes': {
'title': title_new
}
}
}, expect_errors=True)
assert res.status_code == 401
assert 'detail' in res.json['errors'][0]
# test_partial_update_private_project_logged_in_non_contributor
res = app.patch_json_api(url_private, {
'data': {
'attributes': {
'title': title_new},
'id': project_private._id,
'type': 'nodes',
}
}, auth=user_two.auth, expect_errors=True)
assert res.status_code == 403
assert 'detail' in res.json['errors'][0]
# test_partial_update_invalid_id
res = app.patch_json_api(url_public, {
'data': {
'id': '12345',
'type': 'nodes',
'attributes': {
'title': title_new,
}
}
}, auth=user.auth, expect_errors=True)
assert res.status_code == 409
# test_partial_update_invalid_type
res = app.patch_json_api(url_public, {
'data': {
'id': project_public._id,
'type': 'node',
'attributes': {
'title': title_new,
}
}
}, auth=user.auth, expect_errors=True)
assert res.status_code == 409
# test_partial_update_no_id
res = app.patch_json_api(url_public, {
'data': {
'type': 'nodes',
'attributes': {
'title': title_new,
}
}
}, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'This field may not be null.'
assert res.json['errors'][0]['source']['pointer'] == '/data/id'
# test_partial_update_no_type
res = app.patch_json_api(url_public, {
'data': {
'id': project_public._id,
'attributes': {
'title': title_new,
}
}
}, auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'This field may not be null.'
assert res.json['errors'][0]['source']['pointer'] == '/data/type'
# Nothing will be updated here
# test_partial_update_project_properties_not_nested
res = app.patch_json_api(url_public, {
'data': {
'id': project_public._id,
'type': 'nodes',
'title': title_new,
}
}, auth=user.auth, expect_errors=True)
assert res.status_code == 400
def test_partial_update_private_project_logged_in_contributor(
self, app, user, title_new, description, category, project_private, url_private):
with assert_latest_log(NodeLog.EDITED_TITLE, project_private):
res = app.patch_json_api(url_private, {
'data': {
'attributes': {
'title': title_new},
'id': project_private._id,
'type': 'nodes',
}
}, auth=user.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert res.json['data']['attributes']['title'] == title_new
assert res.json['data']['attributes']['description'] == description
assert res.json['data']['attributes']['category'] == category
def test_multiple_patch_requests_with_same_category_generates_one_log(
self, app, user, project_private, url_private, make_node_payload):
project_private.category = 'project'
project_private.save()
new_category = 'data'
payload = make_node_payload(
project_private,
attributes={'category': new_category})
original_n_logs = project_private.logs.count()
res = app.patch_json_api(url_private, payload, auth=user.auth)
assert res.status_code == 200
project_private.reload()
assert project_private.category == new_category
assert project_private.logs.count() == original_n_logs + 1 # sanity check
app.patch_json_api(url_private, payload, auth=user.auth)
project_private.reload()
assert project_private.category == new_category
assert project_private.logs.count() == original_n_logs + 1
def test_public_project_with_publicly_editable_wiki_turns_private(
self, app, user, project_public, url_public, make_node_payload):
wiki = project_public.get_addon('wiki')
wiki.set_editing(permissions=True, auth=Auth(user=user), log=True)
res = app.patch_json_api(
url_public,
make_node_payload(project_public, {'public': False}),
auth=user.auth # self.user is creator/admin
)
assert res.status_code == 200
@mock.patch('website.identifiers.tasks.update_ezid_metadata_on_change.s')
def test_set_node_private_updates_ezid(
self, mock_update_ezid_metadata, app, user, project_public,
url_public, make_node_payload):
IdentifierFactory(referent=project_public, category='doi')
res = app.patch_json_api(
url_public,
make_node_payload(
project_public,
{'public': False}),
auth=user.auth)
assert res.status_code == 200
project_public.reload()
assert not project_public.is_public
mock_update_ezid_metadata.assert_called_with(
project_public._id, status='unavailable')
@mock.patch('website.preprints.tasks.update_ezid_metadata_on_change')
def test_set_node_with_preprint_private_updates_ezid(
self, mock_update_ezid_metadata, app, user,
project_public, url_public, make_node_payload):
target_object = PreprintFactory(project=project_public)
res = app.patch_json_api(
url_public,
make_node_payload(
project_public,
{'public': False}),
auth=user.auth)
assert res.status_code == 200
project_public.reload()
assert not project_public.is_public
mock_update_ezid_metadata.assert_called_with(
target_object._id, status='unavailable')
@pytest.mark.django_db
class TestNodeDelete(NodeCRUDTestCase):
def test_deletes_node_errors(
self, app, user, user_two, project_public,
project_private, url_public, url_private,
url_fake):
# test_deletes_public_node_logged_out
res = app.delete(url_public, expect_errors=True)
assert res.status_code == 401
assert 'detail' in res.json['errors'][0]
# test_deletes_public_node_fails_if_unauthorized
res = app.delete_json_api(
url_public,
auth=user_two.auth,
expect_errors=True)
project_public.reload()
assert res.status_code == 403
assert project_public.is_deleted is False
assert 'detail' in res.json['errors'][0]
# test_deletes_private_node_logged_out
res = app.delete(url_private, expect_errors=True)
assert res.status_code == 401
assert 'detail' in res.json['errors'][0]
# test_deletes_private_node_logged_in_non_contributor
res = app.delete(url_private, auth=user_two.auth, expect_errors=True)
project_private.reload()
assert res.status_code == 403
assert project_private.is_deleted is False
assert 'detail' in res.json['errors'][0]
# test_deletes_invalid_node
res = app.delete(url_fake, auth=user.auth, expect_errors=True)
assert res.status_code == 404
assert 'detail' in res.json['errors'][0]
def test_deletes_private_node_logged_in_read_only_contributor(
self, app, user_two, project_private, url_private):
project_private.add_contributor(
user_two, permissions=[permissions.READ])
project_private.save()
res = app.delete(url_private, auth=user_two.auth, expect_errors=True)
project_private.reload()
assert res.status_code == 403
assert project_private.is_deleted is False
assert 'detail' in res.json['errors'][0]
def test_delete_project_with_component_returns_error(self, app, user):
project = ProjectFactory(creator=user)
NodeFactory(parent=project, creator=user)
# Return a 400 because component must be deleted before deleting the
# parent
res = app.delete_json_api(
'/{}nodes/{}/'.format(API_BASE, project._id),
auth=user.auth,
expect_errors=True
)
assert res.status_code == 400
errors = res.json['errors']
assert len(errors) == 1
assert (
errors[0]['detail'] ==
'Any child components must be deleted prior to deleting this project.')
def test_delete_bookmark_collection_returns_error(self, app, user):
bookmark_collection = find_bookmark_collection(user)
res = app.delete_json_api(
'/{}nodes/{}/'.format(API_BASE, bookmark_collection._id),
auth=user.auth,
expect_errors=True
)
# Bookmark collections are collections, so a 404 is returned
assert res.status_code == 404
@mock.patch('website.identifiers.tasks.update_ezid_metadata_on_change.s')
def test_delete_node_with_preprint_calls_preprint_update_status(
self, mock_update_ezid_metadata_on_change, app, user,
project_public, url_public):
PreprintFactory(project=project_public)
app.delete_json_api(url_public, auth=user.auth, expect_errors=True)
project_public.reload()
assert mock_update_ezid_metadata_on_change.called
@mock.patch('website.identifiers.tasks.update_ezid_metadata_on_change.s')
def test_delete_node_with_identifier_calls_preprint_update_status(
self, mock_update_ezid_metadata_on_change, app, user,
project_public, url_public):
IdentifierFactory(referent=project_public, category='doi')
app.delete_json_api(url_public, auth=user.auth, expect_errors=True)
project_public.reload()
assert mock_update_ezid_metadata_on_change.called
def test_deletes_public_node_succeeds_as_owner(
self, app, user, project_public, url_public):
with assert_latest_log(NodeLog.PROJECT_DELETED, project_public):
res = app.delete_json_api(
url_public, auth=user.auth, expect_errors=True)
project_public.reload()
assert res.status_code == 204
assert project_public.is_deleted is True
def test_requesting_deleted_returns_410(
self, app, project_public, url_public):
project_public.is_deleted = True
project_public.save()
res = app.get(url_public, expect_errors=True)
assert res.status_code == 410
assert 'detail' in res.json['errors'][0]
def test_deletes_private_node_logged_in_contributor(
self, app, user, project_private, url_private):
with assert_latest_log(NodeLog.PROJECT_DELETED, project_private):
res = app.delete(url_private, auth=user.auth, expect_errors=True)
project_private.reload()
assert res.status_code == 204
assert project_private.is_deleted is True
@pytest.mark.django_db
class TestReturnDeletedNode:
@pytest.fixture()
def project_public_deleted(self, user):
return ProjectFactory(
is_deleted=True,
creator=user,
title='This public project has been deleted',
category='project',
is_public=True
)
@pytest.fixture()
def project_private_deleted(self, user):
return ProjectFactory(
is_deleted=True,
creator=user,
title='This private project has been deleted',
category='project',
is_public=False
)
@pytest.fixture()
def title_new(self):
return 'This deleted node has been edited'
@pytest.fixture()
def url_project_public_deleted(self, project_public_deleted):
return '/{}nodes/{}/'.format(API_BASE, project_public_deleted._id)
@pytest.fixture()
def url_project_private_deleted(self, project_private_deleted):
return '/{}nodes/{}/'.format(API_BASE, project_private_deleted._id)
def test_return_deleted_node(
self, app, user, title_new, project_public_deleted,
project_private_deleted, url_project_public_deleted,
url_project_private_deleted):
# test_return_deleted_public_node
res = app.get(url_project_public_deleted, expect_errors=True)
assert res.status_code == 410
# test_return_deleted_private_node
res = app.get(
url_project_private_deleted,
auth=user.auth,
expect_errors=True)
assert res.status_code == 410
# test_edit_deleted_public_node
res = app.put_json_api(
url_project_public_deleted,
params={
'title': title_new,
'node_id': project_public_deleted._id,
'category': project_public_deleted.category
},
auth=user.auth, expect_errors=True)
assert res.status_code == 410
# test_edit_deleted_private_node
res = app.put_json_api(
url_project_private_deleted,
params={
'title': title_new,
'node_id': project_private_deleted._id,
'category': project_private_deleted.category
},
auth=user.auth, expect_errors=True)
assert res.status_code == 410
# test_delete_deleted_public_node
res = app.delete(
url_project_public_deleted,
auth=user.auth,
expect_errors=True)
assert res.status_code == 410
# test_delete_deleted_private_node
res = app.delete(
url_project_private_deleted,
auth=user.auth,
expect_errors=True)
assert res.status_code == 410
@pytest.mark.django_db
class TestNodeTags:
@pytest.fixture()
def user_admin(self):
return AuthUserFactory()
@pytest.fixture()
def user_non_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def user_read_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def project_public(self, user, user_admin):
project_public = ProjectFactory(
title='Project One', is_public=True, creator=user)
project_public.add_contributor(
user_admin,
permissions=permissions.CREATOR_PERMISSIONS,
save=True)
project_public.add_contributor(
user, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS, save=True)
return project_public
@pytest.fixture()
def project_private(self, user, user_admin):
project_private = ProjectFactory(
title='Project Two', is_public=False, creator=user)
project_private.add_contributor(
user_admin, permissions=permissions.CREATOR_PERMISSIONS, save=True)
project_private.add_contributor(
user, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS, save=True)
return project_private
@pytest.fixture()
def url_public(self, project_public):
return '/{}nodes/{}/'.format(API_BASE, project_public._id)
@pytest.fixture()
def url_private(self, project_private):
return '/{}nodes/{}/'.format(API_BASE, project_private._id)
@pytest.fixture()
def payload_public(self, project_public):
return {
'data': {
'id': project_public._id,
'type': 'nodes',
'attributes': {
'tags': ['new-tag']
}
}
}
@pytest.fixture()
def payload_private(self, project_private):
return {
'data': {
'id': project_private._id,
'type': 'nodes',
'attributes': {
'tags': ['new-tag']
}
}
}
def test_public_project_starts_with_no_tags(self, app, url_public):
res = app.get(url_public)
assert res.status_code == 200
assert len(res.json['data']['attributes']['tags']) == 0
def test_node_detail_does_not_expose_system_tags(
self, app, project_public, url_public):
project_public.add_system_tag('systag', save=True)
res = app.get(url_public)
assert res.status_code == 200
assert len(res.json['data']['attributes']['tags']) == 0
def test_contributor_can_add_tag_to_public_project(
self, app, user, project_public, payload_public, url_public):
with assert_latest_log(NodeLog.TAG_ADDED, project_public):
res = app.patch_json_api(
url_public,
payload_public,
auth=user.auth,
expect_errors=True)
assert res.status_code == 200
# Ensure data is correct from the PATCH response
assert len(res.json['data']['attributes']['tags']) == 1
assert res.json['data']['attributes']['tags'][0] == 'new-tag'
# Ensure data is correct in the database
project_public.reload()
assert project_public.tags.count() == 1
assert project_public.tags.first()._id == 'new-tag'
# Ensure data is correct when GETting the resource again
reload_res = app.get(url_public)
assert len(reload_res.json['data']['attributes']['tags']) == 1
assert reload_res.json['data']['attributes']['tags'][0] == 'new-tag'
def test_contributor_can_add_tag_to_private_project(
self, app, user, project_private, payload_private, url_private):
with assert_latest_log(NodeLog.TAG_ADDED, project_private):
res = app.patch_json_api(
url_private, payload_private, auth=user.auth)
assert res.status_code == 200
# Ensure data is correct from the PATCH response
assert len(res.json['data']['attributes']['tags']) == 1
assert res.json['data']['attributes']['tags'][0] == 'new-tag'
# Ensure data is correct in the database
project_private.reload()
assert project_private.tags.count() == 1
assert project_private.tags.first()._id == 'new-tag'
# Ensure data is correct when GETting the resource again
reload_res = app.get(url_private, auth=user.auth)
assert len(reload_res.json['data']['attributes']['tags']) == 1
assert reload_res.json['data']['attributes']['tags'][0] == 'new-tag'
def test_partial_update_project_does_not_clear_tags(
self, app, user_admin, project_private, payload_private, url_private):
res = app.patch_json_api(
url_private,
payload_private,
auth=user_admin.auth)
assert res.status_code == 200
assert len(res.json['data']['attributes']['tags']) == 1
new_payload = {
'data': {
'id': project_private._id,
'type': 'nodes',
'attributes': {
'public': True
}
}
}
res = app.patch_json_api(
url_private,
new_payload,
auth=user_admin.auth)
assert res.status_code == 200
assert len(res.json['data']['attributes']['tags']) == 1
new_payload['data']['attributes']['public'] = False
res = app.patch_json_api(
url_private,
new_payload,
auth=user_admin.auth)
assert res.status_code == 200
assert len(res.json['data']['attributes']['tags']) == 1
def test_add_tag_to_project_errors(
self, app, user_non_contrib, user_read_contrib,
payload_public, payload_private,
url_public, url_private):
# test_non_authenticated_user_cannot_add_tag_to_public_project
res = app.patch_json_api(
url_public, payload_public,
expect_errors=True, auth=None)
assert res.status_code == 401
# test_non_authenticated_user_cannot_add_tag_to_private_project
res = app.patch_json_api(
url_private, payload_private,
expect_errors=True, auth=None)
assert res.status_code == 401
# test_non_contributor_cannot_add_tag_to_public_project
res = app.patch_json_api(
url_public, payload_public,
expect_errors=True, auth=user_non_contrib.auth)
assert res.status_code == 403
# test_non_contributor_cannot_add_tag_to_private_project
res = app.patch_json_api(
url_private, payload_private,
expect_errors=True, auth=user_non_contrib.auth)
assert res.status_code == 403
# test_read_only_contributor_cannot_add_tag_to_public_project
res = app.patch_json_api(
url_public, payload_public,
expect_errors=True,
auth=user_read_contrib.auth)
assert res.status_code == 403
# test_read_only_contributor_cannot_add_tag_to_private_project
res = app.patch_json_api(
url_private, payload_private,
expect_errors=True,
auth=user_read_contrib.auth)
assert res.status_code == 403
def test_tags_add_and_remove_properly(
self, app, user, project_private,
payload_private, url_private):
with assert_latest_log(NodeLog.TAG_ADDED, project_private):
res = app.patch_json_api(
url_private, payload_private, auth=user.auth)
assert res.status_code == 200
# Ensure adding tag data is correct from the PATCH response
assert len(res.json['data']['attributes']['tags']) == 1
assert res.json['data']['attributes']['tags'][0] == 'new-tag'
with assert_latest_log(NodeLog.TAG_REMOVED, project_private), assert_latest_log(NodeLog.TAG_ADDED, project_private, 1):
# Ensure removing and adding tag data is correct from the PATCH
# response
res = app.patch_json_api(
url_private,
{
'data': {
'id': project_private._id,
'type': 'nodes',
'attributes': {'tags': ['newer-tag']}
}
}, auth=user.auth)
assert res.status_code == 200
assert len(res.json['data']['attributes']['tags']) == 1
assert res.json['data']['attributes']['tags'][0] == 'newer-tag'
with assert_latest_log(NodeLog.TAG_REMOVED, project_private):
# Ensure removing tag data is correct from the PATCH response
res = app.patch_json_api(
url_private,
{
'data': {
'id': project_private._id,
'type': 'nodes',
'attributes': {'tags': []}
}
}, auth=user.auth)
assert res.status_code == 200
assert len(res.json['data']['attributes']['tags']) == 0
def test_tags_post_object_instead_of_list(self, user, app):
url = '/{}nodes/'.format(API_BASE)
payload = {'data': {
'type': 'nodes',
'attributes': {
'title': 'new title',
'category': 'project',
'tags': {'foo': 'bar'}
}
}}
res = app.post_json_api(
url, payload, auth=user.auth,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Expected a list of items but got type "dict".'
def test_tags_patch_object_instead_of_list(
self, app, user, payload_public, url_public):
payload_public['data']['attributes']['tags'] = {'foo': 'bar'}
res = app.patch_json_api(
url_public, payload_public,
auth=user.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Expected a list of items but got type "dict".'
@pytest.mark.django_db
class TestNodeLicense:
@pytest.fixture()
def user_admin(self):
return AuthUserFactory()
@pytest.fixture()
def user_two(self):
return AuthUserFactory()
@pytest.fixture()
def user_read_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def license_name(self):
return 'MIT License'
@pytest.fixture()
def node_license(self, license_name):
return NodeLicense.objects.filter(name=license_name).first()
@pytest.fixture()
def year(self):
return '2105'
@pytest.fixture()
def copyright_holders(self):
return ['Foo', 'Bar']
@pytest.fixture()
def project_public(
self, user, user_admin, node_license,
year, copyright_holders):
project_public = ProjectFactory(
title='Project One', is_public=True, creator=user)
project_public.add_contributor(
user_admin,
permissions=permissions.CREATOR_PERMISSIONS,
save=True)
project_public.add_contributor(
user, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS, save=True)
project_public.node_license = NodeLicenseRecordFactory(
node_license=node_license,
year=year,
copyright_holders=copyright_holders
)
project_public.save()
return project_public
@pytest.fixture()
def project_private(
self, user, user_admin, node_license,
year, copyright_holders):
project_private = ProjectFactory(
title='Project Two', is_public=False, creator=user)
project_private.add_contributor(
user_admin, permissions=permissions.CREATOR_PERMISSIONS, save=True)
project_private.add_contributor(
user, permissions=permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS, save=True)
project_private.node_license = NodeLicenseRecordFactory(
node_license=node_license,
year=year,
copyright_holders=copyright_holders
)
project_private.save()
return project_private
@pytest.fixture()
def url_public(self, project_public):
return '/{}nodes/{}/'.format(API_BASE, project_public._id)
@pytest.fixture()
def url_private(self, project_private):
return '/{}nodes/{}/'.format(API_BASE, project_private._id)
def test_node_has(
self, app, user, node_license, project_public,
project_private, url_private, url_public):
# test_public_node_has_node_license
res = app.get(url_public)
assert project_public.node_license.year == res.json[
'data']['attributes']['node_license']['year']
# test_public_node_has_license_relationship
res = app.get(url_public)
expected_license_url = '/{}licenses/{}'.format(
API_BASE, node_license._id)
actual_license_url = res.json['data']['relationships']['license']['links']['related']['href']
assert expected_license_url in actual_license_url
# test_private_node_has_node_license
res = app.get(url_private, auth=user.auth)
assert project_private.node_license.year == res.json[
'data']['attributes']['node_license']['year']
# test_private_node_has_license_relationship
res = app.get(url_private, auth=user.auth)
expected_license_url = '/{}licenses/{}'.format(
API_BASE, node_license._id)
actual_license_url = res.json['data']['relationships']['license']['links']['related']['href']
assert expected_license_url in actual_license_url
def test_component_return_parent_license_if_no_license(
self, app, user, node_license, project_public):
node = NodeFactory(parent=project_public, creator=user)
node.save()
node_url = '/{}nodes/{}/'.format(API_BASE, node._id)
res = app.get(node_url, auth=user.auth)
assert not node.node_license
assert project_public.node_license.year == \
res.json['data']['attributes']['node_license']['year']
actual_license_url = res.json['data']['relationships']['license']['links']['related']['href']
expected_license_url = '/{}licenses/{}'.format(
API_BASE, node_license._id)
assert expected_license_url in actual_license_url
@pytest.mark.django_db
class TestNodeUpdateLicense:
@pytest.fixture()
def user_admin_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def user_write_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def user_read_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def user_non_contrib(self):
return AuthUserFactory()
@pytest.fixture()
def node(self, user_admin_contrib, user_write_contrib, user_read_contrib):
node = NodeFactory(creator=user_admin_contrib)
node.add_contributor(user_write_contrib, auth=Auth(user_admin_contrib))
node.add_contributor(
user_read_contrib,
auth=Auth(user_admin_contrib),
permissions=['read'])
node.save()
return node
@pytest.fixture()
def license_cc0(self):
return NodeLicense.objects.filter(name='CC0 1.0 Universal').first()
@pytest.fixture()
def license_mit(self):
return NodeLicense.objects.filter(name='MIT License').first()
@pytest.fixture()
def license_no(self):
return NodeLicense.objects.get(name='No license')
@pytest.fixture()
def url_node(self, node):
return '/{}nodes/{}/'.format(API_BASE, node._id)
@pytest.fixture()
def make_payload(self):
def payload(
node_id, license_id=None, license_year=None,
copyright_holders=None):
attributes = {}
if license_year and copyright_holders:
attributes = {
'node_license': {
'year': license_year,
'copyright_holders': copyright_holders
}
}
elif license_year:
attributes = {
'node_license': {
'year': license_year
}
}
elif copyright_holders:
attributes = {
'node_license': {
'copyright_holders': copyright_holders
}
}
return {
'data': {
'type': 'nodes',
'id': node_id,
'attributes': attributes,
'relationships': {
'license': {
'data': {
'type': 'licenses',
'id': license_id
}
}
}
}
} if license_id else {
'data': {
'type': 'nodes',
'id': node_id,
'attributes': attributes
}
}
return payload
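    # Illustrative sketch (not part of the original tests): with both a
    # license id and attributes supplied, the payload factory above yields a
    # JSON API document shaped roughly like the following (ids are placeholders):
    # {'data': {'type': 'nodes',
    #           'id': '<node_id>',
    #           'attributes': {'node_license': {'year': '2015',
    #                                           'copyright_holders': ['A', 'B']}},
    #           'relationships': {'license': {'data': {'type': 'licenses',
    #                                                  'id': '<license_id>'}}}}}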
@pytest.fixture()
def make_request(self, app):
def request(url, data, auth=None, expect_errors=False):
return app.patch_json_api(
url, data, auth=auth, expect_errors=expect_errors)
return request
def test_admin_update_license_with_invalid_id(
self, user_admin_contrib, node, make_payload,
make_request, url_node):
data = make_payload(
node_id=node._id,
license_id='thisisafakelicenseid'
)
assert node.node_license is None
res = make_request(
url_node, data,
auth=user_admin_contrib.auth,
expect_errors=True)
assert res.status_code == 404
assert res.json['errors'][0]['detail'] == 'Unable to find specified license.'
node.reload()
assert node.node_license is None
def test_admin_can_update_license(
self, user_admin_contrib, node,
make_payload, make_request,
license_cc0, url_node):
data = make_payload(
node_id=node._id,
license_id=license_cc0._id
)
assert node.node_license is None
res = make_request(url_node, data, auth=user_admin_contrib.auth)
assert res.status_code == 200
node.reload()
assert node.node_license.node_license == license_cc0
assert node.node_license.year is None
assert node.node_license.copyright_holders == []
def test_admin_can_update_license_record(
self, user_admin_contrib, node,
make_payload, make_request,
license_no, url_node):
data = make_payload(
node_id=node._id,
license_id=license_no._id,
license_year='2015',
copyright_holders=['Mr. Monument', 'Princess OSF']
)
assert node.node_license is None
res = make_request(url_node, data, auth=user_admin_contrib.auth)
assert res.status_code == 200
node.reload()
assert node.node_license.node_license == license_no
assert node.node_license.year == '2015'
assert node.node_license.copyright_holders == [
'Mr. Monument', 'Princess OSF']
def test_cannot_update(
self, user_write_contrib, user_read_contrib,
user_non_contrib, node, make_payload,
make_request, license_cc0, url_node):
# def test_rw_contributor_cannot_update_license(self):
data = make_payload(
node_id=node._id,
license_id=license_cc0._id
)
res = make_request(
url_node, data,
auth=user_write_contrib.auth,
expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
# def test_read_contributor_cannot_update_license(self):
data = make_payload(
node_id=node._id,
license_id=license_cc0._id
)
res = make_request(
url_node, data,
auth=user_read_contrib.auth,
expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
# def test_non_contributor_cannot_update_license(self):
data = make_payload(
node_id=node._id,
license_id=license_cc0._id
)
res = make_request(
url_node, data,
auth=user_non_contrib.auth,
expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == exceptions.PermissionDenied.default_detail
# def test_unauthenticated_user_cannot_update_license(self):
data = make_payload(
node_id=node._id,
license_id=license_cc0._id
)
res = make_request(url_node, data, expect_errors=True)
assert res.status_code == 401
assert res.json['errors'][0]['detail'] == exceptions.NotAuthenticated.default_detail
def test_update_node_with_existing_license_year_attribute_only(
self, user_admin_contrib, node, make_payload,
make_request, license_no, url_node):
node.set_node_license(
{
'id': license_no.license_id,
'year': '2014',
'copyrightHolders': ['Reason', 'Mr. E']
},
Auth(user_admin_contrib),
)
node.save()
assert node.node_license.node_license == license_no
assert node.node_license.year == '2014'
assert node.node_license.copyright_holders == ['Reason', 'Mr. E']
data = make_payload(
node_id=node._id,
license_year='2015'
)
res = make_request(url_node, data, auth=user_admin_contrib.auth)
assert res.status_code == 200
node.node_license.reload()
assert node.node_license.node_license == license_no
assert node.node_license.year == '2015'
assert node.node_license.copyright_holders == ['Reason', 'Mr. E']
def test_update_node_with_existing_license_copyright_holders_attribute_only(
self, user_admin_contrib, node, make_payload, make_request, license_no, url_node):
node.set_node_license(
{
'id': license_no.license_id,
'year': '2014',
'copyrightHolders': ['Reason', 'Mr. E']
},
Auth(user_admin_contrib),
)
node.save()
assert node.node_license.node_license == license_no
assert node.node_license.year == '2014'
assert node.node_license.copyright_holders == ['Reason', 'Mr. E']
data = make_payload(
node_id=node._id,
copyright_holders=['Mr. Monument', 'Princess OSF']
)
res = make_request(url_node, data, auth=user_admin_contrib.auth)
assert res.status_code == 200
node.node_license.reload()
assert node.node_license.node_license == license_no
assert node.node_license.year == '2014'
assert node.node_license.copyright_holders == [
'Mr. Monument', 'Princess OSF']
def test_update_node_with_existing_license_relationship_only(
self, user_admin_contrib, node, make_payload,
make_request, license_cc0, license_no, url_node):
node.set_node_license(
{
'id': license_no.license_id,
'year': '2014',
'copyrightHolders': ['Reason', 'Mr. E']
},
Auth(user_admin_contrib),
)
node.save()
assert node.node_license.node_license == license_no
assert node.node_license.year == '2014'
assert node.node_license.copyright_holders == ['Reason', 'Mr. E']
data = make_payload(
node_id=node._id,
license_id=license_cc0._id
)
res = make_request(url_node, data, auth=user_admin_contrib.auth)
assert res.status_code == 200
node.node_license.reload()
assert node.node_license.node_license == license_cc0
assert node.node_license.year == '2014'
assert node.node_license.copyright_holders == ['Reason', 'Mr. E']
def test_update_node_with_existing_license_relationship_and_attributes(
self, user_admin_contrib, node, make_payload, make_request,
license_no, license_cc0, url_node):
node.set_node_license(
{
'id': license_no.license_id,
'year': '2014',
'copyrightHolders': ['Reason', 'Mr. E']
},
Auth(user_admin_contrib),
save=True
)
assert node.node_license.node_license == license_no
assert node.node_license.year == '2014'
assert node.node_license.copyright_holders == ['Reason', 'Mr. E']
data = make_payload(
node_id=node._id,
license_id=license_cc0._id,
license_year='2015',
copyright_holders=['Mr. Monument', 'Princess OSF']
)
res = make_request(url_node, data, auth=user_admin_contrib.auth)
assert res.status_code == 200
node.node_license.reload()
assert node.node_license.node_license == license_cc0
assert node.node_license.year == '2015'
assert node.node_license.copyright_holders == [
'Mr. Monument', 'Princess OSF']
def test_update_node_license_without_required_year_in_payload(
self, user_admin_contrib, node, make_payload,
make_request, license_no, url_node):
data = make_payload(
node_id=node._id,
license_id=license_no._id,
copyright_holders=['Rick', 'Morty']
)
res = make_request(
url_node, data,
auth=user_admin_contrib.auth,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'year must be specified for this license'
def test_update_node_license_without_required_copyright_holders_in_payload_(
self, user_admin_contrib, node, make_payload, make_request, license_no, url_node):
data = make_payload(
node_id=node._id,
license_id=license_no._id,
license_year='1994'
)
res = make_request(
url_node, data,
auth=user_admin_contrib.auth,
expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'copyrightHolders must be specified for this license'
def test_update_node_license_adds_log(
self, user_admin_contrib, node, make_payload,
make_request, license_cc0, url_node):
data = make_payload(
node_id=node._id,
license_id=license_cc0._id
)
logs_before_update = node.logs.count()
res = make_request(url_node, data, auth=user_admin_contrib.auth)
assert res.status_code == 200
node.reload()
logs_after_update = node.logs.count()
assert logs_before_update != logs_after_update
assert node.logs.latest().action == 'license_changed'
def test_update_node_license_without_change_does_not_add_log(
self, user_admin_contrib, node, make_payload,
make_request, license_no, url_node):
node.set_node_license(
{
'id': license_no.license_id,
'year': '2015',
'copyrightHolders': ['Kim', 'Kanye']
},
auth=Auth(user_admin_contrib),
save=True
)
before_num_logs = node.logs.count()
before_update_log = node.logs.latest()
data = make_payload(
node_id=node._id,
license_id=license_no._id,
license_year='2015',
copyright_holders=['Kanye', 'Kim']
)
res = make_request(url_node, data, auth=user_admin_contrib.auth)
node.reload()
after_num_logs = node.logs.count()
after_update_log = node.logs.latest()
assert res.status_code == 200
assert before_num_logs == after_num_logs
assert before_update_log._id == after_update_log._id
| chennan47/osf.io | api_tests/nodes/views/test_node_detail.py | Python | apache-2.0 | 75,427 |
example_template = Template({
'A': RsrcDef({}, []),
'B': RsrcDef({}, []),
'C': RsrcDef({'a': '4alpha'}, ['A', 'B']),
'D': RsrcDef({'c': GetRes('C')}, []),
'E': RsrcDef({'ca': GetAtt('C', 'a')}, []),
})
engine.create_stack('foo', example_template)
engine.noop(3)
engine.rollback_stack('foo')
engine.noop(6)
engine.call(verify, Template())
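# Descriptive note (not part of the original scenario): C depends on A and B,
# while D and E reference C via GetRes/GetAtt. Rolling back after three engine
# ticks should unwind the partially created graph, and the final verify call
# checks that the stack converges back to an empty template.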
| zaneb/heat-convergence-prototype | scenarios/basic_create_rollback.py | Python | apache-2.0 | 358 |
"""
Helper classes for creating frontend metadata
"""
class ContactPersonDesc(object):
"""
Description class for a contact person
"""
def __init__(self):
self.contact_type = None
self._email_address = []
self.given_name = None
self.sur_name = None
def add_email_address(self, address):
"""
Adds an email address to the person description
:type address: str
:param address: Address to be added
"""
self._email_address.append(address)
def to_dict(self):
"""
Returns a dictionary representation of the ContactPersonDesc.
The format is the same as a pysaml2 configuration for a contact person.
:rtype: dict[str, str]
:return: A dictionary representation
"""
person = {}
if self.contact_type:
person["contact_type"] = self.contact_type
if self._email_address:
person["email_address"] = self._email_address
if self.given_name:
person["given_name"] = self.given_name
if self.sur_name:
person["sur_name"] = self.sur_name
return person
class UIInfoDesc(object):
"""
Description class for UI info
"""
def __init__(self):
self._description = []
self._display_name = []
self._logos = []
def add_description(self, text, lang):
"""
Binds a description to the given language
:type text: str
:type lang: str
:param text: Description
:param lang: description language
"""
self._description.append({"text": text, "lang": lang})
def add_display_name(self, text, lang):
"""
Binds a display name to the given language
:type text: str
:type lang: str
:param text: Display name
:param lang: Language
"""
self._display_name.append({"text": text, "lang": lang})
def add_logo(self, text, width, height, lang=None):
"""
Binds a logo to the given language
:type text: str
:type width: str
:type height: str
:type lang: Optional[str]
:param text: Path to logo
:param width: width of logo
:param height: height of logo
:param lang: language
"""
logo_entry ={"text": text, "width": width, "height": height}
if lang:
logo_entry["lang"] = lang
self._logos.append(logo_entry)
def to_dict(self):
"""
Returns a dictionary representation of the UIInfoDesc object.
The format is the same as a pysaml2 configuration for ui info.
:rtype: dict[str, str]
:return: A dictionary representation
"""
ui_info = {}
if self._description:
ui_info["description"] = self._description
if self._display_name:
ui_info["display_name"] = self._display_name
if self._logos:
ui_info["logo"] = self._logos
return {"service": {"idp": {"ui_info": ui_info}}} if ui_info else {}
class OrganizationDesc(object):
"""
Description class for an organization
"""
def __init__(self):
self._display_name = []
self._name = []
self._url = []
def add_display_name(self, name, lang):
"""
Binds a display name to the given language
:type name: str
:type lang: str
:param name: display name
:param lang: language
"""
self._display_name.append((name, lang))
def add_name(self, name, lang):
"""
Binds a name to the given language
:type name: str
:type lang: str
:param name: Name of the organization
:param lang: language
"""
self._name.append((name, lang))
def add_url(self, url, lang):
"""
        Binds a URL to the given language
:type url: str
:type lang: str
:param url: url to bind
:param lang: language
"""
self._url.append((url, lang))
def to_dict(self):
"""
Returns a dictionary representation of the OrganizationDesc object.
The format is the same as a pysaml2 configuration for organization.
:rtype: dict[str, str]
:return: A dictionary representation
"""
org = {}
if self._display_name:
org["display_name"] = self._display_name
if self._name:
org["name"] = self._name
if self._url:
org["url"] = self._url
return {"organization": org} if org else {}
class MetadataDescription(object):
"""
Description class for a backend module
"""
def __init__(self, entity_id):
self.entity_id = entity_id
self._organization = None
self._contact_person = []
self._ui_info = None
def organization(self, organization):
"""
Set an organization to the description
:type organization: satosa.metadata_creation.description.OrganizationDesc
:param organization: Organization description
"""
if not isinstance(organization, OrganizationDesc):
raise TypeError("organization must be of type OrganizationDesc")
self._organization = organization
organization = property(None, organization)
def add_contact_person(self, person):
"""
Adds a contact person to the description
:type person: satosa.metadata_creation.description.ContactPersonDesc
:param person: The contact person to be added
"""
if not isinstance(person, ContactPersonDesc):
raise TypeError("person must be of type ContactPersonDesc")
self._contact_person.append(person)
def ui_info(self, ui_info):
"""
        Set a UI info to the description
:type ui_info: satosa.metadata_creation.description.UIInfoDesc
:param ui_info: The ui info to be set
"""
if not isinstance(ui_info, UIInfoDesc):
raise TypeError("ui_info must be of type UIInfoDesc")
self._ui_info = ui_info
ui_info = property(None, ui_info)
def to_dict(self):
"""
Returns a dictionary representation of the MetadataDescription object.
The format is the same as a pysaml2 configuration
:rtype: dict[str, Any]
:return: A dictionary representation
"""
description = {}
description["entityid"] = self.entity_id
if self._organization:
description.update(self._organization.to_dict())
if self._contact_person:
description['contact_person'] = []
for person in self._contact_person:
description['contact_person'].append(person.to_dict())
if self._ui_info:
description.update(self._ui_info.to_dict())
return description
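# --- Hedged usage sketch (illustrative, not part of the original module) ---
# A minimal example of composing the helpers above into a metadata
# description; the entity id, names and addresses are placeholders.
def _example_metadata_description():
    person = ContactPersonDesc()
    person.contact_type = "technical"
    person.add_email_address("support@proxy.example.com")
    ui_info = UIInfoDesc()
    ui_info.add_display_name("Example proxy", "en")
    organization = OrganizationDesc()
    organization.add_name("Example Org", "en")
    organization.add_url("https://example.com", "en")
    description = MetadataDescription("https://proxy.example.com/metadata")
    description.organization = organization
    description.ui_info = ui_info
    description.add_contact_person(person)
    # The returned dict follows the same layout as a pysaml2 configuration.
    return description.to_dict()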
| irtnog/SATOSA | src/satosa/metadata_creation/description.py | Python | apache-2.0 | 6,948 |
# Webhooks for external integrations.
from __future__ import absolute_import
from django.utils.translation import ugettext as _
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success, json_error
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
from zerver.models import Client, UserProfile
from django.http import HttpRequest, HttpResponse
from typing import Any, Dict, Text
CRASHLYTICS_SUBJECT_TEMPLATE = '{display_id}: {title}'
CRASHLYTICS_MESSAGE_TEMPLATE = '[Issue]({url}) impacts at least {impacted_devices_count} device(s).'
CRASHLYTICS_SETUP_SUBJECT_TEMPLATE = "Setup"
CRASHLYTICS_SETUP_MESSAGE_TEMPLATE = "Webhook has been successfully configured."
VERIFICATION_EVENT = 'verification'
@api_key_only_webhook_view('Crashlytics')
@has_request_variables
def api_crashlytics_webhook(request, user_profile, client, payload=REQ(argument_type='body'),
stream=REQ(default='crashlytics')):
# type: (HttpRequest, UserProfile, Client, Dict[str, Any], Text) -> HttpResponse
try:
event = payload['event']
if event == VERIFICATION_EVENT:
subject = CRASHLYTICS_SETUP_SUBJECT_TEMPLATE
body = CRASHLYTICS_SETUP_MESSAGE_TEMPLATE
else:
issue_body = payload['payload']
subject = CRASHLYTICS_SUBJECT_TEMPLATE.format(
display_id=issue_body['display_id'],
title=issue_body['title']
)
body = CRASHLYTICS_MESSAGE_TEMPLATE.format(
impacted_devices_count=issue_body['impacted_devices_count'],
url=issue_body['url']
)
except KeyError as e:
        return json_error(_("Missing key {} in JSON").format(str(e)))
check_send_message(user_profile, client, 'stream', [stream],
subject, body)
return json_success()
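# Hedged example (illustrative only): a minimal non-verification payload
# carrying just the fields the view above reads; every value is a placeholder.
EXAMPLE_ISSUE_PAYLOAD = {
    'event': 'issue_impact_change',  # anything other than 'verification'
    'payload': {
        'display_id': 123,
        'title': 'Example crash title',
        'impacted_devices_count': 16,
        'url': 'http://crashlytics.example.com/issues/123',
    },
}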
| SmartPeople/zulip | zerver/webhooks/crashlytics/view.py | Python | apache-2.0 | 1,910 |
# Copyright 2012 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import ctypes
from ctypes import wintypes
import os
import re
import struct
import time
from oslo_log import log as oslo_logging
import six
from six.moves import winreg
from tzlocal import windows_tz
from win32com import client
import win32net
import win32netcon
import win32process
import win32security
import wmi
from cloudbaseinit import exception
from cloudbaseinit.osutils import base
from cloudbaseinit.utils.windows import disk
from cloudbaseinit.utils.windows import network
from cloudbaseinit.utils.windows import privilege
from cloudbaseinit.utils.windows import timezone
LOG = oslo_logging.getLogger(__name__)
AF_INET6 = 23
UNICAST = 1
MANUAL = 1
PREFERRED_ADDR = 4
advapi32 = ctypes.windll.advapi32
kernel32 = ctypes.windll.kernel32
netapi32 = ctypes.windll.netapi32
userenv = ctypes.windll.userenv
iphlpapi = ctypes.windll.iphlpapi
Ws2_32 = ctypes.windll.Ws2_32
setupapi = ctypes.windll.setupapi
msvcrt = ctypes.cdll.msvcrt
ntdll = ctypes.windll.ntdll
class Win32_PROFILEINFO(ctypes.Structure):
_fields_ = [
('dwSize', wintypes.DWORD),
('dwFlags', wintypes.DWORD),
('lpUserName', wintypes.LPWSTR),
('lpProfilePath', wintypes.LPWSTR),
('lpDefaultPath', wintypes.LPWSTR),
('lpServerName', wintypes.LPWSTR),
('lpPolicyPath', wintypes.LPWSTR),
('hprofile', wintypes.HANDLE)
]
class Win32_LOCALGROUP_MEMBERS_INFO_3(ctypes.Structure):
_fields_ = [
('lgrmi3_domainandname', wintypes.LPWSTR)
]
class Win32_MIB_IPFORWARDROW(ctypes.Structure):
_fields_ = [
('dwForwardDest', wintypes.DWORD),
('dwForwardMask', wintypes.DWORD),
('dwForwardPolicy', wintypes.DWORD),
('dwForwardNextHop', wintypes.DWORD),
('dwForwardIfIndex', wintypes.DWORD),
('dwForwardType', wintypes.DWORD),
('dwForwardProto', wintypes.DWORD),
('dwForwardAge', wintypes.DWORD),
('dwForwardNextHopAS', wintypes.DWORD),
('dwForwardMetric1', wintypes.DWORD),
('dwForwardMetric2', wintypes.DWORD),
('dwForwardMetric3', wintypes.DWORD),
('dwForwardMetric4', wintypes.DWORD),
('dwForwardMetric5', wintypes.DWORD)
]
class Win32_MIB_IPFORWARDTABLE(ctypes.Structure):
_fields_ = [
('dwNumEntries', wintypes.DWORD),
('table', Win32_MIB_IPFORWARDROW * 1)
]
class Win32_OSVERSIONINFOEX_W(ctypes.Structure):
_fields_ = [
('dwOSVersionInfoSize', wintypes.DWORD),
('dwMajorVersion', wintypes.DWORD),
('dwMinorVersion', wintypes.DWORD),
('dwBuildNumber', wintypes.DWORD),
('dwPlatformId', wintypes.DWORD),
('szCSDVersion', wintypes.WCHAR * 128),
('wServicePackMajor', wintypes.WORD),
('wServicePackMinor', wintypes.WORD),
('wSuiteMask', wintypes.WORD),
('wProductType', wintypes.BYTE),
('wReserved', wintypes.BYTE)
]
class Win32_SP_DEVICE_INTERFACE_DATA(ctypes.Structure):
_fields_ = [
('cbSize', wintypes.DWORD),
('InterfaceClassGuid', disk.GUID),
('Flags', wintypes.DWORD),
('Reserved', ctypes.POINTER(wintypes.ULONG))
]
class Win32_SP_DEVICE_INTERFACE_DETAIL_DATA_W(ctypes.Structure):
_fields_ = [
('cbSize', wintypes.DWORD),
('DevicePath', ctypes.c_byte * 2)
]
class Win32_STORAGE_DEVICE_NUMBER(ctypes.Structure):
_fields_ = [
('DeviceType', wintypes.DWORD),
('DeviceNumber', wintypes.DWORD),
('PartitionNumber', wintypes.DWORD)
]
msvcrt.malloc.argtypes = [ctypes.c_size_t]
msvcrt.malloc.restype = ctypes.c_void_p
msvcrt.free.argtypes = [ctypes.c_void_p]
msvcrt.free.restype = None
ntdll.RtlVerifyVersionInfo.argtypes = [
ctypes.POINTER(Win32_OSVERSIONINFOEX_W),
wintypes.DWORD, wintypes.ULARGE_INTEGER]
ntdll.RtlVerifyVersionInfo.restype = wintypes.DWORD
kernel32.VerSetConditionMask.argtypes = [wintypes.ULARGE_INTEGER,
wintypes.DWORD,
wintypes.BYTE]
kernel32.VerSetConditionMask.restype = wintypes.ULARGE_INTEGER
kernel32.SetComputerNameExW.argtypes = [ctypes.c_int, wintypes.LPCWSTR]
kernel32.SetComputerNameExW.restype = wintypes.BOOL
kernel32.GetLogicalDriveStringsW.argtypes = [wintypes.DWORD, wintypes.LPWSTR]
kernel32.GetLogicalDriveStringsW.restype = wintypes.DWORD
kernel32.GetDriveTypeW.argtypes = [wintypes.LPCWSTR]
kernel32.GetDriveTypeW.restype = wintypes.UINT
kernel32.CreateFileW.argtypes = [wintypes.LPCWSTR, wintypes.DWORD,
wintypes.DWORD, wintypes.LPVOID,
wintypes.DWORD, wintypes.DWORD,
wintypes.HANDLE]
kernel32.CreateFileW.restype = wintypes.HANDLE
kernel32.DeviceIoControl.argtypes = [wintypes.HANDLE, wintypes.DWORD,
wintypes.LPVOID, wintypes.DWORD,
wintypes.LPVOID, wintypes.DWORD,
ctypes.POINTER(wintypes.DWORD),
wintypes.LPVOID]
kernel32.DeviceIoControl.restype = wintypes.BOOL
kernel32.GetProcessHeap.argtypes = []
kernel32.GetProcessHeap.restype = wintypes.HANDLE
kernel32.HeapAlloc.argtypes = [wintypes.HANDLE, wintypes.DWORD,
ctypes.c_size_t]
kernel32.HeapAlloc.restype = wintypes.LPVOID
kernel32.HeapFree.argtypes = [wintypes.HANDLE, wintypes.DWORD,
wintypes.LPVOID]
kernel32.HeapFree.restype = wintypes.BOOL
iphlpapi.GetIpForwardTable.argtypes = [
ctypes.POINTER(Win32_MIB_IPFORWARDTABLE),
ctypes.POINTER(wintypes.ULONG),
wintypes.BOOL]
iphlpapi.GetIpForwardTable.restype = wintypes.DWORD
Ws2_32.inet_ntoa.restype = ctypes.c_char_p
setupapi.SetupDiGetClassDevsW.argtypes = [ctypes.POINTER(disk.GUID),
wintypes.LPCWSTR,
wintypes.HANDLE,
wintypes.DWORD]
setupapi.SetupDiGetClassDevsW.restype = wintypes.HANDLE
setupapi.SetupDiEnumDeviceInterfaces.argtypes = [
wintypes.HANDLE,
wintypes.LPVOID,
ctypes.POINTER(disk.GUID),
wintypes.DWORD,
ctypes.POINTER(Win32_SP_DEVICE_INTERFACE_DATA)]
setupapi.SetupDiEnumDeviceInterfaces.restype = wintypes.BOOL
setupapi.SetupDiGetDeviceInterfaceDetailW.argtypes = [
wintypes.HANDLE,
ctypes.POINTER(Win32_SP_DEVICE_INTERFACE_DATA),
ctypes.POINTER(Win32_SP_DEVICE_INTERFACE_DETAIL_DATA_W),
wintypes.DWORD,
ctypes.POINTER(wintypes.DWORD),
wintypes.LPVOID]
setupapi.SetupDiGetDeviceInterfaceDetailW.restype = wintypes.BOOL
setupapi.SetupDiDestroyDeviceInfoList.argtypes = [wintypes.HANDLE]
setupapi.SetupDiDestroyDeviceInfoList.restype = wintypes.BOOL
VER_MAJORVERSION = 1
VER_MINORVERSION = 2
VER_BUILDNUMBER = 4
VER_GREATER_EQUAL = 3
GUID_DEVINTERFACE_DISK = disk.GUID(0x53f56307, 0xb6bf, 0x11d0, 0x94, 0xf2,
0x00, 0xa0, 0xc9, 0x1e, 0xfb, 0x8b)
class WindowsUtils(base.BaseOSUtils):
NERR_GroupNotFound = 2220
NERR_UserNotFound = 2221
ERROR_ACCESS_DENIED = 5
ERROR_INSUFFICIENT_BUFFER = 122
ERROR_NO_DATA = 232
ERROR_NO_SUCH_MEMBER = 1387
ERROR_MEMBER_IN_ALIAS = 1378
ERROR_INVALID_MEMBER = 1388
ERROR_NO_MORE_FILES = 18
STATUS_REVISION_MISMATCH = 0xC0000059
ADS_UF_PASSWORD_EXPIRED = 0x800000
PASSWORD_CHANGED_FLAG = 1
INVALID_HANDLE_VALUE = 0xFFFFFFFF
FILE_SHARE_READ = 1
FILE_SHARE_WRITE = 2
OPEN_EXISTING = 3
IOCTL_STORAGE_GET_DEVICE_NUMBER = 0x002D1080
MAX_PATH = 260
DIGCF_PRESENT = 2
DIGCF_DEVICEINTERFACE = 0x10
DRIVE_CDROM = 5
SERVICE_STATUS_STOPPED = "Stopped"
SERVICE_STATUS_START_PENDING = "Start Pending"
SERVICE_STATUS_STOP_PENDING = "Stop Pending"
SERVICE_STATUS_RUNNING = "Running"
SERVICE_STATUS_CONTINUE_PENDING = "Continue Pending"
SERVICE_STATUS_PAUSE_PENDING = "Pause Pending"
SERVICE_STATUS_PAUSED = "Paused"
SERVICE_STATUS_UNKNOWN = "Unknown"
SERVICE_START_MODE_AUTOMATIC = "Automatic"
SERVICE_START_MODE_MANUAL = "Manual"
SERVICE_START_MODE_DISABLED = "Disabled"
ComputerNamePhysicalDnsHostname = 5
_config_key = 'SOFTWARE\\Cloudbase Solutions\\Cloudbase-Init\\'
_service_name = 'cloudbase-init'
_FW_IP_PROTOCOL_TCP = 6
_FW_IP_PROTOCOL_UDP = 17
_FW_SCOPE_ALL = 0
_FW_SCOPE_LOCAL_SUBNET = 1
def reboot(self):
with privilege.acquire_privilege(win32security.SE_SHUTDOWN_NAME):
ret_val = advapi32.InitiateSystemShutdownExW(
0, "Cloudbase-Init reboot",
0, True, True, 0)
if not ret_val:
raise exception.WindowsCloudbaseInitException(
"Reboot failed: %r")
def user_exists(self, username):
try:
self._get_user_info(username, 1)
return True
except exception.ItemNotFoundException:
# User not found
return False
def create_user(self, username, password, password_expires=False):
user_info = {
"name": username,
"password": password,
"priv": win32netcon.USER_PRIV_USER,
"flags": win32netcon.UF_NORMAL_ACCOUNT | win32netcon.UF_SCRIPT,
}
if not password_expires:
user_info["flags"] |= win32netcon.UF_DONT_EXPIRE_PASSWD
try:
win32net.NetUserAdd(None, 1, user_info)
except win32net.error as ex:
raise exception.CloudbaseInitException(
"Create user failed: %s" % ex.args[2])
def _get_user_info(self, username, level):
try:
return win32net.NetUserGetInfo(None, username, level)
except win32net.error as ex:
if ex.args[0] == self.NERR_UserNotFound:
raise exception.ItemNotFoundException(
"User not found: %s" % username)
else:
raise exception.CloudbaseInitException(
"Failed to get user info: %s" % ex.args[2])
def set_user_password(self, username, password, password_expires=False):
user_info = self._get_user_info(username, 1)
user_info["password"] = password
if password_expires:
user_info["flags"] &= ~win32netcon.UF_DONT_EXPIRE_PASSWD
else:
user_info["flags"] |= win32netcon.UF_DONT_EXPIRE_PASSWD
try:
win32net.NetUserSetInfo(None, username, 1, user_info)
except win32net.error as ex:
raise exception.CloudbaseInitException(
"Set user password failed: %s" % ex.args[2])
def change_password_next_logon(self, username):
"""Force the given user to change the password at next logon."""
user_info = self._get_user_info(username, 4)
user_info["flags"] &= ~win32netcon.UF_DONT_EXPIRE_PASSWD
user_info["password_expired"] = 1
try:
win32net.NetUserSetInfo(None, username, 4, user_info)
except win32net.error as ex:
raise exception.CloudbaseInitException(
"Setting password expiration failed: %s" % ex.args[2])
@staticmethod
def _get_cch_referenced_domain_name(domain_name):
return wintypes.DWORD(
ctypes.sizeof(domain_name) // ctypes.sizeof(wintypes.WCHAR))
def _get_user_sid_and_domain(self, username):
sid = ctypes.create_string_buffer(1024)
cbSid = wintypes.DWORD(ctypes.sizeof(sid))
domainName = ctypes.create_unicode_buffer(1024)
cchReferencedDomainName = self._get_cch_referenced_domain_name(
domainName)
sidNameUse = wintypes.DWORD()
ret_val = advapi32.LookupAccountNameW(
0, six.text_type(username), sid, ctypes.byref(cbSid), domainName,
ctypes.byref(cchReferencedDomainName), ctypes.byref(sidNameUse))
if not ret_val:
raise exception.WindowsCloudbaseInitException(
"Cannot get user SID: %r")
return sid, domainName.value
def add_user_to_local_group(self, username, groupname):
lmi = Win32_LOCALGROUP_MEMBERS_INFO_3()
lmi.lgrmi3_domainandname = six.text_type(username)
ret_val = netapi32.NetLocalGroupAddMembers(0, six.text_type(groupname),
3, ctypes.addressof(lmi), 1)
if ret_val == self.NERR_GroupNotFound:
raise exception.CloudbaseInitException('Group not found')
elif ret_val == self.ERROR_ACCESS_DENIED:
raise exception.CloudbaseInitException('Access denied')
elif ret_val == self.ERROR_NO_SUCH_MEMBER:
raise exception.CloudbaseInitException('Username not found')
elif ret_val == self.ERROR_MEMBER_IN_ALIAS:
# The user is already a member of the group
pass
elif ret_val == self.ERROR_INVALID_MEMBER:
raise exception.CloudbaseInitException('Invalid user')
elif ret_val != 0:
raise exception.CloudbaseInitException('Unknown error')
def get_user_sid(self, username):
try:
user_info = self._get_user_info(username, 4)
return str(user_info["user_sid"])[6:]
except exception.ItemNotFoundException:
# User not found
pass
def create_user_logon_session(self, username, password, domain='.',
load_profile=True):
token = wintypes.HANDLE()
ret_val = advapi32.LogonUserW(six.text_type(username),
six.text_type(domain),
six.text_type(password), 2, 0,
ctypes.byref(token))
if not ret_val:
raise exception.WindowsCloudbaseInitException(
"User logon failed: %r")
if load_profile:
pi = Win32_PROFILEINFO()
pi.dwSize = ctypes.sizeof(Win32_PROFILEINFO)
pi.lpUserName = six.text_type(username)
ret_val = userenv.LoadUserProfileW(token, ctypes.byref(pi))
if not ret_val:
kernel32.CloseHandle(token)
raise exception.WindowsCloudbaseInitException(
"Cannot load user profile: %r")
return token
def close_user_logon_session(self, token):
kernel32.CloseHandle(token)
def get_user_home(self, username):
user_sid = self.get_user_sid(username)
if user_sid:
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\'
'Microsoft\\Windows NT\\CurrentVersion\\'
'ProfileList\\%s' % user_sid) as key:
return winreg.QueryValueEx(key, 'ProfileImagePath')[0]
LOG.debug('Home directory not found for user %r', username)
return None
def sanitize_shell_input(self, value):
return value.replace('"', '\\"')
def set_host_name(self, new_host_name):
ret_val = kernel32.SetComputerNameExW(
self.ComputerNamePhysicalDnsHostname,
six.text_type(new_host_name))
if not ret_val:
raise exception.WindowsCloudbaseInitException(
"Cannot set host name: %r")
return True
def get_network_adapters(self):
"""Return available adapters as a list of tuples of (name, mac)."""
conn = wmi.WMI(moniker='//./root/cimv2')
# Get Ethernet adapters only
wql = ('SELECT * FROM Win32_NetworkAdapter WHERE '
'AdapterTypeId = 0 AND MACAddress IS NOT NULL')
if self.check_os_version(6, 0):
wql += ' AND PhysicalAdapter = True'
q = conn.query(wql)
return [(r.Name, r.MACAddress) for r in q]
def get_dhcp_hosts_in_use(self):
dhcp_hosts = []
for net_addr in network.get_adapter_addresses():
if net_addr["dhcp_enabled"] and net_addr["dhcp_server"]:
dhcp_hosts.append((net_addr["mac_address"],
net_addr["dhcp_server"]))
return dhcp_hosts
def set_ntp_client_config(self, ntp_hosts):
base_dir = self._get_system_dir()
w32tm_path = os.path.join(base_dir, "w32tm.exe")
# Convert the NTP hosts list to a string, in order to pass
# it to w32tm.
ntp_hosts = ",".join(ntp_hosts)
args = [w32tm_path, '/config', '/manualpeerlist:%s' % ntp_hosts,
'/syncfromflags:manual', '/update']
(out, err, ret_val) = self.execute_process(args, shell=False)
if ret_val:
raise exception.CloudbaseInitException(
'w32tm failed to configure NTP.\nOutput: %(out)s\nError:'
' %(err)s' % {'out': out, 'err': err})
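    # Illustrative note: for ntp_hosts == ["0.pool.ntp.org", "1.pool.ntp.org"]
    # (placeholder hosts), the method above runs a command equivalent to:
    #   w32tm.exe /config /manualpeerlist:0.pool.ntp.org,1.pool.ntp.org
    #             /syncfromflags:manual /update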
def set_network_adapter_mtu(self, mac_address, mtu):
if not self.check_os_version(6, 0):
raise exception.CloudbaseInitException(
'Setting the MTU is currently not supported on Windows XP '
'and Windows Server 2003')
iface_index_list = [
net_addr["interface_index"] for net_addr
in network.get_adapter_addresses()
if net_addr["mac_address"] == mac_address]
if not iface_index_list:
raise exception.CloudbaseInitException(
'Network interface with MAC address "%s" not found' %
mac_address)
else:
iface_index = iface_index_list[0]
LOG.debug('Setting MTU for interface "%(mac_address)s" with '
'value "%(mtu)s"',
{'mac_address': mac_address, 'mtu': mtu})
base_dir = self._get_system_dir()
netsh_path = os.path.join(base_dir, 'netsh.exe')
args = [netsh_path, "interface", "ipv4", "set", "subinterface",
str(iface_index), "mtu=%s" % mtu,
"store=persistent"]
(out, err, ret_val) = self.execute_process(args, shell=False)
if ret_val:
raise exception.CloudbaseInitException(
'Setting MTU for interface "%(mac_address)s" with '
'value "%(mtu)s" failed' % {'mac_address': mac_address,
'mtu': mtu})
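    # Illustrative note: with interface index 12 and mtu=1450 (placeholder
    # values), the call above is equivalent to running:
    #   netsh.exe interface ipv4 set subinterface 12 mtu=1450 store=persistent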
def set_static_network_config(self, mac_address, address, netmask,
broadcast, gateway, dnsnameservers):
conn = wmi.WMI(moniker='//./root/cimv2')
query = conn.query("SELECT * FROM Win32_NetworkAdapter WHERE "
"MACAddress = '{}'".format(mac_address))
if not len(query):
raise exception.CloudbaseInitException(
"Network adapter not found")
adapter_config = query[0].associators(
wmi_result_class='Win32_NetworkAdapterConfiguration')[0]
LOG.debug("Setting static IP address")
(ret_val,) = adapter_config.EnableStatic([address], [netmask])
if ret_val > 1:
raise exception.CloudbaseInitException(
"Cannot set static IP address on network adapter (%d)",
ret_val)
reboot_required = (ret_val == 1)
if gateway:
LOG.debug("Setting static gateways")
(ret_val,) = adapter_config.SetGateways([gateway], [1])
if ret_val > 1:
raise exception.CloudbaseInitException(
"Cannot set gateway on network adapter (%d)",
ret_val)
reboot_required = reboot_required or ret_val == 1
if dnsnameservers:
LOG.debug("Setting static DNS servers")
(ret_val,) = adapter_config.SetDNSServerSearchOrder(dnsnameservers)
if ret_val > 1:
raise exception.CloudbaseInitException(
"Cannot set DNS on network adapter (%d)",
ret_val)
reboot_required = reboot_required or ret_val == 1
return reboot_required
def set_static_network_config_v6(self, mac_address, address6,
netmask6, gateway6):
"""Set IPv6 info for a network card."""
# Get local properties by MAC identification.
adapters = network.get_adapter_addresses()
for adapter in adapters:
if mac_address == adapter["mac_address"]:
ifname = adapter["friendly_name"]
ifindex = adapter["interface_index"]
break
else:
raise exception.CloudbaseInitException(
"Adapter with MAC {!r} not available".format(mac_address))
# TODO(cpoieana): Extend support for other platforms.
# Currently windows8 @ ws2012 or above.
if not self.check_os_version(6, 2):
LOG.warning("Setting IPv6 info not available "
"on this system")
return
conn = wmi.WMI(moniker='//./root/StandardCimv2')
query = conn.query("SELECT * FROM MSFT_NetIPAddress "
"WHERE InterfaceAlias = '{}'".format(ifname))
netip = query[0]
params = {
"InterfaceIndex": ifindex,
"InterfaceAlias": ifname,
"IPAddress": address6,
"AddressFamily": AF_INET6,
"PrefixLength": netmask6,
# Manual set type.
"Type": UNICAST,
"PrefixOrigin": MANUAL,
"SuffixOrigin": MANUAL,
"AddressState": PREFERRED_ADDR,
# No expiry.
"ValidLifetime": None,
"PreferredLifetime": None,
"SkipAsSource": False,
"DefaultGateway": gateway6,
"PolicyStore": None,
"PassThru": False,
}
LOG.debug("Setting IPv6 info for %s", ifname)
try:
netip.Create(**params)
except wmi.x_wmi as exc:
raise exception.CloudbaseInitException(exc.com_error)
def _get_config_key_name(self, section):
key_name = self._config_key
if section:
key_name += section.replace('/', '\\') + '\\'
return key_name
def set_config_value(self, name, value, section=None):
key_name = self._get_config_key_name(section)
with winreg.CreateKey(winreg.HKEY_LOCAL_MACHINE,
key_name) as key:
if type(value) == int:
regtype = winreg.REG_DWORD
else:
regtype = winreg.REG_SZ
winreg.SetValueEx(key, name, 0, regtype, value)
def get_config_value(self, name, section=None):
key_name = self._get_config_key_name(section)
try:
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
key_name) as key:
(value, regtype) = winreg.QueryValueEx(key, name)
return value
except WindowsError:
return None
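    # Illustrative note: with section='Plugins' and name='ExampleValue'
    # (placeholder names), the two methods above read/write the registry value
    # 'ExampleValue' under HKLM\SOFTWARE\Cloudbase Solutions\Cloudbase-Init\
    # Plugins\ (stored as REG_DWORD for int values, REG_SZ otherwise).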
def wait_for_boot_completion(self):
try:
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
"SYSTEM\\Setup\\Status\\SysprepStatus", 0,
winreg.KEY_READ) as key:
while True:
gen_state = winreg.QueryValueEx(key,
"GeneralizationState")[0]
if gen_state == 7:
break
time.sleep(1)
LOG.info('Waiting for sysprep completion. '
'GeneralizationState: %d', gen_state)
except WindowsError as ex:
if ex.winerror == 2:
LOG.debug('Sysprep data not found in the registry, '
'skipping sysprep completion check.')
else:
raise ex
def _get_service(self, service_name):
conn = wmi.WMI(moniker='//./root/cimv2')
service_list = conn.Win32_Service(Name=service_name)
if len(service_list):
return service_list[0]
def check_service_exists(self, service_name):
return self._get_service(service_name) is not None
def get_service_status(self, service_name):
service = self._get_service(service_name)
return service.State
def get_service_start_mode(self, service_name):
service = self._get_service(service_name)
return service.StartMode
def set_service_start_mode(self, service_name, start_mode):
# TODO(alexpilotti): Handle the "Delayed Start" case
service = self._get_service(service_name)
(ret_val,) = service.ChangeStartMode(start_mode)
if ret_val != 0:
raise exception.CloudbaseInitException(
'Setting service %(service_name)s start mode failed with '
'return value: %(ret_val)d' % {'service_name': service_name,
'ret_val': ret_val})
def start_service(self, service_name):
LOG.debug('Starting service %s', service_name)
service = self._get_service(service_name)
(ret_val,) = service.StartService()
if ret_val != 0:
raise exception.CloudbaseInitException(
'Starting service %(service_name)s failed with return value: '
'%(ret_val)d' % {'service_name': service_name,
'ret_val': ret_val})
def stop_service(self, service_name):
LOG.debug('Stopping service %s', service_name)
service = self._get_service(service_name)
(ret_val,) = service.StopService()
if ret_val != 0:
raise exception.CloudbaseInitException(
'Stopping service %(service_name)s failed with return value:'
' %(ret_val)d' % {'service_name': service_name,
'ret_val': ret_val})
def terminate(self):
# Wait for the service to start. Polling the service "Started" property
# is not enough
time.sleep(3)
self.stop_service(self._service_name)
def get_default_gateway(self):
default_routes = [r for r in self._get_ipv4_routing_table()
if r[0] == '0.0.0.0']
if default_routes:
return default_routes[0][3], default_routes[0][2]
else:
return None, None
@staticmethod
def _heap_alloc(heap, size):
table_mem = kernel32.HeapAlloc(heap, 0, ctypes.c_size_t(size.value))
if not table_mem:
raise exception.CloudbaseInitException(
'Unable to allocate memory for the IP forward table')
return table_mem
@contextlib.contextmanager
def _get_forward_table(self):
heap = kernel32.GetProcessHeap()
forward_table_size = ctypes.sizeof(Win32_MIB_IPFORWARDTABLE)
size = wintypes.ULONG(forward_table_size)
table_mem = self._heap_alloc(heap, size)
p_forward_table = ctypes.cast(
table_mem, ctypes.POINTER(Win32_MIB_IPFORWARDTABLE))
try:
err = iphlpapi.GetIpForwardTable(p_forward_table,
ctypes.byref(size), 0)
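            # GetIpForwardTable follows the usual two-call Win32 pattern:
            # when the buffer is too small it returns
            # ERROR_INSUFFICIENT_BUFFER and writes the required size back
            # into `size`, so the buffer is reallocated and the call retried.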
if err == self.ERROR_INSUFFICIENT_BUFFER:
kernel32.HeapFree(heap, 0, p_forward_table)
table_mem = self._heap_alloc(heap, size)
p_forward_table = ctypes.cast(
table_mem,
ctypes.POINTER(Win32_MIB_IPFORWARDTABLE))
err = iphlpapi.GetIpForwardTable(p_forward_table,
ctypes.byref(size), 0)
if err and err != kernel32.ERROR_NO_DATA:
raise exception.CloudbaseInitException(
'Unable to get IP forward table. Error: %s' % err)
yield p_forward_table
finally:
kernel32.HeapFree(heap, 0, p_forward_table)
def _get_ipv4_routing_table(self):
routing_table = []
with self._get_forward_table() as p_forward_table:
forward_table = p_forward_table.contents
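            # The C struct declares `table` as a one-element placeholder
            # array; cast it to an array of dwNumEntries rows so every
            # route entry can be iterated.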
table = ctypes.cast(
ctypes.addressof(forward_table.table),
ctypes.POINTER(Win32_MIB_IPFORWARDROW *
forward_table.dwNumEntries)).contents
for row in table:
destination = Ws2_32.inet_ntoa(
row.dwForwardDest).decode()
netmask = Ws2_32.inet_ntoa(
row.dwForwardMask).decode()
gateway = Ws2_32.inet_ntoa(
row.dwForwardNextHop).decode()
routing_table.append((
destination,
netmask,
gateway,
row.dwForwardIfIndex,
row.dwForwardMetric1))
return routing_table
def check_static_route_exists(self, destination):
return len([r for r in self._get_ipv4_routing_table()
if r[0] == destination]) > 0
def add_static_route(self, destination, mask, next_hop, interface_index,
metric):
args = ['ROUTE', 'ADD', destination, 'MASK', mask, next_hop]
(out, err, ret_val) = self.execute_process(args)
# Cannot use the return value to determine the outcome
if ret_val or err:
raise exception.CloudbaseInitException(
'Unable to add route: %s' % err)
def check_os_version(self, major, minor, build=0):
vi = Win32_OSVERSIONINFOEX_W()
vi.dwOSVersionInfoSize = ctypes.sizeof(Win32_OSVERSIONINFOEX_W)
vi.dwMajorVersion = major
vi.dwMinorVersion = minor
vi.dwBuildNumber = build
mask = 0
for type_mask in [VER_MAJORVERSION, VER_MINORVERSION, VER_BUILDNUMBER]:
mask = kernel32.VerSetConditionMask(mask, type_mask,
VER_GREATER_EQUAL)
type_mask = VER_MAJORVERSION | VER_MINORVERSION | VER_BUILDNUMBER
ret_val = ntdll.RtlVerifyVersionInfo(ctypes.byref(vi), type_mask, mask)
if not ret_val:
return True
elif ret_val == self.STATUS_REVISION_MISMATCH:
return False
else:
raise exception.CloudbaseInitException(
"RtlVerifyVersionInfo failed with error: %s" % ret_val)
def get_volume_label(self, drive):
max_label_size = 261
label = ctypes.create_unicode_buffer(max_label_size)
ret_val = kernel32.GetVolumeInformationW(six.text_type(drive), label,
max_label_size, 0, 0, 0, 0, 0)
if ret_val:
return label.value
def generate_random_password(self, length):
while True:
pwd = super(WindowsUtils, self).generate_random_password(length)
# Make sure that the Windows complexity requirements are met:
# http://technet.microsoft.com/en-us/library/cc786468(v=ws.10).aspx
valid = True
for r in ["[a-z]", "[A-Z]", "[0-9]"]:
if not re.search(r, pwd):
valid = False
if valid:
return pwd
def _split_str_buf_list(self, buf, buf_len):
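        # Buffers such as the one returned by GetLogicalDriveStringsW pack
        # several NUL-terminated strings back to back; walk the buffer and
        # split on each NUL separator.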
i = 0
value = ''
values = []
while i < buf_len:
c = buf[i]
if c != '\x00':
value += c
else:
values.append(value)
value = ''
i += 1
return values
def _get_logical_drives(self):
buf_size = self.MAX_PATH
buf = ctypes.create_unicode_buffer(buf_size + 1)
buf_len = kernel32.GetLogicalDriveStringsW(buf_size, buf)
if not buf_len:
raise exception.WindowsCloudbaseInitException(
"GetLogicalDriveStringsW failed: %r")
return self._split_str_buf_list(buf, buf_len)
def get_cdrom_drives(self):
drives = self._get_logical_drives()
return [d for d in drives if kernel32.GetDriveTypeW(d) ==
self.DRIVE_CDROM]
def _is_64bit_arch(self):
        # True when the Python interpreter itself is 64-bit
        # (pointer size of 8 bytes).
return struct.calcsize("P") == 8
def get_physical_disks(self):
physical_disks = []
disk_guid = GUID_DEVINTERFACE_DISK
handle_disks = setupapi.SetupDiGetClassDevsW(
ctypes.byref(disk_guid), None, None,
self.DIGCF_PRESENT | self.DIGCF_DEVICEINTERFACE)
if handle_disks == self.INVALID_HANDLE_VALUE:
raise exception.CloudbaseInitException(
"SetupDiGetClassDevs failed")
try:
did = Win32_SP_DEVICE_INTERFACE_DATA()
did.cbSize = ctypes.sizeof(Win32_SP_DEVICE_INTERFACE_DATA)
index = 0
while setupapi.SetupDiEnumDeviceInterfaces(
handle_disks, None, ctypes.byref(disk_guid), index,
ctypes.byref(did)):
index += 1
handle_disk = self.INVALID_HANDLE_VALUE
required_size = wintypes.DWORD()
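                # First call SetupDiGetDeviceInterfaceDetailW with a NULL
                # output buffer purely to obtain the required buffer size;
                # ERROR_INSUFFICIENT_BUFFER is the expected outcome here.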
if not setupapi.SetupDiGetDeviceInterfaceDetailW(
handle_disks, ctypes.byref(did), None, 0,
ctypes.byref(required_size), None):
if (kernel32.GetLastError() !=
self.ERROR_INSUFFICIENT_BUFFER):
raise exception.WindowsCloudbaseInitException(
"SetupDiGetDeviceInterfaceDetailW failed: %r")
pdidd = ctypes.cast(
msvcrt.malloc(ctypes.c_size_t(required_size.value)),
ctypes.POINTER(Win32_SP_DEVICE_INTERFACE_DETAIL_DATA_W))
try:
pdidd.contents.cbSize = ctypes.sizeof(
Win32_SP_DEVICE_INTERFACE_DETAIL_DATA_W)
if not self._is_64bit_arch():
# NOTE(cpoieana): For some reason, on x86 platforms
# the alignment or content of the struct
# is not taken into consideration.
pdidd.contents.cbSize = 6
if not setupapi.SetupDiGetDeviceInterfaceDetailW(
handle_disks, ctypes.byref(did), pdidd,
required_size, None, None):
raise exception.WindowsCloudbaseInitException(
"SetupDiGetDeviceInterfaceDetailW failed: %r")
device_path = ctypes.cast(
pdidd.contents.DevicePath, wintypes.LPWSTR).value
handle_disk = kernel32.CreateFileW(
device_path, 0, self.FILE_SHARE_READ,
None, self.OPEN_EXISTING, 0, 0)
if handle_disk == self.INVALID_HANDLE_VALUE:
raise exception.CloudbaseInitException(
'CreateFileW failed')
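                    # Ask the disk for its device number so it can be
                    # reported using the \\.\PHYSICALDRIVE<n> naming scheme
                    # below.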
sdn = Win32_STORAGE_DEVICE_NUMBER()
b = wintypes.DWORD()
if not kernel32.DeviceIoControl(
handle_disk, self.IOCTL_STORAGE_GET_DEVICE_NUMBER,
None, 0, ctypes.byref(sdn), ctypes.sizeof(sdn),
ctypes.byref(b), None):
raise exception.WindowsCloudbaseInitException(
'DeviceIoControl failed: %r')
physical_disks.append(
r"\\.\PHYSICALDRIVE%d" % sdn.DeviceNumber)
finally:
msvcrt.free(pdidd)
if handle_disk != self.INVALID_HANDLE_VALUE:
kernel32.CloseHandle(handle_disk)
finally:
setupapi.SetupDiDestroyDeviceInfoList(handle_disks)
return physical_disks
def get_volumes(self):
"""Retrieve a list with all the volumes found on all disks."""
volumes = []
volume = ctypes.create_unicode_buffer(chr(0) * self.MAX_PATH)
handle_volumes = kernel32.FindFirstVolumeW(volume, self.MAX_PATH)
if handle_volumes == self.INVALID_HANDLE_VALUE:
raise exception.WindowsCloudbaseInitException(
"FindFirstVolumeW failed: %r")
try:
while True:
volumes.append(volume.value)
found = kernel32.FindNextVolumeW(handle_volumes, volume,
self.MAX_PATH)
if not found:
errno = ctypes.GetLastError()
if errno == self.ERROR_NO_MORE_FILES:
break
else:
raise exception.WindowsCloudbaseInitException(
"FindNextVolumeW failed: %r")
finally:
kernel32.FindVolumeClose(handle_volumes)
return volumes
def _get_fw_protocol(self, protocol):
if protocol == self.PROTOCOL_TCP:
fw_protocol = self._FW_IP_PROTOCOL_TCP
elif protocol == self.PROTOCOL_UDP:
fw_protocol = self._FW_IP_PROTOCOL_UDP
else:
raise NotImplementedError("Unsupported protocol")
return fw_protocol
def firewall_create_rule(self, name, port, protocol, allow=True):
if not allow:
raise NotImplementedError()
fw_port = client.Dispatch("HNetCfg.FWOpenPort")
fw_port.Name = name
fw_port.Protocol = self._get_fw_protocol(protocol)
fw_port.Port = port
fw_port.Scope = self._FW_SCOPE_ALL
fw_port.Enabled = True
fw_mgr = client.Dispatch("HNetCfg.FwMgr")
fw_profile = fw_mgr.LocalPolicy.CurrentProfile
fw_profile = fw_profile.GloballyOpenPorts.Add(fw_port)
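    # Both firewall_create_rule (above) and firewall_remove_rule (below)
    # drive the legacy Windows Firewall COM interfaces (HNetCfg.FwMgr and
    # HNetCfg.FWOpenPort) through the COM client.Dispatch helper.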
def firewall_remove_rule(self, name, port, protocol, allow=True):
if not allow:
raise NotImplementedError()
fw_mgr = client.Dispatch("HNetCfg.FwMgr")
fw_profile = fw_mgr.LocalPolicy.CurrentProfile
fw_protocol = self._get_fw_protocol(protocol)
fw_profile = fw_profile.GloballyOpenPorts.Remove(port, fw_protocol)
def is_wow64(self):
return win32process.IsWow64Process()
def get_system32_dir(self):
return os.path.expandvars('%windir%\\system32')
def get_syswow64_dir(self):
return os.path.expandvars('%windir%\\syswow64')
def get_sysnative_dir(self):
return os.path.expandvars('%windir%\\sysnative')
def check_sysnative_dir_exists(self):
sysnative_dir_exists = os.path.isdir(self.get_sysnative_dir())
if not sysnative_dir_exists and self.is_wow64():
LOG.warning('Unable to validate sysnative folder presence. '
'If Target OS is Server 2003 x64, please ensure '
'you have KB942589 installed')
return sysnative_dir_exists
def _get_system_dir(self, sysnative=True):
"""Return Windows system directory with compatibility support.
Depending on the interpreter bits and platform architecture,
the return value may vary between
C:\Windows\(System32|SysWOW64|Sysnative).
Note that "Sysnative" is just an alias (doesn't really exist on disk).
        More info about this can be found in the documentation.
"""
if sysnative and self.check_sysnative_dir_exists():
return self.get_sysnative_dir()
if not sysnative and self._is_64bit_arch():
return self.get_syswow64_dir()
return self.get_system32_dir()
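    # Roughly: a 32-bit interpreter on 64-bit Windows gets the Sysnative
    # alias when sysnative=True, a 64-bit interpreter gets SysWOW64 when
    # sysnative=False, and every other combination falls back to System32.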
def is_nano_server(self):
return self._check_server_level("NanoServer")
def _check_server_level(self, server_level):
try:
with winreg.OpenKey(
winreg.HKEY_LOCAL_MACHINE,
"Software\\Microsoft\\Windows NT\\CurrentVersion\\Server\\"
"ServerLevels") as key:
return winreg.QueryValueEx(key, server_level)[0] == 1
except WindowsError as ex:
if ex.winerror == 2:
return False
else:
raise
def execute_powershell_script(self, script_path, sysnative=True):
base_dir = self._get_system_dir(sysnative)
powershell_path = os.path.join(base_dir,
'WindowsPowerShell\\v1.0\\'
'powershell.exe')
args = [powershell_path]
if not self.is_nano_server():
args += ['-ExecutionPolicy', 'RemoteSigned', '-NonInteractive',
'-File']
args.append(script_path)
return self.execute_process(args, shell=False)
def execute_system32_process(self, args, shell=True, decode_output=False,
sysnative=True):
base_dir = self._get_system_dir(sysnative)
process_path = os.path.join(base_dir, args[0])
return self.execute_process([process_path] + args[1:],
decode_output=decode_output, shell=shell)
def get_maximum_password_length(self):
return 20
def set_timezone(self, timezone_name):
windows_name = windows_tz.tz_win.get(timezone_name)
if not windows_name:
raise exception.CloudbaseInitException(
"The given timezone name is unrecognised: %r" % timezone_name)
timezone.Timezone(windows_name).set(self)
| cmin764/cloudbase-init | cloudbaseinit/osutils/windows.py | Python | apache-2.0 | 42,295 |
# Copyright 2012 Kevin Ormbrek
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from suds.sudsobject import Object as SudsObject
class _FactoryKeywords(object):
def set_wsdl_object_attribute(self, object, name, value):
"""Sets the attribute of a WSDL object.
Example:
| ${order search request}= | Create Wsdl Object | OrderSearchRequest | |
| Set Wsdl Object Attribute | ${order search request} | id | 4065 |
"""
self._assert_is_suds_object(object)
getattr(object, name)
setattr(object, name, value)
def get_wsdl_object_attribute(self, object, name):
"""Gets the attribute of a WSDL object.
        Extended variable syntax may be used to access attributes; however,
some WSDL objects may have attribute names that are illegal in Python,
necessitating this keyword.
Example:
| ${sale record}= | Call Soap Method | getLastSale | |
| ${price}= | Get Wsdl Object Attribute | ${sale record} | Price |
"""
self._assert_is_suds_object(object)
return getattr(object, name)
def create_wsdl_object(self, type, *name_value_pairs):
"""Creates a WSDL object of the specified `type`.
Requested `type` must be defined in the WSDL, in an import specified
by the WSDL, or with `Add Doctor Import`. `type` is case sensitive.
Example:
| ${contact}= | Create Wsdl Object | Contact | |
| Set Wsdl Object Attribute | ${contact} | Name | Kelly Newman |
Attribute values can be set by passing the attribute name and value in
pairs. This is equivalent to the two lines above:
| ${contact}= | Create Wsdl Object | Contact | Name | Kelly Newman |
"""
if len(name_value_pairs) % 2 != 0:
raise ValueError("Creating a WSDL object failed. There should be "
"an even number of name-value pairs.")
obj = self._client().factory.create(type)
for i in range(0, len(name_value_pairs), 2):
self.set_wsdl_object_attribute(obj, name_value_pairs[i], name_value_pairs[i + 1])
return obj
# private
def _assert_is_suds_object(self, object):
if not isinstance(object, SudsObject):
raise ValueError("Object must be a WSDL object (suds.sudsobject.Object).")
| ombre42/robotframework-sudslibrary | src/SudsLibrary/factory.py | Python | apache-2.0 | 2,975 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListTags
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-datacatalog
# [START datacatalog_v1_generated_DataCatalog_ListTags_sync]
from google.cloud import datacatalog_v1
def sample_list_tags():
# Create a client
client = datacatalog_v1.DataCatalogClient()
# Initialize request argument(s)
request = datacatalog_v1.ListTagsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_tags(request=request)
# Handle the response
for response in page_result:
print(response)
# [END datacatalog_v1_generated_DataCatalog_ListTags_sync]
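# Note: list_tags() returns a pager, so the loop above transparently fetches
# additional result pages; each yielded item is an individual tag entry rather
# than a full ListTagsResponse.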
| googleapis/python-datacatalog | samples/generated_samples/datacatalog_v1_generated_data_catalog_list_tags_sync.py | Python | apache-2.0 | 1,472 |
# -*- coding: utf-8 -*-
# Scrapy settings for DynamicItemsScrapy project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'DynamicItemsScrapy'
SPIDER_MODULES = ['DynamicItemsScrapy.spiders']
NEWSPIDER_MODULE = 'DynamicItemsScrapy.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'DynamicItemsScrapy (+http://www.yourdomain.com)'
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS=32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY=3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN=16
#CONCURRENT_REQUESTS_PER_IP=16
# Disable cookies (enabled by default)
#COOKIES_ENABLED=False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED=False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'DynamicItemsScrapy.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'DynamicItemsScrapy.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'DynamicItemsScrapy.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# NOTE: AutoThrottle will honour the standard settings for concurrency and delay
#AUTOTHROTTLE_ENABLED=True
# The initial download delay
#AUTOTHROTTLE_START_DELAY=5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY=60
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG=False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED=True
#HTTPCACHE_EXPIRATION_SECS=0
#HTTPCACHE_DIR='httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES=[]
#HTTPCACHE_STORAGE='scrapy.extensions.httpcache.FilesystemCacheStorage'
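# Any of the settings above can be read back at runtime through Scrapy's
# settings API, for example from within a spider (illustrative only):
#
# delay = self.settings.getfloat('DOWNLOAD_DELAY', 0)
# cache_on = self.settings.getbool('HTTPCACHE_ENABLED', False)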
| WilliamKinaan/ScrapyDynamicItems | DynamicItemsScrapy/DynamicItemsScrapy/settings.py | Python | apache-2.0 | 3,084 |
# Copyright 2013 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import re
import fixtures
from jsonschema import exceptions as jsonschema_exc
import six
from nova.api.openstack import api_version_request as api_version
from nova.api import validation
from nova.api.validation import parameter_types
from nova.api.validation import validators
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
query_schema = {
'type': 'object',
'properties': {
'foo': parameter_types.single_param({'type': 'string',
'format': 'uuid'}),
'foos': parameter_types.multi_params({'type': 'string'})
},
'patternProperties': {
"^_": parameter_types.multi_params({'type': 'string'})},
'additionalProperties': True
}
class FakeQueryParametersController(object):
@validation.query_schema(query_schema, '2.3')
def get(self, req):
return list(set(req.GET.keys()))
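# FakeQueryParametersController exercises @validation.query_schema: for
# requests at microversion 2.3 or later the decorator validates req.GET
# against query_schema and strips out query parameters that are neither
# declared properties nor matches for the "^_" patternProperties regex,
# which is what QueryParamsSchemaTestCase asserts further down.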
class RegexFormatFakeController(object):
schema = {
'type': 'object',
'properties': {
'foo': {
'format': 'regex',
},
},
}
@validation.schema(request_body_schema=schema)
def post(self, req, body):
return 'Validation succeeded.'
class FakeRequest(object):
api_version_request = api_version.APIVersionRequest("2.1")
environ = {}
legacy_v2 = False
def is_legacy_v2(self):
return self.legacy_v2
class ValidationRegex(test.NoDBTestCase):
def test_build_regex_range(self):
        # This is much easier to reason about when restricted to the ASCII
        # subset, since that is a printable range; the algorithm itself
        # works for all ranges.
def _get_all_chars():
for i in range(0x7F):
yield six.unichr(i)
self.useFixture(fixtures.MonkeyPatch(
'nova.api.validation.parameter_types._get_all_chars',
_get_all_chars))
# note that since we use only the ascii range in the tests
# we have to clear the cache to recompute them.
parameter_types._reset_cache()
r = parameter_types._build_regex_range(ws=False)
self.assertEqual(r, re.escape('!') + '-' + re.escape('~'))
# if we allow whitespace the range starts earlier
r = parameter_types._build_regex_range(ws=True)
self.assertEqual(r, re.escape(' ') + '-' + re.escape('~'))
# excluding a character will give us 2 ranges
r = parameter_types._build_regex_range(ws=True, exclude=['A'])
self.assertEqual(r,
re.escape(' ') + '-' + re.escape('@') +
'B' + '-' + re.escape('~'))
# inverting which gives us all the initial unprintable characters.
r = parameter_types._build_regex_range(ws=False, invert=True)
self.assertEqual(r,
re.escape('\x00') + '-' + re.escape(' '))
# excluding characters that create a singleton. Naively this would be:
# ' -@B-BD-~' which seems to work, but ' -@BD-~' is more natural.
r = parameter_types._build_regex_range(ws=True, exclude=['A', 'C'])
self.assertEqual(r,
re.escape(' ') + '-' + re.escape('@') +
'B' + 'D' + '-' + re.escape('~'))
# ws=True means the positive regex has printable whitespaces,
# so the inverse will not. The inverse will include things we
# exclude.
r = parameter_types._build_regex_range(
ws=True, exclude=['A', 'B', 'C', 'Z'], invert=True)
self.assertEqual(r,
re.escape('\x00') + '-' + re.escape('\x1f') + 'A-CZ')
class APIValidationTestCase(test.NoDBTestCase):
post_schema = None
def setUp(self):
super(APIValidationTestCase, self).setUp()
self.post = None
if self.post_schema is not None:
@validation.schema(request_body_schema=self.post_schema)
def post(req, body):
return 'Validation succeeded.'
self.post = post
def check_validation_error(self, method, body, expected_detail, req=None):
if not req:
req = FakeRequest()
try:
method(body=body, req=req)
except exception.ValidationError as ex:
self.assertEqual(400, ex.kwargs['code'])
if isinstance(expected_detail, list):
self.assertIn(ex.kwargs['detail'], expected_detail,
'Exception details did not match expected')
elif not re.match(expected_detail, ex.kwargs['detail']):
self.assertEqual(expected_detail, ex.kwargs['detail'],
'Exception details did not match expected')
except Exception as ex:
self.fail('An unexpected exception happens: %s' % ex)
else:
self.fail('Any exception does not happen.')
class FormatCheckerTestCase(test.NoDBTestCase):
def _format_checker(self, format, value, error_message):
format_checker = validators.FormatChecker()
exc = self.assertRaises(jsonschema_exc.FormatError,
format_checker.check, value, format)
self.assertIsInstance(exc.cause, exception.InvalidName)
self.assertEqual(error_message,
exc.cause.format_message())
def test_format_checker_failed_with_non_string_name(self):
error_message = ("An invalid 'name' value was provided. The name must "
"be: printable characters. "
"Can not start or end with whitespace.")
self._format_checker("name", " ", error_message)
self._format_checker("name", None, error_message)
def test_format_checker_failed_name_with_leading_trailing_spaces(self):
error_message = ("An invalid 'name' value was provided. "
"The name must be: printable characters with at "
"least one non space character")
self._format_checker("name_with_leading_trailing_spaces",
None, error_message)
class MicroversionsSchemaTestCase(APIValidationTestCase):
def setUp(self):
super(MicroversionsSchemaTestCase, self).setUp()
schema_v21_int = {
'type': 'object',
'properties': {
'foo': {
'type': 'integer',
}
}
}
schema_v20_str = copy.deepcopy(schema_v21_int)
schema_v20_str['properties']['foo'] = {'type': 'string'}
@validation.schema(schema_v20_str, '2.0', '2.0')
@validation.schema(schema_v21_int, '2.1')
def post(req, body):
return 'Validation succeeded.'
self.post = post
def test_validate_v2compatible_request(self):
req = FakeRequest()
req.legacy_v2 = True
self.assertEqual(self.post(body={'foo': 'bar'}, req=req),
'Validation succeeded.')
detail = ("Invalid input for field/attribute foo. Value: 1. "
"1 is not of type 'string'")
self.check_validation_error(self.post, body={'foo': 1},
expected_detail=detail, req=req)
def test_validate_v21_request(self):
req = FakeRequest()
self.assertEqual(self.post(body={'foo': 1}, req=req),
'Validation succeeded.')
detail = ("Invalid input for field/attribute foo. Value: bar. "
"'bar' is not of type 'integer'")
self.check_validation_error(self.post, body={'foo': 'bar'},
expected_detail=detail, req=req)
def test_validate_v2compatible_request_with_none_min_version(self):
schema_none = {
'type': 'object',
'properties': {
'foo': {
'type': 'integer'
}
}
}
@validation.schema(schema_none)
def post(req, body):
return 'Validation succeeded.'
req = FakeRequest()
req.legacy_v2 = True
self.assertEqual('Validation succeeded.',
post(body={'foo': 1}, req=req))
detail = ("Invalid input for field/attribute foo. Value: bar. "
"'bar' is not of type 'integer'")
self.check_validation_error(post, body={'foo': 'bar'},
expected_detail=detail, req=req)
class QueryParamsSchemaTestCase(test.NoDBTestCase):
def setUp(self):
super(QueryParamsSchemaTestCase, self).setUp()
self.controller = FakeQueryParametersController()
def test_validate_request(self):
req = fakes.HTTPRequest.blank("/tests?foo=%s" % fakes.FAKE_UUID)
req.api_version_request = api_version.APIVersionRequest("2.3")
self.assertEqual(['foo'], self.controller.get(req))
def test_validate_request_failed(self):
# parameter 'foo' expect a UUID
req = fakes.HTTPRequest.blank("/tests?foo=abc")
req.api_version_request = api_version.APIVersionRequest("2.3")
ex = self.assertRaises(exception.ValidationError, self.controller.get,
req)
if six.PY3:
self.assertEqual("Invalid input for query parameters foo. Value: "
"abc. 'abc' is not a 'uuid'", six.text_type(ex))
else:
self.assertEqual("Invalid input for query parameters foo. Value: "
"abc. u'abc' is not a 'uuid'", six.text_type(ex))
def test_validate_request_with_multiple_values(self):
req = fakes.HTTPRequest.blank("/tests?foos=abc")
req.api_version_request = api_version.APIVersionRequest("2.3")
self.assertEqual(['foos'], self.controller.get(req))
req = fakes.HTTPRequest.blank("/tests?foos=abc&foos=def")
self.assertEqual(['foos'], self.controller.get(req))
def test_validate_request_with_multiple_values_fails(self):
req = fakes.HTTPRequest.blank(
"/tests?foo=%s&foo=%s" % (fakes.FAKE_UUID, fakes.FAKE_UUID))
req.api_version_request = api_version.APIVersionRequest("2.3")
self.assertRaises(exception.ValidationError, self.controller.get, req)
def test_validate_request_unicode_decode_failure(self):
req = fakes.HTTPRequest.blank("/tests?foo=%88")
req.api_version_request = api_version.APIVersionRequest("2.1")
ex = self.assertRaises(
exception.ValidationError, self.controller.get, req)
self.assertIn("Query string is not UTF-8 encoded", six.text_type(ex))
def test_strip_out_additional_properties(self):
req = fakes.HTTPRequest.blank(
"/tests?foos=abc&foo=%s&bar=123&-bar=456" % fakes.FAKE_UUID)
req.api_version_request = api_version.APIVersionRequest("2.3")
res = self.controller.get(req)
res.sort()
self.assertEqual(['foo', 'foos'], res)
def test_no_strip_out_additional_properties_when_not_match_version(self):
req = fakes.HTTPRequest.blank(
"/tests?foos=abc&foo=%s&bar=123&bar=456" % fakes.FAKE_UUID)
# The JSON-schema matches to the API version 2.3 and above. Request
# with version 2.1 to ensure there isn't no strip out for additional
# parameters when schema didn't match the request version.
req.api_version_request = api_version.APIVersionRequest("2.1")
res = self.controller.get(req)
res.sort()
self.assertEqual(['bar', 'foo', 'foos'], res)
def test_strip_out_correct_pattern_retained(self):
req = fakes.HTTPRequest.blank(
"/tests?foos=abc&foo=%s&bar=123&_foo_=456" % fakes.FAKE_UUID)
req.api_version_request = api_version.APIVersionRequest("2.3")
res = self.controller.get(req)
res.sort()
self.assertEqual(['_foo_', 'foo', 'foos'], res)
class RequiredDisableTestCase(APIValidationTestCase):
post_schema = {
'type': 'object',
'properties': {
'foo': {
'type': 'integer',
},
},
}
def test_validate_required_disable(self):
self.assertEqual(self.post(body={'foo': 1}, req=FakeRequest()),
'Validation succeeded.')
self.assertEqual(self.post(body={'abc': 1}, req=FakeRequest()),
'Validation succeeded.')
class RequiredEnableTestCase(APIValidationTestCase):
post_schema = {
'type': 'object',
'properties': {
'foo': {
'type': 'integer',
},
},
'required': ['foo']
}
def test_validate_required_enable(self):
self.assertEqual(self.post(body={'foo': 1},
req=FakeRequest()), 'Validation succeeded.')
def test_validate_required_enable_fails(self):
detail = "'foo' is a required property"
self.check_validation_error(self.post, body={'abc': 1},
expected_detail=detail)
class AdditionalPropertiesEnableTestCase(APIValidationTestCase):
post_schema = {
'type': 'object',
'properties': {
'foo': {
'type': 'integer',
},
},
'required': ['foo'],
}
def test_validate_additionalProperties_enable(self):
self.assertEqual(self.post(body={'foo': 1}, req=FakeRequest()),
'Validation succeeded.')
self.assertEqual(self.post(body={'foo': 1, 'ext': 1},
req=FakeRequest()),
'Validation succeeded.')
class AdditionalPropertiesDisableTestCase(APIValidationTestCase):
post_schema = {
'type': 'object',
'properties': {
'foo': {
'type': 'integer',
},
},
'required': ['foo'],
'additionalProperties': False,
}
def test_validate_additionalProperties_disable(self):
self.assertEqual(self.post(body={'foo': 1}, req=FakeRequest()),
'Validation succeeded.')
def test_validate_additionalProperties_disable_fails(self):
detail = "Additional properties are not allowed ('ext' was unexpected)"
self.check_validation_error(self.post, body={'foo': 1, 'ext': 1},
expected_detail=detail)
class PatternPropertiesTestCase(APIValidationTestCase):
post_schema = {
'patternProperties': {
'^[a-zA-Z0-9]{1,10}$': {
'type': 'string'
},
},
'additionalProperties': False,
}
def test_validate_patternProperties(self):
self.assertEqual('Validation succeeded.',
self.post(body={'foo': 'bar'}, req=FakeRequest()))
def test_validate_patternProperties_fails(self):
details = [
"Additional properties are not allowed ('__' was unexpected)",
"'__' does not match any of the regexes: '^[a-zA-Z0-9]{1,10}$'"
]
self.check_validation_error(self.post, body={'__': 'bar'},
expected_detail=details)
details = [
"'' does not match any of the regexes: '^[a-zA-Z0-9]{1,10}$'",
"Additional properties are not allowed ('' was unexpected)"
]
self.check_validation_error(self.post, body={'': 'bar'},
expected_detail=details)
details = [
("'0123456789a' does not match any of the regexes: "
"'^[a-zA-Z0-9]{1,10}$'"),
("Additional properties are not allowed ('0123456789a' was"
" unexpected)")
]
self.check_validation_error(self.post, body={'0123456789a': 'bar'},
expected_detail=details)
# Note(jrosenboom): This is referencing an internal python error
# string, which is no stable interface. We need a patch in the
# jsonschema library in order to fix this properly.
if six.PY3:
detail = "expected string or bytes-like object"
else:
detail = "expected string or buffer"
self.check_validation_error(self.post, body={None: 'bar'},
expected_detail=detail)
class StringTestCase(APIValidationTestCase):
post_schema = {
'type': 'object',
'properties': {
'foo': {
'type': 'string',
},
},
}
def test_validate_string(self):
self.assertEqual(self.post(body={'foo': 'abc'}, req=FakeRequest()),
'Validation succeeded.')
self.assertEqual(self.post(body={'foo': '0'}, req=FakeRequest()),
'Validation succeeded.')
self.assertEqual(self.post(body={'foo': ''}, req=FakeRequest()),
'Validation succeeded.')
def test_validate_string_fails(self):
detail = ("Invalid input for field/attribute foo. Value: 1."
" 1 is not of type 'string'")
self.check_validation_error(self.post, body={'foo': 1},
expected_detail=detail)
detail = ("Invalid input for field/attribute foo. Value: 1.5."
" 1.5 is not of type 'string'")
self.check_validation_error(self.post, body={'foo': 1.5},
expected_detail=detail)
detail = ("Invalid input for field/attribute foo. Value: True."
" True is not of type 'string'")
self.check_validation_error(self.post, body={'foo': True},
expected_detail=detail)
class StringLengthTestCase(APIValidationTestCase):
post_schema = {
'type': 'object',
'properties': {
'foo': {
'type': 'string',
'minLength': 1,
'maxLength': 10,
},
},
}
def test_validate_string_length(self):
self.assertEqual(self.post(body={'foo': '0'}, req=FakeRequest()),
'Validation succeeded.')
self.assertEqual(self.post(body={'foo': '0123456789'},
req=FakeRequest()),
'Validation succeeded.')
def test_validate_string_length_fails(self):
detail = ("Invalid input for field/attribute foo. Value: ."
" '' is too short")
self.check_validation_error(self.post, body={'foo': ''},
expected_detail=detail)
detail = ("Invalid input for field/attribute foo. Value: 0123456789a."
" '0123456789a' is too long")
self.check_validation_error(self.post, body={'foo': '0123456789a'},
expected_detail=detail)
class IntegerTestCase(APIValidationTestCase):
post_schema = {
'type': 'object',
'properties': {
'foo': {
'type': ['integer', 'string'],
'pattern': '^[0-9]+$',
},
},
}
def test_validate_integer(self):
self.assertEqual(self.post(body={'foo': 1}, req=FakeRequest()),
'Validation succeeded.')
self.assertEqual(self.post(body={'foo': '1'}, req=FakeRequest()),
'Validation succeeded.')
self.assertEqual(self.post(body={'foo': '0123456789'},
req=FakeRequest()),
'Validation succeeded.')
def test_validate_integer_fails(self):
detail = ("Invalid input for field/attribute foo. Value: abc."
" 'abc' does not match '^[0-9]+$'")
self.check_validation_error(self.post, body={'foo': 'abc'},
expected_detail=detail)
detail = ("Invalid input for field/attribute foo. Value: True."
" True is not of type 'integer', 'string'")
self.check_validation_error(self.post, body={'foo': True},
expected_detail=detail)
detail = ("Invalid input for field/attribute foo. Value: 0xffff."
" '0xffff' does not match '^[0-9]+$'")
self.check_validation_error(self.post, body={'foo': '0xffff'},
expected_detail=detail)
detail = ("Invalid input for field/attribute foo. Value: 1.0."
" 1.0 is not of type 'integer', 'string'")
self.check_validation_error(self.post, body={'foo': 1.0},
expected_detail=detail)
detail = ("Invalid input for field/attribute foo. Value: 1.0."
" '1.0' does not match '^[0-9]+$'")
self.check_validation_error(self.post, body={'foo': '1.0'},
expected_detail=detail)
class IntegerRangeTestCase(APIValidationTestCase):
post_schema = {
'type': 'object',
'properties': {
'foo': {
'type': ['integer', 'string'],
'pattern': '^[0-9]+$',
'minimum': 1,
'maximum': 10,
},
},
}
def test_validate_integer_range(self):
self.assertEqual(self.post(body={'foo': 1}, req=FakeRequest()),
'Validation succeeded.')
self.assertEqual(self.post(body={'foo': 10}, req=FakeRequest()),
'Validation succeeded.')
self.assertEqual(self.post(body={'foo': '1'}, req=FakeRequest()),
'Validation succeeded.')
def test_validate_integer_range_fails(self):
detail = ("Invalid input for field/attribute foo. Value: 0."
" 0(.0)? is less than the minimum of 1")
self.check_validation_error(self.post, body={'foo': 0},
expected_detail=detail)
detail = ("Invalid input for field/attribute foo. Value: 11."
" 11(.0)? is greater than the maximum of 10")
self.check_validation_error(self.post, body={'foo': 11},
expected_detail=detail)
detail = ("Invalid input for field/attribute foo. Value: 0."
" 0(.0)? is less than the minimum of 1")
self.check_validation_error(self.post, body={'foo': '0'},
expected_detail=detail)
detail = ("Invalid input for field/attribute foo. Value: 11."
" 11(.0)? is greater than the maximum of 10")
self.check_validation_error(self.post, body={'foo': '11'},
expected_detail=detail)
class BooleanTestCase(APIValidationTestCase):
post_schema = {
'type': 'object',
'properties': {
'foo': parameter_types.boolean,
},
}
def test_validate_boolean(self):
self.assertEqual('Validation succeeded.',
self.post(body={'foo': True}, req=FakeRequest()))
self.assertEqual('Validation succeeded.',
self.post(body={'foo': False}, req=FakeRequest()))
self.assertEqual('Validation succeeded.',
self.post(body={'foo': 'True'}, req=FakeRequest()))
self.assertEqual('Validation succeeded.',
self.post(body={'foo': 'False'}, req=FakeRequest()))
self.assertEqual('Validation succeeded.',
self.post(body={'foo': '1'}, req=FakeRequest()))
self.assertEqual('Validation succeeded.',
self.post(body={'foo': '0'}, req=FakeRequest()))
def test_validate_boolean_fails(self):
enum_boolean = ("[True, 'True', 'TRUE', 'true', '1', 'ON', 'On',"
" 'on', 'YES', 'Yes', 'yes',"
" False, 'False', 'FALSE', 'false', '0', 'OFF', 'Off',"
" 'off', 'NO', 'No', 'no']")
detail = ("Invalid input for field/attribute foo. Value: bar."
" 'bar' is not one of %s") % enum_boolean
self.check_validation_error(self.post, body={'foo': 'bar'},
expected_detail=detail)
detail = ("Invalid input for field/attribute foo. Value: 2."
" '2' is not one of %s") % enum_boolean
self.check_validation_error(self.post, body={'foo': '2'},
expected_detail=detail)
class HostnameTestCase(APIValidationTestCase):
post_schema = {
'type': 'object',
'properties': {
'foo': parameter_types.hostname,
},
}
def test_validate_hostname(self):
self.assertEqual('Validation succeeded.',
self.post(body={'foo': 'localhost'},
req=FakeRequest()))
self.assertEqual('Validation succeeded.',
self.post(body={'foo': 'localhost.localdomain.com'},
req=FakeRequest()))
self.assertEqual('Validation succeeded.',
self.post(body={'foo': 'my-host'}, req=FakeRequest()))
self.assertEqual('Validation succeeded.',
self.post(body={'foo': 'my_host'}, req=FakeRequest()))
def test_validate_hostname_fails(self):
detail = ("Invalid input for field/attribute foo. Value: True."
" True is not of type 'string'")
self.check_validation_error(self.post, body={'foo': True},
expected_detail=detail)
detail = ("Invalid input for field/attribute foo. Value: 1."
" 1 is not of type 'string'")
self.check_validation_error(self.post, body={'foo': 1},
expected_detail=detail)
detail = ("Invalid input for field/attribute foo. Value: my$host."
" 'my$host' does not match '^[a-zA-Z0-9-._]*$'")
self.check_validation_error(self.post, body={'foo': 'my$host'},
expected_detail=detail)
class HostnameIPaddressTestCase(APIValidationTestCase):
post_schema = {
'type': 'object',
'properties': {
'foo': parameter_types.hostname_or_ip_address,
},
}
def test_validate_hostname_or_ip_address(self):
self.assertEqual('Validation succeeded.',
self.post(body={'foo': 'localhost'},
req=FakeRequest()))
self.assertEqual('Validation succeeded.',
self.post(body={'foo': 'localhost.localdomain.com'},
req=FakeRequest()))
self.assertEqual('Validation succeeded.',
self.post(body={'foo': 'my-host'}, req=FakeRequest()))
self.assertEqual('Validation succeeded.',
self.post(body={'foo': 'my_host'}, req=FakeRequest()))
self.assertEqual('Validation succeeded.',
self.post(body={'foo': '192.168.10.100'},
req=FakeRequest()))
self.assertEqual('Validation succeeded.',
self.post(body={'foo': '2001:db8::9abc'},
req=FakeRequest()))
def test_validate_hostname_or_ip_address_fails(self):
detail = ("Invalid input for field/attribute foo. Value: True."
" True is not of type 'string'")
self.check_validation_error(self.post, body={'foo': True},
expected_detail=detail)
detail = ("Invalid input for field/attribute foo. Value: 1."
" 1 is not of type 'string'")
self.check_validation_error(self.post, body={'foo': 1},
expected_detail=detail)
detail = ("Invalid input for field/attribute foo. Value: my$host."
" 'my$host' does not match '^[a-zA-Z0-9-_.:]*$'")
self.check_validation_error(self.post, body={'foo': 'my$host'},
expected_detail=detail)
class NameTestCase(APIValidationTestCase):
post_schema = {
'type': 'object',
'properties': {
'foo': parameter_types.name,
},
}
def test_validate_name(self):
self.assertEqual('Validation succeeded.',
self.post(body={'foo': 'm1.small'},
req=FakeRequest()))
self.assertEqual('Validation succeeded.',
self.post(body={'foo': 'my server'},
req=FakeRequest()))
self.assertEqual('Validation succeeded.',
self.post(body={'foo': 'a'}, req=FakeRequest()))
self.assertEqual('Validation succeeded.',
self.post(body={'foo': u'\u0434'}, req=FakeRequest()))
self.assertEqual('Validation succeeded.',
self.post(body={'foo': u'\u0434\u2006\ufffd'},
req=FakeRequest()))
def test_validate_name_fails(self):
error = ("An invalid 'name' value was provided. The name must be: "
"printable characters. "
"Can not start or end with whitespace.")
should_fail = (' ',
' server',
'server ',
u'a\xa0', # trailing unicode space
u'\uffff', # non-printable unicode
)
for item in should_fail:
self.check_validation_error(self.post, body={'foo': item},
expected_detail=error)
# four-byte unicode, if supported by this python build
try:
self.check_validation_error(self.post, body={'foo': u'\U00010000'},
expected_detail=error)
except ValueError:
pass
class NameWithLeadingTrailingSpacesTestCase(APIValidationTestCase):
post_schema = {
'type': 'object',
'properties': {
'foo': parameter_types.name_with_leading_trailing_spaces,
},
}
def test_validate_name(self):
self.assertEqual('Validation succeeded.',
self.post(body={'foo': 'm1.small'},
req=FakeRequest()))
self.assertEqual('Validation succeeded.',
self.post(body={'foo': 'my server'},
req=FakeRequest()))
self.assertEqual('Validation succeeded.',
self.post(body={'foo': 'a'}, req=FakeRequest()))
self.assertEqual('Validation succeeded.',
self.post(body={'foo': u'\u0434'}, req=FakeRequest()))
self.assertEqual('Validation succeeded.',
self.post(body={'foo': u'\u0434\u2006\ufffd'},
req=FakeRequest()))
self.assertEqual('Validation succeeded.',
self.post(body={'foo': ' abc '},
req=FakeRequest()))
self.assertEqual('Validation succeeded.',
self.post(body={'foo': 'abc abc abc'},
req=FakeRequest()))
self.assertEqual('Validation succeeded.',
self.post(body={'foo': ' abc abc abc '},
req=FakeRequest()))
# leading unicode space
self.assertEqual('Validation succeeded.',
self.post(body={'foo': '\xa0abc'},
req=FakeRequest()))
def test_validate_name_fails(self):
error = ("An invalid 'name' value was provided. The name must be: "
"printable characters with at least one non space character")
should_fail = (
' ',
u'\xa0', # unicode space
u'\uffff', # non-printable unicode
)
for item in should_fail:
self.check_validation_error(self.post, body={'foo': item},
expected_detail=error)
# four-byte unicode, if supported by this python build
try:
self.check_validation_error(self.post, body={'foo': u'\U00010000'},
expected_detail=error)
except ValueError:
pass
class NoneTypeTestCase(APIValidationTestCase):
post_schema = {
'type': 'object',
'properties': {
'foo': parameter_types.none
}
}
def test_validate_none(self):
self.assertEqual('Validation succeeded.',
self.post(body={'foo': 'None'},
req=FakeRequest()))
self.assertEqual('Validation succeeded.',
self.post(body={'foo': None},
req=FakeRequest()))
self.assertEqual('Validation succeeded.',
self.post(body={'foo': {}},
req=FakeRequest()))
def test_validate_none_fails(self):
detail = ("Invalid input for field/attribute foo. Value: ."
" '' is not one of ['None', None, {}]")
self.check_validation_error(self.post, body={'foo': ''},
expected_detail=detail)
detail = ("Invalid input for field/attribute foo. Value: "
"{'key': 'val'}. {'key': 'val'} is not one of "
"['None', None, {}]")
self.check_validation_error(self.post, body={'foo': {'key': 'val'}},
expected_detail=detail)
class NameOrNoneTestCase(APIValidationTestCase):
post_schema = {
'type': 'object',
'properties': {
'foo': parameter_types.name_or_none
}
}
def test_valid(self):
self.assertEqual('Validation succeeded.',
self.post(body={'foo': None},
req=FakeRequest()))
self.assertEqual('Validation succeeded.',
self.post(body={'foo': '1'},
req=FakeRequest()))
def test_validate_fails(self):
detail = ("Invalid input for field/attribute foo. Value: 1234. 1234 "
"is not valid under any of the given schemas")
self.check_validation_error(self.post, body={'foo': 1234},
expected_detail=detail)
detail = ("Invalid input for field/attribute foo. Value: . '' "
"is not valid under any of the given schemas")
self.check_validation_error(self.post, body={'foo': ''},
expected_detail=detail)
too_long_name = 256 * "k"
detail = ("Invalid input for field/attribute foo. Value: %s. "
"'%s' is not valid under any of the "
"given schemas") % (too_long_name, too_long_name)
self.check_validation_error(self.post,
body={'foo': too_long_name},
expected_detail=detail)
class TcpUdpPortTestCase(APIValidationTestCase):
post_schema = {
'type': 'object',
'properties': {
'foo': parameter_types.tcp_udp_port,
},
}
def test_validate_tcp_udp_port(self):
self.assertEqual('Validation succeeded.',
self.post(body={'foo': 1024}, req=FakeRequest()))
self.assertEqual('Validation succeeded.',
self.post(body={'foo': '1024'}, req=FakeRequest()))
def test_validate_tcp_udp_port_fails(self):
detail = ("Invalid input for field/attribute foo. Value: True."
" True is not of type 'integer', 'string'")
self.check_validation_error(self.post, body={'foo': True},
expected_detail=detail)
detail = ("Invalid input for field/attribute foo. Value: 65536."
" 65536(.0)? is greater than the maximum of 65535")
self.check_validation_error(self.post, body={'foo': 65536},
expected_detail=detail)
class CidrFormatTestCase(APIValidationTestCase):
post_schema = {
'type': 'object',
'properties': {
'foo': {
'type': 'string',
'format': 'cidr',
},
},
}
def test_validate_cidr(self):
self.assertEqual('Validation succeeded.',
self.post(
body={'foo': '192.168.10.0/24'},
req=FakeRequest()
))
def test_validate_cidr_fails(self):
detail = ("Invalid input for field/attribute foo."
" Value: bar."
" 'bar' is not a 'cidr'")
self.check_validation_error(self.post,
body={'foo': 'bar'},
expected_detail=detail)
detail = ("Invalid input for field/attribute foo."
" Value: . '' is not a 'cidr'")
self.check_validation_error(self.post, body={'foo': ''},
expected_detail=detail)
detail = ("Invalid input for field/attribute foo."
" Value: 192.168.1.0. '192.168.1.0' is not a 'cidr'")
self.check_validation_error(self.post, body={'foo': '192.168.1.0'},
expected_detail=detail)
detail = ("Invalid input for field/attribute foo."
" Value: 192.168.1.0 /24."
" '192.168.1.0 /24' is not a 'cidr'")
self.check_validation_error(self.post, body={'foo': '192.168.1.0 /24'},
expected_detail=detail)
class DatetimeTestCase(APIValidationTestCase):
post_schema = {
'type': 'object',
'properties': {
'foo': {
'type': 'string',
'format': 'date-time',
},
},
}
def test_validate_datetime(self):
self.assertEqual('Validation succeeded.',
self.post(
body={'foo': '2014-01-14T01:00:00Z'},
req=FakeRequest()
))
def test_validate_datetime_fails(self):
detail = ("Invalid input for field/attribute foo."
" Value: 2014-13-14T01:00:00Z."
" '2014-13-14T01:00:00Z' is not a 'date-time'")
self.check_validation_error(self.post,
body={'foo': '2014-13-14T01:00:00Z'},
expected_detail=detail)
detail = ("Invalid input for field/attribute foo."
" Value: bar. 'bar' is not a 'date-time'")
self.check_validation_error(self.post, body={'foo': 'bar'},
expected_detail=detail)
detail = ("Invalid input for field/attribute foo. Value: 1."
" '1' is not a 'date-time'")
self.check_validation_error(self.post, body={'foo': '1'},
expected_detail=detail)
class UuidTestCase(APIValidationTestCase):
post_schema = {
'type': 'object',
'properties': {
'foo': {
'type': 'string',
'format': 'uuid',
},
},
}
def test_validate_uuid(self):
self.assertEqual('Validation succeeded.',
self.post(
body={'foo': '70a599e0-31e7-49b7-b260-868f441e862b'},
req=FakeRequest()
))
def test_validate_uuid_fails(self):
detail = ("Invalid input for field/attribute foo."
" Value: 70a599e031e749b7b260868f441e862."
" '70a599e031e749b7b260868f441e862' is not a 'uuid'")
self.check_validation_error(self.post,
body={'foo': '70a599e031e749b7b260868f441e862'},
expected_detail=detail)
detail = ("Invalid input for field/attribute foo. Value: 1."
" '1' is not a 'uuid'")
self.check_validation_error(self.post, body={'foo': '1'},
expected_detail=detail)
detail = ("Invalid input for field/attribute foo. Value: abc."
" 'abc' is not a 'uuid'")
self.check_validation_error(self.post, body={'foo': 'abc'},
expected_detail=detail)
class UriTestCase(APIValidationTestCase):
post_schema = {
'type': 'object',
'properties': {
'foo': {
'type': 'string',
'format': 'uri',
},
},
}
def test_validate_uri(self):
self.assertEqual('Validation succeeded.',
self.post(
body={'foo': 'http://localhost:8774/v2/servers'},
req=FakeRequest()
))
self.assertEqual('Validation succeeded.',
self.post(
body={'foo': 'http://[::1]:8774/v2/servers'},
req=FakeRequest()
))
def test_validate_uri_fails(self):
base_detail = ("Invalid input for field/attribute foo. Value: {0}. "
"'{0}' is not a 'uri'")
invalid_uri = 'http://localhost:8774/v2/servers##'
self.check_validation_error(self.post,
body={'foo': invalid_uri},
expected_detail=base_detail.format(
invalid_uri))
invalid_uri = 'http://[fdf8:01]:8774/v2/servers'
self.check_validation_error(self.post,
body={'foo': invalid_uri},
expected_detail=base_detail.format(
invalid_uri))
invalid_uri = '1'
self.check_validation_error(self.post,
body={'foo': invalid_uri},
expected_detail=base_detail.format(
invalid_uri))
invalid_uri = 'abc'
self.check_validation_error(self.post,
body={'foo': invalid_uri},
expected_detail=base_detail.format(
invalid_uri))
class Ipv4TestCase(APIValidationTestCase):
post_schema = {
'type': 'object',
'properties': {
'foo': {
'type': 'string',
'format': 'ipv4',
},
},
}
def test_validate_ipv4(self):
self.assertEqual('Validation succeeded.',
self.post(
body={'foo': '192.168.0.100'},
req=FakeRequest()
))
def test_validate_ipv4_fails(self):
detail = ("Invalid input for field/attribute foo. Value: abc."
" 'abc' is not a 'ipv4'")
self.check_validation_error(self.post, body={'foo': 'abc'},
expected_detail=detail)
detail = ("Invalid input for field/attribute foo. Value: localhost."
" 'localhost' is not a 'ipv4'")
self.check_validation_error(self.post, body={'foo': 'localhost'},
expected_detail=detail)
detail = ("Invalid input for field/attribute foo."
" Value: 2001:db8::1234:0:0:9abc."
" '2001:db8::1234:0:0:9abc' is not a 'ipv4'")
self.check_validation_error(self.post,
body={'foo': '2001:db8::1234:0:0:9abc'},
expected_detail=detail)
class Ipv6TestCase(APIValidationTestCase):
post_schema = {
'type': 'object',
'properties': {
'foo': {
'type': 'string',
'format': 'ipv6',
},
},
}
def test_validate_ipv6(self):
self.assertEqual('Validation succeeded.',
self.post(
body={'foo': '2001:db8::1234:0:0:9abc'},
req=FakeRequest()
))
def test_validate_ipv6_fails(self):
detail = ("Invalid input for field/attribute foo. Value: abc."
" 'abc' is not a 'ipv6'")
self.check_validation_error(self.post, body={'foo': 'abc'},
expected_detail=detail)
detail = ("Invalid input for field/attribute foo. Value: localhost."
" 'localhost' is not a 'ipv6'")
self.check_validation_error(self.post, body={'foo': 'localhost'},
expected_detail=detail)
detail = ("Invalid input for field/attribute foo."
" Value: 192.168.0.100. '192.168.0.100' is not a 'ipv6'")
self.check_validation_error(self.post, body={'foo': '192.168.0.100'},
expected_detail=detail)
class Base64TestCase(APIValidationTestCase):
post_schema = {
'type': 'object',
'properties': {
'foo': {
'type': 'string',
'format': 'base64',
},
},
}
def test_validate_base64(self):
self.assertEqual('Validation succeeded.',
self.post(body={'foo': 'aGVsbG8gd29ybGQ='},
req=FakeRequest()))
# 'aGVsbG8gd29ybGQ=' is the base64 code of 'hello world'
def test_validate_base64_fails(self):
value = 'A random string'
detail = ("Invalid input for field/attribute foo. "
"Value: %s. '%s' is not a 'base64'") % (value, value)
self.check_validation_error(self.post, body={'foo': value},
expected_detail=detail)
class RegexFormatTestCase(APIValidationTestCase):
def setUp(self):
super(RegexFormatTestCase, self).setUp()
self.controller = RegexFormatFakeController()
def test_validate_regex(self):
req = fakes.HTTPRequest.blank("")
self.assertEqual('Validation succeeded.',
self.controller.post(req, body={'foo': u'Myserver'}))
def test_validate_regex_fails(self):
value = 1
req = fakes.HTTPRequest.blank("")
detail = ("Invalid input for field/attribute foo. "
"Value: %s. %s is not a 'regex'") % (value, value)
self.check_validation_error(self.controller.post, req=req,
body={'foo': value},
expected_detail=detail)
| rahulunair/nova | nova/tests/unit/test_api_validation.py | Python | apache-2.0 | 47,979 |
import unittest
import subprocess
import os
import platform
import shutil
from os.path import join, normpath, abspath, split
import sys
env_path = "/".join(os.path.dirname(os.path.abspath(__file__)).split('/')[:-1])
sys.path.insert(0, env_path)
import littlechef
# Set some convenience variables
test_path = split(normpath(abspath(__file__)))[0]
littlechef_top = normpath(join(test_path, '..'))
if platform.system() == 'Windows':
fix = join(littlechef_top, 'fix.cmd')
WIN32 = True
else:
fix = join(littlechef_top, 'fix')
WIN32 = False
class BaseTest(unittest.TestCase):
def setUp(self):
"""Change to the test directory"""
self.set_location()
def set_location(self, location=test_path):
"""Change directories to a known location"""
os.chdir(location)
def execute(self, call):
"""Executes a command and returns stdout and stderr"""
        # Both platforms currently use the same Popen invocation.
        proc = subprocess.Popen(call,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
return proc.communicate()
class TestConfig(BaseTest):
def tearDown(self):
self.set_location()
def test_not_a_kitchen(self):
"""Should exit with error when not a kitchen directory"""
# Change to parent dir, which has no nodes/cookbooks/roles dir
self.set_location(littlechef_top)
# Call fix from the current directory above "tests/"
resp, error = self.execute([fix, 'node:a'])
self.assertTrue("Fatal error" in error, resp)
self.assertTrue(
'No {0} file found'.format(littlechef.CONFIGFILE) in error, error)
self.assertEquals(resp, "", resp)
def test_version(self):
"""Should output the correct Little Chef version"""
resp, error = self.execute([fix, '-v'])
self.assertEquals(resp, "",
"Response should be empty, version should be in stderr")
self.assertTrue(
'LittleChef {0}'.format(littlechef.__version__) in error)
def test_list_commands(self):
"""Should output a list of available commands"""
resp, error = self.execute([fix, '-l'])
self.assertEquals(error, "")
expected = "Starts a Chef Solo configuration run"
self.assertTrue(expected in resp)
commands = resp.split('\nAvailable commands:\n')[-1]
commands = filter(None, commands.split('\n'))
self.assertEquals(len(commands), 21)
def test_verbose(self):
"""Should turn on verbose output"""
resp, error = self.execute([fix, '--verbose', 'list_nodes'])
self.assertEquals(error, "", error)
self.assertTrue('Verbose output on' in resp, resp)
def test_debug(self):
"""Should turn on debug loglevel"""
resp, error = self.execute([fix, '--debug', 'list_nodes'])
self.assertEquals(error, "", error)
self.assertTrue('Debug level on' in resp, resp)
class TestEnvironment(BaseTest):
def test_no_valid_value(self):
"""Should error out when the env value is empty or is a fabric task"""
resp, error = self.execute([fix, 'list_nodes', '--env'])
self.assertEquals(resp, "")
self.assertTrue(
"error: argument -e/--env: expected one argument" in error, error)
resp, error = self.execute([fix, '--env', 'list_nodes'])
self.assertEquals(resp, "")
self.assertTrue("error: No value given for --env" in error, error)
cmd = [fix, '--env', 'nodes_with_role:base', 'role:base']
resp, error = self.execute(cmd)
self.assertEquals(resp, "")
self.assertTrue("error: No value given for --env" in error, error)
def test_valid_environment(self):
"""Should set the chef_environment value when one is given"""
resp, error = self.execute([fix, 'list_nodes', '--env', 'staging'])
self.assertEquals(error, "", error)
self.assertTrue("Environment: staging" in resp, resp)
class TestRunner(BaseTest):
def test_no_node_given(self):
"""Should abort when no node is given"""
resp, error = self.execute([fix, 'node:'])
self.assertTrue("Fatal error: No node was given" in error)
def test_plugin(self):
"""Should execute the given plugin"""
resp, error = self.execute([fix, 'node:testnode1', 'plugin:notthere'])
expected = ", could not find 'notthere.py' in the plugin directory"
self.assertTrue(expected in error, resp + error)
resp, error = self.execute([fix, 'node:testnode1', 'plugin:bad'])
expected = "Found plugin 'bad', but it seems to have a syntax error:"
expected += " invalid syntax (bad.py, line 6)"
self.assertTrue(expected in error, resp + error)
resp, error = self.execute([fix, 'node:testnode1', 'plugin:dummy'])
expected = "Executing plugin '{0}' on {1}".format("dummy", "testnode1")
self.assertTrue(expected in resp, resp + error)
def test_list_plugins(self):
"""Should print a list of available plugins"""
resp, error = self.execute([fix, 'list_plugins'])
self.assertTrue("List of available plugins:" in resp, resp)
self.assertTrue("bad: Plugin has a syntax error" in resp, resp)
self.assertTrue("dummy: Dummy LittleChef plugin" in resp, resp)
class TestCookbooks(BaseTest):
def test_list_recipes(self):
"""Should list available recipes"""
resp, error = self.execute([fix, 'list_recipes'])
self.assertEquals(error, "")
self.assertTrue('subversion::client' in resp)
self.assertTrue('subversion::server' in resp)
def test_list_recipes_site_cookbooks(self):
"""Should give priority to site-cookbooks information"""
resp, error = self.execute([fix, 'list_recipes'])
self.assertTrue('Modified by site-cookbooks' in resp)
def test_list_recipes_detailed(self):
"""Should show a detailed list of available recipes"""
resp, error = self.execute([fix, 'list_recipes_detailed'])
self.assertTrue('subversion::client' in resp)
for field in ['description', 'version', 'dependencies', 'attributes']:
self.assertTrue(field in resp)
def test_list_recipes_detailed_site_cookbooks(self):
"""Should show a detailed list of available recipes with site-cookbook
priority
"""
resp, error = self.execute([fix, 'list_recipes_detailed'])
self.assertTrue('0.8.4' in resp)
def test_no_metadata(self):
"""Should abort if cookbook has no metadata.json"""
bad_cookbook = join(test_path, 'cookbooks', 'bad_cookbook')
os.mkdir(bad_cookbook)
try:
resp, error = self.execute([fix, 'list_recipes'])
except OSError:
self.fail("Couldn't execute {0}".format(fix))
finally:
os.rmdir(bad_cookbook)
expected = 'Fatal error: Cookbook "bad_cookbook" has no metadata.json'
self.assertTrue(expected in error)
class TestListRoles(BaseTest):
def test_list_roles(self):
"""Should list all roles"""
resp, error = self.execute([fix, 'list_roles'])
self.assertTrue('base' in resp and 'example aplication' in resp)
def test_list_roles_detailed(self):
"""Should show a detailed list of all roles"""
resp, error = self.execute([fix, 'list_roles_detailed'])
self.assertTrue('base' in resp and 'example aplication' in resp)
class TestListNodes(BaseTest):
def test_list_nodes(self):
"""Should list all nodes"""
resp, error = self.execute([fix, 'list_nodes'])
for node in ['testnode1', 'testnode2', 'testnode3.mydomain.com']:
self.assertTrue(node in resp)
self.assertTrue('Recipes: subversion' in resp)
def test_list_nodes_in_env(self):
"""Should list all nodes in an environment"""
resp, error = self.execute([fix, '--env', 'staging', 'list_nodes'])
self.assertTrue('testnode2' in resp)
self.assertFalse('testnode1' in resp)
self.assertFalse('testnode3.mydomain.com' in resp)
def test_list_nodes_detailed(self):
"""Should show a detailed list of all nodes"""
resp, error = self.execute([fix, 'list_nodes_detailed'])
self.assertTrue('testnode1' in resp)
self.assertTrue('Recipe: subversion' in resp)
def test_list_nodes_with_recipe(self):
"""Should list all nodes with a recipe in the run list"""
resp, error = self.execute([fix, 'list_nodes_with_recipe:subversion'])
self.assertTrue('testnode1' in resp)
self.assertTrue('Recipes: subversion' in resp)
resp, error = self.execute([fix, 'list_nodes_with_recipe:apache2'])
self.assertFalse('testnode1' in resp)
class TestNewKitchen(BaseTest):
def setUp(self):
self.new_kitchen = join(test_path, 'test_new_kitchen')
os.mkdir(self.new_kitchen)
self.set_location(self.new_kitchen)
def tearDown(self):
shutil.rmtree(self.new_kitchen)
self.set_location()
def test_new_kitchen_creates_required_directories(self):
resp, error = self.execute([fix, 'new_kitchen'])
kitchen_contents = os.listdir(os.getcwd())
self.assertTrue('roles' in kitchen_contents)
self.assertTrue('cookbooks' in kitchen_contents)
self.assertTrue('site-cookbooks' in kitchen_contents)
self.assertTrue('data_bags' in kitchen_contents)
self.assertTrue('nodes' in kitchen_contents)
self.assertTrue('environments' in kitchen_contents)
self.assertTrue(littlechef.CONFIGFILE in kitchen_contents)
def test_new_kitchen_can_list_nodes(self):
self.execute([fix, 'new_kitchen'])
with open(littlechef.CONFIGFILE, "w") as configfh:
print >> configfh, "[userinfo]"
print >> configfh, "user = testuser"
print >> configfh, "password = testpassword"
resp, error = self.execute([fix, 'list_nodes'])
self.assertFalse(error)
self.assertTrue('Found 0 nodes' in resp)
self.assertEqual('', error)
| tobami/littlechef | tests/test_command.py | Python | apache-2.0 | 10,393 |
"""Compose ACLs on ports."""
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2018 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from faucet import valve_of
from faucet.conf import InvalidConfigError
def push_vlan(vlan_vid):
"""Push a VLAN tag with optional selection of eth type."""
vid = vlan_vid
vlan_eth_type = None
if isinstance(vlan_vid, dict):
vid = vlan_vid['vid']
if 'eth_type' in vlan_vid:
vlan_eth_type = vlan_vid['eth_type']
if vlan_eth_type is None:
return valve_of.push_vlan_act(vid)
return valve_of.push_vlan_act(vid, eth_type=vlan_eth_type)
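# Illustrative note (not part of the original module): push_vlan() accepts either a bare
# VID or the dict form handled above; the values are made-up examples of the two shapes.
#   push_vlan(100)                              -> 802.1Q tag with VID 100
#   push_vlan({'vid': 100, 'eth_type': 0x88a8}) -> same VID pushed with an explicit
#                                                  eth_type (here the 802.1ad S-tag TPID)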
def rewrite_vlan(output_dict):
"""Implement actions to rewrite VLAN headers."""
vlan_actions = []
if 'pop_vlans' in output_dict:
for _ in range(output_dict['pop_vlans']):
vlan_actions.append(valve_of.pop_vlan())
# if vlan tag is specified, push it.
if 'vlan_vid' in output_dict:
vlan_actions.extend(push_vlan(output_dict['vlan_vid']))
# swap existing VID
elif 'swap_vid' in output_dict:
vlan_actions.append(
valve_of.set_vlan_vid(output_dict['swap_vid']))
# or, if a list, push them all (all with type Q).
elif 'vlan_vids' in output_dict:
for vlan_vid in output_dict['vlan_vids']:
vlan_actions.extend(push_vlan(vlan_vid))
return vlan_actions
def build_output_actions(output_dict):
"""Implement actions to alter packet/output."""
output_actions = []
output_port = None
ofmsgs = []
# rewrite any VLAN headers first always
vlan_actions = rewrite_vlan(output_dict)
if vlan_actions:
output_actions.extend(vlan_actions)
if 'set_fields' in output_dict:
for set_fields in output_dict['set_fields']:
output_actions.append(valve_of.set_field(**set_fields))
if 'port' in output_dict:
output_port = output_dict['port']
output_actions.append(valve_of.output_port(output_port))
if 'ports' in output_dict:
for output_port in output_dict['ports']:
output_actions.append(valve_of.output_port(output_port))
if 'failover' in output_dict:
failover = output_dict['failover']
group_id = failover['group_id']
buckets = []
for port in failover['ports']:
buckets.append(valve_of.bucket(
watch_port=port, actions=[valve_of.output_port(port)]))
ofmsgs.append(valve_of.groupdel(group_id=group_id))
ofmsgs.append(valve_of.groupadd_ff(group_id=group_id, buckets=buckets))
output_actions.append(valve_of.group_act(group_id=group_id))
return (output_port, output_actions, ofmsgs)
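# Illustrative sketch (keys mirror the handling above, concrete values are assumptions):
# an output dict can combine VLAN rewrites, set_field actions and an output port, e.g.
#   {'pop_vlans': 1,
#    'vlan_vid': 200,
#    'set_fields': [{'eth_dst': '0e:00:00:00:00:01'}],
#    'port': 3}
# for which build_output_actions() returns roughly
# (3, [pop VLAN, push VID 200, set eth_dst, output port 3], []).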
# TODO: change this, maybe this can be rewritten easily
# possibly replace with a class for ACLs
def build_acl_entry(rule_conf, meters,
acl_allow_inst, acl_force_port_vlan_inst,
port_num=None, vlan_vid=None):
acl_inst = []
acl_act = []
acl_match_dict = {}
acl_ofmsgs = []
acl_cookie = None
allow_inst = acl_allow_inst
for attrib, attrib_value in list(rule_conf.items()):
if attrib == 'in_port':
continue
if attrib == 'cookie':
acl_cookie = attrib_value
continue
if attrib == 'description':
continue
if attrib == 'actions':
allow = False
allow_specified = False
if 'allow' in attrib_value:
allow_specified = True
if attrib_value['allow'] == 1:
allow = True
if 'force_port_vlan' in attrib_value:
if attrib_value['force_port_vlan'] == 1:
allow_inst = acl_force_port_vlan_inst
if 'meter' in attrib_value:
meter_name = attrib_value['meter']
acl_inst.append(valve_of.apply_meter(meters[meter_name].meter_id))
if 'mirror' in attrib_value:
port_no = attrib_value['mirror']
acl_act.append(valve_of.output_port(port_no))
if not allow_specified:
allow = True
if 'output' in attrib_value:
output_port, output_actions, output_ofmsgs = build_output_actions(
attrib_value['output'])
acl_act.extend(output_actions)
acl_ofmsgs.extend(output_ofmsgs)
# if port specified, output packet now and exit pipeline.
if output_port is not None:
continue
if allow:
acl_inst.append(allow_inst)
else:
acl_match_dict[attrib] = attrib_value
if port_num is not None:
acl_match_dict['in_port'] = port_num
if vlan_vid is not None:
acl_match_dict['vlan_vid'] = valve_of.vid_present(vlan_vid)
try:
acl_match = valve_of.match_from_dict(acl_match_dict)
except TypeError:
raise InvalidConfigError('invalid type in ACL')
if acl_act:
acl_inst.append(valve_of.apply_actions(acl_act))
return (acl_match, acl_inst, acl_cookie, acl_ofmsgs)
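# Illustrative sketch of a rule_conf consumed above (field names and values are
# assumptions, not taken from FAUCET documentation): every key other than 'actions',
# 'cookie', 'description' and 'in_port' is treated as a match field.
#   {'eth_type': 0x0800,
#    'ipv4_dst': '10.0.0.1',
#    'actions': {'allow': 1, 'meter': 'slowdown'}}
# would match IPv4 traffic to 10.0.0.1, apply the meter named 'slowdown' (if present in
# meters) and append the allow instruction.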
def build_acl_ofmsgs(acls, acl_table,
acl_allow_inst, acl_force_port_vlan_inst,
highest_priority, meters,
exact_match, port_num=None, vlan_vid=None):
ofmsgs = []
acl_rule_priority = highest_priority
for acl in acls:
for rule_conf in acl.rules:
acl_match, acl_inst, acl_cookie, acl_ofmsgs = build_acl_entry(
rule_conf, meters,
acl_allow_inst, acl_force_port_vlan_inst,
port_num, vlan_vid)
ofmsgs.extend(acl_ofmsgs)
if exact_match:
flowmod = acl_table.flowmod(
acl_match, priority=highest_priority, inst=acl_inst, cookie=acl_cookie)
else:
flowmod = acl_table.flowmod(
acl_match, priority=acl_rule_priority, inst=acl_inst, cookie=acl_cookie)
ofmsgs.append(flowmod)
acl_rule_priority -= 1
return ofmsgs
| wackerly/faucet | faucet/valve_acl.py | Python | apache-2.0 | 6,809 |
from resolwe.flow.models import Data
from resolwe.test import tag_process, with_resolwe_host
from resolwe_bio.utils.test import KBBioProcessTestCase
class MicroRNATestCase(KBBioProcessTestCase):
@with_resolwe_host
@tag_process("workflow-mirna")
def test_mirna_workflow(self):
# Prepare data for aligning the reads with bowtie2 and annotation file for featureCounts.
with self.preparation_stage():
inputs = {
"src": "genome_rsem.fa.gz",
"species": "Homo sapiens",
"build": "fake_genome_RSEM",
}
ref_seq = self.run_process("upload-fasta-nucl", inputs)
bowtie2_index = self.run_process("bowtie2-index", {"ref_seq": ref_seq.id})
single_reads = self.prepare_reads(["reads rsem.fq.gz"])
annotation = self.prepare_annotation(
"annotation_rsem.gtf.gz",
species="Homo sapiens",
build="fake_genome_RSEM",
)
inputs = {
"preprocessing": {
"reads": single_reads.pk,
"adapters": {"down_primers_seq": ["TAATGAACAATGCAAGTTTGA"]},
"filtering": {"minlen": 15, "maxlen": 35, "error_rate": 0.2},
},
"alignment": {
"genome": bowtie2_index.pk,
"alignment_options": {
"mode": "--local",
"speed": "--very-sensitive",
"L": 8,
"rep_mode": "k",
"k_reports": 5,
},
},
"quant_options": {
"annotation": annotation.pk,
"id_attribute": "gene_id",
"feature_class": "exon",
"normalization_type": "CPM",
"count_multi_mapping_reads": True,
"allow_multi_overlap": True,
},
"assay_type": "non_specific",
}
# Run process and assert.
self.run_process("workflow-mirna", inputs)
workflow = Data.objects.filter(process__slug="feature_counts").last()
# check featureCount summary
self.assertFile(
workflow, "rc", "mirna_featurecounts_rc.tab.gz", compression="gzip"
)
self.assertFile(
workflow, "exp", "mirna_featurecounts_cpm.tab.gz", compression="gzip"
)
| genialis/resolwe-bio | resolwe_bio/tests/workflows/test_mirna.py | Python | apache-2.0 | 2,406 |
# Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import socket
import time
import uuid
import testresources
import testtools
from heatclient import client as heatclient
from keystoneclient.v2_0 import client as ksclient
from muranoclient import client as mclient
import muranoclient.common.exceptions as exceptions
import murano.tests.functional.engine.config as cfg
CONF = cfg.cfg.CONF
class MuranoBase(testtools.TestCase, testtools.testcase.WithAttributes,
testresources.ResourcedTestCase):
@classmethod
def setUpClass(cls):
super(MuranoBase, cls).setUpClass()
cfg.load_config()
keystone_client = ksclient.Client(username=CONF.murano.user,
password=CONF.murano.password,
tenant_name=CONF.murano.tenant,
auth_url=CONF.murano.auth_url)
heat_url = keystone_client.service_catalog.url_for(
service_type='orchestration', endpoint_type='publicURL')
cls.heat_client = heatclient.Client('1', endpoint=heat_url,
token=keystone_client.auth_token)
url = CONF.murano.murano_url
murano_url = url if 'v1' not in url else "/".join(
url.split('/')[:url.split('/').index('v1')])
cls.muranoclient = mclient.Client('1',
endpoint=murano_url,
token=keystone_client.auth_token)
cls.linux = CONF.murano.linux_image
cls.pkgs_path = os.path.abspath(os.path.join(
os.path.dirname(__file__),
os.path.pardir,
'murano-app-incubator'
))
def upload_package(package_name, body, app):
files = {'%s' % package_name: open(app, 'rb')}
return cls.muranoclient.packages.create(body, files)
upload_package(
'PostgreSQL',
{"categories": ["Databases"], "tags": ["tag"]},
os.path.join(cls.pkgs_path, 'io.murano.databases.PostgreSql.zip')
)
upload_package(
'SqlDatabase',
{"categories": ["Databases"], "tags": ["tag"]},
os.path.join(cls.pkgs_path, 'io.murano.databases.SqlDatabase.zip')
)
upload_package(
'Apache',
{"categories": ["Application Servers"], "tags": ["tag"]},
os.path.join(cls.pkgs_path,
'io.murano.apps.apache.ApacheHttpServer.zip')
)
upload_package(
'Tomcat',
{"categories": ["Application Servers"], "tags": ["tag"]},
os.path.join(cls.pkgs_path, 'io.murano.apps.apache.Tomcat.zip')
)
upload_package(
'Telnet',
{"categories": ["Web"], "tags": ["tag"]},
os.path.join(cls.pkgs_path, 'io.murano.apps.linux.Telnet.zip')
)
def setUp(self):
super(MuranoBase, self).setUp()
self.environments = []
def tearDown(self):
super(MuranoBase, self).tearDown()
for env in self.environments:
try:
self.environment_delete(env)
except Exception:
pass
def environment_delete(self, environment_id, timeout=180):
self.muranoclient.environments.delete(environment_id)
start_time = time.time()
while time.time() - start_time < timeout:
try:
self.muranoclient.environments.get(environment_id)
except exceptions.HTTPNotFound:
return
raise Exception(
'Environment {0} was not deleted in {1} seconds'.format(
environment_id, timeout))
def wait_for_environment_deploy(self, environment):
start_time = time.time()
while environment.manager.get(environment.id).status != 'ready':
if time.time() - start_time > 1200:
self.fail(
'Environment deployment is not finished in 1200 seconds')
time.sleep(5)
return environment.manager.get(environment.id)
def check_port_access(self, ip, port):
result = 1
start_time = time.time()
while time.time() - start_time < 300:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((str(ip), port))
sock.close()
if result == 0:
break
time.sleep(5)
self.assertEqual(0, result, '%s port is closed on instance' % port)
def deployment_success_check(self, environment, port):
deployment = self.muranoclient.deployments.list(environment.id)[-1]
self.assertEqual('success', deployment.state,
'Deployment status is {0}'.format(deployment.state))
ip = environment.services[-1]['instance']['floatingIpAddress']
if ip:
self.check_port_access(ip, port)
else:
self.fail('Instance does not have floating IP')
def test_deploy_telnet(self):
post_body = {
"instance": {
"flavor": "m1.medium",
"image": self.linux,
"assignFloatingIp": True,
"?": {
"type": "io.murano.resources.LinuxMuranoInstance",
"id": str(uuid.uuid4())
},
"name": "testMurano"
},
"name": "teMurano",
"?": {
"type": "io.murano.apps.linux.Telnet",
"id": str(uuid.uuid4())
}
}
environment_name = 'Telnetenv' + uuid.uuid4().hex[:5]
env = self._quick_deploy(environment_name, post_body)
self.deployment_success_check(env, 23)
def test_deploy_apache(self):
post_body = {
"instance": {
"flavor": "m1.medium",
"image": self.linux,
"assignFloatingIp": True,
"?": {
"type": "io.murano.resources.LinuxMuranoInstance",
"id": str(uuid.uuid4())
},
"name": "testMurano"
},
"name": "teMurano",
"?": {
"type": "io.murano.apps.apache.ApacheHttpServer",
"id": str(uuid.uuid4())
}
}
environment_name = 'Apacheenv' + uuid.uuid4().hex[:5]
env = self._quick_deploy(environment_name, post_body)
self.deployment_success_check(env, 80)
def test_deploy_postgresql(self):
post_body = {
"instance": {
"flavor": "m1.medium",
"image": self.linux,
"assignFloatingIp": True,
"?": {
"type": "io.murano.resources.LinuxMuranoInstance",
"id": str(uuid.uuid4())
},
"name": "testMurano"
},
"name": "teMurano",
"database": "test_db",
"username": "test_usr",
"password": "test_pass",
"?": {
"type": "io.murano.databases.PostgreSql",
"id": str(uuid.uuid4())
}
}
environment_name = 'Postgreenv' + uuid.uuid4().hex[:5]
env = self._quick_deploy(environment_name, post_body)
self.deployment_success_check(env, 5432)
def test_deploy_tomcat(self):
post_body = {
"instance": {
"flavor": "m1.medium",
"image": self.linux,
"assignFloatingIp": True,
"?": {
"type": "io.murano.resources.LinuxMuranoInstance",
"id": str(uuid.uuid4())
},
"name": "testMurano"
},
"name": "teMurano",
"?": {
"type": "io.murano.apps.apache.Tomcat",
"id": str(uuid.uuid4())
}
}
environment_name = 'Tomcatenv' + uuid.uuid4().hex[:5]
env = self._quick_deploy(environment_name, post_body)
self.deployment_success_check(env, 8080)
def _get_telnet_app(self):
return {
"instance": {
"?": {
"type": "io.murano.resources.LinuxMuranoInstance",
"id": str(uuid.uuid4())
},
"flavor": "m1.medium",
"image": self.linux,
"name": "instance{0}".format(uuid.uuid4().hex[:5]),
},
"name": "app{0}".format(uuid.uuid4().hex[:5]),
"?": {
"type": "io.murano.apps.linux.Telnet",
"id": str(uuid.uuid4())
}
}
def _quick_deploy(self, name, *apps):
environment = self.muranoclient.environments.create({'name': name})
self.environments.append(environment.id)
session = self.muranoclient.sessions.configure(environment.id)
for app in apps:
self.muranoclient.services.post(environment.id,
path='/',
data=app,
session_id=session.id)
self.muranoclient.sessions.deploy(environment.id, session.id)
return self.wait_for_environment_deploy(environment)
def _get_stack(self, environment_id):
for stack in self.heat_client.stacks.list():
if environment_id in stack.description:
return stack
def test_instance_refs_are_removed_after_application_is_removed(self):
# FIXME(sergmelikyan): Revise this as part of proper fix for #1359998
self.skipTest('Skipped until proper fix for #1359998 is proposed')
name = 'e' + uuid.uuid4().hex
# create environment with telnet application
application1 = self._get_telnet_app()
application2 = self._get_telnet_app()
application_id = application1['?']['id']
instance_name = application1['instance']['name']
apps = [application1, application2]
environment = self._quick_deploy(name, *apps)
# delete telnet application
session = self.muranoclient.sessions.configure(environment.id)
self.muranoclient.services.delete(environment.id,
'/' + application_id,
session.id)
self.muranoclient.sessions.deploy(environment.id, session.id)
self.wait_for_environment_deploy(environment)
stack_name = self._get_stack(environment.id).stack_name
template = self.heat_client.stacks.template(stack_name)
ip_addresses = '{0}-assigned-ip'.format(instance_name)
floating_ip = '{0}-FloatingIPaddress'.format(instance_name)
self.assertNotIn(ip_addresses, template['outputs'])
self.assertNotIn(floating_ip, template['outputs'])
self.assertNotIn(instance_name, template['resources'])
def test_stack_deletion_after_env_is_deleted(self):
name = 'e' + uuid.uuid4().hex
application = self._get_telnet_app()
environment = self._quick_deploy(name, application)
stack = self._get_stack(environment.id)
self.assertIsNotNone(stack)
self.muranoclient.environments.delete(environment.id)
start_time = time.time()
while stack is not None:
if time.time() - start_time > 300:
break
time.sleep(5)
stack = self._get_stack(environment.id)
self.assertIsNone(stack, 'stack is not deleted')
| telefonicaid/murano | murano/tests/functional/engine/base.py | Python | apache-2.0 | 12,267 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-09-18 19:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('coursedashboards', '0005_auto_20170915_2036'),
]
operations = [
migrations.CreateModel(
name='CourseOfferingMajor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('count', models.IntegerField()),
],
options={
'db_table': 'CourseOfferingMajor',
},
),
migrations.AlterUniqueTogether(
name='coursemajor',
unique_together=set([]),
),
migrations.RemoveField(
model_name='coursemajor',
name='course',
),
migrations.RemoveField(
model_name='coursemajor',
name='major',
),
migrations.AlterField(
model_name='course',
name='curriculum',
field=models.CharField(max_length=20),
),
migrations.DeleteModel(
name='CourseMajor',
),
migrations.AddField(
model_name='courseofferingmajor',
name='course',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='coursedashboards.Course'),
),
migrations.AddField(
model_name='courseofferingmajor',
name='major',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='coursedashboards.Major'),
),
migrations.AddField(
model_name='courseofferingmajor',
name='term',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='coursedashboards.Term'),
),
migrations.AlterUniqueTogether(
name='courseofferingmajor',
unique_together=set([('major', 'term', 'course')]),
),
]
| uw-it-aca/course-dashboards | coursedashboards/migrations/0006_auto_20170918_1954.py | Python | apache-2.0 | 2,064 |
from __future__ import print_function
###################################################################
# Copyright 2013-2017 All Rights Reserved
# Authors: The Paradrop Team
###################################################################
import sys
from paradrop.base import settings
from paradrop.lib.utils.pd_storage import PDStorage
from .chute import Chute
class ChuteStorage(PDStorage):
"""
ChuteStorage class.
This class holds onto the list of Chutes on this AP.
It implements the PDStorage class which allows us to save the chuteList to disk transparently
"""
# Class variable of chute list so all instances see the same thing
chuteList = dict()
def __init__(self, filename=None, save_timer=settings.FC_CHUTESTORAGE_SAVE_TIMER):
if(not filename):
filename = settings.FC_CHUTESTORAGE_FILE
PDStorage.__init__(self, filename, save_timer)
# Has it been loaded?
if(len(ChuteStorage.chuteList) == 0):
self.loadFromDisk()
def setAttr(self, attr):
"""Save our attr however we want (as class variable for all to see)"""
ChuteStorage.chuteList = attr
def getAttr(self):
"""Get our attr (as class variable for all to see)"""
return ChuteStorage.chuteList
def getChuteList(self):
"""Return a list of the names of the chutes we know of."""
return ChuteStorage.chuteList.values()
def getChute(self, name):
"""Returns a reference to a chute we have in our cache, or None."""
return ChuteStorage.chuteList.get(name, None)
def deleteChute(self, ch):
"""Deletes a chute from the chute storage. Can be sent the chute object, or the chute name."""
if (isinstance(ch, Chute)):
del ChuteStorage.chuteList[ch.name]
else:
del ChuteStorage.chuteList[ch]
self.saveToDisk()
def saveChute(self, ch):
"""
Saves the chute provided in our internal chuteList.
Also since we just received a new chute to hold onto we should save our ChuteList to disk.
"""
# check if there is a version of the chute already
oldch = ChuteStorage.chuteList.get(ch.name, None)
if(oldch != None):
# we should merge these chutes so we don't lose any data
oldch.__dict__.update(ch.__dict__)
# TODO: do we need to deal with cache separate? Old code we did
else:
ChuteStorage.chuteList[ch.name] = ch
self.saveToDisk()
def clearChuteStorage(self):
ChuteStorage.chuteList.clear()
self.saveToDisk()
#
# Functions we override to implement PDStorage Properly
#
def attrSaveable(self):
"""Returns True if we should save the ChuteList, otherwise False."""
return (type(ChuteStorage.chuteList) == dict)
@classmethod
def get_chute(cls, name):
return cls.chuteList[name]
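# Illustrative usage sketch (not part of the original module); 'ch' stands for a Chute
# object built elsewhere and the chute name is hypothetical:
#   store = ChuteStorage()       # loads a previously saved chute list from disk if needed
#   store.saveChute(ch)          # merge-or-insert by name, then persist to disk
#   store.getChuteList()         # all known Chute objects
#   store.getChute('firewall')   # a single chute by name, or None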
if(__name__ == '__main__'): # pragma: no cover
def usage():
print('Usage: $0 -ls : print chute storage details')
exit(0)
try:
if(sys.argv[1] != '-ls'):
usage()
except Exception as e:
print(e)
usage()
cs = ChuteStorage()
chutes = cs.getChuteList()
for ch in chutes:
print(ch)
| ParadropLabs/Paradrop | paradrop/daemon/paradrop/core/chute/chute_storage.py | Python | apache-2.0 | 3,342 |
import envi.archs.h8.emu as h8_emu
import envi.archs.h8.regs as h8_regs
import vivisect.impemu.emulator as v_i_emulator
class H8WorkspaceEmulator(v_i_emulator.WorkspaceEmulator, h8_emu.H8Emulator):
taintregs = [h8_regs.REG_ER0, h8_regs.REG_ER1, h8_regs.REG_ER2]
def __init__(self, vw, logwrite=False, logread=False):
h8_emu.H8Emulator.__init__(self)
v_i_emulator.WorkspaceEmulator.__init__(self, vw, logwrite=logwrite, logread=logread)
| bat-serjo/vivisect | vivisect/impemu/platarch/h8.py | Python | apache-2.0 | 464 |
from django.conf import settings
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.PDB import *
from Bio.PDB.PDBIO import Select
from common.definitions import *
from protein.models import Protein, ProteinSegment
from residue.models import Residue
from structure.functions import BlastSearch, MappedResidue, StructureSeqNumOverwrite
from structure.sequence_parser import *
import Bio.PDB.Polypeptide as polypeptide
import os,logging
from collections import OrderedDict
logger = logging.getLogger("protwis")
#==============================================================================
#Class for annotating the pdb structures with generic numbers
class GenericNumbering(object):
residue_list = ["ARG","ASP","GLU","HIS","ASN","GLN","LYS","SER","THR","HID","PHE","LEU","ILE","TYR","TRP","VAL","MET","PRO","CYS","ALA","GLY"]
exceptions = {'6GDG':[255, 10]}
def __init__ (self, pdb_file=None, pdb_filename=None, structure=None, pdb_code=None, blast_path='blastp',
blastdb=os.sep.join([settings.STATICFILES_DIRS[0], 'blast', 'protwis_blastdb']),top_results=1, sequence_parser=False, signprot=False):
# pdb_file can be either a name/path or a handle to an open file
self.pdb_file = pdb_file
self.pdb_filename = pdb_filename
# if pdb 4 letter code is specified
self.pdb_code = pdb_code
# dictionary of 'MappedResidue' object storing information about alignments and bw numbers
self.residues = {}
self.pdb_seq = {} #Seq('')
# list of uniprot ids returned from blast
self.prot_id_list = []
#setup for local blast search
self.blast = BlastSearch(blast_path=blast_path, blastdb=blastdb,top_results=top_results)
# calling sequence parser
if sequence_parser:
if pdb_code:
struct = Structure.objects.get(pdb_code__index=self.pdb_code)
if not signprot:
if pdb_code:
s = SequenceParser(pdb_file=self.pdb_file, wt_protein_id=struct.protein_conformation.protein.parent.id)
else:
s = SequenceParser(pdb_file=self.pdb_file)#, wt_protein_id=struct.protein_conformation.protein.parent.id)
else:
s = SequenceParser(pdb_file=self.pdb_file, wt_protein_id=signprot.id)
self.pdb_structure = s.pdb_struct
self.mapping = s.mapping
self.wt = s.wt
else:
if self.pdb_file:
self.pdb_structure = PDBParser(PERMISSIVE=True, QUIET=True).get_structure('ref', self.pdb_file)[0]
elif self.pdb_filename:
self.pdb_structure = PDBParser(PERMISSIVE=True, QUIET=True).get_structure('ref', self.pdb_filename)[0]
else:
self.pdb_structure = structure
self.parse_structure(self.pdb_structure)
def parse_structure(self, pdb_struct):
"""
extracting sequence and preparing dictionary of residues
bio.pdb reads pdb in the following cascade: model->chain->residue->atom
"""
for chain in pdb_struct:
self.residues[chain.id] = {}
self.pdb_seq[chain.id] = Seq('')
for res in chain:
#in bio.pdb the residue's id is a tuple of (hetatm flag, residue number, insertion code)
                if res.resname == "HID":
                    # HID is a protonation-state variant of histidine; map it to HIS
                    one_letter = polypeptide.three_to_one('HIS')
                else:
                    if res.resname not in self.residue_list:
                        continue
                    one_letter = polypeptide.three_to_one(res.resname)
                self.residues[chain.id][res.id[1]] = MappedResidue(res.id[1], one_letter)
self.pdb_seq[chain.id] = ''.join([self.residues[chain.id][x].name for x in sorted(self.residues[chain.id].keys())])
for pos, res in enumerate(sorted(self.residues[chain.id].keys()), start=1):
self.residues[chain.id][res].pos_in_aln = pos
def locate_res_by_pos (self, chain, pos):
for res in self.residues[chain].keys():
if self.residues[chain][res].pos_in_aln == pos:
return res
return 0
def map_blast_seq (self, prot_id, hsps, chain):
#find uniprot residue numbers corresponding to those in pdb file
q_seq = list(hsps.query)
tmp_seq = list(hsps.sbjct)
subj_counter = hsps.sbjct_start
q_counter = hsps.query_start
logger.info("{}\n{}".format(hsps.query, hsps.sbjct))
logger.info("{:d}\t{:d}".format(hsps.query_start, hsps.sbjct_start))
rs = Residue.objects.prefetch_related('display_generic_number', 'protein_segment').filter(
protein_conformation__protein=prot_id)
residues = {}
for r in rs:
residues[r.sequence_number] = r
while tmp_seq:
#skipping position if there is a gap in either of sequences
if q_seq[0] == '-' or q_seq[0] == 'X' or q_seq[0] == ' ':
subj_counter += 1
tmp_seq.pop(0)
q_seq.pop(0)
continue
if tmp_seq[0] == '-' or tmp_seq[0] == 'X' or tmp_seq[0] == ' ':
q_counter += 1
tmp_seq.pop(0)
q_seq.pop(0)
continue
if tmp_seq[0] == q_seq[0]:
resn = self.locate_res_by_pos(chain, q_counter)
if resn != 0:
if subj_counter in residues:
db_res = residues[subj_counter]
if db_res.protein_segment:
segment = db_res.protein_segment.slug
self.residues[chain][resn].add_segment(segment)
if db_res.display_generic_number:
num = db_res.display_generic_number.label
bw, gpcrdb = num.split('x')
gpcrdb = "{}.{}".format(bw.split('.')[0], gpcrdb)
self.residues[chain][resn].add_bw_number(bw)
self.residues[chain][resn].add_gpcrdb_number(gpcrdb)
self.residues[chain][resn].add_gpcrdb_number_id(db_res.display_generic_number.id)
self.residues[chain][resn].add_display_number(num)
self.residues[chain][resn].add_residue_record(db_res)
else:
logger.warning("Could not find residue {} {} in the database.".format(resn, subj_counter))
if prot_id not in self.prot_id_list:
self.prot_id_list.append(prot_id)
q_counter += 1
subj_counter += 1
tmp_seq.pop(0)
q_seq.pop(0)
def get_substructure_mapping_dict(self):
mapping_dict = {}
for chain in self.residues.keys():
for res in self.residues[chain].keys():
if self.residues[chain][res].segment in mapping_dict.keys():
mapping_dict[self.residues[chain][res].segment].append(self.residues[chain][res].number)
else:
mapping_dict[self.residues[chain][res].segment] = [self.residues[chain][res].number,]
return mapping_dict
def get_annotated_structure(self):
for chain in self.pdb_structure:
for residue in chain:
if residue.id[1] in self.residues[chain.id].keys():
if self.residues[chain.id][residue.id[1]].gpcrdb != 0.:
residue["CA"].set_bfactor(float(self.residues[chain.id][residue.id[1]].gpcrdb))
if self.residues[chain.id][residue.id[1]].bw != 0.:
residue["N"].set_bfactor(float(self.residues[chain.id][residue.id[1]].bw))
return self.pdb_structure
def save_gn_to_pdb(self):
#replace bfactor field of CA atoms with b-w numbers and return filehandle with the structure written
for chain in self.pdb_structure:
for residue in chain:
if residue.id[1] in self.residues[chain.id].keys():
if self.residues[chain.id][residue.id[1]].gpcrdb != 0.:
residue["CA"].set_bfactor(float(self.residues[chain.id][residue.id[1]].gpcrdb))
if self.residues[chain.id][residue.id[1]].bw != 0.:
residue["N"].set_bfactor(float(self.residues[chain.id][residue.id[1]].bw))
r = self.residues[chain.id][residue.id[1]]
#get the basename, extension and export the pdb structure with b-w numbers
root, ext = os.path.splitext(self.pdb_filename)
io=PDBIO()
io.set_structure(self.pdb_structure)
io.save("%s_GPCRDB%s" %(root, ext))
def assign_generic_numbers(self):
alignments = {}
#blast search goes first, looping through all the chains
for chain in self.pdb_seq.keys():
alignments[chain] = self.blast.run(self.pdb_seq[chain])
#map the results onto pdb sequence for every sequence pair from blast
for chain in self.pdb_seq.keys():
for alignment in alignments[chain]:
if alignment == []:
continue
for hsps in alignment[1].hsps:
self.map_blast_seq(alignment[0], hsps, chain)
return self.get_annotated_structure()
def assign_generic_numbers_with_sequence_parser(self):
for chain in self.pdb_structure:
for residue in chain:
if chain.id in self.mapping:
if residue.id[1] in self.mapping[chain.id].keys():
gpcrdb_num = self.mapping[chain.id][residue.id[1]].gpcrdb
if gpcrdb_num != '' and len(gpcrdb_num.split('x'))==2:
bw, gn = gpcrdb_num.split('x')
gn = "{}.{}".format(bw.split('.')[0], gn)
if len(gn.split('.')[1])==3:
gn = '-'+gn[:-1]
try:
residue["CA"].set_bfactor(float(gn))
residue["N"].set_bfactor(float(bw))
except:
pass
return self.pdb_structure
def assign_cgn_with_sequence_parser(self, target_chain):
pdb_array = OrderedDict()
for s in G_PROTEIN_SEGMENTS['Full']:
pdb_array[s] = OrderedDict()
i, j = 0, 0
key_list = [i.gpcrdb for i in list(self.mapping[target_chain].values())]
for key, vals in self.mapping[target_chain].items():
category, segment, num = vals.gpcrdb.split('.')
if self.pdb_code in self.exceptions:
try:
if self.pdb_structure[target_chain][key].get_id()[1]>=self.exceptions[self.pdb_code][0]:
if i<self.exceptions[self.pdb_code][1]:
pdb_array[segment][vals.gpcrdb] = 'x'
i+=1
continue
except:
pass
this_cat, this_seg, this_num = key_list[j].split('.')
try:
pdb_array[segment][vals.gpcrdb] = self.pdb_structure[target_chain][key-i].get_list()
except:
pdb_array[segment][vals.gpcrdb] = 'x'
j+=1
return pdb_array
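# Illustrative usage sketch (not part of the original module); the file path is hypothetical:
#   gn = GenericNumbering(pdb_filename='/tmp/receptor.pdb')
#   annotated = gn.assign_generic_numbers()  # BLAST-maps each chain and stores GPCRdb/BW
#                                            # numbers in the CA/N b-factor fields
#   gn.save_gn_to_pdb()                      # writes /tmp/receptor_GPCRDB.pdb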
| cmunk/protwis | structure/assign_generic_numbers_gpcr.py | Python | apache-2.0 | 11,967 |
# Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD
# Copyright (C) 2017 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.volume import base
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
CONF = config.CONF
class BaseGroupSnapshotsTest(base.BaseVolumeAdminTest):
@classmethod
def skip_checks(cls):
super(BaseGroupSnapshotsTest, cls).skip_checks()
if not CONF.volume_feature_enabled.snapshot:
raise cls.skipException("Cinder volume snapshots are disabled")
def _create_group_snapshot(self, **kwargs):
if 'name' not in kwargs:
kwargs['name'] = data_utils.rand_name(
self.__class__.__name__ + '-Group_Snapshot')
group_snapshot = self.group_snapshots_client.create_group_snapshot(
**kwargs)['group_snapshot']
group_snapshot['group_id'] = kwargs['group_id']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self._delete_group_snapshot, group_snapshot)
waiters.wait_for_volume_resource_status(
self.group_snapshots_client, group_snapshot['id'], 'available')
return group_snapshot
def _delete_group_snapshot(self, group_snapshot):
self.group_snapshots_client.delete_group_snapshot(group_snapshot['id'])
vols = self.volumes_client.list_volumes(detail=True)['volumes']
snapshots = self.snapshots_client.list_snapshots(
detail=True)['snapshots']
for vol in vols:
for snap in snapshots:
if (vol['group_id'] == group_snapshot['group_id'] and
vol['id'] == snap['volume_id']):
self.snapshots_client.wait_for_resource_deletion(
snap['id'])
self.group_snapshots_client.wait_for_resource_deletion(
group_snapshot['id'])
class GroupSnapshotsTest(BaseGroupSnapshotsTest):
"""Test group snapshot"""
volume_min_microversion = '3.14'
volume_max_microversion = 'latest'
@decorators.idempotent_id('1298e537-f1f0-47a3-a1dd-8adec8168897')
def test_group_snapshot_create_show_list_delete(self):
"""Test create/show/list/delete group snapshot
1. Create volume type "volume_type1"
2. Create group type "group_type1"
3. Create group "group1" with "group_type1" and "volume_type1"
4. Create volume "volume1" with "volume_type1" and "group1"
5. Create group snapshot "group_snapshot1" with "group1"
6. Check snapshot created from "volume1" reaches available status
7. Check the created group snapshot "group_snapshot1" is in the list
of all group snapshots
8. Delete group snapshot "group_snapshot1"
"""
# Create volume type
volume_type = self.create_volume_type()
# Create group type
group_type = self.create_group_type()
# Create group
grp = self.create_group(group_type=group_type['id'],
volume_types=[volume_type['id']])
# Create volume
vol = self.create_volume(volume_type=volume_type['id'],
group_id=grp['id'])
# Create group snapshot
group_snapshot_name = data_utils.rand_name('group_snapshot')
group_snapshot = self._create_group_snapshot(
group_id=grp['id'], name=group_snapshot_name)
snapshots = self.snapshots_client.list_snapshots(
detail=True)['snapshots']
for snap in snapshots:
if vol['id'] == snap['volume_id']:
waiters.wait_for_volume_resource_status(
self.snapshots_client, snap['id'], 'available')
self.assertEqual(group_snapshot_name, group_snapshot['name'])
# Get a given group snapshot
group_snapshot = self.group_snapshots_client.show_group_snapshot(
group_snapshot['id'])['group_snapshot']
self.assertEqual(group_snapshot_name, group_snapshot['name'])
# Get all group snapshots with details, check some detail-specific
# elements, and look for the created group snapshot
group_snapshots = self.group_snapshots_client.list_group_snapshots(
detail=True)['group_snapshots']
for grp_snapshot in group_snapshots:
self.assertIn('created_at', grp_snapshot)
self.assertIn('group_id', grp_snapshot)
self.assertIn((group_snapshot['name'], group_snapshot['id']),
[(m['name'], m['id']) for m in group_snapshots])
# Delete group snapshot
self._delete_group_snapshot(group_snapshot)
group_snapshots = self.group_snapshots_client.list_group_snapshots()[
'group_snapshots']
self.assertNotIn((group_snapshot['name'], group_snapshot['id']),
[(m['name'], m['id']) for m in group_snapshots])
@decorators.idempotent_id('eff52c70-efc7-45ed-b47a-4ad675d09b81')
def test_create_group_from_group_snapshot(self):
"""Test creating group from group snapshot
1. Create volume type "volume_type1"
2. Create group type "group_type1"
3. Create group "group1" with "group_type1" and "volume_type1"
4. Create volume "volume1" with "volume_type1" and "group1"
5. Create group snapshot "group_snapshot1" with "group1"
6. Check snapshot created from "volume1" reaches available status
7. Create group "group2" from "group_snapshot1"
8. Check the volumes belonging to "group2" reach available status
9. Check "group2" reaches available status
"""
# Create volume type
volume_type = self.create_volume_type()
# Create group type
group_type = self.create_group_type()
# Create Group
grp = self.create_group(group_type=group_type['id'],
volume_types=[volume_type['id']])
# Create volume
vol = self.create_volume(volume_type=volume_type['id'],
group_id=grp['id'])
# Create group_snapshot
group_snapshot_name = data_utils.rand_name('group_snapshot')
group_snapshot = self._create_group_snapshot(
group_id=grp['id'], name=group_snapshot_name)
self.assertEqual(group_snapshot_name, group_snapshot['name'])
snapshots = self.snapshots_client.list_snapshots(
detail=True)['snapshots']
for snap in snapshots:
if vol['id'] == snap['volume_id']:
waiters.wait_for_volume_resource_status(
self.snapshots_client, snap['id'], 'available')
# Create Group from Group snapshot
grp_name2 = data_utils.rand_name('Group_from_snap')
grp2 = self.groups_client.create_group_from_source(
group_snapshot_id=group_snapshot['id'], name=grp_name2)['group']
self.addCleanup(self.delete_group, grp2['id'])
self.assertEqual(grp_name2, grp2['name'])
vols = self.volumes_client.list_volumes(detail=True)['volumes']
for vol in vols:
if vol['group_id'] == grp2['id']:
waiters.wait_for_volume_resource_status(
self.volumes_client, vol['id'], 'available')
waiters.wait_for_volume_resource_status(
self.groups_client, grp2['id'], 'available')
@decorators.idempotent_id('7d7fc000-0b4c-4376-a372-544116d2e127')
@decorators.related_bug('1739031')
def test_delete_group_snapshots_following_updated_volumes(self):
"""Test deleting group snapshot following updated volumes
1. Create volume type "volume_type1"
2. Create group type "group_type1"
3. Create group "group1" with "group_type1" and "volume_type1"
4. Create 2 volumes "volume1" and "volume2"
with "volume_type1" and "group1"
5. For each created volume, removing and then adding back to "group1"
6. Create group snapshot "group_snapshot1" with "group1"
7. Check snapshots created from "volume1" and "volume2" reach
available status
8. Delete "group_snapshot1"
9. Check snapshots created from "volume1" and "volume2" are deleted
"""
volume_type = self.create_volume_type()
group_type = self.create_group_type()
# Create a volume group
grp = self.create_group(group_type=group_type['id'],
volume_types=[volume_type['id']])
# Note: When dealing with consistency groups all volumes must
# reside on the same backend. Adding volumes to the same consistency
# group from multiple backends isn't supported. In order to ensure all
# volumes share the same backend, all volumes must share same
# volume-type and group id.
volume_list = []
for _ in range(2):
volume = self.create_volume(volume_type=volume_type['id'],
group_id=grp['id'])
volume_list.append(volume['id'])
for vol in volume_list:
self.groups_client.update_group(grp['id'],
remove_volumes=vol)
waiters.wait_for_volume_resource_status(
self.groups_client, grp['id'], 'available')
self.groups_client.update_group(grp['id'],
add_volumes=vol)
waiters.wait_for_volume_resource_status(
self.groups_client, grp['id'], 'available')
# Verify the created volumes are associated with consistency group
vols = self.volumes_client.list_volumes(detail=True)['volumes']
grp_vols = [v for v in vols if v['group_id'] == grp['id']]
self.assertEqual(2, len(grp_vols))
# Create a snapshot group
group_snapshot = self._create_group_snapshot(group_id=grp['id'])
snapshots = self.snapshots_client.list_snapshots(
detail=True)['snapshots']
for snap in snapshots:
if snap['volume_id'] in volume_list:
waiters.wait_for_volume_resource_status(
self.snapshots_client, snap['id'], 'available')
# Delete a snapshot group
self._delete_group_snapshot(group_snapshot)
class GroupSnapshotsV319Test(BaseGroupSnapshotsTest):
"""Test group snapshot with volume microversion greater than 3.18"""
volume_min_microversion = '3.19'
volume_max_microversion = 'latest'
@decorators.idempotent_id('3b42c9b9-c984-4444-816e-ca2e1ed30b40')
def test_reset_group_snapshot_status(self):
"""Test resetting group snapshot status to creating/available/error"""
# Create volume type
volume_type = self.create_volume_type()
# Create group type
group_type = self.create_group_type()
# Create group
group = self.create_group(group_type=group_type['id'],
volume_types=[volume_type['id']])
# Create volume
volume = self.create_volume(volume_type=volume_type['id'],
group_id=group['id'])
# Create group snapshot
group_snapshot = self._create_group_snapshot(group_id=group['id'])
snapshots = self.snapshots_client.list_snapshots(
detail=True)['snapshots']
for snap in snapshots:
if volume['id'] == snap['volume_id']:
waiters.wait_for_volume_resource_status(
self.snapshots_client, snap['id'], 'available')
# Reset group snapshot status
self.addCleanup(waiters.wait_for_volume_resource_status,
self.group_snapshots_client,
group_snapshot['id'], 'available')
self.addCleanup(
self.admin_group_snapshots_client.reset_group_snapshot_status,
group_snapshot['id'], 'available')
for status in ['creating', 'available', 'error']:
self.admin_group_snapshots_client.reset_group_snapshot_status(
group_snapshot['id'], status)
waiters.wait_for_volume_resource_status(
self.group_snapshots_client, group_snapshot['id'], status)
| openstack/tempest | tempest/api/volume/admin/test_group_snapshots.py | Python | apache-2.0 | 12,992 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from uuid import UUID
import uuid
try:
bytes()
except NameError:
bytes = str
from org.apache.qpid.proton import Proton, ProtonUnsupportedOperationException
from org.apache.qpid.proton import InterruptException as Interrupt
from org.apache.qpid.proton import TimeoutException as Timeout
from org.apache.qpid.proton.engine import \
Transport as JTransport, Sender as JSender, Receiver as JReceiver, \
Sasl, SslDomain as JSslDomain, \
EndpointState, TransportException
from org.apache.qpid.proton.message import \
MessageFormat, Message as JMessage
from org.apache.qpid.proton.codec import \
Data as JData
from org.apache.qpid.proton.messenger import MessengerException, Status
from org.apache.qpid.proton.amqp.transport import ErrorCondition, SenderSettleMode, ReceiverSettleMode
from org.apache.qpid.proton.amqp.messaging import Source, Target, Accepted, \
Rejected, Received, Modified, Released, AmqpValue
from org.apache.qpid.proton.amqp import UnsignedInteger, UnsignedLong, UnsignedByte, UnsignedShort, Symbol, \
Decimal32, Decimal64, Decimal128
from jarray import zeros, array
from java.util import EnumSet, UUID as JUUID, Date as JDate, HashMap
from java.nio import ByteBuffer
from java.lang import Character as JCharacter, String as JString, Integer as JInteger
from java.lang import NoClassDefFoundError
class Constant(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return self.name
class Skipped(Exception):
skipped = True
PENDING = "PENDING"
ACCEPTED = "ACCEPTED"
REJECTED = "REJECTED"
RELEASED = "RELEASED"
SETTLED = "SETTLED"
STATUSES = {
Status.ACCEPTED: ACCEPTED,
Status.REJECTED: REJECTED,
Status.PENDING: PENDING,
Status.RELEASED: RELEASED,
Status.SETTLED: SETTLED,
Status.UNKNOWN: None
}
MANUAL = "MANUAL"
AUTOMATIC = "AUTOMATIC"
API_LANGUAGE = "Java"
IMPLEMENTATION_LANGUAGE = "C"
if Proton.getDefaultImplementationType().name() == "PROTON_J":
IMPLEMENTATION_LANGUAGE = "Java"
class Endpoint(object):
LOCAL_UNINIT = 1
LOCAL_ACTIVE = 2
LOCAL_CLOSED = 4
REMOTE_UNINIT = 8
REMOTE_ACTIVE = 16
REMOTE_CLOSED = 32
def __init__(self):
self.condition = None
@property
def remote_condition(self):
return Condition(impl = self.impl.getRemoteCondition())
@property
def state(self):
local = self.impl.getLocalState()
remote = self.impl.getRemoteState()
result = 0
if (local == EndpointState.UNINITIALIZED):
result = result | self.LOCAL_UNINIT
elif (local == EndpointState.ACTIVE):
result = result | self.LOCAL_ACTIVE
elif (local == EndpointState.CLOSED):
result = result | self.LOCAL_CLOSED
if (remote == EndpointState.UNINITIALIZED):
result = result | self.REMOTE_UNINIT
elif (remote == EndpointState.ACTIVE):
result = result | self.REMOTE_ACTIVE
elif (remote == EndpointState.CLOSED):
result = result | self.REMOTE_CLOSED
return result
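    # Illustrative note (not in the original): the returned value is a bitmask, e.g. a
    # connection that is locally open but whose peer has not yet replied reports
    # LOCAL_ACTIVE | REMOTE_UNINIT == 2 | 8 == 10.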
    def _enums(self, mask):
        # decompose the bitmask into the matching local and remote EndpointState sets
        local = []
        if (self.LOCAL_UNINIT & mask):
            local.append(EndpointState.UNINITIALIZED)
        if (self.LOCAL_ACTIVE & mask):
            local.append(EndpointState.ACTIVE)
        if (self.LOCAL_CLOSED & mask):
            local.append(EndpointState.CLOSED)
        remote = []
        if (self.REMOTE_UNINIT & mask):
            remote.append(EndpointState.UNINITIALIZED)
        if (self.REMOTE_ACTIVE & mask):
            remote.append(EndpointState.ACTIVE)
        if (self.REMOTE_CLOSED & mask):
            remote.append(EndpointState.CLOSED)
        return EnumSet.of(*local), EnumSet.of(*remote)
def open(self):
self.impl.open()
def close(self):
if self.condition is not None:
self.impl.setCondition(self.condition.impl)
self.impl.close()
class Condition(object):
def __init__(self, name=None, description=None, info=None, impl=None):
if impl is None:
impl = ErrorCondition(Symbol.valueOf(name), description)
if info is not None:
impl.setInfo(info)
self.impl = impl
def _get_name(self):
c = self.impl.getCondition()
if c is not None:
return c.toString()
def _set_name(self, n):
self.impl.setCondition(Symbol.valueOf(n))
name = property(_get_name, _set_name)
def _get_description(self):
return self.impl.getDescription()
def _set_description(self, d):
self.impl.setDescription(d)
description = property(_get_description, _set_description)
def _get_info(self):
return self.impl.getInfo()
def _set_info(self, i):
self.impl.setInfo(i)
    info = property(_get_info, _set_info)
def __repr__(self):
return "Condition(%s)" % ", ".join([repr(x) for x in
(self.name, self.description, self.info)
if x])
def __eq__(self, o):
if not isinstance(o, Condition): return False
return self.impl.equals(o.impl)
def _2J(self):
return self.impl
def wrap_connection(impl):
if impl:
return impl.getContext()
else:
return None
class Connection(Endpoint):
def __init__(self):
Endpoint.__init__(self)
self.impl = Proton.connection()
self.impl.setContext(self)
self.desired_capabilities = None
self.offered_capabilities = None
self.properties = None
@property
def writable(self):
raise ProtonUnsupportedOperationException("Connection.writable")
def session(self):
return wrap_session(self.impl.session())
def session_head(self, mask):
return wrap_session(self.impl.sessionHead(*self._enums(mask)))
def link_head(self, mask):
return wrap_link(self.impl.linkHead(*self._enums(mask)))
@property
def work_head(self):
return wrap_delivery(self.impl.getWorkHead())
def _get_container(self):
return self.impl.getContainer()
def _set_container(self, container):
self.impl.setContainer(container)
container = property(_get_container, _set_container)
def _get_hostname(self):
return self.impl.getHostname()
def _set_hostname(self, hostname):
self.impl.setHostname(hostname)
hostname = property(_get_hostname, _set_hostname)
def _get_remote_container(self):
return self.impl.getRemoteContainer()
def _set_remote_container(self, container):
self.impl.setRemoteContainer(container)
remote_container = property(_get_remote_container, _set_remote_container)
def _get_remote_hostname(self):
return self.impl.getRemoteHostname()
def _set_remote_hostname(self, hostname):
self.impl.setRemoteHostname(hostname)
remote_hostname = property(_get_remote_hostname, _set_remote_hostname)
@property
def remote_offered_capabilities(self):
return convertToPyArray(Data.SYMBOL, self.impl.getRemoteOfferedCapabilities(),symbol)
@property
def remote_desired_capabilities(self):
return convertToPyArray(Data.SYMBOL, self.impl.getRemoteDesiredCapabilities(),symbol)
@property
def remote_properties(self):
return J2PY(self.impl.getRemoteProperties());
def open(self):
self.impl.setOfferedCapabilities(PY2J(self.offered_capabilities))
self.impl.setDesiredCapabilities(PY2J(self.desired_capabilities))
self.impl.setProperties(PY2J(self.properties))
Endpoint.open(self)
def wrap_session(impl):
# XXX
if impl: return Session(impl)
class Session(Endpoint):
def __init__(self, impl):
Endpoint.__init__(self)
self.impl = impl
@property
def connection(self):
return wrap_connection(self.impl.getConnection())
def sender(self, name):
return wrap_link(self.impl.sender(name))
def receiver(self, name):
return wrap_link(self.impl.receiver(name))
def _get_incoming_capacity(self):
return self.impl.getIncomingCapacity()
def _set_incoming_capacity(self, capacity):
self.impl.setIncomingCapacity(capacity)
incoming_capacity = property(_get_incoming_capacity, _set_incoming_capacity)
@property
def outgoing_bytes(self):
return self.impl.getOutgoingBytes()
@property
def incoming_bytes(self):
return self.impl.getIncomingBytes()
def wrap_link(impl):
if impl is None: return None
elif isinstance(impl, JSender):
return Sender(impl)
elif isinstance(impl, JReceiver):
return Receiver(impl)
else:
raise Exception("unknown type")
class Link(Endpoint):
SND_UNSETTLED = SenderSettleMode.UNSETTLED
SND_SETTLED = SenderSettleMode.SETTLED
SND_MIXED = SenderSettleMode.MIXED
RCV_FIRST = ReceiverSettleMode.FIRST
RCV_SECOND = ReceiverSettleMode.SECOND
def __init__(self, impl):
Endpoint.__init__(self)
self.impl = impl
@property
def source(self):
if self.impl.getSource() is None:
self.impl.setSource(Source())
return Terminus(self.impl.getSource())
@property
def target(self):
if self.impl.getTarget() is None:
self.impl.setTarget(Target())
return Terminus(self.impl.getTarget())
@property
def remote_source(self):
return Terminus(self.impl.getRemoteSource())
@property
def remote_target(self):
return Terminus(self.impl.getRemoteTarget())
@property
def session(self):
return wrap_session(self.impl.getSession())
def delivery(self, tag):
return wrap_delivery(self.impl.delivery(tag))
@property
def current(self):
return wrap_delivery(self.impl.current())
def advance(self):
return self.impl.advance()
@property
def unsettled(self):
return self.impl.getUnsettled()
@property
def credit(self):
return self.impl.getCredit()
@property
def available(self):
raise ProtonUnsupportedOperationException("Link.available")
@property
def queued(self):
return self.impl.getQueued()
def next(self, mask):
return wrap_link(self.impl.next(*self._enums(mask)))
@property
def name(self):
return self.impl.getName()
@property
def remote_snd_settle_mode(self):
return self.impl.getRemoteSenderSettleMode()
@property
def remote_rcv_settle_mode(self):
return self.impl.getRemoteReceiverSettleMode()
def _get_snd_settle_mode(self):
return self.impl.getSenderSettleMode()
def _set_snd_settle_mode(self, mode):
self.impl.setSenderSettleMode(mode)
snd_settle_mode = property(_get_snd_settle_mode, _set_snd_settle_mode)
def _get_rcv_settle_mode(self):
return self.impl.getReceiverSettleMode()
def _set_rcv_settle_mode(self, mode):
self.impl.setReceiverSettleMode(mode)
rcv_settle_mode = property(_get_rcv_settle_mode, _set_rcv_settle_mode)
def drained(self):
return self.impl.drained()
class DataDummy:
def format(self):
pass
def put_array(self, *args, **kwargs):
raise ProtonUnsupportedOperationException("Data.put_array")
class Terminus(object):
UNSPECIFIED = None
DIST_MODE_UNSPECIFIED = None
DIST_MODE_COPY = "copy"
DIST_MODE_MOVE = "move"
def __init__(self, impl):
self.impl = impl
self.type = None
self.timeout = None
self.durability = None
self.expiry_policy = None
self.properties = DataDummy()
self.outcomes = DataDummy()
self.filter = DataDummy()
self.capabilities = DataDummy()
def _get_address(self):
return self.impl.getAddress()
def _set_address(self, address):
self.impl.setAddress(address)
address = property(_get_address, _set_address)
def _get_timeout(self):
return self.impl.getTimeout()
def _set_timeout(self, t):
if t is not None:
t = UnsignedInteger(t)
return self.impl.setTimeout(t)
timeout = property(_get_timeout, _set_timeout)
def _is_dynamic(self):
return self.impl.getDynamic()
def _set_dynamic(self, dynamic):
self.impl.setDynamic(dynamic)
dynamic = property(_is_dynamic, _set_dynamic)
def _get_distribution_mode(self):
if isinstance(self.impl, Source):
sym = self.impl.getDistributionMode()
if sym is None:
return self.DIST_MODE_UNSPECIFIED
else:
return sym.toString()
else:
return self.DIST_MODE_UNSPECIFIED
def _set_distribution_mode(self, mode):
if isinstance(self.impl, Source):
if mode in [None, "copy", "move"]:
self.impl.setDistributionMode(Symbol.valueOf(mode))
else:
self.impl.setDistributionMode(None)
distribution_mode = property(_get_distribution_mode, _set_distribution_mode)
def copy(self, src):
self.address = src.address
self.timeout = src.timeout
self.dynamic = src.dynamic
self.distribution_mode = src.distribution_mode
class Sender(Link):
def offered(self, n):
raise ProtonUnsupportedOperationException("Sender.offered")
def send(self, bytes):
return self.impl.send(bytes, 0, len(bytes))
class Receiver(Link):
def flow(self, n):
self.impl.flow(n)
def drain(self, n):
self.impl.drain(n)
def draining(self):
return self.impl.draining()
def recv(self, size):
output = zeros(size, "b")
n = self.impl.recv(output, 0, size)
if n >= 0:
return output.tostring()[:n]
elif n == JTransport.END_OF_STREAM:
return None
else:
raise Exception(n)
class Disposition(object):
RECEIVED = 0x23
ACCEPTED = 0x24
REJECTED = 0x25
RELEASED = 0x26
MODIFIED = 0x27
def __init__(self):
self.type = 0
self._received = None
self._accepted = None
self._rejected = None
self._released = None
self._modified = None
def _get_section_number(self):
if self._received:
return J2PY(self._received.getSectionNumber())
else:
return 0
def _set_section_number(self, n):
if not self._received:
self._received = Received()
self._received.setSectionNumber(UnsignedInteger(n))
section_number = property(_get_section_number, _set_section_number)
def _get_section_offset(self):
if self._received:
return J2PY(self._received.getSectionOffset())
else:
return 0
def _set_section_offset(self, n):
if not self._received:
self._received = Received()
self._received.setSectionOffset(UnsignedLong(n))
section_offset = property(_get_section_offset, _set_section_offset)
def _get_failed(self):
if self._modified:
return self._modified.getDeliveryFailed()
else:
return False
def _set_failed(self, b):
if not self._modified:
self._modified = Modified()
self._modified.setDeliveryFailed(b)
failed = property(_get_failed, _set_failed)
def _get_undeliverable(self):
if self._modified:
return self._modified.getUndeliverableHere()
else:
return False
def _set_undeliverable(self, b):
if not self._modified:
self._modified = Modified()
self._modified.setUndeliverableHere(b)
undeliverable = property(_get_undeliverable, _set_undeliverable)
def _get_data(self):
return None
def _set_data(self, obj):
raise Skipped()
data = property(_get_data, _set_data)
def _get_annotations(self):
if self._modified:
return J2PY(self._modified.getMessageAnnotations())
else:
return None
def _set_annotations(self, obj):
if not self._modified:
self._modified = Modified()
self._modified.setMessageAnnotations(PY2J(obj))
annotations = property(_get_annotations, _set_annotations)
def _get_condition(self):
if self._rejected:
return Condition(impl = self._rejected.getError())
else:
return None
def _set_condition(self, obj):
if not self._rejected:
self._rejected = Rejected()
self._rejected.setError(obj._2J())
condition = property(_get_condition, _set_condition)
def _as_received(self):
if self._received is None:
self._received = Received()
return self._received
def _as_accepted(self):
if self._accepted is None:
self._accepted = Accepted.getInstance()
return self._accepted
def _as_rejected(self):
if self._rejected is None:
self._rejected = Rejected()
return self._rejected
def _as_released(self):
if self._released is None:
self._released = Released.getInstance()
return self._released
def _as_modified(self):
if self._modified is None:
self._modified = Modified()
return self._modified
PY2J = {
RECEIVED: _as_received,
ACCEPTED: _as_accepted,
REJECTED: _as_rejected,
RELEASED: _as_released,
MODIFIED: _as_modified
}
def _2J(self):
return self.PY2J[self.type](self)
def _from_received(self, s):
self.type = self.RECEIVED
self._received = s
def _from_accepted(self, s):
self.type = self.ACCEPTED
self._accepted = s
def _from_rejected(self, s):
self.type = self.REJECTED
self._rejected = s
def _from_released(self, s):
self.type = self.RELEASED
self._released = s
def _from_modified(self, s):
self.type = self.MODIFIED
self._modified = s
J2PY = {
Received: _from_received,
Accepted: _from_accepted,
Rejected: _from_rejected,
Released: _from_released,
Modified: _from_modified
}
def _2PY(self, impl):
self.J2PY[type(impl)](self, impl)
def wrap_delivery(impl):
if impl: return Delivery(impl)
class Delivery(object):
RECEIVED = Disposition.RECEIVED
ACCEPTED = Disposition.ACCEPTED
REJECTED = Disposition.REJECTED
RELEASED = Disposition.RELEASED
MODIFIED = Disposition.MODIFIED
def __init__(self, impl):
self.impl = impl
self.local = Disposition()
@property
def tag(self):
return self.impl.getTag().tostring()
@property
def writable(self):
return self.impl.isWritable()
@property
def readable(self):
return self.impl.isReadable()
@property
def updated(self):
return self.impl.isUpdated()
def update(self, disp):
self.local.type = disp
self.impl.disposition(self.local._2J())
@property
def remote(self):
d = Disposition()
d._2PY(self.impl.getRemoteState())
return d
@property
def remote_state(self):
return self.remote.type
@property
def local_state(self):
return self.local.type
def settle(self):
self.impl.settle()
@property
def settled(self):
return self.impl.remotelySettled()
@property
def work_next(self):
return wrap_delivery(self.impl.getWorkNext())
@property
def pending(self):
return self.impl.pending()
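# Illustrative helper (added for exposition, not part of the original binding):
# a minimal receive-side disposition flow using the Delivery/Disposition
# wrappers above. The `delivery` argument is assumed to come from Link.current
# after data has arrived; only methods defined on Delivery here are used.
def _example_accept_and_settle(delivery):
  """Accept a received delivery locally and settle it."""
  if delivery.readable or delivery.updated:
    delivery.update(Delivery.ACCEPTED)  # record the local disposition
    delivery.settle()                   # discharge the delivery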
class Transport(object):
TRACE_OFF = 0
TRACE_RAW = 1
TRACE_FRM = 2
TRACE_DRV = 4
def __init__(self):
self.impl = Proton.transport()
self._ssl = None
self._sasl = None
  def __del__(self):
    # Unlike the C binding there is no pn_transport_free and no self._trans
    # here; the Java engine objects are simply garbage collected. Only drop
    # the back-references so the SASL/SSL wrappers are not used after the
    # transport goes away.
    if hasattr(self, "_sasl") and self._sasl:
      self._sasl._sasl = None
      self._sasl = None
    if hasattr(self, "_ssl") and self._ssl:
      # ditto the owned SSL object
      self._ssl._ssl = None
      self._ssl = None
def trace(self, mask):
# XXX: self.impl.trace(mask)
pass
def bind(self, connection):
self.impl.bind(connection.impl)
def capacity(self):
return self.impl.capacity()
def push(self, bytes):
input_buffer = self.impl.tail()
input_buffer.put(bytes)
self.impl.process()
def close_tail(self):
self.impl.close_tail()
def pending(self):
return self.impl.pending()
def peek(self, size):
output_buffer = self.impl.head()
output_length = min(size, output_buffer.remaining())
output = zeros(output_length, "b")
output_buffer.mark()
output_buffer.get(output)
output_buffer.reset()
return output.tostring()
def pop(self, size):
self.impl.pop(size)
def close_head(self):
self.impl.close_head()
def output(self, size):
p = self.pending()
if p < 0:
return None
else:
out = self.peek(min(size, p))
self.pop(len(out))
return out
def input(self, bytes):
if not bytes:
self.close_tail()
return None
else:
c = self.capacity()
if (c < 0):
return None
trimmed = bytes[:c]
self.push(trimmed)
return len(trimmed)
def _get_max_frame_size(self):
return self.impl.getMaxFrameSize()
def _set_max_frame_size(self, value):
self.impl.setMaxFrameSize(value)
max_frame_size = property(_get_max_frame_size, _set_max_frame_size,
doc="""
Sets the maximum size for received frames (in bytes).
""")
@property
def remote_max_frame_size(self):
return self.impl.getRemoteMaxFrameSize()
# AMQP 1.0 idle-time-out
def _get_idle_timeout(self):
#return pn_transport_get_idle_timeout(self._trans)
raise ProtonUnsupportedOperationException("Transport.idle_timeout")
def _set_idle_timeout(self, value):
#pn_transport_set_idle_timeout(self._trans, value)
raise ProtonUnsupportedOperationException("Transport.idle_timeout")
idle_timeout = property(_get_idle_timeout, _set_idle_timeout,
doc="""
The idle timeout of the connection (in milliseconds).
""")
@property
def remote_idle_timeout(self):
#return pn_transport_get_remote_idle_timeout(self._trans)
raise ProtonUnsupportedOperationException("Transport.remote_idle_timeout")
@property
def frames_output(self):
#return pn_transport_get_frames_output(self._trans)
raise ProtonUnsupportedOperationException("Transport.frames_output")
@property
def frames_input(self):
#return pn_transport_get_frames_input(self._trans)
raise ProtonUnsupportedOperationException("Transport.frames_input")
def sasl(self):
# SASL factory (singleton for this transport)
if not self._sasl:
self._sasl = SASL(self)
return self._sasl
def ssl(self, domain=None, session_details=None):
# SSL factory (singleton for this transport)
if not self._ssl:
self._ssl = SSL(self, domain, session_details)
return self._ssl
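# Illustrative helper (an addition for exposition): a minimal I/O pump built
# only from Transport.input/Transport.output defined above. `read_bytes` and
# `write_bytes` are hypothetical callables supplied by the application, e.g.
# backed by a socket.
def _example_pump_transport(transport, read_bytes, write_bytes):
  """Feed received bytes to the engine and flush any pending output."""
  incoming = read_bytes()
  if incoming:
    transport.input(incoming)        # hand raw bytes to the protocol engine
  outgoing = transport.output(4096)  # drain up to 4096 bytes of output
  if outgoing:
    write_bytes(outgoing)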
class UnmappedType:
def __init__(self, msg):
self.msg = msg
def __repr__(self):
return "UnmappedType(%s)" % self.msg
class ulong(long):
def __repr__(self):
return "ulong(%s)" % long.__repr__(self)
class timestamp(long):
def __repr__(self):
return "timestamp(%s)" % long.__repr__(self)
class symbol(unicode):
def __repr__(self):
return "symbol(%s)" % unicode.__repr__(self)
class char(unicode):
def __repr__(self):
return "char(%s)" % unicode.__repr__(self)
class Described(object):
def __init__(self, descriptor, value):
self.descriptor = descriptor
self.value = value
def __repr__(self):
return "Described(%r, %r)" % (self.descriptor, self.value)
def __eq__(self, o):
if isinstance(o, Described):
return self.descriptor == o.descriptor and self.value == o.value
else:
return False
UNDESCRIBED = Constant("UNDESCRIBED")
class Array(object):
def __init__(self, descriptor, type, *elements):
self.descriptor = descriptor
self.type = type
self.elements = elements
def __repr__(self):
if self.elements:
els = ", %s" % (", ".join(map(repr, self.elements)))
else:
els = ""
return "Array(%r, %r%s)" % (self.descriptor, self.type, els)
def __eq__(self, o):
if isinstance(o, Array):
return self.descriptor == o.descriptor and \
self.type == o.type and self.elements == o.elements
else:
return False
class Data(object):
  NULL = JData.DataType.NULL
  BOOL = JData.DataType.BOOL
  UBYTE = JData.DataType.UBYTE
  BYTE = JData.DataType.BYTE
  USHORT = JData.DataType.USHORT
  SHORT = JData.DataType.SHORT
  UINT = JData.DataType.UINT
  INT = JData.DataType.INT
  CHAR = JData.DataType.CHAR
  ULONG = JData.DataType.ULONG
  LONG = JData.DataType.LONG
  TIMESTAMP = JData.DataType.TIMESTAMP
  FLOAT = JData.DataType.FLOAT
  DOUBLE = JData.DataType.DOUBLE
  DECIMAL32 = JData.DataType.DECIMAL32
  DECIMAL64 = JData.DataType.DECIMAL64
  DECIMAL128 = JData.DataType.DECIMAL128
  UUID = JData.DataType.UUID
  BINARY = JData.DataType.BINARY
  STRING = JData.DataType.STRING
  SYMBOL = JData.DataType.SYMBOL
  DESCRIBED = JData.DataType.DESCRIBED
  ARRAY = JData.DataType.ARRAY
  LIST = JData.DataType.LIST
  MAP = JData.DataType.MAP
def __init__(self, capacity=16):
self._data = Proton.data(capacity)
def __del__(self):
if hasattr(self, "_data"):
pn_data_free(self._data)
del self._data
def clear(self):
self._data.clear()
def rewind(self):
self._data.rewind()
def next(self):
return self._data.next()
def prev(self):
return self._data.prev()
def enter(self):
return self._data.enter()
def exit(self):
return self._data.exit()
def lookup(self, name):
return self._data.lookup(name)
def narrow(self):
self._data.narrow()
def widen(self):
self._data.widen()
def type(self):
return self._data.type()
def encode(self):
b = self._data.encode()
return b.getArray().tostring()[b.getArrayOffset():b.getLength()]
def decode(self, encoded):
return self._data.decode(ByteBuffer.wrap(encoded))
def put_list(self):
self._data.putList()
def put_map(self):
self._data.putMap()
def put_array(self, described, element_type):
self._data.putArray(described, element_type)
def put_described(self):
self._data.putDescribed()
def put_null(self):
self._data.putNull()
def put_bool(self, b):
self._data.putBoolean(b)
def put_ubyte(self, ub):
self._data.putUnsignedByte(UnsignedByte.valueOf(ub))
def put_byte(self, b):
self._data.putByte(b)
def put_ushort(self, us):
self._data.putUnsignedShort(UnsignedShort.valueOf(us))
def put_short(self, s):
self._data.putShort(s)
def put_uint(self, ui):
self._data.putUnsignedInteger(UnsignedInteger.valueOf(ui))
def put_int(self, i):
self._data.putInt(i)
def put_char(self, c):
self._data.putChar(ord(c))
def put_ulong(self, ul):
self._data.putUnsignedLong(UnsignedLong.valueOf(ul))
def put_long(self, l):
self._data.putLong(l)
def put_timestamp(self, t):
self._data.putTimestamp(JDate(t))
def put_float(self, f):
self._data.putFloat(f)
def put_double(self, d):
self._data.putDouble(d)
def put_decimal32(self, d):
self._data.putDecimal32(Decimal32(d))
def put_decimal64(self, d):
self._data.putDecimal64(Decimal64(d))
def put_decimal128(self, d):
self._data.putDecimal128(Decimal128(d))
def put_uuid(self, u):
u = JUUID.fromString( str(u) )
self._data.putUUID(u)
def put_binary(self, b):
self._data.putBinary(b)
def put_string(self, s):
self._data.putString(s)
def put_symbol(self, s):
self._data.putSymbol(Symbol.valueOf(s))
def get_list(self):
return self._data.getList()
def get_map(self):
return self._data.getMap()
def get_array(self):
count = self._data.getArray()
described = self._data.isArrayDescribed()
type = self._data.getArrayType()
return count, described, type
def is_described(self):
return self._data.isDescribed()
def is_null(self):
return self._data.isNull()
def get_bool(self):
return self._data.getBoolean()
def get_ubyte(self):
return self._data.getUnsignedByte().shortValue()
def get_byte(self):
return self._data.getByte()
def get_ushort(self):
return self._data.getUnsignedShort().intValue()
def get_short(self):
return self._data.getShort()
def get_int(self):
return self._data.getInt()
def get_uint(self):
return self._data.getUnsignedInteger().longValue()
def get_char(self):
return char(unichr(self._data.getChar()))
def get_ulong(self):
return ulong(self._data.getUnsignedLong().longValue())
def get_long(self):
return self._data.getLong()
def get_timestamp(self):
return self._data.getTimestamp().getTime()
def get_float(self):
return self._data.getFloat()
def get_double(self):
return self._data.getDouble()
def get_decimal32(self):
return self._data.getDecimal32().getBits()
def get_decimal64(self):
return self._data.getDecimal64().getBits()
def get_decimal128(self):
return self._data.getDecimal128().asBytes().tostring()
def get_uuid(self):
return UUID(self._data.getUUID().toString() )
def get_binary(self):
b = self._data.getBinary()
return b.getArray().tostring()[b.getArrayOffset():b.getArrayOffset()+b.getLength()]
def get_string(self):
return self._data.getString()
def get_symbol(self):
return symbol(self._data.getSymbol().toString())
def put_dict(self, d):
self.put_map()
self.enter()
try:
for k, v in d.items():
self.put_object(k)
self.put_object(v)
finally:
self.exit()
def get_dict(self):
if self.enter():
try:
result = {}
while self.next():
k = self.get_object()
if self.next():
v = self.get_object()
else:
v = None
result[k] = v
finally:
self.exit()
return result
def put_sequence(self, s):
self.put_list()
self.enter()
try:
for o in s:
self.put_object(o)
finally:
self.exit()
def get_sequence(self):
if self.enter():
try:
result = []
while self.next():
result.append(self.get_object())
finally:
self.exit()
return result
def get_py_described(self):
if self.enter():
try:
self.next()
descriptor = self.get_object()
self.next()
value = self.get_object()
finally:
self.exit()
return Described(descriptor, value)
def put_py_described(self, d):
self.put_described()
self.enter()
try:
self.put_object(d.descriptor)
self.put_object(d.value)
finally:
self.exit()
def get_py_array(self):
count, described, type = self.get_array()
if self.enter():
try:
if described:
self.next()
descriptor = self.get_object()
else:
descriptor = UNDESCRIBED
elements = []
while self.next():
elements.append(self.get_object())
finally:
self.exit()
return Array(descriptor, type, *elements)
def put_py_array(self, a):
self.put_array(a.descriptor != UNDESCRIBED, a.type)
self.enter()
try:
for e in a.elements:
self.put_object(e)
finally:
self.exit()
put_mappings = {
None.__class__: lambda s, _: s.put_null(),
bool: put_bool,
dict: put_dict,
list: put_sequence,
tuple: put_sequence,
unicode: put_string,
bytes: put_binary,
symbol: put_symbol,
int: put_int,
char: put_char,
long: put_long,
ulong: put_ulong,
timestamp: put_timestamp,
float: put_double,
uuid.UUID: put_uuid,
Described: put_py_described,
Array: put_py_array
}
get_mappings = {
NULL: lambda s: None,
BOOL: get_bool,
BYTE: get_byte,
UBYTE: get_ubyte,
SHORT: get_short,
USHORT: get_ushort,
INT: get_int,
UINT: get_uint,
CHAR: get_char,
LONG: get_long,
ULONG: get_ulong,
TIMESTAMP: get_timestamp,
FLOAT: get_float,
DOUBLE: get_double,
DECIMAL32: get_decimal32,
DECIMAL64: get_decimal64,
DECIMAL128: get_decimal128,
UUID: get_uuid,
BINARY: get_binary,
STRING: get_string,
SYMBOL: get_symbol,
DESCRIBED: get_py_described,
ARRAY: get_py_array,
LIST: get_sequence,
MAP: get_dict
}
def put_object(self, obj):
putter = self.put_mappings[obj.__class__]
putter(self, obj)
def get_object(self):
type = self.type()
if type is None: return None
getter = self.get_mappings.get(type)
if getter:
return getter(self)
else:
self.dump()
return UnmappedType(str(type))
def copy(self, src):
self._data.copy(src._data)
def format(self):
return self._data.toString()
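# Illustrative helper (an addition for exposition): round-trip a Python value
# through the Data codec above. Only methods of this Data class are used; the
# rewind/next positioning follows how get_object expects the cursor to sit on
# a decoded node.
def _example_data_roundtrip(value):
  """Encode `value` to AMQP bytes with Data and decode it back."""
  encoder = Data()
  encoder.put_object(value)    # dispatches through put_mappings
  encoded = encoder.encode()
  decoder = Data()
  decoder.decode(encoded)
  decoder.rewind()
  decoder.next()               # position the cursor on the decoded value
  return decoder.get_object()  # dispatches through get_mappings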
class Messenger(object):
def __init__(self, name=None):
if name:
self.impl = Proton.messenger(name)
else:
self.impl = Proton.messenger()
def route(self, pattern, address):
self.impl.route(pattern, address)
def rewrite(self, pattern, address):
self.impl.rewrite(pattern, address)
def start(self):
self.impl.start()
def stop(self):
self.impl.stop()
@property
def stopped(self):
return self.impl.stopped()
def subscribe(self, source):
self.impl.subscribe(source)
def put(self, message):
self.impl.put(message.impl)
return self.impl.outgoingTracker()
def send(self, n=-1):
self.impl.send(n)
def recv(self, n=-1):
self.impl.recv(n)
@property
def receiving(self):
return self.impl.receiving()
def work(self, timeout=None):
if timeout is None:
t = -1
else:
t = long(1000*timeout)
try:
err = self.impl.work(t)
except Timeout, e:
return False
return err
def interrupt(self):
self.impl.interrupt()
def get(self, message=None):
result = self.impl.get()
if message and result:
message.impl = result
return self.impl.incomingTracker()
@property
def outgoing(self):
return self.impl.outgoing()
@property
def incoming(self):
return self.impl.incoming()
def _get_timeout(self):
t = self.impl.getTimeout()
if t == -1:
return None
else:
return float(t)/1000
def _set_timeout(self, timeout):
if timeout is None:
t = -1
else:
t = long(1000*timeout)
self.impl.setTimeout(t)
timeout = property(_get_timeout, _set_timeout)
def _is_blocking(self):
return self.impl.isBlocking()
def _set_blocking(self, b):
self.impl.setBlocking(b)
blocking = property(_is_blocking, _set_blocking)
def accept(self, tracker=None):
if tracker is None:
tracker = self.impl.incomingTracker()
flags = self.impl.CUMULATIVE
else:
flags = 0
self.impl.accept(tracker, flags)
def reject(self, tracker=None):
if tracker is None:
tracker = self.impl.incomingTracker()
flags = self.impl.CUMULATIVE
else:
flags = 0
self.impl.reject(tracker, flags)
def settle(self, tracker=None):
if tracker is None:
tracker = self.impl.outgoingTracker()
flags = self.impl.CUMULATIVE
else:
flags = 0
self.impl.settle(tracker, flags)
def status(self, tracker):
return STATUSES[self.impl.getStatus(tracker)]
def _get_incoming_window(self):
return self.impl.getIncomingWindow()
def _set_incoming_window(self, window):
self.impl.setIncomingWindow(window)
incoming_window = property(_get_incoming_window, _set_incoming_window)
def _get_outgoing_window(self):
return self.impl.getOutgoingWindow()
def _set_outgoing_window(self, window):
self.impl.setOutgoingWindow(window)
outgoing_window = property(_get_outgoing_window, _set_outgoing_window)
def _get_certificate(self):
raise Skipped()
def _set_certificate(self, xxx):
raise Skipped()
certificate = property(_get_certificate, _set_certificate)
def buffered(self, tracker):
raise Skipped()
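# Illustrative helper (an addition for exposition): the basic blocking
# Messenger send path using only methods wrapped above. The address string is
# whatever AMQP address the application targets; Message is defined just below
# and is resolved when the helper is called.
def _example_messenger_send(address, body):
  """Send one message to `address` and return its outgoing tracker."""
  mng = Messenger()
  mng.start()
  try:
    msg = Message()
    msg.address = address
    msg.body = body
    tracker = mng.put(msg)
    mng.send()               # blocks until buffered messages are on the wire
    return tracker
  finally:
    mng.stop()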
class Message(object):
AMQP = MessageFormat.AMQP
TEXT = MessageFormat.TEXT
DATA = MessageFormat.DATA
JSON = MessageFormat.JSON
DEFAULT_PRIORITY = JMessage.DEFAULT_PRIORITY
def __init__(self):
self.impl = Proton.message()
def clear(self):
self.impl.clear()
def save(self):
saved = self.impl.save()
if saved is None:
saved = ""
elif not isinstance(saved, unicode):
saved = saved.tostring()
return saved
def load(self, data):
self.impl.load(data)
def encode(self):
size = 1024
output = zeros(size, "b")
while True:
n = self.impl.encode(output, 0, size)
# XXX: need to check for overflow
if n > 0:
return output.tostring()[:n]
else:
raise Exception(n)
def decode(self, data):
self.impl.decode(data,0,len(data))
def _get_id(self):
id = self.impl.getMessageId()
if isinstance(id, JUUID):
id = UUID( id.toString() )
return id
def _set_id(self, value):
if isinstance(value, UUID):
value = JUUID.fromString( str(value) )
return self.impl.setMessageId(value)
id = property(_get_id, _set_id)
def _get_correlation_id(self):
id = self.impl.getCorrelationId()
if isinstance(id, JUUID):
id = UUID( id.toString() )
return id
def _set_correlation_id(self, value):
if isinstance(value, UUID):
value = JUUID.fromString( str(value) )
return self.impl.setCorrelationId(value)
correlation_id = property(_get_correlation_id, _set_correlation_id)
def _get_ttl(self):
return self.impl.getTtl()
def _set_ttl(self, ttl):
self.impl.setTtl(ttl)
ttl = property(_get_ttl, _set_ttl)
def _get_priority(self):
return self.impl.getPriority()
def _set_priority(self, priority):
self.impl.setPriority(priority)
priority = property(_get_priority, _set_priority)
def _get_address(self):
return self.impl.getAddress()
def _set_address(self, address):
self.impl.setAddress(address)
address = property(_get_address, _set_address)
def _get_subject(self):
return self.impl.getSubject()
def _set_subject(self, subject):
self.impl.setSubject(subject)
subject = property(_get_subject, _set_subject)
def _get_user_id(self):
u = self.impl.getUserId()
if u is None: return ""
else: return u.tostring()
def _set_user_id(self, user_id):
self.impl.setUserId(user_id)
user_id = property(_get_user_id, _set_user_id)
def _get_reply_to(self):
return self.impl.getReplyTo()
def _set_reply_to(self, reply_to):
self.impl.setReplyTo(reply_to)
reply_to = property(_get_reply_to, _set_reply_to)
def _get_reply_to_group_id(self):
return self.impl.getReplyToGroupId()
def _set_reply_to_group_id(self, reply_to_group_id):
self.impl.setReplyToGroupId(reply_to_group_id)
reply_to_group_id = property(_get_reply_to_group_id, _set_reply_to_group_id)
def _get_group_id(self):
return self.impl.getGroupId()
def _set_group_id(self, group_id):
self.impl.setGroupId(group_id)
group_id = property(_get_group_id, _set_group_id)
def _get_group_sequence(self):
return self.impl.getGroupSequence()
def _set_group_sequence(self, group_sequence):
self.impl.setGroupSequence(group_sequence)
group_sequence = property(_get_group_sequence, _set_group_sequence)
def _is_first_acquirer(self):
return self.impl.isFirstAcquirer()
def _set_first_acquirer(self, b):
self.impl.setFirstAcquirer(b)
first_acquirer = property(_is_first_acquirer, _set_first_acquirer)
def _get_expiry_time(self):
return self.impl.getExpiryTime()
def _set_expiry_time(self, expiry_time):
self.impl.setExpiryTime(expiry_time)
expiry_time = property(_get_expiry_time, _set_expiry_time)
def _is_durable(self):
return self.impl.isDurable()
def _set_durable(self, durable):
self.impl.setDurable(durable)
durable = property(_is_durable, _set_durable)
def _get_delivery_count(self):
return self.impl.getDeliveryCount()
def _set_delivery_count(self, delivery_count):
self.impl.setDeliveryCount(delivery_count)
delivery_count = property(_get_delivery_count, _set_delivery_count)
def _get_creation_time(self):
return self.impl.getCreationTime()
def _set_creation_time(self, creation_time):
self.impl.setCreationTime(creation_time)
creation_time = property(_get_creation_time, _set_creation_time)
def _get_content_type(self):
return self.impl.getContentType()
def _set_content_type(self, content_type):
self.impl.setContentType(content_type)
content_type = property(_get_content_type, _set_content_type)
def _get_content_encoding(self):
return self.impl.getContentEncoding()
def _set_content_encoding(self, content_encoding):
self.impl.setContentEncoding(content_encoding)
content_encoding = property(_get_content_encoding, _set_content_encoding)
def _get_format(self):
return self.impl.getFormat()
def _set_format(self, format):
self.impl.setMessageFormat(format)
format = property(_get_format, _set_format)
def _get_body(self):
body = self.impl.getBody()
if isinstance(body, AmqpValue):
return body.getValue()
else:
return body
def _set_body(self, body):
self.impl.setBody(AmqpValue(body))
body = property(_get_body, _set_body)
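# Illustrative helper (an addition for exposition): a wire round-trip of a
# Message body using the encode/decode methods defined above (suitable for
# small bodies, given the fixed 1024-byte buffer in encode).
def _example_message_roundtrip(body):
  """Encode a Message carrying `body` and decode it into a fresh Message."""
  outgoing = Message()
  outgoing.body = body
  wire_bytes = outgoing.encode()
  incoming = Message()
  incoming.decode(wire_bytes)
  return incoming.body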
class SASL(object):
OK = Sasl.PN_SASL_OK
AUTH = Sasl.PN_SASL_AUTH
def __new__(cls, transport):
"""Enforce a singleton SASL object per Transport"""
if not transport._sasl:
obj = super(SASL, cls).__new__(cls)
obj._sasl = transport.impl.sasl()
transport._sasl = obj
return transport._sasl
def mechanisms(self, mechanisms):
self._sasl.setMechanisms(mechanisms.split())
def client(self):
self._sasl.client()
def server(self):
self._sasl.server()
def send(self, data):
self._sasl.send(data, 0, len(data))
def recv(self):
size = 4096
output = zeros(size, "b")
n = self._sasl.recv(output, 0, size)
if n >= 0:
return output.tostring()[:n]
elif n == JTransport.END_OF_STREAM:
return None
else:
raise Exception(n)
def _get_outcome(self):
value = self._sasl.getOutcome()
if value == Sasl.PN_SASL_NONE:
return None
else:
return value
def _set_outcome(self, outcome):
    self._sasl.setOutcome(outcome)
outcome = property(_get_outcome, _set_outcome)
def done(self, outcome):
self._sasl.done(outcome)
def plain(self, user, password):
self._sasl.plain(user,password)
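# Illustrative helper (an addition for exposition): typical client-side PLAIN
# authentication with the SASL wrapper above; the credentials are placeholders
# supplied by the caller.
def _example_sasl_plain(transport, user, password):
  """Configure PLAIN client authentication on `transport`."""
  sasl = transport.sasl()    # singleton per transport (see Transport.sasl)
  sasl.client()
  sasl.plain(user, password)
  return sasl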
class SSLException(Exception):
pass
class SSLUnavailable(SSLException):
pass
class SSLDomain(object):
MODE_SERVER = JSslDomain.Mode.SERVER
MODE_CLIENT = JSslDomain.Mode.CLIENT
VERIFY_PEER = JSslDomain.VerifyMode.VERIFY_PEER
VERIFY_PEER_NAME = JSslDomain.VerifyMode.VERIFY_PEER_NAME
ANONYMOUS_PEER = JSslDomain.VerifyMode.ANONYMOUS_PEER
def __init__(self, mode):
try:
self._domain = Proton.sslDomain()
except NoClassDefFoundError, e:
raise SSLUnavailable()
self._domain.init(mode)
def set_credentials(self, cert_file, key_file, password):
self._domain.setCredentials(cert_file, key_file, password)
def set_trusted_ca_db(self, certificate_db):
self._domain.setTrustedCaDb(certificate_db)
def set_peer_authentication(self, verify_mode, trusted_CAs=None):
# TODO the method calls (setTrustedCaDb/setPeerAuthentication) have to occur in
# that order otherwise tests fail with proton-jni. It is not clear yet why.
if trusted_CAs is not None:
self._domain.setTrustedCaDb(trusted_CAs)
self._domain.setPeerAuthentication(verify_mode)
def allow_unsecured_client(self, allow_unsecured = True):
self._domain.allowUnsecuredClient(allow_unsecured)
class SSLSessionDetails(object):
def __init__(self, session_id):
self._session_details = Proton.sslPeerDetails(session_id, 1)
class SSL(object):
def __new__(cls, transport, domain, session_details=None):
"""Enforce a singleton SSL object per Transport"""
if transport._ssl:
# unfortunately, we've combined the allocation and the configuration in a
# single step. So catch any attempt by the application to provide what
# may be a different configuration than the original (hack)
ssl = transport._ssl
if (domain and (ssl._domain is not domain) or
session_details and (ssl._session_details is not session_details)):
raise SSLException("Cannot re-configure existing SSL object!")
else:
obj = super(SSL, cls).__new__(cls)
obj._domain = domain
obj._session_details = session_details
internal_session_details = None
if session_details:
internal_session_details = session_details._session_details
obj._ssl = transport.impl.ssl(domain._domain, internal_session_details)
transport._ssl = obj
return transport._ssl
def __init__(self, transport, domain, session_details=None):
internal_session_details = None
if session_details:
internal_session_details = session_details._session_details
self._ssl = transport.impl.ssl(domain._domain, internal_session_details)
self._session_details = session_details
def get_session_details(self):
return self._session_details
def cipher_name(self):
return self._ssl.getCipherName()
def protocol_name(self):
return self._ssl.getProtocolName()
def _set_peer_hostname(self, hostname):
self._ssl.setPeerHostname(hostname)
def _get_peer_hostname(self):
return self._ssl.getPeerHostname()
peer_hostname = property(_get_peer_hostname, _set_peer_hostname)
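# Illustrative helper (an addition for exposition): client-side SSL setup with
# the SSLDomain/SSL wrappers above. The CA database path is a placeholder.
def _example_ssl_client(transport, ca_db="ca-certificates.pem"):
  """Create a client SSLDomain, trust `ca_db`, and attach SSL to `transport`."""
  domain = SSLDomain(SSLDomain.MODE_CLIENT)
  domain.set_trusted_ca_db(ca_db)
  domain.set_peer_authentication(SSLDomain.VERIFY_PEER)
  return transport.ssl(domain)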
class Driver(object):
""" Proton-c platform abstraction - not needed."""
def __init__(self, *args, **kwargs):
raise ProtonUnsupportedOperationException("Driver")
class Connector(object):
""" Proton-c platform abstraction - not needed."""
def __init__(self, *args, **kwargs):
raise ProtonUnsupportedOperationException("Connector")
class Listener(object):
""" Proton-c platform abstraction - not needed."""
def __init__(self, *args, **kwargs):
raise ProtonUnsupportedOperationException("Listener")
def convertToPyArray(t,a,f):
if a == None or len(a) == 0:
return None
return Array(UNDESCRIBED, t, *map(f,a))
arrayElementMappings = {
JData.DataType.SYMBOL: lambda s: Symbol.valueOf(s)
}
arrayTypeMappings = {
JData.DataType.SYMBOL: Symbol
}
conversions_J2PY = {
dict: lambda d: dict([(J2PY(k), J2PY(v)) for k, v in d.items()]),
HashMap: lambda m: dict([(J2PY(e.getKey()), J2PY(e.getValue())) for e in m.entrySet()]),
list: lambda l: [J2PY(x) for x in l],
Symbol: lambda s: symbol(s.toString()),
UnsignedInteger: lambda n: n.longValue(),
UnsignedLong: lambda n: n.longValue()
}
conversions_PY2J = {
dict: lambda d: dict([(PY2J(k), PY2J(v)) for k, v in d.items()]),
list: lambda l: [PY2J(x) for x in l],
symbol: lambda s: Symbol.valueOf(s),
Array: lambda a: array(map(arrayElementMappings[a.type], a.elements),
arrayTypeMappings[a.type])
}
def identity(x): return x
def J2PY(obj):
result = conversions_J2PY.get(type(obj), identity)(obj)
return result
def PY2J(obj):
result = conversions_PY2J.get(type(obj), identity)(obj)
return result
__all__ = [
"ACCEPTED",
"Array",
"API_LANGUAGE",
"IMPLEMENTATION_LANGUAGE",
"MANUAL",
"PENDING",
"REJECTED",
"RELEASED",
"SETTLED",
"char",
"Condition",
"Connection",
"Connector",
"Data",
"Delivery",
"Disposition",
"Described",
"Driver",
"Endpoint",
"Link",
"Listener",
"Message",
"MessageException",
"Messenger",
"MessengerException",
"ProtonException",
"Receiver",
"SASL",
"Sender",
"Session",
"SSL",
"SSLDomain",
"SSLException",
"SSLSessionDetails",
"SSLUnavailable",
"symbol",
"timestamp",
"Terminus",
"Timeout",
"Interrupt",
"Transport",
"TransportException",
"ulong",
"UNDESCRIBED"]
| bbcarchdev/qpid-proton | proton-j/proton-api/src/main/resources/proton.py | Python | apache-2.0 | 48,151 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Built-in loss functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.framework import smart_cond
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils import losses_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops.losses import losses_impl
from tensorflow.python.ops.losses import util as tf_losses_util
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls
@keras_export('keras.losses.Loss')
class Loss(object):
"""Loss base class.
To be implemented by subclasses:
* `call()`: Contains the logic for loss calculation using `y_true`, `y_pred`.
Example subclass implementation:
```python
class MeanSquaredError(Loss):
def call(self, y_true, y_pred):
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
return K.mean(math_ops.square(y_pred - y_true), axis=-1)
```
When used with `tf.distribute.Strategy`, outside of built-in training loops
such as `tf.keras` `compile` and `fit`, please use 'SUM' or 'NONE' reduction
types, and reduce losses explicitly in your training loop. Using 'AUTO' or
'SUM_OVER_BATCH_SIZE' will raise an error.
Please see
https://www.tensorflow.org/tutorials/distribute/custom_training for more
details on this.
You can implement 'SUM_OVER_BATCH_SIZE' using global batch size like:
```python
with strategy.scope():
loss_obj = tf.keras.losses.CategoricalCrossentropy(
reduction=tf.keras.losses.Reduction.NONE)
....
loss = (tf.reduce_sum(loss_obj(labels, predictions)) *
(1. / global_batch_size))
```
Args:
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
Default value is `AUTO`. `AUTO` indicates that the reduction option will
be determined by the usage context. For almost all cases this defaults to
`SUM_OVER_BATCH_SIZE`.
When used with `tf.distribute.Strategy`, outside of built-in training
loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
`SUM_OVER_BATCH_SIZE` will raise an error. Please see
https://www.tensorflow.org/tutorials/distribute/custom_training
for more details on this.
name: Optional name for the op.
"""
def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name=None):
losses_utils.ReductionV2.validate(reduction)
self.reduction = reduction
self.name = name
# SUM_OVER_BATCH is only allowed in losses managed by `fit` or
# CannedEstimators.
self._allow_sum_over_batch_size = False
def __call__(self, y_true, y_pred, sample_weight=None):
"""Invokes the `Loss` instance.
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`, except
sparse loss functions such as sparse categorical crossentropy where
shape = `[batch_size, d0, .. dN-1]`
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`
sample_weight: Optional `sample_weight` acts as a
coefficient for the loss. If a scalar is provided, then the loss is
simply scaled by the given value. If `sample_weight` is a tensor of size
`[batch_size]`, then the total loss for each sample of the batch is
rescaled by the corresponding element in the `sample_weight` vector. If
the shape of `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be
broadcasted to this shape), then each loss element of `y_pred` is scaled
        by the corresponding value of `sample_weight`. (Note on `dN-1`: all loss
functions reduce by 1 dimension, usually axis=-1.)
Returns:
Weighted loss float `Tensor`. If `reduction` is `NONE`, this has
shape `[batch_size, d0, .. dN-1]`; otherwise, it is scalar. (Note `dN-1`
because all loss functions reduce by 1 dimension, usually axis=-1.)
Raises:
ValueError: If the shape of `sample_weight` is invalid.
"""
# If we are wrapping a lambda function strip '<>' from the name as it is not
# accepted in scope name.
scope_name = 'lambda' if self.name == '<lambda>' else self.name
graph_ctx = tf_utils.graph_context_for_symbolic_tensors(
y_true, y_pred, sample_weight)
with K.name_scope(scope_name or self.__class__.__name__), graph_ctx:
losses = self.call(y_true, y_pred)
return losses_utils.compute_weighted_loss(
losses, sample_weight, reduction=self._get_reduction())
@classmethod
def from_config(cls, config):
"""Instantiates a `Loss` from its config (output of `get_config()`).
Args:
config: Output of `get_config()`.
Returns:
A `Loss` instance.
"""
return cls(**config)
def get_config(self):
return {'reduction': self.reduction, 'name': self.name}
@abc.abstractmethod
@doc_controls.for_subclass_implementers
def call(self, y_true, y_pred):
"""Invokes the `Loss` instance.
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`, except
sparse loss functions such as sparse categorical crossentropy where
shape = `[batch_size, d0, .. dN-1]`
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`
Returns:
Loss values with the shape `[batch_size, d0, .. dN-1]`.
"""
    raise NotImplementedError('Must be implemented in subclasses.')
def _get_reduction(self):
"""Handles `AUTO` reduction cases and returns the reduction value."""
if (not self._allow_sum_over_batch_size and
distribution_strategy_context.has_strategy() and
(self.reduction == losses_utils.ReductionV2.AUTO or
self.reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE)):
raise ValueError(
'Please use `tf.keras.losses.Reduction.SUM` or '
'`tf.keras.losses.Reduction.NONE` for loss reduction when losses are '
'used with `tf.distribute.Strategy` outside of the built-in training '
'loops. You can implement '
'`tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` using global batch '
'size like:\n```\nwith strategy.scope():\n'
' loss_obj = tf.keras.losses.CategoricalCrossentropy('
'reduction=tf.keras.losses.Reduction.NONE)\n....\n'
' loss = tf.reduce_sum(loss_obj(labels, predictions)) * '
'(1. / global_batch_size)\n```\nPlease see '
'https://www.tensorflow.org/tutorials/distribute/custom_training'
' for more details.')
if self.reduction == losses_utils.ReductionV2.AUTO:
return losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE
return self.reduction
class LossFunctionWrapper(Loss):
"""Wraps a loss function in the `Loss` class.
Args:
fn: The loss function to wrap, with signature `fn(y_true, y_pred,
**kwargs)`.
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
Default value is `AUTO`. `AUTO` indicates that the reduction option will
be determined by the usage context. For almost all cases this defaults to
`SUM_OVER_BATCH_SIZE`.
When used with `tf.distribute.Strategy`, outside of built-in training
loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
`SUM_OVER_BATCH_SIZE` will raise an error. Please see
https://www.tensorflow.org/tutorials/distribute/custom_training
for more details on this.
name: (Optional) name for the loss.
**kwargs: The keyword arguments that are passed on to `fn`.
"""
def __init__(self,
fn,
reduction=losses_utils.ReductionV2.AUTO,
name=None,
**kwargs):
super(LossFunctionWrapper, self).__init__(reduction=reduction, name=name)
self.fn = fn
self._fn_kwargs = kwargs
def call(self, y_true, y_pred):
"""Invokes the `LossFunctionWrapper` instance.
Args:
y_true: Ground truth values.
y_pred: The predicted values.
Returns:
Loss values per sample.
"""
if tensor_util.is_tensor(y_pred) and tensor_util.is_tensor(y_true):
y_pred, y_true = tf_losses_util.squeeze_or_expand_dimensions(
y_pred, y_true)
return self.fn(y_true, y_pred, **self._fn_kwargs)
def get_config(self):
config = {}
for k, v in six.iteritems(self._fn_kwargs):
config[k] = K.eval(v) if tf_utils.is_tensor_or_variable(v) else v
base_config = super(LossFunctionWrapper, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
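# Illustrative sketch (not part of the public API): `LossFunctionWrapper` can
# wrap any callable with signature `fn(y_true, y_pred, **kwargs)`; the extra
# keyword arguments are stored in `_fn_kwargs` and forwarded on every call.
# The scaling factor below is an assumption chosen purely for demonstration.
def _example_scaled_mse(scale=0.5):
  """Returns a `Loss` computing `scale * mean_squared_error`."""
  def scaled_mse(y_true, y_pred, scale):
    return scale * mean_squared_error(y_true, y_pred)
  return LossFunctionWrapper(scaled_mse, name='scaled_mse', scale=scale)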
@keras_export('keras.losses.MeanSquaredError')
class MeanSquaredError(LossFunctionWrapper):
"""Computes the mean of squares of errors between labels and predictions.
`loss = square(y_true - y_pred)`
Usage:
>>> mse = tf.keras.losses.MeanSquaredError()
>>> loss = mse([[0., 1.], [0., 0.]], [[1., 1.], [1., 0.]])
>>> loss.numpy()
0.5
>>> loss = mse([[0., 1.], [0., 0.]], [[1., 1.], [1., 0.]],
... sample_weight=[0.7, 0.3])
>>> loss.numpy()
0.25
Usage with the `compile` API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.MeanSquaredError())
```
"""
def __init__(self,
reduction=losses_utils.ReductionV2.AUTO,
name='mean_squared_error'):
super(MeanSquaredError, self).__init__(
mean_squared_error, name=name, reduction=reduction)
@keras_export('keras.losses.MeanAbsoluteError')
class MeanAbsoluteError(LossFunctionWrapper):
"""Computes the mean of absolute difference between labels and predictions.
`loss = abs(y_true - y_pred)`
Usage:
>>> mae = tf.keras.losses.MeanAbsoluteError()
>>> loss = mae([[0., 1.], [0., 0.]], [[1., 1.], [1., 0.]])
>>> loss.numpy()
0.5
>>> loss = mae([[0., 1.], [0., 0.]], [[1., 1.], [1., 0.]],
... sample_weight=[0.7, 0.3])
>>> loss.numpy()
0.25
Usage with the `compile` API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.MeanAbsoluteError())
```
"""
def __init__(self,
reduction=losses_utils.ReductionV2.AUTO,
name='mean_absolute_error'):
super(MeanAbsoluteError, self).__init__(
mean_absolute_error, name=name, reduction=reduction)
@keras_export('keras.losses.MeanAbsolutePercentageError')
class MeanAbsolutePercentageError(LossFunctionWrapper):
"""Computes the mean absolute percentage error between `y_true` and `y_pred`.
`loss = 100 * abs(y_true - y_pred) / y_true`
Usage:
>>> mape = tf.keras.losses.MeanAbsolutePercentageError()
>>> loss = mape([[0., 1.], [0., 0.]], [[1., 1.], [1., 0.]])
>>> loss.numpy()
500000000.0
>>> loss = mape([[0., 1.], [0., 0.]], [[1., 1.], [1., 0.]],
... sample_weight=[0.7, 0.3])
>>> loss.numpy()
250000000.0
Usage with the `compile` API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.MeanAbsolutePercentageError())
```
"""
def __init__(self,
reduction=losses_utils.ReductionV2.AUTO,
name='mean_absolute_percentage_error'):
super(MeanAbsolutePercentageError, self).__init__(
mean_absolute_percentage_error, name=name, reduction=reduction)
@keras_export('keras.losses.MeanSquaredLogarithmicError')
class MeanSquaredLogarithmicError(LossFunctionWrapper):
"""Computes the mean squared logarithmic error between `y_true` and `y_pred`.
`loss = square(log(y_true) - log(y_pred))`
Usage:
>>> msle = tf.keras.losses.MeanSquaredLogarithmicError()
>>> loss = msle([[0., 1.], [0., 0.]], [[1., 1.], [1., 0.]])
>>> loss.numpy()
0.24022643
>>> loss = msle([[0., 1.], [0., 0.]], [[1., 1.], [1., 0.]],
... sample_weight=[0.7, 0.3])
>>> loss.numpy()
0.12011322
Usage with the `compile` API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.MeanSquaredLogarithmicError())
```
"""
def __init__(self,
reduction=losses_utils.ReductionV2.AUTO,
name='mean_squared_logarithmic_error'):
super(MeanSquaredLogarithmicError, self).__init__(
mean_squared_logarithmic_error, name=name, reduction=reduction)
@keras_export('keras.losses.BinaryCrossentropy')
class BinaryCrossentropy(LossFunctionWrapper):
"""Computes the cross-entropy loss between true labels and predicted labels.
Use this cross-entropy loss when there are only two label classes (assumed to
be 0 and 1). For each example, there should be a single floating-point value
per prediction.
In the snippet below, each of the four examples has only a single
  floating-point value, and both `y_pred` and `y_true` have the shape
`[batch_size]`.
Usage:
>>> bce = tf.keras.losses.BinaryCrossentropy()
>>> loss = bce([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> loss.numpy()
0.81492424
>>> loss = bce([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> loss.numpy()
0.45814526
Usage with the `tf.keras` API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.BinaryCrossentropy())
```
Args:
from_logits: Whether to interpret `y_pred` as a tensor of
[logit](https://en.wikipedia.org/wiki/Logit) values. By default, we assume
that `y_pred` contains probabilities (i.e., values in [0, 1]).
Note: Using from_logits=True may be more numerically stable.
label_smoothing: Float in [0, 1]. When 0, no smoothing occurs. When > 0, we
compute the loss between the predicted labels and a smoothed version of
the true labels, where the smoothing squeezes the labels towards 0.5.
Larger values of `label_smoothing` correspond to heavier smoothing.
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
Default value is `AUTO`. `AUTO` indicates that the reduction option will
be determined by the usage context. For almost all cases this defaults to
`SUM_OVER_BATCH_SIZE`.
When used with `tf.distribute.Strategy`, outside of built-in training
loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
`SUM_OVER_BATCH_SIZE` will raise an error. Please see
https://www.tensorflow.org/tutorials/distribute/custom_training
for more details on this.
name: (Optional) Name for the op.
"""
def __init__(self,
from_logits=False,
label_smoothing=0,
reduction=losses_utils.ReductionV2.AUTO,
name='binary_crossentropy'):
super(BinaryCrossentropy, self).__init__(
binary_crossentropy,
name=name,
reduction=reduction,
from_logits=from_logits,
label_smoothing=label_smoothing)
self.from_logits = from_logits
@keras_export('keras.losses.CategoricalCrossentropy')
class CategoricalCrossentropy(LossFunctionWrapper):
"""Computes the crossentropy loss between the labels and predictions.
Use this crossentropy loss function when there are two or more label classes.
We expect labels to be provided in a `one_hot` representation. If you want to
provide labels as integers, please use `SparseCategoricalCrossentropy` loss.
There should be `# classes` floating point values per feature.
  In the snippet below, there are `# classes` floating point values per
  example. The shape of both `y_pred` and `y_true` is
  `[batch_size, num_classes]`.
Usage:
>>> cce = tf.keras.losses.CategoricalCrossentropy()
>>> loss = cce([[0, 1, 0], [0, 0, 1]],
... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
>>> loss.numpy()
1.1769392
>>> loss = cce([[0, 1, 0], [0, 0, 1]],
... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]],
... sample_weight=tf.constant([0.3, 0.7]))
>>> loss.numpy()
0.8135988
Usage with the `compile` API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.CategoricalCrossentropy())
```
Args:
from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
we assume that `y_pred` encodes a probability distribution.
**Note: Using from_logits=True is more numerically stable.**
    label_smoothing: Float in [0, 1]. When > 0, label values are smoothed,
      meaning the confidence on label values is relaxed. For example,
      `label_smoothing=0.2` means that we will use a value of `0.1` for label
      `0` and `0.9` for label `1`.
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
Default value is `AUTO`. `AUTO` indicates that the reduction option will
be determined by the usage context. For almost all cases this defaults to
`SUM_OVER_BATCH_SIZE`.
When used with `tf.distribute.Strategy`, outside of built-in training
loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
`SUM_OVER_BATCH_SIZE` will raise an error. Please see
https://www.tensorflow.org/tutorials/distribute/custom_training
for more details on this.
name: Optional name for the op.
"""
def __init__(self,
from_logits=False,
label_smoothing=0,
reduction=losses_utils.ReductionV2.AUTO,
name='categorical_crossentropy'):
super(CategoricalCrossentropy, self).__init__(
categorical_crossentropy,
name=name,
reduction=reduction,
from_logits=from_logits,
label_smoothing=label_smoothing)
@keras_export('keras.losses.SparseCategoricalCrossentropy')
class SparseCategoricalCrossentropy(LossFunctionWrapper):
"""Computes the crossentropy loss between the labels and predictions.
Use this crossentropy loss function when there are two or more label classes.
We expect labels to be provided as integers. If you want to provide labels
using `one-hot` representation, please use `CategoricalCrossentropy` loss.
There should be `# classes` floating point values per feature for `y_pred`
and a single floating point value per feature for `y_true`.
In the snippet below, there is a single floating point value per example for
  `y_true` and `# classes` floating point values per example for `y_pred`.
The shape of `y_true` is `[batch_size]` and the shape of `y_pred` is
`[batch_size, num_classes]`.
Usage:
>>> scce = tf.keras.losses.SparseCategoricalCrossentropy()
>>> loss = scce([1, 2], [[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
>>> loss.numpy()
1.1769392
>>> loss = scce([1, 2], [[0.05, 0.95, 0], [0.1, 0.8, 0.1]],
... sample_weight=tf.constant([0.3, 0.7]))
>>> loss.numpy()
0.8135988
Usage with the `compile` API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.SparseCategoricalCrossentropy())
```
Args:
from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
we assume that `y_pred` encodes a probability distribution.
Note: Using from_logits=True may be more numerically stable.
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
Default value is `AUTO`. `AUTO` indicates that the reduction option will
be determined by the usage context. For almost all cases this defaults to
`SUM_OVER_BATCH_SIZE`.
When used with `tf.distribute.Strategy`, outside of built-in training
loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
`SUM_OVER_BATCH_SIZE` will raise an error. Please see
https://www.tensorflow.org/tutorials/distribute/custom_training
for more details on this.
name: Optional name for the op.
"""
def __init__(self,
from_logits=False,
reduction=losses_utils.ReductionV2.AUTO,
name='sparse_categorical_crossentropy'):
super(SparseCategoricalCrossentropy, self).__init__(
sparse_categorical_crossentropy,
name=name,
reduction=reduction,
from_logits=from_logits)
@keras_export('keras.losses.Hinge')
class Hinge(LossFunctionWrapper):
"""Computes the hinge loss between `y_true` and `y_pred`.
`loss = maximum(1 - y_true * y_pred, 0)`
`y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
provided we will convert them to -1 or 1.
Usage:
>>> h = tf.keras.losses.Hinge()
>>> loss = h([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> loss.numpy()
1.3
>>> loss = h([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]], sample_weight=[1, 0])
>>> loss.numpy()
0.55
Usage with the `compile` API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.Hinge())
```
"""
def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='hinge'):
super(Hinge, self).__init__(hinge, name=name, reduction=reduction)
@keras_export('keras.losses.SquaredHinge')
class SquaredHinge(LossFunctionWrapper):
"""Computes the squared hinge loss between `y_true` and `y_pred`.
`loss = square(maximum(1 - y_true * y_pred, 0))`
`y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
provided we will convert them to -1 or 1.
Usage:
>>> h = tf.keras.losses.SquaredHinge()
>>> loss = h([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> loss.numpy()
1.86
>>> loss = h([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]], sample_weight=[1, 0])
>>> loss.numpy()
0.73
Usage with the `compile` API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.SquaredHinge())
```
"""
def __init__(self,
reduction=losses_utils.ReductionV2.AUTO,
name='squared_hinge'):
super(SquaredHinge, self).__init__(
squared_hinge, name=name, reduction=reduction)
@keras_export('keras.losses.CategoricalHinge')
class CategoricalHinge(LossFunctionWrapper):
"""Computes the categorical hinge loss between `y_true` and `y_pred`.
`loss = maximum(neg - pos + 1, 0)`
  where `neg = maximum((1 - y_true) * y_pred)` and `pos = sum(y_true * y_pred)`
Usage:
>>> h = tf.keras.losses.CategoricalHinge()
>>> loss = h([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> loss.numpy()
1.4000001
>>> loss = h([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]], sample_weight=[1, 0])
>>> loss.numpy()
0.6
Usage with the `compile` API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.CategoricalHinge())
```
"""
def __init__(self,
reduction=losses_utils.ReductionV2.AUTO,
name='categorical_hinge'):
super(CategoricalHinge, self).__init__(
categorical_hinge, name=name, reduction=reduction)
@keras_export('keras.losses.Poisson')
class Poisson(LossFunctionWrapper):
"""Computes the Poisson loss between `y_true` and `y_pred`.
`loss = y_pred - y_true * log(y_pred)`
Usage:
>>> p = tf.keras.losses.Poisson()
>>> loss = p([[0., 1.], [0., 0.]], [[1., 1.], [0., 0.]])
>>> loss.numpy()
0.49999997
>>> loss = p([[0., 1.], [0., 0.]], [[1., 1.], [0., 0.]],
... sample_weight=[1., 0.])
>>> loss.numpy()
0.49999997
Usage with the `compile` API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.Poisson())
```
"""
def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='poisson'):
super(Poisson, self).__init__(poisson, name=name, reduction=reduction)
@keras_export('keras.losses.LogCosh')
class LogCosh(LossFunctionWrapper):
"""Computes the logarithm of the hyperbolic cosine of the prediction error.
`logcosh = log((exp(x) + exp(-x))/2)`,
where x is the error `y_pred - y_true`.
Usage:
>>> l = tf.keras.losses.LogCosh()
>>> loss = l([[0., 1.], [0., 0.]], [[1., 1.], [0., 0.]])
>>> loss.numpy()
0.10844523
>>> loss = l([[0., 1.], [0., 0.]], [[1., 1.], [0., 0.]],
... sample_weight=[1., 0.])
>>> loss.numpy()
0.10844523
Usage with the `compile` API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.LogCosh())
```
"""
def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='logcosh'):
super(LogCosh, self).__init__(logcosh, name=name, reduction=reduction)
@keras_export('keras.losses.KLDivergence')
class KLDivergence(LossFunctionWrapper):
"""Computes Kullback-Leibler divergence loss between `y_true` and `y_pred`.
`loss = y_true * log(y_true / y_pred)`
See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence
Usage:
>>> kl = tf.keras.losses.KLDivergence()
>>> loss = kl([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> loss.numpy()
0.45814306
>>> loss = kl([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> loss.numpy()
0.4581446
Usage with the `compile` API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.KLDivergence())
```
"""
def __init__(self,
reduction=losses_utils.ReductionV2.AUTO,
name='kullback_leibler_divergence'):
super(KLDivergence, self).__init__(
kullback_leibler_divergence, name=name, reduction=reduction)
@keras_export('keras.losses.Huber')
class Huber(LossFunctionWrapper):
"""Computes the Huber loss between `y_true` and `y_pred`.
For each value x in `error = y_true - y_pred`:
```
loss = 0.5 * x^2 if |x| <= d
loss = 0.5 * d^2 + d * (|x| - d) if |x| > d
```
where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss
Usage:
>>> h = tf.keras.losses.Huber()
>>> loss = h([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> loss.numpy()
0.155
>>> loss = h([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> loss.numpy()
0.09
Usage with the `compile` API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.Huber())
```
Args:
delta: A float, the point where the Huber loss function changes from a
quadratic to linear.
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
Default value is `AUTO`. `AUTO` indicates that the reduction option will
be determined by the usage context. For almost all cases this defaults to
`SUM_OVER_BATCH_SIZE`.
When used with `tf.distribute.Strategy`, outside of built-in training
loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
`SUM_OVER_BATCH_SIZE` will raise an error. Please see
https://www.tensorflow.org/tutorials/distribute/custom_training
for more details on this.
name: Optional name for the op.
"""
def __init__(self,
delta=1.0,
reduction=losses_utils.ReductionV2.AUTO,
name='huber_loss'):
super(Huber, self).__init__(
huber_loss, name=name, reduction=reduction, delta=delta)
@keras_export('keras.metrics.mean_squared_error',
'keras.metrics.mse',
'keras.metrics.MSE',
'keras.losses.mean_squared_error',
'keras.losses.mse',
'keras.losses.MSE')
def mean_squared_error(y_true, y_pred):
"""Computes the mean squared error between labels and predictions.
After computing the squared distance between the inputs, the mean value over
the last dimension is returned.
`loss = mean(square(y_true - y_pred), axis=-1)`
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Mean squared error values. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
return K.mean(math_ops.squared_difference(y_pred, y_true), axis=-1)
@keras_export('keras.metrics.mean_absolute_error',
'keras.metrics.mae',
'keras.metrics.MAE',
'keras.losses.mean_absolute_error',
'keras.losses.mae',
'keras.losses.MAE')
def mean_absolute_error(y_true, y_pred):
"""Computes the mean absolute error between labels and predictions.
  `loss = mean(abs(y_true - y_pred), axis=-1)`
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Mean absolute error values. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
return K.mean(math_ops.abs(y_pred - y_true), axis=-1)
@keras_export('keras.metrics.mean_absolute_percentage_error',
'keras.metrics.mape',
'keras.metrics.MAPE',
'keras.losses.mean_absolute_percentage_error',
'keras.losses.mape',
'keras.losses.MAPE')
def mean_absolute_percentage_error(y_true, y_pred):
"""Computes the mean absolute percentage error between `y_true` and `y_pred`.
  `loss = 100 * mean(abs((y_true - y_pred) / y_true), axis=-1)`
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Mean absolute percentage error values. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
diff = math_ops.abs(
(y_true - y_pred) / K.maximum(math_ops.abs(y_true), K.epsilon()))
return 100. * K.mean(diff, axis=-1)
@keras_export('keras.metrics.mean_squared_logarithmic_error',
'keras.metrics.msle',
'keras.metrics.MSLE',
'keras.losses.mean_squared_logarithmic_error',
'keras.losses.msle',
'keras.losses.MSLE')
def mean_squared_logarithmic_error(y_true, y_pred):
"""Computes the mean squared logarithmic error between `y_true` and `y_pred`.
  `loss = mean(square(log(y_true + 1) - log(y_pred + 1)), axis=-1)`
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Mean squared logarithmic error values. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
first_log = math_ops.log(K.maximum(y_pred, K.epsilon()) + 1.)
second_log = math_ops.log(K.maximum(y_true, K.epsilon()) + 1.)
return K.mean(math_ops.squared_difference(first_log, second_log), axis=-1)
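# Illustrative sketch (not part of the original module): because msle compares
# log(x + 1) values, the same absolute error costs much less at a large scale
# than at a small one. The helper name `_example_msle_scale` is an assumption
# added here for documentation purposes only.
def _example_msle_scale():
  small_scale = mean_squared_logarithmic_error([[1.]], [[2.]])      # (log(3) - log(2))**2
  large_scale = mean_squared_logarithmic_error([[100.]], [[101.]])  # ~(log(102) - log(101))**2, much smaller
  return small_scale, large_scale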
def _maybe_convert_labels(y_true):
"""Converts binary labels into -1/1."""
are_zeros = math_ops.equal(y_true, 0)
are_ones = math_ops.equal(y_true, 1)
is_binary = math_ops.reduce_all(math_ops.logical_or(are_zeros, are_ones))
def _convert_binary_labels():
# Convert the binary labels to -1 or 1.
return 2. * y_true - 1.
updated_y_true = smart_cond.smart_cond(is_binary,
_convert_binary_labels, lambda: y_true)
return updated_y_true
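# Illustrative sketch (not part of the original module): `_maybe_convert_labels`
# only rewrites targets when every value is 0 or 1; labels that are already -1/1
# pass through unchanged. The helper name `_example_maybe_convert_labels` is an
# assumption added for documentation purposes only.
def _example_maybe_convert_labels():
  binary = _maybe_convert_labels(ops.convert_to_tensor([[0., 1.], [1., 0.]]))
  # binary -> [[-1., 1.], [1., -1.]]
  signed = _maybe_convert_labels(ops.convert_to_tensor([[-1., 1.], [1., -1.]]))
  # signed is returned unchanged.
  return binary, signed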
@keras_export('keras.metrics.squared_hinge', 'keras.losses.squared_hinge')
def squared_hinge(y_true, y_pred):
"""Computes the squared hinge loss between `y_true` and `y_pred`.
`loss = square(maximum(1 - y_true * y_pred, 0))`
Args:
y_true: The ground truth values. `y_true` values are expected to be -1 or 1.
If binary (0 or 1) labels are provided we will convert them to -1 or 1.
shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Squared hinge loss values. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
y_true = _maybe_convert_labels(y_true)
return K.mean(
math_ops.square(math_ops.maximum(1. - y_true * y_pred, 0.)), axis=-1)
@keras_export('keras.metrics.hinge', 'keras.losses.hinge')
def hinge(y_true, y_pred):
"""Computes the hinge loss between `y_true` and `y_pred`.
`loss = maximum(1 - y_true * y_pred, 0)`
Args:
y_true: The ground truth values. `y_true` values are expected to be -1 or 1.
If binary (0 or 1) labels are provided they will be converted to -1 or 1.
shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Hinge loss values. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
y_true = _maybe_convert_labels(y_true)
return K.mean(math_ops.maximum(1. - y_true * y_pred, 0.), axis=-1)
@keras_export('keras.losses.categorical_hinge')
def categorical_hinge(y_true, y_pred):
"""Computes the categorical hinge loss between `y_true` and `y_pred`.
`loss = maximum(neg - pos + 1, 0)`
  where `neg = maximum((1 - y_true) * y_pred)` and `pos = sum(y_true * y_pred)`
Args:
    y_true: The ground truth values. For the categorical hinge, `y_true` is
      expected to be a one-hot-encoded tensor with values 0 or 1; binary labels
      are not converted to -1 or 1 here.
y_pred: The predicted values.
Returns:
Categorical hinge loss values.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
pos = math_ops.reduce_sum(y_true * y_pred, axis=-1)
neg = math_ops.reduce_max((1. - y_true) * y_pred, axis=-1)
return math_ops.maximum(0., neg - pos + 1.)
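# Illustrative sketch (not part of the original module): with a one-hot target,
# the categorical hinge compares the true-class score against the best
# wrong-class score. The helper name `_example_categorical_hinge` is an
# assumption added for documentation purposes only.
def _example_categorical_hinge():
  y_true = [[0., 1., 0.]]      # one-hot ground truth
  y_pred = [[0.1, 0.7, 0.2]]   # predicted scores
  # pos = 0.7, neg = 0.2, so the loss is maximum(0.2 - 0.7 + 1, 0) = 0.5.
  return categorical_hinge(y_true, y_pred)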
def huber_loss(y_true, y_pred, delta=1.0):
"""Computes Huber loss value.
For each value x in `error = y_true - y_pred`:
```
loss = 0.5 * x^2 if |x| <= d
loss = 0.5 * d^2 + d * (|x| - d) if |x| > d
```
where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss
Args:
y_true: tensor of true targets.
y_pred: tensor of predicted targets.
delta: A float, the point where the Huber loss function changes from a
quadratic to linear.
Returns:
    Tensor with element-wise Huber loss values (same shape as `y_true`).
"""
y_pred = math_ops.cast(y_pred, dtype=K.floatx())
y_true = math_ops.cast(y_true, dtype=K.floatx())
error = math_ops.subtract(y_pred, y_true)
abs_error = math_ops.abs(error)
quadratic = math_ops.minimum(abs_error, delta)
linear = math_ops.subtract(abs_error, quadratic)
return math_ops.add(
math_ops.multiply(
ops.convert_to_tensor(0.5, dtype=quadratic.dtype),
math_ops.multiply(quadratic, quadratic)),
math_ops.multiply(delta, linear))
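# Illustrative sketch (not part of the original module): the two regimes of
# `huber_loss` around `delta`. The helper name `_example_huber_regions` is an
# assumption added for documentation purposes only.
def _example_huber_regions(delta=1.0):
  # |error| = 0.5 <= delta: quadratic regime, 0.5 * 0.5**2 = 0.125.
  quadratic_case = huber_loss([0.], [0.5], delta=delta)
  # |error| = 3.0 > delta: linear regime, 0.5 * delta**2 + delta * (3.0 - delta) = 2.5.
  linear_case = huber_loss([0.], [3.0], delta=delta)
  return quadratic_case, linear_case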
@keras_export('keras.losses.logcosh')
def logcosh(y_true, y_pred):
"""Logarithm of the hyperbolic cosine of the prediction error.
`log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small `x` and
to `abs(x) - log(2)` for large `x`. This means that 'logcosh' works mostly
like the mean squared error, but will not be so strongly affected by the
occasional wildly incorrect prediction.
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Logcosh error values. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
def _logcosh(x):
return x + nn.softplus(-2. * x) - math_ops.cast(math_ops.log(2.), x.dtype)
return K.mean(_logcosh(y_pred - y_true), axis=-1)
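# Illustrative sketch (not part of the original module): numerically checks the
# approximations mentioned in the docstring above, log(cosh(x)) ~ x**2 / 2 for
# small x and ~ |x| - log(2) for large x. The helper name is an assumption added
# for documentation purposes only.
def _example_logcosh_approximation():
  small = logcosh([0.], [0.01])  # ~ 0.01**2 / 2 = 5e-05
  large = logcosh([0.], [10.])   # ~ 10 - log(2) ~ 9.3069
  return small, large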
@keras_export('keras.metrics.categorical_crossentropy',
'keras.losses.categorical_crossentropy')
def categorical_crossentropy(y_true,
y_pred,
from_logits=False,
label_smoothing=0):
"""Computes the categorical crossentropy loss.
Args:
y_true: tensor of true targets.
y_pred: tensor of predicted targets.
from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
we assume that `y_pred` encodes a probability distribution.
label_smoothing: Float in [0, 1]. If > `0` then smooth the labels.
Returns:
Categorical crossentropy loss value.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
label_smoothing = ops.convert_to_tensor(label_smoothing, dtype=K.floatx())
def _smooth_labels():
num_classes = math_ops.cast(array_ops.shape(y_true)[1], y_pred.dtype)
return y_true * (1.0 - label_smoothing) + (label_smoothing / num_classes)
y_true = smart_cond.smart_cond(label_smoothing,
_smooth_labels, lambda: y_true)
return K.categorical_crossentropy(y_true, y_pred, from_logits=from_logits)
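# Illustrative sketch (not part of the original module): the effect of
# `label_smoothing`. With smoothing 0.1 and two classes the one-hot target
# [0., 1.] becomes [0.05, 0.95], so a confident correct prediction is penalized
# slightly more. The helper name `_example_label_smoothing` is an assumption.
def _example_label_smoothing():
  unsmoothed = categorical_crossentropy([[0., 1.]], [[0.05, 0.95]])
  smoothed = categorical_crossentropy(
      [[0., 1.]], [[0.05, 0.95]], label_smoothing=0.1)
  # smoothed > unsmoothed, because part of the target mass moves to the wrong class.
  return unsmoothed, smoothed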
@keras_export('keras.metrics.sparse_categorical_crossentropy',
'keras.losses.sparse_categorical_crossentropy')
def sparse_categorical_crossentropy(y_true, y_pred, from_logits=False, axis=-1):
"""Computes the sparse categorical crossentropy loss.
Args:
y_true: Ground truth values.
y_pred: The predicted values.
from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
we assume that `y_pred` encodes a probability distribution.
axis: (Optional) Defaults to -1. The dimension along which the entropy is
computed.
Returns:
Sparse categorical crossentropy loss value.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
return K.sparse_categorical_crossentropy(
y_true, y_pred, from_logits=from_logits, axis=axis)
@keras_export('keras.metrics.binary_crossentropy',
'keras.losses.binary_crossentropy')
def binary_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0):
"""Computes the binary crossentropy loss.
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
we assume that `y_pred` encodes a probability distribution.
label_smoothing: Float in [0, 1]. If > `0` then smooth the labels.
Returns:
Binary crossentropy loss value. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
label_smoothing = ops.convert_to_tensor(label_smoothing, dtype=K.floatx())
def _smooth_labels():
return y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing
y_true = smart_cond.smart_cond(label_smoothing,
_smooth_labels, lambda: y_true)
return K.mean(
K.binary_crossentropy(y_true, y_pred, from_logits=from_logits), axis=-1)
@keras_export('keras.metrics.kullback_leibler_divergence',
'keras.metrics.kld',
'keras.metrics.KLD',
'keras.losses.kullback_leibler_divergence',
'keras.losses.kld',
'keras.losses.KLD')
def kullback_leibler_divergence(y_true, y_pred):
"""Computes Kullback-Leibler divergence loss between `y_true` and `y_pred`.
`loss = y_true * log(y_true / y_pred)`
See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence
Usage:
```python
loss = tf.keras.losses.KLD([.4, .9, .2], [.5, .8, .12])
print('Loss: ', loss.numpy()) # Loss: 0.11891246
```
Args:
y_true: Tensor of true targets.
y_pred: Tensor of predicted targets.
Returns:
A `Tensor` with loss.
Raises:
TypeError: If `y_true` cannot be cast to the `y_pred.dtype`.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
y_true = K.clip(y_true, K.epsilon(), 1)
y_pred = K.clip(y_pred, K.epsilon(), 1)
return math_ops.reduce_sum(y_true * math_ops.log(y_true / y_pred), axis=-1)
@keras_export('keras.metrics.poisson', 'keras.losses.poisson')
def poisson(y_true, y_pred):
"""Computes the Poisson loss between y_true and y_pred.
The Poisson loss is the mean of the elements of the `Tensor`
`y_pred - y_true * log(y_pred)`.
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Poisson loss value. shape = `[batch_size, d0, .. dN-1]`.
Raises:
InvalidArgumentError: If `y_true` and `y_pred` have incompatible shapes.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
return K.mean(y_pred - y_true * math_ops.log(y_pred + K.epsilon()), axis=-1)
@keras_export(
'keras.losses.cosine_similarity',
v1=[
'keras.metrics.cosine_proximity',
'keras.metrics.cosine',
'keras.losses.cosine_proximity',
'keras.losses.cosine',
'keras.losses.cosine_similarity',
])
def cosine_similarity(y_true, y_pred, axis=-1):
"""Computes the cosine similarity between labels and predictions.
  Note that it is a number between -1 and 1. When it is a negative number
  between -1 and 0, 0 indicates orthogonality and values closer to -1 indicate
  greater similarity; values closer to 1 indicate greater dissimilarity. This
  makes it usable as a loss function in a setting where you try to maximize
  the proximity between predictions and targets.
`loss = -sum(y_true * y_pred)`
Args:
y_true: Tensor of true targets.
y_pred: Tensor of predicted targets.
axis: Axis along which to determine similarity.
Returns:
Cosine similarity tensor.
"""
y_true = nn.l2_normalize(y_true, axis=axis)
y_pred = nn.l2_normalize(y_pred, axis=axis)
return -math_ops.reduce_sum(y_true * y_pred, axis=axis)
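# Illustrative sketch (not part of the original module): the sign convention of
# `cosine_similarity` when used as a loss. Aligned vectors give -1, orthogonal
# vectors give 0 and opposite vectors give +1. The helper name is an assumption
# added for documentation purposes only.
def _example_cosine_similarity_sign():
  aligned = cosine_similarity([[1., 0.]], [[2., 0.]])     # -> [-1.]
  orthogonal = cosine_similarity([[1., 0.]], [[0., 3.]])  # -> [0.]
  opposite = cosine_similarity([[1., 0.]], [[-1., 0.]])   # -> [1.]
  return aligned, orthogonal, opposite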
@keras_export('keras.losses.CosineSimilarity')
class CosineSimilarity(LossFunctionWrapper):
"""Computes the cosine similarity between `y_true` and `y_pred`.
`loss = -sum(y_true * y_pred)`
Usage:
>>> cosine_loss = tf.keras.losses.CosineSimilarity(axis=1)
>>> loss = cosine_loss([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]])
  >>> # l2_norm(y_true) = [[0., 1.], [1./1.414, 1./1.414]]
  >>> # l2_norm(y_pred) = [[1., 0.], [1./1.414, 1./1.414]]
>>> # l2_norm(y_true) . l2_norm(y_pred) = [[0., 0.], [0.5, 0.5]]
  >>> # loss = -mean(sum(l2_norm(y_true) . l2_norm(y_pred), axis=1))
  >>> # = -((0. + 0.) + (0.5 + 0.5)) / 2
>>> loss.numpy()
-0.49999997
Usage with the `compile` API:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tf.keras.losses.CosineSimilarity(axis=1))
```
Args:
axis: (Optional) Defaults to -1. The dimension along which the cosine
similarity is computed.
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to loss.
Default value is `AUTO`. `AUTO` indicates that the reduction option will
be determined by the usage context. For almost all cases this defaults to
`SUM_OVER_BATCH_SIZE`.
When used with `tf.distribute.Strategy`, outside of built-in training
loops such as `tf.keras` `compile` and `fit`, using `AUTO` or
`SUM_OVER_BATCH_SIZE` will raise an error. Please see
https://www.tensorflow.org/tutorials/distribute/custom_training
for more details on this.
name: Optional name for the op.
"""
def __init__(self,
axis=-1,
reduction=losses_utils.ReductionV2.AUTO,
name='cosine_similarity'):
super(CosineSimilarity, self).__init__(
cosine_similarity, reduction=reduction, name=name, axis=axis)
# Aliases.
bce = BCE = binary_crossentropy
mse = MSE = mean_squared_error
mae = MAE = mean_absolute_error
mape = MAPE = mean_absolute_percentage_error
msle = MSLE = mean_squared_logarithmic_error
kld = KLD = kullback_leibler_divergence
def is_categorical_crossentropy(loss):
  """Returns whether `loss` refers to categorical crossentropy in any form."""
  result = ((isinstance(loss, CategoricalCrossentropy) or
(isinstance(loss, LossFunctionWrapper) and
loss.fn == categorical_crossentropy) or
(hasattr(loss, '__name__') and
loss.__name__ == 'categorical_crossentropy') or
(loss == 'categorical_crossentropy')))
return result
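# Illustrative sketch (not part of the original module): the identifier forms
# that `is_categorical_crossentropy` recognizes. The helper name is an
# assumption added for documentation purposes only.
def _example_is_categorical_crossentropy():
  as_instance = is_categorical_crossentropy(CategoricalCrossentropy())
  as_function = is_categorical_crossentropy(categorical_crossentropy)
  as_string = is_categorical_crossentropy('categorical_crossentropy')
  return as_instance, as_function, as_string  # (True, True, True)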
@keras_export('keras.losses.serialize')
def serialize(loss):
return serialize_keras_object(loss)
@keras_export('keras.losses.deserialize')
def deserialize(name, custom_objects=None):
return deserialize_keras_object(
name,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name='loss function')
@keras_export('keras.losses.get')
def get(identifier):
  """Retrieves a Keras loss, given a string name, a config dict or a callable."""
if identifier is None:
return None
if isinstance(identifier, six.string_types):
identifier = str(identifier)
return deserialize(identifier)
if isinstance(identifier, dict):
return deserialize(identifier)
elif callable(identifier):
return identifier
else:
raise ValueError('Could not interpret '
'loss function identifier:', identifier)
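# Illustrative sketch (not part of the original module): the three identifier
# forms accepted by `get`: a string name, a serialized config dict and a
# callable. The helper name `_example_get_identifiers` is an assumption added
# for documentation purposes only.
def _example_get_identifiers():
  by_name = get('mean_squared_error')             # looked up in this module
  by_config = get(serialize(MeanSquaredError()))  # round-trips a serialized loss
  by_callable = get(mean_squared_error)           # callables are returned unchanged
  return by_name, by_config, by_callable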
LABEL_DTYPES_FOR_LOSSES = {
losses_impl.sparse_softmax_cross_entropy: 'int32',
sparse_categorical_crossentropy: 'int32'
}
| jhseu/tensorflow | tensorflow/python/keras/losses.py | Python | apache-2.0 | 45,944 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("form_designer", "__first__"), ("fluent_contents", "0001_initial")]
operations = [
migrations.CreateModel(
name="FormDesignerLink",
fields=[
(
"contentitem_ptr",
models.OneToOneField(
parent_link=True,
on_delete=models.CASCADE,
auto_created=True,
primary_key=True,
serialize=False,
to="fluent_contents.ContentItem",
),
),
(
"form_definition",
models.ForeignKey(
verbose_name="Form",
on_delete=models.PROTECT,
to="form_designer.FormDefinition",
),
),
],
options={
"db_table": "contentitem_formdesignerlink_formdesignerlink",
"verbose_name": "Form link",
"verbose_name_plural": "Form links",
},
bases=("fluent_contents.contentitem",),
)
]
| django-fluent/django-fluent-contents | fluent_contents/plugins/formdesignerlink/migrations/0001_initial.py | Python | apache-2.0 | 1,286 |
########################################################################################################################
#
# cookie_reader.py
#
# Purpose: read cookies from the web browser (currently only Chrome supported) and make them into Python objects
#
# Author: Braxton J. Schafer (bjschafer) [bjs]
#
# Creation date: 10/10/2014
#
# Copyright (c) 2014 Braxton J. Schafer
#
# Changelog:
#
########################################################################################################################
import sqlite3 as sqlite
import sys
import os.path
import json
from pokemon import pokemon
class cookie_reader():
    def __init__(self, cookie_location=None, browser_type='chrome'):
        # Only Chrome is currently supported; when no explicit location is given,
        # fall back to the platform-specific Local Storage directory.
        self.browser_type = browser_type
        self.cookie_location = cookie_location or self._get_cookie_location()
        self._expand_tilde()
        self.filename = "http_play.pokemonshowdown.com_0.localstorage"
def _get_cookie_location(self):
platform = sys.platform
if (platform == 'darwin'):
return "~/Library/Application Support/Google/Chrome/Default/Local Storage/"
elif (platform == 'linux2'):
return "~/.config/google-chrome/Default/Cookies"
elif (platform == 'win32' or platform == 'win64'):
return "~/AppData/Local/Google/Chrome/User Data/ Default/Local Storage/"
return "Platform not recognized."
def _expand_tilde(self):
self.cookie_location = self.cookie_location.replace('~', os.path.expanduser('~'))
def _read_from_database(self):
conn = sqlite.connect(self.cookie_location + self.filename)
conn.text_factory = str
c = conn.cursor()
c.execute("""SELECT value FROM ItemTable WHERE key='showdown_teams'""")
return c.fetchone()
def _get_json(self):
raw_json = str(self._read_from_database())
raw_json = raw_json[3:-3]
raw_json = raw_json.replace('\\x00', '')
return json.loads(raw_json)
def read_teams(self):
decoded = self._get_json()
for team in decoded:
yield (team['name'], team['format'], [pokemon(t) for t in team['team']])
if __name__ == '__main__':
c = cookie_reader()
for t in c.read_teams():
print(t) | bjschafer/showdown-sync | cookie_reader.py | Python | apache-2.0 | 2,178 |
# Python bindings to oDesk API
# python-odesk version 0.5
# (C) 2010-2015 oDesk
# Updated by the script
"""Main package of the python bindings for oDesk API.
For convenience some most commonly used functionalities are imported here,
so you can use::
from odesk import Client
from odesk import raise_http_error
"""
VERSION = '0.5.8'
def get_version():
return VERSION
from odesk.client import Client
from odesk.http import raise_http_error
__all__ = ["get_version", "Client", "raise_http_error"]
| odesk/python-odesk | odesk/__init__.py | Python | apache-2.0 | 516 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Plugin to parse the OLECF summary/document summary information items."""
from plaso.lib import event
from plaso.lib import eventdata
from plaso.parsers.olecf_plugins import interface
class OleCfSummaryInfoEvent(event.FiletimeEvent):
"""Convenience class for an OLECF Summary info event."""
DATA_TYPE = 'olecf:summary_info'
def __init__(self, timestamp, usage, attributes):
"""Initializes the event.
Args:
timestamp: The FILETIME timestamp value.
usage: The usage string, describing the timestamp value.
attributes: A dict object containing all extracted attributes.
"""
super(OleCfSummaryInfoEvent, self).__init__(
timestamp, usage)
self.name = u'Summary Information'
for attribute_name, attribute_value in attributes.iteritems():
setattr(self, attribute_name, attribute_value)
# TODO: Move this class to a higher level (to the interface)
# so that these functions can be shared by other plugins.
class OleCfSummaryInfo(object):
"""An OLECF Summary Info object."""
_CLASS_IDENTIFIER = 'f29f85e0-4ff9-1068-ab91-08002b27b3d9'
_PROPERTY_NAMES_INT32 = {
0x000e: 'number_of_pages', # PIDSI_PAGECOUNT
0x000f: 'number_of_words', # PIDSI_WORDCOUNT
0x0010: 'number_of_characters', # PIDSI_CHARCOUNT
0x0013: 'security', # PIDSI_SECURITY
}
_PROPERTY_NAMES_STRING = {
0x0002: 'title', # PIDSI_TITLE
0x0003: 'subject', # PIDSI_SUBJECT
0x0004: 'author', # PIDSI_AUTHOR
0x0005: 'keywords', # PIDSI_KEYWORDS
0x0006: 'comments', # PIDSI_COMMENTS
0x0007: 'template', # PIDSI_TEMPLATE
0x0008: 'last_saved_by', # PIDSI_LASTAUTHOR
0x0009: 'revision_number', # PIDSI_REVNUMBER
0x0012: 'application', # PIDSI_APPNAME
}
PIDSI_CODEPAGE = 0x0001
PIDSI_EDITTIME = 0x000a
PIDSI_LASTPRINTED = 0x000b
PIDSI_CREATE_DTM = 0x000c
PIDSI_LASTSAVE_DTM = 0x000d
PIDSI_THUMBNAIL = 0x0011
def __init__(self, olecf_item, root_creation_time, root_modification_time):
"""Initialize the OLECF summary object.
Args:
olecf_item: The OLECF item (instance of pyolecf.property_set_stream).
root_creation_time: The creation time of the root OLECF item.
root_modification_time: The modification time of the root OLECF item.
"""
super(OleCfSummaryInfo, self).__init__()
self._root_creation_time = root_creation_time
self._root_modification_time = root_modification_time
self._events = []
self.attributes = {}
self._InitFromPropertySet(olecf_item.set)
def _InitFromPropertySet(self, property_set):
"""Initializes the object from a property set.
Args:
property_set: The OLECF property set (pyolecf.property_set).
"""
# Combine the values of multiple property sections
# but do not override properties that are already set.
for property_section in property_set.sections:
if property_section.class_identifier != self._CLASS_IDENTIFIER:
continue
for property_value in property_section.properties:
self._InitFromPropertyValue(property_value)
def _InitFromPropertyValue(self, property_value):
"""Initializes the object from a property value.
Args:
property_value: The OLECF property value (pyolecf.property_value).
"""
if property_value.type == interface.OleDefinitions.VT_I2:
self._InitFromPropertyValueTypeInt16(property_value)
elif property_value.type == interface.OleDefinitions.VT_I4:
self._InitFromPropertyValueTypeInt32(property_value)
elif (property_value.type == interface.OleDefinitions.VT_LPSTR or
property_value.type == interface.OleDefinitions.VT_LPWSTR):
self._InitFromPropertyValueTypeString(property_value)
elif property_value.type == interface.OleDefinitions.VT_FILETIME:
self._InitFromPropertyValueTypeFiletime(property_value)
def _InitFromPropertyValueTypeInt16(self, property_value):
"""Initializes the object from a 16-bit int type property value.
Args:
property_value: The OLECF property value (pyolecf.property_value
of type VT_I2).
"""
if property_value.identifier == self.PIDSI_CODEPAGE:
# TODO: can the codepage vary per property section?
# And is it needed to interpret the ASCII strings?
# codepage = property_value.data_as_integer
pass
def _InitFromPropertyValueTypeInt32(self, property_value):
"""Initializes the object from a 32-bit int type property value.
Args:
property_value: The OLECF property value (pyolecf.property_value
of type VT_I4).
"""
property_name = self._PROPERTY_NAMES_INT32.get(
property_value.identifier, None)
if property_name and not property_name in self.attributes:
self.attributes[property_name] = property_value.data_as_integer
def _InitFromPropertyValueTypeString(self, property_value):
"""Initializes the object from a string type property value.
Args:
property_value: The OLECF property value (pyolecf.property_value
of type VT_LPSTR or VT_LPWSTR).
"""
property_name = self._PROPERTY_NAMES_STRING.get(
property_value.identifier, None)
if property_name and not property_name in self.attributes:
self.attributes[property_name] = property_value.data_as_string
def _InitFromPropertyValueTypeFiletime(self, property_value):
"""Initializes the object from a filetime type property value.
Args:
property_value: The OLECF property value (pyolecf.property_value
of type VT_FILETIME).
"""
if property_value.identifier == self.PIDSI_LASTPRINTED:
self._events.append(
(property_value.data_as_integer, 'Document Last Printed Time'))
elif property_value.identifier == self.PIDSI_CREATE_DTM:
self._events.append(
(property_value.data_as_integer, 'Document Creation Time'))
elif property_value.identifier == self.PIDSI_LASTSAVE_DTM:
self._events.append(
(property_value.data_as_integer, 'Document Last Save Time'))
elif property_value.identifier == self.PIDSI_EDITTIME:
# property_name = 'total_edit_time'
# TODO: handle duration.
pass
def GetEventObjects(self):
"""Yields extracted event objects."""
for timestamp, timestamp_description in self._events:
yield OleCfSummaryInfoEvent(
timestamp, timestamp_description, self.attributes)
if self._root_creation_time:
yield OleCfSummaryInfoEvent(
self._root_creation_time, eventdata.EventTimestamp.CREATION_TIME,
self.attributes)
if self._root_modification_time:
yield OleCfSummaryInfoEvent(
self._root_modification_time,
eventdata.EventTimestamp.MODIFICATION_TIME, self.attributes)
class OleCfDocumentSummaryInfoEvent(event.FiletimeEvent):
"""Convenience class for an OLECF Document Summary info event."""
DATA_TYPE = 'olecf:document_summary_info'
_CLASS_IDENTIFIER = 'd5cdd502-2e9c-101b-9397-08002b2cf9ae'
_PROPERTY_NAMES_BOOL = {
0x0013: 'shared_document', # PIDDSI_SHAREDDOC
}
_PROPERTY_NAMES_INT32 = {
0x0004: 'number_of_bytes', # PIDDSI_BYTECOUNT
0x0005: 'number_of_lines', # PIDDSI_LINECOUNT
0x0006: 'number_of_paragraphs', # PIDDSI_PARCOUNT
0x0007: 'number_of_slides', # PIDDSI_SLIDECOUNT
0x0008: 'number_of_notes', # PIDDSI_NOTECOUNT
0x0009: 'number_of_hidden_slides', # PIDDSI_HIDDENCOUNT
0x000a: 'number_of_clips', # PIDDSI_MMCLIPCOUNT
0x0011: 'number_of_characters_with_white_space', # PIDDSI_CCHWITHSPACES
0x0017: 'application_version', # PIDDSI_VERSION
}
_PROPERTY_NAMES_STRING = {
0x000e: 'manager', # PIDDSI_MANAGER
0x000f: 'company', # PIDDSI_COMPANY
0x001a: 'content_type', # PIDDSI_CONTENTTYPE
0x001b: 'content_status', # PIDDSI_CONTENTSTATUS
0x001c: 'language', # PIDDSI_LANGUAGE
0x001d: 'document_version', # PIDDSI_DOCVERSION
}
PIDDSI_CODEPAGE = 0x0001
PIDDSI_CATEGORY = 0x0002
PIDDSI_PRESFORMAT = 0x0003
PIDDSI_SCALE = 0x000b
PIDDSI_HEADINGPAIR = 0x000c
PIDDSI_DOCPARTS = 0x000d
PIDDSI_LINKSDIRTY = 0x0010
PIDDSI_VERSION = 0x0017
def __init__(self, timestamp, usage, olecf_item):
"""Initializes the event.
Args:
timestamp: The FILETIME timestamp value.
usage: The usage string, describing the timestamp value.
olecf_item: The OLECF item (pyolecf.property_set_stream).
"""
super(OleCfDocumentSummaryInfoEvent, self).__init__(
timestamp, usage)
self.name = u'Document Summary Information'
self._InitFromPropertySet(olecf_item.set)
def _InitFromPropertySet(self, property_set):
"""Initializes the event from a property set.
Args:
property_set: The OLECF property set (pyolecf.property_set).
"""
# Combine the values of multiple property sections
# but do not override properties that are already set.
for property_section in property_set.sections:
if property_section.class_identifier != self._CLASS_IDENTIFIER:
continue
for property_value in property_section.properties:
self._InitFromPropertyValue(property_value)
def _InitFromPropertyValue(self, property_value):
"""Initializes the event from a property value.
Args:
property_value: The OLECF property value (pyolecf.property_value).
"""
if property_value.type == interface.OleDefinitions.VT_I2:
self._InitFromPropertyValueTypeInt16(property_value)
elif property_value.type == interface.OleDefinitions.VT_I4:
self._InitFromPropertyValueTypeInt32(property_value)
elif property_value.type == interface.OleDefinitions.VT_BOOL:
self._InitFromPropertyValueTypeBool(property_value)
elif (property_value.type == interface.OleDefinitions.VT_LPSTR or
property_value.type == interface.OleDefinitions.VT_LPWSTR):
self._InitFromPropertyValueTypeString(property_value)
def _InitFromPropertyValueTypeInt16(self, property_value):
"""Initializes the event from a 16-bit int type property value.
Args:
property_value: The OLECF property value (pyolecf.property_value
of type VT_I2).
"""
if property_value.identifier == self.PIDDSI_CODEPAGE:
# TODO: can the codepage vary per property section?
# And is it needed to interpret the ASCII strings?
# codepage = property_value.data_as_integer
pass
def _InitFromPropertyValueTypeInt32(self, property_value):
"""Initializes the event from a 32-bit int type property value.
Args:
property_value: The OLECF property value (pyolecf.property_value
of type VT_I4).
"""
property_name = self._PROPERTY_NAMES_INT32.get(
property_value.identifier, None)
# The application version consists of 2 16-bit values that make up
# the version number. Where the upper 16-bit is the major number
# and the lower 16-bit the minor number.
if property_value.identifier == self.PIDDSI_VERSION:
application_version = property_value.data_as_integer
setattr(self, property_name, u'{0:d}.{1:d}'.format(
application_version >> 16, application_version & 0xffff))
elif property_name and not hasattr(self, property_name):
setattr(self, property_name, property_value.data_as_integer)
def _InitFromPropertyValueTypeBool(self, property_value):
"""Initializes the event from a boolean type property value.
Args:
property_value: The OLECF property value (pyolecf.property_value
of type VT_BOOL).
"""
property_name = self._PROPERTY_NAMES_BOOL.get(
property_value.identifier, None)
if property_name and not hasattr(self, property_name):
setattr(self, property_name, property_value.data_as_boolean)
def _InitFromPropertyValueTypeString(self, property_value):
"""Initializes the event from a string type property value.
Args:
property_value: The OLECF property value (pyolecf.property_value
of type VT_LPSTR or VT_LPWSTR).
"""
property_name = self._PROPERTY_NAMES_STRING.get(
property_value.identifier, None)
if property_name and not hasattr(self, property_name):
setattr(self, property_name, property_value.data_as_string)
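# Illustrative sketch (not part of the original plugin): how the 32-bit
# PIDDSI_VERSION value is split into a major (upper 16 bits) and minor
# (lower 16 bits) application version, mirroring
# _InitFromPropertyValueTypeInt32 above. The helper name and the sample value
# are assumptions added for documentation purposes only.
def _ExampleSplitApplicationVersion(application_version=0x000c0000):
  major = application_version >> 16
  minor = application_version & 0xffff
  # 0x000c0000 yields u'12.0'.
  return u'{0:d}.{1:d}'.format(major, minor)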
class DocumentSummaryPlugin(interface.OlecfPlugin):
"""Plugin that parses DocumentSummary information from an OLECF file."""
NAME = 'olecf_document_summary'
REQUIRED_ITEMS = frozenset(['\005DocumentSummaryInformation'])
def GetEntries(self, root_item, items, **unused_kwargs):
"""Generate event based on the document summary item.
Args:
root_item: The root item of the OLECF file.
      items: A list of all items discovered in the root.
Yields:
Event objects (instance of OleCfDocumentSummaryInfoEvent).
"""
creation_time, modification_time = self.GetTimestamps(root_item)
for item in items:
if creation_time:
yield OleCfDocumentSummaryInfoEvent(
creation_time, eventdata.EventTimestamp.CREATION_TIME, item)
if modification_time:
yield OleCfDocumentSummaryInfoEvent(
modification_time, eventdata.EventTimestamp.MODIFICATION_TIME,
item)
class SummaryInfoPlugin(interface.OlecfPlugin):
"""Plugin that parses the SummaryInformation item from an OLECF file."""
NAME = 'olecf_summary'
REQUIRED_ITEMS = frozenset(['\005SummaryInformation'])
def GetEntries(self, root_item, items, **unused_kwargs):
"""Generate event based on the summary information item.
Args:
root_item: The root item of the OLECF file.
      items: A list of all items discovered in the root.
Yields:
Event objects (instance of OleCfSummaryInfoEvent).
"""
root_creation_time, root_modification_time = self.GetTimestamps(root_item)
for item in items:
summary_information_object = OleCfSummaryInfo(
item, root_creation_time, root_modification_time)
for event_object in summary_information_object.GetEventObjects():
yield event_object
| iwm911/plaso | plaso/parsers/olecf_plugins/summary.py | Python | apache-2.0 | 14,885 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'TwitterRecentEntriesItem'
db.create_table(u'contentitem_fluentcms_twitterfeed_twitterrecententriesitem', (
(u'contentitem_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fluent_contents.ContentItem'], unique=True, primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
('twitter_user', self.gf('django.db.models.fields.CharField')(max_length=75)),
('amount', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=5)),
('widget_id', self.gf('django.db.models.fields.CharField')(max_length=75)),
('footer_text', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
('include_replies', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'fluentcms_twitterfeed', ['TwitterRecentEntriesItem'])
# Adding model 'TwitterSearchItem'
db.create_table(u'contentitem_fluentcms_twitterfeed_twittersearchitem', (
(u'contentitem_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fluent_contents.ContentItem'], unique=True, primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
('query', self.gf('django.db.models.fields.CharField')(default='', max_length=200)),
('amount', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=5)),
('widget_id', self.gf('django.db.models.fields.CharField')(max_length=75)),
('footer_text', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
('include_replies', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'fluentcms_twitterfeed', ['TwitterSearchItem'])
def backwards(self, orm):
# Deleting model 'TwitterRecentEntriesItem'
db.delete_table(u'contentitem_fluentcms_twitterfeed_twitterrecententriesitem')
# Deleting model 'TwitterSearchItem'
db.delete_table(u'contentitem_fluentcms_twitterfeed_twittersearchitem')
models = {
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'fluent_contents.contentitem': {
'Meta': {'ordering': "('placeholder', 'sort_order')", 'object_name': 'ContentItem'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '15', 'db_index': 'True'}),
'parent_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contentitems'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['fluent_contents.Placeholder']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_fluent_contents.contentitem_set+'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'default': '1', 'db_index': 'True'})
},
'fluent_contents.placeholder': {
'Meta': {'unique_together': "(('parent_type', 'parent_id', 'slot'),)", 'object_name': 'Placeholder'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'default': "'m'", 'max_length': '1'}),
'slot': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
u'fluentcms_twitterfeed.twitterrecententriesitem': {
'Meta': {'ordering': "('placeholder', 'sort_order')", 'object_name': 'TwitterRecentEntriesItem', 'db_table': "u'contentitem_fluentcms_twitterfeed_twitterrecententriesitem'", '_ormbases': ['fluent_contents.ContentItem']},
'amount': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '5'}),
u'contentitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fluent_contents.ContentItem']", 'unique': 'True', 'primary_key': 'True'}),
'footer_text': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'include_replies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'twitter_user': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
'widget_id': ('django.db.models.fields.CharField', [], {'max_length': '75'})
},
u'fluentcms_twitterfeed.twittersearchitem': {
'Meta': {'ordering': "('placeholder', 'sort_order')", 'object_name': 'TwitterSearchItem', 'db_table': "u'contentitem_fluentcms_twitterfeed_twittersearchitem'", '_ormbases': ['fluent_contents.ContentItem']},
'amount': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '5'}),
u'contentitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fluent_contents.ContentItem']", 'unique': 'True', 'primary_key': 'True'}),
'footer_text': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'include_replies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'query': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'widget_id': ('django.db.models.fields.CharField', [], {'max_length': '75'})
}
}
complete_apps = ['fluentcms_twitterfeed'] | bashu/fluentcms-twitterfeed | fluentcms_twitterfeed/south_migrations/0001_initial.py | Python | apache-2.0 | 7,116 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
'''-------------------------------------------------------------------------
Copyright IBM Corp. 2015, 2015 All Rights Reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
Limitations under the License.
-------------------------------------------------------------------------'''
import StringIO
import os
import socket
from dragon.engine.clients import Clients
from dragon.openstack.common import log as logging
from dragon.openstack.common import exception
from dragon.workload_policy.actions import action
from dragon.workload_policy.actions import action_execution as ae
from oslo.config import cfg
import dragon.openstack.common.uuidutils as uuidutils
from dragon.template.heat_template import InstanceResource
LOG = logging.getLogger(__name__)
instance_image_opts = [
cfg.IntOpt('backup_image_object_size',
default=52428800,
help='The size in bytes of instance image objects')
]
CONF = cfg.CONF
CONF.register_opts(instance_image_opts)
class InstanceImageAction(action.Action):
is_global = True # store in global container
def __init__(self, context):
self.clients = Clients(context)
self._image_id = None
self._name = None
self._resource_id = None
self.data_block_size_bytes = CONF.backup_image_object_size
# super(action.Action, self).__init__(workload_action_excution_id)
def pre_protect(self, cntx, workload_action_excution_id,
resource_id):
pass
def post_protect(self, cntx, workload_action_excution_id,
resource_id):
pass
def protect(self, cntx, workload_action_excution_id,
resource_id, container_name):
LOG.debug("protecting instance (image copied) %s" % (resource_id))
instance = self.clients.nova().servers.get(resource_id)
self._image_id = instance.image['id']
self._name = instance.name
self._resource_id = resource_id
instance_copy_execution =\
ae.ActionExecution(workload_action_excution_id,
resource_id, self.id)
result = self._imagecopy(cntx, instance, container_name,
instance_copy_execution)
return result
def generate_template(self, context, template_gen):
instance = InstanceResource(self._image_id, self._name, resource_id=self._resource_id)
template_gen.add_instance(instance)
def failover(self, context, resource_id, resource_data, container_name):
return self._import_from_swift(context, resource_id,
resource_data, container_name)
def _import_from_swift(self, context, resource_id,
resource_data, container_name):
LOG.debug("resource %s data %s container %s" %
(resource_id, resource_data, container_name))
swift_client = self.clients.swift()
data_chunks = resource_data["chunks"]
image_id = resource_data["image_id"]
image_response_data = StringIO.StringIO()
for chunk in range(data_chunks):
swift_meta, image_response =\
swift_client.get_object(container_name,
image_id + "_" + str(chunk))
image_response_data.write(image_response)
try:
image = {}
image['name'] = resource_data["meta"]["name"]
image['size'] = resource_data["meta"]["size"]
image['disk_format'] = resource_data["meta"]["disk_format"]
image['container_format'] =\
resource_data["meta"]["container_format"]
image['id'] = uuidutils.generate_uuid()
image_response_data.seek(0, os.SEEK_SET)
self.clients.glance().images.create(data=image_response_data,
**image)
self._image_id = image['id']
self._name = resource_data["instance_name"]
return True
# except ImageAlreadyPresentException:
except Exception, e:
LOG.error(e)
return False
def _imagecopy(self, context, instance, container_name, action_excution):
backup_rec = {}
action_excution.set_status(context, 'uploaded to swift')
swift_conn = Clients(context).swift()
headers = {'X-Container-Meta-dr_state': 'processing'}
image = self.clients.glance().images.get(self._image_id)
# take the checksum as unique id
global_container_image_id = image._info['checksum']
image_response = image.data()
image_response_data = StringIO.StringIO()
for chunk in image_response:
image_response_data.write(chunk)
image_response_data.seek(0, os.SEEK_SET)
chunks = 0
while True:
data = image_response_data.read(self.data_block_size_bytes)
data_offset = image_response_data.tell()
LOG.debug("uploading image offset %s chunks %s"
% (data_offset, chunks))
if data == '':
break
try:
swift_conn.put_object(container_name,
global_container_image_id + "_" +
str(chunks),
data,
content_length=len(data))
chunks += 1
except socket.error as err:
dr_state = 'DR image backup failed'
action_excution.set_status(context, dr_state)
raise exception.SwiftConnectionFailed(reason=str(err))
dr_state = 'Protected'
backup_rec["metadata"] = instance.metadata
backup_rec["image_id"] = global_container_image_id
backup_rec["instance_name"] = self._name
backup_rec["meta"] = image.to_dict()
backup_rec["chunks"] = chunks
action_excution.set_status(context, dr_state)
return dr_state, backup_rec
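# Illustrative sketch (not part of the original driver): how many swift objects
# the chunked upload loop in _imagecopy produces for a given image size. The
# helper name `_example_chunk_count` and its arguments are assumptions added for
# documentation; 52428800 mirrors the `backup_image_object_size` default above.
def _example_chunk_count(image_size_bytes, chunk_size_bytes=52428800):
    full_chunks, remainder = divmod(image_size_bytes, chunk_size_bytes)
    return full_chunks + (1 if remainder else 0)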
| os-cloud-storage/openstack-workload-disaster-recovery | dragon/workload_policy/actions/plugins/instance_image_action.py | Python | apache-2.0 | 6,714 |
# Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class InternalFlowException(Exception):
pass
class ReturnException(InternalFlowException):
def __init__(self, value):
self._value = value
@property
def value(self):
return self._value
class BreakException(InternalFlowException):
pass
class ContinueException(InternalFlowException):
pass
class DslInvalidOperationError(Exception):
pass
class NoMethodFound(Exception):
def __init__(self, name):
super(NoMethodFound, self).__init__('Method "%s" is not found' % name)
class NoClassFound(Exception):
def __init__(self, name):
super(NoClassFound, self).__init__('Class "%s" is not found' % name)
class NoPackageFound(Exception):
def __init__(self, name):
super(NoPackageFound, self).__init__(
'Package "%s" is not found' % name)
class NoPackageForClassFound(Exception):
def __init__(self, name):
super(NoPackageForClassFound, self).__init__('Package for class "%s" '
'is not found' % name)
class NoObjectFoundError(Exception):
def __init__(self, object_id):
super(NoObjectFoundError, self).__init__(
'Object "%s" is not found in object store' % object_id)
class AmbiguousMethodName(Exception):
def __init__(self, name):
super(AmbiguousMethodName, self).__init__(
            'Found more than one method "%s"' % name)
class DslContractSyntaxError(Exception):
pass
class ContractViolationException(Exception):
pass
class ValueIsMissingError(Exception):
pass
class DslSyntaxError(Exception):
pass
class PropertyAccessError(Exception):
pass
class AmbiguousPropertyNameError(PropertyAccessError):
def __init__(self, name):
super(AmbiguousPropertyNameError, self).__init__(
            'Found more than one property "%s"' % name)
class NoWriteAccess(PropertyAccessError):
def __init__(self, name):
super(NoWriteAccess, self).__init__(
'Property "%s" is immutable to the caller' % name)
class NoWriteAccessError(PropertyAccessError):
def __init__(self, name):
super(NoWriteAccessError, self).__init__(
'Property "%s" is immutable to the caller' % name)
class PropertyReadError(PropertyAccessError):
def __init__(self, name, murano_class):
super(PropertyAccessError, self).__init__(
'Property "%s" in class "%s" cannot be read' %
(name, murano_class.name))
class PropertyWriteError(PropertyAccessError):
def __init__(self, name, murano_class):
super(PropertyAccessError, self).__init__(
'Property "%s" in class "%s" cannot be written' %
(name, murano_class.name))
class UninitializedPropertyAccessError(PropertyAccessError):
def __init__(self, name, murano_class):
super(PropertyAccessError, self).__init__(
'Access to uninitialized property '
'"%s" in class "%s" is forbidden' % (name, murano_class.name))
| sajuptpm/murano | murano/dsl/exceptions.py | Python | apache-2.0 | 3,632 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for learn.io.graph_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import tempfile
import tensorflow as tf
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
_VALID_FILE_PATTERN = "VALID"
_FILE_NAMES = [b"abc", b"def", b"ghi", b"jkl"]
_INVALID_FILE_PATTERN = "INVALID"
class GraphIOTest(tf.test.TestCase):
def _mock_glob(self, pattern):
if _VALID_FILE_PATTERN == pattern:
return _FILE_NAMES
self.assertEqual(_INVALID_FILE_PATTERN, pattern)
return []
def setUp(self):
super(GraphIOTest, self).setUp()
random.seed(42)
self._orig_glob = gfile.Glob
gfile.Glob = self._mock_glob
def tearDown(self):
gfile.Glob = self._orig_glob
super(GraphIOTest, self).tearDown()
def test_dequeue_batch_value_errors(self):
default_batch_size = 17
queue_capacity = 1234
num_threads = 3
name = "my_batch"
self.assertRaisesRegexp(
ValueError, "No files match",
tf.contrib.learn.io.read_batch_examples,
_INVALID_FILE_PATTERN, default_batch_size, tf.TFRecordReader,
False, num_epochs=None, queue_capacity=queue_capacity,
num_threads=num_threads, name=name)
self.assertRaisesRegexp(
ValueError, "Invalid batch_size",
tf.contrib.learn.io.read_batch_examples,
_VALID_FILE_PATTERN, None, tf.TFRecordReader,
False, num_epochs=None, queue_capacity=queue_capacity,
num_threads=num_threads, name=name)
self.assertRaisesRegexp(
ValueError, "Invalid batch_size",
tf.contrib.learn.io.read_batch_examples,
_VALID_FILE_PATTERN, -1, tf.TFRecordReader,
False, num_epochs=None, queue_capacity=queue_capacity,
num_threads=num_threads, name=name)
self.assertRaisesRegexp(
ValueError, "Invalid queue_capacity",
tf.contrib.learn.io.read_batch_examples,
_VALID_FILE_PATTERN, default_batch_size, tf.TFRecordReader,
False, num_epochs=None, queue_capacity=None,
num_threads=num_threads, name=name)
self.assertRaisesRegexp(
ValueError, "Invalid num_threads",
tf.contrib.learn.io.read_batch_examples,
_VALID_FILE_PATTERN, default_batch_size, tf.TFRecordReader,
False, num_epochs=None, queue_capacity=queue_capacity,
num_threads=None, name=name)
self.assertRaisesRegexp(
ValueError, "Invalid num_threads",
tf.contrib.learn.io.read_batch_examples,
_VALID_FILE_PATTERN, default_batch_size, tf.TFRecordReader,
False, num_epochs=None, queue_capacity=queue_capacity,
num_threads=-1, name=name)
self.assertRaisesRegexp(
ValueError, "Invalid batch_size",
tf.contrib.learn.io.read_batch_examples,
_VALID_FILE_PATTERN, queue_capacity + 1, tf.TFRecordReader,
False, num_epochs=None, queue_capacity=queue_capacity,
num_threads=1, name=name)
self.assertRaisesRegexp(
ValueError, "Invalid num_epochs",
tf.contrib.learn.io.read_batch_examples,
_VALID_FILE_PATTERN, default_batch_size, tf.TFRecordReader,
False, num_epochs=-1, queue_capacity=queue_capacity, num_threads=1,
name=name)
self.assertRaisesRegexp(
ValueError, "Invalid read_batch_size",
tf.contrib.learn.io.read_batch_examples,
_VALID_FILE_PATTERN, default_batch_size, tf.TFRecordReader,
False, num_epochs=None, queue_capacity=queue_capacity,
num_threads=1, read_batch_size=0, name=name)
def test_batch_record_features(self):
batch_size = 17
queue_capacity = 1234
name = "my_batch"
features = {"feature": tf.FixedLenFeature(shape=[0], dtype=tf.float32)}
with tf.Graph().as_default() as g, self.test_session(graph=g) as sess:
features = tf.contrib.learn.io.read_batch_record_features(
_VALID_FILE_PATTERN, batch_size, features, randomize_input=False,
queue_capacity=queue_capacity, reader_num_threads=2,
parser_num_threads=2, name=name)
self.assertEqual("%s/fifo_queue_1_Dequeue:0" % name,
features["feature"].name)
file_name_queue_name = "%s/file_name_queue" % name
file_names_name = "%s/input" % file_name_queue_name
example_queue_name = "%s/fifo_queue" % name
parse_example_queue_name = "%s/fifo_queue" % name
op_nodes = test_util.assert_ops_in_graph({
file_names_name: "Const",
file_name_queue_name: "FIFOQueue",
"%s/read/TFRecordReader" % name: "TFRecordReader",
example_queue_name: "FIFOQueue",
parse_example_queue_name: "FIFOQueue",
name: "QueueDequeueMany"
}, g)
self.assertAllEqual(_FILE_NAMES, sess.run(["%s:0" % file_names_name])[0])
self.assertEqual(
queue_capacity, op_nodes[example_queue_name].attr["capacity"].i)
def test_one_epoch(self):
batch_size = 17
queue_capacity = 1234
name = "my_batch"
with tf.Graph().as_default() as g, self.test_session(graph=g) as sess:
inputs = tf.contrib.learn.io.read_batch_examples(
_VALID_FILE_PATTERN, batch_size,
reader=tf.TFRecordReader, randomize_input=True,
num_epochs=1,
queue_capacity=queue_capacity, name=name)
self.assertEqual("%s:1" % name, inputs.name)
file_name_queue_name = "%s/file_name_queue" % name
file_name_queue_limit_name = (
"%s/limit_epochs/epochs" % file_name_queue_name)
file_names_name = "%s/input" % file_name_queue_name
example_queue_name = "%s/random_shuffle_queue" % name
op_nodes = test_util.assert_ops_in_graph({
file_names_name: "Const",
file_name_queue_name: "FIFOQueue",
"%s/read/TFRecordReader" % name: "TFRecordReader",
example_queue_name: "RandomShuffleQueue",
name: "QueueDequeueUpTo",
file_name_queue_limit_name: "Variable"
}, g)
self.assertEqual(
set(_FILE_NAMES), set(sess.run(["%s:0" % file_names_name])[0]))
self.assertEqual(
queue_capacity, op_nodes[example_queue_name].attr["capacity"].i)
def test_batch_randomized(self):
batch_size = 17
queue_capacity = 1234
name = "my_batch"
with tf.Graph().as_default() as g, self.test_session(graph=g) as sess:
inputs = tf.contrib.learn.io.read_batch_examples(
_VALID_FILE_PATTERN, batch_size,
reader=tf.TFRecordReader, randomize_input=True,
queue_capacity=queue_capacity, name=name)
self.assertEqual("%s:1" % name, inputs.name)
file_name_queue_name = "%s/file_name_queue" % name
file_names_name = "%s/input" % file_name_queue_name
example_queue_name = "%s/random_shuffle_queue" % name
op_nodes = test_util.assert_ops_in_graph({
file_names_name: "Const",
file_name_queue_name: "FIFOQueue",
"%s/read/TFRecordReader" % name: "TFRecordReader",
example_queue_name: "RandomShuffleQueue",
name: "QueueDequeueMany"
}, g)
self.assertEqual(
set(_FILE_NAMES), set(sess.run(["%s:0" % file_names_name])[0]))
self.assertEqual(
queue_capacity, op_nodes[example_queue_name].attr["capacity"].i)
def _create_temp_file(self, lines):
tempdir = tempfile.mkdtemp()
filename = os.path.join(tempdir, "temp_file")
gfile.Open(filename, "w").write(lines)
return filename
def _create_sorted_temp_files(self, lines_list):
tempdir = tempfile.mkdtemp()
filenames = []
for i, lines in enumerate(lines_list):
filename = os.path.join(tempdir, "temp_file%05d" % i)
gfile.Open(filename, "w").write(lines)
filenames.append(filename)
return filenames
def test_read_text_lines(self):
gfile.Glob = self._orig_glob
filename = self._create_temp_file("ABC\nDEF\nGHK\n")
batch_size = 1
queue_capacity = 5
name = "my_batch"
with tf.Graph().as_default() as g, self.test_session(graph=g) as session:
inputs = tf.contrib.learn.io.read_batch_examples(
filename, batch_size, reader=tf.TextLineReader,
randomize_input=False, num_epochs=1, queue_capacity=queue_capacity,
name=name)
session.run(tf.initialize_local_variables())
coord = tf.train.Coordinator()
tf.train.start_queue_runners(session, coord=coord)
self.assertAllEqual(session.run(inputs), [b"ABC"])
self.assertAllEqual(session.run(inputs), [b"DEF"])
self.assertAllEqual(session.run(inputs), [b"GHK"])
with self.assertRaises(errors.OutOfRangeError):
session.run(inputs)
coord.request_stop()
def test_read_text_lines_multifile(self):
gfile.Glob = self._orig_glob
filenames = self._create_sorted_temp_files(["ABC\n", "DEF\nGHK\n"])
batch_size = 1
queue_capacity = 5
name = "my_batch"
with tf.Graph().as_default() as g, self.test_session(graph=g) as session:
inputs = tf.contrib.learn.io.read_batch_examples(
filenames, batch_size, reader=tf.TextLineReader,
randomize_input=False, num_epochs=1, queue_capacity=queue_capacity,
name=name)
session.run(tf.initialize_local_variables())
coord = tf.train.Coordinator()
tf.train.start_queue_runners(session, coord=coord)
self.assertAllEqual(session.run(inputs), [b"ABC"])
self.assertAllEqual(session.run(inputs), [b"DEF"])
self.assertAllEqual(session.run(inputs), [b"GHK"])
with self.assertRaises(errors.OutOfRangeError):
session.run(inputs)
coord.request_stop()
def test_batch_text_lines(self):
gfile.Glob = self._orig_glob
filename = self._create_temp_file("A\nB\nC\nD\nE\n")
batch_size = 3
queue_capacity = 10
name = "my_batch"
with tf.Graph().as_default() as g, self.test_session(graph=g) as session:
inputs = tf.contrib.learn.io.read_batch_examples(
[filename], batch_size, reader=tf.TextLineReader,
randomize_input=False, num_epochs=1, queue_capacity=queue_capacity,
read_batch_size=10, name=name)
session.run(tf.initialize_local_variables())
coord = tf.train.Coordinator()
tf.train.start_queue_runners(session, coord=coord)
self.assertAllEqual(session.run(inputs), [b"A", b"B", b"C"])
self.assertAllEqual(session.run(inputs), [b"D", b"E"])
with self.assertRaises(errors.OutOfRangeError):
session.run(inputs)
coord.request_stop()
def test_keyed_read_text_lines(self):
gfile.Glob = self._orig_glob
filename = self._create_temp_file("ABC\nDEF\nGHK\n")
batch_size = 1
queue_capacity = 5
name = "my_batch"
with tf.Graph().as_default() as g, self.test_session(graph=g) as session:
keys, inputs = tf.contrib.learn.io.read_keyed_batch_examples(
filename, batch_size,
reader=tf.TextLineReader, randomize_input=False,
num_epochs=1, queue_capacity=queue_capacity, name=name)
session.run(tf.initialize_local_variables())
coord = tf.train.Coordinator()
tf.train.start_queue_runners(session, coord=coord)
self.assertAllEqual(session.run([keys, inputs]),
[[filename.encode("utf-8") + b":1"], [b"ABC"]])
self.assertAllEqual(session.run([keys, inputs]),
[[filename.encode("utf-8") + b":2"], [b"DEF"]])
self.assertAllEqual(session.run([keys, inputs]),
[[filename.encode("utf-8") + b":3"], [b"GHK"]])
with self.assertRaises(errors.OutOfRangeError):
session.run(inputs)
coord.request_stop()
def test_keyed_parse_json(self):
gfile.Glob = self._orig_glob
filename = self._create_temp_file(
'{"features": {"feature": {"age": {"int64_list": {"value": [0]}}}}}\n'
'{"features": {"feature": {"age": {"int64_list": {"value": [1]}}}}}\n'
'{"features": {"feature": {"age": {"int64_list": {"value": [2]}}}}}\n'
)
batch_size = 1
queue_capacity = 5
name = "my_batch"
with tf.Graph().as_default() as g, self.test_session(graph=g) as session:
dtypes = {"age": tf.FixedLenFeature([1], tf.int64)}
parse_fn = lambda example: tf.parse_single_example( # pylint: disable=g-long-lambda
tf.decode_json_example(example), dtypes)
keys, inputs = tf.contrib.learn.io.read_keyed_batch_examples(
filename, batch_size,
reader=tf.TextLineReader, randomize_input=False,
num_epochs=1, queue_capacity=queue_capacity,
parse_fn=parse_fn, name=name)
session.run(tf.initialize_local_variables())
coord = tf.train.Coordinator()
tf.train.start_queue_runners(session, coord=coord)
key, age = session.run([keys, inputs["age"]])
self.assertAllEqual(age, [[0]])
self.assertAllEqual(key, [filename.encode("utf-8") + b":1"])
key, age = session.run([keys, inputs["age"]])
self.assertAllEqual(age, [[1]])
self.assertAllEqual(key, [filename.encode("utf-8") + b":2"])
key, age = session.run([keys, inputs["age"]])
self.assertAllEqual(age, [[2]])
self.assertAllEqual(key, [filename.encode("utf-8") + b":3"])
with self.assertRaises(errors.OutOfRangeError):
session.run(inputs)
coord.request_stop()
if __name__ == "__main__":
tf.test.main()
| rew4332/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/graph_io_test.py | Python | apache-2.0 | 14,145 |
import asyncio
from aio_pika import connect, IncomingMessage, ExchangeType
loop = asyncio.get_event_loop()
async def on_message(message: IncomingMessage):
async with message.process():
print("[x] %r" % message.body)
async def main():
# Perform connection
connection = await connect(
"amqp://guest:guest@localhost/", loop=loop
)
# Creating a channel
channel = await connection.channel()
await channel.set_qos(prefetch_count=1)
logs_exchange = await channel.declare_exchange(
"logs", ExchangeType.FANOUT
)
# Declaring queue
queue = await channel.declare_queue(exclusive=True)
# Binding the queue to the exchange
await queue.bind(logs_exchange)
    # Start listening to the exclusive queue bound to the exchange above
await queue.consume(on_message)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.create_task(main())
# we enter a never-ending loop that waits for data
# and runs callbacks whenever necessary.
print(" [*] Waiting for logs. To exit press CTRL+C")
loop.run_forever()
| mosquito/aio-pika | docs/source/rabbitmq-tutorial/examples/3-publish-subscribe/receive_logs.py | Python | apache-2.0 | 1,094 |
from django.shortcuts import render, redirect
from django.http import HttpResponseRedirect
from .models import SupportProject
# Create your views here.
def index( request ):
sp = SupportProject.objects.all()
if sp.count() == 1:
return HttpResponseRedirect( sp.first().project.get_absolute_url() )
else:
context_dict = { 'sps' : sp, }
return render( request, 'support/index.html', context_dict )
| postpdm/ich_bau | support/views.py | Python | apache-2.0 | 440 |
# Still some problems...
import time
import shutil
from configobj import ConfigObj
NOVA_API_CONF = "/etc/nova/api-paste.ini"
OS_API_SEC = "composite:openstack_compute_api_v2"
DR_FILTER_TARGET_KEY = "keystone_nolimit"
DR_FILTER_TARGET_KEY_VALUE = "compute_req_id faultwrap sizelimit " \
"authtoken keystonecontext drfilter " \
"osapi_compute_app_v2"
DR_SEC = "filter:drfilter"
DR_KEY = "paste.filter_factory"
DR_KEY_VALUE = "drfilter.urlforwarding:url_forwarding_factory"
# Backup /etc/nova/api-paste.ini
now = time.strftime('%Y%m%d%H%M%S')
target = NOVA_API_CONF + "." + now + ".bak"
shutil.copyfile(NOVA_API_CONF, target)
# Update /etc/nova/api-paste.ini
conf = ConfigObj(NOVA_API_CONF)
conf[OS_API_SEC][DR_FILTER_TARGET_KEY] = DR_FILTER_TARGET_KEY_VALUE
conf[DR_SEC] = {}
conf[DR_SEC][DR_KEY] = DR_KEY_VALUE
conf.write()
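# For reference, after conf.write() the rewritten api-paste.ini is expected to
# contain a section roughly like this (illustrative, reconstructed from the
# constants above, not captured output):
#   [filter:drfilter]
#   paste.filter_factory = drfilter.urlforwarding:url_forwarding_factory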
for sec in conf:
print(sec)
for key in conf[sec]:
print("\t" + key + " = " + conf[sec][key])
| fs714/drfilter | tools/novasetup.py | Python | apache-2.0 | 995 |
#!/usr/bin/env python
from absl import app
from absl.testing import absltest
from grr_response_server.databases import db_time_test
from grr_response_server.databases import mysql_test
from grr.test_lib import test_lib
class MysqlClientsTest(db_time_test.DatabaseTimeTestMixin,
mysql_test.MysqlTestBase, absltest.TestCase):
pass
if __name__ == "__main__":
app.run(test_lib.main)
| google/grr | grr/server/grr_response_server/databases/mysql_time_test.py | Python | apache-2.0 | 411 |
from __future__ import absolute_import, division, print_function, unicode_literals
import time
import logging
import ujson as json
from elasticsearch import Elasticsearch
from elasticsearch.client import IndicesClient
from elasticsearch.exceptions import ConnectionTimeout
from .config import config
from .es_mappings import ES_MAPPINGS, ES_SIMILARITIES
class ElasticsearchBulkIndexer(object):
""" Bulk indexer for Elasticsearch """
servers = {
"docs": [config["ELASTICSEARCHDOCS"]],
"text": [config["ELASTICSEARCHTEXT"]]
}
def __init__(self, index_name, batch_size=500):
self.index_name = index_name
self.buffer = []
self.batch_size = batch_size
self.total_size = 0
self.connected = False
self.client = None
def connect(self):
""" Establish the ES connection if not already done """
if self.connected:
return
self.connected = True
self.client = Elasticsearch(self.servers[self.index_name], timeout=60)
def index(self, _id, hit):
""" Queue one document for indexing. """
if not self.connected:
self.connect()
# https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
self.buffer.append('{"index":{"_id":"%s"}}\n%s\n' % (
_id,
json.dumps(hit) # pylint: disable=no-member
))
if len(self.buffer) >= self.batch_size:
self.flush()
def empty(self):
""" Empty the ES index. Dangerous operation! """
if config["ENV"] not in ("local", "ci"):
raise Exception("empty() not allowed in env %s" % config["ENV"])
if self.indices().exists(index=self.index_name):
self.indices().delete(index=self.index_name)
def refresh(self):
""" Sends a "refresh" to the ES index, forcing the actual indexing of what was sent up until now """
if not self.connected:
return
if config["ENV"] not in ("local", "ci"):
raise Exception("refresh() not allowed in env %s" % config["ENV"])
self.indices().refresh(index=self.index_name)
def flush(self, retries=10):
""" Sends the current indexing batch to ES """
if len(self.buffer) == 0:
return
if not self.connected:
self.connect()
self.total_size += len(self.buffer)
logging.debug(
"ES: Flushing %s docs to index=%s (total: %s)",
len(self.buffer), self.index_name, self.total_size
)
try:
self.bulk_index()
        except ConnectionTimeout as e:
if retries == 0:
raise e
time.sleep(60)
return self.flush(retries=retries - 1)
self.buffer = []
def bulk_index(self):
""" Indexes the current buffer to Elasticsearch, bypassing the bulk() helper for performance """
connection = self.client.transport.get_connection()
bulk_url = "/%s/page/_bulk" % self.index_name
body = "".join(self.buffer)
# TODO retries
# status, headers, data
status, _, _ = connection.perform_request("POST", bulk_url, body=body)
if status != 200:
raise Exception("Elasticsearch returned status=%s" % status)
# TODO: look for errors there?
# parsed = json.loads(data)
def indices(self):
""" Returns an elasticsearch.client.IndicesClient instance """
if not self.connected:
self.connect()
return IndicesClient(self.client)
def create(self, empty=False):
""" Creates the ES index """
if empty:
self.empty()
mappings = ES_MAPPINGS[self.index_name]
self.indices().create(index=self.index_name, body={
"settings": {
# TODO: this configuration should be set somewhere else! (cosr-ops?)
"number_of_shards": 5,
"number_of_replicas": 0,
# In prod we don't refresh manually so this is the only setting
# that will make ES periodically refresh to avoid storing only in temporary files
# as we index
"refresh_interval": "60s",
"similarity": ES_SIMILARITIES
},
"mappings": mappings
})
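# Illustrative usage sketch (not part of the original module). It assumes the
# "docs" Elasticsearch server configured above is reachable and that
# `documents` is an iterable of (doc_id, doc_dict) pairs:
#
#     indexer = ElasticsearchBulkIndexer("docs", batch_size=500)
#     for doc_id, doc in documents:
#         indexer.index(doc_id, doc)  # auto-flushes every `batch_size` docs
#     indexer.flush()                 # send whatever is left in the buffer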
| commonsearch/cosr-back | cosrlib/es.py | Python | apache-2.0 | 4,391 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib.parse
from openstack import exceptions
from openstack import resource
class Resource(resource.Resource):
@classmethod
def find(cls, session, name_or_id, ignore_missing=True, **params):
"""Find a resource by its name or id.
:param session: The session to use for making this request.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:param name_or_id: This resource's identifier, if needed by
the request. The default is ``None``.
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be
raised when the resource does not exist.
When set to ``True``, None will be returned when
attempting to find a nonexistent resource.
:param dict params: Any additional parameters to be passed into
underlying methods, such as to
:meth:`~openstack.resource.Resource.existing`
in order to pass on URI parameters.
:return: The :class:`Resource` object matching the given name or id
or None if nothing matches.
:raises: :class:`openstack.exceptions.DuplicateResource` if more
than one resource is found for this request.
:raises: :class:`openstack.exceptions.ResourceNotFound` if nothing
is found and ignore_missing is ``False``.
"""
session = cls._get_session(session)
# Try to short-circuit by looking directly for a matching ID.
try:
match = cls.existing(
id=name_or_id,
connection=session._get_connection(),
**params)
return match.fetch(session)
except exceptions.SDKException:
# DNS may return 400 when we try to do GET with name
pass
if ('name' in cls._query_mapping._mapping.keys()
and 'name' not in params):
params['name'] = name_or_id
data = cls.list(session, **params)
result = cls._get_one_match(name_or_id, data)
if result is not None:
return result
if ignore_missing:
return None
raise exceptions.ResourceNotFound(
"No %s found for %s" % (cls.__name__, name_or_id))
@classmethod
def _get_next_link(cls, uri, response, data, marker, limit, total_yielded):
next_link = None
params = {}
if isinstance(data, dict):
links = data.get('links')
if links:
next_link = links.get('next')
total = data.get('metadata', {}).get('total_count')
if total:
# We have a kill switch
total_count = int(total)
if total_count <= total_yielded:
return None, params
# Parse params from Link (next page URL) into params.
        # This prevents duplication of query parameters which, with a large
        # number of pages, would eventually result in an HTTP 414 error.
if next_link:
parts = urllib.parse.urlparse(next_link)
query_params = urllib.parse.parse_qs(parts.query)
params.update(query_params)
next_link = urllib.parse.urljoin(next_link, parts.path)
# If we still have no link, and limit was given and is non-zero,
        # and the number of records yielded equals the limit, then the caller
        # is paginating manually with marker/limit, so go ahead and try once more.
if not next_link and limit:
next_link = uri
params['marker'] = marker
params['limit'] = limit
return next_link, params
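# Illustrative example (not part of the original module): if a list response
# carries {"links": {"next": "https://dns.example.com/v2/zones?marker=abc&limit=2"}},
# _get_next_link() returns that URL with its query string stripped plus the
# params {"marker": ["abc"], "limit": ["2"]}, so query parameters are sent
# separately instead of being duplicated on every subsequent page request.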
| stackforge/python-openstacksdk | openstack/dns/v2/_base.py | Python | apache-2.0 | 4,338 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains the tests for the generic text parser."""
import os
import unittest
from dfvfs.lib import definitions
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import resolver as path_spec_resolver
import pyparsing
from plaso.lib import errors
from plaso.lib import event
from plaso.lib import eventdata
from plaso.lib import lexer
from plaso.lib import parser
from plaso.lib import text_parser
class TestTextEvent(event.TextEvent):
"""Test text event."""
DATA_TYPE = 'test:parser:text'
class TestTextEventFormatter(eventdata.EventFormatter):
"""Test text event formatter."""
DATA_TYPE = 'test:parser:text'
FORMAT_STRING = u'{body}'
SOURCE_LONG = 'Test Text Parser'
class TestTextParser(text_parser.SlowLexicalTextParser):
"""Implement a text parser object that can successfully parse a text file.
  To be able to achieve that, one function has to be implemented: ParseDate.
"""
NAME = 'test_text'
tokens = [
lexer.Token('INITIAL',
r'^([\d\/]+) ', 'SetDate', 'TIME'),
lexer.Token('TIME', r'([0-9:\.]+) ', 'SetTime', 'STRING_HOST'),
lexer.Token('STRING_HOST', r'([^\-]+)- ', 'ParseStringHost', 'STRING'),
lexer.Token('STRING', '([^\n]+)', 'ParseString', ''),
lexer.Token('STRING', '\n', 'ParseMessage', 'INITIAL')]
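  # With the token chain above this parser is meant to consume lines shaped
  # roughly like "1/1/2011 05:23:15 myuser:myhost- first line." (illustrative);
  # the date, time, user:host and message groups feed SetDate, SetTime,
  # ParseStringHost and ParseString/ParseMessage respectively.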
def ParseStringHost(self, match, **_):
user, host = match.group(1).split(':')
self.attributes['hostname'] = host
self.attributes['username'] = user
def SetDate(self, match, **_):
month, day, year = match.group(1).split('/')
self.attributes['imonth'] = int(month)
self.attributes['iyear'] = int(year)
self.attributes['iday'] = int(day)
def Scan(self, unused_file_entry):
pass
def CreateEvent(self, timestamp, offset, attributes):
event_object = TestTextEvent(timestamp, attributes)
event_object.offset = offset
return event_object
class BaseParserTest(unittest.TestCase):
"""An unit test for the plaso parser library."""
def testParserNotImplemented(self):
"""Test the base class Parse function."""
self.assertRaises(TypeError, parser.BaseParser)
class TextParserTest(unittest.TestCase):
"""An unit test for the plaso parser library."""
_TEST_DATA_PATH = os.path.join(os.getcwd(), 'test_data')
# Show full diff results, part of TestCase so does not follow our naming
# conventions.
maxDiff = None
def _GetTestFilePath(self, path_segments):
"""Retrieves the path of a test file relative to the test data directory.
Args:
path_segments: the path segments inside the test data directory.
Returns:
A path of the test file.
"""
# Note that we need to pass the individual path segments to os.path.join
# and not a list.
return os.path.join(self._TEST_DATA_PATH, *path_segments)
def _GetTestFileEntry(self, path):
"""Retrieves the test file entry.
Args:
path: the path of the test file.
Returns:
The test file entry (instance of dfvfs.FileEntry).
"""
path_spec = path_spec_factory.Factory.NewPathSpec(
definitions.TYPE_INDICATOR_OS, location=path)
return path_spec_resolver.Resolver.OpenFileEntry(path_spec)
def setUp(self):
pre_obj = event.PreprocessObject()
self._parser = TestTextParser(pre_obj, None)
def testTextParserFail(self):
"""Test a text parser that will not match against content."""
test_file = self._GetTestFilePath(['text_parser', 'test1.txt'])
file_entry = self._GetTestFileEntry(test_file)
text_generator = self._parser.Parse(file_entry)
self.assertRaises(errors.UnableToParseFile, list, text_generator)
def testTextParserSuccess(self):
"""Test a text parser that will match against content."""
test_file = self._GetTestFilePath(['text_parser', 'test2.txt'])
file_entry = self._GetTestFileEntry(test_file)
text_generator = self._parser.Parse(file_entry)
first_entry = text_generator.next()
second_entry = text_generator.next()
msg1, _ = eventdata.EventFormatterManager.GetMessageStrings(first_entry)
self.assertEquals(first_entry.timestamp, 1293859395000000)
self.assertEquals(msg1, 'first line.')
self.assertEquals(first_entry.hostname, 'myhost')
self.assertEquals(first_entry.username, 'myuser')
msg2, _ = eventdata.EventFormatterManager.GetMessageStrings(second_entry)
self.assertEquals(second_entry.timestamp, 693604686000000)
self.assertEquals(msg2, 'second line.')
self.assertEquals(second_entry.hostname, 'myhost')
self.assertEquals(second_entry.username, 'myuser')
class PyParserTest(unittest.TestCase):
"""Few unit tests for the pyparsing unit."""
def testPyConstantIPv4(self):
"""Run few tests to make sure the constants are working."""
self.assertTrue(self._CheckIPv4('123.51.234.52'))
self.assertTrue(self._CheckIPv4('255.254.23.1'))
self.assertTrue(self._CheckIPv4('1.1.34.2'))
self.assertFalse(self._CheckIPv4('1.1.34.258'))
self.assertFalse(self._CheckIPv4('a.1.34.258'))
self.assertFalse(self._CheckIPv4('.34.258'))
self.assertFalse(self._CheckIPv4('34.258'))
self.assertFalse(self._CheckIPv4('10.52.34.258'))
def testPyConstantOctet(self):
with self.assertRaises(pyparsing.ParseException):
text_parser.PyparsingConstants.IPV4_OCTET.parseString('526')
with self.assertRaises(pyparsing.ParseException):
text_parser.PyparsingConstants.IPV4_OCTET.parseString('1026')
with self.assertRaises(pyparsing.ParseException):
text_parser.PyparsingConstants.IPV4_OCTET.parseString(
'a9', parseAll=True)
def testPyConstantOthers(self):
with self.assertRaises(pyparsing.ParseException):
text_parser.PyparsingConstants.MONTH.parseString('MMo')
with self.assertRaises(pyparsing.ParseException):
text_parser.PyparsingConstants.MONTH.parseString('M')
with self.assertRaises(pyparsing.ParseException):
text_parser.PyparsingConstants.MONTH.parseString('March', parseAll=True)
self.assertTrue(text_parser.PyparsingConstants.MONTH.parseString('Jan'))
line = '# This is a comment.'
parsed_line = text_parser.PyparsingConstants.COMMENT_LINE_HASH.parseString(
line)
self.assertEquals(parsed_line[-1], 'This is a comment.')
self.assertEquals(len(parsed_line), 2)
def _CheckIPv4(self, ip_address):
# TODO: Add a similar IPv6 check.
try:
text_parser.PyparsingConstants.IPV4_ADDRESS.parseString(ip_address)
return True
except pyparsing.ParseException:
return False
if __name__ == '__main__':
unittest.main()
| iwm911/plaso | plaso/lib/text_parser_test.py | Python | apache-2.0 | 7,313 |
# Copyright 2015 Mirantis inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from manilaclient import api_versions
from manilaclient import base
from manilaclient.openstack.common.apiclient import base as common_base
class ShareInstance(common_base.Resource):
"""A share is an extra block level storage to the OpenStack instances."""
def __repr__(self):
return "<Share: %s>" % self.id
def force_delete(self):
"""Delete the specified share ignoring its current state."""
self.manager.force_delete(self)
def reset_state(self, state):
"""Update the share with the provided state."""
self.manager.reset_state(self, state)
class ShareInstanceManager(base.ManagerWithFind):
"""Manage :class:`ShareInstances` resources."""
resource_class = ShareInstance
@api_versions.wraps("2.3")
def get(self, instance):
"""Get a share instance.
:param instance: either share object or text with its ID.
:rtype: :class:`ShareInstance`
"""
share_id = common_base.getid(instance)
return self._get("/share_instances/%s" % share_id, "share_instance")
@api_versions.wraps("2.3")
def list(self):
"""List all share instances."""
return self._list('/share_instances', 'share_instances')
def _action(self, action, instance, info=None, **kwargs):
"""Perform a share instnace 'action'.
:param action: text with action name.
:param instance: either share object or text with its ID.
:param info: dict with data for specified 'action'.
:param kwargs: dict with data to be provided for action hooks.
"""
body = {action: info}
self.run_hooks('modify_body_for_action', body, **kwargs)
url = '/share_instances/%s/action' % common_base.getid(instance)
return self.api.client.post(url, body=body)
def _do_force_delete(self, instance, action_name="force_delete"):
"""Delete a share instance forcibly - share status will be avoided.
:param instance: either share instance object or text with its ID.
"""
return self._action(action_name, common_base.getid(instance))
@api_versions.wraps("2.3", "2.6")
def force_delete(self, instance):
return self._do_force_delete(instance, "os-force_delete")
@api_versions.wraps("2.7") # noqa
def force_delete(self, instance):
return self._do_force_delete(instance, "force_delete")
def _do_reset_state(self, instance, state, action_name):
"""Update the provided share instance with the provided state.
:param instance: either share object or text with its ID.
:param state: text with new state to set for share.
"""
return self._action(action_name, instance, {"status": state})
@api_versions.wraps("2.3", "2.6")
def reset_state(self, instance, state):
return self._do_reset_state(instance, state, "os-reset_status")
@api_versions.wraps("2.7") # noqa
def reset_state(self, instance, state):
return self._do_reset_state(instance, state, "reset_status")
| sniperganso/python-manilaclient | manilaclient/v2/share_instances.py | Python | apache-2.0 | 3,686 |
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create / interact with a batch of updates / deletes."""
from gcloud._localstack import _LocalStack
from gcloud.datastore import _implicit_environ
from gcloud.datastore import helpers
from gcloud.datastore.key import _dataset_ids_equal
from gcloud.datastore import _datastore_v1_pb2 as datastore_pb
_BATCHES = _LocalStack()
class Batch(object):
"""An abstraction representing a collected group of updates / deletes.
    Used to build up a bulk mutation.
    For example, the following snippet of code will put the two ``save``
    operations and the delete operation into the same mutation, and send
them to the server in a single API request::
>>> from gcloud.datastore.batch import Batch
>>> batch = Batch()
>>> batch.put(entity1)
>>> batch.put(entity2)
>>> batch.delete(key3)
>>> batch.commit()
You can also use a batch as a context manager, in which case the
``commit`` will be called automatically if its block exits without
raising an exception::
>>> with Batch() as batch:
... batch.put(entity1)
... batch.put(entity2)
... batch.delete(key3)
By default, no updates will be sent if the block exits with an error::
>>> from gcloud import datastore
>>> dataset = datastore.get_dataset('dataset-id')
>>> with Batch() as batch:
... do_some_work(batch)
... raise Exception() # rolls back
"""
def __init__(self, dataset_id=None, connection=None):
""" Construct a batch.
:type dataset_id: :class:`str`.
:param dataset_id: The ID of the dataset.
:type connection: :class:`gcloud.datastore.connection.Connection`
:param connection: The connection used to connect to datastore.
:raises: :class:`ValueError` if either a connection or dataset ID
are not set.
"""
self._connection = connection or _implicit_environ.CONNECTION
self._dataset_id = dataset_id or _implicit_environ.DATASET_ID
if self._connection is None or self._dataset_id is None:
raise ValueError('A batch must have a connection and '
'a dataset ID set.')
self._mutation = datastore_pb.Mutation()
self._auto_id_entities = []
@staticmethod
def current():
"""Return the topmost batch / transaction, or None."""
return _BATCHES.top
@property
def dataset_id(self):
"""Getter for dataset ID in which the batch will run.
:rtype: :class:`str`
:returns: The dataset ID in which the batch will run.
"""
return self._dataset_id
@property
def connection(self):
"""Getter for connection over which the batch will run.
:rtype: :class:`gcloud.datastore.connection.Connection`
:returns: The connection over which the batch will run.
"""
return self._connection
@property
def mutation(self):
"""Getter for the current mutation.
Every batch is committed with a single Mutation
representing the 'work' to be done as part of the batch.
Inside a batch, calling ``batch.put()`` with an entity, or
``batch.delete`` with a key, builds up the mutation.
This getter returns the Mutation protobuf that
has been built-up so far.
:rtype: :class:`gcloud.datastore._datastore_v1_pb2.Mutation`
:returns: The Mutation protobuf to be sent in the commit request.
"""
return self._mutation
def add_auto_id_entity(self, entity):
"""Adds an entity to the list of entities to update with IDs.
When an entity has a partial key, calling ``save()`` adds an
insert_auto_id entry in the mutation. In order to make sure we
update the Entity once the transaction is committed, we need to
keep track of which entities to update (and the order is
important).
When you call ``save()`` on an entity inside a transaction, if
the entity has a partial key, it adds itself to the list of
entities to be updated once the transaction is committed by
calling this method.
:type entity: :class:`gcloud.datastore.entity.Entity`
:param entity: The entity to be updated with a completed key.
        :raises: ValueError if the entity's key is already completed.
"""
if not entity.key.is_partial:
raise ValueError("Entity has a completed key")
self._auto_id_entities.append(entity)
def put(self, entity):
"""Remember an entity's state to be saved during ``commit``.
.. note::
Any existing properties for the entity will be replaced by those
currently set on this instance. Already-stored properties which do
not correspond to keys set on this instance will be removed from
the datastore.
.. note::
Property values which are "text" ('unicode' in Python2, 'str' in
Python3) map to 'string_value' in the datastore; values which are
"bytes" ('str' in Python2, 'bytes' in Python3) map to 'blob_value'.
:type entity: :class:`gcloud.datastore.entity.Entity`
:param entity: the entity to be saved.
:raises: ValueError if entity has no key assigned, or if the key's
``dataset_id`` does not match ours.
"""
if entity.key is None:
raise ValueError("Entity must have a key")
if not _dataset_ids_equal(self._dataset_id, entity.key.dataset_id):
raise ValueError("Key must be from same dataset as batch")
_assign_entity_to_mutation(
self.mutation, entity, self._auto_id_entities)
def delete(self, key):
"""Remember a key to be deleted durring ``commit``.
:type key: :class:`gcloud.datastore.key.Key`
:param key: the key to be deleted.
:raises: ValueError if key is not complete, or if the key's
``dataset_id`` does not match ours.
"""
if key.is_partial:
raise ValueError("Key must be complete")
if not _dataset_ids_equal(self._dataset_id, key.dataset_id):
raise ValueError("Key must be from same dataset as batch")
key_pb = key.to_protobuf()
helpers._add_keys_to_request(self.mutation.delete, [key_pb])
def begin(self):
"""No-op
Overridden by :class:`gcloud.datastore.transaction.Transaction`.
"""
pass
def commit(self):
"""Commits the batch.
This is called automatically upon exiting a with statement,
however it can be called explicitly if you don't want to use a
context manager.
"""
response = self.connection.commit(self._dataset_id, self.mutation)
# If the back-end returns without error, we are guaranteed that
# the response's 'insert_auto_id_key' will match (length and order)
# the request's 'insert_auto_id` entities, which are derived from
# our '_auto_id_entities' (no partial success).
for new_key_pb, entity in zip(response.insert_auto_id_key,
self._auto_id_entities):
new_id = new_key_pb.path_element[-1].id
entity.key = entity.key.completed_key(new_id)
def rollback(self):
"""No-op
Overridden by :class:`gcloud.datastore.transaction.Transaction`.
"""
pass
def __enter__(self):
_BATCHES.push(self)
self.begin()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
try:
if exc_type is None:
self.commit()
else:
self.rollback()
finally:
_BATCHES.pop()
def _assign_entity_to_mutation(mutation_pb, entity, auto_id_entities):
"""Copy ``entity`` into appropriate slot of ``mutation_pb``.
If ``entity.key`` is incomplete, append ``entity`` to ``auto_id_entities``
for later fixup during ``commit``.
Helper method for ``Batch.put``.
:type mutation_pb: :class:`gcloud.datastore._datastore_v1_pb2.Mutation`
    :param mutation_pb: the Mutation protobuf for the batch / transaction.
    :type entity: :class:`gcloud.datastore.entity.Entity`
    :param entity: the entity being updated within the batch / transaction.
    :type auto_id_entities: list of :class:`gcloud.datastore.entity.Entity`
    :param auto_id_entities: entities with partial keys, to be fixed up
during commit.
"""
auto_id = entity.key.is_partial
key_pb = entity.key.to_protobuf()
key_pb = helpers._prepare_key_for_request(key_pb)
if auto_id:
insert = mutation_pb.insert_auto_id.add()
auto_id_entities.append(entity)
else:
# We use ``upsert`` for entities with completed keys, rather than
# ``insert`` or ``update``, in order not to create race conditions
# based on prior existence / removal of the entity.
insert = mutation_pb.upsert.add()
insert.key.CopyFrom(key_pb)
for name, value in entity.items():
value_is_list = isinstance(value, list)
if value_is_list and len(value) == 0:
continue
prop = insert.property.add()
# Set the name of the property.
prop.name = name
# Set the appropriate value.
helpers._set_protobuf_value(prop.value, value)
if name in entity.exclude_from_indexes:
if not value_is_list:
prop.value.indexed = False
for sub_value in prop.value.list_value:
sub_value.indexed = False
| lucemia/gcloud-python | gcloud/datastore/batch.py | Python | apache-2.0 | 10,313 |
#!/usr/bin/python
#coding=utf-8
'''
@author: sheng
@license:
'''
import unittest
from meridian.acupoints import zhimai44
class TestZhimai44Functions(unittest.TestCase):
def setUp(self):
pass
def test_xxx(self):
pass
if __name__ == '__main__':
unittest.main()
| sinotradition/meridian | meridian/tst/acupoints/test_zhimai44.py | Python | apache-2.0 | 297 |
# Copyright (C) 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import os
import fixtures
from jacket.compute import exception
from jacket.compute import test
from jacket.tests.compute.unit.virt.disk.vfs import fakeguestfs
from jacket.compute.virt.disk import api as diskapi
from jacket.compute.virt.disk.vfs import guestfs as vfsguestfs
from jacket.compute.virt.image import model as imgmodel
class VirtDiskTest(test.NoDBTestCase):
def setUp(self):
super(VirtDiskTest, self).setUp()
self.useFixture(
fixtures.MonkeyPatch('compute.virt.disk.vfs.guestfs.guestfs',
fakeguestfs))
self.file = imgmodel.LocalFileImage("/some/file",
imgmodel.FORMAT_QCOW2)
def test_inject_data(self):
self.assertTrue(diskapi.inject_data(
imgmodel.LocalFileImage("/some/file", imgmodel.FORMAT_QCOW2)))
self.assertTrue(diskapi.inject_data(
imgmodel.LocalFileImage("/some/file", imgmodel.FORMAT_RAW),
mandatory=('files',)))
self.assertTrue(diskapi.inject_data(
imgmodel.LocalFileImage("/some/file", imgmodel.FORMAT_RAW),
key="mysshkey",
mandatory=('key',)))
os_name = os.name
os.name = 'nt' # Cause password injection to fail
self.assertRaises(exception.NovaException,
diskapi.inject_data,
imgmodel.LocalFileImage("/some/file",
imgmodel.FORMAT_RAW),
admin_password="p",
mandatory=('admin_password',))
self.assertFalse(diskapi.inject_data(
imgmodel.LocalFileImage("/some/file", imgmodel.FORMAT_RAW),
admin_password="p"))
os.name = os_name
self.assertFalse(diskapi.inject_data(
imgmodel.LocalFileImage("/some/fail/file", imgmodel.FORMAT_RAW),
key="mysshkey"))
def test_inject_data_key(self):
vfs = vfsguestfs.VFSGuestFS(self.file)
vfs.setup()
diskapi._inject_key_into_fs("mysshkey", vfs)
self.assertIn("/root/.ssh", vfs.handle.files)
self.assertEqual(vfs.handle.files["/root/.ssh"],
{'isdir': True, 'gid': 0, 'uid': 0, 'mode': 0o700})
self.assertIn("/root/.ssh/authorized_keys", vfs.handle.files)
self.assertEqual(vfs.handle.files["/root/.ssh/authorized_keys"],
{'isdir': False,
'content': "Hello World\n# The following ssh " +
"key was injected by Nova\nmysshkey\n",
'gid': 100,
'uid': 100,
'mode': 0o600})
vfs.teardown()
def test_inject_data_key_with_selinux(self):
vfs = vfsguestfs.VFSGuestFS(self.file)
vfs.setup()
vfs.make_path("etc/selinux")
vfs.make_path("etc/rc.d")
diskapi._inject_key_into_fs("mysshkey", vfs)
self.assertIn("/etc/rc.d/rc.local", vfs.handle.files)
self.assertEqual(vfs.handle.files["/etc/rc.d/rc.local"],
{'isdir': False,
'content': "Hello World#!/bin/sh\n# Added by " +
"Nova to ensure injected ssh keys " +
"have the right context\nrestorecon " +
"-RF root/.ssh 2>/dev/null || :\n",
'gid': 100,
'uid': 100,
'mode': 0o700})
self.assertIn("/root/.ssh", vfs.handle.files)
self.assertEqual(vfs.handle.files["/root/.ssh"],
{'isdir': True, 'gid': 0, 'uid': 0, 'mode': 0o700})
self.assertIn("/root/.ssh/authorized_keys", vfs.handle.files)
self.assertEqual(vfs.handle.files["/root/.ssh/authorized_keys"],
{'isdir': False,
'content': "Hello World\n# The following ssh " +
"key was injected by Nova\nmysshkey\n",
'gid': 100,
'uid': 100,
'mode': 0o600})
vfs.teardown()
def test_inject_data_key_with_selinux_append_with_newline(self):
vfs = vfsguestfs.VFSGuestFS(self.file)
vfs.setup()
vfs.replace_file("/etc/rc.d/rc.local", "#!/bin/sh\necho done")
vfs.make_path("etc/selinux")
vfs.make_path("etc/rc.d")
diskapi._inject_key_into_fs("mysshkey", vfs)
self.assertIn("/etc/rc.d/rc.local", vfs.handle.files)
self.assertEqual(vfs.handle.files["/etc/rc.d/rc.local"],
{'isdir': False,
'content': "#!/bin/sh\necho done\n# Added "
"by Nova to ensure injected ssh keys have "
"the right context\nrestorecon -RF "
"root/.ssh 2>/dev/null || :\n",
'gid': 100,
'uid': 100,
'mode': 0o700})
vfs.teardown()
def test_inject_net(self):
vfs = vfsguestfs.VFSGuestFS(self.file)
vfs.setup()
diskapi._inject_net_into_fs("mynetconfig", vfs)
self.assertIn("/etc/network/interfaces", vfs.handle.files)
self.assertEqual(vfs.handle.files["/etc/network/interfaces"],
{'content': 'mynetconfig',
'gid': 100,
'isdir': False,
'mode': 0o700,
'uid': 100})
vfs.teardown()
def test_inject_metadata(self):
vfs = vfsguestfs.VFSGuestFS(self.file)
vfs.setup()
metadata = {"foo": "bar", "eek": "wizz"}
metadata = OrderedDict(sorted(metadata.items()))
diskapi._inject_metadata_into_fs(metadata, vfs)
self.assertIn("/meta.js", vfs.handle.files)
self.assertEqual({'content': '{"eek": "wizz", ' +
'"foo": "bar"}',
'gid': 100,
'isdir': False,
'mode': 0o700,
'uid': 100},
vfs.handle.files["/meta.js"])
vfs.teardown()
def test_inject_admin_password(self):
vfs = vfsguestfs.VFSGuestFS(self.file)
vfs.setup()
def fake_salt():
return "1234567890abcdef"
self.stubs.Set(diskapi, '_generate_salt', fake_salt)
vfs.handle.write("/etc/shadow",
"root:$1$12345678$xxxxx:14917:0:99999:7:::\n" +
"bin:*:14495:0:99999:7:::\n" +
"daemon:*:14495:0:99999:7:::\n")
vfs.handle.write("/etc/passwd",
"root:x:0:0:root:/root:/bin/bash\n" +
"bin:x:1:1:bin:/bin:/sbin/nologin\n" +
"daemon:x:2:2:daemon:/sbin:/sbin/nologin\n")
diskapi._inject_admin_password_into_fs("123456", vfs)
self.assertEqual(vfs.handle.files["/etc/passwd"],
{'content': "root:x:0:0:root:/root:/bin/bash\n" +
"bin:x:1:1:bin:/bin:/sbin/nologin\n" +
"daemon:x:2:2:daemon:/sbin:" +
"/sbin/nologin\n",
'gid': 100,
'isdir': False,
'mode': 0o700,
'uid': 100})
shadow = vfs.handle.files["/etc/shadow"]
# if the encrypted password is only 13 characters long, then
# compute.virt.disk.api:_set_password fell back to DES.
if len(shadow['content']) == 91:
self.assertEqual(shadow,
{'content': "root:12tir.zIbWQ3c" +
":14917:0:99999:7:::\n" +
"bin:*:14495:0:99999:7:::\n" +
"daemon:*:14495:0:99999:7:::\n",
'gid': 100,
'isdir': False,
'mode': 0o700,
'uid': 100})
else:
self.assertEqual(shadow,
{'content': "root:$1$12345678$a4ge4d5iJ5vw" +
"vbFS88TEN0:14917:0:99999:7:::\n" +
"bin:*:14495:0:99999:7:::\n" +
"daemon:*:14495:0:99999:7:::\n",
'gid': 100,
'isdir': False,
'mode': 0o700,
'uid': 100})
vfs.teardown()
def test_inject_files_into_fs(self):
vfs = vfsguestfs.VFSGuestFS(self.file)
vfs.setup()
diskapi._inject_files_into_fs([("/path/to/not/exists/file",
"inject-file-contents")],
vfs)
self.assertIn("/path/to/not/exists", vfs.handle.files)
shadow_dir = vfs.handle.files["/path/to/not/exists"]
self.assertEqual(shadow_dir,
{"isdir": True,
"gid": 0,
"uid": 0,
"mode": 0o744})
shadow_file = vfs.handle.files["/path/to/not/exists/file"]
self.assertEqual(shadow_file,
{"isdir": False,
"content": "inject-file-contents",
"gid": 100,
"uid": 100,
"mode": 0o700})
vfs.teardown()
def test_inject_files_into_fs_dir_exists(self):
vfs = vfsguestfs.VFSGuestFS(self.file)
vfs.setup()
called = {'make_path': False}
def fake_has_file(*args, **kwargs):
return True
def fake_make_path(*args, **kwargs):
called['make_path'] = True
self.stubs.Set(vfs, 'has_file', fake_has_file)
self.stubs.Set(vfs, 'make_path', fake_make_path)
# test for already exists dir
diskapi._inject_files_into_fs([("/path/to/exists/file",
"inject-file-contents")],
vfs)
self.assertIn("/path/to/exists/file", vfs.handle.files)
self.assertFalse(called['make_path'])
# test for root dir
diskapi._inject_files_into_fs([("/inject-file",
"inject-file-contents")],
vfs)
self.assertIn("/inject-file", vfs.handle.files)
self.assertFalse(called['make_path'])
# test for null dir
vfs.handle.files.pop("/inject-file")
diskapi._inject_files_into_fs([("inject-file",
"inject-file-contents")],
vfs)
self.assertIn("/inject-file", vfs.handle.files)
self.assertFalse(called['make_path'])
vfs.teardown()
| HybridF5/jacket | jacket/tests/compute/unit/virt/disk/test_inject.py | Python | apache-2.0 | 11,880 |
from ajenti.api import *
from ajenti.plugins import *
info = PluginInfo(
title='Hosts',
icon='sitemap',
dependencies=[
PluginDependency('main'),
],
)
def init():
import main
| lupyuen/RaspberryPiImage | usr/share/pyshared/ajenti/plugins/hosts/__init__.py | Python | apache-2.0 | 206 |
import json
import numpy as np
import cPickle as pickle
with open('../validation/v_xgboost_word_tfidf.csv') as train_file:
content = train_file.readlines()
testData = []
scores = []
element = content[1].strip("\r\n").split(",")
for i in range(1, len(content)):
element = content[i].strip("\r\n").split(",")
testData.append([element[0],element[1]])
scores.append(float(element[2]))
predictions = []
maxscore = max(scores)
minscore = min(scores)
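# Min-max normalisation: rescale each raw xgboost score into [0, 1] using
# (score - min) / (max - min) so the dumped labels share a common range.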
for score in scores:
predictions.append((score-minscore)/float(maxscore-minscore))
ypred = predictions
with open('../validation/v_xgboost_word_tfidf_0-1.csv', 'w') as f1:
f1.write('qid,uid,label\n')
for i in range(0, len(ypred)):
f1.write(testData[i][0]+','+testData[i][1]+','+str(ypred[i])+'\n') | jashwanth9/Expert-recommendation-system | code/labels_range.py | Python | apache-2.0 | 806 |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""List and compare most used OpenStack cloud resources."""
import argparse
import json
import subprocess
import sys
from rally.common.plugin import discover
from rally import consts
from rally import osclients
class ResourceManager(object):
REQUIRED_SERVICE = None
REPR_KEYS = ("id", "name", "tenant_id", "zone", "zoneName", "pool")
def __init__(self, clients):
self.clients = clients
def is_available(self):
if self.REQUIRED_SERVICE:
return self.REQUIRED_SERVICE in self.clients.services().values()
return True
@property
def client(self):
return getattr(self.clients, self.__class__.__name__.lower())()
def get_resources(self):
all_resources = []
cls = self.__class__.__name__.lower()
for prop in dir(self):
if not prop.startswith("list_"):
continue
f = getattr(self, prop)
resources = f() or []
resource_name = prop[5:][:-1]
for res in resources:
res_repr = []
for key in self.REPR_KEYS + (resource_name,):
if isinstance(res, dict):
value = res.get(key)
else:
value = getattr(res, key, None)
if value:
res_repr.append("%s:%s" % (key, value))
if not res_repr:
raise ValueError("Failed to represent resource %r" % res)
all_resources.append(
"%s %s %s" % (cls, resource_name, " ".join(res_repr)))
return all_resources
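# Each string produced by ResourceManager.get_resources() has the flat form
# "<manager> <resource> key:value ..." - e.g. "nova server id:<uuid> name:demo"
# (illustrative) - which keeps saved lists easy to sort and diff in compare().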
class Keystone(ResourceManager):
def list_users(self):
return self.client.users.list()
def list_tenants(self):
return self.client.tenants.list()
def list_roles(self):
return self.client.roles.list()
class Nova(ResourceManager):
def list_flavors(self):
return self.client.flavors.list()
def list_floating_ip_pools(self):
return self.client.floating_ip_pools.list()
def list_floating_ips(self):
return self.client.floating_ips.list()
def list_images(self):
return self.client.images.list()
def list_keypairs(self):
return self.client.keypairs.list()
def list_networks(self):
return self.client.networks.list()
def list_security_groups(self):
return self.client.security_groups.list(
search_opts={"all_tenants": True})
def list_servers(self):
return self.client.servers.list(
search_opts={"all_tenants": True})
def list_services(self):
return self.client.services.list()
def list_availability_zones(self):
return self.client.availability_zones.list()
class Neutron(ResourceManager):
REQUIRED_SERVICE = consts.Service.NEUTRON
def has_extension(self, name):
extensions = self.client.list_extensions().get("extensions", [])
return any(ext.get("alias") == name for ext in extensions)
def list_networks(self):
return self.client.list_networks()["networks"]
def list_subnets(self):
return self.client.list_subnets()["subnets"]
def list_routers(self):
return self.client.list_routers()["routers"]
def list_ports(self):
return self.client.list_ports()["ports"]
def list_floatingips(self):
return self.client.list_floatingips()["floatingips"]
def list_security_groups(self):
return self.client.list_security_groups()["security_groups"]
def list_health_monitors(self):
if self.has_extension("lbaas"):
return self.client.list_health_monitors()["health_monitors"]
def list_pools(self):
if self.has_extension("lbaas"):
return self.client.list_pools()["pools"]
def list_vips(self):
if self.has_extension("lbaas"):
return self.client.list_vips()["vips"]
class Glance(ResourceManager):
def list_images(self):
return self.client.images.list()
class Heat(ResourceManager):
REQUIRED_SERVICE = consts.Service.HEAT
def list_resource_types(self):
return self.client.resource_types.list()
def list_stacks(self):
return self.client.stacks.list()
class Cinder(ResourceManager):
def list_availability_zones(self):
return self.client.availability_zones.list()
def list_backups(self):
return self.client.backups.list()
def list_volume_snapshots(self):
return self.client.volume_snapshots.list()
def list_volume_types(self):
return self.client.volume_types.list()
def list_volumes(self):
return self.client.volumes.list(
search_opts={"all_tenants": True})
class CloudResources(object):
"""List and compare cloud resources.
resources = CloudResources(auth_url=..., ...)
saved_list = resources.list()
# Do something with the cloud ...
changes = resources.compare(saved_list)
has_changed = any(changes)
removed, added = changes
"""
def __init__(self, **kwargs):
endpoint = osclients.objects.Endpoint(**kwargs)
self.clients = osclients.Clients(endpoint)
def _deduplicate(self, lst):
"""Change list duplicates to make all items unique.
>>> resources._deduplicate(["a", "b", "c", "b", "b"])
        ['a', 'b', 'c', 'b (duplicate 1)', 'b (duplicate 2)']
"""
deduplicated_list = []
for value in lst:
if value in deduplicated_list:
ctr = 0
try_value = value
while try_value in deduplicated_list:
ctr += 1
try_value = "%s (duplicate %i)" % (value, ctr)
value = try_value
deduplicated_list.append(value)
return deduplicated_list
def list(self):
managers_classes = discover.itersubclasses(ResourceManager)
resources = []
for cls in managers_classes:
manager = cls(self.clients)
if manager.is_available():
resources.extend(manager.get_resources())
return sorted(self._deduplicate(resources))
def compare(self, with_list):
saved_resources = set(with_list)
current_resources = set(self.list())
removed = saved_resources - current_resources
added = current_resources - saved_resources
return sorted(list(removed)), sorted(list(added))
def main():
parser = argparse.ArgumentParser(
description=("Save list of OpenStack cloud resources or compare "
"with previously saved list."))
parser.add_argument("--credentials",
type=argparse.FileType("r"),
metavar="<path/to/credentials.json>",
help="cloud credentials in JSON format")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--dump-list",
type=argparse.FileType("w"),
metavar="<path/to/output/list.json>",
help="dump resources to given file in JSON format")
group.add_argument("--compare-with-list",
type=argparse.FileType("r"),
metavar="<path/to/existent/list.json>",
help=("compare current resources with a list from "
"given JSON file"))
args = parser.parse_args()
if args.credentials:
config = json.load(args.credentials)
else:
config = json.loads(subprocess.check_output(["rally", "deployment",
"config"]))
config.update(config.pop("admin"))
del config["type"]
resources = CloudResources(**config)
if args.dump_list:
resources_list = resources.list()
json.dump(resources_list, args.dump_list, indent=2)
elif args.compare_with_list:
given_list = json.load(args.compare_with_list)
changes = resources.compare(with_list=given_list)
removed, added = changes
sys.stdout.write(
json.dumps({"removed": removed, "added": added}, indent=2))
if any(changes):
return 0 # `1' will fail gate job
return 0
if __name__ == "__main__":
sys.exit(main())
| group-policy/rally | tests/ci/osresources.py | Python | apache-2.0 | 8,981 |
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2018, Arm Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import gc
import enum
import functools
import os
import os.path
import abc
import sys
import textwrap
import re
import inspect
import copy
import contextlib
import itertools
import types
import warnings
from operator import attrgetter
from datetime import datetime
from collections import OrderedDict, ChainMap
from collections.abc import Mapping
from inspect import signature
import IPython.display
from devlib.collector.dmesg import KernelLogEntry
from devlib import TargetStableError
from lisa.analysis.tasks import TasksAnalysis
from lisa.analysis.rta import RTAEventsAnalysis
from lisa.trace import requires_events, TraceEventCheckerBase, AndTraceEventChecker
from lisa.trace import Trace, TaskID
from lisa.wlgen.rta import RTA, PeriodicWload, RTAPhase, leaf_precedence
from lisa.target import Target
from lisa.utils import (
Serializable, memoized, lru_memoized, ArtifactPath, non_recursive_property,
update_wrapper_doc, ExekallTaggable, annotations_from_signature,
get_sphinx_name, optional_kwargs, group_by_value, kwargs_dispatcher,
dispatch_kwargs, Loggable, kwargs_forwarded_to, docstring_update,
is_running_ipython,
)
from lisa.datautils import df_filter_task_ids
from lisa.trace import FtraceCollector, FtraceConf, DmesgCollector, ComposedCollector
from lisa.conf import (
SimpleMultiSrcConf, KeyDesc, TopLevelKeyDesc,
)
from lisa._generic import TypedList
from lisa.pelt import pelt_settling_time
def _nested_formatter(multiline):
def sort_mapping(data):
if isinstance(data, Mapping):
# Ensure stable ordering of keys if possible
try:
data = OrderedDict(sorted(data.items()))
except TypeError:
data = data
return data
if multiline:
def format_data(data, level=0):
idt = '\n' + ' ' * 4 * level
def indent(s):
stripped = s.strip()
if '\n' in stripped:
return idt + stripped.replace('\n', idt)
else:
return stripped
if isinstance(data, TestMetric):
out = data.pretty_format(multiline=multiline)
out = indent(out) if '\n' in out else out
elif isinstance(data, Mapping):
data = sort_mapping(data)
body = '\n'.join(
f'{key}: {format_data(data, level + 1)}'
for key, data in data.items()
)
out = indent(body)
else:
out = str(data)
return out
else:
def format_data(data):
# Handle recursive mappings, like metrics of AggregatedResultBundle
if isinstance(data, Mapping):
data = sort_mapping(data)
return '{' + ', '.join(
f'{key}={format_data(data)}'
for key, data in data.items()
) + '}'
else:
return str(data)
return format_data
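# Illustrative behaviour sketch (not part of the original module):
# _nested_formatter(multiline=False)({'a': 1, 'b': {'c': 2}}) renders the
# mapping as "{a=1, b={c=2}}", while the multiline variant indents nested
# mappings on their own lines instead.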
class TestMetric:
"""
A storage class for metrics used by tests
:param data: The data to store. Can be any base type or dict(TestMetric)
:param units: The data units
:type units: str
"""
def __init__(self, data, units=None):
self.data = data
self.units = units
def __str__(self):
return self.pretty_format(multiline=False)
def pretty_format(self, multiline=True):
"""
Pretty print the metrics.
:param multiline: If ``True``, use a multiline format.
:type multiline: bool
"""
format_data = _nested_formatter(multiline=multiline)
result = format_data(self.data)
if self.units:
result += ' ' + self.units
return result
def __repr__(self):
return f'{type(self).__name__}({self.data}, {self.units})'
@enum.unique
class Result(enum.Enum):
"""
A classification of a test result
"""
PASSED = 1
"""
The test has passed
"""
FAILED = 2
"""
The test has failed
"""
UNDECIDED = 3
"""
The test data could not be used to decide between :attr:`PASSED` or :attr:`FAILED`
"""
SKIPPED = 4
"""
The test does not make sense on this platform and should therefore be skipped.
.. note:: :attr:`UNDECIDED` should be used when the data are inconclusive
but the test still makes sense on the target.
"""
@property
def lower_name(self):
"""Return the name in lower case"""
return self.name.lower()
class ResultBundleBase(Exception):
"""
Base class for all result bundles.
.. note:: ``__init__`` is not provided as some classes uses properties to
provide some of the attributes.
"""
def __bool__(self):
"""
``True`` if the ``result`` is :attr:`Result.PASSED`, ``False``
otherwise.
"""
return self.result is Result.PASSED
def __str__(self):
return self.pretty_format(multiline=False)
def pretty_format(self, multiline=True):
format_data = _nested_formatter(multiline=multiline)
metrics_str = format_data(self.metrics)
if '\n' in metrics_str:
idt = '\n' + ' ' * 4
metrics_str = metrics_str.replace('\n', idt)
else:
metrics_str = ': ' + metrics_str
return self.result.name + metrics_str
def _repr_pretty_(self, p, cycle):
"Pretty print instances in Jupyter notebooks"
p.text(self.pretty_format())
def add_metric(self, name, data, units=None):
"""
Lets you append several test :class:`TestMetric` to the bundle.
:Parameters: :class:`TestMetric` parameters
"""
self.metrics[name] = TestMetric(data, units)
def display_and_exit(self) -> type(None):
print(f"Test result: {self}")
if self:
sys.exit(0)
else:
sys.exit(1)
class ResultBundle(ResultBundleBase):
"""
Bundle for storing test results
:param result: Indicates whether the associated test passed.
It will also be used as the truth-value of a ResultBundle.
:type result: :class:`Result`
:param utc_datetime: UTC time at which the result was collected, or
``None`` to record the current datetime.
:type utc_datetime: datetime.datetime
:param context: Contextual information to attach to the bundle.
Keep the content small, as size of :class:`ResultBundle` instances
        matters a lot when storing results of long test sessions.
:type context: dict(str, object)
:class:`TestMetric` can be added to an instance of this class. This can
make it easier for users of your tests to understand why a certain test
passed or failed. For instance::
def test_is_noon():
now = time.localtime().tm_hour
res = ResultBundle(Result.PASSED if now == 12 else Result.FAILED)
res.add_metric("current time", now)
return res
>>> res_bundle = test_is_noon()
>>> print(res_bundle.result.name)
FAILED
# At this point, the user can wonder why the test failed.
# Metrics are here to help, and are printed along with the result:
>>> print(res_bundle)
FAILED: current time=11
"""
def __init__(self, result, utc_datetime=None, context=None):
self.result = result
self.metrics = {}
self.utc_datetime = utc_datetime or datetime.utcnow()
self.context = context if context is not None else {}
@classmethod
def from_bool(cls, cond, *args, **kwargs):
"""
Alternate constructor where ``ResultBundle.result`` is determined from a bool
"""
result = Result.PASSED if cond else Result.FAILED
return cls(result, *args, **kwargs)
@classmethod
def raise_skip(cls, msg, from_=None, **kwargs):
"""
Raise an :class:`ResultBundle` with the :attr:`Result.SKIPPED` result,
thereby short-circuiting the rest of the test.
:param msg: Reason why the test is skipped
:type msg: str
        :param from_: Other exception that led to the test being skipped. It
will be used as the ``Y`` in ``raise X from Y``.
:type from_: Exception or None
This is typically used as a way to bail out while indicating to the user
that the test has essentially been skipped because the target does not
support what the test is testing.
"""
res = cls(Result.SKIPPED, **kwargs)
res.add_metric('skipped-reason', msg)
raise res from from_
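    # Illustrative call pattern (not part of the original module): inside a
    # test method one would typically write
    #     ResultBundle.raise_skip("CPU frequency control not available")
    # to short-circuit the rest of the test with a SKIPPED result.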
class AggregatedResultBundle(ResultBundleBase):
"""
Aggregates many :class:`ResultBundle` into one.
:param result_bundles: List of :class:`ResultBundle` to aggregate.
:type result_bundles: list(ResultBundle)
:param name_metric: Metric to use as the "name" of each result bundle.
The value of that metric will be used as top-level key in the
aggregated metrics. If not provided, the index in the
``result_bundles`` list will be used.
:type name_metric: str
:param result: Optionally, force the ``self.result`` attribute to that
value. This is useful when the way of combining the result bundles is
not the default one, without having to make a whole new subclass.
:type result: Result
:param context: Contextual information to attach to the bundle.
Keep the content small, as size of :class:`ResultBundle` instances
        matters a lot when storing results of long test sessions.
:type context: dict(str, object)
This is useful for tests that naturally decompose into subtests.
.. note:: Metrics of aggregated bundles will always be shown, but can be
augmented with new metrics using the usual API.
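For instance, a test made of per-CPU subtests could aggregate its results like
this (``check_cpu`` is a hypothetical helper returning a :class:`ResultBundle`)::
    def test_all_cpus(self) -> AggregatedResultBundle:
        bundles = []
        for cpu in range(self.plat_info['cpus-count']):
            res = check_cpu(cpu)
            res.add_metric('cpu', cpu)
            bundles.append(res)
        return AggregatedResultBundle(bundles, name_metric='cpu')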
"""
def __init__(self, result_bundles, name_metric=None, result=None, context=None):
self.result_bundles = result_bundles
self.name_metric = name_metric
self.extra_metrics = {}
self.extra_context = context if context is not None else {}
self._forced_result = result
@property
def utc_datetime(self):
"""
Use the earliest ``utc_datetime`` among the aggregated bundles.
"""
return min(
result_bundle.utc_datetime
for result_bundle in self.result_bundles
)
@property
def context(self):
"""
Merge the context of all the aggregated bundles, with priority given to
last in the list.
"""
# All writes will be done in that first layer
bases = [self.extra_context]
bases.extend(
result_bundle.context
for result_bundle in self.result_bundles
)
return ChainMap(*bases)
@property
def result(self):
forced_result = self._forced_result
if forced_result is not None:
return forced_result
def predicate(combinator, result):
return combinator(
res_bundle.result is result
for res_bundle in self.result_bundles
)
if predicate(all, Result.UNDECIDED):
return Result.UNDECIDED
elif predicate(any, Result.FAILED):
return Result.FAILED
elif predicate(any, Result.PASSED):
return Result.PASSED
else:
return Result.UNDECIDED
@result.setter
def result(self, result):
self._forced_result = result
@property
def metrics(self):
def get_name(res_bundle, i):
if self.name_metric:
return res_bundle.metrics[self.name_metric]
else:
return str(i)
names = {
res_bundle: get_name(res_bundle, i)
for i, res_bundle in enumerate(self.result_bundles)
}
def get_metrics(res_bundle):
metrics = copy.copy(res_bundle.metrics)
# Since we already show it at the top-level, we can remove it from
# the nested level to remove some clutter
metrics.pop(self.name_metric, None)
return metrics
base = {
names[res_bundle]: get_metrics(res_bundle)
for res_bundle in self.result_bundles
}
if 'failed' not in base:
base['failed'] = TestMetric([
names[res_bundle]
for res_bundle in self.result_bundles
if res_bundle.result is Result.FAILED
])
top = self.extra_metrics
return ChainMap(top, base)
class TestBundleMeta(abc.ABCMeta):
"""
Metaclass of :class:`TestBundleBase`.
Methods with a return annotation of :class:`ResultBundleBase` are wrapped to:
* Update the ``context`` attribute of a returned
:class:`ResultBundleBase`
* Add an ``undecided_filter`` attribute, with
:meth:`add_undecided_filter` decorator, so that any test method can
be used as a pre-filter for another one right away.
* Wrap ``_from_target`` to provide a single ``collector`` parameter,
built from the composition of the collectors provided by
``_make_collector`` methods in the base class tree.
If ``_from_target`` is defined in the class but ``from_target`` is not, a
stub is created and the annotation of ``_from_target`` is copied to the
stub. The annotation is then removed from ``_from_target`` so that it is
not picked up by exekall.
The signature of ``from_target`` is the result of merging the original
``cls.from_target`` parameters with the ones defined in ``_from_target``.
"""
@classmethod
def test_method(metacls, func):
"""
Decorator to intercept returned :class:`ResultBundle` and attach some contextual information.
"""
def update_res(test_bundle, res):
plat_info = test_bundle.plat_info
# Map context keys to PlatformInfo nested keys
keys = {
'board-name': ['name'],
'kernel-version': ['kernel', 'version']
}
context = {}
for context_key, plat_info_key in keys.items():
try:
val = plat_info.get_nested_key(plat_info_key)
except KeyError:
continue
else:
context[context_key] = val
# Only update what is strictly necessary here, so that
# AggregatedResultBundle ends up with a minimal context state.
res_context = res.context
for key, val in context.items():
if key not in res_context:
res_context[key] = val
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
try:
res = func(self, *args, **kwargs)
except ResultBundleBase as res:
update_res(self, res)
raise
else:
if isinstance(res, ResultBundleBase):
update_res(self, res)
return res
wrapper = metacls.add_undecided_filter(wrapper)
return wrapper
@classmethod
def collector_factory(cls, f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
return f(*args, **kwargs)
wrapper._COLLECTOR_FACTORY = True
return wrapper
@staticmethod
def add_undecided_filter(func):
"""
Turn any method returning a :class:`ResultBundleBase` into a decorator
that can be used as a test method filter.
The filter decorator is accessible as the ``undecided_filter``
attribute of the decorated method.
Once a test is decorated, the filter method will be run in addition to
the wrapped test, and if the filter does not succeed, the
:class:`ResultBundleBase` result will be set to
:attr:`Result.UNDECIDED`.
:Example:
.. code-block:: python
class Foo(TestBundle):
@TestBundle.add_undecided_filter
def test_foo(self, xxx=42, ...):
...
# Alternatively, ResultBundle return annotation will
# automatically decorate the method with TestBundleMeta
# metaclass.
def test_foo(self, xxx=42, ...) -> ResultBundle:
...
class Bar(Foo):
# Set xxx=77 as the default, but this can be overridden when
# test_bar() is called.
@Foo.test_foo.undecided_filter(xxx=77)
def test_bar(self, yyy=43, ...) -> ResultBundle:
...
The resulting decorated method can take the union of keyword
parameters::
bar = Bar()
bar.test_bar(xxx=33, yyy=55)
# Same as
bar.test_bar(33, yyy=55)
# But this fails, since only keyword arguments can be passed to the
# wrapping pre-test
bar.test_bar(33, 55)
If there is a parameter conflict, it is detected at import time and will
result in a :exc:`TypeError`.
.. note:: Even if the pre-test does not succeed, the wrapped test is
still executed, so that the ResultBundle metrics are updated and
the artifacts still produced. This can be important in order to
manually analyse results in case the pre-filter was overly
conservative and marked a usable result as UNDECIDED.
"""
@optional_kwargs
def decorator(wrapped_test, **preset_kwargs):
# Propagate the events used by the filter
try:
used_events = func.used_events
except AttributeError:
used_events = lambda x: x
@used_events
@update_wrapper_doc(
wrapped_test,
added_by=func,
sig_from=func,
description=textwrap.dedent(
"""
The returned ``ResultBundle.result`` will be changed to
:attr:`~lisa.tests.base.Result.UNDECIDED` if {} does not
succeed (i.e. either
:attr:`~lisa.tests.base.Result.UNDECIDED` or
:attr:`~lisa.tests.base.Result.FAILED`).
{}
""").strip().format(
get_sphinx_name(func, style='rst', abbrev=True),
inspect.getdoc(func),
),
)
@kwargs_dispatcher(
{
func.__get__(0): 'filter_kwargs',
},
# Better safe than sorry, there is no guarantee that the tests
# won't step on each other's toes
allow_overlap=False,
)
@functools.wraps(wrapped_test)
def filter_wrapper(self, *args, filter_kwargs=None, **kwargs):
# Merge-in the presets
filter_kwargs = {
**preset_kwargs,
**filter_kwargs,
}
# Run the wrapped test no matter what, so we get the metrics
# and also the artifacts
res = wrapped_test(self, *args, **kwargs)
filter_res = func(self, **filter_kwargs)
res.metrics.update(filter_res.metrics)
if not filter_res:
res.result = Result.UNDECIDED
res.add_metric('undecided-reason', f'{func.__qualname__} failed')
return res
return filter_wrapper
func.undecided_filter = decorator
return func
@classmethod
def __prepare__(metacls, cls_name, bases, **kwargs):
# Decorate each method when it is bound to its name in the class'
# namespace, so that other methods can use e.g. undecided_filter
# If we do that from __new__, the decoration will happen after all
# methods are defined, just before the class object is created.
class NS(dict):
def __setitem__(self, name, f):
if isinstance(f, types.FunctionType):
# Wrap the test methods to add contextual information
sig = signature(f)
annotation = sig.return_annotation
if isinstance(annotation, type) and issubclass(annotation, ResultBundleBase):
f = metacls.test_method(f)
super().__setitem__(name, f)
return NS()
@staticmethod
def _make_collector_cm_factory(cls):
"""
Create the method in charge of creating the collector for the test.
This method is created by aggregating the ``_make_collector`` of all
base classes into one :class:`lisa.trace.ComposedCollector`.
The resulting method is then used to consume the user-level parameters
exposed by each ``_make_collector`` and turn it into a single
``collector`` parameter passed to :meth:`_from_target`.
"""
def find_factories(cls):
def predicate(f):
if isinstance(f, (classmethod, staticmethod)):
_f = f.__func__
else:
_f = f
return (
getattr(_f, '_COLLECTOR_FACTORY', False) or
(
hasattr(_f, '__wrapped__') and
find_factories(_f.__wrapped__)
)
)
factories = inspect.getmembers(cls, predicate)
return list(map(
# Unbind the method and turn it again into an unbound
# classmethod
lambda member: classmethod(member[1].__func__),
factories
))
factories_f = find_factories(cls)
# Bind the classmethods to remove the first parameter from their
# signature
factories = [
f.__get__(None, cls)
for f in factories_f
]
params = {
param: param.name
for f in factories
for param in inspect.signature(f).parameters.values()
if param.kind == param.KEYWORD_ONLY
}
for _name, _params in group_by_value(params, key_sort=attrgetter('name')).items():
if len(_params) > 1:
_params = ', '.join(map(str, _params))
raise TypeError(f'Conflicting parameters for {cls.__qualname__} collectors factory: {_params}')
params = sorted(params.keys(), key=attrgetter('name'))
@classmethod
def factory(cls, **kwargs):
factories = [
f.__get__(None, cls)
for f in factories_f
]
dispatched = dispatch_kwargs(
factories,
kwargs,
call=True,
allow_overlap=True,
)
cms = [
cm
for cm in dispatched.values()
if cm is not None
]
cms = sorted(
cms,
key=attrgetter('_COMPOSITION_ORDER'),
reverse=True,
)
cm = ComposedCollector(cms)
return cm
first_param = list(inspect.signature(factory.__func__).parameters.values())[0]
factory.__func__.__signature__ = inspect.Signature(
parameters=[first_param] + params,
)
factory.__name__ = '_make_collector_cm'
factory.__qualname__ = f'{cls.__qualname__}.{factory.__name__}'
factory.__module__ = cls.__module__
return factory
def __new__(metacls, cls_name, bases, dct, **kwargs):
new_cls = super().__new__(metacls, cls_name, bases, dct, **kwargs)
# Merge the collectors available for that class and pass the
# composed collector to _from_target
new_cls._make_collector_cm = metacls._make_collector_cm_factory(new_cls)
# If that class defines _from_target, stub from_target and move the
# annotations of _from_target to from_target. If from_target was
# already defined on that class, it's wrapped by the stub, otherwise
# super().from_target is used.
if '_from_target' in dct and not getattr(new_cls._from_target, '__isabstractmethod__', False):
assert isinstance(dct['_from_target'], classmethod)
_from_target = new_cls._from_target
# Sanity check on _from_target signature
for name, param in signature(_from_target).parameters.items():
if name != 'target' and param.kind is not inspect.Parameter.KEYWORD_ONLY:
raise TypeError(f'Non keyword parameters "{name}" are not allowed in {_from_target.__qualname__} signature')
# This is necessary since _from_target is then reassigned, and the
# closure refers to it by name
_real_from_target = _from_target
@classmethod
@kwargs_dispatcher(
{
_from_target: 'from_target_kwargs',
new_cls._make_collector_cm: 'collector_kwargs',
},
ignore=['collector'],
)
def wrapper(cls, target, from_target_kwargs, collector_kwargs):
cm = cls._make_collector_cm(**collector_kwargs)
return _real_from_target.__func__(cls, collector=cm, **from_target_kwargs)
# Make sure to get the return annotation from _real_from_target
wrapper.__func__.__signature__ = inspect.signature(wrapper.__func__).replace(
return_annotation=inspect.signature(_real_from_target.__func__).return_annotation
)
wrapper.__func__.__annotations__ = annotations_from_signature(wrapper.__func__.__signature__)
new_cls._from_target = wrapper
_from_target = new_cls._from_target
def get_keyword_only_names(f):
return {
param.name
for param in signature(f).parameters.values()
if param.kind is inspect.Parameter.KEYWORD_ONLY
}
try:
missing_params = (
get_keyword_only_names(super(bases[0], new_cls)._from_target)
- get_keyword_only_names(_from_target)
)
except AttributeError:
pass
else:
if missing_params:
raise TypeError('{}._from_target() must at least implement all the parameters of {}._from_target(). Missing parameters: {}'.format(
new_cls.__qualname__,
bases[0].__qualname__,
', '.join(sorted(missing_params))
))
if 'from_target' in dct:
# Bind the classmethod object to the class
orig_from_target = dct['from_target']
def get_orig_from_target(cls):
return orig_from_target.__get__(cls, cls)
else:
def get_orig_from_target(cls):
return super(new_cls, cls).from_target
# Make a stub that we can freely update
# Merge the signatures to get the base signature of
# super().from_target.
@kwargs_forwarded_to(_from_target.__func__)
@functools.wraps(new_cls.from_target.__func__)
def from_target(cls, *args, **kwargs):
from_target = get_orig_from_target(cls)
return from_target(*args, **kwargs)
# Hide the fact that we wrapped the function, so exekall does not
# get confused
del from_target.__wrapped__
# Fixup the names, so it is not displayed as `_from_target`
from_target.__name__ = 'from_target'
from_target.__qualname__ = new_cls.__qualname__ + '.' + from_target.__name__
# Stitch the relevant docstrings
func = new_cls.from_target.__func__
from_target_doc = inspect.cleandoc(func.__doc__ or '')
_from_target_doc = inspect.cleandoc(_from_target.__doc__ or '')
if _from_target_doc:
doc = f'{from_target_doc}\n\n(**above inherited from** :meth:`{func.__module__}.{func.__qualname__}`)\n\n{_from_target_doc}\n'
else:
doc = from_target_doc
from_target.__doc__ = doc
# Make sure the annotation points to an actual class object if it
# was set, as most of the time they will be strings for factories.
# Since the wrapper's __globals__ (read-only) attribute is not
# going to contain the necessary keys to resolve that string, we
# take care of it here.
if inspect.signature(_from_target).return_annotation != inspect.Signature.empty:
# Since we set the signature manually, we also need to update
# the annotations in it
from_target.__signature__ = from_target.__signature__.replace(return_annotation=new_cls)
# Keep the annotations and the signature in sync
from_target.__annotations__ = annotations_from_signature(from_target.__signature__)
# De-annotate the _from_target function so it is not picked up by exekall
del _from_target.__func__.__annotations__
new_cls.from_target = classmethod(from_target)
return new_cls
class TestBundleBase(
Serializable,
ExekallTaggable,
abc.ABC,
docstring_update('.. note:: As a subclass of :class:`lisa.tests.base.TestBundleBase`, this class is considered as "application" and its API is therefore more subject to change than other parts of :mod:`lisa`.'),
metaclass=TestBundleMeta
):
"""
A LISA test bundle.
:param res_dir: Directory in which the target execution artifacts reside.
This will also be used to dump any artifact generated in the test code.
:type res_dir: str
:param plat_info: Various information about the platform, available
to all tests.
:type plat_info: :class:`lisa.platforms.platinfo.PlatformInfo`
The point of a :class:`TestBundleBase` is to bundle in a single object all of the
required data to run some test assertion (hence the name). When inheriting
from this class, you can define test methods that use this data, and return
a :class:`ResultBundle`.
Thanks to :class:`~lisa.utils.Serializable`, instances of this class
can be serialized with minimal effort. As long as some information is stored
within an object's member, it will be automagically handled.
Please refrain from monkey-patching the object in :meth:`from_target`.
Data required by the object to run test assertions should be exposed as
``__init__`` parameters.
.. note:: All subclasses are considered as "application" code, as opposed
to most of the rest of :mod:`lisa` which is treated as a library. This
means that the classes and their APIs are subject to change when needs
evolve, which is not always backward compatible. It's rarely an issue
since these classes are used "manually" mostly for debugging, which is
a version-specific activity. Likewise, the set of tests will evolve as
existing tests are replaced by more general implementations, that could
be organized and named differently.
**Design notes:**
* :meth:`from_target` will collect whatever artifacts are required
from a given target, and will then return a :class:`TestBundleBase`.
Note that a default implementation is provided out of ``_from_target``.
* :meth:`from_dir` will use whatever artifacts are available in a
given directory (which should have been created by an earlier call
to :meth:`from_target` and then :meth:`to_dir`), and will then return
a :class:`TestBundleBase`.
* :attr:`VERIFY_SERIALIZATION` is there to ensure the instances can
be serialized and deserialized without error.
* ``res_dir`` parameter of ``__init__`` must be stored as an attribute
without further processing, in order to support result directory
relocation.
* Test methods should have a return annotation for the
:class:`ResultBundle` to be picked up by the test runners.
**Implementation example**::
from lisa.target import Target
from lisa.platforms.platinfo import PlatformInfo
from lisa.utils import ArtifactPath
class DummyTestBundle(TestBundle):
def __init__(self, res_dir, plat_info, shell_output):
super().__init__(res_dir, plat_info)
self.shell_output = shell_output
@classmethod
def _from_target(cls, target:Target, *, res_dir:ArtifactPath) -> 'DummyTestBundle':
output = target.execute('echo $((21+21))').split()
return cls(res_dir, target.plat_info, output)
def test_output(self) -> ResultBundle:
return ResultBundle.from_bool(
any(
'42' in line
for line in self.shell_output
)
)
**Usage example**::
# Creating a Bundle from a live target
bundle = TestBundle.from_target(target, plat_info=plat_info, res_dir="/my/res/dir")
# Running some test on the bundle
res_bundle = bundle.test_foo()
# Saving the bundle on the disk
bundle.to_dir("/my/res/dir")
# Reloading the bundle from the disk
bundle = TestBundle.from_dir("/my/res/dir")
# The reloaded object can be used just like the original one.
# Keep in mind that serializing/deserializing this way will have a
# similar effect to a deepcopy.
res_bundle = bundle.test_foo()
"""
VERIFY_SERIALIZATION = True
"""
When True, this enforces a serialization/deserialization step in
:meth:`from_target`.
.. note:: The deserialized instance is thrown away in order to avoid using
what is in effect a deepcopy of the original bundle. Using that
deepcopy greatly increases the memory consumption of long running
processes.
"""
def __init__(self, res_dir, plat_info):
# It is important that res_dir is directly stored as an attribute, so
# it can be replaced by a relocated res_dir after the object is
# deserialized on another host.
# See exekall_customization.LISAAdaptor.load_db
self.res_dir = res_dir
self.plat_info = plat_info
def get_tags(self):
try:
return {'board': self.plat_info['name']}
except KeyError:
return {}
@classmethod
@abc.abstractmethod
def _from_target(cls, target, *, res_dir):
"""
:meta public:
Internals of the target factory method.
.. note:: This must be a classmethod, and all parameters except
``target`` must be keyword-only, i.e. appearing after ``*args`` or a
lonely `*`::
@classmethod
def _from_target(cls, target, *, foo=33, bar):
...
"""
@classmethod
def check_from_target(cls, target):
"""
Check whether the given target can be used to create an instance of this class
:raises: :class:`lisa.tests.base.ResultBundleBase` with ``result`` as
:attr:`lisa.tests.base.Result.SKIPPED` if the check fails
This method should be overridden to check your implementation requirements;
a sketch of an override is shown below.
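For instance (mirroring the pattern used by :class:`TestBundle` at the end of
this module; the 2-CPU requirement is purely illustrative)::
    @classmethod
    def check_from_target(cls, target):
        super().check_from_target(target)
        if target.plat_info['cpus-count'] < 2:
            ResultBundle.raise_skip('This test requires at least 2 CPUs')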
"""
@classmethod
def can_create_from_target(cls, target):
"""
:returns: Whether the given target can be used to create an instance of this class
:rtype: bool
:meth:`check_from_target` is used internally, so there shouldn't be any
need to override this.
"""
try:
cls.check_from_target(target)
return True
except ResultBundleBase:
return False
@classmethod
def from_target(cls, target: Target, *, res_dir: ArtifactPath = None, **kwargs):
"""
Factory method to create a bundle using a live target
:param target: Target to connect to.
:type target: lisa.target.Target
:param res_dir: Host result directory holding artifacts.
:type res_dir: str or lisa.utils.ArtifactPath
:param custom_collector: Custom collector that will be used as a
context manager when calling the workload.
:type custom_collector: lisa.trace.CollectorBase
This is mostly boiler-plate code around
:meth:`~lisa.tests.base.TestBundleBase._from_target`, which lets us
introduce common functionalities for daughter classes. Unless you know
what you are doing, you should not override this method, but the
internal :meth:`lisa.tests.base.TestBundleBase._from_target` instead.
"""
cls.check_from_target(target)
res_dir = res_dir or target.get_res_dir(
name=cls.__qualname__,
symlink=True,
)
# Make sure that all the relevant dmesg warnings will fire when running
# things on the target, even if we already hit some warn_once warnings.
with contextlib.suppress(TargetStableError):
target.write_value('/sys/kernel/debug/clear_warn_once', '1', verify=False)
bundle = cls._from_target(target, res_dir=res_dir, **kwargs)
# We've created the bundle from the target, and have all of
# the information we need to execute the test code. However,
# we enforce the use of the offline reloading path to ensure
# it does not get broken.
if cls.VERIFY_SERIALIZATION:
bundle.to_dir(res_dir)
# Updating the res_dir breaks deserialization for some use cases
cls.from_dir(res_dir, update_res_dir=False)
return bundle
@classmethod
@TestBundleMeta.collector_factory
def _make_custom_collector(cls, *, custom_collector=None):
return custom_collector
@classmethod
def _get_filepath(cls, res_dir):
"""
:meta public:
Returns the path of the file containing the serialized object in
``res_dir`` folder.
"""
return ArtifactPath.join(res_dir, f"{cls.__qualname__}.yaml")
def _save_debug_plot(self, fig, name):
"""
Save a holoviews debug plot using the bokeh backend and show it in the
notebook cell.
"""
self.trace.ana.notebook.save_plot(
fig,
filepath=ArtifactPath.join(
self.res_dir,
f'{name}.html',
),
backend='bokeh',
)
# Check before calling display(), as running it outside a notebook will
# just print the structure of the element, which is useless
#
# TODO: See if we can capture this side effect and re-run it when a
# memoized test method is called again.
if is_running_ipython():
IPython.display.display(fig)
return fig
@classmethod
def _get_referred_objs(cls, obj, predicate=lambda x: True):
visited = set()
objs = []
def update_refs(obj):
obj_id = id(obj)
# Avoid cycles. Use the id() of the objects directly since the
# inclusion check is orders of magnitude faster than checking for
# inclusion on the object directly. It also handles non-hashable
# objects and broken __eq__ implementations well.
if obj_id in visited:
return
else:
visited.add(obj_id)
# Filter-out weird objects that end up in the list and that can
# trigger a coredump on the interpreter
with warnings.catch_warnings():
warnings.simplefilter("ignore")
has_class = hasattr(obj, '__class__')
if has_class and predicate(obj):
objs.append(obj)
for sub in gc.get_referents(obj):
update_refs(sub)
update_refs(obj)
return objs
@property
def _children_test_bundles(self):
"""
:meta public:
List of references to :class:`TestBundleBase` instances ``self`` relies on
(directly *and* indirectly).
This is used for some post-deserialization fixups that need to walk the
whole graph of :class:`TestBundleBase`.
"""
# Work around:
# https://github.com/pallets/werkzeug/issues/2188
def predicate(x):
try:
return isinstance(x, TestBundleBase)
except Exception:
return False
objs = set(self._get_referred_objs(self, predicate))
objs.discard(self)
return objs
def _fixup_res_dir(self, new):
orig_root = self.res_dir
def fixup(obj):
rel = os.path.relpath(obj.res_dir, orig_root)
absolute = os.path.abspath(os.path.join(new, rel))
obj.res_dir = absolute
for child in self._children_test_bundles | {self}:
fixup(child)
@classmethod
def from_dir(cls, res_dir, update_res_dir=True):
"""
Wrapper around :meth:`lisa.utils.Serializable.from_path`.
It uses :meth:`_get_filepath` to get the name of the serialized file to
reload.
"""
res_dir = ArtifactPath(root=res_dir, relative='')
bundle = super().from_path(cls._get_filepath(res_dir))
# We need to update the res_dir to the one we were given
if update_res_dir:
bundle._fixup_res_dir(res_dir)
return bundle
def to_dir(self, res_dir):
"""
See :meth:`lisa.utils.Serializable.to_path`
"""
super().to_path(self._get_filepath(res_dir))
class FtraceTestBundleBase(TestBundleBase):
"""
Base class for test bundles needing ftrace traces.
Optionally, an ``FTRACE_CONF`` class attribute can be defined to hold
additional FTrace configuration used to record a trace while the synthetic
workload is being run. By default, the required events are extracted from
decorated test methods.
This base class ensures that each subclass will get its own copy of
``FTRACE_CONF`` attribute, and that the events specified in that
configuration are a superset of what is needed by methods using the family
of decorators :func:`lisa.trace.requires_events`. This makes sure that the
default set of events is always enough to run all defined methods, without
duplicating that information. That means that trace events are "inherited"
at the same time as the methods that need them.
The ``FTRACE_CONF`` attribute is typically built by merging these sources:
* Existing ``FTRACE_CONF`` class attribute on the
:class:`RTATestBundle` subclass
* Events required by methods using :func:`lisa.trace.requires_events`
decorator (and equivalents).
* :class:`lisa.trace.FtraceConf` specified by the user and passed to
:meth:`lisa.tests.base.TestBundleBase.from_target` as ``ftrace_conf``
parameter.
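For instance, a subclass can ask for extra events to be collected (a sketch:
the ``events`` key is the one used by the merging code in
``__init_subclass__`` below, and passing a plain mapping as ``conf`` is
assumed to be accepted by :class:`lisa.trace.FtraceConf`)::
    class MyTestBundle(FtraceTestBundle):
        FTRACE_CONF = FtraceConf(conf={'events': ['sched_switch', 'sched_wakeup']})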
"""
TRACE_PATH = 'trace.dat'
"""
Path to the ``trace-cmd`` trace.dat file in the result directory.
"""
@classmethod
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
# Collect all the events that can be used by all methods available on
# that class.
ftrace_events = []
for name, obj in inspect.getmembers(cls, callable):
try:
used_events = obj.used_events
except AttributeError:
continue
else:
ftrace_events.append(used_events)
ftrace_events = AndTraceEventChecker(ftrace_events)
# Get the ftrace_conf attribute of the class, and make sure it is
# unique to that class (i.e. not shared with any other parent or
# sibling classes)
try:
ftrace_conf = cls.FTRACE_CONF
except AttributeError:
ftrace_conf = None
else:
# If the ftrace_conf attribute has been defined in a base
# class, make sure that class gets its own copy since we are
# going to modify it
if 'ftrace_conf' not in cls.__dict__:
ftrace_conf = copy.copy(ftrace_conf)
# Re-wrap into an FtraceConf so we get a chance to set a correct source
# name.
ftrace_conf = FtraceConf(
conf=ftrace_conf or None,
src=cls.__qualname__,
# Let the original object decide that.
add_default_src=False,
)
# Merge-in a new source to FtraceConf that contains the events we
# collected
ftrace_conf.add_merged_src(
src=f'{cls.__qualname__}(required)',
conf={
'events': ftrace_events,
},
)
cls.FTRACE_CONF = ftrace_conf
# Deprecated, for backward compat only, all new code uses the
# capitalized version
cls.ftrace_conf = ftrace_conf
@classmethod
@TestBundleBase.collector_factory
def _make_ftrace_collector(cls, *, target: Target, res_dir: ArtifactPath = None, ftrace_conf: FtraceConf = None):
cls_conf = cls.FTRACE_CONF or FtraceConf()
user_conf = ftrace_conf or FtraceConf()
# Make a copy of the conf, since it may be shared by multiple classes
conf = copy.copy(cls_conf)
# Merge user configuration with the test's configuration
conf.add_merged_src(
src=f'user+{cls.__qualname__}',
conf=user_conf,
optional_events=True,
)
# If there is no event, do not collect the trace unless the user asked
# for it. This can happen for classes that inherit from
# FtraceTestBundle as a convenience to users without actually needing
# it internally
if conf.get('events'):
path = ArtifactPath.join(res_dir, cls.TRACE_PATH)
return FtraceCollector.from_conf(
target=target,
conf=conf,
output_path=path,
)
else:
return None
@property
def trace_path(self):
"""
Path to the ``trace-cmd report`` trace.dat file.
"""
return ArtifactPath.join(self.res_dir, self.TRACE_PATH)
# Guard before the cache, so we don't accidentally start depending on the
# LRU cache for functional correctness.
@non_recursive_property
# Only cache the trace of N bundles at a time, to avoid running out of memory.
# This should not really impact the tests when run with exekall, since they
# are sequenced one after another. It would have some speed impact on
# scripts/notebooks that try to do something with a bunch of
# FtraceTestBundle.
@lru_memoized(first_param_maxsize=5)
def trace(self):
"""
:returns: a :class:`lisa.trace.TraceView`
All events specified in ``FTRACE_CONF`` are parsed from the trace,
so it is suitable for direct use in methods.
Having the trace as a property lets us defer the loading of the actual
trace to when it is first used. Also, this prevents it from being
serialized when calling :meth:`lisa.utils.Serializable.to_path` and
allows updating the underlying path before it is actually loaded to
match a different folder structure.
"""
return self.get_trace(
events=self.FTRACE_CONF["events"],
normalize_time=True,
# Soft limit on the amount of memory used by dataframes kept around
# in memory by Trace, so that we don't blow up the memory when we
# have a large-ish number of FTraceTestBundle alive at the same
# time.
max_mem_size=500e6,
# TODO: revisit that. As of pyarrow 2.0.0 and pandas 1.1.4, reading
# (and maybe writing) parquet files seems to leak memory. This can
# take the consumption in the order of tens of gigabytes for a few
# iterations of the tests with exekall, leading to crashes.
# Therefore, disable the on-disk swap.
enable_swap=False,
)
def get_trace(self, events=None, **kwargs):
"""
:returns: a :class:`lisa.trace.Trace` collected in the standard location.
:Variable keyword arguments: Forwarded to :class:`lisa.trace.Trace`.
"""
return Trace(self.trace_path, self.plat_info, events=events, **kwargs)
class FtraceTestBundle(FtraceTestBundleBase):
"""
Dummy subclass of :class:`FtraceTestBundleBase` to be inherited from to
override :class:`OptionalFtraceTestBundle` in the inheritance tree.
"""
_make_ftrace_collector = FtraceTestBundleBase._make_ftrace_collector
class OptionalFtraceTestBundle(FtraceTestBundleBase, Loggable):
@classmethod
@TestBundleBase.collector_factory
@kwargs_forwarded_to(FtraceTestBundleBase._make_ftrace_collector)
def _make_ftrace_collector(cls, **kwargs):
try:
return super()._make_ftrace_collector(**kwargs)
except Exception as e:
cls.get_logger().warning(f'Could not create ftrace collector: {e}')
return None
class TestConfBase(SimpleMultiSrcConf):
"""
Base class for test configurations.
This class will add a ``test-conf`` top-level key above the level specified
by the class, so that if the class specifies a ``TopLevelKeyDesc('foo')``,
the actual top-level key will be ``test-conf/foo``.
"""
def __init_subclass__(cls, **kwargs):
structure = copy.copy(cls.STRUCTURE)
structure.levels = ['test-conf', *structure.levels]
cls.STRUCTURE = structure
super().__init_subclass__(**kwargs)
class DmesgTestConf(TestConfBase):
"""
Configuration class for :meth:`lisa.tests.base.DmesgTestBundle.test_dmesg`.
{generated_help}
{yaml_example}
"""
STRUCTURE = TopLevelKeyDesc('dmesg', 'Dmesg test configuration', (
KeyDesc('ignored-patterns', 'List of Python regex matching dmesg entries *content* to be ignored (see :class:`devlib.collector.dmesg.KernelLogEntry` for how the message is split)', [TypedList[str]]),
))
class DmesgTestBundleBase(TestBundleBase):
"""
Abstract Base Class for TestBundles based on dmesg output.
.. seealso: Test subclasses should inherit from :class:`DmesgTestBundle` in
order to require the features.
"""
DMESG_PATH = 'dmesg.log'
"""
Path to the dmesg log in the result directory.
"""
CANNED_DMESG_IGNORED_PATTERNS = {
'EAS-schedutil': 'Disabling EAS, schedutil is mandatory',
# On kernel >= 5.6, executable stack will trigger this issue:
# kern: warn: [555.927466] process 'root/devlib-target/bin/busybox' started with executable stack
'executable-stack': 'started with executable stack',
}
"""
Mapping of canned patterns to avoid repetition while defining
:attr:`lisa.tests.base.DmesgTestBundleBase.DMESG_IGNORED_PATTERNS` in
subclasses.
"""
DMESG_IGNORED_PATTERNS = [
CANNED_DMESG_IGNORED_PATTERNS['executable-stack'],
]
"""
List of patterns to ignore in addition to the ones passed to
:meth:`~lisa.tests.base.DmesgTestBundle.test_dmesg`.
"""
@classmethod
@TestBundleBase.collector_factory
def _make_dmesg_collector(cls, *, target: Target, res_dir: ArtifactPath = None):
path = ArtifactPath.join(res_dir, cls.DMESG_PATH)
return DmesgCollector(
target,
output_path=path,
)
@property
def dmesg_path(self):
"""
Path to the dmesg output log file
"""
return ArtifactPath.join(self.res_dir, self.DMESG_PATH)
@property
def dmesg_entries(self):
"""
List of parsed dmesg output entries
:class:`devlib.collector.dmesg.KernelLogEntry`.
"""
with open(self.dmesg_path) as f:
return list(KernelLogEntry.from_dmesg_output(f.read()))
def test_dmesg(self, level='warn', facility=None, ignored_patterns: DmesgTestConf.IgnoredPatterns = None) -> ResultBundle:
"""
Basic test on kernel dmesg output.
:param level: Any dmesg entry with a level more critical than (and
including) this one will make the test fail.
:type level: str
:param facility: Only select entries emitted by the given dmesg
facility like `kern`. Note that not all versions of `dmesg` are
able to print it, so specifying it may lead to no entry being
inspected at all. If ``None``, the facility is ignored.
:type facility: str or None
:param ignored_patterns: List of regexes to ignore some messages. The
pattern list is combined with
:attr:`~lisa.tests.base.DmesgTestBundleBase.DMESG_IGNORED_PATTERNS`
class attribute.
:type ignored_patterns: list or None
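For instance, to only fail on error entries or worse while ignoring a known
benign message (values are illustrative)::
    res = bundle.test_dmesg(
        level='err',
        ignored_patterns=[r'some known benign message'],
    )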
"""
levels = DmesgCollector.LOG_LEVELS
# Consider as an issue all levels more critical than `level`
issue_levels = levels[:levels.index(level) + 1]
ignored_patterns = (
(ignored_patterns or []) +
(self.DMESG_IGNORED_PATTERNS or [])
)
logger = self.logger
if ignored_patterns:
logger.info(f'Will ignore patterns in dmesg output: {ignored_patterns}')
ignored_regex = [
re.compile(pattern)
for pattern in ignored_patterns
]
else:
ignored_regex = []
issues = [
entry
for entry in self.dmesg_entries
if (
entry.msg.strip()
and (entry.facility == facility if facility else True)
and (entry.level in issue_levels)
and not any(regex.search(entry.msg.strip()) for regex in ignored_regex)
)
]
res = ResultBundle.from_bool(not issues)
multiline = len(issues) > 1
res.add_metric('dmesg output', ('\n' if multiline else '') + '\n'.join(str(entry) for entry in issues))
return res
class DmesgTestBundle(DmesgTestBundleBase):
"""
Dummy subclass of :class:`DmesgTestBundleBase` to be inherited from to
override :class:`OptionalDmesgTestBundle` in the inheritance tree.
"""
test_dmesg = DmesgTestBundleBase.test_dmesg
_make_dmesg_collector = DmesgTestBundleBase._make_dmesg_collector
class OptionalDmesgTestBundle(DmesgTestBundleBase, Loggable):
@functools.wraps(DmesgTestBundleBase.test_dmesg)
def test_dmesg(self, *args, **kwargs):
try:
return super().test_dmesg(*args, **kwargs)
except FileNotFoundError:
self.logger.warning('Could not check dmesg content, as it was not collected')
return ResultBundle(result=Result.UNDECIDED)
@classmethod
@TestBundleBase.collector_factory
@kwargs_forwarded_to(DmesgTestBundleBase._make_dmesg_collector)
def _make_dmesg_collector(cls, **kwargs):
try:
return super()._make_dmesg_collector(**kwargs)
except Exception as e:
cls.get_logger().warning(f'Could not create dmesg collector: {e}')
return None
class RTATestBundle(FtraceTestBundle, DmesgTestBundle):
"""
Abstract Base Class for :class:`lisa.wlgen.rta.RTA`-powered TestBundles
.. seealso: :class:`lisa.tests.base.FtraceTestBundle` for default
``FTRACE_CONF`` content.
"""
TASK_PERIOD = 16e-3
"""
A task period in seconds you can re-use for your
:class:`lisa.wlgen.rta.RTATask` definitions.
"""
NOISE_ACCOUNTING_THRESHOLDS = {
# Idle task - ignore completely
# note: since it has multiple comms, we need to ignore them
TaskID(pid=0, comm=None): 100,
# Feeble boards like Juno/TC2 spend a while in sugov
r"^sugov:\d+$": 5,
# Some boards like Hikey960 have noisy threaded IRQs (thermal sensor
# mailbox ...)
r"^irq/\d+-.*$": 1.5,
}
"""
PID/comm specific tuning for :meth:`test_noisy_tasks`
* **keys** can be PIDs, comms, or regexps for comms.
* **values** are noisiness thresholds (%), IOW below that runtime threshold
the associated task will be ignored in the noise accounting.
"""
# Roughly 330*2 ms for PELT half life~=32ms
# This allows enough time for scheduler signals to converge.
_BUFFER_PHASE_DURATION_S = pelt_settling_time() * 2
"""
Duration of the initial buffer phase; this is a phase that copies the first
phase of each task, and that is prepended to the relevant task - this means
all tasks in the profile get a buffer phase.
"""
_BUFFER_PHASE_PROPERTIES = {
'name': 'buffer',
}
"""
Properties of the buffer phase, see :attr:`_BUFFER_PHASE_DURATION_S`
"""
@RTAEventsAnalysis.df_rtapp_phases_start.used_events
@RTAEventsAnalysis.df_rtapp_phases_end.used_events
@requires_events('sched_switch')
def trace_window(self, trace):
"""
The time window to consider for this :class:`RTATestBundle`
:returns: a (start, stop) tuple
Since we're using rt-app profiles, we know the name of tasks we are
interested in, so we can trim our trace scope to filter out the
setup/teardown events we don't care about.
Override this method if you need a different trace trimming.
.. warning::
Calling ``self.trace`` here will raise an :exc:`AttributeError`
exception, to avoid entering infinite recursion.
"""
swdf = trace.df_event('sched_switch')
def get_first_switch(row):
comm, pid, _ = row.name
start_time = row['Time']
task = TaskID(comm=comm, pid=pid)
start_swdf = df_filter_task_ids(swdf, [task], pid_col='next_pid', comm_col='next_comm')
pre_phase_swdf = start_swdf[start_swdf.index < start_time]
# The task with that comm and PID was never switched-in, which
# means it was still on the current CPU when it was renamed, so we
# just report phase-start.
if pre_phase_swdf.empty:
return start_time
# Otherwise, we return the timestamp of the switch
else:
return pre_phase_swdf.index[-1]
profile = self.rtapp_profile
# Find when the first rtapp phase starts, and take the associated
# sched_switch that is immediately preceding
phase_start_df = trace.ana.rta.df_rtapp_phases_start(
wlgen_profile=profile,
)
# Get rid of the buffer phase we don't care about
phase_start_df = phase_start_df[
phase_start_df['properties'].transform(lambda props: props['meta']['from_test'])
]
rta_start = phase_start_df.apply(get_first_switch, axis=1).min()
# Find when the last rtapp phase ends
rta_stop = trace.ana.rta.df_rtapp_phases_end()['Time'].max()
return (rta_start, rta_stop)
@property
def rtapp_profile(self):
"""
Compute the RTapp profile based on ``plat_info``.
"""
return self.get_rtapp_profile(self.plat_info)
_rtapp_tasks_events = requires_events('sched_switch')
@property
@_rtapp_tasks_events
@memoized
def rtapp_task_ids_map(self):
"""
Mapping of task names as specified in the rtapp profile to list of
:class:`lisa.trace.TaskID` names found in the trace.
If the task forked, the list will contain more than one item.
"""
trace = self.get_trace(events=['sched_switch'])
names = self.rtapp_profile.keys()
return {
name: task_ids
for name, task_ids in RTA.resolve_trace_task_names(trace, names).items()
}
@property
@_rtapp_tasks_events
def rtapp_task_ids(self):
"""
The rtapp task :class:`lisa.trace.TaskID` as found from the trace in
this bundle.
:return: the list of actual trace task :class:`lisa.trace.TaskID`
"""
return sorted(itertools.chain.from_iterable(self.rtapp_task_ids_map.values()))
@property
@_rtapp_tasks_events
def rtapp_tasks_map(self):
"""
Same as :func:`rtapp_task_ids_map` but with list of strings for values.
"""
return {
name: [task_id.comm for task_id in task_ids]
for name, task_ids in self.rtapp_task_ids_map.items()
}
@property
@_rtapp_tasks_events
def rtapp_tasks(self):
"""
Same as :func:`rtapp_task_ids` but as a list of string.
:return: the list of actual trace task names
"""
return [task_id.comm for task_id in self.rtapp_task_ids]
@property
def cgroup_configuration(self):
"""
Compute the cgroup configuration based on ``plat_info``
"""
return self.get_cgroup_configuration(self.plat_info)
@non_recursive_property
@lru_memoized(first_param_maxsize=5)
def trace(self):
"""
:returns: a :class:`lisa.trace.TraceView` cropped to the window given
by :meth:`trace_window`.
.. seealso:: :attr:`FtraceTestBundleBase.trace`
"""
trace = super().trace
return trace.get_view(self.trace_window(trace), clear_base_cache=True)
def df_noisy_tasks(self, with_threshold_exclusion=True):
"""
:returns: a DataFrame containing all tasks that contribute to the test
noise, i.e. all non-rt-app tasks.
:param with_threshold_exclusion: When set to True, known noisy services
will be ignored.
"""
df = self.trace.ana.tasks.df_tasks_runtime()
df = df.copy(deep=False)
# We don't want to account for the test tasks
ignored_ids = copy.copy(self.rtapp_task_ids)
df['runtime_pct'] = df['runtime'] * (100 / self.trace.time_range)
df['pid'] = df.index
threshold_exclusion = self.NOISE_ACCOUNTING_THRESHOLDS if with_threshold_exclusion else {}
# Figure out which PIDs to exclude from the thresholds
for key, threshold in threshold_exclusion.items():
# Find out which task(s) this threshold is about
if isinstance(key, str):
comms = df.loc[df['comm'].str.match(key), 'comm']
task_ids = comms.apply(self.trace.get_task_id)
else:
# Use update=False to let None fields propagate, as they are
# used to indicate a "don't care" value
task_ids = [self.trace.get_task_id(key, update=False)]
# For those tasks, check the cumulative threshold
runtime_pct_sum = df_filter_task_ids(df,
task_ids)['runtime_pct'].sum()
if runtime_pct_sum <= threshold:
ignored_ids.extend(task_ids)
self.logger.info(f"Ignored PIDs for noise contribution: {', '.join(map(str, ignored_ids))}")
# Filter out unwanted tasks (rt-app tasks + thresholds)
df = df_filter_task_ids(df, ignored_ids, invert=True)
return df.loc[df['runtime'] > 0]
@TestBundleBase.add_undecided_filter
@TasksAnalysis.df_tasks_runtime.used_events
def test_noisy_tasks(self, *, noise_threshold_pct=None, noise_threshold_ms=None):
"""
Test that no non-rtapp ("noisy") task ran for longer than the specified thresholds
:param noise_threshold_pct: The maximum allowed runtime for noisy tasks in
percentage of the total rt-app execution time
:type noise_threshold_pct: float
:param noise_threshold_ms: The maximum allowed runtime for noisy tasks in ms
:type noise_threshold_ms: float
If both are specified, the smallest threshold (in seconds) will be used.
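For instance (the threshold value is illustrative)::
    res = bundle.test_noisy_tasks(noise_threshold_pct=1)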
"""
if noise_threshold_pct is None and noise_threshold_ms is None:
raise ValueError('Both "noise_threshold_pct" and "noise_threshold_ms" cannot be None')
# No task can run longer than the recorded duration
threshold_s = self.trace.time_range
if noise_threshold_pct is not None:
threshold_s = noise_threshold_pct * self.trace.time_range / 100
if noise_threshold_ms is not None:
# Convert the millisecond threshold into seconds
threshold_s = min(threshold_s, noise_threshold_ms * 1e-3)
df_noise = self.df_noisy_tasks()
if df_noise.empty:
return ResultBundle.from_bool(True)
res = ResultBundle.from_bool(df_noise['runtime'].sum() < threshold_s)
pid = df_noise.index[0]
comm = df_noise['comm'].iloc[0]
duration_s = df_noise['runtime'].iloc[0]
duration_pct = df_noise['runtime_pct'].iloc[0]
metric = {"pid": pid,
"comm": comm,
"duration (abs)": TestMetric(duration_s, "s"),
"duration (rel)": TestMetric(duration_pct, "%")}
res.add_metric("noisiest task", metric)
return res
@classmethod
def unscaled_utilization(cls, plat_info, cpu, utilization_pct):
"""
Convert utilization scaled to a CPU to a 'raw', unscaled one.
:param cpu: The CPU against which ``utilization_pct`` is scaled
:type cpu: int
:param utilization_pct: The scaled utilization in %
:type utilization_pct: int
.. seealso: In most cases,
`PeriodicWload(scale_for_cpu=..., scale_for_freq=...)` is easier to
use and leads to clearer code.
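For instance, to convert a 50% utilization scaled to CPU 0 into an unscaled
duty cycle (values are illustrative, ``plat_info`` being a
:class:`lisa.platforms.platinfo.PlatformInfo`)::
    duty_cycle_pct = cls.unscaled_utilization(plat_info, 0, 50)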
"""
return PeriodicWload(
duty_cycle_pct=utilization_pct,
scale_for_cpu=cpu,
).unscaled_duty_cycle_pct(plat_info)
@classmethod
def get_rtapp_profile(cls, plat_info, **kwargs):
"""
Returns a :class:`dict` with task names as keys and
:class:`lisa.wlgen.rta.RTATask` as values.
The following modifications are done on the profile returned by
:meth:`_get_rtapp_profile`:
* A buffer phase may be inserted at the beginning of each task in order
to stabilize some kernel signals.
* A ``from_test`` meta key is added to each
:class:`lisa.wlgen.rta.RTAPhase` with a boolean value that is
``True`` if the phase comes from the test itself and ``False`` if
it was added here (e.g. the buffer phase). This allows
future-proof filtering of phases in the test code when inspecting
the profile by looking at ``phase['meta']['from_test']``.
.. note:: If you want to override the method in a subclass, override
:meth:`_get_rtapp_profile` instead.
"""
def add_buffer(task):
template_phase = task.phases[0]
wload = template_phase['wload']
task = task.with_props(meta=leaf_precedence({'from_test': True}))
if 'name' not in task:
task = task.with_props(name='test')
# Don't add the buffer phase if it has a nil duration
if not cls._BUFFER_PHASE_DURATION_S:
return task
elif isinstance(wload, PeriodicWload):
# Notes:
#
# Using a small period to allow the util_avg to be very close
# to duty_cycle, but that also makes the duty_cycle converge to
# a wrong value (rtapp loses fidelity with small periods,
# maybe due to tracing overhead). Therefore we just replicate
# the period.
ref_wload = PeriodicWload(
duration=cls._BUFFER_PHASE_DURATION_S,
)
buffer_phase = RTAPhase(
# Override some parameters with the reference ones
prop_wload=ref_wload & wload,
# Pin to the same CPUs and NUMA nodes if any, so that we
# also let the runqueue signals converge and things like
# that, if it's going to matter later.
prop_cpus=template_phase.get('cpus'),
prop_numa_nodes_membind=template_phase.get('numa_nodes_membind'),
prop_meta={'from_test': False},
properties=cls._BUFFER_PHASE_PROPERTIES,
)
# Prepend the buffer task
return buffer_phase + task
else:
return task
profile = cls._get_rtapp_profile(plat_info, **kwargs)
return {
name: add_buffer(task)
for name, task in profile.items()
}
@classmethod
@abc.abstractmethod
def _get_rtapp_profile(cls, plat_info):
"""
:meta public:
:returns: a :class:`dict` with task names as keys and
:class:`lisa.wlgen.rta.RTATask` as values
This is the method you want to override to specify what your synthetic
workload is; a sketch of an override is shown below.
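A minimal override could look like this (a sketch: the duty cycle and
duration values are arbitrary, and the exact
:class:`lisa.wlgen.rta.PeriodicWload` parameter names should be checked
against :mod:`lisa.wlgen.rta`)::
    @classmethod
    def _get_rtapp_profile(cls, plat_info):
        return {
            'task0': RTAPhase(
                prop_wload=PeriodicWload(
                    duty_cycle_pct=20,
                    duration=1,
                    period=cls.TASK_PERIOD,
                ),
            ),
        }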
"""
@classmethod
def get_cgroup_configuration(cls, plat_info):
"""
:returns: a :class:`dict` representing the configuration of a
particular cgroup.
This is a method you may optionally override to configure a cgroup for
the synthetic workload.
Example of return value::
{
'name': 'lisa_test',
'controller': 'schedtune',
'attributes' : {
'prefer_idle' : 1,
'boost': 50
}
}
"""
return {}
@classmethod
def _target_configure_cgroup(cls, target, cfg):
if not cfg:
return None
try:
cgroups = target.cgroups
except AttributeError:
ResultBundle.raise_skip('cgroups are not available on this target')
kind = cfg['controller']
try:
ctrl = cgroups.controllers[kind]
except KeyError:
ResultBundle.raise_skip(f'"{kind}" cgroup controller unavailable')
cg = ctrl.cgroup(cfg['name'])
cg.set(**cfg['attributes'])
return '/' + cg.name
@classmethod
def run_rtapp(cls, target, res_dir, profile=None, collector=None, cg_cfg=None, wipe_run_dir=True, update_cpu_capacities=None):
"""
Run the given RTA profile on the target, and collect an ftrace trace.
:param target: target to execute the workload on.
:type target: lisa.target.Target
:param res_dir: Artifact folder where the artifacts will be stored.
:type res_dir: str or lisa.utils.ArtifactPath
:param profile: ``rt-app`` profile, as a dictionary of
``dict(task_name, RTATask)``. If ``None``,
:meth:`~lisa.tests.base.RTATestBundle.get_rtapp_profile` is called
with ``target.plat_info``.
:type profile: dict(str, lisa.wlgen.rta.RTATask)
:param collector: Context manager collector to use while running rt-app.
:type collector: lisa.trace.ComposedCollector
:param cg_cfg: CGroup configuration dictionary. If ``None``,
:meth:`lisa.tests.base.RTATestBundle.get_cgroup_configuration` is
called with ``target.plat_info``.
:type cg_cfg: dict
:param wipe_run_dir: Remove the run directory on the target after
execution of the workload.
:type wipe_run_dir: bool
:param update_cpu_capacities: Attempt to update the CPU capacities
based on the calibration values of rtapp to get the most accurate
reproduction of duty cycles.
:type update_cpu_capacities: bool
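A typical call from :meth:`_from_target` looks like this (mirroring the
default implementation below)::
    cls.run_rtapp(target, res_dir, collector=collector)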
"""
logger = cls.get_logger()
trace_path = ArtifactPath.join(res_dir, cls.TRACE_PATH)
profile = profile or cls.get_rtapp_profile(target.plat_info)
cg_cfg = cg_cfg or cls.get_cgroup_configuration(target.plat_info)
try:
ftrace_coll = collector['ftrace']
except KeyError:
trace_events = []
else:
trace_events = [
event.replace('userspace@rtapp_', '')
for event in ftrace_coll.events
if event.startswith('userspace@rtapp_')
]
# Coarse-grained detection, but that should be enough for our use
try:
target.execute('ls /sys/kernel/debug/tracing/')
except TargetStableError:
debugfs_needs_root = True
else:
debugfs_needs_root = False
wload = RTA.from_profile(
target=target,
profile=profile,
res_dir=res_dir,
name=f"rta_{cls.__name__.casefold()}",
trace_events=trace_events,
# Force the default value for all settings so that the test does
# not depend on the environment setup.
force_defaults=True,
no_force_default_keys=[
# Since "taskgroup" cannot be always expected to work in case
# cgroupfs is not mounted at all, we will not force a default
# value for it.
'taskgroup'
],
)
profile_str = '\n'.join(
'Task {}:\n{}'.format(
task,
textwrap.indent(str(phase), ' ' * 4)
)
for task, phase in profile.items()
)
logger.info(f'rt-app workload:\n{profile_str}')
logger.debug(f'rt-app JSON:\n{wload.conf.json}')
cgroup = cls._target_configure_cgroup(target, cg_cfg)
as_root = bool(
cgroup is not None or (trace_events and debugfs_needs_root)
)
wload = wload(
wipe_run_dir=wipe_run_dir,
cgroup=cgroup,
as_root=as_root,
update_cpu_capacities=update_cpu_capacities,
)
with target.freeze_userspace(), wload, collector:
wload.run()
return collector
# Keep compat with existing code
@classmethod
def _run_rtapp(cls, *args, **kwargs):
"""
:meta public:
Has been renamed to :meth:`~lisa.tests.base.RTATestBundle.run_rtapp`, as it really is part of the public API.
"""
return cls.run_rtapp(*args, **kwargs)
@classmethod
def _from_target(cls, target: Target, *, res_dir: ArtifactPath, collector=None) -> 'RTATestBundle':
"""
:meta public:
Factory method to create a bundle using a live target
This will execute the rt-app workload described in
:meth:`~lisa.tests.base.RTATestBundle.get_rtapp_profile`
"""
cls.run_rtapp(target, res_dir, collector=collector)
plat_info = target.plat_info
return cls(res_dir, plat_info)
class TestBundle(OptionalFtraceTestBundle, OptionalDmesgTestBundle, TestBundleBase):
"""
Dummy class used as a base class for all tests.
"""
@classmethod
def check_from_target(cls, target):
super().check_from_target(target)
online = set(target.list_online_cpus())
cpus = set(range(target.plat_info['cpus-count']))
if not online <= cpus:
raise ValueError(f'Online CPUs ({online}) are not a subset of detected CPUs ({cpus})')
elif online != cpus:
offline = sorted(cpus - online)
# raise_skip() raises by itself, so no enclosing "raise" is needed
ResultBundle.raise_skip(f'All CPUs must be online (aka not hotplugged) before creating a TestBundle. Offline CPUs: {offline}')
# vim :set tabstop=4 shiftwidth=4 textwidth=80 expandtab
| ARM-software/lisa | lisa/tests/base.py | Python | apache-2.0 | 77,167 |
from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.dashboards.tasks import dashboard
class History(horizon.Panel):
name = _("History")
slug = "history"
dashboard.Tasks.register(History)
| icloudrnd/automation_tools | openstack_dashboard/dashboards/tasks/history/panel.py | Python | apache-2.0 | 243 |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Contains common test fixtures used to run unit tests.
"""
import sys
# This is needed so Python can find test_tools on the path.
sys.path.append('../../..')
from test_tools.fixtures.common import *
| awsdocs/aws-doc-sdk-examples | python/example_code/apigateway/aws_service/test/conftest.py | Python | apache-2.0 | 311 |
# Copyright 2017 Erik Tollerud
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals # just in case, for py2 to be py3-ish
import pkgutil, io
import numpy as np
from matplotlib import image, cm
from matplotlib import pyplot as plt
__all__ = ['get_cat_num', 'n_cats', 'catter']
# N_cats x 72 x 72, 0 is transparent, 1 is full-cat
_CAT_DATA = np.load(io.BytesIO(pkgutil.get_data('catterplot', 'data/cats.npy')))
def get_cat_num(i):
return _CAT_DATA[i]
def n_cats():
return len(_CAT_DATA)
def catter(x, y, s=40, c=None, cat='random', alpha=1, ax=None, cmap=None,
aspects='auto'):
"""
A catter plot (scatter plot with cats). Most arguments are interpreted
the same as the matplotlib `scatter` function, except that ``s`` is the
*data* size of the symbol (not pixel). Additional kwargs include:
``cat`` can be:
* int : the index of the cat symbol to use - you can use
``catterplot.n_cats()`` to get the number of cats available
* a sequence of ints : must match the data, but otherwise interpreted as for
a scalar
* 'random'/'rand' : random cats
``ax`` can be:
* None: use the current default (pyplot) axes
* an `Axes` : plot the cats into that axes
``aspects`` can be:
* 'auto': the cats' length-to-width is set to be square given the spread of
inputs
* a float: the height/width of the cats. If not 1, ``s`` is interpreted as
the geometric mean of the sizes
* a sequence of floats: must match data, gives height/width
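A minimal usage sketch (random data; size and count are illustrative)::
    import numpy as np
    import matplotlib.pyplot as plt
    from catterplot import catter
    x, y = np.random.randn(2, 20)
    catter(x, y, s=0.5, cat='random')
    plt.show()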
"""
if ax is None:
ax = plt.gca()
if c is not None:
if cmap is None:
cmap = plt.rcParams['image.cmap']
smap = cm.ScalarMappable(cmap=cmap)
rgba = smap.to_rgba(c)
else:
rgba = np.ones((len(x), 4))
rgba[:, 3] *= alpha
if np.isscalar(s) or s.shape==tuple():
s = np.ones(len(x))*s
# otherwise assume shapes match
if cat in ('rand', 'random'):
cats = np.random.randint(n_cats(), size=len(x))
else:
try:
# Cast to int so the values can be used as indices into the cat data
cats = (np.ones(len(x)) * cat).astype(int)
except TypeError as e:
raise TypeError('`cat` argument needs to be "random", a scalar, or match the input.', e)
if aspects == 'auto':
aspects = np.ptp(y)/np.ptp(x)
if np.isscalar(aspects) or aspects.shape==tuple():
aspects = np.ones(len(x)) * aspects
ims = []
for xi, yi, si, ci, cati, aspecti in zip(x, y, s, rgba, cats, aspects):
data = get_cat_num(cati)
offsetx = si * aspecti**-0.5 / (2 * data.shape[0])
offsety = si * aspecti**0.5 / (2 * data.shape[1])
im = image.AxesImage(ax, extent=(xi - offsetx, xi + offsetx,
yi - offsety, yi + offsety))
if c is None:
# defaults to fading "black"
cdata = 1-data
else:
# leave just the alpha to control the fading
cdata = np.ones(data.shape)
imarr = np.transpose([cdata*ci[0], cdata*ci[1], cdata*ci[2],
data*ci[3]], (1, 2, 0))
im.set_data(imarr)
ims.append(im)
for im in ims:
ax.add_image(im)
#ax.autoscale_view()
# for some reason autoscaling fails for images. So we'll just force it via
# scatter...
sc = plt.scatter(x, y)
sc.remove()
return ims
| eteq/catterplotpy | catterplot/core.py | Python | apache-2.0 | 4,056 |
from typing import Any, List, Tuple, Dict #cast
from sphinx.application import Sphinx
# from sphinx.ext.autodoc import Documenter
from sphinx.ext.autodoc import ModuleLevelDocumenter
from sphinx.pycode import ModuleAnalyzer, PycodeError
#from sphinx.domains.python import PythonDomain
from sphinx.locale import __
from sphinx.domains.python import PyObject
from sphinx import addnodes
from sphinx.util.inspect import signature as Signature
from sphinx.util.inspect import stringify_signature
import logging
logger = logging.getLogger(__name__)
# we can get source code first line numbers with this module for object
import inspect
from fontbakery.callable import (
FontbakeryCallable
, FontBakeryCondition
, FontBakeryCheck
, Disabled
, FontBakeryExpectedValue
)
# mute the style checks for unused names
# will be removed eventually
if False: #pylint: disable=using-constant-test
FontbakeryCallable
FontBakeryCondition
FontBakeryCheck
Disabled
FontBakeryExpectedValue
__version__ = '0.0.1'
# ModuleLevelDocumenter(Documenter): Specialized Documenter subclass for objects on module level (functions,
# classes, data/constants). Implements: resolve_name
# https://github.com/sphinx-doc/sphinx/blob/master/sphinx/ext/autodoc/__init__.py#L850
# Documenter
class FontBakeryCallableDocumenter(ModuleLevelDocumenter):
"""
Specialized Documenter subclass for instances of FontBakeryCheck.
"""
objtype = 'fontbakerycallable'
can_doc_cls = FontbakeryCallable
member_order = 30
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
# type: (Any, str, bool, Any) -> bool
return isinstance(member, cls.can_doc_cls)
def format_args(self): # pylint: disable=arguments-differ # I am really not sure what went wrong here...
# type: () -> str
# We use the original signature from the wrapped _function
has_retval = isinstance(self.object, FontBakeryCondition)
if not hasattr(self.object, '_func'):
      # FIXME! I don't know what this is.
return None
sig = Signature(self.object._func, bound_method=False, has_retval=has_retval)
args = stringify_signature(sig)
# escape backslashes for reST
args = args.replace('\\', '\\\\')
return args
def format_name(self):
# I'm using this to inject some new info into the check
# search for the separator ":::" in this document to see where
# the info is received. This is not a clean solution!
#
# in https://github.com/sphinx-doc/sphinx/blob/master/sphinx/ext/autodoc/__init__.py#L374
# it says:
# > This normally should be something that can be parsed by the generated
# > directive, but doesn't need to be (Sphinx will display it unparsed
# > then).
# See below in `handle_signature`
    # where that ipdb debugger is started; usually that exception would be
    # dropped and we'd drop out of signature building (raised here in
    # `_handle_signature`: the ValueError when the regex doesn't match ...).
    # Seems like the slash (/) is killing most of the header!
# Otherwise the ids display fine, the dots are fine.
# Also, in any case of name change, the [source] view is killed (removed!)
# the document and also genindex.html anchor works so far (with 7 instead of /)
#
res = super().format_name()
if self.objtype == 'fontbakerycheck':
      # A bit hackish, splitting somewhere else by ::: to retrieve the check id
# we can get the source file first line number of self.object:
lineno = inspect.getsourcelines(self.object)[1]
res = self.object.id + ':::' + f'{lineno}' + ':::' + res#.replace('/', '7')
# else:
# res = super().format_name()
# print('formatted name:', res)
# > formatted name: com.google.fonts/check/xavgcharwidth:::59:::com_google_fonts_check_xavgcharwidth
# > formatted name: bold_wght_coord
return res
# handle_signature: com_google_fonts_check_post_table_version(ttFont, is_ttf) <desc_signature first="False"/>
# sig signature: com_google_fonts_check_post_table_version(ttFont, is_ttf)
# result: ('com_google_fonts_check_post_table_version', None) signode: <desc_signature class="" first="False" fullname="com_google_fonts_check_post_table_version" module="fontbakery.profiles.post"><desc_annotation xml:space="preserve">FontBakeryCheck </desc_annotation><desc_addname xml:space="preserve">fontbakery.profiles.post.</desc_addname><desc_name xml:space="preserve">com_google_fonts_check_post_table_version</desc_name><desc_parameterlist xml:space="preserve"><desc_parameter xml:space="preserve">ttFont</desc_parameter><desc_parameter xml:space="preserve">is_ttf</desc_parameter></desc_parameterlist></desc_signature>
def generate(self, more_content=None, real_modname=None,
check_module=False, all_members=False):
# type: (Any, str, bool, bool) -> None
"""Generate reST for the object given by *self.name*, and possibly for
its members.
If *more_content* is given, include that content. If *real_modname* is
given, use that module name to find attribute docs. If *check_module* is
True, only generate if the object is defined in the module name it is
imported from. If *all_members* is True, document all members.
"""
# print('generate', more_content, real_modname, check_module, all_members)
# print(self.name)
# print('---------------------')
# > generate None fontbakery.profiles.post True True
# > fontbakery.profiles.post::com_google_fonts_check_post_table_version
# > ---------------------
#
# > generate None fontbakery.profiles.shared_conditions True True
# > fontbakery.profiles.shared_conditions::glyph_metrics_stats
# > ---------------------
if not self.parse_name():
# need a module to import
logger.warning(
__('don\'t know which module to import for autodocumenting '
'%r (try placing a "module" or "currentmodule" directive '
'in the document, or giving an explicit module name)') %
self.name, type='autodoc')
return
# now, import the module and get object to document
if not self.import_object():
return
# doesn't do anything!
# if self.objtype == 'fontbakerycheck':
# self.name = self.object.id
# If there is no real module defined, figure out which to use.
# The real module is used in the module analyzer to look up the module
# where the attribute documentation would actually be found in.
# This is used for situations where you have a module that collects the
# functions and classes of internal submodules.
self.real_modname = real_modname or self.get_real_modname() # type: str
# try to also get a source code analyzer for attribute docs
try:
self.analyzer = ModuleAnalyzer.for_module(self.real_modname)
# parse right now, to get PycodeErrors on parsing (results will
# be cached anyway)
self.analyzer.find_attr_docs()
except PycodeError as err:
logger.debug('[autodoc] module analyzer failed: %s', err)
# no source file -- e.g. for builtin and C modules
self.analyzer = None
# at least add the module.__file__ as a dependency
if hasattr(self.module, '__file__') and self.module.__file__:
self.directive.filename_set.add(self.module.__file__)
else:
self.directive.filename_set.add(self.analyzer.srcname)
# check __module__ of object (for members not given explicitly)
if check_module:
if not self.check_module():
return
sourcename = self.get_sourcename()
# make sure that the result starts with an empty line. This is
# necessary for some situations where another directive preprocesses
# reST and no starting newline is present
self.add_line('', sourcename)
# format the object's signature, if any
sig = self.format_signature()
# generate the directive header and options, if applicable
self.add_directive_header(sig)
self.add_line('', sourcename)
# e.g. the module directive doesn't have content
self.indent += self.content_indent
# add all content (from docstrings, attribute docs etc.)
self.add_content(more_content)
# document members, if possible
self.document_members(all_members)
class FontBakeryCheckDocumenter(FontBakeryCallableDocumenter):
objtype = 'fontbakerycheck'
can_doc_cls = FontBakeryCheck
class FontBakeryConditionDocumenter(FontBakeryCallableDocumenter):
objtype = 'fontbakerycondition'
can_doc_cls = FontBakeryCondition
from sphinx.domains.python import _pseudo_parse_arglist
import re
# REs for Python signatures
py_sig_re = re.compile(
r'''^ ([\w.]*\.)? # class name(s)
(\w+) \s* # thing name
(?: \(\s*(.*)\s*\) # optional: arguments
(?:\s* -> \s* (.*))? # return annotation
)? $ # and nothing more
''', re.VERBOSE)
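# For example, py_sig_re.match('com_google_fonts_check_post_table_version(ttFont, is_ttf)')
# produces the groups (None, 'com_google_fonts_check_post_table_version',
# 'ttFont, is_ttf', None), i.e. (class prefix, name, arglist, return annotation).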
# PyObject: https://github.com/sphinx-doc/sphinx/blob/master/sphinx/domains/python.py#L189
# PyObject is a subclass of sphinx.directives.ObjectDescription
# ObjectDescription is a sphinx.util.docutils.SphinxDirective
# SphinxDirective is a docutils.parsers.rst.Directive
class PyFontBakeryObject(PyObject):
"""
Description of a class-like object (classes, interfaces, exceptions).
"""
allow_nesting = True
@property
def pretty_objtype(self):
if self.objtype.startswith('fontbakery'):
suffix = self.objtype[len('fontbakery'):]
return 'FontBakery' + suffix[0].upper() + suffix[1:]
return self.objtype
def get_signature_prefix(self, sig):
# type: (str) -> str
# import ipdb
# ipdb.set_trace()
# print('sig signature:', sig)
# > sig signature: com_google_fonts_check_all_glyphs_have_codepoints(ttFont)
return self.pretty_objtype + ' '
  # This is awkward: it returns two values but also manipulates
  # signode massively, which is undocumented.
# signode is an instance of <class 'sphinx.addnodes.desc_signature'>
# from https://github.com/sphinx-doc/sphinx/blob/master/sphinx/domains/python.py#L237
def _handle_signature(self, cid, lineno, sig, signode):
# type: (str, addnodes.desc_signature) -> Tuple[str, str]
"""Transform a Python signature into RST nodes.
Return (fully qualified name of the thing, classname if any).
If inside a class, the current class name is handled intelligently:
* it is stripped from the displayed name if present
* it is added to the full name (return value) if not present
    This is the XML string result of signode; the whitespace has been
    reformatted here for readability.
<desc_signature
class=""
first="False"
fullname="com.google.fonts/check/all_glyphs_have_codepoints"
module="fontbakery.profiles.cmap"
>
<desc_annotation
xml:space="preserve">FontBakeryCheck </desc_annotation>
<desc_addname
xml:space="preserve">fontbakery.profiles.cmap.</desc_addname>
<desc_name
xml:space="preserve">com_google_fonts_check_all_glyphs_have_codepoints</desc_name>
<desc_parameterlist
xml:space="preserve">
<desc_parameter xml:space="preserve">ttFont</desc_parameter>
</desc_parameterlist>
</desc_signature>
"""
m = py_sig_re.match(sig)
if m is None:
# this is the immediate fail!!!
raise ValueError
prefix, name, arglist, retann = m.groups()
# print('prefix, name, arglist, retann =', prefix, name, arglist, retann)
# > prefix, name, arglist, retann = None com_google_fonts_check_all_glyphs_have_codepoints ttFont None
# determine module and class name (if applicable), as well as full name
modname = self.options.get('module', self.env.ref_context.get('py:module'))
classname = self.env.ref_context.get('py:class')
if classname:
add_module = False
if prefix and (prefix == classname or
prefix.startswith(classname + ".")):
fullname = prefix + name
# class name is given again in the signature
prefix = prefix[len(classname):].lstrip('.')
elif prefix:
# class name is given in the signature, but different
# (shouldn't happen)
fullname = classname + '.' + prefix + name
else:
# class name is not given in the signature
fullname = classname + '.' + name
else:
add_module = True
if prefix:
classname = prefix.rstrip('.')
fullname = prefix + name
else:
classname = ''
fullname = name
signode['module'] = modname
signode['class'] = classname
signode['fullname'] = fullname
signode.attributes['lineno'] = lineno
#sig_prefix = self.get_signature_prefix(sig)
#if sig_prefix:
# signode += addnodes.desc_annotation(sig_prefix, sig_prefix)
if prefix:
signode += addnodes.desc_addname(prefix, prefix)
elif add_module and self.env.config.add_module_names:
if modname and modname != 'exceptions':
# exceptions are a special case, since they are documented in the
# 'exceptions' module.
#nodetext = modname + ' ID: '
#signode += addnodes.desc_addname(nodetext, nodetext)
pass
signode += addnodes.desc_name(name, cid)
if arglist:
_pseudo_parse_arglist(signode, arglist)
else:
if self.needs_arglist():
# for callables, add an empty parameter list
signode += addnodes.desc_parameterlist()
if retann:
signode += addnodes.desc_returns(retann, retann)
anno = self.options.get('annotation')
if anno:
signode += addnodes.desc_annotation(' ' + anno, ' ' + anno)
return cid, prefix
def handle_signature(self, sig, signode):
# print('>>>>>>>>>>>>>>>>>handle_signature:', sig, signode)
# > >>>>>>>>>>>>>>>>>handle_signature: com.google.fonts/check/all_glyphs_have_codepoints:::36:::com_google_fonts_check_all_glyphs_have_codepoints(ttFont) <desc_signature first="False"/>
cid = None
if ':::' in sig:
cid, lineno, sig = sig.split(':::')
# print('GOT id:', cid, lineno, 'for:', sig)
# > GOT id: com.google.fonts/check/all_glyphs_have_codepoints 36 for: com_google_fonts_check_all_glyphs_have_codepoints(ttFont)
res = '(NONE!)'
try:
res = self._handle_signature(cid, lineno, sig, signode) if cid is not None\
else super().handle_signature(sig, signode)
except Exception as e:
print('!!!', e)
raise e
return res
# This ends in: path-to-docs/html/genindex.html
def get_index_text(self, modname, name):
# type: (str, Tuple[str, str]) -> str
return f'{name[0]} ({self.pretty_objtype} in {modname})'
# fontbakerycheck
# modname: fontbakery.profiles.cmap
# name_cls:('com_google_fonts_check_all_glyphs_have_codepoints', None)
# return f' {self.objtype} modname: {modname} name_cls:{name_cls}'
def add_target_and_index(self, name_cls, sig, signode):
# type: (Tuple[str, str], str, addnodes.desc_signature) -> None
modname = self.options.get('module', self.env.ref_context.get('py:module'))
# fullname = (modname and modname + '.' or '') + name_cls[0]
fullname = name_cls[0]
# note target
if fullname not in self.state.document.ids:
signode['names'].append(fullname)
signode['ids'].append(fullname)
signode['first'] = (not self.names)
self.state.document.note_explicit_target(signode)
# note, there will be a change to this in a future release
# https://github.com/sphinx-doc/sphinx/commit/259be8716ad4b2332aa4d7693d73400eb06fa7d7
## in the past (now)
objects = self.env.domaindata['py']['objects']
if fullname in objects:
self.state_machine.reporter.warning(
'duplicate object description of %s, ' % fullname +
'other instance in ' +
self.env.doc2path(objects[fullname][0]) +
', use :noindex: for one of them',
line=self.lineno)
objects[fullname] = (self.env.docname, self.objtype)
## in the future
# domain = cast(PythonDomain, self.env.get_domain('py'))
# domain.note_object(fullname, self.objtype)
indextext = self.get_index_text(modname, name_cls)
if indextext:
self.indexnode['entries'].append(('single',
indextext,
fullname,
'',
None))
# Copied a lot from napoleon extension:
# https://github.com/sphinx-doc/sphinx/blob/master/sphinx/ext/napoleon/__init__.py
# To get started, hooking into autodoc seems the way to go, hence that was
# a good fit.
def setup(app):
# type: (Sphinx) -> Dict[str, Any]
"""Sphinx extension setup function.
When the extension is loaded, Sphinx imports this module and executes
the ``setup()`` function, which in turn notifies Sphinx of everything
the extension offers.
Parameters
----------
app : sphinx.application.Sphinx
Application object representing the Sphinx process
See Also
--------
`The Sphinx documentation on Extensions
<http://sphinx-doc.org/extensions.html>`_
`The Extension Tutorial <http://sphinx-doc.org/extdev/tutorial.html>`_
`The Extension API <http://sphinx-doc.org/extdev/appapi.html>`_
"""
if not isinstance(app, Sphinx):
# probably called by tests
return {'version': __version__, 'parallel_read_safe': True}
# _patch_python_domain()
#=> this:
app.add_autodocumenter(FontBakeryCallableDocumenter)
app.add_autodocumenter(FontBakeryCheckDocumenter)
app.add_autodocumenter(FontBakeryConditionDocumenter)
# https://github.com/sphinx-doc/sphinx/blob/master/sphinx/domains/python.py
app.add_directive_to_domain('py', 'fontbakerycallable', PyFontBakeryObject, override=False)
app.add_directive_to_domain('py', 'fontbakerycheck', PyFontBakeryObject, override=False)
app.add_directive_to_domain('py', 'fontbakerycondition', PyFontBakeryObject, override=False)
# => see e.g.: https://github.com/sphinx-doc/sphinx/blob/master/sphinx/ext/autodoc/__init__.py#L984
app.setup_extension('sphinx.ext.autodoc')
app.connect('autodoc-process-docstring', _process_docstring)
app.connect('autodoc-skip-member', _skip_member)
#for name, (default, rebuild) in Config._config_values.items():
# app.add_config_value(name, default, rebuild)
return {'version': __version__, 'parallel_read_safe': True}
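# A minimal wiring sketch, assuming this module is importable as
# "fontbakery.sphinx_extensions.profile": add it to ``extensions`` in the
# docs' conf.py, e.g.
#
#   extensions = ['fontbakery.sphinx_extensions.profile', ...]
#
# after which the usual ``.. automodule:: fontbakery.profiles.cmap`` directive
# with ``:members:`` routes FontBakeryCheck/FontBakeryCondition objects through
# the documenters registered in setup() above.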
def _skip_member(app, what, name, obj, skip, options):
# type: (Sphinx, str, str, Any, bool, Any) -> bool
"""Determine if private and special class members are included in docs.
The following settings in conf.py determine if private and special class
members or init methods are included in the generated documentation:
* ``napoleon_include_init_with_doc`` --
include init methods if they have docstrings
* ``napoleon_include_private_with_doc`` --
include private members if they have docstrings
* ``napoleon_include_special_with_doc`` --
include special members if they have docstrings
Parameters
----------
app : sphinx.application.Sphinx
Application object representing the Sphinx process
what : str
A string specifying the type of the object to which the member
belongs. Valid values: "module", "class", "exception", "function",
"method", "attribute".
name : str
The name of the member.
obj : module, class, exception, function, method, or attribute.
For example, if the member is the __init__ method of class A, then
`obj` will be `A.__init__`.
skip : bool
A boolean indicating if autodoc will skip this member if `_skip_member`
does not override the decision
options : sphinx.ext.autodoc.Options
The options given to the directive: an object with attributes
inherited_members, undoc_members, show_inheritance and noindex that
are True if the flag option of same name was given to the auto
directive.
Returns
-------
bool
True if the member should be skipped during creation of the docs,
False if it should be included in the docs.
"""
if name in ['check_skip_filter',
'conditions',
'configs',
'description',
'documentation',
'force',
'id',
'is_librebarcode',
'name',
'proposal',
'rationale',
'severity']:
return True
else:
return None
def _process_docstring(app, what, name, obj, options, lines):
# type: (Sphinx, str, str, Any, Any, List[str]) -> None
"""Process the docstring for a given python object.
Called when autodoc has read and processed a docstring. `lines` is a list
of docstring lines that `_process_docstring` modifies in place to change
what Sphinx outputs.
The following settings in conf.py control what styles of docstrings will
be parsed:
* ``napoleon_google_docstring`` -- parse Google style docstrings
* ``napoleon_numpy_docstring`` -- parse NumPy style docstrings
Parameters
----------
app : sphinx.application.Sphinx
Application object representing the Sphinx process.
what : str
A string specifying the type of the object to which the docstring
belongs. Valid values: "module", "class", "exception", "function",
"method", "attribute".
name : str
The fully qualified name of the object.
obj : module, class, exception, function, method, or attribute
The object to which the docstring belongs.
options : sphinx.ext.autodoc.Options
The options given to the directive: an object with attributes
inherited_members, undoc_members, show_inheritance and noindex that
are True if the flag option of same name was given to the auto
directive.
lines : list of str
The lines of the docstring, see above.
.. note:: `lines` is modified *in place*
"""
if hasattr(obj, 'rationale') and obj.rationale:
lines.append("**Rationale:**")
for line in obj.rationale.split('\n'):
lines.append(line)
if hasattr(obj, 'proposal') and obj.proposal:
proposal = obj.proposal
if not isinstance(obj.proposal, list):
proposal = [obj.proposal]
proposals = [p for p in proposal if "legacy:" not in p]
legacy_name = [p.split('legacy:')[1] for p in proposal if "legacy:" in p]
if legacy_name:
lines.append(f"**Legacy check** originally simply called '{legacy_name[0]}'."
f" We used to lack richer metadata back in 2015. We're open to"
f" further improvements to this description.")
else:
if proposals:
lines.append(f"**Originally proposed at** {proposals.pop(0)}")
if proposals:
proposals = ' / '.join(proposals)
lines.append(f"**Some additional changes** were proposed at {proposals}")
| googlefonts/fontbakery | Lib/fontbakery/sphinx_extensions/profile.py | Python | apache-2.0 | 24,923 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Monorail client."""
import json
from google.oauth2 import service_account
from google.auth.transport import requests as google_requests
import requests
_API_BASE = 'https://api-dot-monorail-prod.appspot.com/prpc'
_TARGET_AUDIENCE = 'https://monorail-prod.appspot.com'
_XSSI_PREFIX = ')]}\'\n'
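# pRPC responses are prefixed with the anti-XSSI marker above; it must be
# stripped before the body can be parsed as JSON (see get_issue below).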
class Client:
"""Monorail client."""
def __init__(self, project, service_account_info):
self.project = project
self._service_account_info = service_account_info
def get_issue(self, issue_id):
"""Get issue data."""
credentials = service_account.IDTokenCredentials.from_service_account_info(
self._service_account_info, target_audience=_TARGET_AUDIENCE)
credentials.refresh(google_requests.Request())
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
}
credentials.apply(headers)
url = f'{_API_BASE}/monorail.v3.Issues/GetIssue'
body = {'name': f'projects/{self.project}/issues/{issue_id}'}
resp = requests.post(url, json=body, headers=headers)
resp.raise_for_status()
result = resp.text
if result.startswith(_XSSI_PREFIX):
result = result[len(_XSSI_PREFIX):]
return json.loads(result)
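# A minimal usage sketch; the project name, issue id and key-file path are
# placeholders, and the service account must be authorised for the Monorail
# API.
if __name__ == '__main__':
  with open('service_account.json') as key_file:
    client = Client('oss-fuzz', json.load(key_file))
  print(client.get_issue(12345))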
| google/osv | gcp/appengine/monorail.py | Python | apache-2.0 | 1,791 |
# Copyright 2016-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains functions for performing actions on rooms."""
import itertools
import logging
import math
import random
import string
from collections import OrderedDict
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Collection,
Dict,
List,
Optional,
Tuple,
)
import attr
from typing_extensions import TypedDict
from synapse.api.constants import (
EventContentFields,
EventTypes,
GuestAccess,
HistoryVisibility,
JoinRules,
Membership,
RoomCreationPreset,
RoomEncryptionAlgorithms,
RoomTypes,
)
from synapse.api.errors import (
AuthError,
Codes,
HttpResponseException,
LimitExceededError,
NotFoundError,
StoreError,
SynapseError,
)
from synapse.api.filtering import Filter
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
from synapse.event_auth import validate_event_for_room_version
from synapse.events import EventBase
from synapse.events.utils import copy_power_levels_contents
from synapse.federation.federation_client import InvalidResponseError
from synapse.handlers.federation import get_domains_from_state
from synapse.rest.admin._base import assert_user_is_admin
from synapse.storage.databases.main.relations import BundledAggregations
from synapse.storage.state import StateFilter
from synapse.streams import EventSource
from synapse.types import (
JsonDict,
MutableStateMap,
Requester,
RoomAlias,
RoomID,
RoomStreamToken,
StateMap,
StreamToken,
UserID,
create_requester,
)
from synapse.util import stringutils
from synapse.util.async_helpers import Linearizer
from synapse.util.caches.response_cache import ResponseCache
from synapse.util.stringutils import parse_and_validate_server_name
from synapse.visibility import filter_events_for_client
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
id_server_scheme = "https://"
FIVE_MINUTES_IN_MS = 5 * 60 * 1000
@attr.s(slots=True, frozen=True, auto_attribs=True)
class EventContext:
events_before: List[EventBase]
event: EventBase
events_after: List[EventBase]
state: List[EventBase]
aggregations: Dict[str, BundledAggregations]
start: str
end: str
class RoomCreationHandler:
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastores().main
self.auth = hs.get_auth()
self.clock = hs.get_clock()
self.hs = hs
self.spam_checker = hs.get_spam_checker()
self.event_creation_handler = hs.get_event_creation_handler()
self.room_member_handler = hs.get_room_member_handler()
self._event_auth_handler = hs.get_event_auth_handler()
self.config = hs.config
self.request_ratelimiter = hs.get_request_ratelimiter()
# Room state based off defined presets
self._presets_dict: Dict[str, Dict[str, Any]] = {
RoomCreationPreset.PRIVATE_CHAT: {
"join_rules": JoinRules.INVITE,
"history_visibility": HistoryVisibility.SHARED,
"original_invitees_have_ops": False,
"guest_can_join": True,
"power_level_content_override": {"invite": 0},
},
RoomCreationPreset.TRUSTED_PRIVATE_CHAT: {
"join_rules": JoinRules.INVITE,
"history_visibility": HistoryVisibility.SHARED,
"original_invitees_have_ops": True,
"guest_can_join": True,
"power_level_content_override": {"invite": 0},
},
RoomCreationPreset.PUBLIC_CHAT: {
"join_rules": JoinRules.PUBLIC,
"history_visibility": HistoryVisibility.SHARED,
"original_invitees_have_ops": False,
"guest_can_join": False,
"power_level_content_override": {},
},
}
# Modify presets to selectively enable encryption by default per homeserver config
for preset_name, preset_config in self._presets_dict.items():
encrypted = (
preset_name
in self.config.room.encryption_enabled_by_default_for_room_presets
)
preset_config["encrypted"] = encrypted
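        # e.g. if encryption_enabled_by_default_for_room_presets contains
        # "private_chat", rooms created with the PRIVATE_CHAT preset get an
        # m.room.encryption event in _send_events_for_new_room, while
        # PUBLIC_CHAT rooms are left unencrypted.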
self._replication = hs.get_replication_data_handler()
# linearizer to stop two upgrades happening at once
self._upgrade_linearizer = Linearizer("room_upgrade_linearizer")
# If a user tries to update the same room multiple times in quick
# succession, only process the first attempt and return its result to
# subsequent requests
self._upgrade_response_cache: ResponseCache[Tuple[str, str]] = ResponseCache(
hs.get_clock(), "room_upgrade", timeout_ms=FIVE_MINUTES_IN_MS
)
self._server_notices_mxid = hs.config.servernotices.server_notices_mxid
self.third_party_event_rules = hs.get_third_party_event_rules()
async def upgrade_room(
self, requester: Requester, old_room_id: str, new_version: RoomVersion
) -> str:
"""Replace a room with a new room with a different version
Args:
requester: the user requesting the upgrade
old_room_id: the id of the room to be replaced
new_version: the new room version to use
Returns:
the new room id
Raises:
ShadowBanError if the requester is shadow-banned.
"""
await self.request_ratelimiter.ratelimit(requester)
user_id = requester.user.to_string()
# Check if this room is already being upgraded by another person
for key in self._upgrade_response_cache.keys():
if key[0] == old_room_id and key[1] != user_id:
# Two different people are trying to upgrade the same room.
# Send the second an error.
#
# Note that this of course only gets caught if both users are
# on the same homeserver.
raise SynapseError(
400, "An upgrade for this room is currently in progress"
)
# Upgrade the room
#
# If this user has sent multiple upgrade requests for the same room
# and one of them is not complete yet, cache the response and
# return it to all subsequent requests
ret = await self._upgrade_response_cache.wrap(
(old_room_id, user_id),
self._upgrade_room,
requester,
old_room_id,
new_version, # args for _upgrade_room
)
return ret
async def _upgrade_room(
self, requester: Requester, old_room_id: str, new_version: RoomVersion
) -> str:
"""
Args:
requester: the user requesting the upgrade
old_room_id: the id of the room to be replaced
            new_version: the version to upgrade the room to
Raises:
ShadowBanError if the requester is shadow-banned.
"""
user_id = requester.user.to_string()
assert self.hs.is_mine_id(user_id), "User must be our own: %s" % (user_id,)
# start by allocating a new room id
r = await self.store.get_room(old_room_id)
if r is None:
raise NotFoundError("Unknown room id %s" % (old_room_id,))
new_room_id = await self._generate_room_id(
creator_id=user_id,
is_public=r["is_public"],
room_version=new_version,
)
logger.info("Creating new room %s to replace %s", new_room_id, old_room_id)
# we create and auth the tombstone event before properly creating the new
# room, to check our user has perms in the old room.
(
tombstone_event,
tombstone_context,
) = await self.event_creation_handler.create_event(
requester,
{
"type": EventTypes.Tombstone,
"state_key": "",
"room_id": old_room_id,
"sender": user_id,
"content": {
"body": "This room has been replaced",
"replacement_room": new_room_id,
},
},
)
old_room_version = await self.store.get_room_version(old_room_id)
validate_event_for_room_version(old_room_version, tombstone_event)
await self._event_auth_handler.check_auth_rules_from_context(
old_room_version, tombstone_event, tombstone_context
)
await self.clone_existing_room(
requester,
old_room_id=old_room_id,
new_room_id=new_room_id,
new_room_version=new_version,
tombstone_event_id=tombstone_event.event_id,
)
# now send the tombstone
await self.event_creation_handler.handle_new_client_event(
requester=requester,
event=tombstone_event,
context=tombstone_context,
)
old_room_state = await tombstone_context.get_current_state_ids()
# We know the tombstone event isn't an outlier so it has current state.
assert old_room_state is not None
# update any aliases
await self._move_aliases_to_new_room(
requester, old_room_id, new_room_id, old_room_state
)
# Copy over user push rules, tags and migrate room directory state
await self.room_member_handler.transfer_room_state_on_room_upgrade(
old_room_id, new_room_id
)
# finally, shut down the PLs in the old room, and update them in the new
# room.
await self._update_upgraded_room_pls(
requester,
old_room_id,
new_room_id,
old_room_state,
)
return new_room_id
async def _update_upgraded_room_pls(
self,
requester: Requester,
old_room_id: str,
new_room_id: str,
old_room_state: StateMap[str],
) -> None:
"""Send updated power levels in both rooms after an upgrade
Args:
requester: the user requesting the upgrade
old_room_id: the id of the room to be replaced
new_room_id: the id of the replacement room
old_room_state: the state map for the old room
Raises:
ShadowBanError if the requester is shadow-banned.
"""
old_room_pl_event_id = old_room_state.get((EventTypes.PowerLevels, ""))
if old_room_pl_event_id is None:
logger.warning(
"Not supported: upgrading a room with no PL event. Not setting PLs "
"in old room."
)
return
old_room_pl_state = await self.store.get_event(old_room_pl_event_id)
# we try to stop regular users from speaking by setting the PL required
# to send regular events and invites to 'Moderator' level. That's normally
# 50, but if the default PL in a room is 50 or more, then we set the
# required PL above that.
pl_content = dict(old_room_pl_state.content)
users_default = int(pl_content.get("users_default", 0))
restricted_level = max(users_default + 1, 50)
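        # e.g. users_default 0 -> restricted_level 50 (the usual moderator PL);
        # users_default 75 -> restricted_level 76, just above the room default.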
updated = False
for v in ("invite", "events_default"):
current = int(pl_content.get(v, 0))
if current < restricted_level:
logger.debug(
"Setting level for %s in %s to %i (was %i)",
v,
old_room_id,
restricted_level,
current,
)
pl_content[v] = restricted_level
updated = True
else:
logger.debug("Not setting level for %s (already %i)", v, current)
if updated:
try:
await self.event_creation_handler.create_and_send_nonmember_event(
requester,
{
"type": EventTypes.PowerLevels,
"state_key": "",
"room_id": old_room_id,
"sender": requester.user.to_string(),
"content": pl_content,
},
ratelimit=False,
)
except AuthError as e:
logger.warning("Unable to update PLs in old room: %s", e)
await self.event_creation_handler.create_and_send_nonmember_event(
requester,
{
"type": EventTypes.PowerLevels,
"state_key": "",
"room_id": new_room_id,
"sender": requester.user.to_string(),
"content": old_room_pl_state.content,
},
ratelimit=False,
)
async def clone_existing_room(
self,
requester: Requester,
old_room_id: str,
new_room_id: str,
new_room_version: RoomVersion,
tombstone_event_id: str,
) -> None:
"""Populate a new room based on an old room
Args:
requester: the user requesting the upgrade
old_room_id : the id of the room to be replaced
new_room_id: the id to give the new room (should already have been
                created with _generate_room_id())
new_room_version: the new room version to use
tombstone_event_id: the ID of the tombstone event in the old room.
"""
user_id = requester.user.to_string()
if not await self.spam_checker.user_may_create_room(user_id):
raise SynapseError(
403, "You are not permitted to create rooms", Codes.FORBIDDEN
)
creation_content: JsonDict = {
"room_version": new_room_version.identifier,
"predecessor": {"room_id": old_room_id, "event_id": tombstone_event_id},
}
# Check if old room was non-federatable
# Get old room's create event
old_room_create_event = await self.store.get_create_event_for_room(old_room_id)
# Check if the create event specified a non-federatable room
if not old_room_create_event.content.get(EventContentFields.FEDERATE, True):
# If so, mark the new room as non-federatable as well
creation_content[EventContentFields.FEDERATE] = False
initial_state = {}
# Replicate relevant room events
types_to_copy: List[Tuple[str, Optional[str]]] = [
(EventTypes.JoinRules, ""),
(EventTypes.Name, ""),
(EventTypes.Topic, ""),
(EventTypes.RoomHistoryVisibility, ""),
(EventTypes.GuestAccess, ""),
(EventTypes.RoomAvatar, ""),
(EventTypes.RoomEncryption, ""),
(EventTypes.ServerACL, ""),
(EventTypes.RelatedGroups, ""),
(EventTypes.PowerLevels, ""),
]
# If the old room was a space, copy over the room type and the rooms in
# the space.
if (
old_room_create_event.content.get(EventContentFields.ROOM_TYPE)
== RoomTypes.SPACE
):
creation_content[EventContentFields.ROOM_TYPE] = RoomTypes.SPACE
types_to_copy.append((EventTypes.SpaceChild, None))
old_room_state_ids = await self.store.get_filtered_current_state_ids(
old_room_id, StateFilter.from_types(types_to_copy)
)
# map from event_id to BaseEvent
old_room_state_events = await self.store.get_events(old_room_state_ids.values())
for k, old_event_id in old_room_state_ids.items():
old_event = old_room_state_events.get(old_event_id)
if old_event:
                # If the event is a space child event with empty content, it was
                # removed from the space and should be ignored.
if k[0] == EventTypes.SpaceChild and not old_event.content:
continue
initial_state[k] = old_event.content
# deep-copy the power-levels event before we start modifying it
# note that if frozen_dicts are enabled, `power_levels` will be a frozen
# dict so we can't just copy.deepcopy it.
initial_state[
(EventTypes.PowerLevels, "")
] = power_levels = copy_power_levels_contents(
initial_state[(EventTypes.PowerLevels, "")]
)
# Resolve the minimum power level required to send any state event
# We will give the upgrading user this power level temporarily (if necessary) such that
# they are able to copy all of the state events over, then revert them back to their
# original power level afterwards in _update_upgraded_room_pls
# Copy over user power levels now as this will not be possible with >100PL users once
# the room has been created
# Calculate the minimum power level needed to clone the room
event_power_levels = power_levels.get("events", {})
if not isinstance(event_power_levels, dict):
event_power_levels = {}
state_default = power_levels.get("state_default", 50)
try:
state_default_int = int(state_default) # type: ignore[arg-type]
except (TypeError, ValueError):
state_default_int = 50
ban = power_levels.get("ban", 50)
try:
ban = int(ban) # type: ignore[arg-type]
except (TypeError, ValueError):
ban = 50
needed_power_level = max(
            state_default_int, ban, max(event_power_levels.values(), default=0)
)
# Get the user's current power level, this matches the logic in get_user_power_level,
# but without the entire state map.
user_power_levels = power_levels.setdefault("users", {})
if not isinstance(user_power_levels, dict):
user_power_levels = {}
users_default = power_levels.get("users_default", 0)
current_power_level = user_power_levels.get(user_id, users_default)
try:
current_power_level_int = int(current_power_level) # type: ignore[arg-type]
except (TypeError, ValueError):
current_power_level_int = 0
# Raise the requester's power level in the new room if necessary
if current_power_level_int < needed_power_level:
user_power_levels[user_id] = needed_power_level
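        # Worked example: with state_default 50, ban 50 and a state event such
        # as m.room.server_acl requiring 100, needed_power_level is 100, so a
        # creator currently at PL 50 is bumped to 100 here and reverted via the
        # copied power levels in _update_upgraded_room_pls.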
await self._send_events_for_new_room(
requester,
new_room_id,
# we expect to override all the presets with initial_state, so this is
# somewhat arbitrary.
preset_config=RoomCreationPreset.PRIVATE_CHAT,
invite_list=[],
initial_state=initial_state,
creation_content=creation_content,
ratelimit=False,
)
# Transfer membership events
old_room_member_state_ids = await self.store.get_filtered_current_state_ids(
old_room_id, StateFilter.from_types([(EventTypes.Member, None)])
)
# map from event_id to BaseEvent
old_room_member_state_events = await self.store.get_events(
old_room_member_state_ids.values()
)
for old_event in old_room_member_state_events.values():
# Only transfer ban events
if (
"membership" in old_event.content
and old_event.content["membership"] == "ban"
):
await self.room_member_handler.update_membership(
requester,
UserID.from_string(old_event.state_key),
new_room_id,
"ban",
ratelimit=False,
content=old_event.content,
)
# XXX invites/joins
# XXX 3pid invites
async def _move_aliases_to_new_room(
self,
requester: Requester,
old_room_id: str,
new_room_id: str,
old_room_state: StateMap[str],
) -> None:
# check to see if we have a canonical alias.
canonical_alias_event = None
canonical_alias_event_id = old_room_state.get((EventTypes.CanonicalAlias, ""))
if canonical_alias_event_id:
canonical_alias_event = await self.store.get_event(canonical_alias_event_id)
await self.store.update_aliases_for_room(old_room_id, new_room_id)
if not canonical_alias_event:
return
# If there is a canonical alias we need to update the one in the old
# room and set one in the new one.
old_canonical_alias_content = dict(canonical_alias_event.content)
new_canonical_alias_content = {}
canonical = canonical_alias_event.content.get("alias")
if canonical and self.hs.is_mine_id(canonical):
new_canonical_alias_content["alias"] = canonical
old_canonical_alias_content.pop("alias", None)
# We convert to a list as it will be a Tuple.
old_alt_aliases = list(old_canonical_alias_content.get("alt_aliases", []))
if old_alt_aliases:
old_canonical_alias_content["alt_aliases"] = old_alt_aliases
new_alt_aliases = new_canonical_alias_content.setdefault("alt_aliases", [])
for alias in canonical_alias_event.content.get("alt_aliases", []):
try:
if self.hs.is_mine_id(alias):
new_alt_aliases.append(alias)
old_alt_aliases.remove(alias)
except Exception:
logger.info(
"Invalid alias %s in canonical alias event %s",
alias,
canonical_alias_event_id,
)
if not old_alt_aliases:
old_canonical_alias_content.pop("alt_aliases")
# If a canonical alias event existed for the old room, fire a canonical
# alias event for the new room with a copy of the information.
try:
await self.event_creation_handler.create_and_send_nonmember_event(
requester,
{
"type": EventTypes.CanonicalAlias,
"state_key": "",
"room_id": old_room_id,
"sender": requester.user.to_string(),
"content": old_canonical_alias_content,
},
ratelimit=False,
)
except SynapseError as e:
# again I'm not really expecting this to fail, but if it does, I'd rather
# we returned the new room to the client at this point.
logger.error("Unable to send updated alias events in old room: %s", e)
try:
await self.event_creation_handler.create_and_send_nonmember_event(
requester,
{
"type": EventTypes.CanonicalAlias,
"state_key": "",
"room_id": new_room_id,
"sender": requester.user.to_string(),
"content": new_canonical_alias_content,
},
ratelimit=False,
)
except SynapseError as e:
# again I'm not really expecting this to fail, but if it does, I'd rather
# we returned the new room to the client at this point.
logger.error("Unable to send updated alias events in new room: %s", e)
async def create_room(
self,
requester: Requester,
config: JsonDict,
ratelimit: bool = True,
creator_join_profile: Optional[JsonDict] = None,
) -> Tuple[dict, int]:
"""Creates a new room.
Args:
requester:
The user who requested the room creation.
config : A dict of configuration options.
ratelimit: set to False to disable the rate limiter
creator_join_profile:
Set to override the displayname and avatar for the creating
user in this room. If unset, displayname and avatar will be
derived from the user's profile. If set, should contain the
values to go in the body of the 'join' event (typically
                `avatar_url` and/or `displayname`).
Returns:
First, a dict containing the keys `room_id` and, if an alias
                was requested, `room_alias`. Secondly, the stream_id of the
last persisted event.
Raises:
SynapseError if the room ID couldn't be stored, or something went
horribly wrong.
ResourceLimitError if server is blocked to some resource being
exceeded
"""
user_id = requester.user.to_string()
await self.auth.check_auth_blocking(requester=requester)
if (
self._server_notices_mxid is not None
and requester.user.to_string() == self._server_notices_mxid
):
# allow the server notices mxid to create rooms
is_requester_admin = True
else:
is_requester_admin = await self.auth.is_server_admin(requester.user)
# Let the third party rules modify the room creation config if needed, or abort
# the room creation entirely with an exception.
await self.third_party_event_rules.on_create_room(
requester, config, is_requester_admin=is_requester_admin
)
invite_3pid_list = config.get("invite_3pid", [])
invite_list = config.get("invite", [])
if not is_requester_admin and not (
await self.spam_checker.user_may_create_room(user_id)
):
raise SynapseError(
403, "You are not permitted to create rooms", Codes.FORBIDDEN
)
if ratelimit:
await self.request_ratelimiter.ratelimit(requester)
room_version_id = config.get(
"room_version", self.config.server.default_room_version.identifier
)
if not isinstance(room_version_id, str):
raise SynapseError(400, "room_version must be a string", Codes.BAD_JSON)
room_version = KNOWN_ROOM_VERSIONS.get(room_version_id)
if room_version is None:
raise SynapseError(
400,
"Your homeserver does not support this room version",
Codes.UNSUPPORTED_ROOM_VERSION,
)
room_alias = None
if "room_alias_name" in config:
for wchar in string.whitespace:
if wchar in config["room_alias_name"]:
raise SynapseError(400, "Invalid characters in room alias")
room_alias = RoomAlias(config["room_alias_name"], self.hs.hostname)
mapping = await self.store.get_association_from_room_alias(room_alias)
if mapping:
raise SynapseError(400, "Room alias already taken", Codes.ROOM_IN_USE)
for i in invite_list:
try:
uid = UserID.from_string(i)
parse_and_validate_server_name(uid.domain)
except Exception:
raise SynapseError(400, "Invalid user_id: %s" % (i,))
if (invite_list or invite_3pid_list) and requester.shadow_banned:
# We randomly sleep a bit just to annoy the requester.
await self.clock.sleep(random.randint(1, 10))
# Allow the request to go through, but remove any associated invites.
invite_3pid_list = []
invite_list = []
if invite_list or invite_3pid_list:
try:
# If there are invites in the request, see if the ratelimiting settings
# allow that number of invites to be sent from the current user.
await self.room_member_handler.ratelimit_multiple_invites(
requester,
room_id=None,
n_invites=len(invite_list) + len(invite_3pid_list),
update=False,
)
except LimitExceededError:
raise SynapseError(400, "Cannot invite so many users at once")
await self.event_creation_handler.assert_accepted_privacy_policy(requester)
power_level_content_override = config.get("power_level_content_override")
if (
power_level_content_override
and "users" in power_level_content_override
and user_id not in power_level_content_override["users"]
):
raise SynapseError(
400,
"Not a valid power_level_content_override: 'users' did not contain %s"
% (user_id,),
)
visibility = config.get("visibility", None)
is_public = visibility == "public"
room_id = await self._generate_room_id(
creator_id=user_id,
is_public=is_public,
room_version=room_version,
)
# Check whether this visibility value is blocked by a third party module
allowed_by_third_party_rules = await (
self.third_party_event_rules.check_visibility_can_be_modified(
room_id, visibility
)
)
if not allowed_by_third_party_rules:
raise SynapseError(403, "Room visibility value not allowed.")
if is_public:
room_aliases = []
if room_alias:
room_aliases.append(room_alias.to_string())
if not self.config.roomdirectory.is_publishing_room_allowed(
user_id, room_id, room_aliases
):
# Let's just return a generic message, as there may be all sorts of
# reasons why we said no. TODO: Allow configurable error messages
# per alias creation rule?
raise SynapseError(403, "Not allowed to publish room")
directory_handler = self.hs.get_directory_handler()
if room_alias:
await directory_handler.create_association(
requester=requester,
room_id=room_id,
room_alias=room_alias,
servers=[self.hs.hostname],
check_membership=False,
)
preset_config = config.get(
"preset",
RoomCreationPreset.PRIVATE_CHAT
if visibility == "private"
else RoomCreationPreset.PUBLIC_CHAT,
)
raw_initial_state = config.get("initial_state", [])
initial_state = OrderedDict()
for val in raw_initial_state:
initial_state[(val["type"], val.get("state_key", ""))] = val["content"]
creation_content = config.get("creation_content", {})
# override any attempt to set room versions via the creation_content
creation_content["room_version"] = room_version.identifier
last_stream_id = await self._send_events_for_new_room(
requester,
room_id,
preset_config=preset_config,
invite_list=invite_list,
initial_state=initial_state,
creation_content=creation_content,
room_alias=room_alias,
power_level_content_override=power_level_content_override,
creator_join_profile=creator_join_profile,
ratelimit=ratelimit,
)
if "name" in config:
name = config["name"]
(
_,
last_stream_id,
) = await self.event_creation_handler.create_and_send_nonmember_event(
requester,
{
"type": EventTypes.Name,
"room_id": room_id,
"sender": user_id,
"state_key": "",
"content": {"name": name},
},
ratelimit=False,
)
if "topic" in config:
topic = config["topic"]
(
_,
last_stream_id,
) = await self.event_creation_handler.create_and_send_nonmember_event(
requester,
{
"type": EventTypes.Topic,
"room_id": room_id,
"sender": user_id,
"state_key": "",
"content": {"topic": topic},
},
ratelimit=False,
)
# we avoid dropping the lock between invites, as otherwise joins can
# start coming in and making the createRoom slow.
#
# we also don't need to check the requester's shadow-ban here, as we
# have already done so above (and potentially emptied invite_list).
with (await self.room_member_handler.member_linearizer.queue((room_id,))):
content = {}
is_direct = config.get("is_direct", None)
if is_direct:
content["is_direct"] = is_direct
for invitee in invite_list:
(
_,
last_stream_id,
) = await self.room_member_handler.update_membership_locked(
requester,
UserID.from_string(invitee),
room_id,
"invite",
ratelimit=False,
content=content,
new_room=True,
)
for invite_3pid in invite_3pid_list:
id_server = invite_3pid["id_server"]
id_access_token = invite_3pid.get("id_access_token") # optional
address = invite_3pid["address"]
medium = invite_3pid["medium"]
# Note that do_3pid_invite can raise a ShadowBanError, but this was
# handled above by emptying invite_3pid_list.
last_stream_id = await self.hs.get_room_member_handler().do_3pid_invite(
room_id,
requester.user,
medium,
address,
id_server,
requester,
txn_id=None,
id_access_token=id_access_token,
)
result = {"room_id": room_id}
if room_alias:
result["room_alias"] = room_alias.to_string()
# Always wait for room creation to propagate before returning
await self._replication.wait_for_stream_position(
self.hs.config.worker.events_shard_config.get_instance(room_id),
"events",
last_stream_id,
)
return result, last_stream_id
async def _send_events_for_new_room(
self,
creator: Requester,
room_id: str,
preset_config: str,
invite_list: List[str],
initial_state: MutableStateMap,
creation_content: JsonDict,
room_alias: Optional[RoomAlias] = None,
power_level_content_override: Optional[JsonDict] = None,
creator_join_profile: Optional[JsonDict] = None,
ratelimit: bool = True,
) -> int:
"""Sends the initial events into a new room.
`power_level_content_override` doesn't apply when initial state has
power level state event content.
Returns:
The stream_id of the last event persisted.
"""
creator_id = creator.user.to_string()
event_keys = {"room_id": room_id, "sender": creator_id, "state_key": ""}
def create(etype: str, content: JsonDict, **kwargs: Any) -> JsonDict:
e = {"type": etype, "content": content}
e.update(event_keys)
e.update(kwargs)
return e
async def send(etype: str, content: JsonDict, **kwargs: Any) -> int:
event = create(etype, content, **kwargs)
logger.debug("Sending %s in new room", etype)
# Allow these events to be sent even if the user is shadow-banned to
# allow the room creation to complete.
(
_,
last_stream_id,
) = await self.event_creation_handler.create_and_send_nonmember_event(
creator,
event,
ratelimit=False,
ignore_shadow_ban=True,
)
return last_stream_id
try:
config = self._presets_dict[preset_config]
except KeyError:
raise SynapseError(
400, f"'{preset_config}' is not a valid preset", errcode=Codes.BAD_JSON
)
creation_content.update({"creator": creator_id})
await send(etype=EventTypes.Create, content=creation_content)
logger.debug("Sending %s in new room", EventTypes.Member)
await self.room_member_handler.update_membership(
creator,
creator.user,
room_id,
"join",
ratelimit=ratelimit,
content=creator_join_profile,
new_room=True,
)
# We treat the power levels override specially as this needs to be one
# of the first events that get sent into a room.
pl_content = initial_state.pop((EventTypes.PowerLevels, ""), None)
if pl_content is not None:
last_sent_stream_id = await send(
etype=EventTypes.PowerLevels, content=pl_content
)
else:
power_level_content: JsonDict = {
"users": {creator_id: 100},
"users_default": 0,
"events": {
EventTypes.Name: 50,
EventTypes.PowerLevels: 100,
EventTypes.RoomHistoryVisibility: 100,
EventTypes.CanonicalAlias: 50,
EventTypes.RoomAvatar: 50,
EventTypes.Tombstone: 100,
EventTypes.ServerACL: 100,
EventTypes.RoomEncryption: 100,
},
"events_default": 0,
"state_default": 50,
"ban": 50,
"kick": 50,
"redact": 50,
"invite": 50,
"historical": 100,
}
if config["original_invitees_have_ops"]:
for invitee in invite_list:
power_level_content["users"][invitee] = 100
# Power levels overrides are defined per chat preset
power_level_content.update(config["power_level_content_override"])
if power_level_content_override:
power_level_content.update(power_level_content_override)
last_sent_stream_id = await send(
etype=EventTypes.PowerLevels, content=power_level_content
)
if room_alias and (EventTypes.CanonicalAlias, "") not in initial_state:
last_sent_stream_id = await send(
etype=EventTypes.CanonicalAlias,
content={"alias": room_alias.to_string()},
)
if (EventTypes.JoinRules, "") not in initial_state:
last_sent_stream_id = await send(
etype=EventTypes.JoinRules, content={"join_rule": config["join_rules"]}
)
if (EventTypes.RoomHistoryVisibility, "") not in initial_state:
last_sent_stream_id = await send(
etype=EventTypes.RoomHistoryVisibility,
content={"history_visibility": config["history_visibility"]},
)
if config["guest_can_join"]:
if (EventTypes.GuestAccess, "") not in initial_state:
last_sent_stream_id = await send(
etype=EventTypes.GuestAccess,
content={EventContentFields.GUEST_ACCESS: GuestAccess.CAN_JOIN},
)
for (etype, state_key), content in initial_state.items():
last_sent_stream_id = await send(
etype=etype, state_key=state_key, content=content
)
if config["encrypted"]:
last_sent_stream_id = await send(
etype=EventTypes.RoomEncryption,
state_key="",
content={"algorithm": RoomEncryptionAlgorithms.DEFAULT},
)
return last_sent_stream_id
async def _generate_room_id(
self,
creator_id: str,
is_public: bool,
room_version: RoomVersion,
) -> str:
# autogen room IDs and try to create it. We may clash, so just
# try a few times till one goes through, giving up eventually.
attempts = 0
while attempts < 5:
try:
random_string = stringutils.random_string(18)
gen_room_id = RoomID(random_string, self.hs.hostname).to_string()
await self.store.store_room(
room_id=gen_room_id,
room_creator_user_id=creator_id,
is_public=is_public,
room_version=room_version,
)
return gen_room_id
except StoreError:
attempts += 1
raise StoreError(500, "Couldn't generate a room ID.")
class RoomContextHandler:
def __init__(self, hs: "HomeServer"):
self.hs = hs
self.auth = hs.get_auth()
self.store = hs.get_datastores().main
self.storage = hs.get_storage()
self.state_store = self.storage.state
async def get_event_context(
self,
requester: Requester,
room_id: str,
event_id: str,
limit: int,
event_filter: Optional[Filter],
use_admin_priviledge: bool = False,
) -> Optional[EventContext]:
"""Retrieves events, pagination tokens and state around a given event
in a room.
Args:
requester
room_id
event_id
limit: The maximum number of events to return in total
(excluding state).
event_filter: the filter to apply to the events returned
(excluding the target event_id)
use_admin_priviledge: if `True`, return all events, regardless
of whether `user` has access to them. To be used **ONLY**
from the admin API.
Returns:
            An EventContext, or None if the event isn't found
"""
user = requester.user
if use_admin_priviledge:
await assert_user_is_admin(self.auth, requester.user)
before_limit = math.floor(limit / 2.0)
after_limit = limit - before_limit
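        # e.g. limit=10 -> 5 events before / 5 after; limit=7 -> 3 before / 4 after.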
users = await self.store.get_users_in_room(room_id)
is_peeking = user.to_string() not in users
async def filter_evts(events: List[EventBase]) -> List[EventBase]:
if use_admin_priviledge:
return events
return await filter_events_for_client(
self.storage, user.to_string(), events, is_peeking=is_peeking
)
event = await self.store.get_event(
event_id, get_prev_content=True, allow_none=True
)
if not event:
return None
filtered = await filter_evts([event])
if not filtered:
raise AuthError(403, "You don't have permission to access that event.")
results = await self.store.get_events_around(
room_id, event_id, before_limit, after_limit, event_filter
)
events_before = results.events_before
events_after = results.events_after
if event_filter:
events_before = await event_filter.filter(events_before)
events_after = await event_filter.filter(events_after)
events_before = await filter_evts(events_before)
events_after = await filter_evts(events_after)
# filter_evts can return a pruned event in case the user is allowed to see that
# there's something there but not see the content, so use the event that's in
# `filtered` rather than the event we retrieved from the datastore.
event = filtered[0]
# Fetch the aggregations.
aggregations = await self.store.get_bundled_aggregations(
itertools.chain(events_before, (event,), events_after),
user.to_string(),
)
if events_after:
last_event_id = events_after[-1].event_id
else:
last_event_id = event_id
if event_filter and event_filter.lazy_load_members:
state_filter = StateFilter.from_lazy_load_member_list(
ev.sender
for ev in itertools.chain(
events_before,
(event,),
events_after,
)
)
else:
state_filter = StateFilter.all()
# XXX: why do we return the state as of the last event rather than the
# first? Shouldn't we be consistent with /sync?
# https://github.com/matrix-org/matrix-doc/issues/687
state = await self.state_store.get_state_for_events(
[last_event_id], state_filter=state_filter
)
state_events = list(state[last_event_id].values())
if event_filter:
state_events = await event_filter.filter(state_events)
# We use a dummy token here as we only care about the room portion of
# the token, which we replace.
token = StreamToken.START
return EventContext(
events_before=events_before,
event=event,
events_after=events_after,
state=await filter_evts(state_events),
aggregations=aggregations,
start=await token.copy_and_replace("room_key", results.start).to_string(
self.store
),
end=await token.copy_and_replace("room_key", results.end).to_string(
self.store
),
)
class TimestampLookupHandler:
def __init__(self, hs: "HomeServer"):
self.server_name = hs.hostname
self.store = hs.get_datastores().main
self.state_handler = hs.get_state_handler()
self.federation_client = hs.get_federation_client()
async def get_event_for_timestamp(
self,
requester: Requester,
room_id: str,
timestamp: int,
direction: str,
) -> Tuple[str, int]:
"""Find the closest event to the given timestamp in the given direction.
If we can't find an event locally or the event we have locally is next to a gap,
it will ask other federated homeservers for an event.
Args:
requester: The user making the request according to the access token
room_id: Room to fetch the event from
timestamp: The point in time (inclusive) we should navigate from in
the given direction to find the closest event.
direction: ["f"|"b"] to indicate whether we should navigate forward
or backward from the given timestamp to find the closest event.
Returns:
A tuple containing the `event_id` closest to the given timestamp in
the given direction and the `origin_server_ts`.
Raises:
SynapseError if unable to find any event locally in the given direction
"""
local_event_id = await self.store.get_event_id_for_timestamp(
room_id, timestamp, direction
)
logger.debug(
"get_event_for_timestamp: locally, we found event_id=%s closest to timestamp=%s",
local_event_id,
timestamp,
)
# Check for gaps in the history where events could be hiding in between
# the timestamp given and the event we were able to find locally
is_event_next_to_backward_gap = False
is_event_next_to_forward_gap = False
        local_event = None
        if local_event_id:
local_event = await self.store.get_event(
local_event_id, allow_none=False, allow_rejected=False
)
if direction == "f":
# We only need to check for a backward gap if we're looking forwards
# to ensure there is nothing in between.
is_event_next_to_backward_gap = (
await self.store.is_event_next_to_backward_gap(local_event)
)
elif direction == "b":
# We only need to check for a forward gap if we're looking backwards
# to ensure there is nothing in between
is_event_next_to_forward_gap = (
await self.store.is_event_next_to_forward_gap(local_event)
)
# If we found a gap, we should probably ask another homeserver first
# about more history in between
if (
not local_event_id
or is_event_next_to_backward_gap
or is_event_next_to_forward_gap
):
logger.debug(
"get_event_for_timestamp: locally, we found event_id=%s closest to timestamp=%s which is next to a gap in event history so we're asking other homeservers first",
local_event_id,
timestamp,
)
# Find other homeservers from the given state in the room
curr_state = await self.state_handler.get_current_state(room_id)
curr_domains = get_domains_from_state(curr_state)
likely_domains = [
domain for domain, depth in curr_domains if domain != self.server_name
]
            # Loop through each homeserver candidate until we get a successful response
for domain in likely_domains:
try:
remote_response = await self.federation_client.timestamp_to_event(
domain, room_id, timestamp, direction
)
logger.debug(
"get_event_for_timestamp: response from domain(%s)=%s",
domain,
remote_response,
)
# TODO: Do we want to persist this as an extremity?
# TODO: I think ideally, we would try to backfill from
# this event and run this whole
# `get_event_for_timestamp` function again to make sure
# they didn't give us an event from their gappy history.
remote_event_id = remote_response.event_id
origin_server_ts = remote_response.origin_server_ts
# Only return the remote event if it's closer than the local event
if not local_event or (
abs(origin_server_ts - timestamp)
< abs(local_event.origin_server_ts - timestamp)
):
return remote_event_id, origin_server_ts
except (HttpResponseException, InvalidResponseError) as ex:
# Let's not put a high priority on some other homeserver
# failing to respond or giving a random response
logger.debug(
"Failed to fetch /timestamp_to_event from %s because of exception(%s) %s args=%s",
domain,
type(ex).__name__,
ex,
ex.args,
)
except Exception as ex:
# But we do want to see some exceptions in our code
logger.warning(
"Failed to fetch /timestamp_to_event from %s because of exception(%s) %s args=%s",
domain,
type(ex).__name__,
ex,
ex.args,
)
if not local_event_id:
raise SynapseError(
404,
"Unable to find event from %s in direction %s" % (timestamp, direction),
errcode=Codes.NOT_FOUND,
)
return local_event_id, local_event.origin_server_ts
class RoomEventSource(EventSource[RoomStreamToken, EventBase]):
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastores().main
async def get_new_events(
self,
user: UserID,
from_key: RoomStreamToken,
limit: Optional[int],
room_ids: Collection[str],
is_guest: bool,
explicit_room_id: Optional[str] = None,
) -> Tuple[List[EventBase], RoomStreamToken]:
# We just ignore the key for now.
to_key = self.get_current_key()
if from_key.topological:
logger.warning("Stream has topological part!!!! %r", from_key)
from_key = RoomStreamToken(None, from_key.stream)
app_service = self.store.get_app_service_by_user_id(user.to_string())
if app_service:
# We no longer support AS users using /sync directly.
# See https://github.com/matrix-org/matrix-doc/issues/1144
raise NotImplementedError()
else:
room_events = await self.store.get_membership_changes_for_user(
user.to_string(), from_key, to_key
)
room_to_events = await self.store.get_room_events_stream_for_rooms(
room_ids=room_ids,
from_key=from_key,
to_key=to_key,
limit=limit or 10,
order="ASC",
)
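            # Merge the membership changes with the per-room event streams and
            # sort everything by its internal stream order.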
events = list(room_events)
events.extend(e for evs, _ in room_to_events.values() for e in evs)
events.sort(key=lambda e: e.internal_metadata.order)
if limit:
events[:] = events[:limit]
if events:
end_key = events[-1].internal_metadata.after
else:
end_key = to_key
return events, end_key
def get_current_key(self) -> RoomStreamToken:
return self.store.get_room_max_token()
def get_current_key_for_room(self, room_id: str) -> Awaitable[str]:
return self.store.get_room_events_max_id(room_id)
class ShutdownRoomResponse(TypedDict):
"""
Attributes:
kicked_users: An array of users (`user_id`) that were kicked.
failed_to_kick_users:
            An array of users (`user_id`) that were not kicked.
local_aliases:
An array of strings representing the local aliases that were
migrated from the old room to the new.
new_room_id: A string representing the room ID of the new room.
"""
kicked_users: List[str]
failed_to_kick_users: List[str]
local_aliases: List[str]
new_room_id: Optional[str]
class RoomShutdownHandler:
DEFAULT_MESSAGE = (
"Sharing illegal content on this server is not permitted and rooms in"
" violation will be blocked."
)
DEFAULT_ROOM_NAME = "Content Violation Notification"
def __init__(self, hs: "HomeServer"):
self.hs = hs
self.room_member_handler = hs.get_room_member_handler()
self._room_creation_handler = hs.get_room_creation_handler()
self._replication = hs.get_replication_data_handler()
self.event_creation_handler = hs.get_event_creation_handler()
self.store = hs.get_datastores().main
async def shutdown_room(
self,
room_id: str,
requester_user_id: str,
new_room_user_id: Optional[str] = None,
new_room_name: Optional[str] = None,
message: Optional[str] = None,
block: bool = False,
) -> ShutdownRoomResponse:
"""
Shuts down a room. Moves all local users and room aliases automatically
to a new room if `new_room_user_id` is set. Otherwise local users only
leave the room without any information.
The new room will be created with the user specified by the
`new_room_user_id` parameter as room administrator and will contain a
message explaining what happened. Users invited to the new room will
have power level `-10` by default, and thus be unable to speak.
The local server will only have the power to move local user and room
aliases to the new room. Users on other servers will be unaffected.
Args:
room_id: The ID of the room to shut down.
requester_user_id:
User who requested the action and put the room on the
blocking list.
new_room_user_id:
If set, a new room will be created with this user ID
as the creator and admin, and all users in the old room will be
moved into that room. If not set, no new room will be created
and the users will just be removed from the old room.
new_room_name:
A string representing the name of the room that new users will
be invited to. Defaults to `Content Violation Notification`
message:
A string containing the first message that will be sent as
`new_room_user_id` in the new room. Ideally this will clearly
convey why the original room was shut down.
Defaults to `Sharing illegal content on this server is not
permitted and rooms in violation will be blocked.`
block:
If set to `True`, users will be prevented from joining the old
room. This option can also be used to pre-emptively block a room,
even if it's unknown to this homeserver. In this case, the room
will be blocked, and no further action will be taken. If `False`,
attempting to delete an unknown room is invalid.
Defaults to `False`.
Returns: a dict containing the following keys:
kicked_users: An array of users (`user_id`) that were kicked.
failed_to_kick_users:
                An array of users (`user_id`) that were not kicked.
local_aliases:
An array of strings representing the local aliases that were
migrated from the old room to the new.
new_room_id:
A string representing the room ID of the new room, or None if
no such room was created.
"""
if not new_room_name:
new_room_name = self.DEFAULT_ROOM_NAME
if not message:
message = self.DEFAULT_MESSAGE
if not RoomID.is_valid(room_id):
raise SynapseError(400, "%s is not a legal room ID" % (room_id,))
# Action the block first (even if the room doesn't exist yet)
if block:
# This will work even if the room is already blocked, but that is
# desirable in case the first attempt at blocking the room failed below.
await self.store.block_room(room_id, requester_user_id)
if not await self.store.get_room(room_id):
# if we don't know about the room, there is nothing left to do.
return {
"kicked_users": [],
"failed_to_kick_users": [],
"local_aliases": [],
"new_room_id": None,
}
if new_room_user_id is not None:
if not self.hs.is_mine_id(new_room_user_id):
raise SynapseError(
400, "User must be our own: %s" % (new_room_user_id,)
)
room_creator_requester = create_requester(
new_room_user_id, authenticated_entity=requester_user_id
)
info, stream_id = await self._room_creation_handler.create_room(
room_creator_requester,
config={
"preset": RoomCreationPreset.PUBLIC_CHAT,
"name": new_room_name,
"power_level_content_override": {"users_default": -10},
},
ratelimit=False,
)
new_room_id = info["room_id"]
logger.info(
"Shutting down room %r, joining to new room: %r", room_id, new_room_id
)
# We now wait for the create room to come back in via replication so
# that we can assume that all the joins/invites have propagated before
# we try and auto join below.
await self._replication.wait_for_stream_position(
self.hs.config.worker.events_shard_config.get_instance(new_room_id),
"events",
stream_id,
)
else:
new_room_id = None
logger.info("Shutting down room %r", room_id)
users = await self.store.get_users_in_room(room_id)
kicked_users = []
failed_to_kick_users = []
for user_id in users:
if not self.hs.is_mine_id(user_id):
continue
logger.info("Kicking %r from %r...", user_id, room_id)
try:
# Kick users from room
target_requester = create_requester(
user_id, authenticated_entity=requester_user_id
)
_, stream_id = await self.room_member_handler.update_membership(
requester=target_requester,
target=target_requester.user,
room_id=room_id,
action=Membership.LEAVE,
content={},
ratelimit=False,
require_consent=False,
)
# Wait for leave to come in over replication before trying to forget.
await self._replication.wait_for_stream_position(
self.hs.config.worker.events_shard_config.get_instance(room_id),
"events",
stream_id,
)
await self.room_member_handler.forget(target_requester.user, room_id)
# Join users to new room
if new_room_user_id:
await self.room_member_handler.update_membership(
requester=target_requester,
target=target_requester.user,
room_id=new_room_id,
action=Membership.JOIN,
content={},
ratelimit=False,
require_consent=False,
)
kicked_users.append(user_id)
except Exception:
logger.exception(
"Failed to leave old room and join new room for %r", user_id
)
failed_to_kick_users.append(user_id)
# Send message in new room and move aliases
if new_room_user_id:
await self.event_creation_handler.create_and_send_nonmember_event(
room_creator_requester,
{
"type": "m.room.message",
"content": {"body": message, "msgtype": "m.text"},
"room_id": new_room_id,
"sender": new_room_user_id,
},
ratelimit=False,
)
aliases_for_room = await self.store.get_aliases_for_room(room_id)
await self.store.update_aliases_for_room(
room_id, new_room_id, requester_user_id
)
else:
aliases_for_room = []
return {
"kicked_users": kicked_users,
"failed_to_kick_users": failed_to_kick_users,
"local_aliases": aliases_for_room,
"new_room_id": new_room_id,
}
| matrix-org/synapse | synapse/handlers/room.py | Python | apache-2.0 | 64,472 |
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from __future__ import unicode_literals
from datetime import datetime
import logging
from types import NoneType
from google.appengine.ext import ndb, deferred, db
from google.appengine.ext.ndb.query import Cursor
from typing import Optional, List, Union, Tuple
from mcfw.rpc import returns, arguments
from rogerthat.bizz.communities.communities import get_community
from rogerthat.bizz.jobs.matching import rebuild_matches_check_current
from rogerthat.bizz.jobs.notifications import calculate_next_reminder
from rogerthat.bizz.jobs.translations import localize as localize_jobs
from rogerthat.capi.jobs import newJobs
from rogerthat.consts import JOBS_WORKER_QUEUE
from rogerthat.dal.mobile import get_mobile_key_by_account
from rogerthat.dal.profile import get_user_profile
from rogerthat.models import NdbUserProfile
from rogerthat.models.jobs import JobOffer, JobMatchingCriteria, JobMatchingCriteriaNotifications, JobMatch, \
JobMatchStatus, JobNotificationSchedule, JobOfferSourceType
from rogerthat.rpc import users
from rogerthat.rpc.models import RpcCAPICall, RpcException
from rogerthat.rpc.rpc import mapping, logError, CAPI_KEYWORD_ARG_PRIORITY, \
PRIORITY_HIGH
from rogerthat.service.api.messaging import add_chat_members
from rogerthat.to.jobs import GetJobsResponseTO, JobOfferTO, NewJobsResponseTO, \
NewJobsRequestTO, SaveJobsCriteriaResponseTO, GetJobsCriteriaResponseTO, \
JobKeyLabelTO, JobCriteriaLocationTO, JobCriteriaNotificationsTO, JobCriteriaGeoLocationTO, \
SaveJobsCriteriaRequestTO, JobOfferChatActionTO, JobOfferOpenActionTO, GetJobChatInfoResponseTO, JobChatAnonymousTO, \
CreateJobChatResponseTO, CreateJobChatRequestTO, JobsInfoTO, JobOfferProviderTO
from rogerthat.translations import localize
from rogerthat.utils import now, get_epoch_from_datetime
from rogerthat.utils.location import coordinates_to_city
from solutions.common.jobs.models import JobSolicitation
TAG_JOB_CHAT = '__rt__.jobs_chat'
CONTRACT_TYPES = [
'contract_type_001',
'contract_type_002',
'contract_type_003',
'contract_type_004',
'contract_type_005',
'contract_type_006',
'contract_type_007',
]
JOB_DOMAINS = [
'job_domain_001',
'job_domain_002',
'job_domain_003',
'job_domain_004',
'job_domain_005',
'job_domain_006',
'job_domain_007',
'job_domain_008',
'job_domain_009',
'job_domain_010',
'job_domain_011',
'job_domain_012',
'job_domain_013',
'job_domain_014',
'job_domain_015',
'job_domain_016',
'job_domain_017',
'job_domain_018',
'job_domain_019',
'job_domain_020',
'job_domain_021',
'job_domain_022',
'job_domain_023',
'job_domain_024',
]
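# The contract type and job domain identifiers above are translation keys; they
# are resolved to human-readable labels per user language via localize_jobs().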
def get_job_criteria(app_user):
# type: (users.User) -> GetJobsCriteriaResponseTO
user_profile = get_user_profile(app_user)
response = GetJobsCriteriaResponseTO()
response.location = JobCriteriaLocationTO()
response.location.address = None
response.location.geo = None
response.location.distance = 20000 # 20 Km
response.contract_types = []
response.job_domains = []
response.keywords = []
response.notifications = JobCriteriaNotificationsTO()
response.notifications.timezone = None
response.notifications.how_often = JobNotificationSchedule.NEVER
response.notifications.delivery_day = 'monday'
response.notifications.delivery_time = 64800 # 18:00
job_criteria = JobMatchingCriteria.create_key(app_user).get() # type: JobMatchingCriteria
for contract_type in CONTRACT_TYPES:
to = JobKeyLabelTO()
to.key = contract_type
to.label = localize_jobs(user_profile.language, contract_type)
to.enabled = contract_type in job_criteria.contract_types if job_criteria else False
response.contract_types.append(to)
response.contract_types.sort(key=lambda item: item.label)
for domain in JOB_DOMAINS:
to = JobKeyLabelTO()
to.key = domain
to.label = localize_jobs(user_profile.language, domain)
to.enabled = domain in job_criteria.job_domains if job_criteria else False
response.job_domains.append(to)
response.job_domains.sort(key=lambda item: item.label)
if job_criteria:
response.active = job_criteria.active
response.location = JobCriteriaLocationTO()
response.location.address = job_criteria.address
response.location.geo = JobCriteriaGeoLocationTO()
response.location.geo.latitude = job_criteria.geo_location.lat
response.location.geo.longitude = job_criteria.geo_location.lon
response.location.distance = job_criteria.distance
response.keywords = job_criteria.keywords
if job_criteria.notifications:
response.notifications.how_often = job_criteria.notifications.how_often
if job_criteria.notifications.delivery_day:
response.notifications.delivery_day = job_criteria.notifications.delivery_day
if job_criteria.notifications.delivery_time:
response.notifications.delivery_time = job_criteria.notifications.delivery_time
else:
        response.active = True  # first usage: the user has no saved job criteria yet
return response
@returns(SaveJobsCriteriaResponseTO)
@arguments(app_user=users.User, request=SaveJobsCriteriaRequestTO)
def save_job_criteria(app_user, request):
# type: (users.User, SaveJobsCriteriaRequestTO) -> SaveJobsCriteriaResponseTO
job_criteria_key = JobMatchingCriteria.create_key(app_user)
job_criteria = job_criteria_key.get() # type: JobMatchingCriteria
new_job_profile = not job_criteria
if new_job_profile:
if not request.criteria:
return SaveJobsCriteriaResponseTO(active=False, new_profile=new_job_profile)
job_criteria = JobMatchingCriteria(key=job_criteria_key)
job_criteria.last_load_request = datetime.utcnow()
job_criteria.demo = get_community(get_user_profile(app_user).community_id).demo
original_job_criteria = None
else:
original_job_criteria = job_criteria.to_dict(exclude=['notifications', 'active'])
notifications = None
job_criteria.active = request.active
if request.criteria:
location = request.criteria.location
notifications = request.criteria.notifications
if location.geo:
job_criteria.geo_location = ndb.GeoPt(location.geo.latitude, location.geo.longitude)
if location.address:
job_criteria.address = location.address
else:
job_criteria.address = coordinates_to_city(job_criteria.geo_location.lat,
job_criteria.geo_location.lon)
job_criteria.distance = location.distance
job_criteria.contract_types = sorted(request.criteria.contract_types)
job_criteria.job_domains = sorted(request.criteria.job_domains)
job_criteria.keywords = sorted(request.criteria.keywords)
if not job_criteria.job_domains:
raise RpcException('at_least_one_job_domain_required', app_user)
if not job_criteria.contract_types:
raise RpcException('at_least_one_contract_type_required', app_user)
updated_criteria = job_criteria.to_dict(exclude=['notifications', 'active'])
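    # Only rebuild matches when the search criteria actually changed; reminder
    # recalculation is also triggered below when the notification settings change.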
should_build_matches = original_job_criteria != updated_criteria
should_calculate_reminder = should_build_matches
should_clear_notifications = should_build_matches
og_notifications = job_criteria.notifications and job_criteria.notifications.to_dict()
if not job_criteria.notifications:
job_criteria.notifications = JobMatchingCriteriaNotifications()
job_criteria.notifications.how_often = JobNotificationSchedule.NEVER
if notifications and notifications.timezone:
job_criteria.notifications.timezone = notifications.timezone
if job_criteria.notifications.how_often != notifications.how_often:
delayed_notification_types = (JobNotificationSchedule.AT_MOST_ONCE_A_DAY,
JobNotificationSchedule.AT_MOST_ONCE_A_WEEK)
if job_criteria.notifications.how_often in delayed_notification_types and \
notifications.how_often not in delayed_notification_types:
should_clear_notifications = True
job_criteria.notifications.how_often = notifications.how_often
job_criteria.notifications.delivery_day = notifications.delivery_day
job_criteria.notifications.delivery_time = notifications.delivery_time
if not should_calculate_reminder:
should_calculate_reminder = job_criteria.notifications.to_dict() != og_notifications
job_criteria.put()
if should_build_matches:
deferred.defer(rebuild_matches_check_current, app_user, _queue=JOBS_WORKER_QUEUE)
if should_calculate_reminder:
deferred.defer(calculate_next_reminder, app_user, should_clear_notifications, _queue=JOBS_WORKER_QUEUE)
return SaveJobsCriteriaResponseTO(active=job_criteria.active, new_profile=new_job_profile)
def get_oca_logo_url(language):
if language.startswith('nl'):
return 'https://storage.googleapis.com/oca-files/jobs/OCA-nl.png'
return 'https://storage.googleapis.com/oca-files/jobs/OCA.png'
def get_jobs_for_activity_type(app_user, activity_type, cursor, ids):
# type: (users.User, unicode, Optional[unicode], List[int]) -> GetJobsResponseTO
job_criteria_key = JobMatchingCriteria.create_key(app_user)
user_profile_key = NdbUserProfile.createKey(app_user)
keys = [job_criteria_key, user_profile_key]
job_criteria, user_profile = ndb.get_multi(keys) # type: Optional[JobMatchingCriteria], NdbUserProfile
resp = GetJobsResponseTO()
if not job_criteria or not job_criteria.active:
resp.is_profile_active = False
resp.items = []
resp.cursor = None
resp.has_more = False
else:
if cursor is None and activity_type == JobOfferTO.ACTIVITY_TYPE_NEW:
job_criteria.last_load_request = datetime.utcnow()
job_criteria.put()
resp.items, resp.cursor, resp.has_more = _get_jobs(activity_type, app_user, cursor, user_profile.language, ids)
resp.is_profile_active = True
info = JobsInfoTO()
info.title = localize(user_profile.language, 'app_jobs_title')
info.description = localize(user_profile.language, 'app_jobs_description')
info.providers = [
JobOfferProviderTO(image_url=get_oca_logo_url(user_profile.language)),
JobOfferProviderTO(image_url='https://storage.googleapis.com/oca-files/jobs/VDAB.jpg'),
]
resp.info = info
return resp
def bulk_save_jobs(app_user, job_ids, status):
# type: (users.User, List[int], int) -> List[int]
keys = [JobMatch.create_key(app_user, job_id) for job_id in job_ids]
matches = ndb.get_multi(keys) # type: List[JobMatch]
to_put = []
for match in matches:
if not match:
continue
match.status = status
to_put.append(match)
ndb.put_multi(to_put)
return [match.get_job_id() for match in to_put]
@mapping('com.mobicage.capi.jobs.new_jobs_response_handler')
@returns(NoneType)
@arguments(context=RpcCAPICall, result=NewJobsResponseTO)
def new_jobs_response_handler(context, result):
pass
def _get_jobs(activity_type, app_user, cursor, language, ids):
# type: (str, users.User, Optional[str], str, List[int]) -> Tuple[List[JobOfferTO], Optional[str], bool]
fetch_size = 20
start_cursor = Cursor.from_websafe_string(cursor) if cursor else None
if activity_type == JobOfferTO.ACTIVITY_TYPE_NEW:
qry = JobMatch.list_new_by_app_user(app_user)
elif activity_type == JobOfferTO.ACTIVITY_TYPE_HISTORY:
qry = JobMatch.list_by_app_user_and_status(app_user, JobMatchStatus.DELETED)
elif activity_type == JobOfferTO.ACTIVITY_TYPE_STARRED:
qry = JobMatch.list_by_app_user_and_status(app_user, JobMatchStatus.STARRED)
else:
raise Exception('Unknown activity type %s' % activity_type)
job_matches_keys, new_cursor, has_more = qry.fetch_page(
fetch_size, start_cursor=start_cursor, keys_only=True) # type: List[ndb.Key], Cursor, bool
match_keys = [JobMatch.create_key(app_user, job_id) for job_id in ids if job_id] + \
[key for key in job_matches_keys if key.id() not in ids]
offer_keys = [JobOffer.create_key(match_key.id()) for match_key in match_keys]
models = ndb.get_multi(match_keys + offer_keys) # type: List[Union[JobMatch, JobOffer]]
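    # get_multi returns entities in the same order as the keys: all JobMatch
    # entities first, then all JobOffer entities, so split the result in half.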
job_matches = models[0: len(models) / 2]
job_offers = models[len(models) / 2:]
items = []
to_put = []
for match, job_offer in zip(job_matches, job_offers): # type: JobMatch, JobOffer
if not match:
# this should only happen when the job was requested using the 'ids' property
# like when the jobs activity is opened via a button on a news item
if job_offer.id not in ids:
logging.warning('Expected JobMatch to exist, creating it anyway...')
logging.debug('Creating manual JobMatch entry for job %d', job_offer.id)
match = JobMatch.manually_create(app_user, job_offer.id)
to_put.append(match)
timestamp = get_epoch_from_datetime(match.update_date)
items.append(JobOfferTO.from_job_offer(job_offer, timestamp, language,
get_job_offer_actions(job_offer, match, language)))
ndb.put_multi(to_put)
return items, new_cursor.to_websafe_string().decode('utf-8') if new_cursor else None, has_more
def get_job_offer_actions(job_offer, match, language):
# type: (JobOffer, JobMatch, str) -> List[Union[JobOfferChatActionTO, JobOfferOpenActionTO]]
actions = []
if job_offer.source.type == JobOfferSourceType.OCA:
action = JobOfferChatActionTO()
action.label = localize(language, 'open_chat')
action.chat_key = match.chat_key # possibly None
action.icon = 'fa-comment'
actions.append(action)
return actions
def send_new_jobs_for_activity_types(app_user, activity_types):
user_profile = get_user_profile(app_user)
if not user_profile.get_mobiles():
return
request = NewJobsRequestTO()
request.creation_time = now()
request.activity_types = activity_types
mobiles = db.get([get_mobile_key_by_account(mobile_detail.account) for mobile_detail in user_profile.get_mobiles().values()])
for mobile in mobiles:
ios_push_id = None
if mobile.is_ios:
ios_push_id = mobile.iOSPushId
kwargs = {}
if ios_push_id:
kwargs[CAPI_KEYWORD_ARG_PRIORITY] = PRIORITY_HIGH
newJobs(new_jobs_response_handler, logError, app_user, request=request, MOBILE_ACCOUNT=mobile, **kwargs)
def get_job_chat_info(app_user, job_id):
# type: (users.User, int) -> GetJobChatInfoResponseTO
keys = [JobOffer.create_key(job_id), JobMatch.create_key(app_user, job_id)]
job_offer, job_match = ndb.get_multi(keys) # type: JobOffer, JobMatch
job_sln_id = long(job_offer.source.id)
solicitation = JobSolicitation.list_by_job_and_user(users.User(job_offer.service_email),
job_sln_id,
app_user.email()).get() # type: Optional[JobSolicitation]
lang = get_user_profile(app_user).language
response = GetJobChatInfoResponseTO()
response.anonymous = JobChatAnonymousTO()
response.job_id = job_id
response.anonymous.enabled = True
response.anonymous.default_value = False
response.default_text = ''
response.info_text = localize(lang, 'job_info_text')
if solicitation:
# User has already applied before, but deleted the chat.
# Add him back to the chat and return the original chat key.
job_match.chat_key = solicitation.chat_key
response.chat_key = solicitation.chat_key
with users.set_user(users.User(job_offer.service_email)):
add_chat_members(solicitation.chat_key, [app_user.email()])
job_match.put()
return response
def create_job_chat(app_user, request):
# type: (users.User, CreateJobChatRequestTO) -> CreateJobChatResponseTO
keys = [JobMatch.create_key(app_user, request.job_id),
JobOffer.create_key(request.job_id)]
job_match, job_offer = ndb.get_multi(keys) # type: JobMatch, JobOffer
if not job_match.chat_key:
# If you ever want to create a separate service for jobs, you'll have to create a service api callback for this
from solutions.common.jobs.solicitations import create_job_solicitation
message_key = create_job_solicitation(app_user, job_offer, request)
job_match.chat_key = message_key
job_match.put()
response = CreateJobChatResponseTO()
response.message_key = job_match.chat_key
return response
| our-city-app/oca-backend | src/rogerthat/bizz/jobs/__init__.py | Python | apache-2.0 | 17,649 |
#!/usr/bin/env python3
import argparse
import ipaddress
import gevent
import gevent.wsgi
import hashlib
import json
import traceback
from gevent import monkey
from werkzeug.exceptions import (BadRequest, HTTPException,
InternalServerError, NotFound)
from werkzeug.routing import Map, Rule, RequestRedirect
from werkzeug.wrappers import Request, Response
from werkzeug.wsgi import responder
monkey.patch_all()
IMAGE_METHOD = 'tftp'
BOOTSCRIPT = """#!ipxe
:retry
dhcp && isset ${{filename}} || goto retry
echo Booting from ${{filename}}
kernel {image_method}://${{next-server}}/vmlinuz.img quiet pixie_server=${{next-server}} \
ip=${{ip}}::${{gateway}}:${{netmask}}::eth0:none:${{dns}} {wipe} pixie_root_size={root_size} \
pixie_swap_size={swap_size} pixie_sha224={sha224} {extra_args} || goto error
initrd {image_method}://${{next-server}}//initrd.img || goto error
boot || goto error
error:
shell
"""
CONFIGSCRIPT = """#!ipxe
:retry
dhcp && isset ${{filename}} || goto retry
echo Booting from ${{filename}}
kernel {image_method}://${{next-server}}/vmlinuz.img quiet \
ip=${{ip}}::${{gateway}}:${{netmask}}::eth0:none:${{dns}} \
SERVER_IP=${{next-server}}{collector_prefix} || goto error
initrd {image_method}://${{next-server}}//doconfig.img || goto error
boot || goto error
error:
shell
"""
class ScriptHandler(object):
def __init__(self, configs, collector_prefix):
self.configs = []
self.default_config = dict()
self.default_config['image_method'] = IMAGE_METHOD
self.default_config['collector_prefix'] = collector_prefix
for config in configs:
self.configs.append(self.load_config(config))
self.router = Map([
Rule('/', methods=['GET'], endpoint='default'),
Rule('/wipe', methods=['GET'], endpoint='wipe')
])
def load_config(self, config):
with open(config) as c:
cfg = json.load(c)
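        # Digest the subnet, partition sizes, extra arguments and the image
        # files listed under 'hashes'; the result is handed to the kernel as
        # pixie_sha224.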
m = hashlib.sha224()
m.update(bytes(cfg['subnet'], 'utf-8'))
m.update(bytes(cfg['swap_size']))
m.update(bytes(cfg['root_size']))
m.update(bytes(cfg['extra_args'], 'utf-8'))
# TODO: check sizes
for f in cfg['hashes']:
with open(f, 'rb') as fl:
for line in fl:
m.update(line)
cfg['sha224'] = m.hexdigest()
cfg['subnet'] = ipaddress.ip_network(cfg['subnet'])
cfg['image_method'] = IMAGE_METHOD
return cfg
@responder
def __call__(self, environ, start_response):
try:
return self.wsgi_app(environ, start_response)
except:
traceback.print_exc()
return InternalServerError()
def wsgi_app(self, environ, start_response):
route = self.router.bind_to_environ(environ)
try:
endpoint, args = route.match()
except RequestRedirect as e:
return e
except HTTPException:
return NotFound()
request = Request(environ)
get_args = dict(request.args)
if endpoint == 'wipe':
get_args['wipe'] = 'pixie_wipe=force'
else:
get_args['wipe'] = ""
response = Response()
response.mimetype = 'text/plain'
response.status_code = 200
config = None
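        # Pick the config whose subnet contains the requesting client's IP,
        # if one was supplied.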
if 'ip' in get_args:
ip_addr = ipaddress.ip_address(get_args['ip'][0])
for cfg in self.configs:
if ip_addr in cfg['subnet']:
config = cfg
if config is None:
response.data = CONFIGSCRIPT.format(**self.default_config)
else:
for (k, v) in config.items():
get_args[k] = v
response.data = BOOTSCRIPT.format(**get_args)
return response
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="pixied",
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("configs", action="store", type=str, nargs="+",
help="config files to load")
parser.add_argument("-a", "--addr", action="store", type=str, default="0.0.0.0",
help="address to bind to (default '0.0.0.0')")
parser.add_argument("-p", "--port", action="store", type=int, default=8080,
help="port to bind to (default 8080)")
parser.add_argument("-c", "--collector-prefix", action="store", type=str, default="/pixie_collector",
help="prefix on which the collector is served")
args = parser.parse_args()
server = gevent.wsgi.WSGIServer(
(args.addr, args.port), ScriptHandler(args.configs, args.collector_prefix))
gevent.spawn(server.serve_forever).join()
| algorithm-ninja/pixie | src/pixied.py | Python | apache-2.0 | 4,785 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
import six
import webob.dec
import webob.exc
from manila.api.openstack import wsgi
from manila.i18n import _
from manila import utils
from manila.wsgi import common as base_wsgi
LOG = log.getLogger(__name__)
class FaultWrapper(base_wsgi.Middleware):
"""Calls down the middleware stack, making exceptions into faults."""
_status_to_type = {}
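    # Lazily build and cache a mapping from HTTP status code to the matching
    # webob.exc.HTTPError subclass.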
@staticmethod
def status_to_type(status):
if not FaultWrapper._status_to_type:
for clazz in utils.walk_class_hierarchy(webob.exc.HTTPError):
FaultWrapper._status_to_type[clazz.code] = clazz
return FaultWrapper._status_to_type.get(
status, webob.exc.HTTPInternalServerError)()
def _error(self, inner, req):
if isinstance(inner, UnicodeDecodeError):
msg = _("Error decoding your request. Either the URL or the "
"request body contained characters that could not be "
"decoded by Manila.")
return wsgi.Fault(webob.exc.HTTPBadRequest(explanation=msg))
LOG.exception("Caught error: %s", inner)
safe = getattr(inner, 'safe', False)
headers = getattr(inner, 'headers', None)
status = getattr(inner, 'code', 500)
if status is None:
status = 500
msg_dict = dict(url=req.url, status=status)
LOG.info("%(url)s returned with HTTP %(status)d", msg_dict)
outer = self.status_to_type(status)
if headers:
outer.headers = headers
# NOTE(johannes): We leave the explanation empty here on
# purpose. It could possibly have sensitive information
# that should not be returned back to the user. See
# bugs 868360 and 874472
# NOTE(eglynn): However, it would be over-conservative and
# inconsistent with the EC2 API to hide every exception,
# including those that are safe to expose, see bug 1021373
if safe:
outer.explanation = '%s: %s' % (inner.__class__.__name__,
six.text_type(inner))
return wsgi.Fault(outer)
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
try:
return req.get_response(self.application)
except Exception as ex:
return self._error(ex, req)
| bswartz/manila | manila/api/middleware/fault.py | Python | apache-2.0 | 3,089 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import constants
from construct import Byte, Struct, Enum, Bytes, Const, Array, Renamed, Int16ul
Short = Int16ul
RobotInfo = "robot_info" / Struct(
"penalty" / Enum(Byte, constants.SPLPenalty),
"secs_till_unpenalised" / Byte,
"number_of_yellow_cards" / Byte,
"number_of_red_cards" / Byte
)
TeamInfo = "team" / Struct(
"team_number" / Byte,
"team_color" / Enum(Byte, constants.SPLTeamColor),
"score" / Byte,
"penalty_shot" / Byte, # penalty shot counter
"single_shots" / Short, # bits represent penalty shot success
"coach_sequence" / Byte,
"coach_message" / Bytes(253),
"coach"/ RobotInfo,
"players" / Array(11, RobotInfo)
)
GameState = "gamedata" / Struct(
"header" / Const(constants.GAMECONTROLLER_STRUCT_HEADER, Bytes(4)),
"version" / Const(constants.GAMECONTROLLER_STRUCT_VERSION, Short),
"packet_number" / Byte,
"players_per_team" / Byte,
"game_type" / Byte,
"game_state" / Enum(Byte, constants.State),
"first_half" / Byte,
"kick_of_team" / Byte,
"secondary_state" / Enum(Byte, constants.State2),
"secondary_state_info" / Bytes(4),
"drop_in_team" / Byte,
"drop_in_time" / Short,
"seconds_remaining" / Short,
"secondary_seconds_remaining" / Short,
"teams" / Array(2, TeamInfo)
)
ReturnData = "returndata" / Struct(
"header" / Const(b"RGrt", Bytes(4)),
"version" / Const(constants.GAMECONTROLLER_RESPONSE_VERSION, Byte),
"team" / Byte,
"player" / Byte,
"message" / Byte
)
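# Example (hypothetical usage): an incoming GameController datagram can be
# decoded with GameState.parse(data), and a reply serialized with
# ReturnData.build(...) before sending it back over UDP.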
| remvo/zstt-ros | src/gamecontroller_msgs/src/gamestate.py | Python | apache-2.0 | 1,560 |
# Copyright 2012-2015 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library of random helper functionality."""
import platform, subprocess, operator, os, shutil, re, sys
from glob import glob
class MesonException(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
class File:
def __init__(self, is_built, subdir, fname):
self.is_built = is_built
self.subdir = subdir
self.fname = fname
def __str__(self):
return os.path.join(self.subdir, self.fname)
def __repr__(self):
ret = '<File: {0}'
if not self.is_built:
ret += ' (not built)'
ret += '>'
return ret.format(os.path.join(self.subdir, self.fname))
@staticmethod
def from_source_file(source_root, subdir, fname):
if not os.path.isfile(os.path.join(source_root, subdir, fname)):
raise MesonException('File %s does not exist.' % fname)
return File(False, subdir, fname)
@staticmethod
def from_built_file(subdir, fname):
return File(True, subdir, fname)
@staticmethod
def from_absolute_file(fname):
return File(False, '', fname)
def rel_to_builddir(self, build_to_src):
if self.is_built:
return os.path.join(self.subdir, self.fname)
else:
return os.path.join(build_to_src, self.subdir, self.fname)
def endswith(self, ending):
return self.fname.endswith(ending)
def split(self, s):
return self.fname.split(s)
def __eq__(self, other):
return (self.fname, self.subdir, self.is_built) == (other.fname, other.subdir, other.is_built)
def __hash__(self):
return hash((self.fname, self.subdir, self.is_built))
def get_compiler_for_source(compilers, src):
for comp in compilers:
if comp.can_compile(src):
return comp
raise RuntimeError('No specified compiler can handle file {!s}'.format(src))
def classify_unity_sources(compilers, sources):
compsrclist = {}
for src in sources:
comp = get_compiler_for_source(compilers, src)
if comp not in compsrclist:
compsrclist[comp] = [src]
else:
compsrclist[comp].append(src)
return compsrclist
def flatten(item):
if not isinstance(item, list):
return item
result = []
for i in item:
if isinstance(i, list):
result += flatten(i)
else:
result.append(i)
return result
def is_osx():
return platform.system().lower() == 'darwin'
def is_linux():
return platform.system().lower() == 'linux'
def is_windows():
platname = platform.system().lower()
return platname == 'windows' or 'mingw' in platname
def is_debianlike():
return os.path.isfile('/etc/debian_version')
def exe_exists(arglist):
try:
p = subprocess.Popen(arglist, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
if p.returncode == 0:
return True
except FileNotFoundError:
pass
return False
def detect_vcs(source_dir):
vcs_systems = [
dict(name = 'git', cmd = 'git', repo_dir = '.git', get_rev = 'git describe --dirty=+', rev_regex = '(.*)', dep = '.git/logs/HEAD'),
dict(name = 'mercurial', cmd = 'hg', repo_dir = '.hg', get_rev = 'hg id -i', rev_regex = '(.*)', dep = '.hg/dirstate'),
dict(name = 'subversion', cmd = 'svn', repo_dir = '.svn', get_rev = 'svn info', rev_regex = 'Revision: (.*)', dep = '.svn/wc.db'),
dict(name = 'bazaar', cmd = 'bzr', repo_dir = '.bzr', get_rev = 'bzr revno', rev_regex = '(.*)', dep = '.bzr'),
]
segs = source_dir.replace('\\', '/').split('/')
for i in range(len(segs), -1, -1):
curdir = '/'.join(segs[:i])
for vcs in vcs_systems:
if os.path.isdir(os.path.join(curdir, vcs['repo_dir'])) and shutil.which(vcs['cmd']):
vcs['wc_dir'] = curdir
return vcs
return None
def grab_leading_numbers(vstr):
result = []
for x in vstr.split('.'):
try:
result.append(int(x))
except ValueError:
break
return result
numpart = re.compile('[0-9.]+')
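# Compare a version string against a condition such as '>=1.2.3' or '<2.0';
# only the leading dotted-numeric components of each version are compared.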
def version_compare(vstr1, vstr2):
match = numpart.match(vstr1.strip())
if match is None:
raise MesonException('Uncomparable version string %s.' % vstr1)
vstr1 = match.group(0)
if vstr2.startswith('>='):
cmpop = operator.ge
vstr2 = vstr2[2:]
elif vstr2.startswith('<='):
cmpop = operator.le
vstr2 = vstr2[2:]
elif vstr2.startswith('!='):
cmpop = operator.ne
vstr2 = vstr2[2:]
elif vstr2.startswith('=='):
cmpop = operator.eq
vstr2 = vstr2[2:]
elif vstr2.startswith('='):
cmpop = operator.eq
vstr2 = vstr2[1:]
elif vstr2.startswith('>'):
cmpop = operator.gt
vstr2 = vstr2[1:]
elif vstr2.startswith('<'):
cmpop = operator.lt
vstr2 = vstr2[1:]
else:
cmpop = operator.eq
varr1 = grab_leading_numbers(vstr1)
varr2 = grab_leading_numbers(vstr2)
return cmpop(varr1, varr2)
def default_libdir():
if is_debianlike():
try:
pc = subprocess.Popen(['dpkg-architecture', '-qDEB_HOST_MULTIARCH'],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
(stdo, _) = pc.communicate()
if pc.returncode == 0:
archpath = stdo.decode().strip()
return 'lib/' + archpath
except Exception:
pass
if os.path.isdir('/usr/lib64') and not os.path.islink('/usr/lib64'):
return 'lib64'
return 'lib'
def default_libexecdir():
# There is no way to auto-detect this, so it must be set at build time
return 'libexec'
def default_prefix():
return 'c:/' if is_windows() else '/usr/local'
def get_library_dirs():
if is_windows():
return ['C:/mingw/lib'] # Fixme
if is_osx():
return ['/usr/lib'] # Fix me as well.
# The following is probably Debian/Ubuntu specific.
# /usr/local/lib is first because it contains stuff
# installed by the sysadmin and is probably more up-to-date
# than /usr/lib. If you feel that this search order is
# problematic, please raise the issue on the mailing list.
unixdirs = ['/usr/local/lib', '/usr/lib', '/lib']
plat = subprocess.check_output(['uname', '-m']).decode().strip()
# This is a terrible hack. I admit it and I'm really sorry.
# I just don't know what the correct solution is.
if plat == 'i686':
plat = 'i386'
if plat.startswith('arm'):
plat = 'arm'
unixdirs += glob('/usr/lib/' + plat + '*')
if os.path.exists('/usr/lib64'):
unixdirs.append('/usr/lib64')
unixdirs += glob('/lib/' + plat + '*')
if os.path.exists('/lib64'):
unixdirs.append('/lib64')
unixdirs += glob('/lib/' + plat + '*')
return unixdirs
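# Substitute @VARNAME@ tokens in a template line with values from the
# configuration data; variables that are not defined are replaced with an
# empty string.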
def do_replacement(regex, line, confdata):
match = re.search(regex, line)
while match:
varname = match.group(1)
if varname in confdata.keys():
(var, desc) = confdata.get(varname)
if isinstance(var, str):
pass
elif isinstance(var, int):
var = str(var)
else:
raise RuntimeError('Tried to replace a variable with something other than a string or int.')
else:
var = ''
line = line.replace('@' + varname + '@', var)
match = re.search(regex, line)
return line
def do_mesondefine(line, confdata):
arr = line.split()
if len(arr) != 2:
        raise MesonException('#mesondefine does not contain exactly two tokens: %s' % line.strip())
varname = arr[1]
try:
(v, desc) = confdata.get(varname)
except KeyError:
return '/* #undef %s */\n' % varname
if isinstance(v, bool):
if v:
return '#define %s\n' % varname
else:
return '#undef %s\n' % varname
elif isinstance(v, int):
return '#define %s %d\n' % (varname, v)
elif isinstance(v, str):
return '#define %s %s\n' % (varname, v)
else:
raise MesonException('#mesondefine argument "%s" is of unknown type.' % varname)
def do_conf_file(src, dst, confdata):
try:
with open(src, encoding='utf-8') as f:
data = f.readlines()
except Exception as e:
raise MesonException('Could not read input file %s: %s' % (src, str(e)))
# Only allow (a-z, A-Z, 0-9, _, -) as valid characters for a define
# Also allow escaping '@' with '\@'
regex = re.compile(r'[^\\]?@([-a-zA-Z0-9_]+)@')
result = []
for line in data:
if line.startswith('#mesondefine'):
line = do_mesondefine(line, confdata)
else:
line = do_replacement(regex, line, confdata)
result.append(line)
dst_tmp = dst + '~'
with open(dst_tmp, 'w') as f:
f.writelines(result)
shutil.copymode(src, dst_tmp)
replace_if_different(dst, dst_tmp)
def dump_conf_header(ofilename, cdata):
with open(ofilename, 'w') as ofile:
ofile.write('''/*
* Autogenerated by the Meson build system.
* Do not edit, your changes will be lost.
*/
#pragma once
''')
for k in sorted(cdata.keys()):
(v, desc) = cdata.get(k)
if desc:
ofile.write('/* %s */\n' % desc)
if isinstance(v, bool):
if v:
ofile.write('#define %s\n\n' % k)
else:
ofile.write('#undef %s\n\n' % k)
elif isinstance(v, (int, str)):
ofile.write('#define %s %s\n\n' % (k, v))
else:
raise MesonException('Unknown data type in configuration file entry: ' + k)
def replace_if_different(dst, dst_tmp):
# If contents are identical, don't touch the file to prevent
# unnecessary rebuilds.
different = True
try:
with open(dst, 'r') as f1, open(dst_tmp, 'r') as f2:
if f1.read() == f2.read():
different = False
except FileNotFoundError:
pass
if different:
os.replace(dst_tmp, dst)
else:
os.unlink(dst_tmp)
def stringlistify(item):
if isinstance(item, str):
item = [item]
if not isinstance(item, list):
raise MesonException('Item is not an array')
for i in item:
if not isinstance(i, str):
raise MesonException('List item not a string.')
return item
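# Expand response-file style arguments: any argument of the form @filename is
# replaced by the whitespace-separated arguments read from that file.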
def expand_arguments(args):
    expanded_args = []
    for arg in args:
        if not arg.startswith('@'):
            expanded_args.append(arg)
            continue
        args_file = arg[1:]
        try:
            with open(args_file) as f:
                extended_args = f.read().split()
            expanded_args += extended_args
        except Exception as e:
            print('Error expanding command line arguments, %s not found' % args_file)
            print(e)
            return None
    return expanded_args
| centricular/meson | mesonbuild/mesonlib.py | Python | apache-2.0 | 11,768 |