# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import re
import sys
from spack import *
class Mpich(AutotoolsPackage):
"""MPICH is a high performance and widely portable implementation of
the Message Passing Interface (MPI) standard."""
homepage = "https://www.mpich.org"
url = "https://www.mpich.org/static/downloads/3.0.4/mpich-3.0.4.tar.gz"
git = "https://github.com/pmodels/mpich.git"
list_url = "https://www.mpich.org/static/downloads/"
list_depth = 1
maintainers = ['raffenet', 'yfguo']
executables = ['^mpichversion$']
version('develop', submodules=True)
version('3.4.2', sha256='5c19bea8b84e8d74cca5f047e82b147ff3fba096144270e3911ad623d6c587bf')
version('3.4.1', sha256='8836939804ef6d492bcee7d54abafd6477d2beca247157d92688654d13779727')
version('3.4', sha256='ce5e238f0c3c13ab94a64936060cff9964225e3af99df1ea11b130f20036c24b')
version('3.3.2', sha256='4bfaf8837a54771d3e4922c84071ef80ffebddbb6971a006038d91ee7ef959b9')
version('3.3.1', sha256='fe551ef29c8eea8978f679484441ed8bb1d943f6ad25b63c235d4b9243d551e5')
version('3.3', sha256='329ee02fe6c3d101b6b30a7b6fb97ddf6e82b28844306771fa9dd8845108fa0b')
version('3.2.1', sha256='5db53bf2edfaa2238eb6a0a5bc3d2c2ccbfbb1badd79b664a1a919d2ce2330f1')
version('3.2', sha256='0778679a6b693d7b7caff37ff9d2856dc2bfc51318bf8373859bfa74253da3dc')
version('3.1.4', sha256='f68b5330e94306c00ca5a1c0e8e275c7f53517d01d6c524d51ce9359d240466b')
version('3.1.3', sha256='afb690aa828467721e9d9ab233fe00c68cae2b7b930d744cb5f7f3eb08c8602c')
version('3.1.2', sha256='37c3ba2d3cd3f4ea239497d9d34bd57a663a34e2ea25099c2cbef118c9156587')
version('3.1.1', sha256='455ccfaf4ec724d2cf5d8bff1f3d26a958ad196121e7ea26504fd3018757652d')
version('3.1', sha256='fcf96dbddb504a64d33833dc455be3dda1e71c7b3df411dfcf9df066d7c32c39')
version('3.0.4', sha256='cf638c85660300af48b6f776e5ecd35b5378d5905ec5d34c3da7a27da0acf0b3')
variant('hwloc', default=True, description='Use external hwloc package')
variant('hydra', default=True, description='Build the hydra process manager')
variant('romio', default=True, description='Enable ROMIO MPI I/O implementation')
variant('verbs', default=False, description='Build support for OpenFabrics verbs.')
variant('slurm', default=False, description='Enable SLURM support')
variant('wrapperrpath', default=True, description='Enable wrapper rpath')
variant(
'pmi',
default='pmi',
description='''PMI interface.''',
values=('off', 'pmi', 'pmi2', 'pmix'),
multi=False
)
variant(
'device',
default='ch4',
description='''Abstract Device Interface (ADI)
implementation. The ch4 device is in experimental state for versions
before 3.4.''',
values=('ch3', 'ch4'),
multi=False
)
variant(
'netmod',
default='ofi',
description='''Network module. Only single netmod builds are
supported. For ch3 device configurations, this presumes the
ch3:nemesis communication channel. ch3:sock is not supported by this
spack package at this time.''',
values=('tcp', 'mxm', 'ofi', 'ucx'),
multi=False
)
variant('pci', default=(sys.platform != 'darwin'),
description="Support analyzing devices on PCI bus")
variant('libxml2', default=True,
description='Use libxml2 for XML support instead of the custom '
'minimalistic implementation')
variant('argobots', default=False,
description='Enable Argobots support')
variant('fortran', default=True, description='Enable Fortran support')
provides('mpi@:3.1')
provides('mpi@:3.0', when='@:3.1')
provides('mpi@:2.2', when='@:1.2')
provides('mpi@:2.1', when='@:1.1')
provides('mpi@:2.0', when='@:1.0')
filter_compiler_wrappers(
'mpicc', 'mpicxx', 'mpif77', 'mpif90', 'mpifort', relative_root='bin'
)
# Fix using an external hwloc
# See https://github.com/pmodels/mpich/issues/4038
# and https://github.com/pmodels/mpich/pull/3540
# landed in v3.4b1 v3.4a3
patch('https://github.com/pmodels/mpich/commit/8a851b317ee57366cd15f4f28842063d8eff4483.patch',
sha256='eb982de3366d48cbc55eb5e0df43373a45d9f51df208abf0835a72dc6c0b4774',
when='@3.3:3.3.99 +hwloc')
# fix MPI_Barrier segmentation fault
# see https://lists.mpich.org/pipermail/discuss/2016-May/004764.html
# and https://lists.mpich.org/pipermail/discuss/2016-June/004768.html
patch('mpich32_clang.patch', when='@3.2:3.2.0%clang')
patch('mpich32_clang.patch', when='@3.2:3.2.0%apple-clang')
# Fix SLURM node list parsing
# See https://github.com/pmodels/mpich/issues/3572
# and https://github.com/pmodels/mpich/pull/3578
# Even though there is no version 3.3.0, we need to specify 3.3:3.3.0 in
# the when clause, otherwise the patch will be applied to 3.3.1, too.
patch('https://github.com/pmodels/mpich/commit/b324d2de860a7a2848dc38aefb8c7627a72d2003.patch',
sha256='c7d4ecf865dccff5b764d9c66b6a470d11b0b1a5b4f7ad1ffa61079ad6b5dede',
when='@3.3:3.3.0')
# This patch for Libtool 2.4.2 enables shared libraries for NAG and is
# applied by MPICH starting version 3.1.
patch('nag_libtool_2.4.2_0.patch', when='@:3.0%nag')
# This patch for Libtool 2.4.2 fixes the problem with '-pthread' flag and
# enables convenience libraries for NAG. Starting version 3.1, the order of
# checks for FC and F77 is changed, therefore we need to apply the patch in
# two steps (the patch files can be merged once the support for versions
# 3.1 and older is dropped).
patch('nag_libtool_2.4.2_1.patch', when='@:3.1.3%nag')
patch('nag_libtool_2.4.2_2.patch', when='@:3.1.3%nag')
# This patch for Libtool 2.4.6 does the same as the previous two. The
# problem is not fixed upstream yet and the upper version constraint is
# given just to avoid application of the patch to the develop version.
patch('nag_libtool_2.4.6.patch', when='@3.1.4:3.3%nag')
depends_on('findutils', type='build')
depends_on('pkgconfig', type='build')
depends_on('[email protected]:', when='@3.3: +hwloc')
depends_on('libfabric', when='netmod=ofi')
# The ch3 ofi netmod results in crashes with libfabric 1.7
# See https://github.com/pmodels/mpich/issues/3665
depends_on('libfabric@:1.6', when='device=ch3 netmod=ofi')
depends_on('ucx', when='netmod=ucx')
# The dependencies on libpciaccess and libxml2 come from the embedded
# hwloc, which, before version 3.3, was used only for Hydra.
depends_on('libpciaccess', when="@:3.2+hydra+pci")
depends_on('libxml2', when='@:3.2+hydra+libxml2')
# Starting with version 3.3, MPICH uses hwloc directly.
depends_on('libpciaccess', when="@3.3:+pci")
depends_on('libxml2', when='@3.3:+libxml2')
# Starting with version 3.3, Hydra can use libslurm for nodelist parsing
depends_on('slurm', when='+slurm')
depends_on('pmix', when='pmi=pmix')
# +argobots variant requires Argobots
depends_on('argobots', when='+argobots')
# building from git requires regenerating autotools files
depends_on('[email protected]:', when='@develop', type=("build"))
depends_on('[email protected]:', when='@develop', type=("build"))
depends_on("m4", when="@develop", type=("build")),
depends_on("[email protected]:", when='@develop', type=("build"))
# building with "+hwloc' also requires regenerating autotools files
depends_on('[email protected]:', when='@3.3:3.3.99 +hwloc', type="build")
depends_on('[email protected]:', when='@3.3:3.3.99 +hwloc', type="build")
depends_on("m4", when="@3.3:3.3.99 +hwloc", type="build"),
depends_on("[email protected]:", when='@3.3:3.3.99 +hwloc', type="build")
# MPICH's Yaksa submodule requires python to configure
depends_on("[email protected]:", when="@develop", type="build")
conflicts('device=ch4', when='@:3.2')
conflicts('netmod=ofi', when='@:3.1.4')
conflicts('netmod=ucx', when='device=ch3')
conflicts('netmod=mxm', when='device=ch4')
conflicts('netmod=mxm', when='@:3.1.3')
conflicts('netmod=tcp', when='device=ch4')
conflicts('pmi=pmi2', when='device=ch3 netmod=ofi')
conflicts('pmi=pmix', when='device=ch3')
conflicts('pmi=pmix', when='+hydra')
# MPICH does not require libxml2 and libpciaccess for versions before 3.3
# when ~hydra is set: prevent users from setting +libxml2 and +pci in this
# case to avoid generating an identical MPICH installation.
conflicts('+pci', when='@:3.2~hydra')
conflicts('+libxml2', when='@:3.2~hydra')
# see https://github.com/pmodels/mpich/pull/5031
conflicts('%clang@:7', when='@3.4:3.4.1')
@run_after('configure')
def patch_cce(self):
# Configure misinterprets output from the cce compiler
# Patching configure instead should be possible, but a first
# implementation failed in obscure ways that were not worth
# tracking down when this worked
if self.spec.satisfies('%cce'):
filter_file('-L -L', '', 'config.lt', string=True)
filter_file('-L -L', '', 'libtool', string=True)
filter_file('-L -L', '', 'config.status', string=True)
@classmethod
def determine_version(cls, exe):
output = Executable(exe)(output=str, error=str)
match = re.search(r'MPICH Version:\s+(\S+)', output)
return match.group(1) if match else None
@classmethod
def determine_variants(cls, exes, version):
def get_spack_compiler_spec(path):
spack_compilers = spack.compilers.find_compilers([path])
actual_compiler = None
# check if the compiler actually matches the one we want
for spack_compiler in spack_compilers:
if os.path.dirname(spack_compiler.cc) == path:
actual_compiler = spack_compiler
break
return actual_compiler.spec if actual_compiler else None
def is_enabled(text):
if text in set(['t', 'true', 'enabled', 'enable', 'with',
'yes', '1']):
return True
return False
def is_disabled(text):
if text in set(['f', 'false', 'disabled', 'disable',
'without', 'no', '0']):
return True
return False
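        # `mpichversion` prints its configure command line together with summary
        # fields such as "MPICH Version:", "MPICH Device:" and "MPICH CC:"; the
        # regular expressions below grep that output to reconstruct the variants.
        # (Exact formatting may vary between MPICH releases, so treat this as a
        # best-effort parse.)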
results = []
for exe in exes:
variants = ''
output = Executable(exe)(output=str, error=str)
if re.search(r'--with-hwloc-prefix=embedded', output):
variants += '~hwloc'
if re.search(r'--with-pm=hydra', output):
variants += '+hydra'
else:
variants += '~hydra'
match = re.search(r'--(\S+)-romio', output)
if match and is_enabled(match.group(1)):
variants += '+romio'
elif match and is_disabled(match.group(1)):
variants += '~romio'
if re.search(r'--with-ibverbs', output):
variants += '+verbs'
elif re.search(r'--without-ibverbs', output):
variants += '~verbs'
match = re.search(r'--enable-wrapper-rpath=(\S+)', output)
if match and is_enabled(match.group(1)):
variants += '+wrapperrpath'
match = re.search(r'--enable-wrapper-rpath=(\S+)', output)
if match and is_disabled(match.group(1)):
variants += '~wrapperrpath'
if re.search(r'--disable-fortran', output):
variants += '~fortran'
match = re.search(r'--with-slurm=(\S+)', output)
if match and is_enabled(match.group(1)):
variants += '+slurm'
if re.search(r'--enable-libxml2', output):
variants += '+libxml2'
elif re.search(r'--disable-libxml2', output):
variants += '~libxml2'
if re.search(r'--with-thread-package=argobots', output):
variants += '+argobots'
if re.search(r'--with-pmi=no', output):
variants += ' pmi=off'
elif re.search(r'--with-pmi=simple', output):
variants += ' pmi=pmi'
elif re.search(r'--with-pmi=pmi2/simple', output):
variants += ' pmi=pmi2'
elif re.search(r'--with-pmix', output):
variants += ' pmi=pmix'
match = re.search(r'MPICH Device:\s+(ch3|ch4)', output)
if match:
variants += ' device=' + match.group(1)
match = re.search(r'--with-device=ch.\S+(ucx|ofi|mxm|tcp)', output)
if match:
variants += ' netmod=' + match.group(1)
match = re.search(r'MPICH CC:\s+(\S+)', output)
compiler_spec = get_spack_compiler_spec(
os.path.dirname(match.group(1)))
if compiler_spec:
variants += '%' + str(compiler_spec)
results.append(variants)
return results
def setup_build_environment(self, env):
env.unset('F90')
env.unset('F90FLAGS')
# https://bugzilla.redhat.com/show_bug.cgi?id=1795817
if self.spec.satisfies('%gcc@10:'):
env.set('FFLAGS', '-fallow-argument-mismatch')
# Same fix but for macOS - avoids issue #17934
if self.spec.satisfies('%apple-clang@11:'):
env.set('FFLAGS', '-fallow-argument-mismatch')
if self.spec.satisfies('%clang@11:'):
env.set('FFLAGS', '-fallow-argument-mismatch')
def setup_run_environment(self, env):
# Because MPI implementations provide compilers, they have to add to
# their run environments the code to make the compilers available.
# For Cray MPIs, the regular compiler wrappers *are* the MPI wrappers.
# Cray MPIs always have cray in the module name, e.g. "cray-mpich"
external_modules = self.spec.external_modules
if external_modules and 'cray' in external_modules[0]:
# This is intended to support external MPICH instances registered
# by Spack on Cray machines prior to a879c87; users defining an
# external MPICH entry for Cray should generally refer to the
# "cray-mpich" package
env.set('MPICC', spack_cc)
env.set('MPICXX', spack_cxx)
env.set('MPIF77', spack_fc)
env.set('MPIF90', spack_fc)
else:
env.set('MPICC', join_path(self.prefix.bin, 'mpicc'))
env.set('MPICXX', join_path(self.prefix.bin, 'mpic++'))
env.set('MPIF77', join_path(self.prefix.bin, 'mpif77'))
env.set('MPIF90', join_path(self.prefix.bin, 'mpif90'))
def setup_dependent_build_environment(self, env, dependent_spec):
self.setup_run_environment(env)
env.set('MPICH_CC', spack_cc)
env.set('MPICH_CXX', spack_cxx)
env.set('MPICH_F77', spack_f77)
env.set('MPICH_F90', spack_fc)
env.set('MPICH_FC', spack_fc)
def setup_dependent_package(self, module, dependent_spec):
spec = self.spec
# For Cray MPIs, the regular compiler wrappers *are* the MPI wrappers.
# Cray MPIs always have cray in the module name, e.g. "cray-mpich"
external_modules = spec.external_modules
if external_modules and 'cray' in external_modules[0]:
spec.mpicc = spack_cc
spec.mpicxx = spack_cxx
spec.mpifc = spack_fc
spec.mpif77 = spack_f77
else:
spec.mpicc = join_path(self.prefix.bin, 'mpicc')
spec.mpicxx = join_path(self.prefix.bin, 'mpic++')
if '+fortran' in spec:
spec.mpifc = join_path(self.prefix.bin, 'mpif90')
spec.mpif77 = join_path(self.prefix.bin, 'mpif77')
spec.mpicxx_shared_libs = [
join_path(self.prefix.lib, 'libmpicxx.{0}'.format(dso_suffix)),
join_path(self.prefix.lib, 'libmpi.{0}'.format(dso_suffix))
]
def autoreconf(self, spec, prefix):
"""Not needed usually, configure should be already there"""
# If configure exists nothing needs to be done
if (os.path.exists(self.configure_abs_path) and
not spec.satisfies('@3.3:3.3.99 +hwloc')):
return
# Else bootstrap with autotools
bash = which('bash')
bash('./autogen.sh')
@run_before('autoreconf')
def die_without_fortran(self):
# Until we can pass variants such as +fortran through virtual
# dependencies depends_on('mpi'), require Fortran compiler to
# avoid delayed build errors in dependents.
# The user can work around this by disabling Fortran explicitly
# with ~fortran
f77 = self.compiler.f77
fc = self.compiler.fc
fortran_missing = f77 is None or fc is None
if '+fortran' in self.spec and fortran_missing:
raise InstallError(
'mpich +fortran requires Fortran compilers. Configure '
'Fortran compiler or disable Fortran support with ~fortran'
)
def configure_args(self):
spec = self.spec
config_args = [
'--disable-silent-rules',
'--enable-shared',
'--with-hwloc-prefix={0}'.format(
spec['hwloc'].prefix if '^hwloc' in spec else 'embedded'),
'--with-pm={0}'.format('hydra' if '+hydra' in spec else 'no'),
'--{0}-romio'.format('enable' if '+romio' in spec else 'disable'),
'--{0}-ibverbs'.format('with' if '+verbs' in spec else 'without'),
'--enable-wrapper-rpath={0}'.format('no' if '~wrapperrpath' in
spec else 'yes')
]
if '~fortran' in spec:
config_args.append('--disable-fortran')
if '+slurm' in spec:
config_args.append('--with-slurm=yes')
config_args.append('--with-slurm-include={0}'.format(
spec['slurm'].prefix.include))
config_args.append('--with-slurm-lib={0}'.format(
spec['slurm'].prefix.lib))
else:
config_args.append('--with-slurm=no')
if 'pmi=off' in spec:
config_args.append('--with-pmi=no')
elif 'pmi=pmi' in spec:
config_args.append('--with-pmi=simple')
elif 'pmi=pmi2' in spec:
config_args.append('--with-pmi=pmi2/simple')
elif 'pmi=pmix' in spec:
config_args.append('--with-pmix={0}'.format(spec['pmix'].prefix))
# setup device configuration
device_config = ''
if 'device=ch4' in spec:
device_config = '--with-device=ch4:'
elif 'device=ch3' in spec:
device_config = '--with-device=ch3:nemesis:'
if 'netmod=ucx' in spec:
device_config += 'ucx'
elif 'netmod=ofi' in spec:
device_config += 'ofi'
elif 'netmod=mxm' in spec:
device_config += 'mxm'
elif 'netmod=tcp' in spec:
device_config += 'tcp'
config_args.append(device_config)
# Specify libfabric or ucx path explicitly, otherwise
# configure might fall back to an embedded version.
if 'netmod=ofi' in spec:
config_args.append('--with-libfabric={0}'.format(
spec['libfabric'].prefix))
if 'netmod=ucx' in spec:
config_args.append('--with-ucx={0}'.format(
spec['ucx'].prefix))
# In other cases the argument is redundant.
if '@:3.2+hydra' in spec or '@3.3:' in spec:
# The root configure script passes the argument to the configure
# scripts of all instances of hwloc (there are three copies of it:
# for hydra, for hydra2, and for MPICH itself).
config_args += self.enable_or_disable('libxml2')
# If +argobots specified, add argobots option
if '+argobots' in spec:
config_args.append('--with-thread-package=argobots')
config_args.append('--with-argobots=' + spec['argobots'].prefix)
return config_args
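    # Illustrative only: a spec such as
    #     mpich@3.4.2 device=ch4 netmod=ofi +slurm pmi=pmi2
    # would yield, among other flags,
    #     --with-device=ch4:ofi --with-slurm=yes --with-pmi=pmi2/simple
    # from configure_args() above.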
|
# Copyright (c) The Diem Core Contributors
# SPDX-License-Identifier: Apache-2.0
from diem import diem_types, utils, InvalidAccountAddressError, InvalidSubAddressError, jsonrpc
import pytest
def test_account_address():
with pytest.raises(InvalidAccountAddressError):
utils.account_address(bytes.fromhex("aaaa"))
with pytest.raises(InvalidAccountAddressError):
utils.account_address("aaaa")
with pytest.raises(InvalidAccountAddressError):
utils.account_address("0000000000000000000000000a550c1x")
valid_address = "0000000000000000000000000a550c18"
address = utils.account_address(valid_address)
assert address
assert utils.account_address(address) == address
assert utils.account_address(bytes.fromhex(valid_address)) == address
def test_sub_address():
with pytest.raises(InvalidSubAddressError):
utils.sub_address(bytes.fromhex("aa"))
assert utils.sub_address(bytes.fromhex("aaaaaaaaaaaaaaaa")) is not None
with pytest.raises(InvalidSubAddressError):
utils.sub_address("aa")
sub_address = utils.sub_address("aaaaaaaaaaaaaaaa")
assert sub_address is not None
assert isinstance(sub_address, bytes)
def test_currency_code():
ccode = utils.currency_code("XUS")
assert isinstance(ccode, diem_types.TypeTag)
code = utils.type_tag_to_str(ccode)
assert code == "XUS"
with pytest.raises(TypeError):
utils.currency_code(False)
with pytest.raises(TypeError):
utils.type_tag_to_str(False)
def test_decode_transaction_script():
script_bytes = "e001a11ceb0b010000000701000202020403061004160205181d0735600895011000000001010000020001000003020301010004010300010501060c0108000506080005030a020a020005060c05030a020a020109000b4469656d4163636f756e741257697468647261774361706162696c6974791b657874726163745f77697468647261775f6361706162696c697479087061795f66726f6d1b726573746f72655f77697468647261775f6361706162696c69747900000000000000000000000000000001010104010c0b0011000c050e050a010a020b030b0438000b05110202010700000000000000000000000000000001035855530358555300040393e341892c775c38fd524757d641f0a20100e1f5050000000004000400"
script_call = utils.decode_transaction_script(script_bytes)
assert type(script_call).__name__ == "ScriptCall__PeerToPeerWithMetadata"
assert script_call.amount == 100_000_000
script_call = utils.decode_transaction_script(jsonrpc.TransactionData(script_bytes=script_bytes))
assert type(script_call).__name__ == "ScriptCall__PeerToPeerWithMetadata"
assert script_call.amount == 100_000_000
script_call = utils.decode_transaction_script(
jsonrpc.Transaction(transaction=jsonrpc.TransactionData(script_bytes=script_bytes))
)
assert type(script_call).__name__ == "ScriptCall__PeerToPeerWithMetadata"
assert script_call.amount == 100_000_000
with pytest.raises(TypeError):
utils.decode_transaction_script(False)
def test_balance():
account = jsonrpc.Account(
balances=[
jsonrpc.Amount(amount=32, currency="XUS"),
jsonrpc.Amount(amount=33, currency="XDX"),
]
)
assert utils.balance(account, "XUS") == 32
assert utils.balance(account, "XDX") == 33
assert utils.balance(account, "unknown") == 0
|
import pytest
from fuzzyoctodisco.pic import *
@pytest.fixture
def pic_object():
picture = Picture("pictures/lena.png")
picture.open()
return picture
@pytest.fixture
def pic_object_raw(pic_object):
pic_object.img2raw()
return pic_object
@pytest.fixture
def pic_object_faces(pic_object_raw):
pic_object_raw.find_faces()
return pic_object_raw
def test_open_wrong_file():
with pytest.raises(Exception, match="can't be read as an image with Pillow"):
picture = Picture("find_faces/test/test_pic.py")
picture.open()
def test_image2raw(pic_object):
im_arr = image2raw(pic_object.im)
assert im_arr.shape == (512, 512, 3)
pixel = im_arr[19][19]
assert pixel.tolist() == [225, 130, 109]
def test_find_faces_without_raw(pic_object):
with pytest.raises(Exception):
pic_object.find_faces()
def test_find_face(pic_object_raw):
face_location = find_faces(pic_object_raw.raw)
assert face_location == [(218, 219, 373, 374)]
def test_face_crop_without_find_faces(pic_object_raw):
    with pytest.raises(Exception):
        pic_object_raw.face_crop()
def test_face_crop_for_a_surnumerous_face(pic_object_faces):
with pytest.raises(Exception):
pic_object_faces.face_crop(whichface=1)
def test_face_crop(pic_object_faces):
pic_object_faces.face_crop()
pic_object_faces.img2raw()
assert pic_object_faces.raw.shape == (155, 155, 3)
assert pic_object_faces.face_location == [(0, 0, 155, 155)]
def test_rotate_90(pic_object):
pic_object.rotate(Picture.ROTATE_90)
im_arr = image2raw(pic_object.im)
assert im_arr.shape == (512, 512, 3)
pixel = im_arr[512 - (19 + 1)][19]
assert pixel.tolist() == [225, 130, 109]
def test_clone(pic_object_raw):
clone = pic_object_raw.clone()
clone.img2raw()
assert clone.raw.tolist() == pic_object_raw.raw.tolist()
def test_pic_per_face(pic_object_faces):
pics = pic_object_faces.get_faces_as_Pic()
assert len(pics) == 1
pic = pics[0]
pic.img2raw()
pic_object_faces.face_crop()
pic_object_faces.img2raw()
assert pic.raw.tolist() == pic_object_faces.raw.tolist()
|
'use strict';
function _interopDefault (ex) { return (ex && (typeof ex === 'object') && 'default' in ex) ? ex['default'] : ex; }
var __chunk_1 = require('../chunk-cf203200.js');
var React = _interopDefault(require('react'));
require('@carbon/icon-helpers');
require('prop-types');
var FlashOff24 =
/*#__PURE__*/
React.forwardRef(function FlashOff24(props, ref) {
return React.createElement(__chunk_1.Icon, __chunk_1._extends({
width: 24,
height: 24,
viewBox: "0 0 32 32",
ref: ref
}, props), React.createElement("path", {
d: "M11.13 6.89L11.8 4h8l-2 9H23l-2.49 3.25 1.43 1.43 3.87-5.07a1 1 0 0 0 .11-1A1 1 0 0 0 25 11H20.25L22 3.22a1 1 0 0 0-.2-.85A1 1 0 0 0 21 2H11a1 1 0 0 0-1 .77L9.46 5.22zM30 28.59l-9.31-9.31h0l-1.42-1.43h0L10.6 9.19h0L8.93 7.52h0L3.41 2 2 3.41 8.4 9.82 7 15.77A1 1 0 0 0 8 17h4.83L11 28.85a1 1 0 0 0 .6 1.07A1.09 1.09 0 0 0 12 30a1 1 0 0 0 .79-.39l6.68-8.73L28.59 30zM9.26 15l.81-3.52L13.59 15zm4.32 10.28L15 16.37 18 19.45z"
}), props.children);
});
module.exports = FlashOff24;
|
import importlib
from functools import wraps
from typing import Optional, Union
from unittest.mock import patch
from .utils.logging import get_logger
from .utils.patching import patch_submodule
from .utils.streaming_download_manager import (
xbasename,
xdirname,
xet_parse,
xglob,
xisdir,
xisfile,
xjoin,
xlistdir,
xopen,
xpandas_read_csv,
xpandas_read_excel,
xpathglob,
xpathjoin,
xpathname,
xpathopen,
xpathparent,
xpathrglob,
xpathstem,
xpathsuffix,
xwalk,
)
logger = get_logger(__name__)
def extend_module_for_streaming(module_path, use_auth_token: Optional[Union[str, bool]] = None):
"""Extend the module to support streaming.
We patch some functions in the module to use `fsspec` to support data streaming:
- We use `fsspec.open` to open and read remote files. We patch the module function:
- `open`
- We use the "::" hop separator to join paths and navigate remote compressed/archive files. We patch the module
functions:
- `os.path.join`
- `pathlib.Path.joinpath` and `pathlib.Path.__truediv__` (called when using the "/" operator)
The patched functions are replaced with custom functions defined to work with the
:class:`~utils.streaming_download_manager.StreamingDownloadManager`.
Args:
module_path: Path to the module to be extended.
        use_auth_token: Optional token (or boolean flag) forwarded to the patched functions to authenticate requests for remote files.
"""
module = importlib.import_module(module_path)
if hasattr(module, "_patched_for_streaming") and module._patched_for_streaming:
return
def wrap_auth(function):
@wraps(function)
def wrapper(*args, **kwargs):
return function(*args, use_auth_token=use_auth_token, **kwargs)
wrapper._decorator_name_ = "wrap_auth"
return wrapper
# open files in a streaming fashion
patch_submodule(module, "open", wrap_auth(xopen)).start()
patch_submodule(module, "os.listdir", wrap_auth(xlistdir)).start()
patch_submodule(module, "os.walk", wrap_auth(xwalk)).start()
patch_submodule(module, "glob.glob", wrap_auth(xglob)).start()
# allow to navigate in remote zip files
patch_submodule(module, "os.path.join", xjoin).start()
patch_submodule(module, "os.path.dirname", xdirname).start()
patch_submodule(module, "os.path.basename", xbasename).start()
# allow checks on paths
patch_submodule(module, "os.path.isdir", wrap_auth(xisdir)).start()
patch_submodule(module, "os.path.isfile", wrap_auth(xisfile)).start()
if hasattr(module, "Path"):
patch.object(module.Path, "joinpath", xpathjoin).start()
patch.object(module.Path, "__truediv__", xpathjoin).start()
patch.object(module.Path, "open", wrap_auth(xpathopen)).start()
patch.object(module.Path, "glob", wrap_auth(xpathglob)).start()
patch.object(module.Path, "rglob", wrap_auth(xpathrglob)).start()
patch.object(module.Path, "parent", property(fget=xpathparent)).start()
patch.object(module.Path, "name", property(fget=xpathname)).start()
patch.object(module.Path, "stem", property(fget=xpathstem)).start()
patch.object(module.Path, "suffix", property(fget=xpathsuffix)).start()
patch_submodule(module, "pd.read_csv", wrap_auth(xpandas_read_csv), attrs=["__version__"]).start()
patch_submodule(module, "pd.read_excel", xpandas_read_excel, attrs=["__version__"]).start()
# xml.etree.ElementTree
for submodule in ["ElementTree", "ET"]:
patch_submodule(module, f"{submodule}.parse", wrap_auth(xet_parse)).start()
module._patched_for_streaming = True
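# Minimal usage sketch (the module path below is hypothetical; in practice the
# datasets library applies this to the dynamically imported dataset builder module):
#
#     extend_module_for_streaming("datasets_modules.datasets.my_dataset", use_auth_token=True)
#
# Afterwards, `open`, `os.path.join`, `glob.glob` and the patched `pathlib.Path`
# methods used inside that module resolve remote URLs through fsspec instead of
# the local filesystem.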
|
// jshint ignore: start
(function() {
var browser = false;
var is_iOS = /iP(hone|od|ad)/.test(navigator.platform) && /Apple Computer/.test(navigator.vendor);
var isIE10 = (Function('/*@cc_on return document.documentMode===10@*/')());
var isIE11 = (!(window.ActiveXObject) && "ActiveXObject" in window);
var isChrome = /Chrome/.test(navigator.userAgent) && /Google Inc/.test(navigator.vendor);
var isSafari = /Safari/.test(navigator.userAgent) && /Apple Computer/.test(navigator.vendor);
var isFirefox = /Mozilla/.test(navigator.userAgent) && navigator.vendor === '';
browser = (browser === false && ( is_iOS )) ? 'ios' : false;
browser = (browser === false && ( isIE10 )) ? 'ie10' : browser;
browser = (browser === false && ( isIE11 )) ? 'ie11' : browser;
browser = (browser === false && ( isChrome )) ? 'chrome' : browser;
browser = (browser === false && ( isSafari )) ? 'safari' : browser;
browser = (browser === false && ( isFirefox )) ? 'firefox' : browser;
var addBrowserClass = function(browser) {
$("html").addClass(browser);
};
var iOS_Browser = function(browser) {
var ver;
(function() {
var v = (navigator.appVersion).match(/OS (\d+)_(\d+)_?(\d+)?/);
ver = [parseInt(v[1], 10), parseInt(v[2], 10), parseInt(v[3] || 0, 10)];
})();
browser = "ios" + ver[0];
if (browser === 'ios7') {
window.addEventListener("resize", function() {
$('.entry').hide().fadeIn(1);
}, false);
// iOS 8 Simulator returns '10'
} else if (browser === 'ios10') {
browser = 'ios8';
}
addBrowserClass(browser);
};
var browserActions = {
'ie10': addBrowserClass,
'ie11': addBrowserClass,
'chrome': addBrowserClass,
'safari': addBrowserClass,
'firefox': addBrowserClass,
'ios': iOS_Browser,
};
function processBrowser(browser) {
if (typeof browserActions[browser] !== 'function') {
return;
}
//console.log('browser processed!');
//console.log('browser: ' + browser);
return browserActions[browser](browser);
};
processBrowser(browser);
}());
// jshint ignore: end
|
// credit to: https://codingsans.com/blog/node-config-best-practices for config file structure
const env = process.env.NODE_ENV ? process.env.NODE_ENV : 'development';
const development = {
appName: "Node Console Boilerplate - Dev"
};
const staging = {
appName: "Node Console Boilerplate - Staging"
};
const production = {
appName: "Node Console Boilerplate - Production"
};
const config = {
development,
staging,
production
};
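// Usage sketch (import path is hypothetical):
//   import config from './config';
//   console.log(config.appName); // entry matching NODE_ENV, defaulting to 'development'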
export default config[env];
|
# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""End-to-end tests for tensorflow_hub."""
import os
import tarfile
import tempfile
from absl import logging
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow_hub import test_utils
from tensorflow_hub import tf_utils
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.ops.lookup_ops import index_to_string_table_from_file
# pylint: enable=g-direct-tensorflow-import
class End2EndTest(tf.test.TestCase):
def setUp(self):
super(End2EndTest, self).setUp()
# Set current directory to test temp directory where we can create
# files and serve them through the HTTP server.
os.chdir(self.get_temp_dir())
self.server_port = test_utils.start_http_server()
def _stateless_module_fn(self):
"""Simple module that squares an input."""
x = tf.compat.v1.placeholder(tf.int64)
y = x*x
hub.add_signature(inputs=x, outputs=y)
def _create_tgz(self, export_path, archive_name="test_module.tgz"):
os.chdir(export_path)
tar = tarfile.open(archive_name, "w")
for directory, subdirs, files in tf.compat.v1.gfile.Walk(export_path):
for subdir in subdirs:
tar.add(subdir)
for file_name in files:
full_path = os.path.join(directory, file_name)
tar.add(full_path[len(export_path)+1:])
tar.close()
def _generate_module(self):
module_export_path = os.path.join(self.get_temp_dir(), "module")
test_utils.export_module(module_export_path)
self._create_tgz(module_export_path)
def test_http_locations(self):
with tf.Graph().as_default():
self._generate_module()
m = hub.Module("http://localhost:%d/test_module.tgz" % self.server_port)
out = m(11)
with tf.compat.v1.Session() as sess:
self.assertAllClose(sess.run(out), 121)
# Test caching using custom filesystem (file://) to make sure that the
# TF Hub library can operate on such paths.
try:
root_dir = "file://%s" % self.get_temp_dir()
cache_dir = "%s_%s" % (root_dir, "cache")
tf.compat.v1.gfile.MakeDirs(cache_dir)
os.environ["TFHUB_CACHE_DIR"] = cache_dir
m = hub.Module("http://localhost:%d/test_module.tgz" % self.server_port)
out = m(11)
with tf.compat.v1.train.MonitoredSession() as sess:
self.assertAllClose(sess.run(out), 121)
cache_content = sorted(tf.compat.v1.gfile.ListDirectory(cache_dir))
logging.info("Cache context: %s", str(cache_content))
self.assertEqual(2, len(cache_content))
self.assertTrue(cache_content[1].endswith(".descriptor.txt"))
module_files = sorted(tf.compat.v1.gfile.ListDirectory(
os.path.join(cache_dir, cache_content[0])))
self.assertListEqual(
["assets", "saved_model.pb", "tfhub_module.pb", "variables"],
module_files)
finally:
os.unsetenv("TFHUB_CACHE_DIR")
def test_module_export_vocab_on_custom_fs(self):
root_dir = "file://%s" % self.get_temp_dir()
export_dir = "%s_%s" % (root_dir, "export")
tf.compat.v1.gfile.MakeDirs(export_dir)
# Create a module with a vocab file located on a custom filesystem.
vocab_dir = os.path.join(root_dir, "vocab_location")
tf.compat.v1.gfile.MakeDirs(vocab_dir)
vocab_filename = os.path.join(vocab_dir, "tokens.txt")
tf_utils.atomic_write_string_to_file(vocab_filename, "one", False)
def create_assets_module_fn():
def assets_module_fn():
indices = tf.compat.v1.placeholder(dtype=tf.int64, name="indices")
table = index_to_string_table_from_file(
vocabulary_file=vocab_filename, default_value="UNKNOWN")
outputs = table.lookup(indices)
hub.add_signature(inputs=indices, outputs=outputs)
return assets_module_fn
with tf.Graph().as_default():
assets_module_fn = create_assets_module_fn()
spec = hub.create_module_spec(assets_module_fn)
embedding_module = hub.Module(spec)
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.tables_initializer())
embedding_module.export(export_dir, sess)
module_files = tf.compat.v1.gfile.ListDirectory(export_dir)
self.assertListEqual(
["assets", "saved_model.pb", "tfhub_module.pb", "variables"],
sorted(module_files))
module_files = tf.compat.v1.gfile.ListDirectory(os.path.join(export_dir,
"assets"))
self.assertListEqual(["tokens.txt"], module_files)
def test_resolve(self):
with tf.Graph().as_default():
self._generate_module()
module_dir = hub.resolve(
"http://localhost:%d/test_module.tgz" % self.server_port)
self.assertIn(tempfile.gettempdir(), module_dir)
module_files = sorted(tf.compat.v1.gfile.ListDirectory(module_dir))
self.assertEqual(
["assets", "saved_model.pb", "tfhub_module.pb", "variables"],
module_files)
def test_load(self):
if not hasattr(tf.compat.v1.saved_model, "load_v2"):
try:
hub.load("@my/tf2_module/2")
self.fail("Failure expected. hub.load() not supported in TF 1.x")
except NotImplementedError:
pass
elif tf.compat.v1.executing_eagerly():
class AdderModule(tf.train.Checkpoint):
@tf.function(
input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)])
def add(self, x):
return x + x + 1.
to_export = AdderModule()
save_dir = os.path.join(self.get_temp_dir(), "saved_model_v2")
tf.saved_model.save(to_export, save_dir)
module_name = "test_module_v2.tgz"
self._create_tgz(save_dir, module_name)
restored_module = hub.load(
"http://localhost:%d/%s" % (self.server_port, module_name))
self.assertIsNotNone(restored_module)
self.assertTrue(hasattr(restored_module, "add"))
def test_load_v1(self):
if (not hasattr(tf.compat.v1.saved_model, "load_v2") or
not tf.compat.v1.executing_eagerly()):
return # The test only applies when running V2 mode.
full_module_path = test_utils.get_test_data_path("half_plus_two_v1.tar.gz")
os.chdir(os.path.dirname(full_module_path))
server_port = test_utils.start_http_server()
handle = "http://localhost:%d/half_plus_two_v1.tar.gz" % server_port
hub.load(handle)
if __name__ == "__main__":
tf.test.main()
|
/**
* Arduino Mega with RAMPS v1.0, v1.1, v1.2 pin assignments
*/
#if !defined(__AVR_ATmega1280__) && !defined(__AVR_ATmega2560__)
#error Oops! Make sure you have 'Arduino Mega' selected from the 'Tools -> Boards' menu.
#endif
// Uncomment the following line for RAMPS v1.0
//#define RAMPS_V_1_0
#define X_STEP_PIN 26
#define X_DIR_PIN 28
#define X_ENABLE_PIN 24
#define X_MIN_PIN 3
#define X_MAX_PIN 2
#define Y_STEP_PIN 38
#define Y_DIR_PIN 40
#define Y_ENABLE_PIN 36
#define Y_MIN_PIN 16
#define Y_MAX_PIN 17
#define Z_STEP_PIN 44
#define Z_DIR_PIN 46
#define Z_ENABLE_PIN 42
#define Z_MIN_PIN 18
#define Z_MAX_PIN 19
#define E0_STEP_PIN 32
#define E0_DIR_PIN 34
#define E0_ENABLE_PIN 30
#define SDPOWER 48
#define SDSS 53
#define LED_PIN 13
#define PS_ON_PIN -1
#define KILL_PIN -1
#ifdef RAMPS_V_1_0 // RAMPS_V_1_0
#define HEATER_0_PIN 12 // RAMPS 1.0
#define HEATER_BED_PIN -1 // RAMPS 1.0
#define FAN_PIN 11 // RAMPS 1.0
#else // RAMPS_V_1_1 or RAMPS_V_1_2
#define HEATER_0_PIN 10 // RAMPS 1.1
#define HEATER_BED_PIN 8 // RAMPS 1.1
#define FAN_PIN 9 // RAMPS 1.1
#endif
#define HEATER_1_PIN -1
#define HEATER_2_PIN -1
#define TEMP_0_PIN 2 // MUST USE ANALOG INPUT NUMBERING NOT DIGITAL OUTPUT NUMBERING!!!!!!!!!
#define TEMP_1_PIN -1
#define TEMP_2_PIN -1
#define TEMP_BED_PIN 1 // MUST USE ANALOG INPUT NUMBERING NOT DIGITAL OUTPUT NUMBERING!!!!!!!!!
// SPI for Max6675 Thermocouple
#ifndef SDSUPPORT
#define MAX6675_SS 66// Do not use pin 53 if there is even the remote possibility of using Display/SD card
#else
#define MAX6675_SS 66// Do not use pin 49 as this is tied to the switch inside the SD card socket to detect if there is an SD card present
#endif
#ifndef SDSUPPORT
// these pins are defined in the SD library if building with SD support
#define SCK_PIN 52
#define MISO_PIN 50
#define MOSI_PIN 51
#endif
|
macDetailCallback("24a074000000/24",[{"d":"2014-10-01","t":"add","a":"1 Infinite Loop\nCupertino CA 95014\n\n","c":"UNITED STATES","o":"Apple"},{"d":"2015-09-01","t":"change","a":"1 Infinite Loop Cupertino CA US 95014","c":"US","o":"Apple, Inc."}]);
|
import React from "react";
import classnames from "classnames";
import { motion } from "framer-motion";
import "./filter.scss";
const filters = ["Branding", "Packaging", "Communication", "User Experience"];
const filterVariants = {
active: { y: 0, backgroundColor: "#333333" },
inActive: { y: 0, backgroundColor: "#DDDDDD" },
hover: { backgroundColor: "#DDDDDD" },
};
const Filter = ({ active, toggleActive }) => {
return (
<motion.div className="filter-container">
{filters.map((filter, index) => (
<motion.button
key={index}
className={classnames("filter-buttons", {
"active-filter": active.includes(filter),
})}
onClick={() => toggleActive(filter)}
variants={filterVariants}
// animate={active === filter ? 'active' : 'inActive'}
whileTap={{ y: 3 }}
whileHover="hover"
>
{filter}
</motion.button>
))}
</motion.div>
);
};
export { Filter };
|
const INITIAL_STATE = [];
export default function user(state = INITIAL_STATE, action) {
switch (action.type) {
case "@user/CREATE_USER":
return [...state, action.data];
default:
return state;
}
}
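// Usage sketch (store wiring assumed elsewhere):
//   store.dispatch({ type: "@user/CREATE_USER", data: { name: "Ada" } });
//   // the reducer appends action.data, so state becomes [...previousUsers, { name: "Ada" }]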
|
"""
Mujoco Maze environment.
Based on `models`_ and `rllab`_.
.. _models: https://github.com/tensorflow/models/tree/master/research/efficient-hrl
.. _rllab: https://github.com/rll/rllab
"""
import itertools as it
import os
import tempfile
import xml.etree.ElementTree as ET
from typing import Any, List, Optional, Tuple, Type
import gym
import numpy as np
from mujoco_maze import maze_env_utils, maze_task
from mujoco_maze.agent_model import AgentModel
# Directory that contains mujoco xml files.
MODEL_DIR = os.path.dirname(os.path.abspath(__file__)) + "/assets"
class MazeEnv(gym.Env):
def __init__(
self,
model_cls: Type[AgentModel],
maze_task: Type[maze_task.MazeTask] = maze_task.MazeTask,
include_position: bool = True,
maze_height: float = 0.5,
maze_size_scaling: float = 4.0,
inner_reward_scaling: float = 1.0,
restitution_coef: float = 0.8,
task_kwargs: dict = {},
websock_port: Optional[int] = None,
camera_move_x: Optional[float] = None,
camera_move_y: Optional[float] = None,
camera_zoom: Optional[float] = None,
image_shape: Tuple[int, int] = (600, 480),
**kwargs,
) -> None:
self.t = 0 # time steps
self._task = maze_task(maze_size_scaling, **task_kwargs)
self._maze_height = height = maze_height
self._maze_size_scaling = size_scaling = maze_size_scaling
self._inner_reward_scaling = inner_reward_scaling
self._observe_blocks = self._task.OBSERVE_BLOCKS
self._put_spin_near_agent = self._task.PUT_SPIN_NEAR_AGENT
# Observe other objectives
self._observe_balls = self._task.OBSERVE_BALLS
self._top_down_view = self._task.TOP_DOWN_VIEW
self._restitution_coef = restitution_coef
self._maze_structure = structure = self._task.create_maze()
# Elevate the maze to allow for falling.
self.elevated = any(maze_env_utils.MazeCell.CHASM in row for row in structure)
# Are there any movable blocks?
self.blocks = any(any(r.can_move() for r in row) for row in structure)
torso_x, torso_y = self._find_robot()
self._init_torso_x = torso_x
self._init_torso_y = torso_y
self._init_positions = [
(x - torso_x, y - torso_y) for x, y in self._find_all_robots()
]
if model_cls.MANUAL_COLLISION:
if model_cls.RADIUS is None:
raise ValueError("Manual collision needs radius of the model")
self._collision = maze_env_utils.CollisionDetector(
structure,
size_scaling,
torso_x,
torso_y,
model_cls.RADIUS,
)
self._objball_collision = maze_env_utils.CollisionDetector(
structure,
size_scaling,
torso_x,
torso_y,
self._task.OBJECT_BALL_SIZE,
)
else:
self._collision = None
self._xy_to_rowcol = lambda x, y: (
2 + (y + size_scaling / 2) / size_scaling,
2 + (x + size_scaling / 2) / size_scaling,
)
# walls (immovable), chasms (fall), movable blocks
self._view = np.zeros([5, 5, 3])
# Let's create MuJoCo XML
xml_path = os.path.join(MODEL_DIR, model_cls.FILE)
tree = ET.parse(xml_path)
worldbody = tree.find(".//worldbody")
height_offset = 0.0
if self.elevated:
# Increase initial z-pos of ant.
height_offset = height * size_scaling
torso = tree.find(".//body[@name='torso']")
torso.set("pos", f"0 0 {0.75 + height_offset:.2f}")
if self.blocks:
# If there are movable blocks, change simulation settings to perform
# better contact detection.
default = tree.find(".//default")
default.find(".//geom").set("solimp", ".995 .995 .01")
self.movable_blocks = []
self.object_balls = []
for i in range(len(structure)):
for j in range(len(structure[0])):
struct = structure[i][j]
if struct.is_robot() and self._put_spin_near_agent:
struct = maze_env_utils.MazeCell.SPIN
x, y = j * size_scaling - torso_x, i * size_scaling - torso_y
h = height / 2 * size_scaling
size = size_scaling * 0.5
if self.elevated and not struct.is_chasm():
# Create elevated platform.
ET.SubElement(
worldbody,
"geom",
name=f"elevated_{i}_{j}",
pos=f"{x} {y} {h}",
size=f"{size} {size} {h}",
type="box",
material="",
contype="1",
conaffinity="1",
rgba="0.9 0.9 0.9 1",
)
if struct.is_block():
# Unmovable block.
# Offset all coordinates so that robot starts at the origin.
ET.SubElement(
worldbody,
"geom",
name=f"block_{i}_{j}",
pos=f"{x} {y} {h + height_offset}",
size=f"{size} {size} {h}",
type="box",
material="",
contype="1",
conaffinity="1",
rgba="0.4 0.4 0.4 1",
)
elif struct.can_move():
# Movable block.
self.movable_blocks.append(f"movable_{i}_{j}")
_add_movable_block(
worldbody,
struct,
i,
j,
size_scaling,
x,
y,
h,
height_offset,
)
elif struct.is_object_ball():
# Movable Ball
self.object_balls.append(f"objball_{i}_{j}")
if model_cls.OBJBALL_TYPE == "hinge":
_add_objball_hinge(
worldbody,
i,
j,
x,
y,
self._task.OBJECT_BALL_SIZE,
)
elif model_cls.OBJBALL_TYPE == "freejoint":
_add_objball_freejoint(
worldbody,
i,
j,
x,
y,
self._task.OBJECT_BALL_SIZE,
)
else:
raise ValueError(
f"OBJBALL_TYPE is not registered for {model_cls}"
)
torso = tree.find(".//body[@name='torso']")
geoms = torso.findall(".//geom")
for geom in geoms:
if "name" not in geom.attrib:
raise Exception("Every geom of the torso must have a name")
# Set goals
for i, goal in enumerate(self._task.goals):
z = goal.pos[2] if goal.dim >= 3 else 0.0
if goal.custom_size is None:
size = f"{maze_size_scaling * 0.1}"
else:
size = f"{goal.custom_size}"
ET.SubElement(
worldbody,
"site",
name=f"goal_site{i}",
pos=f"{goal.pos[0]} {goal.pos[1]} {z}",
size=size,
rgba=goal.rgb.rgba_str(),
)
_, file_path = tempfile.mkstemp(text=True, suffix=".xml")
tree.write(file_path)
self.world_tree = tree
self.wrapped_env = model_cls(file_path=file_path, **kwargs)
self.observation_space = self._get_obs_space()
self._websock_port = websock_port
self._camera_move_x = camera_move_x
self._camera_move_y = camera_move_y
self._camera_zoom = camera_zoom
self._image_shape = image_shape
self._mj_offscreen_viewer = None
self._websock_server_pipe = None
@property
def has_extended_obs(self) -> bool:
return self._top_down_view or self._observe_blocks or self._observe_balls
def get_ori(self) -> float:
return self.wrapped_env.get_ori()
def _get_obs_space(self) -> gym.spaces.Box:
shape = self._get_obs().shape
high = np.inf * np.ones(shape, dtype=np.float32)
low = -high
# Set velocity limits
wrapped_obs_space = self.wrapped_env.observation_space
high[: wrapped_obs_space.shape[0]] = wrapped_obs_space.high
low[: wrapped_obs_space.shape[0]] = wrapped_obs_space.low
# Set coordinate limits
low[0], high[0], low[1], high[1] = self._xy_limits()
# Set orientation limits
return gym.spaces.Box(low, high)
def _xy_limits(self) -> Tuple[float, float, float, float]:
xmin, ymin, xmax, ymax = 100, 100, -100, -100
structure = self._maze_structure
for i, j in it.product(range(len(structure)), range(len(structure[0]))):
if structure[i][j].is_block():
continue
xmin, xmax = min(xmin, j), max(xmax, j)
ymin, ymax = min(ymin, i), max(ymax, i)
x0, y0 = self._init_torso_x, self._init_torso_y
scaling = self._maze_size_scaling
xmin, xmax = (xmin - 0.5) * scaling - x0, (xmax + 0.5) * scaling - x0
ymin, ymax = (ymin - 0.5) * scaling - y0, (ymax + 0.5) * scaling - y0
return xmin, xmax, ymin, ymax
def get_top_down_view(self) -> np.ndarray:
self._view = np.zeros_like(self._view)
def valid(row, col):
return self._view.shape[0] > row >= 0 and self._view.shape[1] > col >= 0
def update_view(x, y, d, row=None, col=None):
if row is None or col is None:
x = x - self._robot_x
y = y - self._robot_y
row, col = self._xy_to_rowcol(x, y)
update_view(x, y, d, row=row, col=col)
return
row, row_frac, col, col_frac = int(row), row % 1, int(col), col % 1
if row_frac < 0:
row_frac += 1
if col_frac < 0:
col_frac += 1
if valid(row, col):
self._view[row, col, d] += (
min(1.0, row_frac + 0.5) - max(0.0, row_frac - 0.5)
) * (min(1.0, col_frac + 0.5) - max(0.0, col_frac - 0.5))
if valid(row - 1, col):
self._view[row - 1, col, d] += (max(0.0, 0.5 - row_frac)) * (
min(1.0, col_frac + 0.5) - max(0.0, col_frac - 0.5)
)
if valid(row + 1, col):
self._view[row + 1, col, d] += (max(0.0, row_frac - 0.5)) * (
min(1.0, col_frac + 0.5) - max(0.0, col_frac - 0.5)
)
if valid(row, col - 1):
self._view[row, col - 1, d] += (
min(1.0, row_frac + 0.5) - max(0.0, row_frac - 0.5)
) * (max(0.0, 0.5 - col_frac))
if valid(row, col + 1):
self._view[row, col + 1, d] += (
min(1.0, row_frac + 0.5) - max(0.0, row_frac - 0.5)
) * (max(0.0, col_frac - 0.5))
if valid(row - 1, col - 1):
self._view[row - 1, col - 1, d] += (max(0.0, 0.5 - row_frac)) * max(
0.0, 0.5 - col_frac
)
if valid(row - 1, col + 1):
self._view[row - 1, col + 1, d] += (max(0.0, 0.5 - row_frac)) * max(
0.0, col_frac - 0.5
)
if valid(row + 1, col + 1):
self._view[row + 1, col + 1, d] += (max(0.0, row_frac - 0.5)) * max(
0.0, col_frac - 0.5
)
if valid(row + 1, col - 1):
self._view[row + 1, col - 1, d] += (max(0.0, row_frac - 0.5)) * max(
0.0, 0.5 - col_frac
)
# Draw ant.
robot_x, robot_y = self.wrapped_env.get_body_com("torso")[:2]
self._robot_x = robot_x
self._robot_y = robot_y
structure = self._maze_structure
size_scaling = self._maze_size_scaling
# Draw immovable blocks and chasms.
for i in range(len(structure)):
for j in range(len(structure[0])):
if structure[i][j].is_block(): # Wall.
update_view(
j * size_scaling - self._init_torso_x,
i * size_scaling - self._init_torso_y,
0,
)
if structure[i][j].is_chasm(): # Chasm.
update_view(
j * size_scaling - self._init_torso_x,
i * size_scaling - self._init_torso_y,
1,
)
# Draw movable blocks.
for block_name in self.movable_blocks:
block_x, block_y = self.wrapped_env.get_body_com(block_name)[:2]
update_view(block_x, block_y, 2)
return self._view
def _get_obs(self) -> np.ndarray:
wrapped_obs = self.wrapped_env._get_obs()
if self._top_down_view:
view = [self.get_top_down_view().flat]
else:
view = []
additional_obs = []
if self._observe_balls:
for name in self.object_balls:
additional_obs.append(self.wrapped_env.get_body_com(name))
if self._observe_blocks:
for name in self.movable_blocks:
additional_obs.append(self.wrapped_env.get_body_com(name))
obs = np.concatenate([wrapped_obs[:3]] + additional_obs + [wrapped_obs[3:]])
return np.concatenate([obs, *view, np.array([self.t * 0.001])])
def reset(self) -> np.ndarray:
self.t = 0
self.wrapped_env.reset()
# Samples a new goal
if self._task.sample_goals():
self.set_marker()
# Samples a new start position
if len(self._init_positions) > 1:
            # np.random.choice cannot sample from a list of (x, y) tuples,
            # so pick a random index instead.
            xy = self._init_positions[np.random.randint(len(self._init_positions))]
            self.wrapped_env.set_xy(np.array(xy))
return self._get_obs()
def set_marker(self) -> None:
for i, goal in enumerate(self._task.goals):
            idx = self.wrapped_env.model.site_name2id(f"goal_site{i}")
            self.wrapped_env.data.site_xpos[idx][: len(goal.pos)] = goal.pos
def _render_image(self) -> np.ndarray:
self._mj_offscreen_viewer._set_mujoco_buffers()
self._mj_offscreen_viewer.render(*self._image_shape)
pixels = self._mj_offscreen_viewer.read_pixels(*self._image_shape, depth=False)
return np.asarray(pixels[::-1, :, :], dtype=np.uint8)
def _maybe_move_camera(self, viewer: Any) -> None:
from mujoco_py import const
if self._camera_move_x is not None:
viewer.move_camera(const.MOUSE_ROTATE_V, self._camera_move_x, 0.0)
if self._camera_move_y is not None:
viewer.move_camera(const.MOUSE_ROTATE_H, 0.0, self._camera_move_y)
if self._camera_zoom is not None:
viewer.move_camera(const.MOUSE_ZOOM, 0, self._camera_zoom)
def render(self, mode="human", **kwargs) -> Optional[np.ndarray]:
if mode == "human" and self._websock_port is not None:
if self._mj_offscreen_viewer is None:
from mujoco_py import MjRenderContextOffscreen as MjRCO
from mujoco_maze.websock_viewer import start_server
self._mj_offscreen_viewer = MjRCO(self.wrapped_env.sim)
self._maybe_move_camera(self._mj_offscreen_viewer)
self._websock_server_pipe = start_server(self._websock_port)
return self._websock_server_pipe.send(self._render_image())
else:
if self.wrapped_env.viewer is None:
self.wrapped_env.render(mode, **kwargs)
self._maybe_move_camera(self.wrapped_env.viewer)
return self.wrapped_env.render(mode, **kwargs)
@property
def action_space(self):
return self.wrapped_env.action_space
def _find_robot(self) -> Tuple[float, float]:
structure = self._maze_structure
size_scaling = self._maze_size_scaling
for i, j in it.product(range(len(structure)), range(len(structure[0]))):
if structure[i][j].is_robot():
return j * size_scaling, i * size_scaling
raise ValueError("No robot in maze specification.")
def _find_all_robots(self) -> List[Tuple[float, float]]:
structure = self._maze_structure
size_scaling = self._maze_size_scaling
coords = []
for i, j in it.product(range(len(structure)), range(len(structure[0]))):
if structure[i][j].is_robot():
coords.append((j * size_scaling, i * size_scaling))
return coords
    def _objball_positions(self) -> List[np.ndarray]:
return [
self.wrapped_env.get_body_com(name)[:2].copy() for name in self.object_balls
]
def step(self, action: np.ndarray) -> Tuple[np.ndarray, float, bool, dict]:
self.t += 1
if self.wrapped_env.MANUAL_COLLISION:
old_pos = self.wrapped_env.get_xy()
old_objballs = self._objball_positions()
inner_next_obs, inner_reward, _, info = self.wrapped_env.step(action)
new_pos = self.wrapped_env.get_xy()
new_objballs = self._objball_positions()
# Checks that the new_position is in the wall
collision = self._collision.detect(old_pos, new_pos)
if collision is not None:
pos = collision.point + self._restitution_coef * collision.rest()
if self._collision.detect(old_pos, pos) is not None:
# If pos is also not in the wall, we give up computing the position
self.wrapped_env.set_xy(old_pos)
else:
self.wrapped_env.set_xy(pos)
# Do the same check for object balls
for name, old, new in zip(self.object_balls, old_objballs, new_objballs):
collision = self._objball_collision.detect(old, new)
if collision is not None:
pos = collision.point + self._restitution_coef * collision.rest()
if self._objball_collision.detect(old, pos) is not None:
pos = old
idx = self.wrapped_env.model.body_name2id(name)
self.wrapped_env.data.xipos[idx][:2] = pos
else:
inner_next_obs, inner_reward, _, info = self.wrapped_env.step(action)
next_obs = self._get_obs()
inner_reward = self._inner_reward_scaling * inner_reward
outer_reward = self._task.reward(next_obs)
done = self._task.termination(next_obs)
info["position"] = self.wrapped_env.get_xy()
return next_obs, inner_reward + outer_reward, done, info
def close(self) -> None:
self.wrapped_env.close()
if self._websock_server_pipe is not None:
self._websock_server_pipe.send(None)
def _add_objball_hinge(
worldbody: ET.Element,
    i: int,
    j: int,
x: float,
y: float,
size: float,
) -> None:
body = ET.SubElement(worldbody, "body", name=f"objball_{i}_{j}", pos=f"{x} {y} 0")
mass = 0.0001 * (size ** 3)
ET.SubElement(
body,
"geom",
type="sphere",
name=f"objball_{i}_{j}_geom",
size=f"{size}", # Radius
pos=f"0.0 0.0 {size}", # Z = size so that this ball can move!!
rgba=maze_task.BLUE.rgba_str(),
contype="1",
conaffinity="1",
solimp="0.9 0.99 0.001",
mass=f"{mass}",
)
ET.SubElement(
body,
"joint",
name=f"objball_{i}_{j}_x",
axis="1 0 0",
pos="0 0 0",
type="slide",
)
ET.SubElement(
body,
"joint",
name=f"objball_{i}_{j}_y",
axis="0 1 0",
pos="0 0 0",
type="slide",
)
ET.SubElement(
body,
"joint",
name=f"objball_{i}_{j}_rot",
axis="0 0 1",
pos="0 0 0",
type="hinge",
limited="false",
)
def _add_objball_freejoint(
worldbody: ET.Element,
    i: int,
    j: int,
x: float,
y: float,
size: float,
) -> None:
body = ET.SubElement(worldbody, "body", name=f"objball_{i}_{j}", pos=f"{x} {y} 0")
ET.SubElement(
body,
"geom",
type="sphere",
name=f"objball_{i}_{j}_geom",
size=f"{size}", # Radius
pos=f"0.0 0.0 {size}", # Z = size so that this ball can move!!
rgba=maze_task.BLUE.rgba_str(),
contype="1",
conaffinity="1",
solimp="0.9 0.99 0.001",
)
ET.SubElement(body, "freejoint", name=f"objball_{i}_{j}_root")
def _add_movable_block(
worldbody: ET.Element,
struct: maze_env_utils.MazeCell,
    i: int,
    j: int,
size_scaling: float,
x: float,
y: float,
h: float,
height_offset: float,
) -> None:
falling = struct.can_move_z()
if struct.can_spin():
h *= 0.1
x += size_scaling * 0.25
shrink = 0.1
elif falling:
# The "falling" blocks are shrunk slightly and increased in mass to
# ensure it can fall easily through a gap in the platform blocks.
shrink = 0.99
elif struct.is_half_block():
shrink = 0.5
else:
shrink = 1.0
size = size_scaling * 0.5 * shrink
movable_body = ET.SubElement(
worldbody,
"body",
name=f"movable_{i}_{j}",
pos=f"{x} {y} {h}",
)
ET.SubElement(
movable_body,
"geom",
name=f"block_{i}_{j}",
pos="0 0 0",
size=f"{size} {size} {h}",
type="box",
material="",
mass="0.001" if falling else "0.0002",
contype="1",
conaffinity="1",
rgba="0.9 0.1 0.1 1",
)
if struct.can_move_x():
ET.SubElement(
movable_body,
"joint",
axis="1 0 0",
name=f"movable_x_{i}_{j}",
armature="0",
damping="0.0",
limited="true" if falling else "false",
range=f"{-size_scaling} {size_scaling}",
margin="0.01",
pos="0 0 0",
type="slide",
)
if struct.can_move_y():
ET.SubElement(
movable_body,
"joint",
armature="0",
axis="0 1 0",
damping="0.0",
limited="true" if falling else "false",
range=f"{-size_scaling} {size_scaling}",
margin="0.01",
name=f"movable_y_{i}_{j}",
pos="0 0 0",
type="slide",
)
if struct.can_move_z():
ET.SubElement(
movable_body,
"joint",
armature="0",
axis="0 0 1",
damping="0.0",
limited="true",
range=f"{-height_offset} 0",
margin="0.01",
name=f"movable_z_{i}_{j}",
pos="0 0 0",
type="slide",
)
if struct.can_spin():
ET.SubElement(
movable_body,
"joint",
armature="0",
axis="0 0 1",
damping="0.0",
limited="false",
name=f"spinable_{i}_{j}",
pos="0 0 0",
type="ball",
)
|
/* eslint valid-jsdoc: "off" */
'use strict';
/**
* @param {Egg.EggAppInfo} appInfo app info
*/
module.exports = appInfo => {
/**
* built-in config
* @type {Egg.EggAppConfig}
**/
const config = exports = {};
// cookie signing key: change this to your own value and keep it secret
config.keys = appInfo.name + '_1571897381700_2913';
// add your middleware config here
config.middleware = [];
// add your user config here
const userConfig = {
// myAppName: 'egg',
};
return {
...config,
...userConfig,
};
};
|
/**
* @fileoverview
* @enhanceable
* @suppress {messageConventions} JS Compiler reports an error if a variable or
* field starts with 'MSG_' and isn't a translatable message.
* @public
*/
// GENERATED CODE -- DO NOT EDIT!
var jspb = require('google-protobuf');
var goog = jspb;
var global = Function('return this')();
var google_ads_googleads_v1_enums_campaign_shared_set_status_pb = require('../../../../../google/ads/googleads/v1/enums/campaign_shared_set_status_pb.js');
var google_protobuf_wrappers_pb = require('google-protobuf/google/protobuf/wrappers_pb.js');
var google_api_annotations_pb = require('../../../../../google/api/annotations_pb.js');
goog.exportSymbol('proto.google.ads.googleads.v1.resources.CampaignSharedSet', null, global);
/**
* Generated by JsPbCodeGenerator.
* @param {Array=} opt_data Optional initial data array, typically from a
* server response, or constructed directly in Javascript. The array is used
* in place and becomes part of the constructed object. It is not cloned.
* If no data is provided, the constructed object will be empty, but still
* valid.
* @extends {jspb.Message}
* @constructor
*/
proto.google.ads.googleads.v1.resources.CampaignSharedSet = function(opt_data) {
jspb.Message.initialize(this, opt_data, 0, -1, null, null);
};
goog.inherits(proto.google.ads.googleads.v1.resources.CampaignSharedSet, jspb.Message);
if (goog.DEBUG && !COMPILED) {
proto.google.ads.googleads.v1.resources.CampaignSharedSet.displayName = 'proto.google.ads.googleads.v1.resources.CampaignSharedSet';
}
if (jspb.Message.GENERATE_TO_OBJECT) {
/**
* Creates an object representation of this proto suitable for use in Soy templates.
* Field names that are reserved in JavaScript will be renamed to pb_name.
* To access a reserved field use foo.pb_<name>, e.g., foo.pb_default.
* For the list of reserved names please see:
* com.google.apps.jspb.JsClassTemplate.JS_RESERVED_WORDS.
* @param {boolean=} opt_includeInstance Whether to include the JSPB instance
* for transitional soy proto support: http://goto/soy-param-migration
* @return {!Object}
*/
proto.google.ads.googleads.v1.resources.CampaignSharedSet.prototype.toObject = function(opt_includeInstance) {
return proto.google.ads.googleads.v1.resources.CampaignSharedSet.toObject(opt_includeInstance, this);
};
/**
* Static version of the {@see toObject} method.
* @param {boolean|undefined} includeInstance Whether to include the JSPB
* instance for transitional soy proto support:
* http://goto/soy-param-migration
* @param {!proto.google.ads.googleads.v1.resources.CampaignSharedSet} msg The msg instance to transform.
* @return {!Object}
* @suppress {unusedLocalVariables} f is only used for nested messages
*/
proto.google.ads.googleads.v1.resources.CampaignSharedSet.toObject = function(includeInstance, msg) {
var f, obj = {
resourceName: jspb.Message.getFieldWithDefault(msg, 1, ""),
campaign: (f = msg.getCampaign()) && google_protobuf_wrappers_pb.StringValue.toObject(includeInstance, f),
sharedSet: (f = msg.getSharedSet()) && google_protobuf_wrappers_pb.StringValue.toObject(includeInstance, f),
status: jspb.Message.getFieldWithDefault(msg, 2, 0)
};
if (includeInstance) {
obj.$jspbMessageInstance = msg;
}
return obj;
};
}
/**
* Deserializes binary data (in protobuf wire format).
* @param {jspb.ByteSource} bytes The bytes to deserialize.
* @return {!proto.google.ads.googleads.v1.resources.CampaignSharedSet}
*/
proto.google.ads.googleads.v1.resources.CampaignSharedSet.deserializeBinary = function(bytes) {
var reader = new jspb.BinaryReader(bytes);
var msg = new proto.google.ads.googleads.v1.resources.CampaignSharedSet;
return proto.google.ads.googleads.v1.resources.CampaignSharedSet.deserializeBinaryFromReader(msg, reader);
};
/**
* Deserializes binary data (in protobuf wire format) from the
* given reader into the given message object.
* @param {!proto.google.ads.googleads.v1.resources.CampaignSharedSet} msg The message object to deserialize into.
* @param {!jspb.BinaryReader} reader The BinaryReader to use.
* @return {!proto.google.ads.googleads.v1.resources.CampaignSharedSet}
*/
proto.google.ads.googleads.v1.resources.CampaignSharedSet.deserializeBinaryFromReader = function(msg, reader) {
while (reader.nextField()) {
if (reader.isEndGroup()) {
break;
}
var field = reader.getFieldNumber();
switch (field) {
case 1:
var value = /** @type {string} */ (reader.readString());
msg.setResourceName(value);
break;
case 3:
var value = new google_protobuf_wrappers_pb.StringValue;
reader.readMessage(value,google_protobuf_wrappers_pb.StringValue.deserializeBinaryFromReader);
msg.setCampaign(value);
break;
case 4:
var value = new google_protobuf_wrappers_pb.StringValue;
reader.readMessage(value,google_protobuf_wrappers_pb.StringValue.deserializeBinaryFromReader);
msg.setSharedSet(value);
break;
case 2:
var value = /** @type {!proto.google.ads.googleads.v1.enums.CampaignSharedSetStatusEnum.CampaignSharedSetStatus} */ (reader.readEnum());
msg.setStatus(value);
break;
default:
reader.skipField();
break;
}
}
return msg;
};
/**
* Serializes the message to binary data (in protobuf wire format).
* @return {!Uint8Array}
*/
proto.google.ads.googleads.v1.resources.CampaignSharedSet.prototype.serializeBinary = function() {
var writer = new jspb.BinaryWriter();
proto.google.ads.googleads.v1.resources.CampaignSharedSet.serializeBinaryToWriter(this, writer);
return writer.getResultBuffer();
};
/**
* Serializes the given message to binary data (in protobuf wire
* format), writing to the given BinaryWriter.
* @param {!proto.google.ads.googleads.v1.resources.CampaignSharedSet} message
* @param {!jspb.BinaryWriter} writer
* @suppress {unusedLocalVariables} f is only used for nested messages
*/
proto.google.ads.googleads.v1.resources.CampaignSharedSet.serializeBinaryToWriter = function(message, writer) {
var f = undefined;
f = message.getResourceName();
if (f.length > 0) {
writer.writeString(
1,
f
);
}
f = message.getCampaign();
if (f != null) {
writer.writeMessage(
3,
f,
google_protobuf_wrappers_pb.StringValue.serializeBinaryToWriter
);
}
f = message.getSharedSet();
if (f != null) {
writer.writeMessage(
4,
f,
google_protobuf_wrappers_pb.StringValue.serializeBinaryToWriter
);
}
f = message.getStatus();
if (f !== 0.0) {
writer.writeEnum(
2,
f
);
}
};
/**
* optional string resource_name = 1;
* @return {string}
*/
proto.google.ads.googleads.v1.resources.CampaignSharedSet.prototype.getResourceName = function() {
return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 1, ""));
};
/** @param {string} value */
proto.google.ads.googleads.v1.resources.CampaignSharedSet.prototype.setResourceName = function(value) {
jspb.Message.setProto3StringField(this, 1, value);
};
/**
* optional google.protobuf.StringValue campaign = 3;
* @return {?proto.google.protobuf.StringValue}
*/
proto.google.ads.googleads.v1.resources.CampaignSharedSet.prototype.getCampaign = function() {
return /** @type{?proto.google.protobuf.StringValue} */ (
jspb.Message.getWrapperField(this, google_protobuf_wrappers_pb.StringValue, 3));
};
/** @param {?proto.google.protobuf.StringValue|undefined} value */
proto.google.ads.googleads.v1.resources.CampaignSharedSet.prototype.setCampaign = function(value) {
jspb.Message.setWrapperField(this, 3, value);
};
proto.google.ads.googleads.v1.resources.CampaignSharedSet.prototype.clearCampaign = function() {
this.setCampaign(undefined);
};
/**
* Returns whether this field is set.
* @return {!boolean}
*/
proto.google.ads.googleads.v1.resources.CampaignSharedSet.prototype.hasCampaign = function() {
return jspb.Message.getField(this, 3) != null;
};
/**
* optional google.protobuf.StringValue shared_set = 4;
* @return {?proto.google.protobuf.StringValue}
*/
proto.google.ads.googleads.v1.resources.CampaignSharedSet.prototype.getSharedSet = function() {
return /** @type{?proto.google.protobuf.StringValue} */ (
jspb.Message.getWrapperField(this, google_protobuf_wrappers_pb.StringValue, 4));
};
/** @param {?proto.google.protobuf.StringValue|undefined} value */
proto.google.ads.googleads.v1.resources.CampaignSharedSet.prototype.setSharedSet = function(value) {
jspb.Message.setWrapperField(this, 4, value);
};
proto.google.ads.googleads.v1.resources.CampaignSharedSet.prototype.clearSharedSet = function() {
this.setSharedSet(undefined);
};
/**
* Returns whether this field is set.
* @return {!boolean}
*/
proto.google.ads.googleads.v1.resources.CampaignSharedSet.prototype.hasSharedSet = function() {
return jspb.Message.getField(this, 4) != null;
};
/**
* optional google.ads.googleads.v1.enums.CampaignSharedSetStatusEnum.CampaignSharedSetStatus status = 2;
* @return {!proto.google.ads.googleads.v1.enums.CampaignSharedSetStatusEnum.CampaignSharedSetStatus}
*/
proto.google.ads.googleads.v1.resources.CampaignSharedSet.prototype.getStatus = function() {
return /** @type {!proto.google.ads.googleads.v1.enums.CampaignSharedSetStatusEnum.CampaignSharedSetStatus} */ (jspb.Message.getFieldWithDefault(this, 2, 0));
};
/** @param {!proto.google.ads.googleads.v1.enums.CampaignSharedSetStatusEnum.CampaignSharedSetStatus} value */
proto.google.ads.googleads.v1.resources.CampaignSharedSet.prototype.setStatus = function(value) {
jspb.Message.setProto3EnumField(this, 2, value);
};
goog.object.extend(exports, proto.google.ads.googleads.v1.resources);
|
// Basic Closure
class Basic {
getDate() { // date specifier appended to the Overpass Query
let seldate = $("#Select_Date").val();
return seldate ? '[date:"' + (new Date(seldate)).toISOString() + '"]' : "";
}
formatDate(date, format) {
// date format
try {
format = format.replace(/YYYY/g, date.getFullYear());
format = format.replace(/YY/g, date.getFullYear().toString().slice(-2));
format = format.replace(/MM/g, ('0' + (date.getMonth() + 1)).slice(-2));
format = format.replace(/DD/g, ('0' + date.getDate()).slice(-2));
format = format.replace(/hh/g, ('0' + date.getHours()).slice(-2));
format = format.replace(/mm/g, ('0' + date.getMinutes()).slice(-2));
format = format.replace(/ss/g, ('0' + date.getSeconds()).slice(-2));
} catch {
format = "";
};
return format;
}
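// Example (follows the token replacements above):
// new Basic().formatDate(new Date(2020, 0, 2), "YYYY-MM-DD") returns "2020-01-02".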
dataURItoBlob(dataURI) { // convert a DataURI to a Blob (handles file sizes over 2MB)
const b64 = atob(dataURI.split(',')[1]);
const u8 = Uint8Array.from(b64.split(""), function (e) { return e.charCodeAt() });
return new Blob([u8], { type: "image/png" });
}
concatTwoDimensionalArray(array1, array2, axis) { // merge two 2-dimensional arrays
if (axis != 1) axis = 0;
var array3 = [];
if (axis == 0) { // vertical (row-wise) concatenation
array3 = array1.slice();
for (var i = 0; i < array2.length; i++) {
array3.push(array2[i]);
}
} else { // horizontal (column-wise) concatenation
for (var i = 0; i < array1.length; i++) {
array3[i] = array1[i].concat(array2[i]);
};
};
return array3;
}
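// Example: concatTwoDimensionalArray([[1], [2]], [[3], [4]], 0) returns [[1], [2], [3], [4]];
// with axis = 1 it returns [[1, 3], [2, 4]].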
unicodeUnescape(str) { // convert \uXXXX escape sequences to a string
let result = "", strs = str.match(/\\u.{4}/ig);
if (!strs) return '';
for (var i = 0, len = strs.length; i < len; i++) {
result += String.fromCharCode(strs[i].replace('\\u', '0x'));
};
return result;
}
uniq(array) {
let elems = new Map();
for (let elem of array) {
elems.set(elem, true); // setting the same key repeatedly is harmless
};
return Array.from(elems.keys());
}
getWikipedia(lang, url) { // get wikipedia contents
return new Promise((resolve, reject) => {
let encurl = encodeURI(url);
encurl = "https://" + lang + "." + Conf.osm.wikipedia.api + encurl + "?origin=*";
console.log(encurl);
$.get({ url: encurl, dataType: "json" }, function (data) {
console.log(data.extract);
resolve([data.extract, data.thumbnail]);
});
});
}
isSmartPhone() {
if (window.matchMedia && window.matchMedia('(max-device-width: 640px)').matches) {
return true;
} else {
return false;
};
}
convLinkTag(url) {
return (/^(ftp|http|https):\/\/[^ "]+$/.test(url)) ? `<a href="${url}" target="_blank" rel="noopener noreferrer">${url}</a>` : "";
}
getStyleSheetValue(cssname, property) {
let element = document.querySelector(cssname);
if (!element || !property) return null;
let style = window.getComputedStyle(element);
return style.getPropertyValue(property);
}
async makeSHA256(text) {
const uint8 = new TextEncoder().encode(text);
const digest = await crypto.subtle.digest('SHA-256', uint8);
return Array.from(new Uint8Array(digest)).map(v => v.toString(16).padStart(2, '0')).join('');
}
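// Example: await new Basic().makeSHA256("abc") resolves to the standard SHA-256 test
// vector "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad".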
// getLatLng(keyword, (address) => { return address }, () => { return [] }
};
|
glift.utilTest = function() {
module('glift.utilTest');
var util = glift.util;
test('typeOf test', function() {
deepEqual(util.typeOf({}), 'object', 'expect object');
});
test('inbounds', function() {
ok(util.inBounds(5, 19), '5 should be between 0 and 19');
});
test('inbounds', function() {
ok(!util.inBounds(22, 19), '22 should not be between 0 and 19');
});
test('outbounds', function() {
ok(util.outBounds(19, 19), '19 is out of bounds (inclusive)');
});
test('outbounds', function() {
ok(!util.outBounds(2, 19), '2 is within bounds');
});
test('Copy obj', function() {
var testObj = {
inner: {
foo: 'bar',
fizz: ['baz']
},
isTrue: true,
flam: 1234,
flag: function() { return 'fizzbizz'; }
};
var clone = glift.util.simpleClone(testObj);
ok(testObj !== clone);
deepEqual(clone, testObj);
});
};
|
'use strict';
Object.defineProperty(exports, '__esModule', { value: true });
var prefix = 'tb';
var iconName = 'chart-pie-3';
var width = 512;
var height = 512;
var ligatures = [];
var unicode = null;
var svgPathData = 'M 255.96094 42.660156 A 21.33 21.33 0 0 0 255.48633 42.666016 C 138.15419 42.92401 42.660153 138.5699 42.660156 255.96094 C 42.660156 373.51063 138.41124 469.25977 255.96094 469.25977 C 373.3376 469.25977 468.97261 373.79094 469.25391 256.47852 C 469.25393 256.47003 469.25389 256.46161 469.25391 256.45312 A 21.33 21.33 0 0 0 469.25977 255.96094 A 21.33 21.33 0 0 0 469.25391 255.48633 C 468.99629 138.32713 373.63236 42.946939 256.47852 42.666016 C 256.47003 42.665995 256.46161 42.666035 256.45312 42.666016 A 21.33 21.33 0 0 0 255.96094 42.660156 z M 234.63086 86.636719 L 234.63086 246.06641 L 112.84375 349.11719 C 95.431014 322.35992 85.320312 290.38072 85.320312 255.96094 C 85.320311 168.69306 150.267 97.080641 234.63086 86.636719 z M 277.28906 86.636719 C 354.66744 96.215009 415.70429 157.25252 425.2832 234.63086 L 277.28906 234.63086 L 277.28906 86.636719 z M 263.77344 277.28906 L 425.2832 277.28906 C 414.84038 361.6536 343.22947 426.59961 255.96094 426.59961 C 211.309 426.59961 170.75999 409.58998 140.40039 381.68359 L 263.77344 277.28906 z ';
exports.definition = {
prefix: prefix,
iconName: iconName,
icon: [
width,
height,
ligatures,
unicode,
svgPathData
]};
exports.tbChartPie3 = exports.definition;
exports.prefix = prefix;
exports.iconName = iconName;
exports.width = width;
exports.height = height;
exports.ligatures = ligatures;
exports.unicode = unicode;
exports.svgPathData = svgPathData; |
import { combineReducers } from 'redux';
import GameReducer from '../redux/reducers/GameReducer'
import RouteReducer from '../redux/reducers/RouteReducer'
export default combineReducers({
GameData: GameReducer,
Routes: RouteReducer
})
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017 Ashley Whetter <[email protected]>
# Copyright (c) 2016 Yuri Bochkarev <[email protected]>
# Copyright (c) 2016 Claudiu Popa <[email protected]>
# Copyright (c) 2016 Glenn Matthews <[email protected]>
# Copyright (c) 2016 Moises Lopez <[email protected]>
# Copyright (c) 2017 hippo91 <[email protected]>
# Copyright (c) 2017 Mitar <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""Utility methods for docstring checking."""
from __future__ import absolute_import, print_function
import re
import astroid
from pylint.checkers import utils
def space_indentation(s):
"""The number of leading spaces in a string
:param str s: input string
:rtype: int
:return: number of leading spaces
"""
return len(s) - len(s.lstrip(' '))
def get_setters_property_name(node):
"""Get the name of the property that the given node is a setter for.
:param node: The node to get the property name for.
:type node: astroid.FunctionDef
:rtype: str or None
:returns: The name of the property that the node is a setter for,
or None if one could not be found.
"""
decorators = node.decorators.nodes if node.decorators else []
for decorator in decorators:
if (isinstance(decorator, astroid.Attribute) and
decorator.attrname == "setter" and
isinstance(decorator.expr, astroid.Name)):
return decorator.expr.name
return None
def get_setters_property(node):
"""Get the property node for the given setter node.
:param node: The node to get the property for.
:type node: astroid.FunctionDef
:rtype: astroid.FunctionDef or None
:returns: The node relating to the property of the given setter node,
or None if one could not be found.
"""
property_ = None
property_name = get_setters_property_name(node)
class_node = utils.node_frame_class(node)
if property_name and class_node:
class_attrs = class_node.getattr(node.name)
for attr in class_attrs:
if utils.decorated_with_property(attr):
property_ = attr
break
return property_
def returns_something(return_node):
"""Check if a return node returns a value other than None.
:param return_node: The return node to check.
:type return_node: astroid.Return
:rtype: bool
:return: True if the return node returns a value other than None,
False otherwise.
"""
returns = return_node.value
if returns is None:
return False
return not (isinstance(returns, astroid.Const) and returns.value is None)
def possible_exc_types(node):
"""
Gets all of the possible raised exception types for the given raise node.
.. note::
Caught exception types are ignored.
:param node: The raise node to find exception types for.
:type node: astroid.node_classes.NodeNG
:returns: A set of exception types possibly raised by :param:`node`.
:rtype: set(str)
"""
excs = []
if isinstance(node.exc, astroid.Name):
inferred = utils.safe_infer(node.exc)
if inferred:
excs = [inferred.name]
elif (isinstance(node.exc, astroid.Call) and
isinstance(node.exc.func, astroid.Name)):
target = utils.safe_infer(node.exc.func)
if isinstance(target, astroid.ClassDef):
excs = [target.name]
elif isinstance(target, astroid.FunctionDef):
for ret in target.nodes_of_class(astroid.Return):
if ret.frame() != target:
# return from inner function - ignore it
continue
val = utils.safe_infer(ret.value)
if (val and isinstance(val, (astroid.Instance, astroid.ClassDef))
and utils.inherit_from_std_ex(val)):
excs.append(val.name)
elif node.exc is None:
handler = node.parent
while handler and not isinstance(handler, astroid.ExceptHandler):
handler = handler.parent
if handler and handler.type:
inferred_excs = astroid.unpack_infer(handler.type)
excs = (exc.name for exc in inferred_excs
if exc is not astroid.Uninferable)
try:
return set(exc for exc in excs if not utils.node_ignores_exception(node, exc))
except astroid.InferenceError:
return ()
def docstringify(docstring):
for docstring_type in [SphinxDocstring, EpytextDocstring,
GoogleDocstring, NumpyDocstring]:
instance = docstring_type(docstring)
if instance.is_valid():
return instance
return Docstring(docstring)
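# Usage sketch (hypothetical docstring): docstringify returns the first style whose
# is_valid() matches, e.g.
#   docstringify(":param x: a value\n:returns: x squared")  -> SphinxDocstring
# while free-form text falls back to the plain Docstring class below.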
class Docstring(object):
re_for_parameters_see = re.compile(r"""
For\s+the\s+(other)?\s*parameters\s*,\s+see
""", re.X | re.S)
supports_yields = None
"""True if the docstring supports a "yield" section.
False if the docstring uses the returns section to document generators.
"""
# These methods are designed to be overridden
# pylint: disable=no-self-use
def __init__(self, doc):
doc = doc or ""
self.doc = doc.expandtabs()
def is_valid(self):
return False
def exceptions(self):
return set()
def has_params(self):
return False
def has_returns(self):
return False
def has_rtype(self):
return False
def has_property_returns(self):
return False
def has_property_type(self):
return False
def has_yields(self):
return False
def has_yields_type(self):
return False
def match_param_docs(self):
return set(), set()
def params_documented_elsewhere(self):
return self.re_for_parameters_see.search(self.doc) is not None
class SphinxDocstring(Docstring):
re_type = r"[\w\.]+"
re_simple_container_type = r"""
{type} # a container type
[\(\[] [^\n\s]+ [\)\]] # with the contents of the container
""".format(type=re_type)
re_xref = r"""
(?::\w+:)? # optional tag
`{0}` # what to reference
""".format(re_type)
re_param_raw = r"""
: # initial colon
(?: # Sphinx keywords
param|parameter|
arg|argument|
key|keyword
)
\s+ # whitespace
(?: # optional type declaration
({type}|{container_type})
\s+
)?
(\w+) # Parameter name
\s* # whitespace
: # final colon
""".format(type=re_type, container_type=re_simple_container_type)
re_param_in_docstring = re.compile(re_param_raw, re.X | re.S)
re_type_raw = r"""
:type # Sphinx keyword
\s+ # whitespace
({type}) # Parameter name
\s* # whitespace
: # final colon
""".format(type=re_type)
re_type_in_docstring = re.compile(re_type_raw, re.X | re.S)
re_property_type_raw = r"""
:type: # Sphinx keyword
\s+ # whitespace
{type} # type declaration
""".format(type=re_type)
re_property_type_in_docstring = re.compile(
re_property_type_raw, re.X | re.S
)
re_raise_raw = r"""
: # initial colon
(?: # Sphinx keyword
raises?|
except|exception
)
\s+ # whitespace
(?: # type declaration
({type})
\s+
)?
(\w+) # exception name
\s* # whitespace
: # final colon
""".format(type=re_type)
re_raise_in_docstring = re.compile(re_raise_raw, re.X | re.S)
re_rtype_in_docstring = re.compile(r":rtype:")
re_returns_in_docstring = re.compile(r":returns?:")
supports_yields = False
def is_valid(self):
return bool(self.re_param_in_docstring.search(self.doc) or
self.re_raise_in_docstring.search(self.doc) or
self.re_rtype_in_docstring.search(self.doc) or
self.re_returns_in_docstring.search(self.doc) or
self.re_property_type_in_docstring.search(self.doc))
def exceptions(self):
types = set()
for match in re.finditer(self.re_raise_in_docstring, self.doc):
raise_type = match.group(2)
types.add(raise_type)
return types
def has_params(self):
if not self.doc:
return False
return self.re_param_in_docstring.search(self.doc) is not None
def has_returns(self):
if not self.doc:
return False
return bool(self.re_returns_in_docstring.search(self.doc))
def has_rtype(self):
if not self.doc:
return False
return bool(self.re_rtype_in_docstring.search(self.doc))
def has_property_returns(self):
if not self.doc:
return False
# The summary line is the return doc,
# so the first line must not be a known directive.
return not self.doc.lstrip().startswith(':')
def has_property_type(self):
if not self.doc:
return False
return bool(self.re_property_type_in_docstring.search(self.doc))
def match_param_docs(self):
params_with_doc = set()
params_with_type = set()
for match in re.finditer(self.re_param_in_docstring, self.doc):
name = match.group(2)
params_with_doc.add(name)
param_type = match.group(1)
if param_type is not None:
params_with_type.add(name)
params_with_type.update(re.findall(self.re_type_in_docstring, self.doc))
return params_with_doc, params_with_type
class EpytextDocstring(SphinxDocstring):
"""
Epytext is similar to Sphinx. See the docs:
http://epydoc.sourceforge.net/epytext.html
http://epydoc.sourceforge.net/fields.html#fields
It's used in PyCharm:
https://www.jetbrains.com/help/pycharm/2016.1/creating-documentation-comments.html#d848203e314
https://www.jetbrains.com/help/pycharm/2016.1/using-docstrings-to-specify-types.html
"""
re_param_in_docstring = re.compile(
SphinxDocstring.re_param_raw.replace(':', '@', 1),
re.X | re.S)
re_type_in_docstring = re.compile(
SphinxDocstring.re_type_raw.replace(':', '@', 1),
re.X | re.S)
re_property_type_in_docstring = re.compile(
SphinxDocstring.re_property_type_raw.replace(':', '@', 1),
re.X | re.S)
re_raise_in_docstring = re.compile(
SphinxDocstring.re_raise_raw.replace(':', '@', 1),
re.X | re.S)
re_rtype_in_docstring = re.compile(r"""
@ # initial "at" symbol
(?: # Epytext keyword
rtype|returntype
)
: # final colon
""", re.X | re.S)
re_returns_in_docstring = re.compile(r"@returns?:")
def has_property_returns(self):
if not self.doc:
return False
# If this is a property docstring, the summary is the return doc.
if self.has_property_type():
# The summary line is the return doc,
# so the first line must not be a known directive.
return not self.doc.lstrip().startswith('@')
return False
class GoogleDocstring(Docstring):
re_type = SphinxDocstring.re_type
re_xref = SphinxDocstring.re_xref
re_container_type = r"""
(?:{type}|{xref}) # a container type
[\(\[] [^\n]+ [\)\]] # with the contents of the container
""".format(type=re_type, xref=re_xref)
re_multiple_type = r"""
(?:{container_type}|{type}|{xref})
(?:\s+or\s+(?:{container_type}|{type}|{xref}))*
""".format(type=re_type, xref=re_xref, container_type=re_container_type)
_re_section_template = r"""
^([ ]*) {0} \s*: \s*$ # Google parameter header
( .* ) # section
"""
re_param_section = re.compile(
_re_section_template.format(r"(?:Args|Arguments|Parameters)"),
re.X | re.S | re.M
)
re_keyword_param_section = re.compile(
_re_section_template.format(r"Keyword\s(?:Args|Arguments|Parameters)"),
re.X | re.S | re.M
)
re_param_line = re.compile(r"""
\s* \*{{0,2}}(\w+) # identifier potentially with asterisks
\s* ( [(]
{type}
[)] )? \s* : # optional type declaration
\s* (.*) # beginning of optional description
""".format(
type=re_multiple_type,
), re.X | re.S | re.M)
re_raise_section = re.compile(
_re_section_template.format(r"Raises"),
re.X | re.S | re.M
)
re_raise_line = re.compile(r"""
\s* ({type}) \s* : # identifier
\s* (.*) # beginning of optional description
""".format(type=re_type), re.X | re.S | re.M)
re_returns_section = re.compile(
_re_section_template.format(r"Returns?"),
re.X | re.S | re.M
)
re_returns_line = re.compile(r"""
\s* ({type}:)? # identifier
\s* (.*) # beginning of description
""".format(
type=re_multiple_type,
), re.X | re.S | re.M)
re_property_returns_line = re.compile(r"""
^{type}: # identifier
\s* (.*) # Summary line / description
""".format(
type=re_multiple_type,
), re.X | re.S | re.M)
re_yields_section = re.compile(
_re_section_template.format(r"Yields?"),
re.X | re.S | re.M
)
re_yields_line = re_returns_line
supports_yields = True
def is_valid(self):
return bool(self.re_param_section.search(self.doc) or
self.re_raise_section.search(self.doc) or
self.re_returns_section.search(self.doc) or
self.re_yields_section.search(self.doc) or
self.re_property_returns_line.search(self._first_line()))
def has_params(self):
if not self.doc:
return False
return self.re_param_section.search(self.doc) is not None
def has_returns(self):
if not self.doc:
return False
entries = self._parse_section(self.re_returns_section)
for entry in entries:
match = self.re_returns_line.match(entry)
if not match:
continue
return_desc = match.group(2)
if return_desc:
return True
return False
def has_rtype(self):
if not self.doc:
return False
entries = self._parse_section(self.re_returns_section)
for entry in entries:
match = self.re_returns_line.match(entry)
if not match:
continue
return_type = match.group(1)
if return_type:
return True
return False
def has_property_returns(self):
# The summary line is the return doc,
# so the first line must not be a known directive.
first_line = self._first_line()
return not bool(self.re_param_section.search(first_line) or
self.re_raise_section.search(first_line) or
self.re_returns_section.search(first_line) or
self.re_yields_section.search(first_line))
def has_property_type(self):
if not self.doc:
return False
return bool(self.re_property_returns_line.match(self._first_line()))
def has_yields(self):
if not self.doc:
return False
entries = self._parse_section(self.re_yields_section)
for entry in entries:
match = self.re_yields_line.match(entry)
if not match:
continue
yield_desc = match.group(2)
if yield_desc:
return True
return False
def has_yields_type(self):
if not self.doc:
return False
entries = self._parse_section(self.re_yields_section)
for entry in entries:
match = self.re_yields_line.match(entry)
if not match:
continue
yield_type = match.group(1)
if yield_type:
return True
return False
def exceptions(self):
types = set()
entries = self._parse_section(self.re_raise_section)
for entry in entries:
match = self.re_raise_line.match(entry)
if not match:
continue
exc_type = match.group(1)
exc_desc = match.group(2)
if exc_desc:
types.add(exc_type)
return types
def match_param_docs(self):
params_with_doc = set()
params_with_type = set()
entries = self._parse_section(self.re_param_section)
entries.extend(self._parse_section(self.re_keyword_param_section))
for entry in entries:
match = self.re_param_line.match(entry)
if not match:
continue
param_name = match.group(1)
param_type = match.group(2)
param_desc = match.group(3)
if param_type:
params_with_type.add(param_name)
if param_desc:
params_with_doc.add(param_name)
return params_with_doc, params_with_type
def _first_line(self):
return self.doc.lstrip().split('\n', 1)[0]
@staticmethod
def min_section_indent(section_match):
return len(section_match.group(1)) + 1
def _parse_section(self, section_re):
section_match = section_re.search(self.doc)
if section_match is None:
return []
min_indentation = self.min_section_indent(section_match)
entries = []
entry = []
is_first = True
for line in section_match.group(2).splitlines():
if not line.strip():
continue
indentation = space_indentation(line)
if indentation < min_indentation:
break
# The first line after the header defines the minimum
# indentation.
if is_first:
min_indentation = indentation
is_first = False
if indentation == min_indentation:
# Lines with minimum indentation must contain the beginning
# of a new parameter documentation.
if entry:
entries.append("\n".join(entry))
entry = []
entry.append(line)
if entry:
entries.append("\n".join(entry))
return entries
class NumpyDocstring(GoogleDocstring):
_re_section_template = r"""
^([ ]*) {0} \s*?$ # Numpy parameters header
\s* [-=]+ \s*?$ # underline
( .* ) # section
"""
re_param_section = re.compile(
_re_section_template.format(r"(?:Args|Arguments|Parameters)"),
re.X | re.S | re.M
)
re_param_line = re.compile(r"""
\s* (\w+) # identifier
\s* :
\s* (?:({type})(?:,\s+optional)?)? # optional type declaration
\n # description starts on a new line
\s* (.*) # description
""".format(
type=GoogleDocstring.re_multiple_type,
), re.X | re.S)
re_raise_section = re.compile(
_re_section_template.format(r"Raises"),
re.X | re.S | re.M
)
re_raise_line = re.compile(r"""
\s* ({type})$ # type declaration
\s* (.*) # optional description
""".format(type=GoogleDocstring.re_type), re.X | re.S | re.M)
re_returns_section = re.compile(
_re_section_template.format(r"Returns?"),
re.X | re.S | re.M
)
re_returns_line = re.compile(r"""
\s* ({type})$ # type declaration
\s* (.*) # optional description
""".format(
type=GoogleDocstring.re_multiple_type,
), re.X | re.S | re.M)
re_yields_section = re.compile(
_re_section_template.format(r"Yields?"),
re.X | re.S | re.M
)
re_yields_line = re_returns_line
supports_yields = True
@staticmethod
def min_section_indent(section_match):
return len(section_match.group(1))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-09-01 19:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('yasp', '0008_auto_20160901_1606'),
]
operations = [
migrations.AlterField(
model_name='flatpage',
name='template_name',
field=models.CharField(blank=True, help_text="Exemplo: 'yasp/contact_page.html'. Se n\xe3o for informado, o sistema utilizar\xe1 o padr\xe3o 'yasp/default.html'.", max_length=70, verbose_name='nome do template'),
),
]
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012-2013 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import sys
logging.basicConfig(level=logging.ERROR)
top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir,
os.pardir))
sys.path.insert(0, top_dir)
import taskflow.engines
from taskflow.patterns import linear_flow as lf
from taskflow import retry
from taskflow import task
# INTRO: In this example we create a retry controller that receives a phone
# directory and tries different phone numbers. The next task tries to call Jim
# using the given number. If it is not Jim's number, the task raises an
# exception and the retry controller takes the next number from the phone
# directory and retries the call.
#
# This example shows a basic usage of retry controllers in a flow.
# Retry controllers allow reverting and retrying a failed subflow with new
# parameters.
class CallJim(task.Task):
def execute(self, jim_number):
print("Calling jim %s." % jim_number)
if jim_number != 555:
raise Exception("Wrong number!")
else:
print("Hello Jim!")
def revert(self, jim_number, **kwargs):
print("Wrong number, apologizing.")
# Create your flow and associated tasks (the work to be done).
flow = lf.Flow('retrying-linear',
retry=retry.ParameterizedForEach(
rebind=['phone_directory'],
provides='jim_number')).add(CallJim())
# Now run that flow using the provided initial data (store below).
taskflow.engines.run(flow, store={'phone_directory': [333, 444, 555, 666]})
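# Expected behavior for the store above (reasoning from CallJim): 333 and 444 raise
# "Wrong number!" and are reverted with an apology, 555 prints "Hello Jim!" and the
# flow finishes, so 666 is never tried.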
|
const { MessageEmbed, MessageCollector } = require("discord.js");
const { MinakoError } = require("../../utils/errors");
const { sendCharacterEmbed } = require("./embeds/CharacterEmbed");
const { searchCharacter, fetchCharacter } = require("./functions/FetchAnime");
exports.run = async(message, args) => {
if (args.length == 1) return MinakoError.global.commandInvalidArguments(message, "animecharacter", "`Character Name`", "`Reimu Hakurei`");
args.splice(0, 1);
const searchResult = searchCharacter(args.join(" "));
searchResult.then(result => {
if (result.length == 0) return MinakoError.anime.notFoundSearch(message, args.join(" "));
let resultSize = result.length;
if (resultSize > 10) resultSize = 10;
let filterArgs = []
const embed = new MessageEmbed()
.setTitle("Character search results for " + `"${args.join(" ")}"`)
.setDescription("Type the number to select the character\n\n")
.setColor('#D9A0F3')
.setFooter("You have 10s to select")
for (let i = 0; i < resultSize; i++) {
filterArgs.push(String(i + 1));
embed.description = embed.description.concat(`**${i+1}** :small_blue_diamond: __${result[i].name}__\n\n`)
}
const filter = (m) => m.content;
const collector = new MessageCollector(message.channel, filter, { time: 10000, max: 1 });
let collected = false;
collector.on('collect', m => {
if (!filterArgs.includes(m.content)) return;
collected = true;
const index = Number(m.content);
return fetchCharacter(result[index - 1].id).then(character => {
sendCharacterEmbed(character, message)
})
}).on("end", () => {
if (collected == true) return;
message.channel.send(`${message.author} | You kept me waiting too long. This is not polite at all! Run the command again to use it.`)
})
message.channel.send(embed)
})
} |
# -*- coding: utf-8 -*-
"""
.. module:: organizations
"""
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.shortcuts import redirect
from django.shortcuts import render
from django.utils.text import slugify
from django.views.generic import View
from apps.volontulo.forms import VolounteerToOrganizationContactForm
from apps.volontulo.lib.email import send_mail
from apps.volontulo.models import Offer
from apps.volontulo.models import Organization
from apps.volontulo.models import UserProfile
from apps.volontulo.utils import correct_slug
def organizations_list(request):
"""View responsible for listing all organizations.
:param request: WSGIRequest instance
"""
organizations = Organization.objects.all()
return render(
request,
"organizations/list.html",
{'organizations': organizations},
)
class OrganizationsCreate(View):
"""Class view supporting creation of new organization."""
@staticmethod
@login_required
def get(request):
"""Method responsible for rendering form for new organization."""
return render(
request,
"organizations/organization_form.html",
{'organization': Organization()}
)
@staticmethod
@login_required
def post(request):
"""Method responsible for saving new organization."""
if not (
request.POST.get('name') and
request.POST.get('address') and
request.POST.get('description')
):
messages.error(
request,
"Należy wypełnić wszystkie pola formularza."
)
return render(
request,
"organizations/organization_form.html",
{'organization': Organization()}
)
organization = Organization(
name=request.POST.get('name'),
address=request.POST.get('address'),
description=request.POST.get('description'),
)
organization.save()
request.user.userprofile.organizations.add(organization)
messages.success(
request,
"Organizacja została dodana."
)
return redirect(
'organization_view',
slug=slugify(organization.name),
id_=organization.id,
)
@correct_slug(Organization, 'organization_form', 'name')
@login_required
def organization_form(request, slug, id_): # pylint: disable=unused-argument
"""View responsible for editing organization.
Editing will only work if the logged-in user belongs to the organization.
"""
org = Organization.objects.get(pk=id_)
users = [profile.user.email for profile in org.userprofiles.all()]
if (
request.user.is_authenticated() and
request.user.email not in users
):
messages.error(
request,
"Nie masz uprawnień do edycji tej organizacji."
)
return redirect(
reverse(
'organization_view',
args=[slugify(org.name), org.id]
)
)
if not (
request.user.is_authenticated() and
UserProfile.objects.get(user=request.user).organizations
):
return redirect(settings.ANGULAR_ROOT)
if request.method == 'POST':
if (
request.POST.get('name') and
request.POST.get('address') and
request.POST.get('description')
):
org.name = request.POST.get('name')
org.address = request.POST.get('address')
org.description = request.POST.get('description')
org.save()
messages.success(
request,
"Oferta została dodana/zmieniona."
)
return redirect(
reverse(
'organization_view',
args=[slugify(org.name), org.id]
)
)
else:
messages.error(
request,
"Należy wypełnić wszystkie pola formularza."
)
return render(
request,
"organizations/organization_form.html",
{'organization': org},
)
@correct_slug(Organization, 'organization_view', 'name')
def organization_view(request, slug, id_): # pylint: disable=unused-argument
"""View responsible for viewing organization."""
org = get_object_or_404(Organization, id=id_)
offers = Offer.objects.filter(organization_id=id_)
allow_contact = True
allow_edit = False
allow_offer_create = False
if (
request.user.is_authenticated() and
request.user.userprofile in org.userprofiles.all()
):
allow_contact = False
allow_edit = True
allow_offer_create = True
if request.method == 'POST':
form = VolounteerToOrganizationContactForm(request.POST)
if form.is_valid():
send_mail(
request,
'volunteer_to_organisation',
[
userprofile.user.email
for userprofile in org.userprofiles.all()
],
{k: v for k, v in request.POST.items()},
)
messages.success(request, "Email został wysłany.")
else:
messages.error(
request,
"Formularz zawiera nieprawidłowe dane: {errors}".format(
errors=form.errors
)
)
return render(
request,
"organizations/organization_view.html",
{
'organization': org,
'contact_form': form,
'offers': offers,
'allow_contact': allow_contact,
'allow_edit': allow_edit,
'allow_offer_create': allow_offer_create,
},
)
return render(
request,
"organizations/organization_view.html",
{
'organization': org,
'contact_form': VolounteerToOrganizationContactForm(),
'offers': offers,
'allow_contact': allow_contact,
'allow_edit': allow_edit,
'allow_offer_create': allow_offer_create,
}
)
|
import React, { Component } from 'react';
import CourseLibrary from './courseLibrary';
import Schedule from './schedule';
export default class App extends Component {
render() {
return (
<div className="main-container">
<div className="library">
<h1 className="library__header">Course Library</h1>
<CourseLibrary />
</div>
<div className="schedule">
<h1 className="schedule__header">Schedule</h1>
<Schedule />
</div>
</div>
);
}
}
|
import qutip as qt
import numpy as np
import scipy
from scipy import constants
from scipy.linalg import expm, sinm, cosm
import itertools
import matplotlib.pyplot as plt
pi = np.pi
e = constants.e
h = constants.h
hbar = constants.hbar
ep0 = constants.epsilon_0
mu0 = constants.mu_0
Phi0 = h/(2*e)
kb = constants.Boltzmann
def ket(Nq, i):
return qt.basis(Nq, i)
def ket_2Qsys(i, j, Nq1, Nq2):
a = ket(Nq1, i)
b = ket(Nq2, j)
return qt.tensor(a, b)
def ket_3Qsys(i, j, k, Nq1, Nq2, Nqc):
a = ket(Nq1, i)
b = ket(Nq2, j)
c = ket(Nqc, k)
return qt.tensor(a, b, c)
def Bose_distributon(f, T):
f = f * 1e9
if T==0:
res = 0
else:
res = ( np.exp(h*f/(kb*T)) - 1 )**(-1)
return res
def Maxwell_Boltzmann_distributon(fs, T, N):
if N < 1:
print('N should be >=1')
# f1 = fs[1] * 1e9
# a = np.exp(-f1*h/(kb*T))
Z = 0
for i in range(len(fs[0:N])):
fi = fs[i] * 1e9
Z = Z + np.exp(-fi*h/(kb*T))
pops = [np.exp(-fs[i]*1e9*h/(kb*T))/Z for i in range(len(fs[0:N]))]
return pops
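# Worked example (hypothetical values): Bose_distributon(7.0, 20e-3) gives the mean
# thermal photon number of a 7 GHz mode at 20 mK; h*f/(kb*T) is about 16.8, so the
# occupation is roughly exp(-16.8) ~ 5e-8.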
######### N-level paulis #########
def pI_N(Nq):
return ket(Nq, 0) * ket(Nq, 0).dag() + ket(Nq, 1) * ket(Nq, 1).dag()
def pX_N(Nq):
return ket(Nq, 0) * ket(Nq, 1).dag() + ket(Nq, 1) * ket(Nq, 0).dag()
def pY_N(Nq):
return 1j*ket(Nq, 0) * ket(Nq, 1).dag() - 1j*ket(Nq, 1) * ket(Nq, 0).dag()
def pZ_N(Nq):
return ket(Nq, 0) * ket(Nq, 0).dag() - ket(Nq, 1) * ket(Nq, 1).dag()
def Rarb(p:list, Nq):
SX, SY, SZ = pX_N(Nq), pY_N(Nq), pZ_N(Nq)
return ( -1j*(p[0]*SX + p[1]*SY + p[2]*SZ)/2 ).expm()
def Rarb2Q(p:list, Nq):
R1 = Rarb(p[0:3], Nq)
R2 = Rarb(p[3:6], Nq)
res = qt.tensor(R1, R2)
return res
def iniState1Qsys(Nq:int, s:int, mode='ket'):
q1 = ket(Nq, s)
psi0 = q1
if mode == 'rho':
ini = psi0*psi0.dag()
else:
ini = psi0
return ini
def iniState1Q1Rsys(Nq:int, Nf:int, s:int, t:int, mode='ket'):
q1 = ket(Nq, s)
r1 = qt.fock(Nf, t)
psi0 = qt.tensor(q1, r1)
if mode == 'rho':
ini = psi0*psi0.dag()
else:
ini = psi0
return ini
def iniState2Qsys(Nq1:int, Nq2:int, s1:int, s2:int, mode='ket'):
q1 = ket(Nq1, s1)
q2 = ket(Nq2, s2)
psi0 = qt.tensor(q1, q2)
if mode == 'rho':
ini = psi0*psi0.dag()
else:
ini = psi0
return ini
class transmon():
def __init__(self, EC=10, EJ=0.3, f01=None, alpha=None, N=10, Nq=3, Temp=20e-3):
# Unit in [GHz]
if f01 is None and alpha is None:
self.EC = EC
self.EJ = EJ
self.N = N
self.enes = self.calcChargeQubitLevels(self.EC, self.EJ, self.N)
self.f01 = self.enes[1]
self.anh = self.enes[2] - 2*self.enes[1]
else:
self.f01 = f01
self.anh = - abs(alpha)
self.enes = [0] + [i*self.f01 + (i-1)*self.anh for i in range(1, N)]
self.nth_q = Maxwell_Boltzmann_distributon(self.enes, Temp, N)
self.P0 = iniState1Qsys(Nq, 0, mode='rho')
self.P1 = iniState1Qsys(Nq, 1, mode='rho')
# Thermal_state_ket = 0
# for i in range(Nq):
# Thermal_state_ket += ket(Nq, i)*self.nth_q[i]
# self.Thermal_state_ket = Thermal_state_ket
# self.Thermal_state_dm = self.Thermal_state_ket*self.Thermal_state_ket.dag()
self.Thermal_state_dm = qt.thermal_dm(Nq, self.nth_q[1])
if Nq != None:
self.Q_duffingOscillator(Nq)
def chargeQubitHamilonian(self, Ec, Ej, N, ng):
"""
Return the charge qubit hamiltonian as a Qobj instance.
Parameters
----------
Ec : float
unit is [GHz]
Ej : float
unit is [GHz]
N : int
Difference in the number of Cooper pairs
ng : float
Voltage bias for Charge qubit.
Returns
-------
qobj of Charge qubit hamiltonian.
"""
m = np.diag(4 * Ec * (np.arange(-N,N+1)-ng)**2) + 0.5 * Ej * (np.diag(-np.ones(2*N), 1) +
np.diag(-np.ones(2*N), -1))
return qt.Qobj(m)
def Q_duffingOscillator(self, Nq=5):
self.Nq = Nq
Iq = qt.qeye(Nq)
b = qt.destroy(Nq)
nb = b.dag()*b
self.X = pX_N(Nq)
self.Y = pY_N(Nq)
self.Z = pZ_N(Nq)
self.Iq = Iq
self.nb = nb
self.b = b
q1_lab = self.f01 * nb + 0.5 * self.anh * nb * (nb - Iq)
self.Hqlab = q1_lab
return q1_lab
def calcChargeQubitLevels(self, EC, EJ, N):
"""
Return the list of charge qubit eigenenergies at the flux optimal point.
Parameters
----------
Ec : float
unit is [GHz]
Ej : float
unit is [GHz]
N : int
Difference in the number of Cooper pairs
Returns
-------
list of eigenenergies.
"""
ng_vec = [0]
ene = np.array([self.chargeQubitHamilonian(EC, EJ, N, ng).eigenenergies() for ng in ng_vec])
return [ene[0][i] - ene[0][0] for i in range(len(ene[0])) ]
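# Usage sketch (hypothetical parameter values, not from the original code):
# q_demo = transmon(f01=5.0, alpha=0.3, Nq=3)  # 5 GHz qubit, -300 MHz anharmonicity
# q_demo.f01, q_demo.anh, q_demo.Hqlab         # qubit frequency, anharmonicity, lab-frame Hamiltonian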
class resonator():
def __init__(self, fr, Qc, Nf=5, Temp=20e-3):
self.fr = fr
self.Nf = Nf
self.Qc = Qc
self.a = qt.destroy(Nf)
self.ad = self.a.dag()
self.na = self.ad * self.a
self.Hr = fr * self.na
self.Ir = qt.qeye(Nf)
self.kappa = fr/Qc
self.nth_a = Bose_distributon(fr, Temp)
self.Thermal_state_dm = qt.thermal_dm(Nf, self.nth_a)
class QR():
def __init__(self, Q, fr, g):
# Unit in [GHz]
self.fr = fr
self.g = g
self.Q = Q
self.detuning = Q.f01 - fr
self.thermal_photon = qt.utilities.n_thermal(fr, Q.f01)
self.f01_dressed = Q.f01 + ( 2 * (g**2) / self.detuning ) * ( self.thermal_photon + 1/2 )
self.X = ((g**2)/self.detuning)*(Q.anh/(Q.f01+Q.anh-fr))
class QRQ():
def __init__(self, Q1, Q2, frb, g1, g2):
# Unit in [GHz]
self.frb = frb
self.g1 = g1
self.g2 = g2
self.Q1 = Q1
self.Q2 = Q2
self.detuning1 = Q1.f01 - frb
self.thermal_photon1 = qt.utilities.n_thermal(frb, Q1.f01)
self.f01_dressed1 = Q1.f01 + ( 2 * (g1**2) / self.detuning1 ) * ( self.thermal_photon1 + 1/2 )
self.X1 = ((g1**2)/self.detuning1)*(Q1.anh/(Q1.f01+Q1.anh-frb))
self.detuning2 = Q2.f01 - frb
self.thermal_photon2 = qt.utilities.n_thermal(frb, Q2.f01)
self.f01_dressed2 = Q2.f01 + ( 2 * (g2**2) / self.detuning2 ) * ( self.thermal_photon2 + 1/2 )
self.X2 = ((g2**2)/self.detuning2)*(Q2.anh/(Q2.f01+Q2.anh-frb))
self.D12 = self.f01_dressed1 - self.f01_dressed2
self.J = g1*g2*( self.detuning1 + self.detuning2 ) / ( 2 * self.detuning1 * self.detuning2 )
self.f01_coupled1 = self.f01_dressed1 + (self.J**2)/self.D12
self.f01_coupled2 = self.f01_dressed2 - (self.J**2)/self.D12
class QQ():
# For direct coupling simulation
def __init__(self, Q1, Q2, g12):
# duffing oscillator model
# Unit in [GHz]
self.g12 = g12
self.Q1 = Q1
self.Q2 = Q2
self.Nq1, self.Nq2 = Q1.Nq, Q2.Nq
iq1, iq2 = qt.qeye(self.Nq1), qt.qeye(self.Nq2)
b1, b2 = qt.destroy(self.Nq1), qt.destroy(self.Nq2)
self.b1, self.b2 = b1, b2
self.iq1, self.iq2 = iq1, iq2
self.nb1, self.nb2 = self.b1.dag()*self.b1, self.b2.dag()*self.b2
self.B1 = qt.tensor(b1, iq2)
self.B2 = qt.tensor(iq1, b2)
self.Iq1 = qt.tensor(iq1, iq2)
self.Iq2 = qt.tensor(iq1, iq2)
self.Nb1 = self.B1.dag()*self.B1
self.Nb2 = self.B2.dag()*self.B2
# Drive term @rotating frame
self.Hd1_real = self.B1 + self.B1.dag()
self.Hd1_imag = (- self.B1 + self.B1.dag())*1j
self.Hd2_real = (self.B2 + self.B2.dag())
self.Hd2_imag = (- self.B2 + self.B2.dag())*1j
self.X1 = qt.tensor(pX_N(self.Nq1), iq2)
self.Y1 = qt.tensor(pY_N(self.Nq1), iq2)
self.Z1 = qt.tensor(pZ_N(self.Nq1), iq2)
self.X2 = qt.tensor(iq1, pX_N(self.Nq2))
self.Y2 = qt.tensor(iq1, pY_N(self.Nq2))
self.Z2 = qt.tensor(iq1, pZ_N(self.Nq2))
bbbb1 = self.B1.dag()*self.B1.dag()*self.B1*self.B1
bbbb2 = self.B2.dag()*self.B2.dag()*self.B2*self.B2
self.duff_part1 = 0.5 * self.Q1.anh * self.Nb1 * (self.Nb1 - self.Iq1) # 0.5 * Q1.anh * bbbb1
self.duff_part2 = 0.5 * self.Q2.anh * self.Nb2 * (self.Nb2 - self.Iq2) # 0.5 * Q2.anh * bbbb2
self.Hq1 = Q1.f01 * self.Nb1 + self.duff_part1 # - self.Iq1*0
self.Hq2 = Q2.f01 * self.Nb2 + self.duff_part2 # - self.Iq2*0
self._int12 = self.B1*self.B2.dag() + self.B1.dag()*self.B2
self.Hint12 = g12*(self._int12)
self.Hint = self.Hint12
self.Hlab = self.Hq1 + self.Hq2 + self.Hint
self.calcStaticZZ(self.Hlab)
self.fd1 = self.eigenlevels[0][self.keys['10']] - self.eigenlevels[0][self.keys['00']]
self.fd2 = self.eigenlevels[0][self.keys['01']] - self.eigenlevels[0][self.keys['00']]
# ref : https://doi.org/10.1103/PhysRevApplied.12.054023
self.staticZZ = self.eigenlevels[0][self.keys['11']] - self.eigenlevels[0][self.keys['10']] - self.eigenlevels[0][self.keys['01']]
def dressedEnergyLevels(self, H=None):
if self.Nq1 == self.Nq2:
Nq = self.Nq2
else:
print('Should be Nq1 = Nq2')
if H == None:
eigenlevels = self.Hlab.eigenstates()
else:
eigenlevels = H.eigenstates()
keys = {}
for i in range(Nq):
for j in range(Nq):
k = ket_2Qsys(i, j, Nq, Nq)
e = np.abs([(k.dag() * eigenlevels[1])[i].tr() for i in range(Nq**2)])
index = np.argmax(e)
keys['{}{}'.format(i, j)] = index
self.keys = keys
self.eigenlevels = eigenlevels
def plotDressedEnergyLevels(self, figname=1):
if self.Nq1 == self.Nq2:
Nq = self.Nq2
else:
print('Should be Nq1 = Nq2')
d = self.keys
enes = self.eigenlevels
plt.figure(figname, dpi=150)
plt.title(r'$|Q1, Q2\rangle$')
for i in range(Nq):
for j in range(Nq):
key = '{}{}'.format(i,j)
if key == '22':
break
index = d[key]
ene = enes[0][index]
if i < j:#p
s = abs(i-j)
t = s+1
elif i > j:#m
t = -abs(i-j)+1
s = t-1
elif i == j:
s = 0
t = 1
plt.hlines(ene, s, t)
plt.text(s, ene+0.4, '|'+key+'>'+':{:.4f}GHz'.format(ene))
plt.ylim([-1.0, ene+3])
plt.ylabel('Eigen energy [GHz]')
plt.xticks(color='None')
plt.tick_params(length=0)
plt.grid()
def toRotFrameHamiltonian(self, fd, Scl=0, target=0):
q1_rot = (self.Q1.f01-fd) * self.nb1 + 0.5 * self.Q1.anh * self.nb1 * (self.nb1 - self.iq1)
q2_rot = (self.Q2.f01-fd) * self.nb2 + 0.5 * self.Q2.anh * self.nb2 * (self.nb2 - self.iq2)
self.Hqrot = qt.tensor(q1_rot, self.iq2) + qt.tensor(self.iq1, q2_rot)
if target == 0:
Hdrive = self.B1 + self.B1.dag()
else:
Hdrive = self.B2 + self.B2.dag()
return self.Hqrot + self.Hint + Scl * Hdrive
def calcStaticZZ(self, H):
self.dressedEnergyLevels(H=H)
self.staticZZ = self.eigenlevels[0][self.keys['11']] - self.eigenlevels[0][self.keys['10']] - self.eigenlevels[0][self.keys['01']]
return self.staticZZ
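# Usage sketch (hypothetical parameters): two directly coupled transmons and their
# static ZZ interaction, reasoning from the QQ constructor above.
# q1 = transmon(f01=5.0, alpha=0.3, Nq=3); q2 = transmon(f01=5.3, alpha=0.3, Nq=3)
# qq = QQ(q1, q2, g12=0.010)
# qq.staticZZ   # ZZ shift in GHz; qq.fd1 / qq.fd2 are the dressed drive frequencies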
class QQQ():
# For tunable coupling simulation
def __init__(self, Q1, Q2, Qc, gc1, gc2, g12):
# duffing oscillator model
# Unit in [GHz]
self.gc1 = gc1
self.gc2 = gc2
self.g12 = g12
self.Q1 = Q1
self.Q2 = Q2
self.Qc = Qc
self.Nq1, self.Nq2, self.Nqc = Q1.Nq, Q2.Nq, Qc.Nq
iq1, iq2, iqc = qt.qeye(self.Nq1), qt.qeye(self.Nq2), qt.qeye(self.Nqc)
b1, b2, bc = qt.destroy(self.Nq1), qt.destroy(self.Nq2), qt.destroy(self.Nqc)
self.B1 = qt.tensor(b1, iq2, iqc)
self.B2 = qt.tensor(iq1, b2, iqc)
self.Bc = qt.tensor(iq1, iq2, bc)
self.Iq1 = qt.tensor(iq1, iq2, iqc)
self.Iq2 = qt.tensor(iq1, iq2, iqc)
self.Iqc = qt.tensor(iq1, iq2, iqc)
self.Nb1 = self.B1.dag()*self.B1
self.Nb2 = self.B2.dag()*self.B2
self.Nbc = self.Bc.dag()*self.Bc
bbbb1 = self.B1.dag()*self.B1.dag()*self.B1*self.B1
bbbb2 = self.B2.dag()*self.B2.dag()*self.B2*self.B2
bbbbc = self.Bc.dag()*self.Bc.dag()*self.Bc*self.Bc
self.duff_part1 = 0.5 * self.Q1.anh * self.Nb1 * (self.Nb1 - self.Iq1) # 0.5 * Q1.anh * bbbb1
self.duff_part2 = 0.5 * self.Q2.anh * self.Nb2 * (self.Nb2 - self.Iq2) # 0.5 * Q2.anh * bbbb2
self.duff_partc = 0.5 * self.Qc.anh * self.Nbc * (self.Nbc - self.Iqc) # 0.5 * Qc.anh * bbbbc
self.Hq1 = Q1.f01 * self.Nb1 + self.duff_part1 # - self.Iq1*0
self.Hq2 = Q2.f01 * self.Nb2 + self.duff_part2 # - self.Iq2*0
self.Hqc = Qc.f01 * self.Nbc + self.duff_partc # - self.Iqc*0
self._intc1 = self.B1*self.Bc.dag() + self.B1.dag()*self.Bc
self._intc2 = self.B2*self.Bc.dag() + self.B2.dag()*self.Bc
self._int12 = self.B1*self.B2.dag() + self.B1.dag()*self.B2
# self._intc1 = (self.B1 + self.B1.dag())*(self.Bc + self.Bc.dag())
# self._intc2 = (self.B2 + self.B2.dag())*(self.Bc + self.Bc.dag())
# self._int12 = (self.B1 + self.B1.dag())*(self.B2 + self.B2.dag())
self.Hintc1 = gc1*self._intc1
self.Hintc2 = gc2*self._intc2
self.Hint12 = g12*self._int12
self.Hint = self.Hintc1 + self.Hintc2 + self.Hint12
self.Hlab = self.Hq1 + self.Hq2 + self.Hqc + self.Hint
self.eigenlevels = self.Hlab.eigenstates()
self.dressedEnergyLevels()
self.fd1 = self.eigenlevels[0][self.keys['100']] - self.eigenlevels[0][self.keys['000']]
self.fd2 = self.eigenlevels[0][self.keys['010']] - self.eigenlevels[0][self.keys['000']]
# ref : https://doi.org/10.1103/PhysRevApplied.12.054023
self.staticZZ = self.eigenlevels[0][self.keys['110']] - self.eigenlevels[0][self.keys['100']] - self.eigenlevels[0][self.keys['010']]
self.effectiveCoupling = gc1*gc2*(1/(Q1.f01-Qc.f01)+1/(Q2.f01-Qc.f01))*0.5 + g12
def dressedEnergyLevels(self):
if self.Nq1 == self.Nq2:
Nq = self.Nq2
else:
print('Should be Nq1 = Nq2')
eigenlevels = self.eigenlevels
keys = {}
for i in range(Nq):
for j in range(Nq):
for k in range(Nq):
bra = ket_3Qsys(i, j, k, Nq, Nq, Nq).dag()
e = np.abs([(bra * eigenlevels[1])[i].tr() for i in range(Nq**3)])
index = np.argmax(e)
keys['{}{}{}'.format(i, j, k)] = index
self.keys = keys
def plotDressedEnergyLevels(self, coupler_exitation_stop=0):
# coupler_exitation_stop : highest coupler excitation number to be plotted.
ces = coupler_exitation_stop
if self.Nq1 == self.Nq2:
Nq = self.Nq2
else:
print('Should be Nq1 = Nq2')
d = self.keys
enes = self.eigenlevels
plt.figure(1, dpi=150)
cmap = plt.get_cmap("tab10")
plt.title(r'$|Q1, Q2, Qc\rangle$')
for i in range(Nq):
for j in range(Nq):
for k in range(Nq):
key = '{}{}{}'.format(i, j, k)
if key == '220' or k > ces:
break
index = d[key]
ene = enes[0][index]
if i < j:#p
s = abs(i-j)
t = s+1
elif i > j:#m
t = -abs(i-j)+1
s = t-1
elif i == j:
s = 0
t = 1
plt.hlines(ene, s, t, color=cmap(k))
plt.text(s, ene+0.4, '|'+key+r'$\rangle$'+':{:.3f}GHz'.format(ene))
plt.ylim([-1.0, ene+3])
plt.ylabel('Eigen energy [GHz]')
plt.xticks(color='None')
plt.tick_params(length=0)
plt.grid()
class RQRQR():
def __init__(self, QR1, QR2, frb, g1, g2):
# Unit in [GHz]
self.frb = frb
self.g1 = g1
self.g2 = g2
self.QR1 = QR1
self.QR2 = QR2
self.detuning1 = QR1.f01_dressed - frb
self.thermal_photon1 = qt.utilities.n_thermal(frb, QR1.f01_dressed)
self.f01_dressed1 = QR1.f01_dressed + ( 2 * (g1**2) / self.detuning1 ) * ( self.thermal_photon1 + 1/2 )
self.X1 = ((g1**2)/self.detuning1)*(QR1.Q.anh/(QR1.f01_dressed + QR1.Q.anh - frb))
self.detuning2 = QR2.f01_dressed - frb
self.thermal_photon2 = qt.utilities.n_thermal(frb, QR2.f01_dressed)
self.f01_dressed2 = QR2.f01_dressed + ( 2 * (g2**2) / self.detuning2 ) * ( self.thermal_photon2 + 1/2 )
self.X2 = ((g2**2)/self.detuning2)*(QR2.Q.anh/(QR2.f01_dressed + QR2.Q.anh - frb))
self.D12 = self.f01_dressed1 - self.f01_dressed2
self.J = g1*g2*( self.detuning1 + self.detuning2 ) / ( 2 * self.detuning1 * self.detuning2 )
self.f01_coupled1 = self.f01_dressed1 + (self.J**2)/self.D12
self.f01_coupled2 = self.f01_dressed2 - (self.J**2)/self.D12
class labFrame2Qhamiltonian_DuffingOscillator():
def __init__(self, RQRQR, Nq1, Nq2):
self.Nq1, self.Nq2 = Nq1, Nq2
Iq1, Iq2 = qt.qeye(Nq1), qt.qeye(Nq2)
b1, b2 = qt.destroy(Nq1), qt.destroy(Nq2)
Nb1, Nb2 = b1.dag()*b1, b2.dag()*b2
self.X1 = qt.tensor(pX_N(Nq1), Iq2)
self.Y1 = qt.tensor(pY_N(Nq1), Iq2)
self.Z1 = qt.tensor(pZ_N(Nq1), Iq2)
self.X2 = qt.tensor(Iq1, pX_N(Nq2))
self.Y2 = qt.tensor(Iq1, pY_N(Nq2))
self.Z2 = qt.tensor(Iq1, pZ_N(Nq2))
self.Iq1, self.Iq2 = Iq1, Iq2
self.Nb1, self.Nb2 = Nb1, Nb2
self.QR1 = RQRQR.QR1
self.QR2 = RQRQR.QR2
J = RQRQR.J
self.B1 = qt.tensor(b1, Iq2)
self.B2 = qt.tensor(Iq1, b2)
bbbb1 = b1.dag()*b1.dag()*b1*b1
bbbb2 = b2.dag()*b2.dag()*b2*b2
# Drive term @rotating frame
self.Hd1_real = self.B1 + self.B1.dag()
self.Hd1_imag = (- self.B1 + self.B1.dag())*1j
self.Hd2_real = (self.B2 + self.B2.dag())
self.Hd2_imag = (- self.B2 + self.B2.dag())*1j
q1_lab = self.QR1.f01_dressed * Nb1 + 0.5 * self.QR1.Q.anh * Nb1 * (Nb1 - Iq1)
q2_lab = self.QR2.f01_dressed * Nb2 + 0.5 * self.QR2.Q.anh * Nb2 * (Nb2 - Iq2)
self.Hqlab = qt.tensor(q1_lab, Iq2) + qt.tensor(Iq1, q2_lab)
self.Hint = J * ( qt.tensor(b1, b2.dag()) + qt.tensor(b1.dag(), b2) )
self.Hlab = self.Hqlab + self.Hint
self.dressedEnergyLevels()
self.fd1 = self.eigenlevels[0][self.keys['10']] - self.eigenlevels[0][self.keys['00']]
self.fd2 = self.eigenlevels[0][self.keys['01']] - self.eigenlevels[0][self.keys['00']]
def dressedEnergyLevels(self):
if self.Nq1 == self.Nq2:
Nq = self.Nq2
else:
print('Should be Nq1 = Nq2')
eigenlevels = self.Hlab.eigenstates()
keys = {}
for i in range(Nq):
for j in range(Nq):
k = ket_2Qsys(i, j, Nq, Nq)
e = np.abs([(k.dag() * eigenlevels[1])[i].tr() for i in range(Nq**2)])
index = np.argmax(e)
keys['{}{}'.format(i, j)] = index
self.keys = keys
self.eigenlevels = eigenlevels
def plotDressedEnergyLevels(self):
if self.Nq1 == self.Nq2:
Nq = self.Nq2
else:
print('Should be Nq1 = Nq2')
d = self.keys
enes = self.eigenlevels
plt.figure(1)
for i in range(Nq):
for j in range(Nq):
key = '{}{}'.format(i,j)
if key == '22':
break
index = d[key]
ene = enes[0][index]
if i < j:#p
s = abs(i-j)
t = s+1
elif i > j:#m
t = -abs(i-j)+1
s = t-1
elif i == j:
s = 0
t = 1
plt.hlines(ene, s, t)
plt.text(s, ene+0.4, '|'+key+'>'+':{:.3f}GHz'.format(ene))
plt.ylim([-1.0, ene+3])
plt.ylabel('Eigen energy [GHz]')
plt.xticks(color='None')
plt.tick_params(length=0)
plt.grid()
def toRotFrameHamiltonian(self, fd:float):
Nb1, Nb2 = self.Nb1, self.Nb2
Iq1, Iq2 = self.Iq1, self.Iq2
q1_rot = (self.QR1.f01_dressed-fd) * Nb1 + 0.5 * self.QR1.Q.anh * Nb1 * (Nb1 - Iq1)
q2_rot = (self.QR2.f01_dressed-fd) * Nb2 + 0.5 * self.QR2.Q.anh * Nb2 * (Nb2 - Iq2)
self.Hqrot = qt.tensor(q1_rot, self.Iq2) + qt.tensor(self.Iq1, q2_rot)
return self.Hqrot + self.Hint
def toDoublyRotFrameHamiltonian(self, fd1:float, fd2:float):
Nb1, Nb2 = self.Nb1, self.Nb2
Iq1, Iq2 = self.Iq1, self.Iq2
q1_rot = (self.QR1.f01_dressed-fd1) * Nb1 + 0.5 * self.QR1.Q.anh * Nb1 * (Nb1 - Iq1)
q2_rot = (self.QR2.f01_dressed-fd2) * Nb2 + 0.5 * self.QR2.Q.anh * Nb2 * (Nb2 - Iq2)
self.Hqrot = qt.tensor(q1_rot, self.Iq2) + qt.tensor(self.Iq1, q2_rot)
return self.Hqrot + self.Hint
class labFrame1Qhamiltonian_DuffingOscillator():
def __init__(self, QR, Nq):
self.Nq = Nq
Iq = qt.qeye(Nq)
b = qt.destroy(Nq)
Nb = b.dag()*b
self.X = pX_N(Nq)
self.Y = pY_N(Nq)
self.Z = pZ_N(Nq)
self.Iq = Iq
self.Nb = Nb
self.QR = QR
self.B = b
# Drive term @rotating frame
self.f01_dressed = QR.f01_dressed
self.Hd1_real = self.B + self.B.dag()
self.Hd1_imag = (- self.B + self.B.dag())*1j
q1_lab = self.QR.f01_dressed * Nb + 0.5 * self.QR.Q.anh * Nb * (Nb - Iq)
self.Hqlab = q1_lab
self.Hlab = self.Hqlab
def calcUrot(self, t_list):
Urots = []
for t in t_list:
u = (1j*self.f01_dressed*t*self.Nb).expm()
Urots.append(u)
return Urots
class labFrame1Q_1R_hamiltonian():
def __init__(self, Q, R, g):
"""
params
---
Q : class instance
transmon()
R : class instance
resonator()
g : float in [GHz]
coupling constant
"""
self.Nq = Q.Nq
self.Nf = R.Nf
self.Ir = Ir = R.Ir
self.Iq = Iq = Q.Iq
self.II = qt.tensor(Iq, Ir)
self.f01 = Q.f01
self.anh = Q.anh
self.fr = R.fr
self.g = g
self.Q = Q
self.R = R
self.detuning = Q.f01 - R.fr
# self.thermal_photon = qt.utilities.n_thermal(self.fr, Q.f01)
# self.f01_dressed = Q.f01 + ( 2 * (g**2) / self.detuning ) * ( self.thermal_photon + 1/2 )
self.X = qt.tensor(Q.X, Ir)
self.Y = qt.tensor(Q.Y, Ir)
self.Z = qt.tensor(Q.Z, Ir)
self.P0 = qt.tensor(Q.P0, Ir)
self.P1 = qt.tensor(Q.P1, Ir)
self.Na = qt.tensor(Iq, R.na)
self.Nb = qt.tensor(Q.nb, Ir)
self.A = A = qt.tensor(Iq, R.a)
self.B = B = qt.tensor(Q.b, Ir)
self.HQ1 = qt.tensor(Q.Hqlab, Ir)
self.HR1 = qt.tensor(Iq, R.Hr)
self.Hint = g * ( B*A.dag() + B.dag()*A )
self.Hd1_real = A + A.dag()
self.Hd1_imag = (- A + A.dag())*1j
self.Hlab = self.HQ1 + self.HR1 + self.Hint
self.dressedEnergyLevels()
self.fq10 = self.eigenlevels[0][self.keys['10']] - self.eigenlevels[0][self.keys['00']]
self.fq11 = self.eigenlevels[0][self.keys['11']] - self.eigenlevels[0][self.keys['01']]
self.fr0 = self.eigenlevels[0][self.keys['01']] - self.eigenlevels[0][self.keys['00']]
self.fr1 = self.eigenlevels[0][self.keys['11']] - self.eigenlevels[0][self.keys['10']]
def calcUrot(self, t_list, fd):
Urots = []
for t in t_list:
u = (1j*fd*(self.Na + self.Nb)*t).expm()
Urots.append(u)
return Urots
def addDecoherence(self):
        pass  # decoherence channels are not implemented yet
def calcDispersiveShift(self):
eigenlevels = self.Hlab.eigenstates()
e0 = qt.tensor(qt.basis(self.Nq, 1), qt.fock(self.Nf, 0))
g1 = qt.tensor(qt.basis(self.Nq, 0), qt.fock(self.Nf, 1))
e1 = qt.tensor(qt.basis(self.Nq, 1), qt.fock(self.Nf, 1))
ket_try = [e0, g1, e1]
ket_keys = ['e0', 'g1', 'e1']
disp_dic = {}
for i in range(3):
e = np.abs([(ket_try[i].dag() * eigenlevels[1])[j].tr() for j in range(self.Nq*self.Nf)])
index = np.argmax(e)
disp_dic[ket_keys[i]] = eigenlevels[0][index]
disp_dic['chi'] = (disp_dic['e1'] - disp_dic['e0'] - disp_dic['g1'])/2
self.dispersiveshift = disp_dic
return disp_dic
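    # The dispersive shift 'chi' above is built from the dressed eigenenergies
    # closest to |e0>, |g1> and |e1>; with the ground-state energy of |g0>
    # taken as the zero of energy this equals (f_r|e> - f_r|g>) / 2, i.e. the
    # dressed resonator frequency is pulled by +/- chi depending on the qubit
    # state.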
def toRotFrameHamiltonian(self, fd:float):
q1_rot = (self.f01-fd) * self.Nb + 0.5 * self.Q.anh * self.Nb * (self.Nb - self.II)
r1_rot = (self.fr-fd) * self.Na
self.Hrot = q1_rot + r1_rot + self.Hint
return self.Hrot
def dressedEnergyLevels(self, H=None):
Nq = self.Nq
Nf = self.Nf
        if H is None:
eigenlevels = self.Hlab.eigenstates()
else:
eigenlevels = H.eigenstates()
keys = {}
for i in range(Nq):
for j in range(2):
k = ket_2Qsys(i, j, Nq, Nf)
                e = np.abs([(k.dag() * eigenlevels[1])[n].tr() for n in range(Nq*Nf)])
index = np.argmax(e)
keys['{}{}'.format(i, j)] = index
self.keys = keys
self.eigenlevels = eigenlevels
def plotDressedEnergyLevels(self, figname=1):
Nq = self.Nq
Nf = self.Nf
d = self.keys
enes = self.eigenlevels
plt.figure(figname, dpi=150)
plt.title(r'$|Transmon, Resonator\rangle$')
for i in range(Nq):
for j in range(2):
key = '{}{}'.format(i,j)
if key == '22':
break
index = d[key]
ene = enes[0][index]
                if i < j:    # second index larger: draw the level on the +x side
                    s = abs(i - j)
                    t = s + 1
                elif i > j:  # first index larger: draw the level on the -x side
                    t = -abs(i - j) + 1
                    s = t - 1
                else:        # i == j: draw the level centred on x in [0, 1]
                    s = 0
                    t = 1
plt.hlines(ene, s, t)
plt.text(s, ene+0.4, '|'+key+r'$\rangle$'+':{:.4f}GHz'.format(ene))
plt.ylim([-1.0, ene+3])
plt.ylabel('Eigen energy [GHz]')
plt.xticks(color='None')
plt.tick_params(length=0)
plt.grid()
class timeEvo():
def __init__(self):
        pass  # placeholder: time-evolution helpers are not implemented yet
|
# Example parser that calculates the result of an expression
from varas import *
import sys
import re
LITERAL_TOKEN = 1
tokenizer = Tokenizer((r"\d+", LITERAL_TOKEN),
                      (r".", None))
def handle_lparen(parser, actions, token):
expr = parser.expression(actions)
parser.match(")")
return expr
def handle_lsquare(parser, actions, token):
result = []
while not parser.opt("]"):
if result:
parser.match(",")
result.append(parser.expression(actions))
return result
expr_spec = ExprSpec()
expr_spec.add_word(LITERAL_TOKEN, lambda token: int(token.content))
expr_spec.add_unary_op("+", lambda token, right: right)
expr_spec.add_unary_op("-", lambda token, right: -right)
expr_spec.add_binary_op("+", 10, Assoc.LEFT, lambda t, l, r: l + r)
expr_spec.add_binary_op("-", 10, Assoc.LEFT, lambda t, l, r: l - r)
expr_spec.add_binary_op("*", 20, Assoc.LEFT, lambda t, l, r: l * r)
expr_spec.add_binary_op("/", 20, Assoc.LEFT, lambda t, l, r: l / r)
expr_spec.add_binary_op("^", 30, Assoc.RIGHT, lambda t, l, r: l ** r)
expr_spec.add_prefix_handler("(", handle_lparen)
expr_spec.add_prefix_handler("[", handle_lsquare)
def parse_expr(input):
return list(Parser(expr_spec, tokenizer.tokenize(input)).parse_all())
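# Quick usage sketch (results taken from the tests below): parse_expr returns
# one value per expression found in the input, e.g.
#   parse_expr("2 + 3 * 4")      # -> [14]
#   parse_expr("[1 + 1, (2)]")   # -> [[2, 2]]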
import unittest
class TestCalc(unittest.TestCase):
def check(self, expected, input):
self.assertEqual([expected], parse_expr(input))
def checkError(self, input):
self.assertRaises(ParseError, parse_expr, input)
def test_number(self):
self.check(1, "1")
self.check(2, " 2")
self.check(123, "123")
self.check(-234, "-234")
self.check(-456, " - 456")
self.check(456, "+456")
def test_exprs(self):
self.check(14, " 2 + 3 * 4")
self.check(1, " (1)")
self.check(20, " (2 + 3) * 4")
self.check(16, " (2 + (3 - 1)) * 4")
self.check(1, " 15 / 3 - 4")
self.check(8, " 2 ^ 3")
self.check(2, " 2 ^ 1 ^ 2")
def test_list(self):
self.check([], "[]")
self.check([1], "[1]")
self.check([1,2,3], "[1, 2, 3]")
self.check([2,2,[4,5,6]], "[1 + 1, 2 * 3 - 4, [4, 5, (6)]]")
self.checkError("[")
self.checkError("]")
self.checkError(",")
self.checkError("[1")
self.checkError("[1,")
self.checkError("1]")
self.checkError(",1]")
self.checkError(",1")
self.checkError("1,")
self.checkError("1,1")
def test_error(self):
self.checkError(" ^ 3")
self.checkError("2 ^")
self.checkError("1 * * 2")
self.checkError("( 1")
self.checkError(") 1")
self.checkError("1 (")
self.checkError("1 )")
self.checkError("1 +")
def checkCount(self, expected, input):
p = Parser(expr_spec, tokenizer.tokenize(input))
for i in range(expected):
self.assertFalse(p.at_end())
e = p.parse()
self.assertTrue(p.at_end())
def test_multiple(self):
self.checkCount(1, "1")
self.checkCount(2, "1 2 * 3")
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == "-t":
sys.argv.pop(1)
unittest.main()
else:
while True:
try:
                program = input("> ")
for result in parse_expr(program):
print(repr(result))
except EOFError:
print("")
exit(0)
|
const express = require("express");
const connection = require("../database/connection");
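// Note: Express identifies error-handling middleware by its four-argument
// signature (error, request, response, next), so this handler should be
// registered with app.use() after all routes and other middleware.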
module.exports = (error, request, response, next) => {
console.error({ error });
  // error.message, error.stack and error.constructor are available here
  // if more detailed logging is ever needed.
// const a = connection("error").insert({
// errorMessage: error.toString(),
// request,
// response,
// });
return response.status(500).json({ message: "Internal Server Error" });
};
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
## Command ``qey``
"""
import argparse
import os
import sys
import subprocess
import psutil
from qey.json_handling import read_json, write_json
from qey.hotkeys_handling import write_hotstrings, get_hotstrings
from qey.os_detection import is_linux
WELCOME_MESSAGE = """
Welcome to the wonderful world of
__ _ ___ _ _
/ _` |/ _ \ | | |
| (_| | __/ |_| |
\__, |\___|\__, |.
| | __/ |
|_| |___/
`qey` is a package to configure hotstrings with ease.
Start it by running `qey start`. Try it out by writing down anywhere '^cat'.
Run `qey --help` for more details.
"""
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
HOME = os.path.expanduser("~")
CONFIG_PATH = os.path.join(HOME, '.config')
CONFIG_QEY_PATH = os.path.join(CONFIG_PATH, 'qey')
CONFIG_FILE = os.path.join(CONFIG_QEY_PATH, 'config.json')
PIDS_PATH = os.path.join(CONFIG_QEY_PATH, 'pids')
HOTCHAR = '^'
if not os.path.isdir(CONFIG_PATH):
os.mkdir(CONFIG_PATH)
if not os.path.isdir(CONFIG_QEY_PATH):
os.mkdir(CONFIG_QEY_PATH)
if not os.path.isfile(CONFIG_FILE):
DEFAULT_INI_FILE = os.path.join(CONFIG_QEY_PATH, 'hotstrings.ini').replace('\\', '/')
with open(CONFIG_FILE, 'w', encoding='utf-8') as f:
f.write('{{"INI_FILE" : "{}"}}'.format(DEFAULT_INI_FILE))
with open(DEFAULT_INI_FILE, 'w', encoding='utf-8') as f:
f.write('cat 😺')
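    # The default hotstring file maps 'cat' to the cat emoji, so typing
    # HOTCHAR + 'cat' (i.e. '^cat') expands to it once `qey start` is running.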
if not os.path.isdir(PIDS_PATH):
os.mkdir(PIDS_PATH)
def start():
"""Start `qey`."""
hotstrings = get_hotstrings(CONFIG_FILE, HOTCHAR)
hotstring_file = os.path.join(CONFIG_QEY_PATH, 'hotstrings')
    extension = '.ahk' if not is_linux() else ''
    hotstring_file += extension
write_hotstrings(hotstrings, hotstring_file, HOTCHAR)
if is_linux():
hotstring_executor = os.path.join(CURRENT_PATH, "linux", "autokey_simple.py")
cmd = '{python} {hotstring_executor} {file} &'
cmd = cmd.format(python=sys.executable, hotstring_executor=hotstring_executor, file=hotstring_file)
else:
hotstring_executor = os.path.join(CURRENT_PATH, "windows", "AutoHotkey.exe")
cmd = 'cmd.exe /C start "{hotstring_executor}" "{file}"'.format(hotstring_executor=hotstring_executor, file=hotstring_file)
subprocess.call(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def stop():
"""Stop `qey`."""
for file in os.listdir(PIDS_PATH):
try:
pid = int(file)
p = psutil.Process(pid)
p.terminate()
if os.path.isfile(os.path.join(PIDS_PATH, file)):
os.remove(os.path.join(PIDS_PATH, file))
except Exception:
os.remove(os.path.join(PIDS_PATH, file))
def edit():
"""Edit the hotstring file."""
data = read_json(CONFIG_FILE)
ini_file = data.get("INI_FILE", None)
if ini_file is not None:
cmd = 'xdg-open {}' if is_linux() else 'cmd.exe /C start "" "{}"'
subprocess.call(cmd.format(ini_file), shell=True, stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
def set_file(filename: str):
"""Set the INI file containing hotstrings."""
data = read_json(CONFIG_FILE)
data["INI_FILE"] = os.path.abspath(filename)
write_json(CONFIG_FILE, data)
if os.listdir(PIDS_PATH):
start()
def main():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest="subparser_name")
subparsers.add_parser('start', description=start.__doc__)
subparsers.add_parser('stop', description=stop.__doc__)
subparsers.add_parser('edit', description=edit.__doc__)
set_file_parser = subparsers.add_parser('set_file', description=set_file.__doc__)
set_file_parser.add_argument('path', type=str)
args = parser.parse_args()
if args.subparser_name == 'start':
start()
elif args.subparser_name == 'stop':
stop()
elif args.subparser_name == 'edit':
edit()
elif args.subparser_name == 'set_file':
set_file(args.path)
else:
print(WELCOME_MESSAGE)
if __name__ == '__main__':
main()
|
import axios from "axios";
import { MessageBox, Message } from "element-ui";
import store from "@/store";
// create an axios instance
const service = axios.create({
baseURL: process.env.VUE_APP_BASE_API, // url = base url + request url
// withCredentials: true, // send cookies when cross-domain requests
timeout: 20000 // request timeout
});
// request interceptor
service.interceptors.request.use(config => {
// do something before request is sent
if (store.getters.token) {
// let each request carry token
// ['X-Token'] is a custom headers key
// please modify it according to the actual situation
config.headers["token"] = store.getters.token;
}
return config;
});
// response interceptor
service.interceptors.response.use(
/**
* If you want to get http information such as headers or status
* Please return response => response
*/
/**
* Determine the request status by custom code
* Here is just an example
* You can also judge the status by HTTP Status Code
*/
response => {
const res = response.data;
// if the custom code is not 20000, it is judged as an error.
if (res.code !== 20000 && res.code !== 200) {
Message({
message: res.message || "Error",
type: "error",
duration: 5 * 1000
});
// 50008: Illegal token; 50012: Other clients logged in; 50014: Token expired;
if (res.code === 50008 || res.code === 50012 || res.code === 50014) {
// to re-login
MessageBox.confirm(
"You have been logged out, you can cancel to stay on this page, or log in again",
"Confirm logout",
{
confirmButtonText: "Re-Login",
cancelButtonText: "Cancel",
type: "warning"
}
).then(() => {
store.dispatch("user/resetToken").then(() => {
location.reload();
});
});
}
return Promise.reject(new Error(res.message || "Error"));
} else {
return res;
}
},
error => {
console.log("err" + error); // for debug
Message({
message: error.message,
type: "error",
duration: 5 * 1000
});
return Promise.reject(error);
}
);
export default service;
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import warnings
from . import get_submodules_from_kwargs
from . import imagenet_utils
from .imagenet_utils import decode_predictions
from .imagenet_utils import _obtain_input_shape
preprocess_input_vgg = imagenet_utils.preprocess_input
WEIGHTS_PATH_VGG = ('https://github.com/fchollet/deep-learning-models/'
'releases/download/v0.1/'
'vgg16_weights_tf_dim_ordering_tf_kernels.h5')
WEIGHTS_PATH_NO_TOP_VGG = ('https://github.com/fchollet/deep-learning-models/'
'releases/download/v0.1/'
'vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5')
def VGG16(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
"""Instantiates the VGG16 architecture.
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
# Arguments
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)`
(with `channels_last` data format)
or `(3, 224, 224)` (with `channels_first` data format).
It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
"""
backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)
if not (weights in {'imagenet', None} or os.path.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = _obtain_input_shape(input_shape,
default_size=224,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
# Block 1
x = layers.Conv2D(64, (3, 3),
activation='relu',
padding='same',
name='block1_conv1')(img_input)
x = layers.Conv2D(64, (3, 3),
activation='relu',
padding='same',
name='block1_conv2')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
# Block 2
x = layers.Conv2D(128, (3, 3),
activation='relu',
padding='same',
name='block2_conv1')(x)
x = layers.Conv2D(128, (3, 3),
activation='relu',
padding='same',
name='block2_conv2')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
# Block 3
x = layers.Conv2D(256, (3, 3),
activation='relu',
padding='same',
name='block3_conv1')(x)
x = layers.Conv2D(256, (3, 3),
activation='relu',
padding='same',
name='block3_conv2')(x)
x = layers.Conv2D(256, (3, 3),
activation='relu',
padding='same',
name='block3_conv3')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
# Block 4
x = layers.Conv2D(512, (3, 3),
activation='relu',
padding='same',
name='block4_conv1')(x)
x = layers.Conv2D(512, (3, 3),
activation='relu',
padding='same',
name='block4_conv2')(x)
x = layers.Conv2D(512, (3, 3),
activation='relu',
padding='same',
name='block4_conv3')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
# Block 5
x = layers.Conv2D(512, (3, 3),
activation='relu',
padding='same',
name='block5_conv1')(x)
x = layers.Conv2D(512, (3, 3),
activation='relu',
padding='same',
name='block5_conv2')(x)
x = layers.Conv2D(512, (3, 3),
activation='relu',
padding='same',
name='block5_conv3')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
if include_top:
# Classification block
x = layers.Flatten(name='flatten')(x)
x = layers.Dense(4096, activation='relu', name='fc1')(x)
x = layers.Dense(4096, activation='relu', name='fc2')(x)
x = layers.Dense(classes, activation='softmax', name='predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = keras_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = models.Model(inputs, x, name='vgg16')
# Load weights.
if weights == 'imagenet':
if include_top:
weights_path = keras_utils.get_file(
'vgg16_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH_VGG,
cache_subdir='models',
file_hash='64373286793e3c8b2b4e3219cbf3544b')
else:
weights_path = keras_utils.get_file(
'vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP_VGG,
cache_subdir='models',
file_hash='6d6bbae143d832006294945121d1f1fc')
model.load_weights(weights_path)
if backend.backend() == 'theano':
keras_utils.convert_all_kernels_in_model(model)
elif weights is not None:
model.load_weights(weights)
return model
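# Minimal usage sketch (an assumption about the surrounding setup, not part of
# this module): when called through `keras.applications`, the backend/layers/
# models/utils submodules are injected via **kwargs, so a typical call is
#   model = VGG16(weights='imagenet', include_top=True)
#   preds = model.predict(preprocess_input_vgg(images))   # images: (N, 224, 224, 3)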
# ---------------------------------------------------------------------------- #
preprocess_input_vgg19 = imagenet_utils.preprocess_input
WEIGHTS_PATH_VGG19 = ('https://github.com/fchollet/deep-learning-models/'
'releases/download/v0.1/'
'vgg19_weights_tf_dim_ordering_tf_kernels.h5')
WEIGHTS_PATH_NO_TOP_VGG19 = ('https://github.com/fchollet/deep-learning-models/'
'releases/download/v0.1/'
'vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5'
)
def VGG19(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
"""Instantiates the VGG19 architecture.
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
# Arguments
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)`
(with `channels_last` data format)
or `(3, 224, 224)` (with `channels_first` data format).
            It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
"""
backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)
if not (weights in {'imagenet', None} or os.path.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = _obtain_input_shape(input_shape,
default_size=224,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
# Block 1
x = layers.Conv2D(64, (3, 3),
activation='relu',
padding='same',
name='block1_conv1')(img_input)
x = layers.Conv2D(64, (3, 3),
activation='relu',
padding='same',
name='block1_conv2')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
# Block 2
x = layers.Conv2D(128, (3, 3),
activation='relu',
padding='same',
name='block2_conv1')(x)
x = layers.Conv2D(128, (3, 3),
activation='relu',
padding='same',
name='block2_conv2')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
# Block 3
x = layers.Conv2D(256, (3, 3),
activation='relu',
padding='same',
name='block3_conv1')(x)
x = layers.Conv2D(256, (3, 3),
activation='relu',
padding='same',
name='block3_conv2')(x)
x = layers.Conv2D(256, (3, 3),
activation='relu',
padding='same',
name='block3_conv3')(x)
x = layers.Conv2D(256, (3, 3),
activation='relu',
padding='same',
name='block3_conv4')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
# Block 4
x = layers.Conv2D(512, (3, 3),
activation='relu',
padding='same',
name='block4_conv1')(x)
x = layers.Conv2D(512, (3, 3),
activation='relu',
padding='same',
name='block4_conv2')(x)
x = layers.Conv2D(512, (3, 3),
activation='relu',
padding='same',
name='block4_conv3')(x)
x = layers.Conv2D(512, (3, 3),
activation='relu',
padding='same',
name='block4_conv4')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
# Block 5
x = layers.Conv2D(512, (3, 3),
activation='relu',
padding='same',
name='block5_conv1')(x)
x = layers.Conv2D(512, (3, 3),
activation='relu',
padding='same',
name='block5_conv2')(x)
x = layers.Conv2D(512, (3, 3),
activation='relu',
padding='same',
name='block5_conv3')(x)
x = layers.Conv2D(512, (3, 3),
activation='relu',
padding='same',
name='block5_conv4')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
if include_top:
# Classification block
x = layers.Flatten(name='flatten')(x)
x = layers.Dense(4096, activation='relu', name='fc1')(x)
x = layers.Dense(4096, activation='relu', name='fc2')(x)
x = layers.Dense(classes, activation='softmax', name='predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = keras_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = models.Model(inputs, x, name='vgg19')
# Load weights.
if weights == 'imagenet':
if include_top:
weights_path = keras_utils.get_file(
'vgg19_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH_VGG19,
cache_subdir='models',
file_hash='cbe5617147190e668d6c5d5026f83318')
else:
weights_path = keras_utils.get_file(
'vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP_VGG19,
cache_subdir='models',
file_hash='253f8cb515780f3b799900260a226db6')
model.load_weights(weights_path)
if backend.backend() == 'theano':
keras_utils.convert_all_kernels_in_model(model)
elif weights is not None:
model.load_weights(weights)
return model
# ---------------------------------------------------------------------------- #
preprocess_input = imagenet_utils.preprocess_input
WEIGHTS_PATH = ('https://github.com/fchollet/deep-learning-models/'
'releases/download/v0.2/'
'resnet50_weights_tf_dim_ordering_tf_kernels.h5')
WEIGHTS_PATH_NO_TOP = ('https://github.com/fchollet/deep-learning-models/'
'releases/download/v0.2/'
'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
backend = None
layers = None
models = None
keras_utils = None
def identity_block(input_tensor, kernel_size, filters, stage, block):
"""The identity block is the block that has no conv layer at shortcut.
# Arguments
input_tensor: input tensor
kernel_size: default 3, the kernel size of
middle conv layer at main path
filters: list of integers, the filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
# Returns
Output tensor for the block.
"""
filters1, filters2, filters3 = filters
if backend.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = layers.Conv2D(filters1, (1, 1),
kernel_initializer='he_normal',
name=conv_name_base + '2a')(input_tensor)
x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
x = layers.Activation('relu')(x)
x = layers.Conv2D(filters2, kernel_size,
padding='same',
kernel_initializer='he_normal',
name=conv_name_base + '2b')(x)
x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
x = layers.Activation('relu')(x)
x = layers.Conv2D(filters3, (1, 1),
kernel_initializer='he_normal',
name=conv_name_base + '2c')(x)
x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
x = layers.add([x, input_tensor])
x = layers.Activation('relu')(x)
return x
def conv_block(input_tensor,
kernel_size,
filters,
stage,
block,
strides=(2, 2)):
"""A block that has a conv layer at shortcut.
# Arguments
input_tensor: input tensor
kernel_size: default 3, the kernel size of
middle conv layer at main path
filters: list of integers, the filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
strides: Strides for the first conv layer in the block.
# Returns
Output tensor for the block.
Note that from stage 3,
the first conv layer at main path is with strides=(2, 2)
And the shortcut should have strides=(2, 2) as well
"""
filters1, filters2, filters3 = filters
if backend.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = layers.Conv2D(filters1, (1, 1), strides=strides,
kernel_initializer='he_normal',
name=conv_name_base + '2a')(input_tensor)
x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
x = layers.Activation('relu')(x)
x = layers.Conv2D(filters2, kernel_size, padding='same',
kernel_initializer='he_normal',
name=conv_name_base + '2b')(x)
x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
x = layers.Activation('relu')(x)
x = layers.Conv2D(filters3, (1, 1),
kernel_initializer='he_normal',
name=conv_name_base + '2c')(x)
x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
shortcut = layers.Conv2D(filters3, (1, 1), strides=strides,
kernel_initializer='he_normal',
name=conv_name_base + '1')(input_tensor)
shortcut = layers.BatchNormalization(
axis=bn_axis, name=bn_name_base + '1')(shortcut)
x = layers.add([x, shortcut])
x = layers.Activation('relu')(x)
return x
def ResNet50(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
"""Instantiates the ResNet50 architecture.
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
# Arguments
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
or `(3, 224, 224)` (with `channels_first` data format).
            It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
"""
global backend, layers, models, keras_utils
backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)
if not (weights in {'imagenet', None} or os.path.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = _obtain_input_shape(input_shape,
default_size=224,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
if backend.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(img_input)
x = layers.Conv2D(64, (7, 7),
strides=(2, 2),
padding='valid',
kernel_initializer='he_normal',
name='conv1')(x)
x = layers.BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
x = layers.Activation('relu')(x)
x = layers.ZeroPadding2D(padding=(1, 1), name='pool1_pad')(x)
x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')
x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
if include_top:
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
x = layers.Dense(classes, activation='softmax', name='fc1000')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D()(x)
else:
warnings.warn('The output shape of `ResNet50(include_top=False)` '
'has been changed since Keras 2.2.0.')
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = keras_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = models.Model(inputs, x, name='resnet50')
# Load weights.
if weights == 'imagenet':
if include_top:
weights_path = keras_utils.get_file(
'resnet50_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH,
cache_subdir='models',
md5_hash='a7b3fe01876f51b976af0dea6bc144eb')
else:
weights_path = keras_utils.get_file(
'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
md5_hash='a268eb855778b3df3c7506639542a6af')
model.load_weights(weights_path)
if backend.backend() == 'theano':
keras_utils.convert_all_kernels_in_model(model)
elif weights is not None:
model.load_weights(weights)
return model
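# Usage sketch (same wiring assumption as the VGG16 note above): with
# include_top=False and pooling='avg', ResNet50 acts as a 2048-d feature
# extractor, e.g.
#   extractor = ResNet50(weights='imagenet', include_top=False, pooling='avg')
#   features = extractor.predict(preprocess_input(images))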
# ---------------------------------------------------------------------------- #
WEIGHTS_PATH = (
'https://github.com/fchollet/deep-learning-models/'
'releases/download/v0.5/'
'inception_v3_weights_tf_dim_ordering_tf_kernels.h5')
WEIGHTS_PATH_NO_TOP = (
'https://github.com/fchollet/deep-learning-models/'
'releases/download/v0.5/'
'inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5')
backend = None
layers = None
models = None
keras_utils = None
def conv2d_bn(x,
filters,
num_row,
num_col,
padding='same',
strides=(1, 1),
name=None):
"""Utility function to apply conv + BN.
# Arguments
x: input tensor.
filters: filters in `Conv2D`.
num_row: height of the convolution kernel.
num_col: width of the convolution kernel.
padding: padding mode in `Conv2D`.
strides: strides in `Conv2D`.
name: name of the ops; will become `name + '_conv'`
for the convolution and `name + '_bn'` for the
batch norm layer.
# Returns
Output tensor after applying `Conv2D` and `BatchNormalization`.
"""
if name is not None:
bn_name = name + '_bn'
conv_name = name + '_conv'
else:
bn_name = None
conv_name = None
if backend.image_data_format() == 'channels_first':
bn_axis = 1
else:
bn_axis = 3
x = layers.Conv2D(
filters, (num_row, num_col),
strides=strides,
padding=padding,
use_bias=False,
name=conv_name)(x)
x = layers.BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)
x = layers.Activation('relu', name=name)(x)
return x
def InceptionV3(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
"""Instantiates the Inception v3 architecture.
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
# Arguments
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(299, 299, 3)` (with `channels_last` data format)
or `(3, 299, 299)` (with `channels_first` data format).
            It should have exactly 3 input channels,
and width and height should be no smaller than 75.
E.g. `(150, 150, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
"""
global backend, layers, models, keras_utils
backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)
if not (weights in {'imagenet', None} or os.path.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = _obtain_input_shape(
input_shape,
default_size=299,
min_size=75,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
if backend.image_data_format() == 'channels_first':
channel_axis = 1
else:
channel_axis = 3
x = conv2d_bn(img_input, 32, 3, 3, strides=(2, 2), padding='valid')
x = conv2d_bn(x, 32, 3, 3, padding='valid')
x = conv2d_bn(x, 64, 3, 3)
x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv2d_bn(x, 80, 1, 1, padding='valid')
x = conv2d_bn(x, 192, 3, 3, padding='valid')
x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
# mixed 0: 35 x 35 x 256
branch1x1 = conv2d_bn(x, 64, 1, 1)
branch5x5 = conv2d_bn(x, 48, 1, 1)
branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = layers.AveragePooling2D((3, 3),
strides=(1, 1),
padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 32, 1, 1)
x = layers.concatenate(
[branch1x1, branch5x5, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed0')
# mixed 1: 35 x 35 x 288
branch1x1 = conv2d_bn(x, 64, 1, 1)
branch5x5 = conv2d_bn(x, 48, 1, 1)
branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = layers.AveragePooling2D((3, 3),
strides=(1, 1),
padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
x = layers.concatenate(
[branch1x1, branch5x5, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed1')
# mixed 2: 35 x 35 x 288
branch1x1 = conv2d_bn(x, 64, 1, 1)
branch5x5 = conv2d_bn(x, 48, 1, 1)
branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = layers.AveragePooling2D((3, 3),
strides=(1, 1),
padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
x = layers.concatenate(
[branch1x1, branch5x5, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed2')
# mixed 3: 17 x 17 x 768
branch3x3 = conv2d_bn(x, 384, 3, 3, strides=(2, 2), padding='valid')
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(
branch3x3dbl, 96, 3, 3, strides=(2, 2), padding='valid')
branch_pool = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
x = layers.concatenate(
[branch3x3, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed3')
# mixed 4: 17 x 17 x 768
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 128, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 128, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 128, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = layers.AveragePooling2D((3, 3),
strides=(1, 1),
padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch7x7, branch7x7dbl, branch_pool],
axis=channel_axis,
name='mixed4')
# mixed 5, 6: 17 x 17 x 768
for i in range(2):
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 160, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 160, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 160, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = layers.AveragePooling2D(
(3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch7x7, branch7x7dbl, branch_pool],
axis=channel_axis,
name='mixed' + str(5 + i))
# mixed 7: 17 x 17 x 768
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 192, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 192, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = layers.AveragePooling2D((3, 3),
strides=(1, 1),
padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch7x7, branch7x7dbl, branch_pool],
axis=channel_axis,
name='mixed7')
# mixed 8: 8 x 8 x 1280
branch3x3 = conv2d_bn(x, 192, 1, 1)
branch3x3 = conv2d_bn(branch3x3, 320, 3, 3,
strides=(2, 2), padding='valid')
branch7x7x3 = conv2d_bn(x, 192, 1, 1)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1)
branch7x7x3 = conv2d_bn(
branch7x7x3, 192, 3, 3, strides=(2, 2), padding='valid')
branch_pool = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
x = layers.concatenate(
[branch3x3, branch7x7x3, branch_pool],
axis=channel_axis,
name='mixed8')
# mixed 9: 8 x 8 x 2048
for i in range(2):
branch1x1 = conv2d_bn(x, 320, 1, 1)
branch3x3 = conv2d_bn(x, 384, 1, 1)
branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3)
branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1)
branch3x3 = layers.concatenate(
[branch3x3_1, branch3x3_2],
axis=channel_axis,
name='mixed9_' + str(i))
branch3x3dbl = conv2d_bn(x, 448, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3)
branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3)
branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1)
branch3x3dbl = layers.concatenate(
[branch3x3dbl_1, branch3x3dbl_2], axis=channel_axis)
branch_pool = layers.AveragePooling2D(
(3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch3x3, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed' + str(9 + i))
if include_top:
# Classification block
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
x = layers.Dense(classes, activation='softmax', name='predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = keras_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = models.Model(inputs, x, name='inception_v3')
# Load weights.
if weights == 'imagenet':
if include_top:
weights_path = keras_utils.get_file(
'inception_v3_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH,
cache_subdir='models',
file_hash='9a0d58056eeedaa3f26cb7ebd46da564')
else:
weights_path = keras_utils.get_file(
'inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
file_hash='bcbd6486424b2319ff4ef7d526e38f63')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
def preprocess_input(x, **kwargs):
"""Preprocesses a numpy array encoding a batch of images.
# Arguments
x: a 4D numpy array consists of RGB values within [0, 255].
# Returns
Preprocessed array.
"""
return imagenet_utils.preprocess_input(x, mode='tf', **kwargs)
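# Note: mode='tf' rescales input pixels from [0, 255] to [-1, 1], which is what
# Inception-style networks expect, unlike the default 'caffe' mode used for the
# VGG/ResNet weights above (BGR channel order with ImageNet mean subtraction).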
# ---------------------------------------------------------------------------- #
TF_WEIGHTS_PATH = (
'https://github.com/fchollet/deep-learning-models/'
'releases/download/v0.4/'
'xception_weights_tf_dim_ordering_tf_kernels.h5')
TF_WEIGHTS_PATH_NO_TOP = (
'https://github.com/fchollet/deep-learning-models/'
'releases/download/v0.4/'
'xception_weights_tf_dim_ordering_tf_kernels_notop.h5')
def Xception(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
"""Instantiates the Xception architecture.
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
Note that the default input image size for this model is 299x299.
# Arguments
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(299, 299, 3)`.
            It should have exactly 3 input channels,
and width and height should be no smaller than 71.
E.g. `(150, 150, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True,
and if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
RuntimeError: If attempting to run this model with a
backend that does not support separable convolutions.
"""
backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)
if not (weights in {'imagenet', None} or os.path.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = _obtain_input_shape(input_shape,
default_size=299,
min_size=71,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
x = layers.Conv2D(32, (3, 3),
strides=(2, 2),
use_bias=False,
name='block1_conv1')(img_input)
x = layers.BatchNormalization(axis=channel_axis, name='block1_conv1_bn')(x)
x = layers.Activation('relu', name='block1_conv1_act')(x)
x = layers.Conv2D(64, (3, 3), use_bias=False, name='block1_conv2')(x)
x = layers.BatchNormalization(axis=channel_axis, name='block1_conv2_bn')(x)
x = layers.Activation('relu', name='block1_conv2_act')(x)
residual = layers.Conv2D(128, (1, 1),
strides=(2, 2),
padding='same',
use_bias=False)(x)
residual = layers.BatchNormalization(axis=channel_axis)(residual)
x = layers.SeparableConv2D(128, (3, 3),
padding='same',
use_bias=False,
name='block2_sepconv1')(x)
x = layers.BatchNormalization(axis=channel_axis, name='block2_sepconv1_bn')(x)
x = layers.Activation('relu', name='block2_sepconv2_act')(x)
x = layers.SeparableConv2D(128, (3, 3),
padding='same',
use_bias=False,
name='block2_sepconv2')(x)
x = layers.BatchNormalization(axis=channel_axis, name='block2_sepconv2_bn')(x)
x = layers.MaxPooling2D((3, 3),
strides=(2, 2),
padding='same',
name='block2_pool')(x)
x = layers.add([x, residual])
residual = layers.Conv2D(256, (1, 1), strides=(2, 2),
padding='same', use_bias=False)(x)
residual = layers.BatchNormalization(axis=channel_axis)(residual)
x = layers.Activation('relu', name='block3_sepconv1_act')(x)
x = layers.SeparableConv2D(256, (3, 3),
padding='same',
use_bias=False,
name='block3_sepconv1')(x)
x = layers.BatchNormalization(axis=channel_axis, name='block3_sepconv1_bn')(x)
x = layers.Activation('relu', name='block3_sepconv2_act')(x)
x = layers.SeparableConv2D(256, (3, 3),
padding='same',
use_bias=False,
name='block3_sepconv2')(x)
x = layers.BatchNormalization(axis=channel_axis, name='block3_sepconv2_bn')(x)
x = layers.MaxPooling2D((3, 3), strides=(2, 2),
padding='same',
name='block3_pool')(x)
x = layers.add([x, residual])
residual = layers.Conv2D(728, (1, 1),
strides=(2, 2),
padding='same',
use_bias=False)(x)
residual = layers.BatchNormalization(axis=channel_axis)(residual)
x = layers.Activation('relu', name='block4_sepconv1_act')(x)
x = layers.SeparableConv2D(728, (3, 3),
padding='same',
use_bias=False,
name='block4_sepconv1')(x)
x = layers.BatchNormalization(axis=channel_axis, name='block4_sepconv1_bn')(x)
x = layers.Activation('relu', name='block4_sepconv2_act')(x)
x = layers.SeparableConv2D(728, (3, 3),
padding='same',
use_bias=False,
name='block4_sepconv2')(x)
x = layers.BatchNormalization(axis=channel_axis, name='block4_sepconv2_bn')(x)
x = layers.MaxPooling2D((3, 3), strides=(2, 2),
padding='same',
name='block4_pool')(x)
x = layers.add([x, residual])
for i in range(8):
residual = x
prefix = 'block' + str(i + 5)
x = layers.Activation('relu', name=prefix + '_sepconv1_act')(x)
x = layers.SeparableConv2D(728, (3, 3),
padding='same',
use_bias=False,
name=prefix + '_sepconv1')(x)
x = layers.BatchNormalization(axis=channel_axis,
name=prefix + '_sepconv1_bn')(x)
x = layers.Activation('relu', name=prefix + '_sepconv2_act')(x)
x = layers.SeparableConv2D(728, (3, 3),
padding='same',
use_bias=False,
name=prefix + '_sepconv2')(x)
x = layers.BatchNormalization(axis=channel_axis,
name=prefix + '_sepconv2_bn')(x)
x = layers.Activation('relu', name=prefix + '_sepconv3_act')(x)
x = layers.SeparableConv2D(728, (3, 3),
padding='same',
use_bias=False,
name=prefix + '_sepconv3')(x)
x = layers.BatchNormalization(axis=channel_axis,
name=prefix + '_sepconv3_bn')(x)
x = layers.add([x, residual])
residual = layers.Conv2D(1024, (1, 1), strides=(2, 2),
padding='same', use_bias=False)(x)
residual = layers.BatchNormalization(axis=channel_axis)(residual)
x = layers.Activation('relu', name='block13_sepconv1_act')(x)
x = layers.SeparableConv2D(728, (3, 3),
padding='same',
use_bias=False,
name='block13_sepconv1')(x)
x = layers.BatchNormalization(axis=channel_axis, name='block13_sepconv1_bn')(x)
x = layers.Activation('relu', name='block13_sepconv2_act')(x)
x = layers.SeparableConv2D(1024, (3, 3),
padding='same',
use_bias=False,
name='block13_sepconv2')(x)
x = layers.BatchNormalization(axis=channel_axis, name='block13_sepconv2_bn')(x)
x = layers.MaxPooling2D((3, 3),
strides=(2, 2),
padding='same',
name='block13_pool')(x)
x = layers.add([x, residual])
x = layers.SeparableConv2D(1536, (3, 3),
padding='same',
use_bias=False,
name='block14_sepconv1')(x)
x = layers.BatchNormalization(axis=channel_axis, name='block14_sepconv1_bn')(x)
x = layers.Activation('relu', name='block14_sepconv1_act')(x)
x = layers.SeparableConv2D(2048, (3, 3),
padding='same',
use_bias=False,
name='block14_sepconv2')(x)
x = layers.BatchNormalization(axis=channel_axis, name='block14_sepconv2_bn')(x)
x = layers.Activation('relu', name='block14_sepconv2_act')(x)
if include_top:
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
x = layers.Dense(classes, activation='softmax', name='predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = keras_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = models.Model(inputs, x, name='xception')
# Load weights.
if weights == 'imagenet':
if include_top:
weights_path = keras_utils.get_file(
'xception_weights_tf_dim_ordering_tf_kernels.h5',
TF_WEIGHTS_PATH,
cache_subdir='models',
file_hash='0a58e3b7378bc2990ea3b43d5981f1f6')
else:
weights_path = keras_utils.get_file(
'xception_weights_tf_dim_ordering_tf_kernels_notop.h5',
TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
file_hash='b0042744bf5b25fce3cb969f33bebb97')
model.load_weights(weights_path)
if backend.backend() == 'theano':
keras_utils.convert_all_kernels_in_model(model)
elif weights is not None:
model.load_weights(weights)
return model
def preprocess_input(x, **kwargs):
"""Preprocesses a numpy array encoding a batch of images.
# Arguments
x: a 4D numpy array consists of RGB values within [0, 255].
# Returns
Preprocessed array.
"""
return imagenet_utils.preprocess_input(x, mode='tf', **kwargs)
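# Usage sketch (hedged, same wiring assumption as the earlier models): Xception
# defaults to 299x299 inputs and the same 'tf'-style preprocessing, e.g.
#   model = Xception(weights='imagenet')
#   preds = model.predict(preprocess_input(images))   # images: (N, 299, 299, 3)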
|
# pylint: disable=missing-docstring, protected-access
import unittest
import warnings
from iCount import demultiplex
from iCount.tests.utils import make_fastq_file, get_temp_dir
class TestDemultiplex(unittest.TestCase):
def setUp(self):
self.dir = get_temp_dir()
self.adapter = 'CCCCCCCCC'
self.barcodes = [
'NNNGGTTNN',
'NNNTTGTNN',
'NNNCAATNN',
'NNNACCTNN',
'NNNGGCGNN',
]
self.reads = make_fastq_file(barcodes=self.barcodes, adapter=self.adapter)
warnings.simplefilter("ignore", ResourceWarning)
def test_run_fail(self):
message = r'Output directory does not exist. Make sure it does.'
with self.assertRaisesRegex(FileNotFoundError, message):
demultiplex.run(
self.reads, self.adapter, self.barcodes, mismatches=1, out_dir='/unexisting/dir')
def test_run_ok(self):
expected = ['{}/demux_{}.fastq.gz'.format(self.dir, b) for b in self.barcodes]
expected.extend([self.dir + '/demux_nomatch.fastq.gz'])
# Without adapter
filenames = demultiplex.run(
self.reads, None, self.barcodes, mismatches=1, out_dir=self.dir)
self.assertEqual(sorted(filenames), sorted(expected))
# With adapter
filenames = demultiplex.run(
self.reads, self.adapter, self.barcodes, mismatches=1, out_dir=self.dir)
self.assertEqual(sorted(filenames), sorted(expected))
if __name__ == '__main__':
unittest.main()
|
import { expect } from 'chai'
import sinon from 'sinon'
import React from 'react'
import TestUtils from 'react-addons-test-utils'
import TodoTextInput from '../../../app/components/TodoTextInput'
import style from '../../../app/components/TodoTextInput.css'
function setup(propOverrides) {
const props = {
onSave: sinon.spy(),
text: 'Use Redux',
placeholder: 'What needs to be done?',
editing: false,
newTodo: false,
...propOverrides
}
const renderer = TestUtils.createRenderer()
renderer.render(<TodoTextInput {...props} />)
  const output = renderer.getRenderOutput()
return { props, output, renderer }
}
describe('todoapp TodoTextInput component', () => {
it('should render correctly', () => {
const { output } = setup()
expect(output.props.placeholder).to.equal('What needs to be done?')
expect(output.props.value).to.equal('Use Redux')
expect(output.props.className).to.equal('')
})
it('should render correctly when editing=true', () => {
const { output } = setup({ editing: true })
expect(output.props.className).to.equal(style.edit)
})
it('should render correctly when newTodo=true', () => {
const { output } = setup({ newTodo: true })
expect(output.props.className).to.equal(style.new)
})
it('should update value on change', () => {
const { output, renderer } = setup()
output.props.onChange({ target: { value: 'Use Radox' } })
const updated = renderer.getRenderOutput()
expect(updated.props.value).to.equal('Use Radox')
})
it('should call onSave on return key press', () => {
const { output, props } = setup()
output.props.onKeyDown({ which: 13, target: { value: 'Use Redux' } })
expect(props.onSave.calledWith('Use Redux')).to.equal(true)
})
it('should reset state on return key press if newTodo', () => {
const { output, renderer } = setup({ newTodo: true })
output.props.onKeyDown({ which: 13, target: { value: 'Use Redux' } })
const updated = renderer.getRenderOutput()
expect(updated.props.value).to.equal('')
})
it('should call onSave on blur', () => {
const { output, props } = setup()
output.props.onBlur({ target: { value: 'Use Redux' } })
expect(props.onSave.calledWith('Use Redux')).to.equal(true)
})
  it('should not call onSave on blur if newTodo', () => {
const { output, props } = setup({ newTodo: true })
output.props.onBlur({ target: { value: 'Use Redux' } })
expect(props.onSave.callCount).to.equal(0)
})
})
|
## https://leetcode.com/problems/vowel-spellchecker/
## problem is to spellcheck queries against a valid wordlist.
## spellcheck/possible corrections are defined as subbing all
## vowels in the word with all others (i.e., only the vowels
## can be wrong).
## unsurprisingly, this dumb, brute-force solution is too slow
## because every vowel we hit multiplies the size of our possible
## word list by five.
from typing import List
class Solution:
    def spellchecker(self, wordlist: List[str], queries: List[str]) -> List[str]:
output = []
vowels = 'aeiou'
lower_words = [w.lower() for w in wordlist]
for query in queries:
## first check for any matches in the valid wordlist
if query in wordlist:
output.append(query)
continue
## and do the same for lower-case
q = query.lower()
if q in lower_words:
output.append(wordlist[lower_words.index(q)])
continue
## build up the list of possible words we can make
## out of the consonants in our word + all possible vowels
possible_words = ['']
## loop over the characters
for c in q:
## if it's not a vowel, we just add this character to every
## word in our list
if c not in vowels:
new_words = [w+c for w in possible_words]
possible_words = new_words
## if it is a vowel, we have to add 5 new possible paths to our
## words -- ___+a, ___+e, ___+i, ....
else:
new_words = [oldr + v for v in vowels for oldr in possible_words]
possible_words = new_words
## now we loop over those possible words and
## check if any of them are in the wordlist.
## if they are, we return the first entry to
## appear in the wordlist
found_words_by_index = {}
for w in possible_words:
if w in lower_words:
idx = lower_words.index(w)
found_words_by_index[idx] = wordlist[idx]
## if there aren't any, we return an empty string
if len(found_words_by_index) == 0:
output.append('')
else:
output.append(found_words_by_index[min(found_words_by_index.keys())])
return output
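## For reference, here is a sketch (not part of the original solution above) of
## the standard fast approach: precompute an exact-match set, a lowercase lookup
## and a "devoweled" lookup, each keeping the first matching word, so every
## query resolves in roughly constant time.
class SolutionFast:
    def spellchecker(self, wordlist: List[str], queries: List[str]) -> List[str]:
        def devowel(word):
            ## replace every vowel with a wildcard, e.g. "KiTe" -> "k*t*"
            return ''.join('*' if ch in 'aeiou' else ch for ch in word.lower())
        exact = set(wordlist)
        lower = {}
        vowelless = {}
        for w in wordlist:
            ## setdefault keeps the first word seen for each key
            lower.setdefault(w.lower(), w)
            vowelless.setdefault(devowel(w), w)
        answer = []
        for q in queries:
            if q in exact:
                answer.append(q)
            elif q.lower() in lower:
                answer.append(lower[q.lower()])
            else:
                answer.append(vowelless.get(devowel(q), ''))
        return answer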
|
"""
WSGI config for girlsgoit project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "girlsgoit.settings.local")
application = get_wsgi_application()
|
from rest_framework import serializers
from .models import Todo
class TodoSerializer(serializers.ModelSerializer):
class Meta:
model = Todo
fields = ('id', 'title', 'body',)
|
/**
* Generated bundle index. Do not edit.
*/
export * from './index';
//# sourceMappingURL=data:application/json;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoibWFwLXdhbGQuanMiLCJzb3VyY2VSb290IjoiIiwic291cmNlcyI6WyIuLi8uLi9zcmMvbWFwLXdhbGQudHMiXSwibmFtZXMiOltdLCJtYXBwaW5ncyI6IkFBQUE7O0dBRUc7QUFFSCxjQUFjLFNBQVMsQ0FBQyIsInNvdXJjZXNDb250ZW50IjpbIi8qKlxuICogR2VuZXJhdGVkIGJ1bmRsZSBpbmRleC4gRG8gbm90IGVkaXQuXG4gKi9cblxuZXhwb3J0ICogZnJvbSAnLi9pbmRleCc7XG4iXX0= |
/**
* Subscribe with Google module initialization.
*
* Site Kit by Google, Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Internal dependencies
*/
import { SetupMain } from './components/setup';
import {
SettingsEdit,
SettingsView,
} from '../subscribe-with-google/components/settings';
import SubscribeWithGoogleIcon from '../../../svg/graphics/logo-g.svg';
import { STORE_NAME } from './datastore/constants';
import { isFeatureEnabled } from '../../features';
export { registerStore } from '../subscribe-with-google/datastore';
const ifSwgIsEnabled = ( func ) => ( ...args ) => {
if ( isFeatureEnabled( 'swgModule' ) ) {
func( ...args );
}
};
export const registerModule = ifSwgIsEnabled( ( modules ) => {
modules.registerModule( 'subscribe-with-google', {
storeName: STORE_NAME,
SettingsEditComponent: SettingsEdit,
SettingsViewComponent: SettingsView,
SetupComponent: SetupMain,
// TODO: Replace with another icon later.
Icon: SubscribeWithGoogleIcon,
} );
} );
|
/**
* Created by glenn on 18.09.16.
*/
(() => {
'use strict';
angular.module('helloWidgetApp.helloWidget').run(runBlock);
/* @ngInject */
function runBlock(c8ySystem) {
(async () => {
const version = await c8ySystem.getUIVersion();
console.log(version);
// Put your fiddle code here.
console.log('hello, world');
})();
}
})();
|
/**
 * There's a list of file names; since two files cannot have equal names,
* the one which comes later will have a suffix (k),
* where k is the smallest integer such that the found name is not used yet.
*
* Return an array of names that will be given to the files.
*
* @param {Array} names
* @return {Array}
*
* @example
* For input ["file", "file", "image", "file(1)", "file"],
* the output should be ["file", "file(1)", "image", "file(1)(1)", "file(2)"]
*
*/
export default function renameFiles(names) {
const result = []
for (const name of names) {
if (result.indexOf(name) !== -1) {
let i = 1
while (result.indexOf(`${name}(${i})`) !== -1) {
i += 1
}
result.push(`${name}(${i})`);
} else {
result.push(name);
}
}
return result;
}
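// Example usage (illustrative):
// renameFiles(['doc', 'doc', 'image', 'doc(1)'])
// => ['doc', 'doc(1)', 'image', 'doc(1)(1)']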
|
var files =
[
[ "argumatrix", "dir_3642087d0ef9cba84f84b59ebe47f6ca.html", "dir_3642087d0ef9cba84f84b59ebe47f6ca" ]
]; |
import random
import torch
import numpy as np
def set_global_random_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False  # benchmark must stay off for reproducible runs
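# Example usage (sketch): call this once at program start, before building models
# or dataloaders, so python's random, numpy and torch all draw from the same seed.
# set_global_random_seed(42)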
|
'use strict'
const fs = require('fs')
const path = require('path')
let routeName = process.argv[2]
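// Usage sketch (the script's actual filename is not shown in the source): node create-route.js MyRoute
// `routeName` becomes the prefix of the generated files, e.g. MyRouteView.vue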
let routes = fs.readFileSync(
path.join(__dirname, '../../app/src/routes.js'),
'utf8'
).split('\n')
let routeTemplate = fs.readFileSync(
path.join(__dirname, 'route.template.txt'),
'utf8'
)
let routesTemplate = fs.readFileSync(
path.join(__dirname, 'routes.template.txt'),
'utf8'
)
routes[routes.length - 3] = routes[routes.length - 3] + ','
routes.splice(
routes.length - 2,
0,
routesTemplate
    // NOTE: the placeholder regex was lost in the original; a generic {{routeName}} token is assumed here
    .replace(/\{\{routeName\}\}/g, routeName)
.replace(/\n$/, '')
)
fs.writeFileSync(
path.join(__dirname, `../../app/src/components/${routeName}View.vue`),
routeTemplate
)
fs.mkdirSync(path.join(__dirname, `../../app/src/components/${routeName}View`))
fs.writeFileSync(
path.join(__dirname, '../../app/src/routes.js'),
routes.join('\n')
)
console.log(`\n\x1b[33m[vue]\x1b[0m route "${routeName}" has been created`)
console.log(' [ \n' + [
' ' + path.join(__dirname, `../../app/src/components/${routeName}View.vue`),
path.join(__dirname, `../../app/src/components/${routeName}View`),
path.join(__dirname, '../../app/src/routes.js'),
].join(',\n ') + '\n ]') |
module.exports = {
purge: ["./pages/**/*.{js,ts,jsx,tsx}", "./components/**/*.{js,ts,jsx,tsx}"],
darkMode: false, // or 'media' or 'class'
theme: {
fontFamily: {
"logo": "Alfa Slab One",
"sans": "Iosevka Aile Web",
"serif": "Source Serif Pro",
"mono": "Iosevka Web"
},
extend: {
typography: {
"sm": {
css: {
fontFamily: "Source Serif Pro", // Using the font `docs.rs` uses.
h1: {
fontFamily: "Fira Sans", // Ditto
fontSize: "1.5rem"
},
}
}
}
},
},
variants: {
extend: {},
},
plugins: [
require("@tailwindcss/typography")
],
}
|
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
var tslib_1 = require("tslib");
var http_1 = require("@angular/common/http");
var core_1 = require("@angular/core");
var rxjs_1 = require("rxjs");
var electricity_1 = require("../data/electricity");
require("rxjs/add/operator/map");
var ElectricityService = /** @class */ (function (_super) {
tslib_1.__extends(ElectricityService, _super);
function ElectricityService(http) {
var _this = _super.call(this) || this;
_this.http = http;
_this.chartPoints = [
490, 490, 495, 500,
505, 510, 520, 530,
550, 580, 630, 720,
800, 840, 860, 870,
870, 860, 840, 800,
720, 200, 145, 130,
130, 145, 200, 570,
635, 660, 670, 670,
660, 630, 580, 460,
380, 350, 340, 340,
340, 340, 340, 340,
340, 340, 340,
];
_this.chartData = _this.chartPoints.map(function (p, index) { return ({
label: (index % 5 === 3) ? "" + Math.round(index / 5) : '',
value: p,
}); });
_this.actionUrl = 'http://localhost:8081/listusers';
return _this;
}
ElectricityService.prototype.getListData = function () {
this.listData = this.http.get(this.actionUrl);
return this.listData;
};
ElectricityService.prototype.getChartData = function () {
return rxjs_1.of(this.chartData);
};
ElectricityService = tslib_1.__decorate([
core_1.Injectable(),
tslib_1.__metadata("design:paramtypes", [http_1.HttpClient])
], ElectricityService);
return ElectricityService;
}(electricity_1.ElectricityData));
exports.ElectricityService = ElectricityService;
//# sourceMappingURL=electricity.service.js.map |
"""
Copyright 2022 Technology Innovation Institute LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import random
import stp
class XoodooState(object):
nrows = 3
ncols = 4
word_size = 32
def __init__(self, state):
"""
Construct an instance of XoodooState
INPUT:
- ``state`` -- input state
TESTS::
>>> from xoodoo import XoodooState
>>> S0 = XoodooState(range(12))
>>> S0
0x00000000 0x00000001 0x00000002 0x00000003
0x00000004 0x00000005 0x00000006 0x00000007
0x00000008 0x00000009 0x0000000A 0x0000000B
>>> S1 = XoodooState([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]])
>>> S1
0x00000000 0x00000001 0x00000002 0x00000003
0x00000004 0x00000005 0x00000006 0x00000007
0x00000008 0x00000009 0x0000000A 0x0000000B
>>> S2 = XoodooState([1])
Traceback (most recent call last):
...
TypeError: invalid input format
"""
nrows = XoodooState.nrows
ncols = XoodooState.ncols
self._state = []
if len(state) == nrows * ncols:
self._state = [[int(state[i * ncols + j]) for j in range(ncols)] for i in range(nrows)]
elif len(state) == nrows and all(len(row) == ncols for row in state):
self._state = [[int(state[i][j]) for j in range(ncols)] for i in range(nrows)]
else:
raise TypeError("invalid input format")
@property
def state(self):
"""
Return the Xoodoo state
TESTS::
>>> from xoodoo import XoodooState
>>> S = XoodooState(range(12))
>>> S.state
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]
"""
return self._state
def hamming_weight(self):
"""
Return the Hamming weight of the state
TESTS::
>>> from xoodoo import XoodooState
>>> S = XoodooState(range(12))
>>> S.hamming_weight()
20
"""
weight = 0
for i in range(XoodooState.nrows):
for j in range(XoodooState.ncols):
for k in range(XoodooState.word_size):
weight += (self[i][j] & (1 << k)) >> k
return weight
def trail_weight(self):
"""
Return the trail weight of the state
TESTS::
>>> from xoodoo import XoodooState
>>> S = XoodooState(range(12))
>>> S.trail_weight()
24
"""
def hamming_weight(x):
hw = 0
for k in range(XoodooState.word_size):
hw += (x & (1 << k)) >> k
return hw
s = self.state
w = 0
for j in range(XoodooState.ncols):
w += hamming_weight(s[0][j] | s[1][j] | s[2][j])
return 2*w
def __repr__(self):
"""
Return the string representation of XoodooState
TESTS::
>>> from xoodoo import XoodooState
>>> S = XoodooState([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
>>> S
0x00000000 0x00000001 0x00000002 0x00000003
0x00000004 0x00000005 0x00000006 0x00000007
0x00000008 0x00000009 0x0000000A 0x0000000B
"""
nrows = XoodooState.nrows
ncols = XoodooState.ncols
hex_str = [["0x" + hex(self[i][j])[2:].zfill(8).upper() for j in range(ncols)] for i in range(nrows)]
return '\n'.join([' '.join(hex_str[i]) for i in range(nrows)])
def __getitem__(self, r):
"""
INPUT:
- ``r`` -- row index
TESTS::
>>> from xoodoo import XoodooState
>>> S = XoodooState(range(12))
>>> S[0]
[0, 1, 2, 3]
"""
return self.state[r]
def __setitem__(self, r, v):
"""
Replace the value of row `r` with vector `v`
INPUT:
- ``r`` -- row index
- ``v`` -- a list/tuple
TESTS::
>>> from xoodoo import XoodooState
>>> S = XoodooState(range(12))
>>> S
0x00000000 0x00000001 0x00000002 0x00000003
0x00000004 0x00000005 0x00000006 0x00000007
0x00000008 0x00000009 0x0000000A 0x0000000B
        >>> S[0] = [0xFFFF0000, 0x0000FFFF, 0xFF00FF00, 0x0F0F0F0F]
>>> S
0xFFFF0000 0x0000FFFF 0xFF00FF00 0x0F0F0F0F
0x00000004 0x00000005 0x00000006 0x00000007
0x00000008 0x00000009 0x0000000A 0x0000000B
"""
if len(v) != XoodooState.ncols:
raise TypeError("the length of v must be equal to %d" % XoodooState.ncols)
self.state[r] = v
class XoodooTrail(object):
_nsteps_in_a_round = 4
def __init__(self, trail):
"""
Construct an instance of Xoodoo trail
INPUT:
- ``trail`` -- a list of Xoodoo state
"""
self._trail = [XoodooState(state) for state in trail]
self._nrounds = len(trail) // XoodooTrail._nsteps_in_a_round
@property
def trail(self):
"""
Return the trail
"""
return self._trail
@property
def nrounds(self):
"""
Return the number of rounds covered for the given trail
"""
return self._nrounds
@staticmethod
def input_round_index(r):
"""
Return the index of input state at round `r` in the trail array
INPUT:
- ``r`` -- round index
"""
nsteps = XoodooTrail._nsteps_in_a_round
return nsteps * r
def input(self):
"""
Return the input state of the trail
"""
return self.input_round(0)
def output(self):
"""
Return the output state of the trail
"""
return self.output_round(self.nrounds - 1)
def input_round(self, r):
"""
Return the input state at round `r`
INPUT:
- ``r`` -- round index
"""
return self.trail[XoodooTrail.input_round_index(r)]
def input_theta(self, r):
"""
Return the input state of the theta function at round `r`
INPUT:
- ``r`` -- round index
"""
return self.input_round(r)
def input_rho_west(self, r):
"""
Return the input state of the rho west function at round `r`
INPUT:
- ``r`` -- round index
"""
return self.trail[XoodooTrail.input_round_index(r) + 1]
def input_chi(self, r):
"""
Return the input state of the chi function at round `r`
INPUT:
- ``r`` -- round index
"""
return self.trail[XoodooTrail.input_round_index(r) + 2]
def input_rho_east(self, r):
"""
Return the input state of the rho east function at round `r`
INPUT:
- ``r`` -- round index
"""
return self.trail[XoodooTrail.input_round_index(r) + 3]
def output_round(self, r):
"""
Return the output state of round `r`
INPUT:
- ``r`` -- round index
"""
return self.input_round(r + 1)
def output_theta(self, r):
"""
Return the output state of theta function at round `r`
INPUT:
- ``r`` -- round index
"""
return self.input_rho_west(r)
def output_rho_west(self, r):
"""
Return the output state of the rho west function at round `r`
INPUT:
- ``r`` -- round index
"""
return self.input_chi(r)
def output_chi(self, r):
"""
Return the output state of the chi function at round `r`
INPUT:
- ``r`` -- round index
"""
return self.input_rho_east(r)
def output_rho_east(self, r):
"""
Return the output state of the rho east function at round `r`
INPUT:
- ``r`` -- round index
"""
return self.output_round(r)
def weight(self):
"""
Return the weight of the trail
"""
w = sum(self.input_chi(r).trail_weight() for r in range(self.nrounds))
return int(w)
class Xoodoo(object):
max_nrounds = 12
round_constants = [0x00000058, 0x00000038, 0x000003C0, 0x000000D0, 0x00000120, 0x00000014,
0x00000060, 0x0000002C, 0x00000380, 0x000000F0, 0x000001A0, 0x00000012]
_input_round_var_name = 'x'
_input_chi_var_name = 's'
_output_chi_var_name = 't'
_aux_theta_var_name = 'p'
_output_theta_var_name = 'e'
_diff_input_round_var_name = _input_round_var_name.upper()
_diff_input_chi_var_name = _input_chi_var_name.upper()
_diff_output_chi_var_name = _output_chi_var_name.upper()
_diff_aux_theta_var_name = _aux_theta_var_name.upper()
_diff_output_theta_var_name = _output_theta_var_name.upper()
_weight_var_name = 'w'
_total_weight_var_name = _weight_var_name.upper()
def __init__(self, nrounds):
"""
Construct an instance of Xoodoo
EXAMPLES::
>>> from xoodoo import Xoodoo
>>> X = Xoodoo(nrounds=4)
>>> X
4-rounds of Xoodoo
"""
if not 1 <= nrounds <= 12:
raise ValueError("nrounds must be between 1 <= nrounds <= 12")
self._nrounds = nrounds
self._solver = stp.Solver()
@property
def nrounds(self):
"""
Return the number of rounds
EXAMPLES::
>>> from xoodoo import Xoodoo
>>> X = Xoodoo(nrounds=4)
>>> X.nrounds
4
"""
return self._nrounds
@property
def solver(self):
"""
Return the STP solver
EXAMPLES::
>>> from xoodoo import Xoodoo
>>> X = Xoodoo(nrounds=4)
>>> X.solver # doctest: +ELLIPSIS
<stp.stp.Solver object at 0x...>
"""
return self._solver
def round_constant(self, r):
"""
Return the round constant at round `r`
INPUT:
- ``r`` -- round index
EXAMPLES::
>>> from xoodoo import Xoodoo
>>> X = Xoodoo(nrounds=4)
>>> "0x" + hex(X.round_constant(0))[2:].zfill(8)
'0x00000380'
TESTS::
>>> X.round_constant(0) == Xoodoo.round_constants[-X.nrounds:][0]
True
"""
if not 0 <= r < self.nrounds:
raise ValueError("r must be in the range 0 <= r < %d" % self.nrounds)
return Xoodoo.round_constants[-self.nrounds:][r]
def rotate_left_constraint(self, x, y, r):
"""
Return the bitwise left-rotation constraint
INPUT:
- ``x`` -- input word
- ``y`` -- output word
- ``r`` -- rotation constant
EXAMPLES::
>>> from xoodoo import Xoodoo
>>> X = Xoodoo(nrounds=4)
>>> S = X.solver
>>> a = S.bitvec('a', width=32)
>>> b = S.bitvec('b', width=32)
>>> S.add(X.rotate_left_constraint(a, b, 2))
>>> S.add(a == 0x80000000)
>>> S.check()
True
>>> S.model(b.name)
2L
"""
if not isinstance(y, stp.Expr):
raise TypeError("y must be an instance of stp.stp.Expr")
if y.width != XoodooState.word_size:
raise ValueError("the width of y must be equal to %d" % XoodooState.word_size)
return Xoodoo.rotate_left(x, r) == y
@staticmethod
def rotate_left(x, r):
"""
Return bitwise left-rotation on `x` with `r`-bit rotation
INPUT:
- ``x`` -- input word
- ``r`` -- rotation constant
TESTS::
>>> from xoodoo import Xoodoo
>>> X = Xoodoo(nrounds=4)
>>> S = X.solver
>>> a = S.bitvec('a', width=32)
>>> b = S.bitvec('b', width=32)
>>> S.add(X.rotate_left(a, 2) == b)
>>> S.add(a == 0x80000000)
>>> S.check()
True
>>> S.model(b.name)
2L
"""
if not isinstance(r, (int, long)):
raise TypeError("r must be an int or long")
if isinstance(x, stp.Expr) and x.width != XoodooState.word_size:
raise ValueError("the width of x must be equal to %d" % XoodooState.word_size)
if not 0 <= r < XoodooState.word_size:
raise ValueError("r must be in the range 0 <= r < %d" % XoodooState.word_size)
val = ((x << r) | (x >> (XoodooState.word_size - r)))
if isinstance(x, (int, long)):
val &= 0xFFFFFFFF
return val
def input_round_varstrs(self, r):
"""
Return input variables string for round `r`
INPUT:
- ``r`` -- round number
EXAMPLES::
>>> from xoodoo import Xoodoo
>>> X = Xoodoo(nrounds=4)
>>> X.input_round_varstrs(0) # doctest: +NORMALIZE_WHITESPACE
[['x000000', 'x000001', 'x000002', 'x000003'],
['x000100', 'x000101', 'x000102', 'x000103'],
['x000200', 'x000201', 'x000202', 'x000203']]
TESTS::
>>> X.input_round_varstrs(-1)
Traceback (most recent call last):
...
ValueError: r must be in the range 0 <= r <= 4
>>> X.input_round_varstrs(5)
Traceback (most recent call last):
...
ValueError: r must be in the range 0 <= r <= 4
"""
return self.varstrs(r, var_name=Xoodoo._input_round_var_name)
def input_round_vars(self, r):
"""
Return input variables for round `r`
INPUT:
- ``r`` -- round number
EXAMPLES::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=4)
>>> x = xoodoo.input_round_vars(0)
>>> [[v.name for v in row] for row in x] # doctest: +NORMALIZE_WHITESPACE
[['x000000', 'x000001', 'x000002', 'x000003'],
['x000100', 'x000101', 'x000102', 'x000103'],
['x000200', 'x000201', 'x000202', 'x000203']]
"""
return self.vars(r, var_name=Xoodoo._input_round_var_name)
def output_round_varstrs(self, r):
"""
Return output variables string of round `r`
INPUT:
- ``r`` -- round number
EXAMPLES::
>>> from xoodoo import Xoodoo
>>> X = Xoodoo(nrounds=4)
>>> X.output_round_varstrs(0) # doctest: +NORMALIZE_WHITESPACE
[['x010000', 'x010001', 'x010002', 'x010003'],
['x010100', 'x010101', 'x010102', 'x010103'],
['x010200', 'x010201', 'x010202', 'x010203']]
"""
return self.input_round_varstrs(r + 1)
def output_round_vars(self, r):
"""
Return output variables of round `r`
INPUT:
- ``r`` -- round number
EXAMPLES::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=4)
>>> x = xoodoo.output_round_vars(0)
>>> [[ v.name for v in row] for row in x] # doctest: +NORMALIZE_WHITESPACE
[['x010000', 'x010001', 'x010002', 'x010003'],
['x010100', 'x010101', 'x010102', 'x010103'],
['x010200', 'x010201', 'x010202', 'x010203']]
"""
return self.input_round_vars(r + 1)
def aux_vars(self, r, var_name):
"""
        Return a list of auxiliary variables
INPUT:
- ``r`` -- round number
- ``var_name`` -- variable name
"""
x = ["%s%02d%02d" % (var_name, r, j) for j in range(XoodooState.ncols)]
return [self.solver.bitvec(x[j], width=XoodooState.word_size) for j in range(XoodooState.ncols)]
def vars(self, r, var_name):
"""
Return a list of variables
INPUT:
- ``r`` -- round number
- ``var_name`` -- variable name
"""
x = self.varstrs(r, var_name)
return [[self.solver.bitvec(x[i][j], width=XoodooState.word_size)
for j in range(XoodooState.ncols)] for i in range(XoodooState.nrows)]
@staticmethod
def is_valid_state_format(state):
"""
        Return `True` if `state` is a 3 x 4 list/tuple
        INPUT:
        - ``state`` -- a list/tuple
EXAMPLES::
>>> from xoodoo import Xoodoo
>>> Xoodoo.is_valid_state_format([])
False
>>> Xoodoo.is_valid_state_format([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]])
True
"""
return len(state) == XoodooState.nrows and all(len(row) == XoodooState.ncols for row in state)
def varstrs(self, r, var_name):
"""
Return a list of variable in string format
INPUT:
- ``r`` -- round number
- ``var_name`` -- name of variable
EXAMPLES::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=4)
>>> xoodoo.varstrs(0, var_name='x') # doctest: +NORMALIZE_WHITESPACE
[['x000000', 'x000001', 'x000002', 'x000003'],
['x000100', 'x000101', 'x000102', 'x000103'],
['x000200', 'x000201', 'x000202', 'x000203']]
>>> xoodoo.varstrs(1, var_name='y') # doctest: +NORMALIZE_WHITESPACE
[['y010000', 'y010001', 'y010002', 'y010003'],
['y010100', 'y010101', 'y010102', 'y010103'],
['y010200', 'y010201', 'y010202', 'y010203']]
"""
if not 0 <= r <= self.nrounds:
raise ValueError("r must be in the range 0 <= r <= %d" % self.nrounds)
return [['%s%02d%02d%02d' % (var_name, r, i, j) for j in range(XoodooState.ncols)] for i in range(XoodooState.nrows)]
def input_chi_varstrs(self, r):
"""
Return a list of input variables in string format for chi at round `r`
INPUT:
- ``r`` -- round number
EXAMPLES::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=4)
>>> xoodoo.input_chi_varstrs(0) # doctest: +NORMALIZE_WHITESPACE
[['s000000', 's000001', 's000002', 's000003'],
['s000100', 's000101', 's000102', 's000103'],
['s000200', 's000201', 's000202', 's000203']]
"""
return self.varstrs(r, Xoodoo._input_chi_var_name)
def input_chi_vars(self, r):
"""
Return a list of input variables for chi at round `r`
INPUT:
- ``r`` -- round number
EXAMPLES::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=4)
>>> x = xoodoo.input_chi_vars(0)
>>> [[v.name for v in row] for row in x] # doctest: +NORMALIZE_WHITESPACE
[['s000000', 's000001', 's000002', 's000003'],
['s000100', 's000101', 's000102', 's000103'],
['s000200', 's000201', 's000202', 's000203']]
"""
return self.vars(r, Xoodoo._input_chi_var_name)
def output_chi_varstrs(self, r):
"""
Return a list of output variables in string format for chi at round `r`
INPUT:
- ``r`` -- round number
EXAMPLES::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=4)
>>> xoodoo.output_chi_varstrs(0) # doctest: +NORMALIZE_WHITESPACE
[['t000000', 't000001', 't000002', 't000003'],
['t000100', 't000101', 't000102', 't000103'],
['t000200', 't000201', 't000202', 't000203']]
"""
return self.varstrs(r, Xoodoo._output_chi_var_name)
def output_chi_vars(self, r):
"""
Return a list of input variables for chi at round `r`
INPUT:
- ``r`` -- round number
EXAMPLES::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=4)
>>> x = xoodoo.output_chi_vars(0)
>>> [[v.name for v in row] for row in x] # doctest: +NORMALIZE_WHITESPACE
[['t000000', 't000001', 't000002', 't000003'],
['t000100', 't000101', 't000102', 't000103'],
['t000200', 't000201', 't000202', 't000203']]
"""
return self.vars(r, Xoodoo._output_chi_var_name)
def input_theta_vars(self, r):
"""
Return a list of input variables for theta at round `r`
INPUT:
- ``r`` -- round number
TESTS::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=4)
>>> xoodoo.input_theta_vars(0) == xoodoo.input_round_vars(0)
True
"""
return self.input_round_vars(r)
def aux_theta_vars(self, r):
"""
        Return a list of auxiliary variables for theta at round `r`
INPUT:
- ``r`` -- round number
TESTS::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=4)
>>> x = xoodoo.aux_theta_vars(0)
>>> [v.name for v in x]
['p0000', 'p0001', 'p0002', 'p0003']
"""
return self.aux_vars(r, Xoodoo._aux_theta_var_name)
def output_theta_vars(self, r):
""""
Return a list of output variables for theta at round `r`
INPUT:
- ``r`` -- round number
TESTS::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=3)
>>> x = xoodoo.output_theta_vars(0)
>>> [[v.name for v in row] for row in x] # doctest: +NORMALIZE_WHITESPACE
[['e000000', 'e000001', 'e000002', 'e000003'],
['e000100', 'e000101', 'e000102', 'e000103'],
['e000200', 'e000201', 'e000202', 'e000203']]
"""
return self.vars(r, Xoodoo._output_theta_var_name)
def theta_constraints(self, r):
"""
Return a list of constraints for theta at round `r`
INPUT:
- ``r`` -- round number
TESTS::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=4)
>>> solver = xoodoo.solver
>>> for constraint in xoodoo.theta_constraints(0):
... solver.add(constraint)
>>> randval = xoodoo.random_state()
>>> for constraint in xoodoo.assignment_constraints(xoodoo.input_theta_vars(0), randval):
... solver.add(constraint)
>>> solver.check()
True
>>> e = xoodoo.output_theta_vars(0)
>>> result = [[solver.model(e[i][j].name) for j in range(XoodooState.ncols)]
... for i in range(XoodooState.nrows)]
>>> def theta(x):
... p = [x[0][j] ^ x[1][j] ^ x[2][j] for j in range(XoodooState.ncols)]
... y = [[0 for __ in range(XoodooState.ncols)] for _ in range(XoodooState.nrows)]
... for i in range(XoodooState.nrows):
... for j in range(XoodooState.ncols):
... k = (j + XoodooState.nrows) % XoodooState.ncols
... y[i][j] = x[i][j] ^ xoodoo.rotate_left(p[k], 5) ^ xoodoo.rotate_left(p[k], 14)
... return y
>>> result == theta(randval)
True
"""
x = self.input_theta_vars(r)
p = self.aux_theta_vars(r)
y = self.output_theta_vars(r)
return self._theta_constraints_(x, p, y)
def _theta_constraints_(self, x, p, y):
"""
Return a list of constraints for theta
INPUT:
- ``x`` -- input variables
- ``p`` -- auxiliary variables
- ``y`` -- output variables
"""
constraints = [p[j] == x[0][j] ^ x[1][j] ^ x[2][j] for j in range(XoodooState.ncols)]
for i in range(XoodooState.nrows):
for j in range(XoodooState.ncols):
k = (j + XoodooState.nrows) % XoodooState.ncols
constraints.append(y[i][j] == (x[i][j] ^ Xoodoo.rotate_left(p[k], 5) ^ Xoodoo.rotate_left(p[k], 14)))
return constraints
def random_state(self):
"""
Return a random xoodoo state
TESTS::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=3)
>>> state = xoodoo.random_state()
>>> xoodoo.is_valid_state_format(state)
True
"""
        return [[random.randint(0, 2**XoodooState.word_size - 1) for __ in range(XoodooState.ncols)]
for _ in range(XoodooState.nrows)]
def assignment_constraints(self, state, value):
"""
Return a list of constraints for assignment
INPUT:
- ``state`` -- variables representing Xoodoo state
- ``value`` -- substituted value
TESTS::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=4)
>>> x = xoodoo.input_round_vars(0)
>>> v = xoodoo.random_state()
>>> solver = xoodoo.solver
>>> for constraint in xoodoo.assignment_constraints(x, v):
... solver.add(constraint)
>>> solver.check()
True
>>> all(solver.model(x[i][j].name) == v[i][j] for i in range(XoodooState.nrows)
... for j in range(XoodooState.ncols))
True
"""
if not self.is_valid_state_format(state):
raise TypeError("state must be an instance of %dx%d array of %d-bit words" %
(XoodooState.nrows, XoodooState.ncols, XoodooState.word_size))
if not self.is_valid_state_format(value):
raise TypeError("value must be an instance of %dx%d array of %d-bit words" %
(XoodooState.nrows, XoodooState.ncols, XoodooState.word_size))
return [state[i][j] == value[i][j] for i in range(XoodooState.nrows) for j in range(XoodooState.ncols)]
def chi_constraints(self, r):
"""
Return a list of constraints for chi at round `r`
INPUT:
- ``r`` -- round number
TESTS::
>>> from xoodoo import Xoodoo
>>> import random
>>> xoodoo = Xoodoo(nrounds=4)
>>> solver = xoodoo.solver
>>> for constraint in xoodoo.chi_constraints(0):
... solver.add(constraint)
>>> randval = xoodoo.random_state()
>>> s = xoodoo.input_chi_vars(0)
>>> for constraint in xoodoo.assignment_constraints(s, randval):
... solver.add(constraint)
>>> solver.check()
True
>>> t = xoodoo.output_chi_vars(0)
>>> result = [[solver.model(t[i][j].name) for j in range(XoodooState.ncols)]
... for i in range(XoodooState.nrows)]
>>> all(result[i][j] ==
... randval[i][j] ^ (~randval[(i + 1) % XoodooState.nrows][j] & randval[(i + 2) % XoodooState.nrows][j])
... for i in range(XoodooState.nrows) for j in range(XoodooState.ncols))
True
"""
x = self.input_chi_vars(r)
y = self.output_chi_vars(r)
return [y[i][j] == x[i][j] ^ (~x[(i + 1) % XoodooState.nrows][j] & x[(i + 2) % XoodooState.nrows][j])
for j in range(XoodooState.ncols) for i in range(XoodooState.nrows)]
def rho_east_constraints(self, r):
"""
Return a list of constraints for rho east at round `r`
INPUT:
- ``r`` -- round number
TESTS::
>>> from xoodoo import Xoodoo
>>> import random
>>> xoodoo = Xoodoo(nrounds=4)
>>> solver = xoodoo.solver
>>> for constraint in xoodoo.rho_east_constraints(0):
... solver.add(constraint)
>>> randval = xoodoo.random_state()
>>> out = []
>>> out.append( [randval[0][j] for j in range(XoodooState.ncols)] )
>>> out.append( [xoodoo.rotate_left(randval[1][j], 1) for j in range(XoodooState.ncols)] )
>>> out.append( [xoodoo.rotate_left(randval[2][(j + 2) % XoodooState.ncols], 8) for j in range(XoodooState.ncols)] )
>>> x = xoodoo.output_chi_vars(0)
>>> for constraint in xoodoo.assignment_constraints(x, randval):
... solver.add(constraint)
>>> solver.check()
True
>>> y = xoodoo.output_round_vars(0)
>>> result = [[solver.model(y[i][j].name) for j in range(XoodooState.ncols)] for i in range(XoodooState.nrows)]
>>> all(result[i][j] == out[i][j] for i in range(XoodooState.nrows) for j in range(XoodooState.ncols))
True
"""
x = self.output_chi_vars(r)
y = self.output_round_vars(r)
return self._rho_east_constraints_(x, y)
def _rho_east_constraints_(self, x, y):
"""
Return a list of constraints for rho east
INPUT:
- ``x`` -- input variables
- ``y`` -- output variables
"""
constraints = [y[0][j] == x[0][j] for j in range(XoodooState.ncols)]
constraints += [y[1][j] == Xoodoo.rotate_left(x[1][j], 1) for j in range(XoodooState.ncols)]
constraints += [y[2][j] == Xoodoo.rotate_left(x[2][(j + 2) % XoodooState.ncols], 8)
for j in range(XoodooState.ncols)]
return constraints
def rho_west_and_iota_constraints(self, r):
"""
Return a list of constraints for composition of rho west and iota at round `r`
INPUT:
- ``r`` -- round number
TESTS::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=4)
>>> solver = xoodoo.solver
>>> for constraint in xoodoo.rho_west_and_iota_constraints(0):
... solver.add(constraint)
>>> randval = xoodoo.random_state()
>>> for constraint in xoodoo.assignment_constraints(xoodoo.output_theta_vars(0), randval):
... solver.add(constraint)
        >>> def rho_west_and_iota(x):
... y = list()
... y.append([x[0][0] ^ xoodoo.round_constant(0)] + x[0][1:])
... y.append([x[1][3]] + x[1][:3])
... y.append([xoodoo.rotate_left(x[2][j], 11) for j in range(XoodooState.ncols)])
... return y
>>> solver.check()
True
>>> out_vars = xoodoo.input_chi_vars(0)
>>> result = [[solver.model(out_vars[i][j].name) for j in range(XoodooState.ncols)]
... for i in range(XoodooState.nrows)]
        >>> result == rho_west_and_iota(randval)
True
"""
x = self.output_theta_vars(r)
y = self.input_chi_vars(r)
constraints = self._rho_west_constraints_(x, y, r)
return constraints
def _rho_west_constraints_(self, x, y, r=None):
"""
Return a list of constraints for rho west
If the round number `r` is specified, then the round constant is included in the constraints
INPUT:
- ``x`` -- input variables
- ``y`` -- output variables
- ``r`` -- round number (default: None)
"""
constraints = []
if r is not None:
constraints += [y[0][0] == x[0][0] ^ self.round_constant(r)]
constraints += [y[0][j] == x[0][j] for j in range(1, XoodooState.ncols)]
else:
constraints += [y[0][j] == x[0][j] for j in range(XoodooState.ncols)]
constraints += [y[1][j] == x[1][(j + XoodooState.nrows) % XoodooState.ncols] for j in range(XoodooState.ncols)]
constraints += [y[2][j] == Xoodoo.rotate_left(x[2][j], 11) for j in range(XoodooState.ncols)]
return constraints
def round_constraints(self, r):
"""
Return a list of constraints of the `r`-th round
INPUT:
- ``r`` -- round number
"""
return self.theta_constraints(r) + self.rho_west_and_iota_constraints(r) + self.chi_constraints(r) +\
self.rho_east_constraints(r)
def permutation_constraints(self):
"""
Return a list of constraints for the Xoodoo permutation
TESTS::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=12)
>>> solver = xoodoo.solver
>>> for constraint in xoodoo.permutation_constraints():
... solver.add(constraint)
>>> x = xoodoo.input_variables()
>>> for constraint in xoodoo.assignment_constraints(x, [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]):
... solver.add(constraint)
>>> solver.check()
True
>>> y = xoodoo.output_variables()
>>> out_value = [[solver.model(y[i][j].name) for j in range(XoodooState.ncols)]
... for i in range(XoodooState.nrows)]
>>> correct_output = [
... [2312493197, 2841902271, 455290137, 4289044500],
... [917602566, 2949104126, 2934275262, 2809479357],
... [780593264, 4277516233, 2337254898, 1582252130]
... ]
>>> out_value == correct_output
True
>>> xoodoo_3rounds = Xoodoo(nrounds=3)
>>> for constraint in xoodoo_3rounds.permutation_constraints():
... xoodoo_3rounds.solver.add(constraint)
>>> x = xoodoo_3rounds.input_variables()
        >>> for constraint in xoodoo_3rounds.assignment_constraints(x, [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]):
... xoodoo_3rounds.solver.add(constraint)
>>> xoodoo_3rounds.solver.check()
True
>>> y = xoodoo_3rounds.output_variables()
>>> out_value = [[xoodoo_3rounds.solver.model(y[i][j].name) for j in range(XoodooState.ncols)]
... for i in range(XoodooState.nrows)]
>>> correct_output = [
... [3367309476, 2795523367, 3279790372, 1225296034],
... [2440626244, 4167606016, 210768320, 1157228578],
... [2173818853, 1684836713, 160556720, 1812112827]
... ]
>>> out_value == correct_output
True
"""
return sum([self.round_constraints(r) for r in range(self.nrounds)], [])
def input_variables(self):
"""
Return a list of input variables of the permutation
TESTS::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=4)
>>> in_vars = xoodoo.input_variables()
>>> [[in_vars[i][j].name for j in range(XoodooState.ncols)] for i in range(XoodooState.nrows)] # doctest: +NORMALIZE_WHITESPACE
[['x000000', 'x000001', 'x000002', 'x000003'],
['x000100', 'x000101', 'x000102', 'x000103'],
['x000200', 'x000201', 'x000202', 'x000203']]
"""
return self.input_round_vars(0)
def diff_input_round_vars(self, r):
"""
Return a list of input round difference variables at round `r`
INPUT:
- ``r`` -- round number
TESTS::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=4)
>>> d_in_vars = xoodoo.diff_input_round_vars(0)
>>> [[d_in_vars[i][j].name for j in range(XoodooState.ncols)] for i in range(XoodooState.nrows)] # doctest: +NORMALIZE_WHITESPACE
[['X000000', 'X000001', 'X000002', 'X000003'],
['X000100', 'X000101', 'X000102', 'X000103'],
['X000200', 'X000201', 'X000202', 'X000203']]
"""
return self.vars(r, Xoodoo._diff_input_round_var_name)
def diff_output_round_vars(self, r):
"""
Return a list of output round difference variables at round `r`
INPUT:
- ``r`` -- round number
TESTS::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=4)
>>> d_out_vars = xoodoo.diff_output_round_vars(0)
>>> [[d_out_vars[i][j].name for j in range(XoodooState.ncols)] for i in range(XoodooState.nrows)] # doctest: +NORMALIZE_WHITESPACE
[['X010000', 'X010001', 'X010002', 'X010003'],
['X010100', 'X010101', 'X010102', 'X010103'],
['X010200', 'X010201', 'X010202', 'X010203']]
"""
return self.diff_input_round_vars(r + 1)
def diff_input_theta_vars(self, r):
"""
Return a list of input theta difference variables at round `r`
INPUT:
- ``r`` -- round number
TESTS::
>>> from xoodoo import Xoodoo, XoodooState
>>> xoodoo = Xoodoo(nrounds=3)
>>> d = xoodoo.diff_input_theta_vars(0)
>>> d_in_round_vars = xoodoo.diff_input_round_vars(0)
>>> all(d[i][j].name == d_in_round_vars[i][j].name
... for i in range(XoodooState.nrows)
... for j in range(XoodooState.ncols))
True
"""
return self.diff_input_round_vars(r)
def diff_aux_theta_vars(self, r):
"""
Return a list of auxiliary theta difference variables at round `r`
INPUT:
- ``r`` -- round number
TESTS::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=4)
>>> d_aux_theta_vars = xoodoo.diff_aux_theta_vars(0)
>>> [var_.name for var_ in d_aux_theta_vars]
['P0000', 'P0001', 'P0002', 'P0003']
"""
return self.aux_vars(r, Xoodoo._diff_aux_theta_var_name)
def diff_output_theta_vars(self, r):
"""
Return a list of output theta difference variables at round `r`
INPUT:
- ``r`` -- round number
TESTS::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=4)
>>> d_out_theta_vars = xoodoo.diff_output_theta_vars(0)
>>> [[d_out_theta_vars[i][j].name for j in range(XoodooState.ncols)] for i in range(XoodooState.nrows)] # doctest: +NORMALIZE_WHITESPACE
[['E000000', 'E000001', 'E000002', 'E000003'],
['E000100', 'E000101', 'E000102', 'E000103'],
['E000200', 'E000201', 'E000202', 'E000203']]
"""
return self.vars(r, Xoodoo._diff_output_theta_var_name)
def diff_theta_constraints(self, r):
"""
Return a list of constraints for the differential propagation of theta at round `r`
INPUT:
- ``r`` -- round number
TESTS::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=4)
>>> constraints = xoodoo.diff_theta_constraints(0)
>>> for constraint in constraints:
... xoodoo.solver.add(constraint)
>>> xoodoo.solver.check()
True
"""
x = self.diff_input_theta_vars(r)
p = self.diff_aux_theta_vars(r)
y = self.diff_output_theta_vars(r)
return self._theta_constraints_(x, p, y)
def diff_input_chi_vars(self, r):
"""
Return a list of input chi difference variables at round `r`
INPUT:
- ``r`` -- round number
EXAMPLES::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=4)
>>> d_in_chi_vars = xoodoo.diff_input_chi_vars(0)
>>> [[d_in_chi_vars[i][j].name for j in range(XoodooState.ncols)] for i in range(XoodooState.nrows)] # doctest: +NORMALIZE_WHITESPACE
[['S000000', 'S000001', 'S000002', 'S000003'],
['S000100', 'S000101', 'S000102', 'S000103'],
['S000200', 'S000201', 'S000202', 'S000203']]
"""
return self.vars(r, Xoodoo._diff_input_chi_var_name)
def diff_output_chi_vars(self, r):
"""
Return a list of output chi difference variables at round `r`
INPUT:
- ``r`` -- round number
EXAMPLES::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=4)
>>> d_out_chi_vars = xoodoo.diff_output_chi_vars(0)
>>> [[d_out_chi_vars[i][j].name for j in range(XoodooState.ncols)] for i in range(XoodooState.nrows)] # doctest: +NORMALIZE_WHITESPACE
[['T000000', 'T000001', 'T000002', 'T000003'],
['T000100', 'T000101', 'T000102', 'T000103'],
['T000200', 'T000201', 'T000202', 'T000203']]
"""
return self.vars(r, Xoodoo._diff_output_chi_var_name)
def diff_input_rho_west_vars(self, r):
"""
Return a list of input rho west difference variables at round `r`
INPUT:
- ``r`` -- round number
TESTS::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=4)
>>> xoodoo.diff_input_rho_west_vars(0) == xoodoo.diff_output_theta_vars(0)
True
"""
return self.diff_output_theta_vars(r)
def diff_output_rho_west_vars(self, r):
"""
Return a list of output rho west difference variables at round `r`
INPUT:
- ``r`` -- round number
TESTS::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=4)
>>> xoodoo.diff_output_rho_west_vars(0) == xoodoo.diff_input_chi_vars(0)
True
"""
return self.diff_input_chi_vars(r)
def diff_rho_west_constraints(self, r):
"""
Return a list of constraints for the differential propagation of rho west at round `r`
INPUT:
- ``r`` -- round number
"""
x = self.diff_input_rho_west_vars(r)
y = self.diff_output_rho_west_vars(r)
return self._rho_west_constraints_(x, y)
def diff_input_rho_east_vars(self, r):
"""
Return a list of input rho east difference variables at round `r`
INPUT:
- ``r`` -- round number
TESTS::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=4)
>>> xoodoo.diff_input_rho_east_vars(0) == xoodoo.diff_output_chi_vars(0)
True
"""
return self.diff_output_chi_vars(r)
def diff_output_rho_east_vars(self, r):
"""
Return a list of output rho east difference variables at round `r`
INPUT:
- ``r`` -- round number
TESTS::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=4)
>>> xoodoo.diff_output_rho_east_vars(0) == xoodoo.diff_output_round_vars(0)
True
"""
return self.diff_output_round_vars(r)
def diff_rho_east_constraints(self, r):
"""
Return a list of constraints for the differential propagation of rho east at round `r`
INPUT:
- ``r`` -- round number
"""
x = self.diff_input_rho_east_vars(r)
y = self.diff_output_rho_east_vars(r)
return self._rho_east_constraints_(x, y)
def diff_chi_constraints(self, r):
"""
Return a list of constraints for the differential propagation of chi at round `r`
INPUT:
- ``r`` -- round number
TESTS::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=4)
>>> constraints = xoodoo.diff_chi_constraints(0)
>>> for constraint in constraints:
... xoodoo.solver.add(constraint)
>>> xoodoo.solver.check()
True
"""
x = self.diff_input_chi_vars(r)
y = self.diff_output_chi_vars(r)
w = self.weight_vars(r)
constraints = []
for j in range(XoodooState.ncols):
constraints += self._diff_chi_wordwise_constraints_(x[0][j], x[1][j], x[2][j], y[0][j], y[1][j], y[2][j])
constraints += [self.hamming_weight_constraint(x[0][j] | x[1][j] | x[2][j], w[j])]
return constraints
def _diff_chi_wordwise_constraints_(self, dx, dy, dz, dxp, dyp, dzp):
"""
Return a list of constraints for the word-wise differential propagation of chi
INPUT:
- ``dx, dy, dz`` -- input difference
- ``dxp, dyp, dzp`` -- output difference
TESTS::
>>> from xoodoo import Xoodoo
>>> chi_ddt = [[0], [1, 3, 5, 7], [2, 3, 6, 7], [1, 2, 5, 6], [4, 5, 6, 7], [1, 3, 4, 6], [2, 3, 4, 5],
... [1, 2, 4, 7]]
>>> is_correct = []
>>> for input_diff in range(len(chi_ddt)):
... for output_diff in chi_ddt[input_diff]:
... xoodoo = Xoodoo(nrounds=4)
... solver = xoodoo.solver
... dx, dy, dz = solver.bitvecs('dx dy dz', width=1)
        ...         dxp, dyp, dzp = solver.bitvecs('dxp dyp dzp', width=1)
        ...         for c in xoodoo._diff_chi_wordwise_constraints_(dx, dy, dz, dxp, dyp, dzp):
        ...             solver.add(c)
... in_diff = map(int, bin(input_diff)[2:].zfill(3))
... out_diff = map(int, bin(output_diff)[2:].zfill(3))
... solver.add(dx == in_diff[0]) #zero is the most-significant bit
... solver.add(dy == in_diff[1])
... solver.add(dz == in_diff[2])
... solver.add(dxp == out_diff[0])
... solver.add(dyp == out_diff[1])
... solver.add(dzp == out_diff[2])
... is_correct.append(solver.check())
>>> all(is_correct)
True
"""
constraints = [
(~dx & ~dy & ~dz) & (dxp | dyp | dzp) == 0,
(~dx & ~dy & dz) & ~dzp == 0,
(~dx & dy & ~dz) & ~dyp == 0,
( dx & ~dy & ~dz) & ~dxp == 0,
(~dx & dy & dz) & ~(dyp ^ dzp) == 0,
( dx & ~dy & dz) & ~(dxp ^ dzp) == 0,
( dx & dy & ~dz) & ~(dxp ^ dyp) == 0,
( dx & dy & dz) & ~(dxp ^ dyp ^ dzp) == 0,
]
return constraints
def output_variables(self):
"""
Return a list of output variables of the permutation
TESTS::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=4)
>>> out_vars = xoodoo.output_variables()
>>> [[out_vars[i][j].name for j in range(XoodooState.ncols)] for i in range(XoodooState.nrows)] # doctest: +NORMALIZE_WHITESPACE
[['x040000', 'x040001', 'x040002', 'x040003'],
['x040100', 'x040101', 'x040102', 'x040103'],
['x040200', 'x040201', 'x040202', 'x040203']]
"""
return self.output_round_vars(self.nrounds - 1)
def diff_input_vars(self):
"""
Return a list of variables for the input difference of the Xoodoo permutation
TESTS::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=3)
>>> X = xoodoo.diff_input_vars()
>>> [[X[i][j].name for j in range(XoodooState.ncols)] for i in range(XoodooState.nrows)] # doctest: +NORMALIZE_WHITESPACE
[['X000000', 'X000001', 'X000002', 'X000003'],
['X000100', 'X000101', 'X000102', 'X000103'],
['X000200', 'X000201', 'X000202', 'X000203']]
"""
return self.diff_input_round_vars(0)
def diff_output_vars(self):
"""
Return a list of variables for the output difference of the Xoodoo permutation
TESTS::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=3)
>>> X = xoodoo.diff_output_vars()
>>> [[X[i][j].name for j in range(XoodooState.ncols)] for i in range(XoodooState.nrows)] # doctest: +NORMALIZE_WHITESPACE
[['X030000', 'X030001', 'X030002', 'X030003'],
['X030100', 'X030101', 'X030102', 'X030103'],
['X030200', 'X030201', 'X030202', 'X030203']]
"""
return self.diff_output_round_vars(self.nrounds - 1)
def diff_round_constraints(self, r):
"""
Return a list of constraints for the differential propagation for the `r`-th round of Xoodoo
INPUT:
- ``r`` -- round number
TESTS::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=4)
>>> constraints = xoodoo.diff_round_constraints(0)
>>> for constraint in constraints:
... xoodoo.solver.add(constraint)
>>> xoodoo.solver.check()
True
"""
return self.diff_theta_constraints(r) + self.diff_rho_west_constraints(r) + self.diff_chi_constraints(r) +\
self.diff_rho_east_constraints(r)
def differential_constraints(self):
"""
Return a list of constraints for the differential propagation of Xoodoo
TESTS::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=3)
>>> constraints = xoodoo.differential_constraints()
>>> for constraint in constraints:
... xoodoo.solver.add(constraint)
>>> x = xoodoo.diff_input_round_vars(0)
>>> xoodoo.solver.check()
True
"""
x = self.diff_input_vars()
constraints = [
(x[0][0] | x[0][1] | x[0][2] | x[0][3] |
x[1][0] | x[1][1] | x[1][2] | x[1][3] |
x[2][0] | x[2][1] | x[2][2] | x[2][3]) != 0
]
constraints += sum([self.diff_round_constraints(r) for r in range(self.nrounds)], [])
constraints += [self.total_weight_constraint()]
return constraints
def hamming_weight_constraint(self, x, weight):
"""
Return a constraint represent the Hamming weight of `x`
INPUT:
- ``x`` -- word variable
- ``weight`` -- the specified Hamming weight
TESTS::
>>> import random
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=4)
>>> solver = xoodoo.solver
>>> a = solver.bitvec('a', width=XoodooState.word_size)
>>> w = random.randint(0, XoodooState.word_size)
>>> solver.add(xoodoo.hamming_weight_constraint(a, w))
>>> solver.check()
True
>>> result = solver.model(a.name)
>>> bin(result)[2:].count('1') == w
True
"""
return sum([(x & (1 << i)) >> i for i in range(XoodooState.word_size)]) == weight
def weight_vars(self, r):
"""
Return a list of variables for the Hamming weight of column-wise Xoodoo state at round `r`
INPUT:
- ``r`` -- round number
TESTS::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=4)
>>> W = xoodoo.weight_vars(0)
>>> [w.name for w in W]
['w0000', 'w0001', 'w0002', 'w0003']
"""
return self.aux_vars(r, Xoodoo._weight_var_name)
def total_weight_var(self):
"""
Return the variable representing the total Hamming weight of the differential trail
TESTS::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=4)
>>> xoodoo.total_weight_var().name == Xoodoo._total_weight_var_name
True
"""
return self.solver.bitvec(Xoodoo._total_weight_var_name, width=XoodooState.word_size)
def total_weight_constraint(self):
"""
Return a constraint representing the total weight for the differential trail
TESTS::
>>> from xoodoo import Xoodoo
>>> import random
>>> xoodoo = Xoodoo(nrounds=2)
>>> solver = xoodoo.solver
>>> weight = random.randint(0, 256)
>>> solver.add(xoodoo.total_weight_constraint())
>>> solver.add(xoodoo.total_weight_var() == weight)
>>> xoodoo.solver.check()
True
>>> weight_vars = sum([xoodoo.weight_vars(r) for r in range(xoodoo.nrounds)], [])
>>> sum([xoodoo.solver.model(w.name) for w in weight_vars]) == weight
True
"""
w = sum([self.weight_vars(r) for r in range(self.nrounds)], [])
W = self.total_weight_var()
return sum(w) == W
def differential_trail_constraints(self, w):
"""
Return a list of constraints to find differential trail of trail weight `w`
INPUT:
- ``w`` -- a positive integer
"""
if w <= 0:
raise ValueError("w must be a positive integer")
constraints = self.differential_constraints()
constraints += [self.total_weight_var() == w//2]
return constraints
def validity_input_vars(self, r):
"""
Return a list of input variables to verify the validity of differential at round `r`
INPUT:
- ``r`` -- round number
"""
x = self.input_chi_vars(r)
a = self.diff_input_chi_vars(r)
        return [[x[i][j] ^ a[i][j] for j in range(XoodooState.ncols)] for i in range(XoodooState.nrows)]
def validity_output_vars(self, r):
"""
Return a list of output variables to verify the validity of differential at round `r`
INPUT:
- ``r`` -- round number
"""
y = self.output_chi_vars(r)
b = self.diff_output_chi_vars(r)
        return [[y[i][j] ^ b[i][j] for j in range(XoodooState.ncols)] for i in range(XoodooState.nrows)]
def validity_round_constraints(self, r):
"""
Return a list of constraints to verify the validity of a differential at round `r`
INPUT:
- ``r`` -- round number
TESTS::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=3)
>>> solver = xoodoo.solver
>>> for constraint in xoodoo.validity_round_constraints(0):
... solver.add(constraint)
>>> solver.check()
True
"""
x = self.validity_input_vars(r)
y = self.validity_output_vars(r)
return [y[i][j] == x[i][j] ^ (~x[(i + 1) % XoodooState.nrows][j] & x[(i + 2) % XoodooState.nrows][j])
for j in range(XoodooState.ncols) for i in range(XoodooState.nrows)]
def validity_constraints(self):
"""
Return a list of constraints to verify the validity of differential trail
TESTS::
>>> from xoodoo import Xoodoo
>>> xoodoo = Xoodoo(nrounds=2)
>>> solver = xoodoo.solver
>>> for constraint in xoodoo.validity_constraints():
... solver.add(constraint)
>>> solver.check()
True
"""
return sum([self.validity_round_constraints(r) for r in range(self.nrounds)], [])
def has_differential_trail(self, w):
"""
Return `True` if there exists a differential trail with trail weight `w`
        Note that the trail weight of a word is equal to twice its Hamming weight
INPUT:
- ``w`` -- a positive integer
TESTS::
>>> from xoodoo import Xoodoo
>>> _2rounds_xoodoo_0 = Xoodoo(nrounds=2)
>>> _2rounds_xoodoo_0.has_differential_trail(8)
True
>>> _2rounds_xoodoo_1 = Xoodoo(nrounds=2)
>>> _2rounds_xoodoo_1.has_differential_trail(7)
False
"""
for constraint in self.differential_trail_constraints(w):
self.solver.add(constraint)
return self.solver.check()
def has_valid_differential_trail(self, w):
"""
Return `True` if there exists a valid differential trail with trail weight `w`
INPUT:
- ``w`` -- a positive integer
TESTS::
>>> from xoodoo import Xoodoo
>>> _2rounds_xoodoo_0 = Xoodoo(nrounds=2)
>>> _2rounds_xoodoo_0.has_valid_differential_trail(8)
True
>>> _2rounds_xoodoo_1 = Xoodoo(nrounds=2)
>>> _2rounds_xoodoo_1.has_valid_differential_trail(7)
False
"""
constraints = self.differential_trail_constraints(w) + self.validity_constraints() +\
self.permutation_constraints()
for constraint in constraints:
self.solver.add(constraint)
return self.solver.check()
def differential_trail(self, w):
"""
        Return a differential trail (if it exists) with trail weight `w`
INPUT:
- ``w`` -- a positive integer
TESTS::
>>> from xoodoo import Xoodoo
>>> _2rounds_xoodoo_0 = Xoodoo(nrounds=2)
>>> trail_0 = _2rounds_xoodoo_0.differential_trail(8)
>>> _2rounds_xoodoo_1 = Xoodoo(nrounds=2)
>>> trail_1 = _2rounds_xoodoo_1.differential_trail(7)
Traceback (most recent call last):
...
RuntimeError: no differential trail with trail weight 7
"""
if not self.has_differential_trail(w):
raise RuntimeError("no differential trail with trail weight %d" % w)
return self._differential_trail_()
def min_differential_trail(self):
"""
Return a differential trail with minimum trail weight
>>> from xoodoo import Xoodoo
>>> X = Xoodoo(nrounds=2)
>>> trail = X.min_differential_trail()
>>> trail.weight()
8
"""
trail = None
max_trail_weight = XoodooState.word_size * XoodooState.ncols
for w in range(2, max_trail_weight, 2):
other_instance = Xoodoo(nrounds=self.nrounds)
try:
trail = other_instance.differential_trail(w)
except RuntimeError:
continue
break
return trail
def valid_differential_trail(self, w):
"""
        Return a valid differential trail (if it exists) with trail weight `w`
INPUT:
- ``w`` -- a positive integer
TESTS::
>>> from xoodoo import Xoodoo
>>> _2rounds_xoodoo_0 = Xoodoo(nrounds=2)
>>> trail_0 = _2rounds_xoodoo_0.valid_differential_trail(8)
>>> _2rounds_xoodoo_1 = Xoodoo(nrounds=2)
>>> trail_1 = _2rounds_xoodoo_1.valid_differential_trail(7)
Traceback (most recent call last):
...
RuntimeError: no valid differential trail with trail weight 7
"""
if not self.has_valid_differential_trail(w):
raise RuntimeError("no valid differential trail with trail weight %d" % w)
return self._differential_trail_()
def min_valid_differential_trail(self):
"""
Return a valid differential trail with minimum trail weight
TESTS::
>>> from xoodoo import Xoodoo
>>> X = Xoodoo(nrounds=2)
>>> trail = X.min_valid_differential_trail()
>>> trail.weight()
8
"""
trail = None
max_trail_weight = XoodooState.word_size * XoodooState.ncols
for w in range(2, max_trail_weight, 2):
other_instance = Xoodoo(nrounds=self.nrounds)
try:
trail = other_instance.valid_differential_trail(w)
except RuntimeError:
continue
break
return trail
def valid_input(self, w):
"""
        Return an input that satisfies the differential trail (if it exists) with trail weight `w`
INPUT:
- ``w`` -- a positive integer
TESTS::
>>> from xoodoo import Xoodoo, XoodooState
>>> _2rounds_xoodoo_0 = Xoodoo(nrounds=2)
>>> x = _2rounds_xoodoo_0.valid_input(8)
>>> isinstance(x, XoodooState)
True
"""
if not self.has_valid_differential_trail(w):
raise RuntimeError("no valid differential trail with trail weight %d" % w)
x = self.input_variables()
model = self.solver.model()
nrows = XoodooState.nrows
ncols = XoodooState.ncols
return XoodooState([model[x[i][j].name] for i in range(nrows) for j in range(ncols)])
def _differential_trail_(self):
"""
        Return a XoodooTrail object built from `self.solver.model()`
"""
model = self.solver.model()
nrows = XoodooState.nrows
ncols = XoodooState.ncols
trail = []
for r in range(self.nrounds):
a = self.diff_input_round_vars(r)
trail.append([model[a[i][j].name] for i in range(nrows) for j in range(ncols)])
b = self.diff_input_rho_west_vars(r)
trail.append([model[b[i][j].name] for i in range(nrows) for j in range(ncols)])
c = self.diff_input_chi_vars(r)
trail.append([model[c[i][j].name] for i in range(nrows) for j in range(ncols)])
d = self.diff_input_rho_east_vars(r)
trail.append([model[d[i][j].name] for i in range(nrows) for j in range(ncols)])
y = self.diff_output_vars()
trail.append([model[y[i][j].name] for i in range(nrows) for j in range(ncols)])
return XoodooTrail(trail)
def __repr__(self):
"""
Return a string representation of Xoodoo object
EXAMPLES::
>>> from xoodoo import Xoodoo
>>> Xoodoo(nrounds=1)
1-round of Xoodoo
>>> Xoodoo(nrounds=2)
2-rounds of Xoodoo
"""
round_str = "round"
if self.nrounds > 1:
round_str += 's'
return "%d-%s of Xoodoo" % (self.nrounds, round_str)
|
import React, {useRef, useEffect, useState} from 'react';
import PropTypes from 'prop-types';
const DragAndDrop = props => {
const dropRef = useRef(null);
const dragCounter = useRef(0);
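  // dragCounter tracks nested dragenter/dragleave pairs so the overlay is only
  // hidden once the pointer has left the outermost drop target.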
const [dragging, setDragging] = useState(false);
const handleDrag = e => {
e.preventDefault();
e.stopPropagation();
};
const handleDragIn = e => {
e.preventDefault();
e.stopPropagation();
dragCounter.current += 1;
if (e.dataTransfer.items && e.dataTransfer.items.length > 0) {
setDragging(true);
}
};
const handleDragOut = e => {
e.preventDefault();
e.stopPropagation();
dragCounter.current -= 1;
if (dragCounter.current > 0) return;
setDragging(false);
};
const handleDrop = e => {
e.preventDefault();
e.stopPropagation();
setDragging(false);
if (e.dataTransfer.files && e.dataTransfer.files.length > 0) {
props.handleDrop(e.dataTransfer.files);
e.dataTransfer.clearData();
dragCounter.current = 0;
}
};
const handleChange = e => {
props.handleDrop(e.target.files);
};
useEffect(() => {
let div = dropRef.current;
div.addEventListener('dragenter', handleDragIn);
div.addEventListener('dragleave', handleDragOut);
div.addEventListener('dragover', handleDrag);
div.addEventListener('drop', handleDrop);
return () => {
div.removeEventListener('dragenter', handleDragIn);
div.removeEventListener('dragleave', handleDragOut);
div.removeEventListener('dragover', handleDrag);
div.removeEventListener('drop', handleDrop);
};
});
return (
<div className={'drop-box ' + (dragging ? 'drag-hover' : '')} ref={dropRef}>
<input className="input" type="file" onChange={handleChange} multiple />
<div className="drag-text">
{' '}
{dragging ? (
'Drop here'
) : (
<div className="drag-box">
Click to choose or <br />
Drop file here
</div>
)}
</div>
</div>
);
};
DragAndDrop.propTypes = {
handleDrop: PropTypes.func
};
export default DragAndDrop;
|
# coding: utf-8
"""
ORY Oathkeeper
ORY Oathkeeper is a reverse proxy that checks the HTTP Authorization for validity against a set of rules. This service uses Hydra to validate access tokens and policies. # noqa: E501
The version of the OpenAPI document: Latest
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from ory_oathkeeper_client.configuration import Configuration
class HealthStatus(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'status': 'str'
}
attribute_map = {
'status': 'status'
}
def __init__(self, status=None, local_vars_configuration=None): # noqa: E501
"""HealthStatus - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._status = None
self.discriminator = None
if status is not None:
self.status = status
@property
def status(self):
"""Gets the status of this HealthStatus. # noqa: E501
Status always contains \"ok\". # noqa: E501
:return: The status of this HealthStatus. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this HealthStatus.
Status always contains \"ok\". # noqa: E501
:param status: The status of this HealthStatus. # noqa: E501
:type: str
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, HealthStatus):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, HealthStatus):
return True
return self.to_dict() != other.to_dict()
|
import { takeLatest, call, put, select } from 'redux-saga/effects';
import swal from 'sweetalert';
import { push } from 'connected-react-router';
import history from './../../../utils/history';
import * as AppSelectors from '../../App/selectors';
import * as AppActions from '../../App/actions';
import * as Selectors from './selectors';
import request from '../../../utils/request';
import * as Endpoints from '../../../components/Endpoints';
import * as Actions from './actions';
import * as Constants from './constants';
export function* getChartOfAccounts() {
const accessToken = yield select(AppSelectors.makeSelectAccessToken());
const currentUser = yield select(AppSelectors.makeSelectCurrentUser());
const requestURL = `${Endpoints.GetAllChartOfAccountApi}/${currentUser.organisation.orgId}`;
try {
const response = yield call(request, requestURL, {
method: 'GET',
headers: new Headers({
Authorization: `Bearer ${accessToken}`,
'Content-Type': 'application/json',
}),
});
yield put(Actions.getChartOfAccountsSuccess(response));
} catch (err) {
yield put(Actions.getChartOfAccountsError(err));
}
}
export function* getBranches() {
const accessToken = yield select(AppSelectors.makeSelectAccessToken());
const currentUser = yield select(AppSelectors.makeSelectCurrentUser());
const requestURL = `${Endpoints.GetBranches}?orgId=${currentUser && currentUser.organisation.id}&tagId=1`;
try {
const response = yield call(request, requestURL, {
method: 'GET',
headers: new Headers({
Authorization: `Bearer ${accessToken}`,
'Content-Type': 'application/json',
}),
});
yield put(Actions.getBranchesSuccess(response));
} catch (err) {
yield put(Actions.getBranchesError(err));
}
}
export function* getAssets() {
const accessToken = yield select(AppSelectors.makeSelectAccessToken());
const currentUser = yield select(AppSelectors.makeSelectCurrentUser());
const requestURL = `${Endpoints.GetAssetByOrgIdApi}?orgId=${currentUser.organisation.orgId}`;
try {
const response = yield call(request, requestURL, {
method: 'GET',
headers: new Headers({
Authorization: `Bearer ${accessToken}`,
'Content-Type': 'application/json',
}),
});
yield put(Actions.getAssetsSuccess(response));
} catch (err) {
yield put(Actions.getAssetsError(err));
}
}
export function* getAssetById({ payload }) {
const accessToken = yield select(AppSelectors.makeSelectAccessToken());
const currentUser = yield select(AppSelectors.makeSelectCurrentUser());
const requestURL = `${Endpoints.GetAssetByIdApi}/${payload.id}`;
try {
const response = yield call(request, requestURL, {
method: 'GET',
headers: new Headers({
Authorization: `Bearer ${accessToken}`,
'Content-Type': 'application/json',
}),
});
yield put(Actions.getAssetByIdSuccess(response));
} catch (err) {
yield put(Actions.getAssetByIdError(err));
}
}
export function* createAsset({ payload }) {
const accessToken = yield select(AppSelectors.makeSelectAccessToken());
const currentUser = yield select(AppSelectors.makeSelectCurrentUser());
const requestURL = `${Endpoints.AddAssetApi}`;
payload.orgId = currentUser.organisation.orgId;
try {
const response = yield call(request, requestURL, {
method: 'POST',
body: JSON.stringify(payload),
headers: new Headers({
Authorization: `Bearer ${accessToken}`,
'Content-Type': 'application/json',
}),
});
swal('Success', 'Asset created successfully!', 'success');
yield put(Actions.createAssetSuccess(response));
yield put(Actions.getAssets());
yield put(Actions.closeNewAssetDialog());
yield put(push('/account/fixedassets'));
} catch (err) {
swal('Error', 'Something went wrong', 'error');
yield put(Actions.createAssetError(err));
}
}
export function* disposeAsset({ payload }) {
const accessToken = yield select(AppSelectors.makeSelectAccessToken());
const currentUser = yield select(AppSelectors.makeSelectCurrentUser());
const requestURL = `${Endpoints.CreateDisposalApi}`;
payload.orgId = currentUser.organisation.orgId;
try {
const response = yield call(request, requestURL, {
method: 'POST',
body: JSON.stringify(payload),
headers: new Headers({
Authorization: `Bearer ${accessToken}`,
'Content-Type': 'application/json',
}),
});
swal('Success', 'Asset disposed successfully!', 'success');
yield put(Actions.disposeAssetSuccess(response));
yield put(Actions.getAssets());
yield put(Actions.closeAssetDisposalDialog());
} catch (err) {
swal('Error', 'Something went wrong', 'error');
yield put(Actions.disposeAssetError(err));
}
}
export function* updateAsset({ payload }) {
const accessToken = yield select(AppSelectors.makeSelectAccessToken());
const currentUser = yield select(AppSelectors.makeSelectCurrentUser());
const requestURL = `${Endpoints.UpdateAssetApi}`;
  payload.orgId = currentUser.organisation.orgId;
try {
const response = yield call(request, requestURL, {
method: 'POST',
body: JSON.stringify(payload),
headers: new Headers({
Authorization: `Bearer ${accessToken}`,
'Content-Type': 'application/json',
}),
});
swal('Success', 'Asset updated successfully!', 'success');
yield put(Actions.updateAssetSuccess(response));
yield put(Actions.getAssets());
yield put(Actions.closeNewAssetDialog());
yield put(push('/account/fixedassets'));
} catch (err) {
swal('Error', 'Something went wrong', 'error');
yield put(Actions.updateAssetError(err));
}
}
export function* getAssetTypes() {
const accessToken = yield select(AppSelectors.makeSelectAccessToken());
const currentUser = yield select(AppSelectors.makeSelectCurrentUser());
const requestURL = `${Endpoints.GetAssetTypeByOrgIdApi}?orgId=${currentUser.organisation.orgId}`;
try {
const response = yield call(request, requestURL, {
method: 'GET',
headers: new Headers({
Authorization: `Bearer ${accessToken}`,
'Content-Type': 'application/json',
}),
});
yield put(Actions.getAssetTypesSuccess(response));
} catch (err) {
yield put(Actions.getAssetTypesError(err));
}
}
export function* createAssetType({ payload }) {
const accessToken = yield select(AppSelectors.makeSelectAccessToken());
const currentUser = yield select(AppSelectors.makeSelectCurrentUser());
const requestURL = `${Endpoints.CreateAssetTypeApi}`;
payload.orgId = currentUser.organisation.orgId;
try {
const response = yield call(request, requestURL, {
method: 'POST',
body: JSON.stringify(payload),
headers: new Headers({
Authorization: `Bearer ${accessToken}`,
'Content-Type': 'application/json',
}),
});
swal('Success', 'Asset type created successfully!', 'success');
yield put(Actions.createAssetTypeSuccess(response));
yield put(Actions.getAssetTypes());
yield put(Actions.closeNewAssetTypeDialog());
} catch (err) {
swal('Error', 'Something went wrong', 'error');
yield put(Actions.createAssetTypeError(err));
}
}
// Individual exports for testing
export default function* SettingsSaga() {
yield takeLatest(Constants.GET_CHART_OF_ACCOUNTS, getChartOfAccounts);
yield takeLatest(Constants.GET_BRANCHES, getBranches);
yield takeLatest(Constants.GET_ASSETS, getAssets);
yield takeLatest(Constants.GET_ASSET_BY_ID, getAssetById);
yield takeLatest(Constants.CREATE_ASSET, createAsset);
yield takeLatest(Constants.DISPOSE_ASSET, disposeAsset);
yield takeLatest(Constants.UPDATE_ASSET, updateAsset);
yield takeLatest(Constants.GET_ASSET_TYPES, getAssetTypes);
yield takeLatest(Constants.CREATE_ASSET_TYPE, createAssetType);
}
|
const {
MessageEmbed
} = require("discord.js");
exports.run = async (client, message, args, level) => {
const language = client.getSettings(message.guild.id).language
const lang = require("../lib/languages/" + language + ".json")
try {
const hex = Math.random().toString(16).slice(2, 8).toUpperCase().slice(-6)
const color = !args[0] ? hex : args[0]
const embed = new MessageEmbed()
.setColor(hex)
.setDescription(`${lang.RandomHex} \`#${hex}\``)
.setTitle("#" + hex)
.setImage(`https://derekdinan.github.io/ClassiCube-Stuff/hex-to-img/?hex=${color}`)
.setFooter(`${lang.RespondingTo} ${message.author.tag}`, message.author.avatarURL())
.setTimestamp()
message.channel.send({
embeds: [embed]
})
} catch (err) {
const errors = require('../modules/errors.js')
errors.embedError(err, lang, message)
}
};
exports.conf = {
enabled: true,
aliases: ["randomcolor", "randomcolour", "colour", "rcol", "rc"],
guildOnly: false,
permLevel: "User",
};
exports.help = {
name: "color",
category: "Utility",
description: "Returns a random hex color code.",
usage: "color",
}; |
"""
Interesting numbers are 3-or-more digit numbers that meet one or more of the following criteria:
Any digit followed by all zeros: 100, 90000
Every digit is the same number: 1111
The digits are sequential, incrementing†: 1234
The digits are sequential, decrementing‡: 4321
The digits are a palindrome: 1221 or 73837
The digits match one of the values in the awesome_phrases array
† For incrementing sequences, 0 should come after 9, and not before 1, as in 7890.
‡ For decrementing sequences, 0 should come after 1, and not before 9, as in 3210.
Write the function that parses the mileage number input, and returns a 2 if the number is "interesting" (see the criteria above),
a 1 if an interesting number occurs within the next two miles, or a 0 if the number is not interesting.
"""
def _check_sequential(number):
number_list = list(str(number))
if int(number_list[-1]) == 0:
if int(number_list[-2]) != 9 and int(number_list[-2]) != 1:
return False
else:
number_list = number_list[:-1]
minus_v = int(number_list[1]) - int(number_list[0])
if abs(minus_v) == 1:
for i in range(1, len(number_list)):
if int(number_list[i]) - int(number_list[i - 1]) != minus_v:
return False
else:
return False
return True
def _check_palindrome(number):
number_list = list(str(number))
while len(number_list) > 1:
if number_list.pop(0) != number_list.pop(-1):
return False
return True
def _check_interesting_number(number, awesome_phrases):
if number < 100:
return False
    return (number in awesome_phrases or number % 100 == 0
            or _check_sequential(number) or _check_palindrome(number))
def is_interesting(number, awesome_phrases):
if _check_interesting_number(number, awesome_phrases):
return 2
elif _check_interesting_number(number+1, awesome_phrases) or _check_interesting_number(number+2, awesome_phrases):
return 1
return 0
print(is_interesting(67888,[]))
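# 67888 itself is not interesting, but 67890 (an incrementing sequence whose
# trailing 0 follows a 9) is within two miles of it, so this prints 1.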
# print(_check_palindrome(11211))
# print(_check_sequential(3210))
|
import React from "react";
import "../../style/feedback/feedback-area.scss";
class FeedbackArea extends React.Component {
render() {
return (
<div className="feedback-area">
</div>
)
}
}
export default FeedbackArea; |
'use strict';
const consoleHelper = require('../helpers/consoleHelper');
const Duration = require('./Duration');
module.exports = class TaskRole {
/**
* Constructor for TaskRole
* @param {Object} roleDef Example:
* { name: 'crewA',
 * description: 'Person who does XYZ',
* duration: { minutes: 20 } }
* @param {Object} taskRequirements Info about this usage of task from procedure file. Ex:
* { file: 'colka-temporary-lid-removal.yml',
* roles: { crewA: 'EV1', crewB: 'EV2' },
* color: '#7FB3D5' }
*/
constructor(roleDef, taskRequirements) {
this.name = roleDef.name;
this.description = roleDef.description;
this.duration = new Duration(roleDef.duration);
if (!taskRequirements || !taskRequirements.roles || !taskRequirements.roles[this.name]) {
consoleHelper.error([
'Roles defined within tasks must be filled by procedure actors',
`Within task "${taskRequirements.file}", role "${this.name}" is defined`,
`Within the procedure, role "${this.name}" is not filled by an actor`
], 'TaskRole error');
} else {
this.actor = taskRequirements.roles[this.name];
}
}
getDefinition() {
const def = {
name: this.name,
duration: this.duration ? this.duration.getDefinition() : { seconds: 0 }
};
if (this.description) {
def.description = this.description;
}
return def;
}
};
|
// react
import React, { Component } from "react";
// other packages
import { createStackNavigator } from "@react-navigation/stack";
// screens
import { SignInScreen, SignUpScreen } from "../screens";
const Stack = createStackNavigator();
export default class AuthenticationScreensNavigator extends Component {
render() {
return (
<Stack.Navigator screenOptions={{ headerShown: false }}>
<Stack.Screen name="SignIn" component={SignInScreen} />
<Stack.Screen name="SignUp" component={SignUpScreen} />
</Stack.Navigator>
);
}
}
|
import AppToggleButton from "~/components/AppToggleButton";
import { mount } from "@vue/test-utils";
describe('AppToggleButton', () => {
let wrapper;
beforeEach(() => {
    // Reset wrapper to null so the previous instance can be garbage collected.
wrapper = null;
    // Assign the result of mounting the Vue component to wrapper and reuse it across tests.
wrapper = mount(AppToggleButton);
});
  test('is off in the default state', () => {
expect(wrapper.find('p').text()).toBe('off');
    // Use the find method to assert against the p element located inside this.$el.
});
  test('switches to on when the button is clicked', () => {
    wrapper.find('button').trigger('click'); // fire the click event
expect(wrapper.find('p').text()).toBe('on');
});
});
|
import * as tslib_1 from "tslib";
import { Inject, Pipe } from '@angular/core';
import { TIME_LOCALE } from '../tokens/time-locale.token';
import { TimeUnit } from '../models/time-unit.enum';
import { DateTime } from 'luxon';
let TimeLocalizerPipe = class TimeLocalizerPipe {
constructor(locale) {
this.locale = locale;
}
transform(time, timeUnit, isKeyboardEnabled = false) {
if (time == null || time === '') {
return '';
}
switch (timeUnit) {
case TimeUnit.HOUR: {
const format = (time === 0 || isKeyboardEnabled) ? 'HH' : 'H';
return this.formatTime('hour', time, format);
}
case TimeUnit.MINUTE:
return this.formatTime('minute', time, 'mm');
default:
throw new Error(`There is no Time Unit with type ${timeUnit}`);
}
}
formatTime(timeMeasure, time, format) {
try {
return DateTime.fromObject({ [timeMeasure]: +time }).setLocale(this.locale).toFormat(format);
}
catch (_a) {
throw new Error(`Cannot format provided time - ${time} to locale - ${this.locale}`);
}
}
};
TimeLocalizerPipe = tslib_1.__decorate([
Pipe({
name: 'timeLocalizer'
}),
tslib_1.__param(0, Inject(TIME_LOCALE)),
tslib_1.__metadata("design:paramtypes", [String])
], TimeLocalizerPipe);
export { TimeLocalizerPipe };
|
import { TOGGLE_NAV_VISIBILITY, TOGGLE_LOADED_RECORDS } from '../actions/siteHeader';
import { SET_CURRENT_PREVIEW } from '../actions/record';
const initialState = {
loaded_records_visibility: false,
nav_visibility: true,
nav_visibility_user: true,
loaded_records_visibility_user: false,
};
export default function siteHeader(state = initialState, action) {
switch (action.type) {
case TOGGLE_LOADED_RECORDS: {
const requested = action.data;
return {
...state,
loaded_records_visibility_user: requested,
loaded_records_visibility: requested,
};
}
case TOGGLE_NAV_VISIBILITY: {
const requested = action.data;
return {
...state,
nav_visibility: requested,
nav_visibility_user: requested,
};
}
case SET_CURRENT_PREVIEW: {
const isPreviewing = !!action.data.tableId;
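      // While a record is being previewed, force both panels hidden; otherwise
      // fall back to the user's own visibility choices.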
const newState = {
...state,
nav_visibility: isPreviewing ? false : state.nav_visibility_user,
loaded_records_visibility: isPreviewing ? false : state.loaded_records_visibility_user,
};
return newState;
}
default:
return state;
}
}
|
import auth0 from 'auth0-js';
import { Auth0Consts, FacternPaths, AppPaths } from '../constants/enums';
export default class Auth0 {
auth0 = new auth0.WebAuth({
domain: FacternPaths.auth0Domain,
clientID: Auth0Consts.clientId,
audience: FacternPaths.auth0Audience,
scope: 'openid email profile',
});
constructor() {
this.login = this.login.bind(this);
this.logout = this.logout.bind(this);
}
login(signUp = false) {
this.auth0.authorize({
redirectUri: `${AppPaths.origin}/auth`,
responseType: 'token id_token',
initialScreen: signUp ? 'signUp' : 'login',
});
}
logout() {
this.auth0.logout({
returnTo: `${AppPaths.origin}/login`,
federated: true,
});
}
}
|
(function() {
sap.ui.jsfragment("view.SupplierAddressForm", {
createContent: function(oController) {
var form, grid;
form = new sap.ui.layout.form.SimpleForm({
minWidth: 1024,
editable: false,
content: [
new sap.ui.core.Title({
text: "Company"
}), new sap.m.Label({
text: "SupplierID"
}), new sap.m.Text({
text: "{SupplierID}"
}), new sap.m.Label({
text: "CompanyName"
}), new sap.m.Text({
text: "{CompanyName}"
}), new sap.ui.core.Title({
text: "Contact"
}), new sap.m.Label({
text: "ContactName"
}), new sap.m.Text({
text: "{ContactName}"
}), new sap.m.Label({
text: "ContactTitle"
}), new sap.m.Text({
text: "{ContactTitle}"
}), new sap.m.Label({
text: "PostalCode"
}), new sap.m.Text({
text: "{PostalCode}"
}), new sap.m.Label({
text: "Addreess"
}), new sap.m.Text({
text: {
parts: [
{
path: "Country"
}, {
path: "Region"
}, {
path: "City"
}, {
path: "Address"
}
],
formatter: function(country, region, city, address) {
if (country == null) {
country = "";
}
if (region == null) {
region = "";
}
if (city == null) {
city = "";
}
if (address == null) {
address = "";
}
return "" + country + " " + region + " " + city + " " + address;
}
}
}), new sap.m.Label({
text: "Phone"
}), new sap.m.Text({
text: "{Phone}"
}), new sap.m.Label({
text: "HomePage"
}), new sap.m.Text({
text: "{HomePage}"
})
]
});
return grid = new sap.ui.layout.Grid({
defaultSpan: "L12 M12 S12",
hSpacing: 2,
width: "auto",
content: [form]
});
}
});
}).call(this);
|
$(document).ready(function(){"use strict";$("#products-datatable").DataTable({language:{paginate:{previous:"<i class='mdi mdi-chevron-left'>",next:"<i class='mdi mdi-chevron-right'>"},info:"Showing customers _START_ to _END_ of _TOTAL_",lengthMenu:'Display <select class=\'custom-select custom-select-sm ml-1 mr-1\'><option value="10">10</option><option value="20">20</option><option value="-1">All</option></select> customers'},pageLength:10,columns:[{orderable:!1,render:function(e,o,l,t){return"display"===o&&(e='<div class="custom-control custom-checkbox"><input type="checkbox" class="custom-control-input dt-checkboxes"><label class="custom-control-label"> </label></div>'),e},checkboxes:{selectRow:!0,selectAllRender:'<div class="custom-control custom-checkbox"><input type="checkbox" class="custom-control-input dt-checkboxes"><label class="custom-control-label"> </label></div>'}},{orderable:!0},{orderable:!0},{orderable:!0},{orderable:!0},{orderable:!0},{orderable:!0},{orderable:!0},{orderable:!1}],select:{style:"multi"},order:[[5,"asc"]],drawCallback:function(){$(".dataTables_paginate > .pagination").addClass("pagination-rounded")}})}); |
# TODO Should probably either correct these or ignore linting in tests
# pylint:disable=unused-wildcard-import,unused-variable,function-redefined
from settings import *
import pytest
from c3x.enomo.energy_optimiser import LocalEnergyOptimiser
N_INTERVALS = 48
SIMPLE_TOU_TARIFF = np.array([1.0] * 12 + [2.0] * 24 + [1.0] * 12)
SIMPLE_FLAT_TARIFF = np.array([1.0] * 48)
ZERO_TARIFF = np.zeros(48)
def create_battery():
return EnergyStorage(
max_capacity=4.0,
depth_of_discharge_limit=0,
charging_power_limit=2.0,
discharging_power_limit=-2.0,
charging_efficiency=1.0,
discharging_efficiency=1.0,
throughput_cost=0.0,
)
def create_energy_system(
battery,
load,
generation,
local_energy_import_tariff,
local_energy_export_tariff,
remote_energy_import_tariff,
remote_energy_export_tariff,
local_transport_import_tariff,
local_transport_export_tariff,
remote_transport_import_tariff,
remote_transport_export_tariff,
):
energy_system = EnergySystem()
energy_system.add_energy_storage(battery)
demand = Demand()
demand.add_demand_profile(load)
pv = Generation()
pv.add_generation_profile(generation)
tariff = LocalTariff()
tariff.add_local_energy_tariff_profile_import(local_energy_import_tariff)
tariff.add_local_energy_tariff_profile_export(local_energy_export_tariff)
tariff.add_remote_energy_tariff_profile_import(remote_energy_import_tariff)
tariff.add_remote_energy_tariff_profile_export(remote_energy_export_tariff)
tariff.add_local_transport_tariff_profile_import(local_transport_import_tariff)
tariff.add_local_transport_tariff_profile_export(local_transport_export_tariff)
tariff.add_remote_transport_tariff_profile_import(remote_transport_import_tariff)
tariff.add_remote_transport_tariff_profile_export(remote_transport_export_tariff)
energy_system.add_demand(demand)
energy_system.add_generation(pv)
energy_system.add_local_tariff(tariff)
return energy_system
@pytest.mark.solver('miqp')
def test_local_greedy_solar_optimisation():
"""Storage should preferentially charge from solar earlier
when tariffs are equal across time periods
"""
energy_system = create_energy_system(
create_battery(),
np.array([0.0] * 24 + [5.0] * 24),
np.array([-5.0] * 24 + [0.0] * 24),
dict(enumerate(SIMPLE_FLAT_TARIFF)),
dict(enumerate(SIMPLE_FLAT_TARIFF / 2.0)),
dict(enumerate(SIMPLE_FLAT_TARIFF * 1.5)),
dict(enumerate(SIMPLE_FLAT_TARIFF / 3.0)),
dict(enumerate(ZERO_TARIFF)),
dict(enumerate(ZERO_TARIFF)),
dict(enumerate(ZERO_TARIFF)),
dict(enumerate(ZERO_TARIFF)),
)
optimiser = LocalEnergyOptimiser(
30,
N_INTERVALS,
energy_system,
OptimiserObjectiveSet.LocalModels
+ [OptimiserObjective.GreedyGenerationCharging],
)
storage_charge_generation = optimiser.values("storage_charge_generation")
storage_discharge_demand = optimiser.values("storage_discharge_demand")
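    # With a 4.0-capacity battery, ±2.0 power limits and 30-minute intervals
    # (1.0 unit of energy per interval), greedy generation charging fills the
    # battery during the first 4 generation intervals; the stored 4.0 is then
    # discharged at roughly -1/6 per interval across the 24 demand intervals.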
for i in range(0, 4):
assert storage_charge_generation[i] == 1.0
for i in range(4, N_INTERVALS):
assert storage_charge_generation[i] == 0.0
for i in range(0, 24):
assert storage_discharge_demand[i] == 0.0
for i in range(24, N_INTERVALS):
assert storage_discharge_demand[i] == pytest.approx(-1.0 / 6.0, 3)
@pytest.mark.solver('miqp')
def test_local_greedy_demand_optimisation():
"""Storage should preferentially discharge to meet demand earlier
when tariffs are equal across time periods
"""
energy_system = create_energy_system(
create_battery(),
np.array([0.0] * 24 + [5.0] * 24),
np.array([-5.0] * 24 + [0.0] * 24),
dict(enumerate(SIMPLE_FLAT_TARIFF)),
dict(enumerate(SIMPLE_FLAT_TARIFF / 2.0)),
dict(enumerate(SIMPLE_FLAT_TARIFF * 1.5)),
dict(enumerate(SIMPLE_FLAT_TARIFF / 3.0)),
dict(enumerate(ZERO_TARIFF)),
dict(enumerate(ZERO_TARIFF)),
dict(enumerate(ZERO_TARIFF)),
dict(enumerate(ZERO_TARIFF)),
)
optimiser = LocalEnergyOptimiser(
30,
N_INTERVALS,
energy_system,
OptimiserObjectiveSet.LocalModels
+ [OptimiserObjective.GreedyDemandDischarging],
)
storage_charge_generation = optimiser.values("storage_charge_generation")
storage_discharge_demand = optimiser.values("storage_discharge_demand")
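    # The mirror image of the previous test: greedy demand discharging empties
    # the battery at the power limit (-1.0 per interval) over the first 4 demand
    # intervals, while charging is spread at roughly 1/6 per interval across the
    # 24 generation intervals.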
for i in range(0, 24):
assert storage_charge_generation[i] == pytest.approx(1.0 / 6.0, 3)
for i in range(24, N_INTERVALS):
assert storage_charge_generation[i] == 0.0
for i in range(0, 24):
assert storage_discharge_demand[i] == 0.0
for i in range(24, 28):
assert storage_discharge_demand[i] == -1.0
for i in range(28, N_INTERVALS):
assert storage_discharge_demand[i] == 0.0
|
define(
({
commonWebmap: {
selector: {
lblWebMap: "Web 地图",
lblLocation: "位置",
lblContent: "内容",
lblPopup: "弹出窗口",
lblControls: "额外部分",
lblOverview: "鹰眼图",
lblLegend: "图例",
loadingTitle: "加载标题",
section: "节",
sections: "节",
and: "和",
action: "节中的操作",
actions: "节中的操作",
originalWebmap: "用于发布 %TPL_NAME% 的 Web 地图",
browseMaps: "浏览地图",
current: "当前 web 地图",
select: "选择 web 地图",
newMap: "新选择的 web 地图",
webmapDefault: "默认 Web 地图",
customCfg: "自定义配置",
tooltipLocation: "定义读者导航到此部分时所用的地图位置。",
tooltipContent: "定义可见图层。",
tooltipPopup: "选择读者导航到此节时要显示的弹出窗口。",
tooltipOverview: "显示主地图的同时显示一个小鹰眼图",
tooltipLegend: "在地图上显示地图图例,地图具有许多图层和符号的情况下非常有用。",
mapCfgInvite: "使用这些控件来配置地图"
},
configure: {
btnReset: "重置",
btnCancel: "取消",
tocTitle: "地图内容",
tocExplain: "选择将显示哪些图层。",
tocNoData: "无法配置任何图层。",
tocSave: "保存地图内容",
extentTitle: "地图位置",
extentExplain: "平移并缩放地图以定义供读者观看的地图外观。",
extentSave: "保存地图位置",
popupTitle: "地图弹出窗口",
popupExplain: "单击要素以打开您要显示的弹出窗口。",
popupSave: "保存弹出窗口配置",
hintNavigation: "地图导航已禁用。"
}
},
configure: {
mapdlg:{
items:{
organizationLabel: "我的组织",
onlineLabel: "ArcGIS Online",
contentLabel: "我的内容",
favoritesLabel: "我的收藏夹"
},
title: "选择 Web 地图",
searchTitle: "搜索",
ok: "确定",
cancel: "取消",
placeholder: "输入搜索词或 Web 地图 ID..."
}
}
})
);
|
from typing import (
AbstractSet, Any, Callable, Dict, Iterable, List, Mapping, MutableMapping,
Optional, Sequence, Set, Tuple, Union, cast
)
from typing_extensions import TypedDict
import django.db.utils
from django.db.models import Count, Exists, OuterRef
from django.contrib.contenttypes.models import ContentType
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.conf import settings
from django.core.files import File
from analytics.lib.counts import COUNT_STATS, do_increment_logging_stat, \
RealmCount
from zerver.lib.bugdown import (
version as bugdown_version,
url_embed_preview_enabled,
convert as bugdown_convert,
)
from zerver.lib.addressee import Addressee
from zerver.lib.bot_config import (
ConfigError,
get_bot_config,
get_bot_configs,
set_bot_config,
)
from zerver.lib.cache import (
bot_dict_fields,
display_recipient_cache_key,
delete_user_profile_caches,
to_dict_cache_key_id,
user_profile_by_api_key_cache_key,
)
from zerver.lib.context_managers import lockfile
from zerver.lib.email_mirror_helpers import encode_email_address, encode_email_address_helper
from zerver.lib.emoji import emoji_name_to_emoji_code, get_emoji_file_name
from zerver.lib.exceptions import StreamDoesNotExistError, \
StreamWithIDDoesNotExistError
from zerver.lib.export import get_realm_exports_serialized
from zerver.lib.external_accounts import DEFAULT_EXTERNAL_ACCOUNTS
from zerver.lib.hotspots import get_next_hotspots
from zerver.lib.message import (
access_message,
MessageDict,
render_markdown,
update_first_visible_message_id,
)
from zerver.lib.realm_icon import realm_icon_url
from zerver.lib.realm_logo import get_realm_logo_data
from zerver.lib.retention import move_messages_to_archive
from zerver.lib.send_email import send_email, FromAddress, send_email_to_admins, \
clear_scheduled_emails, clear_scheduled_invitation_emails
from zerver.lib.server_initialization import create_internal_realm, server_initialized
from zerver.lib.storage import static_path
from zerver.lib.stream_subscription import (
get_active_subscriptions_for_stream_id,
get_active_subscriptions_for_stream_ids,
get_bulk_stream_subscriber_info,
get_stream_subscriptions_for_user,
get_stream_subscriptions_for_users,
get_subscribed_stream_ids_for_user,
num_subscribers_for_stream_id,
)
from zerver.lib.stream_topic import StreamTopicTarget
from zerver.lib.topic import (
filter_by_exact_message_topic,
filter_by_topic_name_via_message,
save_message_for_edit_use_case,
update_messages_for_topic_edit,
ORIG_TOPIC,
LEGACY_PREV_TOPIC,
TOPIC_LINKS,
TOPIC_NAME,
)
from zerver.lib.topic_mutes import (
get_topic_mutes,
add_topic_mute,
remove_topic_mute,
)
from zerver.lib.users import (
check_bot_name_available,
check_full_name,
format_user_row,
get_api_key,
user_profile_to_user_row,
)
from zerver.lib.user_status import (
update_user_status,
)
from zerver.lib.user_groups import create_user_group, access_user_group_by_id
from zerver.models import Realm, RealmEmoji, Stream, UserProfile, UserActivity, \
RealmDomain, Service, SubMessage, \
Subscription, Recipient, Message, Attachment, UserMessage, RealmAuditLog, \
UserHotspot, MultiuseInvite, ScheduledMessage, UserStatus, \
Client, DefaultStream, DefaultStreamGroup, UserPresence, \
ScheduledEmail, MAX_TOPIC_NAME_LENGTH, \
MAX_MESSAGE_LENGTH, get_client, get_stream, \
get_user_profile_by_id, PreregistrationUser, \
email_to_username, \
get_user_by_delivery_email, get_stream_cache_key, active_non_guest_user_ids, \
UserActivityInterval, active_user_ids, get_active_streams, \
realm_filters_for_realm, RealmFilter, stream_name_in_use, \
get_old_unclaimed_attachments, is_cross_realm_bot_email, \
Reaction, EmailChangeStatus, CustomProfileField, \
custom_profile_fields_for_realm, get_huddle_user_ids, \
CustomProfileFieldValue, validate_attachment_request, get_system_bot, \
query_for_ids, get_huddle_recipient, \
UserGroup, UserGroupMembership, get_default_stream_groups, \
get_bot_services, get_bot_dicts_in_realm, \
get_user_including_cross_realm, get_user_by_id_in_realm_including_cross_realm, \
get_stream_by_id_in_realm
from zerver.lib.alert_words import get_alert_word_automaton
from zerver.lib.avatar import avatar_url, avatar_url_from_dict
from zerver.lib.email_validation import get_realm_email_validator, \
validate_email_is_valid, get_existing_user_errors, \
email_reserved_for_system_bots_error
from zerver.lib.stream_recipient import StreamRecipientMap
from zerver.lib.validator import check_widget_content
from zerver.lib.widget import do_widget_post_save_actions
from django.db import transaction, IntegrityError, connection
from django.db.models import F, Q, Max, Sum
from django.db.models.query import QuerySet
from django.core.exceptions import ValidationError
from django.utils.timezone import now as timezone_now
from confirmation.models import Confirmation, create_confirmation_link, generate_key, \
confirmation_url
from confirmation import settings as confirmation_settings
from zerver.lib.bulk_create import bulk_create_users
from zerver.lib.timestamp import timestamp_to_datetime, datetime_to_timestamp
from zerver.lib.queue import queue_json_publish
from zerver.lib.utils import generate_api_key
from zerver.lib.create_user import create_user, get_display_email_address
from zerver.lib import bugdown
from zerver.lib.cache import cache_with_key, cache_set, \
user_profile_by_email_cache_key, \
cache_set_many, cache_delete, cache_delete_many
from zerver.decorator import statsd_increment
from zerver.lib.utils import log_statsd_event, statsd
from zerver.lib.i18n import get_language_name
from zerver.lib.alert_words import add_user_alert_words, \
remove_user_alert_words, set_user_alert_words
from zerver.lib.email_notifications import enqueue_welcome_emails
from zerver.lib.exceptions import JsonableError, ErrorCode, BugdownRenderingException
from zerver.lib.sessions import delete_user_sessions
from zerver.lib.upload import claim_attachment, delete_message_image, \
upload_emoji_image, delete_avatar_image, \
delete_export_tarball
from zerver.lib.video_calls import request_zoom_video_call_url
from zerver.tornado.event_queue import send_event
from zerver.lib.types import ProfileFieldData
from analytics.models import StreamCount
if settings.BILLING_ENABLED:
from corporate.lib.stripe import update_license_ledger_if_needed, downgrade_for_realm_deactivation
import ujson
import time
import datetime
import os
import platform
import logging
import itertools
from collections import defaultdict
from operator import itemgetter
# Used to type-annotate parameters of functions that accepted both str and
# unicode under Python 2 but accept only str under Python 3.
SizedTextIterable = Union[Sequence[str], AbstractSet[str]]
ONBOARDING_TOTAL_MESSAGES = 1000
ONBOARDING_UNREAD_MESSAGES = 20
STREAM_ASSIGNMENT_COLORS = [
"#76ce90", "#fae589", "#a6c7e5", "#e79ab5",
"#bfd56f", "#f4ae55", "#b0a5fd", "#addfe5",
"#f5ce6e", "#c2726a", "#94c849", "#bd86e5",
"#ee7e4a", "#a6dcbf", "#95a5fd", "#53a063",
"#9987e1", "#e4523d", "#c2c2c2", "#4f8de4",
"#c6a8ad", "#e7cc4d", "#c8bebf", "#a47462"]
# Store an event in the log for re-importing messages
def log_event(event: MutableMapping[str, Any]) -> None:
if settings.EVENT_LOG_DIR is None:
return
if "timestamp" not in event:
event["timestamp"] = time.time()
if not os.path.exists(settings.EVENT_LOG_DIR):
os.mkdir(settings.EVENT_LOG_DIR)
template = os.path.join(settings.EVENT_LOG_DIR,
'%s.' + platform.node() +
timezone_now().strftime('.%Y-%m-%d'))
with lockfile(template % ('lock',)):
with open(template % ('events',), 'a') as log:
log.write(ujson.dumps(event) + '\n')
def can_access_stream_user_ids(stream: Stream) -> Set[int]:
# return user ids of users who can access the attributes of
# a stream, such as its name/description.
if stream.is_public():
# For a public stream, this is everyone in the realm
# except unsubscribed guest users
return public_stream_user_ids(stream)
else:
# for a private stream, it's subscribers plus realm admins.
return private_stream_user_ids(
stream.id) | {user.id for user in stream.realm.get_admin_users_and_bots()}
def private_stream_user_ids(stream_id: int) -> Set[int]:
# TODO: Find similar queries elsewhere and de-duplicate this code.
subscriptions = get_active_subscriptions_for_stream_id(stream_id)
return {sub['user_profile_id'] for sub in subscriptions.values('user_profile_id')}
def public_stream_user_ids(stream: Stream) -> Set[int]:
guest_subscriptions = get_active_subscriptions_for_stream_id(
stream.id).filter(user_profile__role=UserProfile.ROLE_GUEST)
guest_subscriptions = {sub['user_profile_id'] for sub in guest_subscriptions.values('user_profile_id')}
return set(active_non_guest_user_ids(stream.realm_id)) | guest_subscriptions
def bot_owner_user_ids(user_profile: UserProfile) -> Set[int]:
is_private_bot = (
user_profile.default_sending_stream and
user_profile.default_sending_stream.invite_only or
user_profile.default_events_register_stream and
user_profile.default_events_register_stream.invite_only)
if is_private_bot:
return {user_profile.bot_owner_id, }
else:
users = {user.id for user in user_profile.realm.get_human_admin_users()}
users.add(user_profile.bot_owner_id)
return users
def realm_user_count(realm: Realm) -> int:
return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False).count()
def realm_user_count_by_role(realm: Realm) -> Dict[str, Any]:
human_counts = {UserProfile.ROLE_REALM_ADMINISTRATOR: 0,
UserProfile.ROLE_MEMBER: 0,
UserProfile.ROLE_GUEST: 0}
for value_dict in list(UserProfile.objects.filter(
realm=realm, is_bot=False, is_active=True).values('role').annotate(Count('role'))):
human_counts[value_dict['role']] = value_dict['role__count']
bot_count = UserProfile.objects.filter(realm=realm, is_bot=True, is_active=True).count()
return {
RealmAuditLog.ROLE_COUNT_HUMANS: human_counts,
RealmAuditLog.ROLE_COUNT_BOTS: bot_count,
}
def get_signups_stream(realm: Realm) -> Stream:
# This one-liner helps us work around a lint rule.
return get_stream("signups", realm)
def notify_new_user(user_profile: UserProfile) -> None:
sender_email = settings.NOTIFICATION_BOT
sender = get_system_bot(sender_email)
user_count = realm_user_count(user_profile.realm)
signup_notifications_stream = user_profile.realm.get_signup_notifications_stream()
# Send notification to realm signup notifications stream if it exists
# Don't send notification for the first user in a realm
if signup_notifications_stream is not None and user_count > 1:
internal_send_stream_message(
user_profile.realm,
sender,
signup_notifications_stream,
"signups",
"@_**%s|%s** just signed up for Zulip. (total: %i)" % (
user_profile.full_name, user_profile.id, user_count
)
)
# We also send a notification to the Zulip administrative realm
admin_realm = sender.realm
try:
# Check whether the stream exists
signups_stream = get_signups_stream(admin_realm)
internal_send_stream_message(
admin_realm,
sender,
signups_stream,
user_profile.realm.display_subdomain,
"%s <`%s`> just signed up for Zulip! (total: **%i**)" % (
user_profile.full_name,
user_profile.email,
user_count,
)
)
except Stream.DoesNotExist:
# If the signups stream hasn't been created in the admin
# realm, don't auto-create it to send to it; just do nothing.
pass
def notify_invites_changed(user_profile: UserProfile) -> None:
event = dict(type="invites_changed")
admin_ids = [user.id for user in
user_profile.realm.get_admin_users_and_bots()]
send_event(user_profile.realm, event, admin_ids)
def add_new_user_history(user_profile: UserProfile, streams: Iterable[Stream]) -> None:
"""Give you the last ONBOARDING_TOTAL_MESSAGES messages on your public
streams, so you have something to look at in your home view once
you finish the tutorial. The most recent ONBOARDING_UNREAD_MESSAGES
are marked unread.
"""
one_week_ago = timezone_now() - datetime.timedelta(weeks=1)
recipient_ids = [stream.recipient_id for stream in streams if not stream.invite_only]
recent_messages = Message.objects.filter(recipient_id__in=recipient_ids,
date_sent__gt=one_week_ago).order_by("-id")
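    # recent_messages is ordered newest-first; keep at most
    # ONBOARDING_TOTAL_MESSAGES ids and reverse them back into chronological order.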
message_ids_to_use = list(reversed(recent_messages.values_list(
'id', flat=True)[0:ONBOARDING_TOTAL_MESSAGES]))
if len(message_ids_to_use) == 0:
return
# Handle the race condition where a message arrives between
# bulk_add_subscriptions above and the Message query just above
already_ids = set(UserMessage.objects.filter(message_id__in=message_ids_to_use,
user_profile=user_profile).values_list("message_id",
flat=True))
# Mark the newest ONBOARDING_UNREAD_MESSAGES as unread.
marked_unread = 0
ums_to_create = []
for message_id in reversed(message_ids_to_use):
if message_id in already_ids:
continue
um = UserMessage(user_profile=user_profile, message_id=message_id)
if marked_unread < ONBOARDING_UNREAD_MESSAGES:
marked_unread += 1
else:
um.flags = UserMessage.flags.read
ums_to_create.append(um)
UserMessage.objects.bulk_create(reversed(ums_to_create))
# Does the processing for a new user account:
# * Subscribes to default/invitation streams
# * Fills in some recent historical messages
# * Notifies other users in realm and Zulip about the signup
# * Deactivates PreregistrationUser objects
# * subscribe the user to newsletter if newsletter_data is specified
def process_new_human_user(user_profile: UserProfile,
prereg_user: Optional[PreregistrationUser]=None,
newsletter_data: Optional[Dict[str, str]]=None,
default_stream_groups: List[DefaultStreamGroup]=[],
realm_creation: bool=False) -> None:
mit_beta_user = user_profile.realm.is_zephyr_mirror_realm
if prereg_user is not None:
streams = prereg_user.streams.all()
acting_user = prereg_user.referred_by # type: Optional[UserProfile]
else:
streams = []
acting_user = None
# If the user's invitation didn't explicitly list some streams, we
# add the default streams
if len(streams) == 0:
streams = get_default_subs(user_profile)
for default_stream_group in default_stream_groups:
default_stream_group_streams = default_stream_group.streams.all()
for stream in default_stream_group_streams:
if stream not in streams:
streams.append(stream)
bulk_add_subscriptions(streams, [user_profile], acting_user=acting_user)
add_new_user_history(user_profile, streams)
# mit_beta_users don't have a referred_by field
if not mit_beta_user and prereg_user is not None and prereg_user.referred_by is not None:
# This is a cross-realm private message.
internal_send_private_message(
user_profile.realm,
get_system_bot(settings.NOTIFICATION_BOT),
prereg_user.referred_by,
"%s <`%s`> accepted your invitation to join Zulip!" % (
user_profile.full_name,
user_profile.email,
)
)
# Mark any other PreregistrationUsers that are STATUS_ACTIVE as
# inactive so we can keep track of the PreregistrationUser we
# actually used for analytics
if prereg_user is not None:
PreregistrationUser.objects.filter(email__iexact=user_profile.delivery_email).exclude(
id=prereg_user.id).update(status=0)
if prereg_user.referred_by is not None:
notify_invites_changed(user_profile)
else:
PreregistrationUser.objects.filter(email__iexact=user_profile.delivery_email).update(status=0)
notify_new_user(user_profile)
# Clear any scheduled invitation emails to prevent them
# from being sent after the user is created.
clear_scheduled_invitation_emails(user_profile.delivery_email)
if user_profile.realm.send_welcome_emails:
enqueue_welcome_emails(user_profile, realm_creation)
# We have an import loop here; it's intentional, because we want
# to keep all the onboarding code in zerver/lib/onboarding.py.
from zerver.lib.onboarding import send_initial_pms
send_initial_pms(user_profile)
if newsletter_data is not None:
# If the user was created automatically via the API, we may
# not want to register them for the newsletter
queue_json_publish(
"signups",
{
'email_address': user_profile.delivery_email,
'user_id': user_profile.id,
'merge_fields': {
'NAME': user_profile.full_name,
'REALM_ID': user_profile.realm_id,
'OPTIN_IP': newsletter_data["IP"],
'OPTIN_TIME': datetime.datetime.isoformat(timezone_now().replace(microsecond=0)),
},
},
lambda event: None)
def notify_created_user(user_profile: UserProfile) -> None:
user_row = user_profile_to_user_row(user_profile)
person = format_user_row(user_profile.realm, user_profile, user_row,
# Since we don't know what the client
# supports at this point in the code, we
# just assume client_gravatar=False :(
client_gravatar=False,
# We assume there's no custom profile
# field data for a new user; initial
# values are expected to be added in a
# later event.
custom_profile_field_data={})
event = dict(type="realm_user", op="add", person=person) # type: Dict[str, Any]
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def created_bot_event(user_profile: UserProfile) -> Dict[str, Any]:
def stream_name(stream: Optional[Stream]) -> Optional[str]:
if not stream:
return None
return stream.name
default_sending_stream_name = stream_name(user_profile.default_sending_stream)
default_events_register_stream_name = stream_name(user_profile.default_events_register_stream)
bot = dict(email=user_profile.email,
user_id=user_profile.id,
full_name=user_profile.full_name,
bot_type=user_profile.bot_type,
is_active=user_profile.is_active,
api_key=get_api_key(user_profile),
default_sending_stream=default_sending_stream_name,
default_events_register_stream=default_events_register_stream_name,
default_all_public_streams=user_profile.default_all_public_streams,
avatar_url=avatar_url(user_profile),
services = get_service_dicts_for_bot(user_profile.id),
)
# Set the owner key only when the bot has an owner.
# The default bots don't have an owner. So don't
# set the owner key while reactivating them.
if user_profile.bot_owner is not None:
bot['owner'] = user_profile.bot_owner.email
return dict(type="realm_bot", op="add", bot=bot)
def notify_created_bot(user_profile: UserProfile) -> None:
event = created_bot_event(user_profile)
send_event(user_profile.realm, event, bot_owner_user_ids(user_profile))
def create_users(realm: Realm, name_list: Iterable[Tuple[str, str]], bot_type: Optional[int]=None) -> None:
user_set = set()
for full_name, email in name_list:
short_name = email_to_username(email)
user_set.add((email, full_name, short_name, True))
bulk_create_users(realm, user_set, bot_type)
def do_create_user(email: str, password: Optional[str], realm: Realm, full_name: str,
short_name: str, bot_type: Optional[int]=None,
is_realm_admin: bool=False, is_guest: bool=False,
bot_owner: Optional[UserProfile]=None, tos_version: Optional[str]=None,
timezone: str="", avatar_source: str=UserProfile.AVATAR_FROM_GRAVATAR,
default_sending_stream: Optional[Stream]=None,
default_events_register_stream: Optional[Stream]=None,
default_all_public_streams: Optional[bool]=None,
prereg_user: Optional[PreregistrationUser]=None,
newsletter_data: Optional[Dict[str, str]]=None,
default_stream_groups: List[DefaultStreamGroup]=[],
source_profile: Optional[UserProfile]=None,
realm_creation: bool=False) -> UserProfile:
user_profile = create_user(email=email, password=password, realm=realm,
full_name=full_name, short_name=short_name,
is_realm_admin=is_realm_admin, is_guest=is_guest,
bot_type=bot_type, bot_owner=bot_owner,
tos_version=tos_version, timezone=timezone, avatar_source=avatar_source,
default_sending_stream=default_sending_stream,
default_events_register_stream=default_events_register_stream,
default_all_public_streams=default_all_public_streams,
source_profile=source_profile)
event_time = user_profile.date_joined
RealmAuditLog.objects.create(
realm=user_profile.realm, modified_user=user_profile,
event_type=RealmAuditLog.USER_CREATED, event_time=event_time,
extra_data=ujson.dumps({
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm)
}))
do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
user_profile.is_bot, event_time)
if settings.BILLING_ENABLED:
update_license_ledger_if_needed(user_profile.realm, event_time)
# Note that for bots, the caller will send an additional event
# with bot-specific info like services.
notify_created_user(user_profile)
if bot_type is None:
process_new_human_user(user_profile, prereg_user=prereg_user,
newsletter_data=newsletter_data,
default_stream_groups=default_stream_groups,
realm_creation=realm_creation)
return user_profile
def do_activate_user(user_profile: UserProfile) -> None:
user_profile.is_active = True
user_profile.is_mirror_dummy = False
user_profile.set_unusable_password()
user_profile.date_joined = timezone_now()
user_profile.tos_version = settings.TOS_VERSION
user_profile.save(update_fields=["is_active", "date_joined", "password",
"is_mirror_dummy", "tos_version"])
event_time = user_profile.date_joined
RealmAuditLog.objects.create(
realm=user_profile.realm, modified_user=user_profile,
event_type=RealmAuditLog.USER_ACTIVATED, event_time=event_time,
extra_data=ujson.dumps({
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm)
}))
do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
user_profile.is_bot, event_time)
if settings.BILLING_ENABLED:
update_license_ledger_if_needed(user_profile.realm, event_time)
notify_created_user(user_profile)
def do_reactivate_user(user_profile: UserProfile, acting_user: Optional[UserProfile]=None) -> None:
# Unlike do_activate_user, this is meant for re-activating existing users,
# so it doesn't reset their password, etc.
user_profile.is_active = True
user_profile.save(update_fields=["is_active"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user,
event_type=RealmAuditLog.USER_REACTIVATED, event_time=event_time,
extra_data=ujson.dumps({
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm)
}))
do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
user_profile.is_bot, event_time)
if settings.BILLING_ENABLED:
update_license_ledger_if_needed(user_profile.realm, event_time)
notify_created_user(user_profile)
if user_profile.is_bot:
notify_created_bot(user_profile)
def active_humans_in_realm(realm: Realm) -> Sequence[UserProfile]:
return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False)
def do_set_realm_property(realm: Realm, name: str, value: Any) -> None:
"""Takes in a realm object, the name of an attribute to update, and the
value to update.
"""
property_type = Realm.property_types[name]
assert isinstance(value, property_type), (
'Cannot update %s: %s is not an instance of %s' % (
name, value, property_type,))
setattr(realm, name, value)
realm.save(update_fields=[name])
if name == 'zoom_api_secret':
# Send '' as the value through the API for the API secret
value = ''
event = dict(
type='realm',
op='update',
property=name,
value=value,
)
send_event(realm, event, active_user_ids(realm.id))
if name == "email_address_visibility":
for user_profile in UserProfile.objects.filter(realm=realm, is_bot=False):
# TODO: This does linear queries in the number of users
# and thus is potentially very slow. Probably not super
# important since this is a feature few folks will toggle,
# but as a policy matter, we don't do linear queries
# ~anywhere in Zulip.
old_email = user_profile.email
user_profile.email = get_display_email_address(user_profile, realm)
user_profile.save(update_fields=["email"])
# TODO: Design a bulk event for this or force-reload all clients
if user_profile.email != old_email:
send_user_email_update_event(user_profile)
def do_set_realm_authentication_methods(realm: Realm,
authentication_methods: Dict[str, bool]) -> None:
for key, value in list(authentication_methods.items()):
index = getattr(realm.authentication_methods, key).number
realm.authentication_methods.set_bit(index, int(value))
realm.save(update_fields=['authentication_methods'])
event = dict(
type="realm",
op="update_dict",
property='default',
data=dict(authentication_methods=realm.authentication_methods_dict())
)
send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_message_editing(realm: Realm,
allow_message_editing: bool,
message_content_edit_limit_seconds: int,
allow_community_topic_editing: bool) -> None:
realm.allow_message_editing = allow_message_editing
realm.message_content_edit_limit_seconds = message_content_edit_limit_seconds
realm.allow_community_topic_editing = allow_community_topic_editing
realm.save(update_fields=['allow_message_editing',
'allow_community_topic_editing',
'message_content_edit_limit_seconds',
]
)
event = dict(
type="realm",
op="update_dict",
property="default",
data=dict(allow_message_editing=allow_message_editing,
message_content_edit_limit_seconds=message_content_edit_limit_seconds,
allow_community_topic_editing=allow_community_topic_editing),
)
send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_message_deleting(realm: Realm,
message_content_delete_limit_seconds: int) -> None:
realm.message_content_delete_limit_seconds = message_content_delete_limit_seconds
realm.save(update_fields=['message_content_delete_limit_seconds'])
event = dict(
type="realm",
op="update_dict",
property="default",
data=dict(message_content_delete_limit_seconds=message_content_delete_limit_seconds),
)
send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_notifications_stream(realm: Realm, stream: Stream, stream_id: int) -> None:
realm.notifications_stream = stream
realm.save(update_fields=['notifications_stream'])
event = dict(
type="realm",
op="update",
property="notifications_stream_id",
value=stream_id
)
send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_signup_notifications_stream(realm: Realm, stream: Stream,
stream_id: int) -> None:
realm.signup_notifications_stream = stream
realm.save(update_fields=['signup_notifications_stream'])
event = dict(
type="realm",
op="update",
property="signup_notifications_stream_id",
value=stream_id
)
send_event(realm, event, active_user_ids(realm.id))
def do_deactivate_realm(realm: Realm, acting_user: Optional[UserProfile]=None) -> None:
"""
Deactivate this realm. Do NOT deactivate the users -- we need to be able to
tell the difference between users that were intentionally deactivated,
e.g. by a realm admin, and users who can't currently use Zulip because their
realm has been deactivated.
"""
if realm.deactivated:
return
realm.deactivated = True
realm.save(update_fields=["deactivated"])
if settings.BILLING_ENABLED:
downgrade_for_realm_deactivation(realm)
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=realm, event_type=RealmAuditLog.REALM_DEACTIVATED, event_time=event_time,
acting_user=acting_user, extra_data=ujson.dumps({
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(realm)
}))
ScheduledEmail.objects.filter(realm=realm).delete()
for user in active_humans_in_realm(realm):
# Don't deactivate the users, but do delete their sessions so they get
# bumped to the login screen, where they'll get a realm deactivation
# notice when they try to log in.
delete_user_sessions(user)
event = dict(type="realm", op="deactivated",
realm_id=realm.id)
send_event(realm, event, active_user_ids(realm.id))
def do_reactivate_realm(realm: Realm) -> None:
realm.deactivated = False
realm.save(update_fields=["deactivated"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=realm, event_type=RealmAuditLog.REALM_REACTIVATED, event_time=event_time,
extra_data=ujson.dumps({
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(realm)
}))
def do_change_realm_subdomain(realm: Realm, new_subdomain: str) -> None:
realm.string_id = new_subdomain
realm.save(update_fields=["string_id"])
def do_scrub_realm(realm: Realm) -> None:
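    # Irreversibly anonymize a realm: delete every user's messages and avatar,
    # replace names and emails with scrubbed placeholders, and remove custom
    # profile fields and attachments, logging the event in RealmAuditLog.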
users = UserProfile.objects.filter(realm=realm)
for user in users:
do_delete_messages_by_sender(user)
do_delete_avatar_image(user)
user.full_name = "Scrubbed {}".format(generate_key()[:15])
scrubbed_email = "scrubbed-{}@{}".format(generate_key()[:15], realm.host)
user.email = scrubbed_email
user.delivery_email = scrubbed_email
user.save(update_fields=["full_name", "email", "delivery_email"])
do_remove_realm_custom_profile_fields(realm)
Attachment.objects.filter(realm=realm).delete()
RealmAuditLog.objects.create(realm=realm, event_time=timezone_now(),
event_type=RealmAuditLog.REALM_SCRUBBED)
def do_deactivate_user(user_profile: UserProfile,
acting_user: Optional[UserProfile]=None,
_cascade: bool=True) -> None:
if not user_profile.is_active:
return
if user_profile.realm.is_zephyr_mirror_realm: # nocoverage
# For zephyr mirror users, we need to make them a mirror dummy
# again; otherwise, other users won't get the correct behavior
# when trying to send messages to this person inside Zulip.
#
# Ideally, we need to also ensure their zephyr mirroring bot
# isn't running, but that's a separate issue.
        user_profile.is_mirror_dummy = True
    user_profile.is_active = False
    user_profile.save(update_fields=["is_active", "is_mirror_dummy"])
delete_user_sessions(user_profile)
clear_scheduled_emails([user_profile.id])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user,
event_type=RealmAuditLog.USER_DEACTIVATED, event_time=event_time,
extra_data=ujson.dumps({
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm)
}))
do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
user_profile.is_bot, event_time, increment=-1)
if settings.BILLING_ENABLED:
update_license_ledger_if_needed(user_profile.realm, event_time)
event = dict(type="realm_user", op="remove",
person=dict(email=user_profile.email,
user_id=user_profile.id,
full_name=user_profile.full_name))
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
if user_profile.is_bot:
event = dict(type="realm_bot", op="remove",
bot=dict(email=user_profile.email,
user_id=user_profile.id,
full_name=user_profile.full_name))
send_event(user_profile.realm, event, bot_owner_user_ids(user_profile))
if _cascade:
bot_profiles = UserProfile.objects.filter(is_bot=True, is_active=True,
bot_owner=user_profile)
for profile in bot_profiles:
do_deactivate_user(profile, acting_user=acting_user, _cascade=False)
def do_deactivate_stream(stream: Stream, log: bool=True) -> None:
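    # Deactivating a stream marks it deactivated and invite-only, unsubscribes
    # everyone, and renames it with a "!DEACTIVATED:" prefix so the original
    # name can be reused; clients are notified via a stream "delete" event.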
# Get the affected user ids *before* we deactivate everybody.
affected_user_ids = can_access_stream_user_ids(stream)
get_active_subscriptions_for_stream_id(stream.id).update(active=False)
was_invite_only = stream.invite_only
stream.deactivated = True
stream.invite_only = True
# Preserve as much as possible the original stream name while giving it a
# special prefix that both indicates that the stream is deactivated and
# frees up the original name for reuse.
old_name = stream.name
new_name = ("!DEACTIVATED:" + old_name)[:Stream.MAX_NAME_LENGTH]
for i in range(20):
if stream_name_in_use(new_name, stream.realm_id):
            # A stream with this name has already been deactivated; keep prepending
            # !s until we have a unique stream name or we hit the rename limit.
new_name = ("!" + new_name)[:Stream.MAX_NAME_LENGTH]
else:
break
# If you don't have a unique name at this point, this will fail later in the
# code path.
stream.name = new_name[:Stream.MAX_NAME_LENGTH]
stream.save(update_fields=['name', 'deactivated', 'invite_only'])
# If this is a default stream, remove it, properly sending a
# notification to browser clients.
if DefaultStream.objects.filter(realm_id=stream.realm_id, stream_id=stream.id).exists():
do_remove_default_stream(stream)
# Remove the old stream information from remote cache.
old_cache_key = get_stream_cache_key(old_name, stream.realm_id)
cache_delete(old_cache_key)
stream_dict = stream.to_dict()
stream_dict.update(dict(name=old_name, invite_only=was_invite_only))
event = dict(type="stream", op="delete",
streams=[stream_dict])
send_event(stream.realm, event, affected_user_ids)
def send_user_email_update_event(user_profile: UserProfile) -> None:
payload = dict(user_id=user_profile.id,
new_email=user_profile.email)
send_event(user_profile.realm,
dict(type='realm_user', op='update', person=payload),
active_user_ids(user_profile.realm_id))
def do_change_user_delivery_email(user_profile: UserProfile, new_email: str) -> None:
delete_user_profile_caches([user_profile])
user_profile.delivery_email = new_email
if user_profile.email_address_is_realm_public():
user_profile.email = new_email
user_profile.save(update_fields=["email", "delivery_email"])
else:
user_profile.save(update_fields=["delivery_email"])
# We notify just the target user (and eventually org admins, only
# when email_address_visibility=EMAIL_ADDRESS_VISIBILITY_ADMINS)
# about their new delivery email, since that field is private.
payload = dict(user_id=user_profile.id,
delivery_email=new_email)
event = dict(type='realm_user', op='update', person=payload)
send_event(user_profile.realm, event, [user_profile.id])
if user_profile.avatar_source == UserProfile.AVATAR_FROM_GRAVATAR:
# If the user is using Gravatar to manage their email address,
# their Gravatar just changed, and we need to notify other
# clients.
notify_avatar_url_change(user_profile)
if user_profile.email_address_is_realm_public():
# Additionally, if we're also changing the publicly visible
# email, we send a new_email event as well.
send_user_email_update_event(user_profile)
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile,
modified_user=user_profile, event_type=RealmAuditLog.USER_EMAIL_CHANGED,
event_time=event_time)
def do_start_email_change_process(user_profile: UserProfile, new_email: str) -> None:
old_email = user_profile.delivery_email
obj = EmailChangeStatus.objects.create(new_email=new_email, old_email=old_email,
user_profile=user_profile, realm=user_profile.realm)
activation_url = create_confirmation_link(obj, user_profile.realm.host, Confirmation.EMAIL_CHANGE)
from zerver.context_processors import common_context
context = common_context(user_profile)
context.update({
'old_email': old_email,
'new_email': new_email,
'activate_url': activation_url
})
language = user_profile.default_language
send_email('zerver/emails/confirm_new_email', to_emails=[new_email],
from_name=FromAddress.security_email_from_name(language=language),
from_address=FromAddress.tokenized_no_reply_address(),
language=language, context=context)
def compute_irc_user_fullname(email: str) -> str:
return email.split("@")[0] + " (IRC)"
def compute_jabber_user_fullname(email: str) -> str:
return email.split("@")[0] + " (XMPP)"
@cache_with_key(lambda realm, email, f: user_profile_by_email_cache_key(email),
timeout=3600*24*7)
def create_mirror_user_if_needed(realm: Realm, email: str,
email_to_fullname: Callable[[str], str]) -> UserProfile:
try:
return get_user_by_delivery_email(email, realm)
except UserProfile.DoesNotExist:
try:
# Forge a user for this person
return create_user(
email=email,
password=None,
realm=realm,
full_name=email_to_fullname(email),
short_name=email_to_username(email),
active=False,
is_mirror_dummy=True,
)
except IntegrityError:
return get_user_by_delivery_email(email, realm)
def send_welcome_bot_response(message: MutableMapping[str, Any]) -> None:
welcome_bot = get_system_bot(settings.WELCOME_BOT)
human_recipient_id = message['message'].sender.recipient_id
if Message.objects.filter(sender=welcome_bot, recipient_id=human_recipient_id).count() < 2:
internal_send_private_message(
message['realm'], welcome_bot, message['message'].sender,
"Congratulations on your first reply! :tada:\n\n"
"Feel free to continue using this space to practice your new messaging "
"skills. Or, try clicking on some of the stream names to your left!")
def render_incoming_message(message: Message,
content: str,
user_ids: Set[int],
realm: Realm,
mention_data: Optional[bugdown.MentionData]=None,
email_gateway: Optional[bool]=False) -> str:
realm_alert_words_automaton = get_alert_word_automaton(realm)
try:
rendered_content = render_markdown(
message=message,
content=content,
realm=realm,
realm_alert_words_automaton = realm_alert_words_automaton,
user_ids=user_ids,
mention_data=mention_data,
email_gateway=email_gateway,
)
except BugdownRenderingException:
raise JsonableError(_('Unable to render message'))
return rendered_content
RecipientInfoResult = TypedDict('RecipientInfoResult', {
'active_user_ids': Set[int],
'push_notify_user_ids': Set[int],
'stream_email_user_ids': Set[int],
'stream_push_user_ids': Set[int],
'wildcard_mention_user_ids': Set[int],
'um_eligible_user_ids': Set[int],
'long_term_idle_user_ids': Set[int],
'default_bot_user_ids': Set[int],
'service_bot_tuples': List[Tuple[int, int]],
})
def get_recipient_info(recipient: Recipient,
sender_id: int,
stream_topic: Optional[StreamTopicTarget],
possibly_mentioned_user_ids: Optional[Set[int]]=None,
possible_wildcard_mention: bool=True) -> RecipientInfoResult:
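    """Compute the user-ID sets needed to deliver and notify about a single
    message: active recipients, stream push/email notification candidates,
    wildcard-mention candidates, UserMessage-eligible users, long-term idle
    users, and data about any (possibly) mentioned bots."""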
stream_push_user_ids = set() # type: Set[int]
stream_email_user_ids = set() # type: Set[int]
wildcard_mention_user_ids = set() # type: Set[int]
if recipient.type == Recipient.PERSONAL:
# The sender and recipient may be the same id, so
# de-duplicate using a set.
message_to_user_ids = list({recipient.type_id, sender_id})
assert(len(message_to_user_ids) in [1, 2])
elif recipient.type == Recipient.STREAM:
# Anybody calling us w/r/t a stream message needs to supply
# stream_topic. We may eventually want to have different versions
# of this function for different message types.
assert(stream_topic is not None)
user_ids_muting_topic = stream_topic.user_ids_muting_topic()
subscription_rows = stream_topic.get_active_subscriptions().annotate(
user_profile_email_notifications=F('user_profile__enable_stream_email_notifications'),
user_profile_push_notifications=F('user_profile__enable_stream_push_notifications'),
user_profile_wildcard_mentions_notify=F(
'user_profile__wildcard_mentions_notify'),
).values(
'user_profile_id',
'push_notifications',
'email_notifications',
'wildcard_mentions_notify',
'user_profile_email_notifications',
'user_profile_push_notifications',
'user_profile_wildcard_mentions_notify',
'is_muted',
).order_by('user_profile_id')
message_to_user_ids = [
row['user_profile_id']
for row in subscription_rows
]
def should_send(setting: str, row: Dict[str, Any]) -> bool:
# This implements the structure that the UserProfile stream notification settings
# are defaults, which can be overridden by the stream-level settings (if those
# values are not null).
if row['is_muted']:
return False
if row['user_profile_id'] in user_ids_muting_topic:
return False
if row[setting] is not None:
return row[setting]
return row['user_profile_' + setting]
stream_push_user_ids = {
row['user_profile_id']
for row in subscription_rows
# Note: muting a stream overrides stream_push_notify
if should_send('push_notifications', row)
}
stream_email_user_ids = {
row['user_profile_id']
for row in subscription_rows
# Note: muting a stream overrides stream_email_notify
if should_send('email_notifications', row)
}
if possible_wildcard_mention:
# If there's a possible wildcard mention, we need to
# determine which users would receive a wildcard mention
# notification for this message should the message indeed
# contain a wildcard mention.
#
# We don't have separate values for push/email
# notifications here; at this stage, we're just
# determining whether this wildcard mention should be
# treated as a mention (and follow the user's mention
# notification preferences) or a normal message.
wildcard_mention_user_ids = {
row['user_profile_id']
for row in subscription_rows
if should_send("wildcard_mentions_notify", row)
}
elif recipient.type == Recipient.HUDDLE:
message_to_user_ids = get_huddle_user_ids(recipient)
else:
raise ValueError('Bad recipient type')
message_to_user_id_set = set(message_to_user_ids)
user_ids = set(message_to_user_id_set)
if possibly_mentioned_user_ids:
# Important note: Because we haven't rendered bugdown yet, we
# don't yet know which of these possibly-mentioned users was
# actually mentioned in the message (in other words, the
# mention syntax might have been in a code block or otherwise
        # escaped). `get_ids_for` will filter out these extra user rows
        # from the data structures that are not related to bots.
user_ids |= possibly_mentioned_user_ids
if user_ids:
query = UserProfile.objects.filter(
is_active=True,
).values(
'id',
'enable_online_push_notifications',
'is_bot',
'bot_type',
'long_term_idle',
)
        # query_for_ids is highly optimized for large queries, and we
        # need this codepath to be fast (it's part of sending messages).
query = query_for_ids(
query=query,
user_ids=sorted(list(user_ids)),
field='id'
)
rows = list(query)
else:
# TODO: We should always have at least one user_id as a recipient
# of any message we send. Right now the exception to this
# rule is `notify_new_user`, which, at least in a possibly
# contrived test scenario, can attempt to send messages
# to an inactive bot. When we plug that hole, we can avoid
# this `else` clause and just `assert(user_ids)`.
#
# UPDATE: It's February 2020 (and a couple years after the above
# comment was written). We have simplified notify_new_user
# so that it should be a little easier to reason about.
# There is currently some cleanup to how we handle cross
# realm bots that is still under development. Once that
# effort is complete, we should be able to address this
# to-do.
rows = []
def get_ids_for(f: Callable[[Dict[str, Any]], bool]) -> Set[int]:
"""Only includes users on the explicit message to line"""
return {
row['id']
for row in rows
if f(row)
} & message_to_user_id_set
def is_service_bot(row: Dict[str, Any]) -> bool:
return row['is_bot'] and (row['bot_type'] in UserProfile.SERVICE_BOT_TYPES)
active_user_ids = get_ids_for(lambda r: True)
push_notify_user_ids = get_ids_for(
lambda r: r['enable_online_push_notifications']
)
# Service bots don't get UserMessage rows.
um_eligible_user_ids = get_ids_for(
lambda r: not is_service_bot(r)
)
long_term_idle_user_ids = get_ids_for(
lambda r: r['long_term_idle']
)
# These two bot data structures need to filter from the full set
# of users who either are receiving the message or might have been
# mentioned in it, and so can't use get_ids_for.
#
# Further in the do_send_messages code path, once
# `mentioned_user_ids` has been computed via bugdown, we'll filter
# these data structures for just those users who are either a
# direct recipient or were mentioned; for now, we're just making
# sure we have the data we need for that without extra database
# queries.
    default_bot_user_ids = {
        row['id']
        for row in rows
        if row['is_bot'] and row['bot_type'] == UserProfile.DEFAULT_BOT
    }
service_bot_tuples = [
(row['id'], row['bot_type'])
for row in rows
if is_service_bot(row)
]
info = dict(
active_user_ids=active_user_ids,
push_notify_user_ids=push_notify_user_ids,
stream_push_user_ids=stream_push_user_ids,
stream_email_user_ids=stream_email_user_ids,
wildcard_mention_user_ids=wildcard_mention_user_ids,
um_eligible_user_ids=um_eligible_user_ids,
long_term_idle_user_ids=long_term_idle_user_ids,
default_bot_user_ids=default_bot_user_ids,
service_bot_tuples=service_bot_tuples
) # type: RecipientInfoResult
return info
def get_service_bot_events(sender: UserProfile, service_bot_tuples: List[Tuple[int, int]],
mentioned_user_ids: Set[int], active_user_ids: Set[int],
recipient_type: int) -> Dict[str, List[Dict[str, Any]]]:
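    # Build a mapping from queue name ('outgoing_webhooks' or 'embedded_bots')
    # to the list of {'trigger': ..., 'user_profile_id': ...} events to enqueue
    # for service bots that were mentioned in, or are direct recipients of,
    # this message.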
event_dict = defaultdict(list) # type: Dict[str, List[Dict[str, Any]]]
# Avoid infinite loops by preventing messages sent by bots from generating
# Service events.
if sender.is_bot:
return event_dict
def maybe_add_event(user_profile_id: int, bot_type: int) -> None:
if bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
queue_name = 'outgoing_webhooks'
elif bot_type == UserProfile.EMBEDDED_BOT:
queue_name = 'embedded_bots'
else:
logging.error(
'Unexpected bot_type for Service bot id=%s: %s' %
(user_profile_id, bot_type))
return
is_stream = (recipient_type == Recipient.STREAM)
# Important note: service_bot_tuples may contain service bots
# who were not actually mentioned in the message (e.g. if
# mention syntax for that bot appeared in a code block).
# Thus, it is important to filter any users who aren't part of
# either mentioned_user_ids (the actual mentioned users) or
# active_user_ids (the actual recipients).
#
# So even though this is implied by the logic below, we filter
# these not-actually-mentioned users here, to help keep this
# function future-proof.
if user_profile_id not in mentioned_user_ids and user_profile_id not in active_user_ids:
return
# Mention triggers, for stream messages
if is_stream and user_profile_id in mentioned_user_ids:
trigger = 'mention'
        # PM triggers for personal and huddle messages
elif (not is_stream) and (user_profile_id in active_user_ids):
trigger = 'private_message'
else:
return
event_dict[queue_name].append({
'trigger': trigger,
'user_profile_id': user_profile_id,
})
for user_profile_id, bot_type in service_bot_tuples:
maybe_add_event(
user_profile_id=user_profile_id,
bot_type=bot_type,
)
return event_dict
def do_schedule_messages(messages: Sequence[Mapping[str, Any]]) -> List[int]:
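    # Persist the prepared messages as ScheduledMessage rows (delivery_type
    # 'send_later' or 'remind') and return their database IDs.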
scheduled_messages = [] # type: List[ScheduledMessage]
for message in messages:
scheduled_message = ScheduledMessage()
scheduled_message.sender = message['message'].sender
scheduled_message.recipient = message['message'].recipient
topic_name = message['message'].topic_name()
scheduled_message.set_topic_name(topic_name=topic_name)
scheduled_message.content = message['message'].content
scheduled_message.sending_client = message['message'].sending_client
scheduled_message.stream = message['stream']
scheduled_message.realm = message['realm']
scheduled_message.scheduled_timestamp = message['deliver_at']
if message['delivery_type'] == 'send_later':
scheduled_message.delivery_type = ScheduledMessage.SEND_LATER
elif message['delivery_type'] == 'remind':
scheduled_message.delivery_type = ScheduledMessage.REMIND
scheduled_messages.append(scheduled_message)
ScheduledMessage.objects.bulk_create(scheduled_messages)
return [scheduled_message.id for scheduled_message in scheduled_messages]
def do_send_messages(messages_maybe_none: Sequence[Optional[MutableMapping[str, Any]]],
email_gateway: Optional[bool]=False,
mark_as_read: List[int]=[]) -> List[int]:
"""See
https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html
for high-level documentation on this subsystem.
"""
# Filter out messages which didn't pass internal_prep_message properly
messages = [message for message in messages_maybe_none if message is not None]
# Filter out zephyr mirror anomalies where the message was already sent
already_sent_ids = [] # type: List[int]
new_messages = [] # type: List[MutableMapping[str, Any]]
for message in messages:
if isinstance(message['message'], int):
already_sent_ids.append(message['message'])
else:
new_messages.append(message)
messages = new_messages
links_for_embed = set() # type: Set[str]
# For consistency, changes to the default values for these gets should also be applied
# to the default args in do_send_message
for message in messages:
message['rendered_content'] = message.get('rendered_content', None)
message['stream'] = message.get('stream', None)
message['local_id'] = message.get('local_id', None)
message['sender_queue_id'] = message.get('sender_queue_id', None)
message['realm'] = message.get('realm', message['message'].sender.realm)
mention_data = bugdown.MentionData(
realm_id=message['realm'].id,
content=message['message'].content,
)
message['mention_data'] = mention_data
if message['message'].is_stream_message():
stream_id = message['message'].recipient.type_id
stream_topic = StreamTopicTarget(
stream_id=stream_id,
topic_name=message['message'].topic_name()
) # type: Optional[StreamTopicTarget]
else:
stream_topic = None
info = get_recipient_info(
recipient=message['message'].recipient,
sender_id=message['message'].sender_id,
stream_topic=stream_topic,
possibly_mentioned_user_ids=mention_data.get_user_ids(),
possible_wildcard_mention=mention_data.message_has_wildcards(),
)
message['active_user_ids'] = info['active_user_ids']
message['push_notify_user_ids'] = info['push_notify_user_ids']
message['stream_push_user_ids'] = info['stream_push_user_ids']
message['stream_email_user_ids'] = info['stream_email_user_ids']
message['um_eligible_user_ids'] = info['um_eligible_user_ids']
message['long_term_idle_user_ids'] = info['long_term_idle_user_ids']
message['default_bot_user_ids'] = info['default_bot_user_ids']
message['service_bot_tuples'] = info['service_bot_tuples']
# Render our messages.
assert message['message'].rendered_content is None
rendered_content = render_incoming_message(
message['message'],
message['message'].content,
message['active_user_ids'],
message['realm'],
mention_data=message['mention_data'],
email_gateway=email_gateway,
)
message['message'].rendered_content = rendered_content
message['message'].rendered_content_version = bugdown_version
links_for_embed |= message['message'].links_for_preview
# Add members of the mentioned user groups into `mentions_user_ids`.
for group_id in message['message'].mentions_user_group_ids:
members = message['mention_data'].get_group_members(group_id)
message['message'].mentions_user_ids.update(members)
# Only send data to Tornado about wildcard mentions if message
# rendering determined the message had an actual wildcard
# mention in it (and not e.g. wildcard mention syntax inside a
# code block).
if message['message'].mentions_wildcard:
message['wildcard_mention_user_ids'] = info['wildcard_mention_user_ids']
else:
message['wildcard_mention_user_ids'] = []
'''
Once we have the actual list of mentioned ids from message
rendering, we can patch in "default bots" (aka normal bots)
who were directly mentioned in this message as eligible to
get UserMessage rows.
'''
mentioned_user_ids = message['message'].mentions_user_ids
default_bot_user_ids = message['default_bot_user_ids']
mentioned_bot_user_ids = default_bot_user_ids & mentioned_user_ids
message['um_eligible_user_ids'] |= mentioned_bot_user_ids
# Save the message receipts in the database
user_message_flags = defaultdict(dict) # type: Dict[int, Dict[int, List[str]]]
with transaction.atomic():
Message.objects.bulk_create([message['message'] for message in messages])
# Claim attachments in message
for message in messages:
if do_claim_attachments(message['message'],
message['message'].potential_attachment_path_ids):
message['message'].has_attachment = True
message['message'].save(update_fields=['has_attachment'])
ums = [] # type: List[UserMessageLite]
for message in messages:
# Service bots (outgoing webhook bots and embedded bots) don't store UserMessage rows;
# they will be processed later.
mentioned_user_ids = message['message'].mentions_user_ids
user_messages = create_user_messages(
message=message['message'],
um_eligible_user_ids=message['um_eligible_user_ids'],
long_term_idle_user_ids=message['long_term_idle_user_ids'],
stream_push_user_ids = message['stream_push_user_ids'],
stream_email_user_ids = message['stream_email_user_ids'],
mentioned_user_ids=mentioned_user_ids,
mark_as_read=mark_as_read
)
for um in user_messages:
user_message_flags[message['message'].id][um.user_profile_id] = um.flags_list()
ums.extend(user_messages)
message['message'].service_queue_events = get_service_bot_events(
sender=message['message'].sender,
service_bot_tuples=message['service_bot_tuples'],
mentioned_user_ids=mentioned_user_ids,
active_user_ids=message['active_user_ids'],
recipient_type=message['message'].recipient.type,
)
bulk_insert_ums(ums)
for message in messages:
do_widget_post_save_actions(message)
for message in messages:
# Deliver events to the real-time push system, as well as
# enqueuing any additional processing triggered by the message.
wide_message_dict = MessageDict.wide_dict(message['message'])
user_flags = user_message_flags.get(message['message'].id, {})
sender = message['message'].sender
message_type = wide_message_dict['type']
presence_idle_user_ids = get_active_presence_idle_user_ids(
realm=sender.realm,
sender_id=sender.id,
message_type=message_type,
active_user_ids=message['active_user_ids'],
user_flags=user_flags,
)
event = dict(
type='message',
message=message['message'].id,
message_dict=wide_message_dict,
presence_idle_user_ids=presence_idle_user_ids,
)
'''
TODO: We may want to limit user_ids to only those users who have
UserMessage rows, if only for minor performance reasons.
For now we queue events for all subscribers/sendees of the
message, since downstream code may still do notifications
that don't require UserMessage rows.
Our automated tests have gotten better on this codepath,
but we may have coverage gaps, so we should be careful
about changing the next line.
'''
user_ids = message['active_user_ids'] | set(user_flags.keys())
users = [
dict(
id=user_id,
flags=user_flags.get(user_id, []),
always_push_notify=(user_id in message['push_notify_user_ids']),
stream_push_notify=(user_id in message['stream_push_user_ids']),
stream_email_notify=(user_id in message['stream_email_user_ids']),
wildcard_mention_notify=(user_id in message['wildcard_mention_user_ids']),
)
for user_id in user_ids
]
if message['message'].is_stream_message():
# Note: This is where authorization for single-stream
# get_updates happens! We only attach stream data to the
# notify new_message request if it's a public stream,
# ensuring that in the tornado server, non-public stream
# messages are only associated to their subscribed users.
if message['stream'] is None:
stream_id = message['message'].recipient.type_id
message['stream'] = Stream.objects.select_related().get(id=stream_id)
assert message['stream'] is not None # assert needed because stubs for django are missing
if message['stream'].is_public():
event['realm_id'] = message['stream'].realm_id
event['stream_name'] = message['stream'].name
if message['stream'].invite_only:
event['invite_only'] = True
if message['stream'].first_message_id is None:
message['stream'].first_message_id = message['message'].id
message['stream'].save(update_fields=["first_message_id"])
if message['local_id'] is not None:
event['local_id'] = message['local_id']
if message['sender_queue_id'] is not None:
event['sender_queue_id'] = message['sender_queue_id']
send_event(message['realm'], event, users)
if url_embed_preview_enabled(message['message']) and links_for_embed:
event_data = {
'message_id': message['message'].id,
'message_content': message['message'].content,
'message_realm_id': message['realm'].id,
'urls': links_for_embed}
queue_json_publish('embed_links', event_data)
if message['message'].recipient.type == Recipient.PERSONAL:
welcome_bot_id = get_system_bot(settings.WELCOME_BOT).id
if (welcome_bot_id in message['active_user_ids'] and
welcome_bot_id != message['message'].sender_id):
send_welcome_bot_response(message)
for queue_name, events in message['message'].service_queue_events.items():
for event in events:
queue_json_publish(
queue_name,
{
"message": wide_message_dict,
"trigger": event['trigger'],
"user_profile_id": event["user_profile_id"],
}
)
# Note that this does not preserve the order of message ids
# returned. In practice, this shouldn't matter, as we only
# mirror single zephyr messages at a time and don't otherwise
# intermingle sending zephyr messages with other messages.
return already_sent_ids + [message['message'].id for message in messages]
class UserMessageLite:
'''
The Django ORM is too slow for bulk operations. This class
is optimized for the simple use case of inserting a bunch of
rows into zerver_usermessage.
'''
def __init__(self, user_profile_id: int, message_id: int, flags: int) -> None:
self.user_profile_id = user_profile_id
self.message_id = message_id
self.flags = flags
def flags_list(self) -> List[str]:
return UserMessage.flags_list_for_flags(self.flags)
def create_user_messages(message: Message,
um_eligible_user_ids: Set[int],
long_term_idle_user_ids: Set[int],
stream_push_user_ids: Set[int],
stream_email_user_ids: Set[int],
mentioned_user_ids: Set[int],
mark_as_read: List[int]=[]) -> List[UserMessageLite]:
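    # Build UserMessageLite rows for every UM-eligible recipient, computing the
    # read/mentioned/alert-word/private flags; rows for long-term idle users
    # with no interesting flags on stream messages are skipped (see the
    # soft-deactivation comment below).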
ums_to_create = []
for user_profile_id in um_eligible_user_ids:
um = UserMessageLite(
user_profile_id=user_profile_id,
message_id=message.id,
flags=0,
)
ums_to_create.append(um)
# These properties on the Message are set via
# render_markdown by code in the bugdown inline patterns
wildcard = message.mentions_wildcard
ids_with_alert_words = message.user_ids_with_alert_words
for um in ums_to_create:
if (um.user_profile_id == message.sender.id and
message.sent_by_human()) or \
um.user_profile_id in mark_as_read:
um.flags |= UserMessage.flags.read
if wildcard:
um.flags |= UserMessage.flags.wildcard_mentioned
if um.user_profile_id in mentioned_user_ids:
um.flags |= UserMessage.flags.mentioned
if um.user_profile_id in ids_with_alert_words:
um.flags |= UserMessage.flags.has_alert_word
if message.recipient.type in [Recipient.HUDDLE, Recipient.PERSONAL]:
um.flags |= UserMessage.flags.is_private
# For long_term_idle (aka soft-deactivated) users, we are allowed
# to optimize by lazily not creating UserMessage rows that would
# have the default 0 flag set (since the soft-reactivation logic
# knows how to create those when the user comes back). We need to
# create the UserMessage rows for these long_term_idle users
# non-lazily in a few cases:
#
# * There are nonzero flags (e.g. the user was mentioned), since
# that case is rare and this saves a lot of complexity in
# soft-reactivation.
#
# * If the user is going to be notified (e.g. they get push/email
# notifications for every message on a stream), since in that
# case the notifications code will call `access_message` on the
# message to re-verify permissions, and for private streams,
# will get an error if the UserMessage row doesn't exist yet.
#
# See https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html#soft-deactivation
# for details on this system.
user_messages = []
for um in ums_to_create:
if (um.user_profile_id in long_term_idle_user_ids and
um.user_profile_id not in stream_push_user_ids and
um.user_profile_id not in stream_email_user_ids and
message.is_stream_message() and
int(um.flags) == 0):
continue
user_messages.append(um)
return user_messages
def bulk_insert_ums(ums: List[UserMessageLite]) -> None:
'''
Doing bulk inserts this way is much faster than using Django,
since we don't have any ORM overhead. Profiling with 1000
users shows a speedup of 0.436 -> 0.027 seconds, so we're
talking about a 15x speedup.
'''
if not ums:
return
vals = ','.join([
'(%d, %d, %d)' % (um.user_profile_id, um.message_id, um.flags)
for um in ums
])
query = '''
INSERT into
zerver_usermessage (user_profile_id, message_id, flags)
VALUES
''' + vals
with connection.cursor() as cursor:
cursor.execute(query)
def do_add_submessage(realm: Realm,
sender_id: int,
message_id: int,
msg_type: str,
content: str,
) -> None:
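    # Store a submessage attached to an existing message and broadcast it to
    # every user who has a UserMessage row for that message.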
submessage = SubMessage(
sender_id=sender_id,
message_id=message_id,
msg_type=msg_type,
content=content,
)
submessage.save()
event = dict(
type="submessage",
msg_type=msg_type,
message_id=message_id,
submessage_id=submessage.id,
sender_id=sender_id,
content=content,
)
ums = UserMessage.objects.filter(message_id=message_id)
target_user_ids = [um.user_profile_id for um in ums]
send_event(realm, event, target_user_ids)
def notify_reaction_update(user_profile: UserProfile, message: Message,
reaction: Reaction, op: str) -> None:
user_dict = {'user_id': user_profile.id,
'email': user_profile.email,
'full_name': user_profile.full_name}
event = {'type': 'reaction',
'op': op,
'user': user_dict,
'message_id': message.id,
'emoji_name': reaction.emoji_name,
'emoji_code': reaction.emoji_code,
'reaction_type': reaction.reaction_type} # type: Dict[str, Any]
    # Update the cached message, since a new reaction was added.
update_to_dict_cache([message])
# Recipients for message update events, including reactions, are
# everyone who got the original message. This means reactions
# won't live-update in preview narrows, but it's the right
# performance tradeoff, since otherwise we'd need to send all
# reactions to public stream messages to every browser for every
# client in the organization, which doesn't scale.
#
# However, to ensure that reactions do live-update for any user
    # who has actually participated in reacting to a message, we add a
    # "historical" UserMessage row for any user who reacts to a message,
# subscribing them to future notifications.
ums = UserMessage.objects.filter(message=message.id)
send_event(user_profile.realm, event, [um.user_profile_id for um in ums])
def do_add_reaction_legacy(user_profile: UserProfile, message: Message, emoji_name: str) -> None:
(emoji_code, reaction_type) = emoji_name_to_emoji_code(user_profile.realm, emoji_name)
reaction = Reaction(user_profile=user_profile, message=message,
emoji_name=emoji_name, emoji_code=emoji_code,
reaction_type=reaction_type)
try:
reaction.save()
except django.db.utils.IntegrityError: # nocoverage
# This can happen when a race results in the check in views
# code not catching an attempt to double-add a reaction, or
# perhaps if the emoji_name/emoji_code mapping is busted.
raise JsonableError(_("Reaction already exists."))
notify_reaction_update(user_profile, message, reaction, "add")
def do_remove_reaction_legacy(user_profile: UserProfile, message: Message, emoji_name: str) -> None:
reaction = Reaction.objects.filter(user_profile=user_profile,
message=message,
emoji_name=emoji_name).get()
reaction.delete()
notify_reaction_update(user_profile, message, reaction, "remove")
def do_add_reaction(user_profile: UserProfile, message: Message,
emoji_name: str, emoji_code: str, reaction_type: str) -> None:
reaction = Reaction(user_profile=user_profile, message=message,
emoji_name=emoji_name, emoji_code=emoji_code,
reaction_type=reaction_type)
try:
reaction.save()
except django.db.utils.IntegrityError: # nocoverage
# This can happen when a race results in the check in views
# code not catching an attempt to double-add a reaction, or
# perhaps if the emoji_name/emoji_code mapping is busted.
raise JsonableError(_("Reaction already exists."))
notify_reaction_update(user_profile, message, reaction, "add")
def do_remove_reaction(user_profile: UserProfile, message: Message,
emoji_code: str, reaction_type: str) -> None:
reaction = Reaction.objects.filter(user_profile=user_profile,
message=message,
emoji_code=emoji_code,
reaction_type=reaction_type).get()
reaction.delete()
notify_reaction_update(user_profile, message, reaction, "remove")
def do_send_typing_notification(
realm: Realm,
sender: UserProfile,
recipient_user_profiles: List[UserProfile],
operator: str) -> None:
sender_dict = {'user_id': sender.id, 'email': sender.email}
# Include a list of recipients in the event body to help identify where the typing is happening
recipient_dicts = [{'user_id': profile.id, 'email': profile.email}
for profile in recipient_user_profiles]
event = dict(
type='typing',
op=operator,
sender=sender_dict,
recipients=recipient_dicts,
)
# Only deliver the notification to active user recipients
user_ids_to_notify = [
user.id
for user in recipient_user_profiles
if user.is_active
]
send_event(realm, event, user_ids_to_notify)
# check_send_typing_notification:
# Checks the typing notification and sends it
def check_send_typing_notification(sender: UserProfile, notification_to: Union[Sequence[str], Sequence[int]],
operator: str) -> None:
realm = sender.realm
if len(notification_to) == 0:
raise JsonableError(_('Missing parameter: \'to\' (recipient)'))
elif operator not in ('start', 'stop'):
raise JsonableError(_('Invalid \'op\' value (should be start or stop)'))
    ''' The next chunk of code will go away when we upgrade old mobile
    users away from mobile client versions that send emails.  For the
small number of very outdated mobile clients, we do double work
here in terms of fetching users, but this structure reduces lots
of other unnecessary duplicated code and will make it convenient
to mostly delete code when we desupport old versions of the
app.'''
if isinstance(notification_to[0], int):
user_ids = cast(List[int], notification_to)
else:
try:
emails = cast(Sequence[str], notification_to)
user_ids = user_ids_for_emails(realm, emails)
except ValidationError as e:
assert isinstance(e.messages[0], str)
raise JsonableError(e.messages[0])
if sender.id not in user_ids:
user_ids.append(sender.id)
# If any of the user_ids being sent in are invalid, we will
# just reject the whole request, since a partial list of user_ids
# can create confusion related to huddles. Plus it's a good
# sign that a client is confused (or possibly even malicious) if
# we get bad user_ids.
user_profiles = []
for user_id in user_ids:
try:
                # We include cross-realm bots as possible recipients,
# so that clients can know which huddle conversation
# is relevant here.
user_profile = get_user_by_id_in_realm_including_cross_realm(
user_id, sender.realm)
except UserProfile.DoesNotExist:
raise JsonableError(_("Invalid user ID {}").format(user_id))
user_profiles.append(user_profile)
do_send_typing_notification(
realm=realm,
sender=sender,
recipient_user_profiles=user_profiles,
operator=operator,
)
def send_stream_creation_event(stream: Stream, user_ids: List[int]) -> None:
event = dict(type="stream", op="create",
streams=[stream.to_dict()])
send_event(stream.realm, event, user_ids)
def get_default_value_for_history_public_to_subscribers(
realm: Realm,
invite_only: bool,
history_public_to_subscribers: Optional[bool]
) -> bool:
if invite_only:
if history_public_to_subscribers is None:
# A private stream's history is non-public by default
history_public_to_subscribers = False
else:
# If we later decide to support public streams without
# history, we can remove this code path.
history_public_to_subscribers = True
if realm.is_zephyr_mirror_realm:
# In the Zephyr mirroring model, history is unconditionally
# not public to subscribers, even for public streams.
history_public_to_subscribers = False
return history_public_to_subscribers
def render_stream_description(text: str) -> str:
return bugdown_convert(text, no_previews=True)
def create_stream_if_needed(realm: Realm,
stream_name: str,
*,
invite_only: bool=False,
stream_post_policy: int=Stream.STREAM_POST_POLICY_EVERYONE,
history_public_to_subscribers: Optional[bool]=None,
stream_description: str="") -> Tuple[Stream, bool]:
history_public_to_subscribers = get_default_value_for_history_public_to_subscribers(
realm, invite_only, history_public_to_subscribers)
(stream, created) = Stream.objects.get_or_create(
realm=realm,
name__iexact=stream_name,
defaults = dict(
name=stream_name,
description=stream_description,
invite_only=invite_only,
stream_post_policy=stream_post_policy,
history_public_to_subscribers=history_public_to_subscribers,
is_in_zephyr_realm=realm.is_zephyr_mirror_realm
)
)
if created:
recipient = Recipient.objects.create(type_id=stream.id, type=Recipient.STREAM)
stream.recipient = recipient
stream.rendered_description = render_stream_description(stream_description)
stream.save(update_fields=["recipient", "rendered_description"])
if stream.is_public():
send_stream_creation_event(stream, active_non_guest_user_ids(stream.realm_id))
else:
realm_admin_ids = [user.id for user in
stream.realm.get_admin_users_and_bots()]
send_stream_creation_event(stream, realm_admin_ids)
return stream, created
def ensure_stream(realm: Realm,
stream_name: str,
invite_only: bool=False,
stream_description: str="") -> Stream:
return create_stream_if_needed(realm, stream_name,
invite_only=invite_only,
stream_description=stream_description)[0]
def create_streams_if_needed(realm: Realm,
stream_dicts: List[Mapping[str, Any]]) -> Tuple[List[Stream], List[Stream]]:
"""Note that stream_dict["name"] is assumed to already be stripped of
whitespace"""
added_streams = [] # type: List[Stream]
existing_streams = [] # type: List[Stream]
for stream_dict in stream_dicts:
stream, created = create_stream_if_needed(
realm,
stream_dict["name"],
invite_only=stream_dict.get("invite_only", False),
stream_post_policy=stream_dict.get("stream_post_policy", Stream.STREAM_POST_POLICY_EVERYONE),
history_public_to_subscribers=stream_dict.get("history_public_to_subscribers"),
stream_description=stream_dict.get("description", "")
)
if created:
added_streams.append(stream)
else:
existing_streams.append(stream)
return added_streams, existing_streams
def get_recipient_from_user_profiles(recipient_profiles: Sequence[UserProfile],
forwarded_mirror_message: bool,
forwarder_user_profile: Optional[UserProfile],
sender: UserProfile) -> Recipient:
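    # Map the PM recipients to a single Recipient row: a personal Recipient
    # when the conversation is 1:1, otherwise a huddle Recipient that always
    # includes the sender.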
# Avoid mutating the passed in list of recipient_profiles.
recipient_profiles_map = {}
for user_profile in recipient_profiles:
recipient_profiles_map[user_profile.id] = user_profile
if forwarded_mirror_message:
# In our mirroring integrations with some third-party
# protocols, bots subscribed to the third-party protocol
# forward to Zulip messages that they received in the
# third-party service. The permissions model for that
# forwarding is that users can only submit to Zulip private
# messages they personally received, and here we do the check
# for whether forwarder_user_profile is among the private
# message recipients of the message.
assert forwarder_user_profile is not None
if forwarder_user_profile.id not in recipient_profiles_map:
raise ValidationError(_("User not authorized for this query"))
# If the private message is just between the sender and
# another person, force it to be a personal internally
if (len(recipient_profiles_map) == 2 and sender.id in recipient_profiles_map):
del recipient_profiles_map[sender.id]
assert len(recipient_profiles_map) != 0
if len(recipient_profiles_map) == 1:
user_profile = list(recipient_profiles_map.values())[0]
return user_profile.recipient
# Otherwise, we need a huddle. Make sure the sender is included in huddle messages
recipient_profiles_map[sender.id] = sender
    user_ids = set(recipient_profiles_map)  # type: Set[int]
return get_huddle_recipient(user_ids)
def validate_recipient_user_profiles(user_profiles: Sequence[UserProfile],
sender: UserProfile,
allow_deactivated: bool=False) -> Sequence[UserProfile]:
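    # Reject recipients who are deactivated (unless allow_deactivated) or whose
    # realm is deactivated, and require that all non-cross-realm-bot
    # participants belong to a single realm; returns the de-duplicated
    # recipient list.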
recipient_profiles_map = {} # type: Dict[int, UserProfile]
# We exempt cross-realm bots from the check that all the recipients
# are in the same realm.
realms = set()
if not is_cross_realm_bot_email(sender.email):
realms.add(sender.realm_id)
for user_profile in user_profiles:
if (not user_profile.is_active and not user_profile.is_mirror_dummy and
not allow_deactivated) or user_profile.realm.deactivated:
raise ValidationError(_("'%s' is no longer using Zulip.") % (user_profile.email,))
recipient_profiles_map[user_profile.id] = user_profile
if not is_cross_realm_bot_email(user_profile.email):
realms.add(user_profile.realm_id)
if len(realms) > 1:
raise ValidationError(_("You can't send private messages outside of your organization."))
return list(recipient_profiles_map.values())
def user_ids_for_emails(
realm: Realm,
emails: Iterable[str],
) -> List[int]:
'''
This function should only stay around while
we still have to support mobile sending emails
in typing notifications.
'''
user_ids = [] # type: List[int]
for email in emails:
try:
user_profile = get_user_including_cross_realm(email, realm)
except UserProfile.DoesNotExist:
raise ValidationError(_("Invalid email '%s'") % (email,))
user_ids.append(user_profile.id)
return user_ids
def recipient_for_user_profiles(user_profiles: Sequence[UserProfile], forwarded_mirror_message: bool,
forwarder_user_profile: Optional[UserProfile],
sender: UserProfile, allow_deactivated: bool=False) -> Recipient:
recipient_profiles = validate_recipient_user_profiles(user_profiles, sender,
allow_deactivated=allow_deactivated)
return get_recipient_from_user_profiles(recipient_profiles, forwarded_mirror_message,
forwarder_user_profile, sender)
def already_sent_mirrored_message_id(message: Message) -> Optional[int]:
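    # Detect zephyr-mirroring duplicates: return the ID of an identical message
    # (same sender, recipient, content, client, and topic) already sent within
    # a small time window, or None if there is no such message.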
if message.recipient.type == Recipient.HUDDLE:
# For huddle messages, we use a 10-second window because the
# timestamps aren't guaranteed to actually match between two
# copies of the same message.
time_window = datetime.timedelta(seconds=10)
else:
time_window = datetime.timedelta(seconds=0)
query = Message.objects.filter(
sender=message.sender,
recipient=message.recipient,
content=message.content,
sending_client=message.sending_client,
date_sent__gte=message.date_sent - time_window,
date_sent__lte=message.date_sent + time_window)
messages = filter_by_exact_message_topic(
query=query,
message=message,
)
if messages.exists():
return messages[0].id
return None
def extract_stream_indicator(s: str) -> Union[str, int]:
# Users can pass stream name as either an id or a name,
# and if they choose to pass a name, they may JSON encode
# it for legacy reasons.
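    # For example, 'social' and '"social"' both yield the stream name "social",
    # '42' yields the stream ID 42, and the legacy '["social"]' form is
    # unwrapped to "social".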
try:
data = ujson.loads(s)
except (ValueError, TypeError):
# If there was no JSON encoding, then we just
# have a raw stream name.
return s
# We should stop supporting this odd use case
# once we improve our documentation.
if isinstance(data, list):
if len(data) != 1: # nocoverage
raise ValueError("Expected exactly one stream")
data = data[0]
if isinstance(data, str):
# We had a JSON-encoded stream name.
return data
if isinstance(data, int):
# We had a stream id.
return data
raise ValueError("Invalid data type for stream")
def extract_private_recipients(s: str) -> Union[List[str], List[int]]:
# We try to accept multiple incoming formats for recipients.
# See test_extract_recipients() for examples of what we allow.
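    # For example, '[3, 7]' yields de-duplicated user IDs, while a
    # comma-separated string or JSON list of strings yields emails; mixing IDs
    # and emails raises a TypeError in the helpers below.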
try:
data = ujson.loads(s)
except (ValueError, TypeError):
data = s
if isinstance(data, str):
data = data.split(',')
if not isinstance(data, list):
raise ValueError("Invalid data type for recipients")
if not data:
# We don't complain about empty message recipients here
return data
if isinstance(data[0], str):
return get_validated_emails(data)
if not isinstance(data[0], int):
raise ValueError("Invalid data type for recipients")
return get_validated_user_ids(data)
def get_validated_user_ids(user_ids: Iterable[int]) -> List[int]:
for user_id in user_ids:
if not isinstance(user_id, int):
raise TypeError("Recipient lists may contain emails or user IDs, but not both.")
return list(set(user_ids))
def get_validated_emails(emails: Iterable[str]) -> List[str]:
for email in emails:
if not isinstance(email, str):
raise TypeError("Recipient lists may contain emails or user IDs, but not both.")
return list(filter(bool, {email.strip() for email in emails}))
def check_send_stream_message(sender: UserProfile, client: Client, stream_name: str,
topic: str, body: str, realm: Optional[Realm]=None) -> int:
addressee = Addressee.for_stream_name(stream_name, topic)
message = check_message(sender, client, addressee, body, realm)
return do_send_messages([message])[0]
def check_send_private_message(sender: UserProfile, client: Client,
receiving_user: UserProfile, body: str) -> int:
addressee = Addressee.for_user_profile(receiving_user)
message = check_message(sender, client, addressee, body)
return do_send_messages([message])[0]
# check_send_message:
# Returns the id of the sent message.  Takes the legacy (message_type_name,
# message_to, topic_name) addressing, plus the same optional arguments as check_message.
def check_send_message(sender: UserProfile, client: Client, message_type_name: str,
message_to: Union[Sequence[int], Sequence[str]],
topic_name: Optional[str],
message_content: str, realm: Optional[Realm]=None,
forged: bool=False, forged_timestamp: Optional[float]=None,
forwarder_user_profile: Optional[UserProfile]=None,
local_id: Optional[str]=None,
sender_queue_id: Optional[str]=None,
widget_content: Optional[str]=None) -> int:
addressee = Addressee.legacy_build(
sender,
message_type_name,
message_to,
topic_name)
message = check_message(sender, client, addressee,
message_content, realm, forged, forged_timestamp,
forwarder_user_profile, local_id, sender_queue_id,
widget_content)
return do_send_messages([message])[0]
def check_schedule_message(sender: UserProfile, client: Client,
message_type_name: str,
message_to: Union[Sequence[str], Sequence[int]],
topic_name: Optional[str], message_content: str,
delivery_type: str, deliver_at: datetime.datetime,
realm: Optional[Realm]=None,
forwarder_user_profile: Optional[UserProfile]=None
) -> int:
addressee = Addressee.legacy_build(
sender,
message_type_name,
message_to,
topic_name)
message = check_message(sender, client, addressee,
message_content, realm=realm,
forwarder_user_profile=forwarder_user_profile)
message['deliver_at'] = deliver_at
message['delivery_type'] = delivery_type
recipient = message['message'].recipient
if (delivery_type == 'remind' and (recipient.type != Recipient.STREAM and
recipient.type_id != sender.id)):
raise JsonableError(_("Reminders can only be set for streams."))
return do_schedule_messages([message])[0]
def check_stream_name(stream_name: str) -> None:
if stream_name.strip() == "":
raise JsonableError(_("Invalid stream name '%s'") % (stream_name,))
if len(stream_name) > Stream.MAX_NAME_LENGTH:
raise JsonableError(_("Stream name too long (limit: %s characters).") % (Stream.MAX_NAME_LENGTH,))
for i in stream_name:
if ord(i) == 0:
raise JsonableError(_("Stream name '%s' contains NULL (0x00) characters.") % (stream_name,))
def check_default_stream_group_name(group_name: str) -> None:
if group_name.strip() == "":
raise JsonableError(_("Invalid default stream group name '%s'") % (group_name,))
if len(group_name) > DefaultStreamGroup.MAX_NAME_LENGTH:
raise JsonableError(_("Default stream group name too long (limit: %s characters)")
% (DefaultStreamGroup.MAX_NAME_LENGTH,))
for i in group_name:
if ord(i) == 0:
raise JsonableError(_("Default stream group name '%s' contains NULL (0x00) characters.")
% (group_name,))
def send_rate_limited_pm_notification_to_bot_owner(sender: UserProfile,
realm: Realm,
content: str) -> None:
"""
Sends a PM error notification to a bot's owner if one hasn't already
been sent in the last 5 minutes.
"""
if sender.realm.is_zephyr_mirror_realm or sender.realm.deactivated:
return
if not sender.is_bot or sender.bot_owner is None:
return
# Don't send these notifications for cross-realm bot messages
# (e.g. from EMAIL_GATEWAY_BOT) since the owner for
# EMAIL_GATEWAY_BOT is probably the server administrator, not
# the owner of the bot who could potentially fix the problem.
if sender.realm != realm:
return
# We warn the user once every 5 minutes to avoid a flood of
# PMs on a misconfigured integration, re-using the
# UserProfile.last_reminder field, which is not used for bots.
last_reminder = sender.last_reminder
waitperiod = datetime.timedelta(minutes=UserProfile.BOT_OWNER_STREAM_ALERT_WAITPERIOD)
if last_reminder and timezone_now() - last_reminder <= waitperiod:
return
internal_send_private_message(realm, get_system_bot(settings.NOTIFICATION_BOT),
sender.bot_owner, content)
sender.last_reminder = timezone_now()
sender.save(update_fields=['last_reminder'])
def send_pm_if_empty_stream(stream: Optional[Stream],
realm: Realm,
sender: UserProfile,
stream_name: Optional[str]=None,
stream_id: Optional[int]=None) -> None:
"""If a bot sends a message to a stream that doesn't exist or has no
subscribers, sends a notification to the bot owner (if not a
cross-realm bot) so that the owner can correct the issue."""
if not sender.is_bot or sender.bot_owner is None:
return
arg_dict = {
"bot_identity": sender.delivery_email,
"stream_id": stream_id,
"stream_name": stream_name,
}
if stream is None:
if stream_id is not None:
content = _("Your bot `%(bot_identity)s` tried to send a message to stream ID "
"%(stream_id)s, but there is no stream with that ID.") % arg_dict
else:
assert(stream_name is not None)
content = _("Your bot `%(bot_identity)s` tried to send a message to stream "
"#**%(stream_name)s**, but that stream does not exist. "
"Click [here](#streams/new) to create it.") % arg_dict
else:
if num_subscribers_for_stream_id(stream.id) > 0:
return
content = _("Your bot `%(bot_identity)s` tried to send a message to "
"stream #**%(stream_name)s**. The stream exists but "
"does not have any subscribers.") % arg_dict
send_rate_limited_pm_notification_to_bot_owner(sender, realm, content)
def validate_sender_can_write_to_stream(sender: UserProfile,
stream: Stream,
forwarder_user_profile: Optional[UserProfile]) -> None:
# Our caller is responsible for making sure that `stream` actually
# matches the realm of the sender.
# Organization admins can send to any stream, irrespective of the stream_post_policy value.
if sender.is_realm_admin or is_cross_realm_bot_email(sender.delivery_email):
pass
elif sender.is_bot and (sender.bot_owner is not None and
sender.bot_owner.is_realm_admin):
pass
elif stream.stream_post_policy == Stream.STREAM_POST_POLICY_ADMINS:
raise JsonableError(_("Only organization administrators can send to this stream."))
elif stream.stream_post_policy == Stream.STREAM_POST_POLICY_RESTRICT_NEW_MEMBERS:
if sender.is_bot and (sender.bot_owner is not None and
sender.bot_owner.is_new_member):
raise JsonableError(_("New members cannot send to this stream."))
elif sender.is_new_member:
raise JsonableError(_("New members cannot send to this stream."))
if not (stream.invite_only or sender.is_guest):
# This is a public stream and sender is not a guest user
return
if subscribed_to_stream(sender, stream.id):
        # It is private, but you are subscribed
return
if sender.is_api_super_user:
return
if (forwarder_user_profile is not None and forwarder_user_profile.is_api_super_user):
return
if sender.is_bot and (sender.bot_owner is not None and
subscribed_to_stream(sender.bot_owner, stream.id)):
# Bots can send to any stream their owner can.
return
if sender.delivery_email == settings.WELCOME_BOT:
# The welcome bot welcomes folks to the stream.
return
if sender.delivery_email == settings.NOTIFICATION_BOT:
return
# All other cases are an error.
raise JsonableError(_("Not authorized to send to stream '%s'") % (stream.name,))
def validate_stream_name_with_pm_notification(stream_name: str, realm: Realm,
sender: UserProfile) -> Stream:
stream_name = stream_name.strip()
check_stream_name(stream_name)
try:
stream = get_stream(stream_name, realm)
send_pm_if_empty_stream(stream, realm, sender)
except Stream.DoesNotExist:
send_pm_if_empty_stream(None, realm, sender, stream_name=stream_name)
raise StreamDoesNotExistError(escape(stream_name))
return stream
def validate_stream_id_with_pm_notification(stream_id: int, realm: Realm,
sender: UserProfile) -> Stream:
try:
stream = get_stream_by_id_in_realm(stream_id, realm)
send_pm_if_empty_stream(stream, realm, sender)
except Stream.DoesNotExist:
send_pm_if_empty_stream(None, realm, sender, stream_id=stream_id)
raise StreamWithIDDoesNotExistError(stream_id)
return stream
def check_private_message_policy(realm: Realm, sender: UserProfile,
user_profiles: Sequence[UserProfile]) -> None:
if realm.private_message_policy == Realm.PRIVATE_MESSAGE_POLICY_DISABLED:
if sender.is_bot or (len(user_profiles) == 1 and user_profiles[0].is_bot):
# We allow PMs only between users and bots, to avoid
# breaking the tutorial as well as automated
# notifications from system bots to users.
return
raise JsonableError(_("Private messages are disabled in this organization."))
# check_message:
# Returns a message dict ready for sending with do_send_messages on success,
# and raises a JsonableError if the message fails validation.
def check_message(sender: UserProfile, client: Client, addressee: Addressee,
message_content_raw: str, realm: Optional[Realm]=None, forged: bool=False,
forged_timestamp: Optional[float]=None,
forwarder_user_profile: Optional[UserProfile]=None,
local_id: Optional[str]=None,
sender_queue_id: Optional[str]=None,
widget_content: Optional[str]=None) -> Dict[str, Any]:
"""See
https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html
for high-level documentation on this subsystem.
"""
stream = None
message_content = message_content_raw.rstrip()
if len(message_content) == 0:
raise JsonableError(_("Message must not be empty"))
if '\x00' in message_content:
raise JsonableError(_("Message must not contain null bytes"))
message_content = truncate_body(message_content)
if realm is None:
realm = sender.realm
if addressee.is_stream():
topic_name = addressee.topic()
topic_name = truncate_topic(topic_name)
stream_name = addressee.stream_name()
stream_id = addressee.stream_id()
if stream_name is not None:
stream = validate_stream_name_with_pm_notification(stream_name, realm, sender)
elif stream_id is not None:
stream = validate_stream_id_with_pm_notification(stream_id, realm, sender)
else:
stream = addressee.stream()
assert stream is not None
recipient = stream.recipient
# This will raise JsonableError if there are problems.
validate_sender_can_write_to_stream(
sender=sender,
stream=stream,
forwarder_user_profile=forwarder_user_profile
)
elif addressee.is_private():
user_profiles = addressee.user_profiles()
mirror_message = client and client.name in ["zephyr_mirror", "irc_mirror",
"jabber_mirror", "JabberMirror"]
check_private_message_policy(realm, sender, user_profiles)
# API Super-users who set the `forged` flag are allowed to
# forge messages sent by any user, so we disable the
# `forwarded_mirror_message` security check in that case.
forwarded_mirror_message = mirror_message and not forged
try:
recipient = recipient_for_user_profiles(user_profiles,
forwarded_mirror_message,
forwarder_user_profile, sender)
except ValidationError as e:
assert isinstance(e.messages[0], str)
raise JsonableError(e.messages[0])
else:
# This is defensive code--Addressee already validates
# the message type.
raise AssertionError("Invalid message type")
message = Message()
message.sender = sender
message.content = message_content
message.recipient = recipient
if addressee.is_stream():
message.set_topic_name(topic_name)
if forged and forged_timestamp is not None:
# Forged messages come with a timestamp
message.date_sent = timestamp_to_datetime(forged_timestamp)
else:
message.date_sent = timezone_now()
message.sending_client = client
# We render messages later in the process.
assert message.rendered_content is None
if client.name == "zephyr_mirror":
id = already_sent_mirrored_message_id(message)
if id is not None:
return {'message': id}
if widget_content is not None:
try:
widget_content = ujson.loads(widget_content)
except Exception:
raise JsonableError(_('Widgets: API programmer sent invalid JSON content'))
error_msg = check_widget_content(widget_content)
if error_msg:
raise JsonableError(_('Widgets: %s') % (error_msg,))
return {'message': message, 'stream': stream, 'local_id': local_id,
'sender_queue_id': sender_queue_id, 'realm': realm,
'widget_content': widget_content}
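# Illustrative sketch, not part of the original module: a caller would
# typically pass the dict returned by check_message() straight into
# do_send_messages(), mirroring what _internal_prep_message() below does.
# The sender/recipient_user values here are placeholders.
#
#     addressee = Addressee.for_user_profile(recipient_user)
#     ret = check_message(sender, get_client("Internal"), addressee, "hello world")
#     message_ids = do_send_messages([ret])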
def _internal_prep_message(realm: Realm,
sender: UserProfile,
addressee: Addressee,
content: str) -> Optional[Dict[str, Any]]:
"""
Create a message object and checks it, but doesn't send it or save it to the database.
The internal function that calls this can therefore batch send a bunch of created
messages together as one database query.
Call do_send_messages with a list of the return values of this method.
"""
    # Truncate overly long content rather than rejecting it.
if len(content) > MAX_MESSAGE_LENGTH:
content = content[0:3900] + "\n\n[message was too long and has been truncated]"
if realm is None:
raise RuntimeError("None is not a valid realm for internal_prep_message!")
# If we have a stream name, and the stream doesn't exist, we
# create it here (though this code path should probably be removed
# eventually, moving that responsibility to the caller). If
# addressee.stream_name() is None (i.e. we're sending to a stream
# by ID), we skip this, as the stream object must already exist.
if addressee.is_stream():
stream_name = addressee.stream_name()
if stream_name is not None:
ensure_stream(realm, stream_name)
try:
return check_message(sender, get_client("Internal"), addressee,
content, realm=realm)
except JsonableError as e:
logging.exception("Error queueing internal message by %s: %s" % (
sender.delivery_email, e))
return None
def internal_prep_stream_message(
realm: Realm, sender: UserProfile,
stream: Stream, topic: str, content: str
) -> Optional[Dict[str, Any]]:
"""
See _internal_prep_message for details of how this works.
"""
addressee = Addressee.for_stream(stream, topic)
return _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
)
def internal_prep_stream_message_by_name(
realm: Realm, sender: UserProfile,
stream_name: str, topic: str, content: str
) -> Optional[Dict[str, Any]]:
"""
See _internal_prep_message for details of how this works.
"""
addressee = Addressee.for_stream_name(stream_name, topic)
return _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
)
def internal_prep_private_message(realm: Realm,
sender: UserProfile,
recipient_user: UserProfile,
content: str) -> Optional[Dict[str, Any]]:
"""
See _internal_prep_message for details of how this works.
"""
addressee = Addressee.for_user_profile(recipient_user)
return _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
)
def internal_send_private_message(realm: Realm,
sender: UserProfile,
recipient_user: UserProfile,
content: str) -> Optional[int]:
message = internal_prep_private_message(realm, sender, recipient_user, content)
if message is None:
return None
message_ids = do_send_messages([message])
return message_ids[0]
def internal_send_stream_message(
realm: Realm,
sender: UserProfile,
stream: Stream,
topic: str,
content: str,
email_gateway: Optional[bool]=False) -> Optional[int]:
message = internal_prep_stream_message(
realm, sender, stream,
topic, content
)
if message is None:
return None
message_ids = do_send_messages([message], email_gateway=email_gateway)
return message_ids[0]
def internal_send_stream_message_by_name(
realm: Realm, sender: UserProfile,
stream_name: str, topic: str, content: str
) -> Optional[int]:
message = internal_prep_stream_message_by_name(
realm, sender, stream_name,
topic, content
)
if message is None:
return None
message_ids = do_send_messages([message])
return message_ids[0]
def internal_send_huddle_message(realm: Realm, sender: UserProfile, emails: List[str],
content: str) -> Optional[int]:
addressee = Addressee.for_private(emails, realm)
message = _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
)
if message is None:
return None
message_ids = do_send_messages([message])
return message_ids[0]
def pick_color(user_profile: UserProfile, subs: Iterable[Subscription]) -> str:
# These colors are shared with the palette in subs.js.
used_colors = [sub.color for sub in subs if sub.active]
available_colors = [s for s in STREAM_ASSIGNMENT_COLORS if s not in used_colors]
if available_colors:
return available_colors[0]
else:
return STREAM_ASSIGNMENT_COLORS[len(used_colors) % len(STREAM_ASSIGNMENT_COLORS)]
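# Illustrative sketch, not part of the original module: once every palette
# entry in STREAM_ASSIGNMENT_COLORS is taken by an active subscription,
# pick_color() falls back to cycling through the palette, so with exactly
# len(STREAM_ASSIGNMENT_COLORS) colors in use the first palette color is
# reused:
#
#     used_colors = list(STREAM_ASSIGNMENT_COLORS)
#     color = STREAM_ASSIGNMENT_COLORS[len(used_colors) % len(STREAM_ASSIGNMENT_COLORS)]
#     assert color == STREAM_ASSIGNMENT_COLORS[0]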
def validate_user_access_to_subscribers(user_profile: Optional[UserProfile],
stream: Stream) -> None:
""" Validates whether the user can view the subscribers of a stream. Raises a JsonableError if:
* The user and the stream are in different realms
* The realm is MIT and the stream is not invite only.
* The stream is invite only, requesting_user is passed, and that user
does not subscribe to the stream.
"""
validate_user_access_to_subscribers_helper(
user_profile,
{"realm_id": stream.realm_id,
"invite_only": stream.invite_only},
# We use a lambda here so that we only compute whether the
# user is subscribed if we have to
lambda: subscribed_to_stream(cast(UserProfile, user_profile), stream.id))
def validate_user_access_to_subscribers_helper(user_profile: Optional[UserProfile],
stream_dict: Mapping[str, Any],
check_user_subscribed: Callable[[], bool]) -> None:
"""Helper for validate_user_access_to_subscribers that doesn't require
a full stream object. This function is a bit hard to read,
because it is carefully optimized for performance in the two code
paths we call it from:
* In `bulk_get_subscriber_user_ids`, we already know whether the
user was subscribed via `sub_dict`, and so we want to avoid a
database query at all (especially since it calls this in a loop);
* In `validate_user_access_to_subscribers`, we want to only check
if the user is subscribed when we absolutely have to, since it
costs a database query.
The `check_user_subscribed` argument is a function that reports
whether the user is subscribed to the stream.
Note also that we raise a ValidationError in cases where the
caller is doing the wrong thing (maybe these should be
AssertionErrors), and JsonableError for 400 type errors.
"""
if user_profile is None:
raise ValidationError("Missing user to validate access for")
if user_profile.realm_id != stream_dict["realm_id"]:
raise ValidationError("Requesting user not in given realm")
# Guest users can access subscribed public stream's subscribers
if user_profile.is_guest:
if check_user_subscribed():
return
        # We could put an AssertionError here, since we don't have
        # any code paths that would allow a guest user to access other
        # streams in the first place.
if not user_profile.can_access_public_streams() and not stream_dict["invite_only"]:
raise JsonableError(_("Subscriber data is not available for this stream"))
# Organization administrators can view subscribers for all streams.
if user_profile.is_realm_admin:
return
if (stream_dict["invite_only"] and not check_user_subscribed()):
raise JsonableError(_("Unable to retrieve subscribers for private stream"))
def bulk_get_subscriber_user_ids(stream_dicts: Iterable[Mapping[str, Any]],
user_profile: UserProfile,
sub_dict: Mapping[int, bool],
stream_recipient: StreamRecipientMap) -> Dict[int, List[int]]:
"""sub_dict maps stream_id => whether the user is subscribed to that stream."""
target_stream_dicts = []
for stream_dict in stream_dicts:
stream_recipient.populate_with(stream_id=stream_dict["id"],
recipient_id=stream_dict["recipient_id"])
try:
validate_user_access_to_subscribers_helper(user_profile, stream_dict,
lambda: sub_dict[stream_dict["id"]])
except JsonableError:
continue
target_stream_dicts.append(stream_dict)
stream_ids = [stream['id'] for stream in target_stream_dicts]
recipient_ids = sorted([
stream_recipient.recipient_id_for(stream_id)
for stream_id in stream_ids
])
result = dict((stream["id"], []) for stream in stream_dicts) # type: Dict[int, List[int]]
if not recipient_ids:
return result
'''
The raw SQL below leads to more than a 2x speedup when tested with
20k+ total subscribers. (For large realms with lots of default
streams, this function deals with LOTS of data, so it is important
to optimize.)
'''
id_list = ', '.join(str(recipient_id) for recipient_id in recipient_ids)
query = '''
SELECT
zerver_subscription.recipient_id,
zerver_subscription.user_profile_id
FROM
zerver_subscription
INNER JOIN zerver_userprofile ON
zerver_userprofile.id = zerver_subscription.user_profile_id
WHERE
zerver_subscription.recipient_id in (%s) AND
zerver_subscription.active AND
zerver_userprofile.is_active
ORDER BY
zerver_subscription.recipient_id,
zerver_subscription.user_profile_id
''' % (id_list,)
cursor = connection.cursor()
cursor.execute(query)
rows = cursor.fetchall()
cursor.close()
recip_to_stream_id = stream_recipient.recipient_to_stream_id_dict()
'''
Using groupby/itemgetter here is important for performance, at scale.
It makes it so that all interpreter overhead is just O(N) in nature.
'''
for recip_id, recip_rows in itertools.groupby(rows, itemgetter(0)):
user_profile_ids = [r[1] for r in recip_rows]
stream_id = recip_to_stream_id[recip_id]
result[stream_id] = list(user_profile_ids)
return result
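# Illustrative sketch, not part of the original module: the groupby/itemgetter
# pattern above only works because the raw SQL orders rows by recipient_id, so
# each recipient's rows are contiguous. On plain tuples it behaves like:
#
#     rows = [(1, 10), (1, 11), (2, 12)]  # (recipient_id, user_profile_id)
#     {recip_id: [r[1] for r in recip_rows]
#      for recip_id, recip_rows in itertools.groupby(rows, itemgetter(0))}
#     # => {1: [10, 11], 2: [12]}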
def get_subscribers_query(stream: Stream, requesting_user: Optional[UserProfile]) -> QuerySet:
# TODO: Make a generic stub for QuerySet
""" Build a query to get the subscribers list for a stream, raising a JsonableError if:
'realm' is optional in stream.
The caller can refine this query with select_related(), values(), etc. depending
on whether it wants objects or just certain fields
"""
validate_user_access_to_subscribers(requesting_user, stream)
# Note that non-active users may still have "active" subscriptions, because we
# want to be able to easily reactivate them with their old subscriptions. This
# is why the query here has to look at the UserProfile.is_active flag.
subscriptions = get_active_subscriptions_for_stream_id(stream.id).filter(
user_profile__is_active=True
)
return subscriptions
def get_subscriber_emails(stream: Stream,
requesting_user: Optional[UserProfile]=None) -> List[str]:
subscriptions_query = get_subscribers_query(stream, requesting_user)
subscriptions = subscriptions_query.values('user_profile__email')
return [subscription['user_profile__email'] for subscription in subscriptions]
def notify_subscriptions_added(user_profile: UserProfile,
sub_pairs: Iterable[Tuple[Subscription, Stream]],
stream_user_ids: Callable[[Stream], List[int]],
recent_traffic: Dict[int, int],
no_log: bool=False) -> None:
if not no_log:
log_event({'type': 'subscription_added',
'user': user_profile.email,
'names': [stream.name for sub, stream in sub_pairs],
'realm': user_profile.realm.string_id})
sub_dicts = []
for (subscription, stream) in sub_pairs:
sub_dict = stream.to_dict()
for field_name in Subscription.API_FIELDS:
if field_name == "active":
# Skip the "active" field, it's implied by context
continue
sub_dict[field_name] = getattr(subscription, field_name)
sub_dict['in_home_view'] = not subscription.is_muted
sub_dict['email_address'] = encode_email_address(stream, show_sender=True)
sub_dict['is_old_stream'] = is_old_stream(stream.date_created)
sub_dict['stream_weekly_traffic'] = get_average_weekly_stream_traffic(
stream.id, stream.date_created, recent_traffic)
sub_dict['subscribers'] = stream_user_ids(stream)
sub_dicts.append(sub_dict)
# Send a notification to the user who subscribed.
event = dict(type="subscription", op="add",
subscriptions=sub_dicts)
send_event(user_profile.realm, event, [user_profile.id])
def get_peer_user_ids_for_stream_change(stream: Stream,
altered_user_ids: Iterable[int],
subscribed_user_ids: Iterable[int]) -> Set[int]:
'''
altered_user_ids is the user_ids that we are adding/removing
subscribed_user_ids is the already-subscribed user_ids
Based on stream policy, we notify the correct bystanders, while
not notifying altered_users (who get subscribers via another event)
'''
if stream.invite_only:
# PRIVATE STREAMS
# Realm admins can access all private stream subscribers. Send them an
# event even if they aren't subscribed to stream.
realm_admin_ids = [user.id for user in stream.realm.get_admin_users_and_bots()]
user_ids_to_notify = []
user_ids_to_notify.extend(realm_admin_ids)
user_ids_to_notify.extend(subscribed_user_ids)
return set(user_ids_to_notify) - set(altered_user_ids)
else:
# PUBLIC STREAMS
# We now do "peer_add" or "peer_remove" events even for streams
# users were never subscribed to, in order for the neversubscribed
# structure to stay up-to-date.
return set(active_non_guest_user_ids(stream.realm_id)) - set(altered_user_ids)
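# Illustrative sketch, not part of the original module: the private-stream set
# arithmetic above, with placeholder user ids. Admins and existing subscribers
# are notified, minus the users whose subscriptions are being changed:
#
#     realm_admin_ids = [1, 2]
#     subscribed_user_ids = [2, 3, 4]
#     altered_user_ids = [4, 5]
#     set(realm_admin_ids + subscribed_user_ids) - set(altered_user_ids)
#     # => {1, 2, 3}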
def get_user_ids_for_streams(streams: Iterable[Stream]) -> Dict[int, List[int]]:
stream_ids = [stream.id for stream in streams]
all_subs = get_active_subscriptions_for_stream_ids(stream_ids).filter(
user_profile__is_active=True,
).values(
'recipient__type_id',
'user_profile_id',
).order_by(
'recipient__type_id',
)
get_stream_id = itemgetter('recipient__type_id')
all_subscribers_by_stream = defaultdict(list) # type: Dict[int, List[int]]
for stream_id, rows in itertools.groupby(all_subs, get_stream_id):
user_ids = [row['user_profile_id'] for row in rows]
all_subscribers_by_stream[stream_id] = user_ids
return all_subscribers_by_stream
def get_last_message_id() -> int:
# We generally use this function to populate RealmAuditLog, and
# the max id here is actually systemwide, not per-realm. I
# assume there's some advantage in not filtering by realm.
last_id = Message.objects.aggregate(Max('id'))['id__max']
if last_id is None:
# During initial realm creation, there might be 0 messages in
# the database; in that case, the `aggregate` query returns
# None. Since we want an int for "beginning of time", use -1.
last_id = -1
return last_id
SubT = Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]]
def bulk_add_subscriptions(streams: Iterable[Stream],
users: Iterable[UserProfile],
color_map: Optional[Dict[str, str]]=None,
from_stream_creation: bool=False,
acting_user: Optional[UserProfile]=None) -> SubT:
users = list(users)
recipients_map = dict((stream.id, stream.recipient_id) for stream in streams) # type: Dict[int, int]
recipient_ids = [recipient_id for recipient_id in recipients_map.values()] # type: List[int]
stream_map = {} # type: Dict[int, Stream]
for stream in streams:
stream_map[recipients_map[stream.id]] = stream
subs_by_user = defaultdict(list) # type: Dict[int, List[Subscription]]
all_subs_query = get_stream_subscriptions_for_users(users).select_related('user_profile')
for sub in all_subs_query:
subs_by_user[sub.user_profile_id].append(sub)
realm = users[0].realm
already_subscribed = [] # type: List[Tuple[UserProfile, Stream]]
subs_to_activate = [] # type: List[Tuple[Subscription, Stream]]
new_subs = [] # type: List[Tuple[UserProfile, int, Stream]]
for user_profile in users:
needs_new_sub = set(recipient_ids) # type: Set[int]
for sub in subs_by_user[user_profile.id]:
if sub.recipient_id in needs_new_sub:
needs_new_sub.remove(sub.recipient_id)
if sub.active:
already_subscribed.append((user_profile, stream_map[sub.recipient_id]))
else:
subs_to_activate.append((sub, stream_map[sub.recipient_id]))
# Mark the sub as active, without saving, so that
# pick_color will consider this to be an active
# subscription when picking colors
sub.active = True
for recipient_id in needs_new_sub:
new_subs.append((user_profile, recipient_id, stream_map[recipient_id]))
subs_to_add = [] # type: List[Tuple[Subscription, Stream]]
for (user_profile, recipient_id, stream) in new_subs:
if color_map is not None and stream.name in color_map:
color = color_map[stream.name]
else:
color = pick_color(user_profile, subs_by_user[user_profile.id])
sub_to_add = Subscription(user_profile=user_profile, active=True,
color=color, recipient_id=recipient_id)
subs_by_user[user_profile.id].append(sub_to_add)
subs_to_add.append((sub_to_add, stream))
    # TODO: XXX: This transaction really needs to be done at the serializable
# transaction isolation level.
with transaction.atomic():
occupied_streams_before = list(get_occupied_streams(realm))
Subscription.objects.bulk_create([sub for (sub, stream) in subs_to_add])
sub_ids = [sub.id for (sub, stream) in subs_to_activate]
Subscription.objects.filter(id__in=sub_ids).update(active=True)
occupied_streams_after = list(get_occupied_streams(realm))
# Log Subscription Activities in RealmAuditLog
event_time = timezone_now()
event_last_message_id = get_last_message_id()
    all_subscription_logs = [] # type: List[RealmAuditLog]
for (sub, stream) in subs_to_add:
all_subscription_logs.append(RealmAuditLog(realm=realm,
acting_user=acting_user,
modified_user=sub.user_profile,
modified_stream=stream,
event_last_message_id=event_last_message_id,
event_type=RealmAuditLog.SUBSCRIPTION_CREATED,
event_time=event_time))
for (sub, stream) in subs_to_activate:
all_subscription_logs.append(RealmAuditLog(realm=realm,
acting_user=acting_user,
modified_user=sub.user_profile,
modified_stream=stream,
event_last_message_id=event_last_message_id,
event_type=RealmAuditLog.SUBSCRIPTION_ACTIVATED,
event_time=event_time))
# Now since we have all log objects generated we can do a bulk insert
RealmAuditLog.objects.bulk_create(all_subscription_logs)
new_occupied_streams = [stream for stream in
set(occupied_streams_after) - set(occupied_streams_before)
if not stream.invite_only]
if new_occupied_streams and not from_stream_creation:
event = dict(type="stream", op="occupy",
streams=[stream.to_dict()
for stream in new_occupied_streams])
send_event(realm, event, active_user_ids(realm.id))
# Notify all existing users on streams that users have joined
# First, get all users subscribed to the streams that we care about
# We fetch all subscription information upfront, as it's used throughout
    # the following code and we want to minimize DB queries.
all_subscribers_by_stream = get_user_ids_for_streams(streams=streams)
def fetch_stream_subscriber_user_ids(stream: Stream) -> List[int]:
if stream.is_in_zephyr_realm and not stream.invite_only:
return []
user_ids = all_subscribers_by_stream[stream.id]
return user_ids
sub_tuples_by_user = defaultdict(list) # type: Dict[int, List[Tuple[Subscription, Stream]]]
new_streams = set() # type: Set[Tuple[int, int]]
for (sub, stream) in subs_to_add + subs_to_activate:
sub_tuples_by_user[sub.user_profile.id].append((sub, stream))
new_streams.add((sub.user_profile.id, stream.id))
# We now send several types of events to notify browsers. The
# first batch is notifications to users on invite-only streams
# that the stream exists.
for stream in streams:
if not stream.is_public():
            # Users newly added to invite-only streams need a
            # `create` notification: the stream must exist in their
            # client before they get the "subscribe" notification,
            # and it also lets them manage the new stream.
            # Realm admins already have all created private streams.
realm_admin_ids = [user.id for user in realm.get_admin_users_and_bots()]
new_users_ids = [user.id for user in users if (user.id, stream.id) in new_streams and
user.id not in realm_admin_ids]
send_stream_creation_event(stream, new_users_ids)
stream_ids = {stream.id for stream in streams}
recent_traffic = get_streams_traffic(stream_ids=stream_ids)
# The second batch is events for the users themselves that they
# were subscribed to the new streams.
for user_profile in users:
if len(sub_tuples_by_user[user_profile.id]) == 0:
continue
sub_pairs = sub_tuples_by_user[user_profile.id]
notify_subscriptions_added(user_profile, sub_pairs, fetch_stream_subscriber_user_ids,
recent_traffic)
    # The third batch is events for other users who are tracking the
# subscribers lists of streams in their browser; everyone for
# public streams and only existing subscribers for private streams.
for stream in streams:
if stream.is_in_zephyr_realm and not stream.invite_only:
continue
new_user_ids = [user.id for user in users if (user.id, stream.id) in new_streams]
subscribed_user_ids = all_subscribers_by_stream[stream.id]
peer_user_ids = get_peer_user_ids_for_stream_change(
stream=stream,
altered_user_ids=new_user_ids,
subscribed_user_ids=subscribed_user_ids,
)
if peer_user_ids:
for new_user_id in new_user_ids:
event = dict(type="subscription", op="peer_add",
subscriptions=[stream.name],
user_id=new_user_id)
send_event(realm, event, peer_user_ids)
return ([(user_profile, stream) for (user_profile, recipient_id, stream) in new_subs] +
[(sub.user_profile, stream) for (sub, stream) in subs_to_activate],
already_subscribed)
def get_available_notification_sounds() -> List[str]:
notification_sounds_path = static_path('audio/notification_sounds')
available_notification_sounds = []
for file_name in os.listdir(notification_sounds_path):
root, ext = os.path.splitext(file_name)
if '.' in root: # nocoverage
# Exclude e.g. zulip.abcd1234.ogg (generated by production hash-naming)
# to avoid spurious duplicates.
continue
if ext == '.ogg':
available_notification_sounds.append(root)
return available_notification_sounds
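# Illustrative sketch, not part of the original module: given a directory
# containing the hypothetical files "zulip.ogg", "zulip.abcd1234.ogg" and
# "ding.wav", the scan above would return only ['zulip'] -- the hash-named
# copy is skipped because its root contains a '.', and the .wav file is
# skipped because only .ogg files are considered.
#
#     os.path.splitext("zulip.abcd1234.ogg")  # => ('zulip.abcd1234', '.ogg')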
def notify_subscriptions_removed(user_profile: UserProfile, streams: Iterable[Stream],
no_log: bool=False) -> None:
if not no_log:
log_event({'type': 'subscription_removed',
'user': user_profile.email,
'names': [stream.name for stream in streams],
'realm': user_profile.realm.string_id})
payload = [dict(name=stream.name, stream_id=stream.id) for stream in streams]
event = dict(type="subscription", op="remove",
subscriptions=payload)
send_event(user_profile.realm, event, [user_profile.id])
SubAndRemovedT = Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]]
def bulk_remove_subscriptions(users: Iterable[UserProfile],
streams: Iterable[Stream],
acting_client: Client,
acting_user: Optional[UserProfile]=None) -> SubAndRemovedT:
users = list(users)
streams = list(streams)
stream_dict = {stream.id: stream for stream in streams}
existing_subs_by_user = get_bulk_stream_subscriber_info(users, stream_dict)
def get_non_subscribed_tups() -> List[Tuple[UserProfile, Stream]]:
stream_ids = {stream.id for stream in streams}
not_subscribed = [] # type: List[Tuple[UserProfile, Stream]]
for user_profile in users:
user_sub_stream_info = existing_subs_by_user[user_profile.id]
subscribed_stream_ids = {
stream.id
for (sub, stream) in user_sub_stream_info
}
not_subscribed_stream_ids = stream_ids - subscribed_stream_ids
for stream_id in not_subscribed_stream_ids:
stream = stream_dict[stream_id]
not_subscribed.append((user_profile, stream))
return not_subscribed
not_subscribed = get_non_subscribed_tups()
subs_to_deactivate = [] # type: List[Tuple[Subscription, Stream]]
sub_ids_to_deactivate = [] # type: List[int]
# This loop just flattens out our data into big lists for
# bulk operations.
for tup_list in existing_subs_by_user.values():
for (sub, stream) in tup_list:
subs_to_deactivate.append((sub, stream))
sub_ids_to_deactivate.append(sub.id)
our_realm = users[0].realm
    # TODO: XXX: This transaction really needs to be done at the serializable
# transaction isolation level.
with transaction.atomic():
occupied_streams_before = list(get_occupied_streams(our_realm))
Subscription.objects.filter(
id__in=sub_ids_to_deactivate,
        ).update(active=False)
occupied_streams_after = list(get_occupied_streams(our_realm))
# Log Subscription Activities in RealmAuditLog
event_time = timezone_now()
event_last_message_id = get_last_message_id()
    all_subscription_logs = [] # type: List[RealmAuditLog]
for (sub, stream) in subs_to_deactivate:
all_subscription_logs.append(RealmAuditLog(realm=sub.user_profile.realm,
modified_user=sub.user_profile,
modified_stream=stream,
event_last_message_id=event_last_message_id,
event_type=RealmAuditLog.SUBSCRIPTION_DEACTIVATED,
event_time=event_time))
# Now since we have all log objects generated we can do a bulk insert
RealmAuditLog.objects.bulk_create(all_subscription_logs)
altered_user_dict = defaultdict(list) # type: Dict[int, List[UserProfile]]
streams_by_user = defaultdict(list) # type: Dict[int, List[Stream]]
for (sub, stream) in subs_to_deactivate:
streams_by_user[sub.user_profile_id].append(stream)
altered_user_dict[stream.id].append(sub.user_profile)
for user_profile in users:
if len(streams_by_user[user_profile.id]) == 0:
continue
notify_subscriptions_removed(user_profile, streams_by_user[user_profile.id])
event = {'type': 'mark_stream_messages_as_read',
'client_id': acting_client.id,
'user_profile_id': user_profile.id,
'stream_ids': [stream.id for stream in streams]}
queue_json_publish("deferred_work", event)
all_subscribers_by_stream = get_user_ids_for_streams(streams=streams)
def send_peer_remove_event(stream: Stream) -> None:
if stream.is_in_zephyr_realm and not stream.invite_only:
return
altered_users = altered_user_dict[stream.id]
altered_user_ids = [u.id for u in altered_users]
subscribed_user_ids = all_subscribers_by_stream[stream.id]
peer_user_ids = get_peer_user_ids_for_stream_change(
stream=stream,
altered_user_ids=altered_user_ids,
subscribed_user_ids=subscribed_user_ids,
)
if peer_user_ids:
for removed_user in altered_users:
event = dict(type="subscription",
op="peer_remove",
subscriptions=[stream.name],
user_id=removed_user.id)
send_event(our_realm, event, peer_user_ids)
for stream in streams:
send_peer_remove_event(stream=stream)
new_vacant_streams = [stream for stream in
set(occupied_streams_before) - set(occupied_streams_after)]
new_vacant_private_streams = [stream for stream in new_vacant_streams
if stream.invite_only]
new_vacant_public_streams = [stream for stream in new_vacant_streams
if not stream.invite_only]
if new_vacant_public_streams:
event = dict(type="stream", op="vacate",
streams=[stream.to_dict()
for stream in new_vacant_public_streams])
send_event(our_realm, event, active_user_ids(our_realm.id))
if new_vacant_private_streams:
# Deactivate any newly-vacant private streams
for stream in new_vacant_private_streams:
do_deactivate_stream(stream)
return (
[(sub.user_profile, stream) for (sub, stream) in subs_to_deactivate],
not_subscribed,
)
def log_subscription_property_change(user_email: str, stream_name: str, property: str,
value: Any) -> None:
event = {'type': 'subscription_property',
'property': property,
'user': user_email,
'stream_name': stream_name,
'value': value}
log_event(event)
def do_change_subscription_property(user_profile: UserProfile, sub: Subscription,
stream: Stream, property_name: str, value: Any
) -> None:
database_property_name = property_name
event_property_name = property_name
database_value = value
event_value = value
# For this property, is_muted is used in the database, but
# in_home_view in the API, since we haven't migrated the events
# API to the new name yet.
if property_name == "in_home_view":
database_property_name = "is_muted"
database_value = not value
if property_name == "is_muted":
event_property_name = "in_home_view"
event_value = not value
setattr(sub, database_property_name, database_value)
sub.save(update_fields=[database_property_name])
log_subscription_property_change(user_profile.email, stream.name,
database_property_name, database_value)
event = dict(type="subscription",
op="update",
email=user_profile.email,
property=event_property_name,
value=event_value,
stream_id=stream.id,
name=stream.name)
send_event(user_profile.realm, event, [user_profile.id])
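# Illustrative sketch, not part of the original module: the name/value mapping
# performed above for the legacy in_home_view API property.
#
#     do_change_subscription_property(user_profile, sub, stream, "in_home_view", False)
#     # stores sub.is_muted = True and sends an event with
#     # property="in_home_view", value=False
#
#     do_change_subscription_property(user_profile, sub, stream, "is_muted", True)
#     # also stores sub.is_muted = True and sends an event with
#     # property="in_home_view", value=False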
def do_change_password(user_profile: UserProfile, password: str, commit: bool=True) -> None:
user_profile.set_password(password)
if commit:
user_profile.save(update_fields=["password"])
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile,
modified_user=user_profile, event_type=RealmAuditLog.USER_PASSWORD_CHANGED,
event_time=event_time)
def do_change_full_name(user_profile: UserProfile, full_name: str,
acting_user: Optional[UserProfile]) -> None:
old_name = user_profile.full_name
user_profile.full_name = full_name
user_profile.save(update_fields=["full_name"])
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user,
modified_user=user_profile, event_type=RealmAuditLog.USER_FULL_NAME_CHANGED,
event_time=event_time, extra_data=old_name)
payload = dict(email=user_profile.email,
user_id=user_profile.id,
full_name=user_profile.full_name)
send_event(user_profile.realm,
dict(type='realm_user', op='update', person=payload),
active_user_ids(user_profile.realm_id))
if user_profile.is_bot:
send_event(user_profile.realm,
dict(type='realm_bot', op='update', bot=payload),
bot_owner_user_ids(user_profile))
def check_change_full_name(user_profile: UserProfile, full_name_raw: str,
acting_user: UserProfile) -> str:
"""Verifies that the user's proposed full name is valid. The caller
    is responsible for checking permissions. Returns the new
full name, which may differ from what was passed in (because this
function strips whitespace)."""
new_full_name = check_full_name(full_name_raw)
do_change_full_name(user_profile, new_full_name, acting_user)
return new_full_name
def check_change_bot_full_name(user_profile: UserProfile, full_name_raw: str,
acting_user: UserProfile) -> None:
new_full_name = check_full_name(full_name_raw)
if new_full_name == user_profile.full_name:
# Our web app will try to patch full_name even if the user didn't
# modify the name in the form. We just silently ignore those
# situations.
return
check_bot_name_available(
realm_id=user_profile.realm_id,
full_name=new_full_name,
)
do_change_full_name(user_profile, new_full_name, acting_user)
def do_change_bot_owner(user_profile: UserProfile, bot_owner: UserProfile,
acting_user: UserProfile) -> None:
previous_owner = user_profile.bot_owner
user_profile.bot_owner = bot_owner
user_profile.save() # Can't use update_fields because of how the foreign key works.
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user,
modified_user=user_profile, event_type=RealmAuditLog.USER_BOT_OWNER_CHANGED,
event_time=event_time)
update_users = bot_owner_user_ids(user_profile)
    # For admins, an update event is sent instead of delete/add
    # events, since an admin's bot_data already contains all the
    # bots and none of them should be removed or re-added.
    # Delete the bot from the previous owner's bot data.
if previous_owner and not previous_owner.is_realm_admin:
send_event(user_profile.realm,
dict(type='realm_bot',
op="delete",
bot=dict(email=user_profile.email,
user_id=user_profile.id,
)),
{previous_owner.id, })
# Do not send update event for previous bot owner.
update_users = update_users - {previous_owner.id, }
# Notify the new owner that the bot has been added.
if not bot_owner.is_realm_admin:
add_event = created_bot_event(user_profile)
send_event(user_profile.realm, add_event, {bot_owner.id, })
# Do not send update event for bot_owner.
update_users = update_users - {bot_owner.id, }
send_event(user_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(email=user_profile.email,
user_id=user_profile.id,
owner_id=user_profile.bot_owner.id,
)),
update_users)
# Since `bot_owner_id` is included in the user profile dict we need
# to update the users dict with the new bot owner id
event = dict(
type="realm_user",
op="update",
person=dict(
user_id=user_profile.id,
bot_owner_id=user_profile.bot_owner.id,
),
) # type: Dict[str, Any]
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def do_change_tos_version(user_profile: UserProfile, tos_version: str) -> None:
user_profile.tos_version = tos_version
user_profile.save(update_fields=["tos_version"])
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile,
modified_user=user_profile,
event_type=RealmAuditLog.USER_TOS_VERSION_CHANGED,
event_time=event_time)
def do_regenerate_api_key(user_profile: UserProfile, acting_user: UserProfile) -> str:
old_api_key = user_profile.api_key
new_api_key = generate_api_key()
user_profile.api_key = new_api_key
user_profile.save(update_fields=["api_key"])
# We need to explicitly delete the old API key from our caches,
# because the on-save handler for flushing the UserProfile object
# in zerver/lib/cache.py only has access to the new API key.
cache_delete(user_profile_by_api_key_cache_key(old_api_key))
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user,
modified_user=user_profile, event_type=RealmAuditLog.USER_API_KEY_CHANGED,
event_time=event_time)
if user_profile.is_bot:
send_event(user_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(email=user_profile.email,
user_id=user_profile.id,
api_key=new_api_key,
)),
bot_owner_user_ids(user_profile))
event = {'type': 'clear_push_device_tokens',
'user_profile_id': user_profile.id}
queue_json_publish("deferred_work", event)
return new_api_key
def notify_avatar_url_change(user_profile: UserProfile) -> None:
if user_profile.is_bot:
send_event(user_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(email=user_profile.email,
user_id=user_profile.id,
avatar_url=avatar_url(user_profile),
)),
bot_owner_user_ids(user_profile))
payload = dict(
email=user_profile.email,
avatar_source=user_profile.avatar_source,
avatar_url=avatar_url(user_profile),
avatar_url_medium=avatar_url(user_profile, medium=True),
user_id=user_profile.id
)
send_event(user_profile.realm,
dict(type='realm_user',
op='update',
person=payload),
active_user_ids(user_profile.realm_id))
def do_change_avatar_fields(user_profile: UserProfile, avatar_source: str) -> None:
user_profile.avatar_source = avatar_source
user_profile.avatar_version += 1
user_profile.save(update_fields=["avatar_source", "avatar_version"])
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, modified_user=user_profile,
event_type=RealmAuditLog.USER_AVATAR_SOURCE_CHANGED,
extra_data={'avatar_source': avatar_source},
event_time=event_time)
notify_avatar_url_change(user_profile)
def do_delete_avatar_image(user: UserProfile) -> None:
do_change_avatar_fields(user, UserProfile.AVATAR_FROM_GRAVATAR)
delete_avatar_image(user)
def do_change_icon_source(realm: Realm, icon_source: str, log: bool=True) -> None:
realm.icon_source = icon_source
realm.icon_version += 1
realm.save(update_fields=["icon_source", "icon_version"])
if log:
log_event({'type': 'realm_change_icon',
'realm': realm.string_id,
'icon_source': icon_source})
send_event(realm,
dict(type='realm',
op='update_dict',
property="icon",
data=dict(icon_source=realm.icon_source,
icon_url=realm_icon_url(realm))),
active_user_ids(realm.id))
def do_change_logo_source(realm: Realm, logo_source: str, night: bool) -> None:
if not night:
realm.logo_source = logo_source
realm.logo_version += 1
realm.save(update_fields=["logo_source", "logo_version"])
else:
realm.night_logo_source = logo_source
realm.night_logo_version += 1
realm.save(update_fields=["night_logo_source", "night_logo_version"])
RealmAuditLog.objects.create(event_type=RealmAuditLog.REALM_LOGO_CHANGED,
realm=realm, event_time=timezone_now())
event = dict(type='realm',
op='update_dict',
property="night_logo" if night else "logo",
data=get_realm_logo_data(realm, night))
send_event(realm, event, active_user_ids(realm.id))
def do_change_plan_type(realm: Realm, plan_type: int) -> None:
old_value = realm.plan_type
realm.plan_type = plan_type
realm.save(update_fields=['plan_type'])
RealmAuditLog.objects.create(event_type=RealmAuditLog.REALM_PLAN_TYPE_CHANGED,
realm=realm, event_time=timezone_now(),
extra_data={'old_value': old_value, 'new_value': plan_type})
if plan_type == Realm.STANDARD:
realm.max_invites = Realm.INVITES_STANDARD_REALM_DAILY_MAX
realm.message_visibility_limit = None
realm.upload_quota_gb = Realm.UPLOAD_QUOTA_STANDARD
elif plan_type == Realm.SELF_HOSTED:
realm.max_invites = None # type: ignore # Apparent mypy bug with Optional[int] setter.
realm.message_visibility_limit = None
realm.upload_quota_gb = None
elif plan_type == Realm.STANDARD_FREE:
realm.max_invites = Realm.INVITES_STANDARD_REALM_DAILY_MAX
realm.message_visibility_limit = None
realm.upload_quota_gb = Realm.UPLOAD_QUOTA_STANDARD
elif plan_type == Realm.LIMITED:
realm.max_invites = settings.INVITES_DEFAULT_REALM_DAILY_MAX
realm.message_visibility_limit = Realm.MESSAGE_VISIBILITY_LIMITED
realm.upload_quota_gb = Realm.UPLOAD_QUOTA_LIMITED
else:
raise AssertionError("Invalid plan type")
update_first_visible_message_id(realm)
realm.save(update_fields=['_max_invites', 'message_visibility_limit', 'upload_quota_gb'])
event = {'type': 'realm', 'op': 'update', 'property': 'plan_type', 'value': plan_type,
'extra_data': {'upload_quota': realm.upload_quota_bytes()}}
send_event(realm, event, active_user_ids(realm.id))
def do_change_default_sending_stream(user_profile: UserProfile, stream: Optional[Stream],
log: bool=True) -> None:
user_profile.default_sending_stream = stream
user_profile.save(update_fields=['default_sending_stream'])
if log:
log_event({'type': 'user_change_default_sending_stream',
'user': user_profile.email,
'stream': str(stream)})
if user_profile.is_bot:
if stream:
stream_name = stream.name # type: Optional[str]
else:
stream_name = None
send_event(user_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(email=user_profile.email,
user_id=user_profile.id,
default_sending_stream=stream_name,
)),
bot_owner_user_ids(user_profile))
def do_change_default_events_register_stream(user_profile: UserProfile,
stream: Optional[Stream],
log: bool=True) -> None:
user_profile.default_events_register_stream = stream
user_profile.save(update_fields=['default_events_register_stream'])
if log:
log_event({'type': 'user_change_default_events_register_stream',
'user': user_profile.email,
'stream': str(stream)})
if user_profile.is_bot:
if stream:
stream_name = stream.name # type: Optional[str]
else:
stream_name = None
send_event(user_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(email=user_profile.email,
user_id=user_profile.id,
default_events_register_stream=stream_name,
)),
bot_owner_user_ids(user_profile))
def do_change_default_all_public_streams(user_profile: UserProfile, value: bool,
log: bool=True) -> None:
user_profile.default_all_public_streams = value
user_profile.save(update_fields=['default_all_public_streams'])
if log:
log_event({'type': 'user_change_default_all_public_streams',
'user': user_profile.email,
'value': str(value)})
if user_profile.is_bot:
send_event(user_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(email=user_profile.email,
user_id=user_profile.id,
default_all_public_streams=user_profile.default_all_public_streams,
)),
bot_owner_user_ids(user_profile))
def do_change_is_admin(user_profile: UserProfile, value: bool,
permission: str='administer') -> None:
# TODO: This function and do_change_is_guest should be merged into
# a single do_change_user_role function in a future refactor.
if permission == "administer":
old_value = user_profile.role
if value:
user_profile.role = UserProfile.ROLE_REALM_ADMINISTRATOR
else:
user_profile.role = UserProfile.ROLE_MEMBER
user_profile.save(update_fields=["role"])
elif permission == "api_super_user":
user_profile.is_api_super_user = value
user_profile.save(update_fields=["is_api_super_user"])
else:
raise AssertionError("Invalid admin permission")
if permission == 'administer':
RealmAuditLog.objects.create(
realm=user_profile.realm, modified_user=user_profile,
event_type=RealmAuditLog.USER_ROLE_CHANGED, event_time=timezone_now(),
extra_data=ujson.dumps({
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: UserProfile.ROLE_REALM_ADMINISTRATOR,
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
}))
event = dict(type="realm_user", op="update",
person=dict(email=user_profile.email,
user_id=user_profile.id,
is_admin=value))
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def do_change_is_guest(user_profile: UserProfile, value: bool) -> None:
# TODO: This function and do_change_is_admin should be merged into
# a single do_change_user_role function in a future refactor.
old_value = user_profile.role
if value:
user_profile.role = UserProfile.ROLE_GUEST
else:
user_profile.role = UserProfile.ROLE_MEMBER
user_profile.save(update_fields=["role"])
RealmAuditLog.objects.create(
realm=user_profile.realm, modified_user=user_profile,
event_type=RealmAuditLog.USER_ROLE_CHANGED, event_time=timezone_now(),
extra_data=ujson.dumps({
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: UserProfile.ROLE_GUEST,
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
}))
event = dict(type="realm_user", op="update",
person=dict(email=user_profile.email,
user_id=user_profile.id,
is_guest=value))
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def do_change_stream_invite_only(stream: Stream, invite_only: bool,
history_public_to_subscribers: Optional[bool]=None) -> None:
history_public_to_subscribers = get_default_value_for_history_public_to_subscribers(
stream.realm,
invite_only,
history_public_to_subscribers
)
stream.invite_only = invite_only
stream.history_public_to_subscribers = history_public_to_subscribers
stream.save(update_fields=['invite_only', 'history_public_to_subscribers'])
event = dict(
op="update",
type="stream",
property="invite_only",
value=invite_only,
history_public_to_subscribers=history_public_to_subscribers,
stream_id=stream.id,
name=stream.name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
def do_change_stream_web_public(stream: Stream, is_web_public: bool) -> None:
stream.is_web_public = is_web_public
stream.save(update_fields=['is_web_public'])
def do_change_stream_post_policy(stream: Stream, stream_post_policy: int) -> None:
stream.stream_post_policy = stream_post_policy
stream.save(update_fields=['stream_post_policy'])
event = dict(
op="update",
type="stream",
property="stream_post_policy",
value=stream_post_policy,
stream_id=stream.id,
name=stream.name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
# Backwards-compatibility code: We removed the
# is_announcement_only property in early 2020, but we send a
# duplicate event for legacy mobile clients that might want the
# data.
event = dict(
op="update",
type="stream",
property="is_announcement_only",
value=stream.stream_post_policy == Stream.STREAM_POST_POLICY_ADMINS,
stream_id=stream.id,
name=stream.name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
def do_rename_stream(stream: Stream,
new_name: str,
user_profile: UserProfile,
log: bool=True) -> Dict[str, str]:
old_name = stream.name
stream.name = new_name
stream.save(update_fields=["name"])
if log:
log_event({'type': 'stream_name_change',
'realm': stream.realm.string_id,
'new_name': new_name})
recipient_id = stream.recipient_id
messages = Message.objects.filter(recipient_id=recipient_id).only("id")
# Update the display recipient and stream, which are easy single
# items to set.
old_cache_key = get_stream_cache_key(old_name, stream.realm_id)
new_cache_key = get_stream_cache_key(stream.name, stream.realm_id)
if old_cache_key != new_cache_key:
cache_delete(old_cache_key)
cache_set(new_cache_key, stream)
cache_set(display_recipient_cache_key(recipient_id), stream.name)
# Delete cache entries for everything else, which is cheaper and
# clearer than trying to set them. display_recipient is the out of
# date field in all cases.
cache_delete_many(
to_dict_cache_key_id(message.id) for message in messages)
new_email = encode_email_address(stream, show_sender=True)
# We will tell our users to essentially
# update stream.name = new_name where name = old_name
# and update stream.email = new_email where name = old_name.
# We could optimize this by trying to send one message, but the
# client code really wants one property update at a time, and
# updating stream names is a pretty infrequent operation.
# More importantly, we want to key these updates by id, not name,
# since id is the immutable primary key, and obviously name is not.
data_updates = [
['email_address', new_email],
['name', new_name],
]
for property, value in data_updates:
event = dict(
op="update",
type="stream",
property=property,
value=value,
stream_id=stream.id,
name=old_name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
sender = get_system_bot(settings.NOTIFICATION_BOT)
internal_send_stream_message(
stream.realm,
sender,
stream,
Realm.STREAM_EVENTS_NOTIFICATION_TOPIC,
_('@_**%(user_name)s|%(user_id)d** renamed stream **%(old_stream_name)s** to '
'**%(new_stream_name)s**.') % {
'user_name': user_profile.full_name,
'user_id': user_profile.id,
'old_stream_name': old_name,
'new_stream_name': new_name}
)
# Even though the token doesn't change, the web client needs to update the
# email forwarding address to display the correctly-escaped new name.
return {"email_address": new_email}
def do_change_stream_description(stream: Stream, new_description: str) -> None:
stream.description = new_description
stream.rendered_description = render_stream_description(new_description)
stream.save(update_fields=['description', 'rendered_description'])
event = dict(
type='stream',
op='update',
property='description',
name=stream.name,
stream_id=stream.id,
value=new_description,
rendered_description=stream.rendered_description
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
def do_create_realm(string_id: str, name: str,
emails_restricted_to_domains: Optional[bool]=None) -> Realm:
if Realm.objects.filter(string_id=string_id).exists():
raise AssertionError("Realm %s already exists!" % (string_id,))
if not server_initialized():
logging.info("Server not yet initialized. Creating the internal realm first.")
create_internal_realm()
kwargs = {} # type: Dict[str, Any]
if emails_restricted_to_domains is not None:
kwargs['emails_restricted_to_domains'] = emails_restricted_to_domains
realm = Realm(string_id=string_id, name=name, **kwargs)
realm.save()
# Create stream once Realm object has been saved
notifications_stream = ensure_stream(
realm, Realm.DEFAULT_NOTIFICATION_STREAM_NAME,
stream_description="Everyone is added to this stream by default. Welcome! :octopus:")
realm.notifications_stream = notifications_stream
# With the current initial streams situation, the only public
# stream is the notifications_stream.
DefaultStream.objects.create(stream=notifications_stream, realm=realm)
signup_notifications_stream = ensure_stream(
realm, Realm.INITIAL_PRIVATE_STREAM_NAME, invite_only=True,
stream_description="A private stream for core team members.")
realm.signup_notifications_stream = signup_notifications_stream
realm.save(update_fields=['notifications_stream', 'signup_notifications_stream'])
if settings.BILLING_ENABLED:
do_change_plan_type(realm, Realm.LIMITED)
# Log the event
log_event({"type": "realm_created",
"string_id": string_id,
"emails_restricted_to_domains": emails_restricted_to_domains})
# Send a notification to the admin realm
signup_message = "Signups enabled"
sender = get_system_bot(settings.NOTIFICATION_BOT)
admin_realm = sender.realm
try:
signups_stream = get_signups_stream(admin_realm)
topic = realm.display_subdomain
internal_send_stream_message(
admin_realm,
sender,
signups_stream,
topic,
signup_message
)
except Stream.DoesNotExist: # nocoverage
# If the signups stream hasn't been created in the admin
# realm, don't auto-create it to send to it; just do nothing.
pass
return realm
def do_change_notification_settings(user_profile: UserProfile, name: str,
value: Union[bool, int, str], log: bool=True) -> None:
"""Takes in a UserProfile object, the name of a global notification
preference to update, and the value to update to
"""
notification_setting_type = UserProfile.notification_setting_types[name]
assert isinstance(value, notification_setting_type), (
'Cannot update %s: %s is not an instance of %s' % (
name, value, notification_setting_type,))
setattr(user_profile, name, value)
# Disabling digest emails should clear a user's email queue
if name == 'enable_digest_emails' and not value:
clear_scheduled_emails([user_profile.id], ScheduledEmail.DIGEST)
user_profile.save(update_fields=[name])
event = {'type': 'update_global_notifications',
'user': user_profile.email,
'notification_name': name,
'setting': value}
if log:
log_event(event)
send_event(user_profile.realm, event, [user_profile.id])
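# Illustrative sketch, not part of the original module: disabling digest
# emails both persists the setting and clears any queued digest emails for
# the user, per the special case above.
#
#     do_change_notification_settings(user_profile, 'enable_digest_emails', False)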
def do_change_enter_sends(user_profile: UserProfile, enter_sends: bool) -> None:
user_profile.enter_sends = enter_sends
user_profile.save(update_fields=["enter_sends"])
def do_set_user_display_setting(user_profile: UserProfile,
setting_name: str,
setting_value: Union[bool, str, int]) -> None:
property_type = UserProfile.property_types[setting_name]
assert isinstance(setting_value, property_type)
setattr(user_profile, setting_name, setting_value)
user_profile.save(update_fields=[setting_name])
event = {'type': 'update_display_settings',
'user': user_profile.email,
'setting_name': setting_name,
'setting': setting_value}
if setting_name == "default_language":
assert isinstance(setting_value, str)
event['language_name'] = get_language_name(setting_value)
send_event(user_profile.realm, event, [user_profile.id])
# Updates to the timezone display setting are sent to all users
if setting_name == "timezone":
payload = dict(email=user_profile.email,
user_id=user_profile.id,
timezone=user_profile.timezone)
send_event(user_profile.realm,
dict(type='realm_user', op='update', person=payload),
active_user_ids(user_profile.realm_id))
def lookup_default_stream_groups(default_stream_group_names: List[str],
realm: Realm) -> List[DefaultStreamGroup]:
default_stream_groups = []
for group_name in default_stream_group_names:
try:
default_stream_group = DefaultStreamGroup.objects.get(
name=group_name, realm=realm)
except DefaultStreamGroup.DoesNotExist:
raise JsonableError(_('Invalid default stream group %s') % (group_name,))
default_stream_groups.append(default_stream_group)
return default_stream_groups
def notify_default_streams(realm: Realm) -> None:
event = dict(
type="default_streams",
default_streams=streams_to_dicts_sorted(get_default_streams_for_realm(realm.id))
)
send_event(realm, event, active_non_guest_user_ids(realm.id))
def notify_default_stream_groups(realm: Realm) -> None:
event = dict(
type="default_stream_groups",
default_stream_groups=default_stream_groups_to_dicts_sorted(get_default_stream_groups(realm))
)
send_event(realm, event, active_non_guest_user_ids(realm.id))
def do_add_default_stream(stream: Stream) -> None:
realm_id = stream.realm_id
stream_id = stream.id
if not DefaultStream.objects.filter(realm_id=realm_id, stream_id=stream_id).exists():
DefaultStream.objects.create(realm_id=realm_id, stream_id=stream_id)
notify_default_streams(stream.realm)
def do_remove_default_stream(stream: Stream) -> None:
realm_id = stream.realm_id
stream_id = stream.id
DefaultStream.objects.filter(realm_id=realm_id, stream_id=stream_id).delete()
notify_default_streams(stream.realm)
def do_create_default_stream_group(realm: Realm, group_name: str,
description: str, streams: List[Stream]) -> None:
default_streams = get_default_streams_for_realm(realm.id)
for stream in streams:
if stream in default_streams:
raise JsonableError(_(
"'%(stream_name)s' is a default stream and cannot be added to '%(group_name)s'")
% {'stream_name': stream.name, 'group_name': group_name})
check_default_stream_group_name(group_name)
(group, created) = DefaultStreamGroup.objects.get_or_create(
name=group_name, realm=realm, description=description)
if not created:
raise JsonableError(_("Default stream group '%(group_name)s' already exists")
% {'group_name': group_name})
group.streams.set(streams)
notify_default_stream_groups(realm)
def do_add_streams_to_default_stream_group(realm: Realm, group: DefaultStreamGroup,
streams: List[Stream]) -> None:
default_streams = get_default_streams_for_realm(realm.id)
for stream in streams:
if stream in default_streams:
raise JsonableError(_(
"'%(stream_name)s' is a default stream and cannot be added to '%(group_name)s'")
% {'stream_name': stream.name, 'group_name': group.name})
if stream in group.streams.all():
raise JsonableError(_(
"Stream '%(stream_name)s' is already present in default stream group '%(group_name)s'")
% {'stream_name': stream.name, 'group_name': group.name})
group.streams.add(stream)
group.save()
notify_default_stream_groups(realm)
def do_remove_streams_from_default_stream_group(realm: Realm, group: DefaultStreamGroup,
streams: List[Stream]) -> None:
for stream in streams:
if stream not in group.streams.all():
raise JsonableError(_(
"Stream '%(stream_name)s' is not present in default stream group '%(group_name)s'")
% {'stream_name': stream.name, 'group_name': group.name})
group.streams.remove(stream)
group.save()
notify_default_stream_groups(realm)
def do_change_default_stream_group_name(realm: Realm, group: DefaultStreamGroup,
new_group_name: str) -> None:
if group.name == new_group_name:
raise JsonableError(_("This default stream group is already named '%s'") % (new_group_name,))
if DefaultStreamGroup.objects.filter(name=new_group_name, realm=realm).exists():
raise JsonableError(_("Default stream group '%s' already exists") % (new_group_name,))
group.name = new_group_name
group.save()
notify_default_stream_groups(realm)
def do_change_default_stream_group_description(realm: Realm, group: DefaultStreamGroup,
new_description: str) -> None:
group.description = new_description
group.save()
notify_default_stream_groups(realm)
def do_remove_default_stream_group(realm: Realm, group: DefaultStreamGroup) -> None:
group.delete()
notify_default_stream_groups(realm)
def get_default_streams_for_realm(realm_id: int) -> List[Stream]:
return [default.stream for default in
DefaultStream.objects.select_related().filter(realm_id=realm_id)]
def get_default_subs(user_profile: UserProfile) -> List[Stream]:
# Right now default streams are realm-wide. This wrapper gives us flexibility
# to some day further customize how we set up default streams for new users.
return get_default_streams_for_realm(user_profile.realm_id)
# Returns default streams in a JSON-serializable format.
def streams_to_dicts_sorted(streams: List[Stream]) -> List[Dict[str, Any]]:
return sorted([stream.to_dict() for stream in streams], key=lambda elt: elt["name"])
def default_stream_groups_to_dicts_sorted(groups: List[DefaultStreamGroup]) -> List[Dict[str, Any]]:
return sorted([group.to_dict() for group in groups], key=lambda elt: elt["name"])
def do_update_user_activity_interval(user_profile: UserProfile,
log_time: datetime.datetime) -> None:
effective_end = log_time + UserActivityInterval.MIN_INTERVAL_LENGTH
# This code isn't perfect, because with various races we might end
# up creating two overlapping intervals, but that shouldn't happen
# often, and can be corrected for in post-processing
try:
last = UserActivityInterval.objects.filter(user_profile=user_profile).order_by("-end")[0]
# There are two ways our intervals could overlap:
# (1) The start of the new interval could be inside the old interval
# (2) The end of the new interval could be inside the old interval
# In either case, we just extend the old interval to include the new interval.
if ((log_time <= last.end and log_time >= last.start) or
(effective_end <= last.end and effective_end >= last.start)):
last.end = max(last.end, effective_end)
last.start = min(last.start, log_time)
last.save(update_fields=["start", "end"])
return
except IndexError:
pass
# Otherwise, the intervals don't overlap, so we should make a new one
UserActivityInterval.objects.create(user_profile=user_profile, start=log_time,
end=effective_end)
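# Worked example of the interval-merge logic above (times are made up and
# MIN_INTERVAL_LENGTH of 15 minutes is hypothetical): if the most recent
# interval is [10:00, 10:15] and a new log_time of 10:10 arrives, the
# effective_end is 10:25; since 10:10 falls inside the old interval, the
# existing row is extended in place to [10:00, 10:25] rather than creating a
# second, overlapping row.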
@statsd_increment('user_activity')
def do_update_user_activity(user_profile_id: int,
client_id: int,
query: str,
count: int,
log_time: datetime.datetime) -> None:
(activity, created) = UserActivity.objects.get_or_create(
user_profile_id = user_profile_id,
client_id = client_id,
query = query,
defaults={'last_visit': log_time, 'count': count})
if not created:
activity.count += count
activity.last_visit = log_time
activity.save(update_fields=["last_visit", "count"])
def send_presence_changed(user_profile: UserProfile, presence: UserPresence) -> None:
presence_dict = presence.to_dict()
event = dict(type="presence",
email=user_profile.email,
user_id=user_profile.id,
server_timestamp=time.time(),
presence={presence_dict['client']: presence_dict})
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def consolidate_client(client: Client) -> Client:
    # The web app reports a client as 'website'.
    # The desktop app reports a client as ZulipDesktop
    # because it sets a custom user agent. We want both
    # to count as web users, so we alias ZulipDesktop to website.
if client.name in ['ZulipDesktop']:
return get_client('website')
else:
return client
@statsd_increment('user_presence')
def do_update_user_presence(user_profile: UserProfile,
client: Client,
log_time: datetime.datetime,
status: int) -> None:
client = consolidate_client(client)
defaults = dict(
timestamp=log_time,
status=status,
realm_id=user_profile.realm_id
)
(presence, created) = UserPresence.objects.get_or_create(
user_profile = user_profile,
client = client,
defaults = defaults
)
stale_status = (log_time - presence.timestamp) > datetime.timedelta(minutes=1, seconds=10)
was_idle = presence.status == UserPresence.IDLE
became_online = (status == UserPresence.ACTIVE) and (stale_status or was_idle)
# If an object was created, it has already been saved.
#
# We suppress changes from ACTIVE to IDLE before stale_status is reached;
# this protects us from the user having two clients open: one active, the
# other idle. Without this check, we would constantly toggle their status
# between the two states.
if not created and stale_status or was_idle or status == presence.status:
# The following block attempts to only update the "status"
# field in the event that it actually changed. This is
# important to avoid flushing the UserPresence cache when the
# data it would return to a client hasn't actually changed
# (see the UserPresence post_save hook for details).
presence.timestamp = log_time
update_fields = ["timestamp"]
if presence.status != status:
presence.status = status
update_fields.append("status")
presence.save(update_fields=update_fields)
if not user_profile.realm.presence_disabled and (created or became_online):
# Push event to all users in the realm so they see the new user
# appear in the presence list immediately, or the newly online
# user without delay. Note that we won't send an update here for a
# timestamp update, because we rely on the browser to ping us every 50
# seconds for realm-wide status updates, and those updates should have
# recent timestamps, which means the browser won't think active users
# have gone idle. If we were more aggressive in this function about
# sending timestamp updates, we could eliminate the ping responses, but
# that's not a high priority for now, considering that most of our non-MIT
# realms are pretty small.
send_presence_changed(user_profile, presence)
def update_user_activity_interval(user_profile: UserProfile, log_time: datetime.datetime) -> None:
event = {'user_profile_id': user_profile.id,
'time': datetime_to_timestamp(log_time)}
queue_json_publish("user_activity_interval", event)
def update_user_presence(user_profile: UserProfile, client: Client, log_time: datetime.datetime,
status: int, new_user_input: bool) -> None:
event = {'user_profile_id': user_profile.id,
'status': status,
'time': datetime_to_timestamp(log_time),
'client': client.name}
queue_json_publish("user_presence", event)
if new_user_input:
update_user_activity_interval(user_profile, log_time)
def do_update_pointer(user_profile: UserProfile, client: Client,
pointer: int, update_flags: bool=False) -> None:
prev_pointer = user_profile.pointer
user_profile.pointer = pointer
user_profile.save(update_fields=["pointer"])
if update_flags: # nocoverage
# This block of code is compatibility code for the
        # legacy/original native Zulip Android app. It's a shim
        # that will mark as read any messages up until the pointer
        # move; we expect to remove this feature entirely before long,
        # when we drop support for the old Android app.
app_message_ids = UserMessage.objects.filter(
user_profile=user_profile,
message__id__gt=prev_pointer,
message__id__lte=pointer).extra(where=[
UserMessage.where_unread(),
UserMessage.where_active_push_notification(),
]).values_list("message_id", flat=True)
UserMessage.objects.filter(user_profile=user_profile,
message__id__gt=prev_pointer,
message__id__lte=pointer).extra(where=[UserMessage.where_unread()]) \
.update(flags=F('flags').bitor(UserMessage.flags.read))
do_clear_mobile_push_notifications_for_ids(user_profile, app_message_ids)
event = dict(type='pointer', pointer=pointer)
send_event(user_profile.realm, event, [user_profile.id])
def do_update_user_status(user_profile: UserProfile,
away: Optional[bool],
status_text: Optional[str],
client_id: int) -> None:
if away:
status = UserStatus.AWAY
else:
status = UserStatus.NORMAL
realm = user_profile.realm
update_user_status(
user_profile_id=user_profile.id,
status=status,
status_text=status_text,
client_id=client_id,
)
event = dict(
type='user_status',
user_id=user_profile.id,
)
if away is not None:
event['away'] = away
if status_text is not None:
event['status_text'] = status_text
send_event(realm, event, active_user_ids(realm.id))
def do_mark_all_as_read(user_profile: UserProfile, client: Client) -> int:
log_statsd_event('bankruptcy')
msgs = UserMessage.objects.filter(
user_profile=user_profile
).extra(
where=[UserMessage.where_unread()]
)
count = msgs.update(
flags=F('flags').bitor(UserMessage.flags.read)
)
event = dict(
type='update_message_flags',
operation='add',
flag='read',
messages=[], # we don't send messages, since the client reloads anyway
all=True
)
send_event(user_profile.realm, event, [user_profile.id])
statsd.incr("mark_all_as_read", count)
all_push_message_ids = UserMessage.objects.filter(
user_profile=user_profile,
).extra(
where=[UserMessage.where_active_push_notification()],
).values_list("message_id", flat=True)[0:10000]
do_clear_mobile_push_notifications_for_ids(user_profile, all_push_message_ids)
return count
def do_mark_stream_messages_as_read(user_profile: UserProfile,
client: Client,
stream: Stream,
topic_name: Optional[str]=None) -> int:
log_statsd_event('mark_stream_as_read')
msgs = UserMessage.objects.filter(
user_profile=user_profile
)
recipient = stream.recipient
msgs = msgs.filter(message__recipient=recipient)
if topic_name:
msgs = filter_by_topic_name_via_message(
query=msgs,
topic_name=topic_name,
)
msgs = msgs.extra(
where=[UserMessage.where_unread()]
)
message_ids = list(msgs.values_list('message__id', flat=True))
count = msgs.update(
flags=F('flags').bitor(UserMessage.flags.read)
)
event = dict(
type='update_message_flags',
operation='add',
flag='read',
messages=message_ids,
all=False,
)
send_event(user_profile.realm, event, [user_profile.id])
do_clear_mobile_push_notifications_for_ids(user_profile, message_ids)
statsd.incr("mark_stream_as_read", count)
return count
def do_clear_mobile_push_notifications_for_ids(user_profile: UserProfile,
message_ids: List[int]) -> None:
filtered_message_ids = list(UserMessage.objects.filter(
message_id__in=message_ids,
user_profile=user_profile,
).extra(
where=[UserMessage.where_active_push_notification()],
).values_list('message_id', flat=True))
num_detached = settings.MAX_UNBATCHED_REMOVE_NOTIFICATIONS - 1
for message_id in filtered_message_ids[:num_detached]:
# Older clients (all clients older than 2019-02-13) will only
# see the first message ID in a given notification-message.
# To help them out, send a few of these separately.
queue_json_publish("missedmessage_mobile_notifications", {
"type": "remove",
"user_profile_id": user_profile.id,
"message_ids": [message_id],
})
if filtered_message_ids[num_detached:]:
queue_json_publish("missedmessage_mobile_notifications", {
"type": "remove",
"user_profile_id": user_profile.id,
"message_ids": filtered_message_ids[num_detached:],
})
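# Batching sketch for the queue publishes above (the setting value here is
# hypothetical): with MAX_UNBATCHED_REMOVE_NOTIFICATIONS == 10, num_detached
# is 9, so the first 9 message IDs are published as individual "remove"
# events for the benefit of older clients, and any remaining IDs are
# published together in one final batched "remove" event.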
def do_update_message_flags(user_profile: UserProfile,
client: Client,
operation: str,
flag: str,
messages: List[int]) -> int:
valid_flags = [item for item in UserMessage.flags
if item not in UserMessage.NON_API_FLAGS]
if flag not in valid_flags:
raise JsonableError(_("Invalid flag: '%s'") % (flag,))
if flag in UserMessage.NON_EDITABLE_FLAGS:
raise JsonableError(_("Flag not editable: '%s'") % (flag,))
flagattr = getattr(UserMessage.flags, flag)
assert messages is not None
msgs = UserMessage.objects.filter(user_profile=user_profile,
message__id__in=messages)
# This next block allows you to star any message, even those you
# didn't receive (e.g. because you're looking at a public stream
# you're not subscribed to, etc.). The problem is that starring
# is a flag boolean on UserMessage, and UserMessage rows are
# normally created only when you receive a message to support
# searching your personal history. So we need to create one. We
# add UserMessage.flags.historical, so that features that need
# "messages you actually received" can exclude these UserMessages.
if msgs.count() == 0:
if not len(messages) == 1:
raise JsonableError(_("Invalid message(s)"))
if flag != "starred":
raise JsonableError(_("Invalid message(s)"))
# Validate that the user could have read the relevant message
message = access_message(user_profile, messages[0])[0]
# OK, this is a message that you legitimately have access
# to via narrowing to the stream it is on, even though you
# didn't actually receive it. So we create a historical,
# read UserMessage message row for you to star.
UserMessage.objects.create(user_profile=user_profile,
message=message,
flags=UserMessage.flags.historical | UserMessage.flags.read)
if operation == 'add':
count = msgs.update(flags=F('flags').bitor(flagattr))
elif operation == 'remove':
count = msgs.update(flags=F('flags').bitand(~flagattr))
else:
raise AssertionError("Invalid message flags operation")
event = {'type': 'update_message_flags',
'operation': operation,
'flag': flag,
'messages': messages,
'all': False}
send_event(user_profile.realm, event, [user_profile.id])
if flag == "read" and operation == "add":
do_clear_mobile_push_notifications_for_ids(user_profile, messages)
statsd.incr("flags.%s.%s" % (flag, operation), count)
return count
def subscribed_to_stream(user_profile: UserProfile, stream_id: int) -> bool:
return Subscription.objects.filter(
user_profile=user_profile,
active=True,
recipient__type=Recipient.STREAM,
recipient__type_id=stream_id).exists()
def truncate_content(content: str, max_length: int, truncation_message: str) -> str:
if len(content) > max_length:
content = content[:max_length - len(truncation_message)] + truncation_message
return content
def truncate_body(body: str) -> str:
return truncate_content(body, MAX_MESSAGE_LENGTH, "\n[message truncated]")
def truncate_topic(topic: str) -> str:
return truncate_content(topic, MAX_TOPIC_NAME_LENGTH, "...")
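# Example of the truncation helpers above: truncate_content keeps the result
# at exactly max_length by reserving room for the truncation marker, e.g.
#
#     truncate_content("abcdefghij", max_length=8, truncation_message="...")
#     # -> "abcde..."  (5 characters of content + 3-character marker = 8)
#
# truncate_body and truncate_topic apply the same rule using the message and
# topic length limits defined elsewhere in the codebase.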
MessageUpdateUserInfoResult = TypedDict('MessageUpdateUserInfoResult', {
'message_user_ids': Set[int],
'mention_user_ids': Set[int],
})
def get_user_info_for_message_updates(message_id: int) -> MessageUpdateUserInfoResult:
# We exclude UserMessage.flags.historical rows since those
# users did not receive the message originally, and thus
# probably are not relevant for reprocessed alert_words,
# mentions and similar rendering features. This may be a
# decision we change in the future.
query = UserMessage.objects.filter(
message=message_id,
flags=~UserMessage.flags.historical
).values('user_profile_id', 'flags')
rows = list(query)
message_user_ids = {
row['user_profile_id']
for row in rows
}
mask = UserMessage.flags.mentioned | UserMessage.flags.wildcard_mentioned
mention_user_ids = {
row['user_profile_id']
for row in rows
if int(row['flags']) & mask
}
return dict(
message_user_ids=message_user_ids,
mention_user_ids=mention_user_ids,
)
def update_user_message_flags(message: Message, ums: Iterable[UserMessage]) -> None:
wildcard = message.mentions_wildcard
mentioned_ids = message.mentions_user_ids
ids_with_alert_words = message.user_ids_with_alert_words
changed_ums = set() # type: Set[UserMessage]
def update_flag(um: UserMessage, should_set: bool, flag: int) -> None:
if should_set:
if not (um.flags & flag):
um.flags |= flag
changed_ums.add(um)
else:
if (um.flags & flag):
um.flags &= ~flag
changed_ums.add(um)
for um in ums:
has_alert_word = um.user_profile_id in ids_with_alert_words
update_flag(um, has_alert_word, UserMessage.flags.has_alert_word)
mentioned = um.user_profile_id in mentioned_ids
update_flag(um, mentioned, UserMessage.flags.mentioned)
update_flag(um, wildcard, UserMessage.flags.wildcard_mentioned)
for um in changed_ums:
um.save(update_fields=['flags'])
def update_to_dict_cache(changed_messages: List[Message]) -> List[int]:
"""Updates the message as stored in the to_dict cache (for serving
messages)."""
items_for_remote_cache = {}
message_ids = []
for changed_message in changed_messages:
message_ids.append(changed_message.id)
key = to_dict_cache_key_id(changed_message.id)
value = MessageDict.to_dict_uncached(changed_message)
items_for_remote_cache[key] = (value,)
cache_set_many(items_for_remote_cache)
return message_ids
# We use transaction.atomic to support select_for_update in the attachment codepath.
@transaction.atomic
def do_update_embedded_data(user_profile: UserProfile,
message: Message,
content: Optional[str],
rendered_content: Optional[str]) -> None:
event = {
'type': 'update_message',
'sender': user_profile.email,
'message_id': message.id} # type: Dict[str, Any]
changed_messages = [message]
ums = UserMessage.objects.filter(message=message.id)
if content is not None:
update_user_message_flags(message, ums)
message.content = content
message.rendered_content = rendered_content
message.rendered_content_version = bugdown_version
event["content"] = content
event["rendered_content"] = rendered_content
message.save(update_fields=["content", "rendered_content"])
event['message_ids'] = update_to_dict_cache(changed_messages)
def user_info(um: UserMessage) -> Dict[str, Any]:
return {
'id': um.user_profile_id,
'flags': um.flags_list()
}
send_event(user_profile.realm, event, list(map(user_info, ums)))
# We use transaction.atomic to support select_for_update in the attachment codepath.
@transaction.atomic
def do_update_message(user_profile: UserProfile, message: Message, topic_name: Optional[str],
propagate_mode: str, content: Optional[str],
rendered_content: Optional[str], prior_mention_user_ids: Set[int],
mention_user_ids: Set[int], mention_data: Optional[bugdown.MentionData]=None) -> int:
"""
The main function for message editing. A message edit event can
modify:
* the message's content (in which case the caller will have
set both content and rendered_content),
* the topic, in which case the caller will have set topic_name
* or both
    With topic edits, propagate_mode determines whether other messages
    also have their topics edited.
"""
timestamp = timezone_now()
message.last_edit_time = timestamp
event = {'type': 'update_message',
'user_id': user_profile.id,
'edit_timestamp': datetime_to_timestamp(timestamp),
'message_id': message.id} # type: Dict[str, Any]
edit_history_event = {
'user_id': user_profile.id,
'timestamp': event['edit_timestamp'],
} # type: Dict[str, Any]
changed_messages = [message]
stream_being_edited = None
if message.is_stream_message():
stream_id = message.recipient.type_id
stream_being_edited = get_stream_by_id_in_realm(stream_id, user_profile.realm)
event['stream_name'] = stream_being_edited.name
ums = UserMessage.objects.filter(message=message.id)
if content is not None:
assert rendered_content is not None
update_user_message_flags(message, ums)
# mention_data is required if there's a content edit.
assert mention_data is not None
# One could imagine checking realm.allow_edit_history here and
# modifying the events based on that setting, but doing so
# doesn't really make sense. We need to send the edit event
# to clients regardless, and a client already had access to
# the original/pre-edit content of the message anyway. That
# setting must be enforced on the client side, and making a
# change here simply complicates the logic for clients parsing
# edit history events.
event['orig_content'] = message.content
event['orig_rendered_content'] = message.rendered_content
edit_history_event["prev_content"] = message.content
edit_history_event["prev_rendered_content"] = message.rendered_content
edit_history_event["prev_rendered_content_version"] = message.rendered_content_version
message.content = content
message.rendered_content = rendered_content
message.rendered_content_version = bugdown_version
event["content"] = content
event["rendered_content"] = rendered_content
event['prev_rendered_content_version'] = message.rendered_content_version
event['is_me_message'] = Message.is_status_message(content, rendered_content)
# message.has_image and message.has_link will have been
# already updated by bugdown rendering in the caller.
message.has_attachment = check_attachment_reference_change(message)
if message.is_stream_message():
if topic_name is not None:
new_topic_name = topic_name
else:
new_topic_name = message.topic_name()
stream_topic = StreamTopicTarget(
stream_id=stream_id,
topic_name=new_topic_name,
) # type: Optional[StreamTopicTarget]
else:
stream_topic = None
info = get_recipient_info(
recipient=message.recipient,
sender_id=message.sender_id,
stream_topic=stream_topic,
possible_wildcard_mention=mention_data.message_has_wildcards(),
)
event['push_notify_user_ids'] = list(info['push_notify_user_ids'])
event['stream_push_user_ids'] = list(info['stream_push_user_ids'])
event['stream_email_user_ids'] = list(info['stream_email_user_ids'])
event['prior_mention_user_ids'] = list(prior_mention_user_ids)
event['mention_user_ids'] = list(mention_user_ids)
event['presence_idle_user_ids'] = filter_presence_idle_user_ids(info['active_user_ids'])
if message.mentions_wildcard:
event['wildcard_mention_user_ids'] = list(info['wildcard_mention_user_ids'])
else:
event['wildcard_mention_user_ids'] = []
if topic_name is not None:
orig_topic_name = message.topic_name()
topic_name = truncate_topic(topic_name)
event["propagate_mode"] = propagate_mode
message.set_topic_name(topic_name)
event["stream_id"] = message.recipient.type_id
# These fields have legacy field names.
event[ORIG_TOPIC] = orig_topic_name
event[TOPIC_NAME] = topic_name
event[TOPIC_LINKS] = bugdown.topic_links(message.sender.realm_id, topic_name)
edit_history_event[LEGACY_PREV_TOPIC] = orig_topic_name
if propagate_mode in ["change_later", "change_all"]:
assert topic_name is not None
messages_list = update_messages_for_topic_edit(
message=message,
propagate_mode=propagate_mode,
orig_topic_name=orig_topic_name,
topic_name=topic_name,
)
changed_messages += messages_list
if message.edit_history is not None:
edit_history = ujson.loads(message.edit_history)
edit_history.insert(0, edit_history_event)
else:
edit_history = [edit_history_event]
message.edit_history = ujson.dumps(edit_history)
# This does message.save(update_fields=[...])
save_message_for_edit_use_case(message=message)
event['message_ids'] = update_to_dict_cache(changed_messages)
def user_info(um: UserMessage) -> Dict[str, Any]:
return {
'id': um.user_profile_id,
'flags': um.flags_list()
}
def subscriber_info(user_id: int) -> Dict[str, Any]:
return {
'id': user_id,
'flags': ['read']
}
    # The following block arranges for users who are subscribed to a
    # stream and can see history from before they subscribed to get
    # live-updates when old messages are edited (e.g. if the user does
    # a topic edit themself).
    #
    # We still don't send an update event to users who are not
    # subscribed to this stream and don't have a UserMessage row. This
    # means if a non-subscriber is viewing the narrow, they won't get
    # real-time updates. This is a balance between sending
    # message-edit notifications for every public stream to every user
    # in the organization (too expansive, and also not what we do for
    # newly sent messages anyway) and having magical live-updates
    # where possible.
users_to_be_notified = list(map(user_info, ums))
if stream_being_edited is not None:
if stream_being_edited.is_history_public_to_subscribers:
subscribers = get_active_subscriptions_for_stream_id(stream_id)
# We exclude long-term idle users, since they by definition have no active clients.
subscribers = subscribers.exclude(user_profile__long_term_idle=True)
# Remove duplicates by excluding the id of users already in users_to_be_notified list.
# This is the case where a user both has a UserMessage row and is a current Subscriber
subscribers = subscribers.exclude(user_profile_id__in=[um.user_profile_id for um in ums])
# All users that are subscribed to the stream must be notified when a message is edited
subscribers_ids = [user.user_profile_id for user in subscribers]
users_to_be_notified += list(map(subscriber_info, subscribers_ids))
send_event(user_profile.realm, event, users_to_be_notified)
return len(changed_messages)
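# Hedged usage sketch for do_update_message (parameter values are illustrative
# only): a topic-only edit of a single message might look like
#
#     do_update_message(user_profile, message, topic_name="planning",
#                       propagate_mode="change_one", content=None,
#                       rendered_content=None, prior_mention_user_ids=set(),
#                       mention_user_ids=set(), mention_data=None)
#
# Because content is None, the mention/recipient-info block is skipped and
# only the topic fields, edit history, and cache entries are updated.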
def do_delete_messages(realm: Realm, messages: Iterable[Message]) -> None:
message_ids = [message.id for message in messages]
if not message_ids:
return
usermessages = UserMessage.objects.filter(message_id__in=message_ids)
message_id_to_notifiable_users = {} # type: Dict[int, List[int]]
for um in usermessages:
if um.message_id not in message_id_to_notifiable_users:
message_id_to_notifiable_users[um.message_id] = []
message_id_to_notifiable_users[um.message_id].append(um.user_profile_id)
events_and_users_to_notify = []
for message in messages:
message_type = "stream"
if not message.is_stream_message():
message_type = "private"
event = {
'type': 'delete_message',
'sender': message.sender.email,
'sender_id': message.sender_id,
'message_id': message.id,
'message_type': message_type, } # type: Dict[str, Any]
if message_type == "stream":
event['stream_id'] = message.recipient.type_id
event['topic'] = message.topic_name()
else:
event['recipient_id'] = message.recipient_id
# In theory, it's possible for message_id_to_notifiable_users
# to not have a key for the message ID in some weird corner
# case where we've deleted the last user subscribed to the
# target stream before a bot sent a message to it, and thus
# there are no UserMessage objects associated with the
# message.
events_and_users_to_notify.append(
(event, message_id_to_notifiable_users.get(message.id, []))
)
move_messages_to_archive(message_ids)
for event, users_to_notify in events_and_users_to_notify:
# TODO: Figure out some kind of bulk event that we could send just one of?
send_event(realm, event, users_to_notify)
def do_delete_messages_by_sender(user: UserProfile) -> None:
message_ids = Message.objects.filter(sender=user).values_list('id', flat=True).order_by('id')
if message_ids:
move_messages_to_archive(message_ids)
def get_streams_traffic(stream_ids: Set[int]) -> Dict[int, int]:
stat = COUNT_STATS['messages_in_stream:is_bot:day']
traffic_from = timezone_now() - datetime.timedelta(days=28)
query = StreamCount.objects.filter(property=stat.property,
end_time__gt=traffic_from)
query = query.filter(stream_id__in=stream_ids)
traffic_list = query.values('stream_id').annotate(value=Sum('value'))
traffic_dict = {}
for traffic in traffic_list:
traffic_dict[traffic["stream_id"]] = traffic["value"]
return traffic_dict
def round_to_2_significant_digits(number: int) -> int:
return int(round(number, 2 - len(str(number))))
STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS = 7
def get_average_weekly_stream_traffic(stream_id: int, stream_date_created: datetime.datetime,
recent_traffic: Dict[int, int]) -> Optional[int]:
try:
stream_traffic = recent_traffic[stream_id]
except KeyError:
stream_traffic = 0
stream_age = (timezone_now() - stream_date_created).days
if stream_age >= 28:
average_weekly_traffic = int(stream_traffic // 4)
elif stream_age >= STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS:
average_weekly_traffic = int(stream_traffic * 7 // stream_age)
else:
return None
if average_weekly_traffic == 0 and stream_traffic > 0:
average_weekly_traffic = 1
return round_to_2_significant_digits(average_weekly_traffic)
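# Worked example of the traffic estimate above (numbers are made up): for a
# stream that is 30 days old with 130 messages counted over the last 28 days,
# stream_age >= 28, so average_weekly_traffic = 130 // 4 = 32, which
# round_to_2_significant_digits returns unchanged as 32. A 10-day-old stream
# with 3 messages yields 3 * 7 // 10 = 2. Streams younger than
# STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS get None.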
def is_old_stream(stream_date_created: datetime.datetime) -> bool:
return (timezone_now() - stream_date_created).days \
>= STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS
SubHelperT = Tuple[List[Dict[str, Any]], List[Dict[str, Any]], List[Dict[str, Any]]]
def get_web_public_subs(realm: Realm) -> SubHelperT:
color_idx = 0
def get_next_color() -> str:
nonlocal color_idx
color = STREAM_ASSIGNMENT_COLORS[color_idx]
color_idx = (color_idx + 1) % len(STREAM_ASSIGNMENT_COLORS)
return color
subscribed = []
for stream in Stream.objects.filter(realm=realm, is_web_public=True, deactivated=False):
stream_dict = stream.to_dict()
# Add versions of the Subscription fields based on a simulated
# new user subscription set.
stream_dict['is_muted'] = False
stream_dict['color'] = get_next_color()
stream_dict['desktop_notifications'] = True
stream_dict['audible_notifications'] = True
stream_dict['push_notifications'] = True
stream_dict['email_notifications'] = True
stream_dict['pin_to_top'] = False
stream_dict['is_old_stream'] = is_old_stream(stream.date_created)
stream_weekly_traffic = get_average_weekly_stream_traffic(stream.id,
stream.date_created,
{})
stream_dict['stream_weekly_traffic'] = stream_weekly_traffic
stream_dict['email_address'] = ''
subscribed.append(stream_dict)
return (subscribed, [], [])
# In general, it's better to avoid using .values() because it makes
# the code pretty ugly, but in this case, it has a significant
# performance impact for loading the `/` page for users with large
# numbers of subscriptions, so it's worth optimizing.
def gather_subscriptions_helper(user_profile: UserProfile,
include_subscribers: bool=True) -> SubHelperT:
sub_dicts = get_stream_subscriptions_for_user(user_profile).values(
*Subscription.API_FIELDS, "recipient_id").order_by("recipient_id")
sub_dicts = list(sub_dicts)
sub_recipient_ids = [
sub['recipient_id']
for sub in sub_dicts
]
stream_recipient = StreamRecipientMap()
stream_recipient.populate_for_recipient_ids(sub_recipient_ids)
stream_ids = set() # type: Set[int]
for sub in sub_dicts:
sub['stream_id'] = stream_recipient.stream_id_for(sub['recipient_id'])
stream_ids.add(sub['stream_id'])
recent_traffic = get_streams_traffic(stream_ids=stream_ids)
all_streams = get_active_streams(user_profile.realm).select_related(
"realm").values(
*Stream.API_FIELDS,
# date_created is used as an input for the is_old_stream computed field.
"date_created",
# The realm_id and recipient_id are generally not needed in the API.
"realm_id",
"recipient_id",
# email_token isn't public to some users with access to
# the stream, so doesn't belong in API_FIELDS.
"email_token")
stream_dicts = [stream for stream in all_streams if stream['id'] in stream_ids]
stream_hash = {}
for stream in stream_dicts:
stream_hash[stream["id"]] = stream
all_streams_id = [stream["id"] for stream in all_streams]
subscribed = []
unsubscribed = []
never_subscribed = []
# Deactivated streams aren't in stream_hash.
streams = [stream_hash[sub["stream_id"]] for sub in sub_dicts
if sub["stream_id"] in stream_hash]
streams_subscribed_map = dict((sub["stream_id"], sub["active"]) for sub in sub_dicts)
# Add never subscribed streams to streams_subscribed_map
streams_subscribed_map.update({stream['id']: False for stream in all_streams if stream not in streams})
if include_subscribers:
subscriber_map = bulk_get_subscriber_user_ids(
all_streams,
user_profile,
streams_subscribed_map,
stream_recipient
) # type: Mapping[int, Optional[List[int]]]
else:
# If we're not including subscribers, always return None,
# which the below code needs to check for anyway.
subscriber_map = defaultdict(lambda: None)
sub_unsub_stream_ids = set()
for sub in sub_dicts:
sub_unsub_stream_ids.add(sub["stream_id"])
stream = stream_hash.get(sub["stream_id"])
if not stream:
# This stream has been deactivated, don't include it.
continue
# We first construct a dictionary based on the standard Stream
# and Subscription models' API_FIELDS.
stream_dict = {}
for field_name in Stream.API_FIELDS:
if field_name == "id":
stream_dict['stream_id'] = stream["id"]
continue
stream_dict[field_name] = stream[field_name]
        # Copy Subscription.API_FIELDS except for "active", which is
        # used to determine where to put the field.
for field_name in Subscription.API_FIELDS:
stream_dict[field_name] = sub[field_name]
# Backwards-compatibility for clients that haven't been
# updated for the in_home_view => is_muted API migration.
stream_dict['in_home_view'] = not stream_dict['is_muted']
# Backwards-compatibility for clients that haven't been
# updated for the is_announcement_only -> stream_post_policy
# migration.
stream_dict['is_announcement_only'] = \
stream['stream_post_policy'] == Stream.STREAM_POST_POLICY_ADMINS
# Add a few computed fields not directly from the data models.
stream_dict['is_old_stream'] = is_old_stream(stream["date_created"])
stream_dict['stream_weekly_traffic'] = get_average_weekly_stream_traffic(
stream["id"], stream["date_created"], recent_traffic)
stream_dict['email_address'] = encode_email_address_helper(
stream["name"], stream["email_token"], show_sender=True)
# Construct and add subscribers data
subscribers = subscriber_map[stream["id"]] # type: Optional[List[int]]
# Important: don't show the subscribers if the stream is invite only
# and this user isn't on it anymore (or a realm administrator).
if stream["invite_only"] and not (sub["active"] or user_profile.is_realm_admin):
subscribers = None
# Guest users lose access to subscribers when they are unsubscribed.
if not sub["active"] and user_profile.is_guest:
subscribers = None
if subscribers is not None:
stream_dict['subscribers'] = subscribers
# is_active is represented in this structure by which list we include it in.
is_active = stream_dict.pop("active")
if is_active:
subscribed.append(stream_dict)
else:
unsubscribed.append(stream_dict)
all_streams_id_set = set(all_streams_id)
if user_profile.can_access_public_streams():
never_subscribed_stream_ids = all_streams_id_set - sub_unsub_stream_ids
else:
never_subscribed_stream_ids = set()
never_subscribed_streams = [ns_stream_dict for ns_stream_dict in all_streams
if ns_stream_dict['id'] in never_subscribed_stream_ids]
for stream in never_subscribed_streams:
is_public = (not stream['invite_only'])
if is_public or user_profile.is_realm_admin:
stream_dict = {}
for field_name in Stream.API_FIELDS:
if field_name == "id":
stream_dict['stream_id'] = stream["id"]
continue
stream_dict[field_name] = stream[field_name]
stream_dict['is_old_stream'] = is_old_stream(stream["date_created"])
stream_dict['stream_weekly_traffic'] = get_average_weekly_stream_traffic(
stream["id"], stream["date_created"], recent_traffic)
# Backwards-compatibility addition of removed field.
stream_dict['is_announcement_only'] = \
stream['stream_post_policy'] == Stream.STREAM_POST_POLICY_ADMINS
if is_public or user_profile.is_realm_admin:
subscribers = subscriber_map[stream["id"]]
if subscribers is not None:
stream_dict['subscribers'] = subscribers
never_subscribed.append(stream_dict)
return (sorted(subscribed, key=lambda x: x['name']),
sorted(unsubscribed, key=lambda x: x['name']),
sorted(never_subscribed, key=lambda x: x['name']))
def gather_subscriptions(
user_profile: UserProfile,
include_subscribers: bool=False,
) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
subscribed, unsubscribed, _ = gather_subscriptions_helper(
user_profile, include_subscribers=include_subscribers)
if include_subscribers:
user_ids = set()
for subs in [subscribed, unsubscribed]:
for sub in subs:
if 'subscribers' in sub:
for subscriber in sub['subscribers']:
user_ids.add(subscriber)
email_dict = get_emails_from_user_ids(list(user_ids))
for subs in [subscribed, unsubscribed]:
for sub in subs:
if 'subscribers' in sub:
sub['subscribers'] = sorted([
email_dict[user_id] for user_id in sub['subscribers']
])
return (subscribed, unsubscribed)
def get_active_presence_idle_user_ids(realm: Realm,
sender_id: int,
message_type: str,
active_user_ids: Set[int],
user_flags: Dict[int, List[str]]) -> List[int]:
'''
Given a list of active_user_ids, we build up a subset
of those users who fit these criteria:
* They are likely to need notifications (either due
to mentions, alert words, or being PM'ed).
* They are no longer "present" according to the
UserPresence table.
'''
if realm.presence_disabled:
return []
is_pm = message_type == 'private'
user_ids = set()
for user_id in active_user_ids:
flags = user_flags.get(user_id, []) # type: Iterable[str]
mentioned = 'mentioned' in flags or 'wildcard_mentioned' in flags
private_message = is_pm and user_id != sender_id
alerted = 'has_alert_word' in flags
if mentioned or private_message or alerted:
user_ids.add(user_id)
return filter_presence_idle_user_ids(user_ids)
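# Illustrative reading of the filter above (user IDs are hypothetical): for a
# private message from user 1 to users {2, 3}, both recipients pass the
# "likely to need a notification" check; if UserPresence then shows user 2
# active on a non-mobile client within the threshold, only user 3 is returned
# as presence-idle and thus a candidate for email/push notification.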
def filter_presence_idle_user_ids(user_ids: Set[int]) -> List[int]:
# Given a set of user IDs (the recipients of a message), accesses
# the UserPresence table to determine which of these users are
# currently idle and should potentially get email notifications
    # (and push notifications with
    # user_profile.enable_online_push_notifications=False).
#
# We exclude any presence data from ZulipMobile for the purpose of
# triggering these notifications; the mobile app can more
# effectively do its own client-side filtering of notification
# sounds/etc. for the case that the user is actively doing a PM
# conversation in the app.
if not user_ids:
return []
# Matches presence.js constant
OFFLINE_THRESHOLD_SECS = 140
recent = timezone_now() - datetime.timedelta(seconds=OFFLINE_THRESHOLD_SECS)
rows = UserPresence.objects.filter(
user_profile_id__in=user_ids,
status=UserPresence.ACTIVE,
timestamp__gte=recent
).exclude(client__name="ZulipMobile").distinct('user_profile_id').values('user_profile_id')
active_user_ids = {row['user_profile_id'] for row in rows}
idle_user_ids = user_ids - active_user_ids
return sorted(list(idle_user_ids))
def do_send_confirmation_email(invitee: PreregistrationUser,
referrer: UserProfile) -> str:
"""
Send the confirmation/welcome e-mail to an invited user.
"""
activation_url = create_confirmation_link(invitee, referrer.realm.host, Confirmation.INVITATION)
context = {'referrer_full_name': referrer.full_name, 'referrer_email': referrer.delivery_email,
'activate_url': activation_url, 'referrer_realm_name': referrer.realm.name}
from_name = "%s (via Zulip)" % (referrer.full_name,)
send_email('zerver/emails/invitation', to_emails=[invitee.email], from_name=from_name,
from_address=FromAddress.tokenized_no_reply_address(),
language=referrer.realm.default_language, context=context)
return activation_url
def email_not_system_bot(email: str) -> None:
if is_cross_realm_bot_email(email):
msg = email_reserved_for_system_bots_error(email)
code = msg
raise ValidationError(
msg,
code=code,
params=dict(deactivated=False),
)
class InvitationError(JsonableError):
code = ErrorCode.INVITATION_FAILED
data_fields = ['errors', 'sent_invitations']
def __init__(self, msg: str, errors: List[Tuple[str, str, bool]],
sent_invitations: bool) -> None:
self._msg = msg # type: str
self.errors = errors # type: List[Tuple[str, str, bool]]
self.sent_invitations = sent_invitations # type: bool
def estimate_recent_invites(realms: Iterable[Realm], *, days: int) -> int:
'''An upper bound on the number of invites sent in the last `days` days'''
recent_invites = RealmCount.objects.filter(
realm__in=realms,
property='invites_sent::day',
end_time__gte=timezone_now() - datetime.timedelta(days=days)
).aggregate(Sum('value'))['value__sum']
if recent_invites is None:
return 0
return recent_invites
def check_invite_limit(realm: Realm, num_invitees: int) -> None:
'''Discourage using invitation emails as a vector for carrying spam.'''
msg = _("You do not have enough remaining invites. "
"Please contact %s to have your limit raised. "
"No invitations were sent.") % (settings.ZULIP_ADMINISTRATOR,)
if not settings.OPEN_REALM_CREATION:
return
recent_invites = estimate_recent_invites([realm], days=1)
if num_invitees + recent_invites > realm.max_invites:
raise InvitationError(msg, [], sent_invitations=False)
default_max = settings.INVITES_DEFAULT_REALM_DAILY_MAX
newrealm_age = datetime.timedelta(days=settings.INVITES_NEW_REALM_DAYS)
if realm.date_created <= timezone_now() - newrealm_age:
# If this isn't a "newly-created" realm, we're done. The
# remaining code applies an aggregate limit across all
# "new" realms, to address sudden bursts of spam realms.
return
if realm.max_invites > default_max:
# If a user is on a realm where we've bumped up
# max_invites, then we exempt them from invite limits.
return
new_realms = Realm.objects.filter(
date_created__gte=timezone_now() - newrealm_age,
_max_invites__lte=default_max,
).all()
for days, count in settings.INVITES_NEW_REALM_LIMIT_DAYS:
recent_invites = estimate_recent_invites(new_realms, days=days)
if num_invitees + recent_invites > count:
raise InvitationError(msg, [], sent_invitations=False)
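# Sketch of the two limits enforced above (numbers are hypothetical, and the
# checks only run when OPEN_REALM_CREATION is enabled, since the function
# returns early otherwise): if a realm's max_invites is 100 and 95 invites
# were already sent today, inviting 10 more addresses raises InvitationError.
# For realms newer than INVITES_NEW_REALM_DAYS, an additional aggregate cap
# across all new realms is applied per the (days, count) pairs in
# INVITES_NEW_REALM_LIMIT_DAYS.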
def do_invite_users(user_profile: UserProfile,
invitee_emails: SizedTextIterable,
streams: Iterable[Stream],
invite_as: Optional[int]=PreregistrationUser.INVITE_AS['MEMBER']) -> None:
check_invite_limit(user_profile.realm, len(invitee_emails))
realm = user_profile.realm
if not realm.invite_required:
# Inhibit joining an open realm to send spam invitations.
min_age = datetime.timedelta(days=settings.INVITES_MIN_USER_AGE_DAYS)
if (user_profile.date_joined > timezone_now() - min_age
and not user_profile.is_realm_admin):
raise InvitationError(
_("Your account is too new to send invites for this organization. "
"Ask an organization admin, or a more experienced user."),
[], sent_invitations=False)
good_emails = set() # type: Set[str]
errors = [] # type: List[Tuple[str, str, bool]]
validate_email_allowed_in_realm = get_realm_email_validator(user_profile.realm)
for email in invitee_emails:
if email == '':
continue
email_error = validate_email_is_valid(
email,
validate_email_allowed_in_realm,
)
if email_error:
errors.append((email, email_error, False))
else:
good_emails.add(email)
'''
good_emails are emails that look ok so far,
but we still need to make sure they're not
gonna conflict with existing users
'''
error_dict = get_existing_user_errors(user_profile.realm, good_emails)
skipped = [] # type: List[Tuple[str, str, bool]]
for email in error_dict:
msg, deactivated = error_dict[email]
skipped.append((email, msg, deactivated))
good_emails.remove(email)
validated_emails = list(good_emails)
if errors:
raise InvitationError(
_("Some emails did not validate, so we didn't send any invitations."),
errors + skipped, sent_invitations=False)
if skipped and len(skipped) == len(invitee_emails):
# All e-mails were skipped, so we didn't actually invite anyone.
raise InvitationError(_("We weren't able to invite anyone."),
skipped, sent_invitations=False)
# We do this here rather than in the invite queue processor since this
# is used for rate limiting invitations, rather than keeping track of
# when exactly invitations were sent
do_increment_logging_stat(user_profile.realm, COUNT_STATS['invites_sent::day'],
None, timezone_now(), increment=len(validated_emails))
# Now that we are past all the possible errors, we actually create
# the PreregistrationUser objects and trigger the email invitations.
for email in validated_emails:
# The logged in user is the referrer.
prereg_user = PreregistrationUser(email=email, referred_by=user_profile,
invited_as=invite_as,
realm=user_profile.realm)
prereg_user.save()
stream_ids = [stream.id for stream in streams]
prereg_user.streams.set(stream_ids)
event = {"prereg_id": prereg_user.id, "referrer_id": user_profile.id}
queue_json_publish("invites", event)
if skipped:
raise InvitationError(_("Some of those addresses are already using Zulip, "
"so we didn't send them an invitation. We did send "
"invitations to everyone else!"),
skipped, sent_invitations=True)
notify_invites_changed(user_profile)
def do_get_user_invites(user_profile: UserProfile) -> List[Dict[str, Any]]:
days_to_activate = settings.INVITATION_LINK_VALIDITY_DAYS
active_value = getattr(confirmation_settings, 'STATUS_ACTIVE', 1)
lowest_datetime = timezone_now() - datetime.timedelta(days=days_to_activate)
prereg_users = PreregistrationUser.objects.exclude(status=active_value).filter(
invited_at__gte=lowest_datetime,
referred_by__realm=user_profile.realm)
invites = []
for invitee in prereg_users:
invites.append(dict(email=invitee.email,
ref=invitee.referred_by.email,
invited=datetime_to_timestamp(invitee.invited_at),
id=invitee.id,
invited_as=invitee.invited_as,
is_multiuse=False))
multiuse_confirmation_objs = Confirmation.objects.filter(realm=user_profile.realm,
type=Confirmation.MULTIUSE_INVITE,
date_sent__gte=lowest_datetime)
for confirmation_obj in multiuse_confirmation_objs:
invite = confirmation_obj.content_object
invites.append(dict(ref=invite.referred_by.email,
invited=datetime_to_timestamp(confirmation_obj.date_sent),
id=invite.id,
link_url=confirmation_url(confirmation_obj.confirmation_key,
user_profile.realm.host,
Confirmation.MULTIUSE_INVITE),
invited_as=invite.invited_as,
is_multiuse=True))
return invites
def do_create_multiuse_invite_link(referred_by: UserProfile, invited_as: int,
streams: Optional[List[Stream]]=[]) -> str:
realm = referred_by.realm
invite = MultiuseInvite.objects.create(realm=realm, referred_by=referred_by)
if streams:
invite.streams.set(streams)
invite.invited_as = invited_as
invite.save()
notify_invites_changed(referred_by)
return create_confirmation_link(invite, realm.host, Confirmation.MULTIUSE_INVITE)
def do_revoke_user_invite(prereg_user: PreregistrationUser) -> None:
email = prereg_user.email
# Delete both the confirmation objects and the prereg_user object.
    # TODO: Probably we actually want to set the confirmation objects
# to a "revoked" status so that we can give the invited user a better
# error message.
content_type = ContentType.objects.get_for_model(PreregistrationUser)
Confirmation.objects.filter(content_type=content_type,
object_id=prereg_user.id).delete()
prereg_user.delete()
clear_scheduled_invitation_emails(email)
notify_invites_changed(prereg_user)
def do_revoke_multi_use_invite(multiuse_invite: MultiuseInvite) -> None:
content_type = ContentType.objects.get_for_model(MultiuseInvite)
Confirmation.objects.filter(content_type=content_type,
object_id=multiuse_invite.id).delete()
multiuse_invite.delete()
notify_invites_changed(multiuse_invite.referred_by)
def do_resend_user_invite_email(prereg_user: PreregistrationUser) -> int:
    # These two assertions are structurally true for the caller's code path.
assert prereg_user.referred_by is not None
assert prereg_user.realm is not None
check_invite_limit(prereg_user.referred_by.realm, 1)
prereg_user.invited_at = timezone_now()
prereg_user.save()
do_increment_logging_stat(prereg_user.realm, COUNT_STATS['invites_sent::day'],
None, prereg_user.invited_at)
clear_scheduled_invitation_emails(prereg_user.email)
# We don't store the custom email body, so just set it to None
event = {"prereg_id": prereg_user.id, "referrer_id": prereg_user.referred_by.id, "email_body": None}
queue_json_publish("invites", event)
return datetime_to_timestamp(prereg_user.invited_at)
def notify_realm_emoji(realm: Realm) -> None:
event = dict(type="realm_emoji", op="update",
realm_emoji=realm.get_emoji())
send_event(realm, event, active_user_ids(realm.id))
def check_add_realm_emoji(realm: Realm,
name: str,
author: UserProfile,
image_file: File) -> Optional[RealmEmoji]:
realm_emoji = RealmEmoji(realm=realm, name=name, author=author)
realm_emoji.full_clean()
realm_emoji.save()
emoji_file_name = get_emoji_file_name(image_file.name, realm_emoji.id)
emoji_uploaded_successfully = False
try:
upload_emoji_image(image_file, emoji_file_name, author)
emoji_uploaded_successfully = True
finally:
if not emoji_uploaded_successfully:
realm_emoji.delete()
return None
else:
realm_emoji.file_name = emoji_file_name
realm_emoji.save(update_fields=['file_name'])
notify_realm_emoji(realm_emoji.realm)
return realm_emoji
def do_remove_realm_emoji(realm: Realm, name: str) -> None:
emoji = RealmEmoji.objects.get(realm=realm, name=name, deactivated=False)
emoji.deactivated = True
emoji.save(update_fields=['deactivated'])
notify_realm_emoji(realm)
def notify_alert_words(user_profile: UserProfile, words: Iterable[str]) -> None:
event = dict(type="alert_words", alert_words=words)
send_event(user_profile.realm, event, [user_profile.id])
def do_add_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> None:
words = add_user_alert_words(user_profile, alert_words)
notify_alert_words(user_profile, words)
def do_remove_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> None:
words = remove_user_alert_words(user_profile, alert_words)
notify_alert_words(user_profile, words)
def do_set_alert_words(user_profile: UserProfile, alert_words: List[str]) -> None:
set_user_alert_words(user_profile, alert_words)
notify_alert_words(user_profile, alert_words)
def do_mute_topic(user_profile: UserProfile, stream: Stream, recipient: Recipient, topic: str,
date_muted: Optional[datetime.datetime]=None) -> None:
if date_muted is None:
date_muted = timezone_now()
add_topic_mute(user_profile, stream.id, recipient.id, topic, date_muted)
event = dict(type="muted_topics", muted_topics=get_topic_mutes(user_profile))
send_event(user_profile.realm, event, [user_profile.id])
def do_unmute_topic(user_profile: UserProfile, stream: Stream, topic: str) -> None:
remove_topic_mute(user_profile, stream.id, topic)
event = dict(type="muted_topics", muted_topics=get_topic_mutes(user_profile))
send_event(user_profile.realm, event, [user_profile.id])
def do_mark_hotspot_as_read(user: UserProfile, hotspot: str) -> None:
UserHotspot.objects.get_or_create(user=user, hotspot=hotspot)
event = dict(type="hotspots", hotspots=get_next_hotspots(user))
send_event(user.realm, event, [user.id])
def notify_realm_filters(realm: Realm) -> None:
realm_filters = realm_filters_for_realm(realm.id)
event = dict(type="realm_filters", realm_filters=realm_filters)
send_event(realm, event, active_user_ids(realm.id))
# NOTE: Regexes must be simple enough that they can be easily translated to JavaScript
# RegExp syntax. In addition to JS-compatible syntax, the following features are available:
# * Named groups will be converted to numbered groups automatically
# * Inline-regex flags will be stripped, and where possible translated to RegExp-wide flags
def do_add_realm_filter(realm: Realm, pattern: str, url_format_string: str) -> int:
pattern = pattern.strip()
url_format_string = url_format_string.strip()
realm_filter = RealmFilter(
realm=realm, pattern=pattern,
url_format_string=url_format_string)
realm_filter.full_clean()
realm_filter.save()
notify_realm_filters(realm)
return realm_filter.id
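# Hedged example of a realm filter (the pattern and URL below are illustrative,
# not shipped defaults): linking "#1234"-style references to an issue tracker.
#
#     do_add_realm_filter(realm, r"#(?P<id>[0-9]+)",
#                         "https://example.com/issues/%(id)s")
#
# Per the NOTE above, the named group is converted to a numbered group for the
# JavaScript RegExp implementation used by web clients.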
def do_remove_realm_filter(realm: Realm, pattern: Optional[str]=None,
id: Optional[int]=None) -> None:
if pattern is not None:
RealmFilter.objects.get(realm=realm, pattern=pattern).delete()
else:
RealmFilter.objects.get(realm=realm, pk=id).delete()
notify_realm_filters(realm)
def get_emails_from_user_ids(user_ids: Sequence[int]) -> Dict[int, str]:
# We may eventually use memcached to speed this up, but the DB is fast.
return UserProfile.emails_from_ids(user_ids)
def do_add_realm_domain(realm: Realm, domain: str, allow_subdomains: bool) -> (RealmDomain):
realm_domain = RealmDomain.objects.create(realm=realm, domain=domain,
allow_subdomains=allow_subdomains)
event = dict(type="realm_domains", op="add",
realm_domain=dict(domain=realm_domain.domain,
allow_subdomains=realm_domain.allow_subdomains))
send_event(realm, event, active_user_ids(realm.id))
return realm_domain
def do_change_realm_domain(realm_domain: RealmDomain, allow_subdomains: bool) -> None:
realm_domain.allow_subdomains = allow_subdomains
realm_domain.save(update_fields=['allow_subdomains'])
event = dict(type="realm_domains", op="change",
realm_domain=dict(domain=realm_domain.domain,
allow_subdomains=realm_domain.allow_subdomains))
send_event(realm_domain.realm, event, active_user_ids(realm_domain.realm_id))
def do_remove_realm_domain(realm_domain: RealmDomain) -> None:
realm = realm_domain.realm
domain = realm_domain.domain
realm_domain.delete()
if RealmDomain.objects.filter(realm=realm).count() == 0 and realm.emails_restricted_to_domains:
# If this was the last realm domain, we mark the realm as no
# longer restricted to domain, because the feature doesn't do
# anything if there are no domains, and this is probably less
# confusing than the alternative.
do_set_realm_property(realm, 'emails_restricted_to_domains', False)
event = dict(type="realm_domains", op="remove", domain=domain)
send_event(realm, event, active_user_ids(realm.id))
def get_occupied_streams(realm: Realm) -> QuerySet:
# TODO: Make a generic stub for QuerySet
""" Get streams with subscribers """
exists_expression = Exists(
Subscription.objects.filter(active=True, user_profile__is_active=True,
user_profile__realm=realm,
recipient_id=OuterRef('recipient_id'))
)
occupied_streams = Stream.objects.filter(realm=realm, deactivated=False) \
.annotate(occupied=exists_expression).filter(occupied=True)
return occupied_streams
def get_web_public_streams(realm: Realm) -> List[Dict[str, Any]]:
query = Stream.objects.filter(realm=realm, deactivated=False, is_web_public=True)
streams = Stream.get_client_data(query)
return streams
def do_get_streams(
user_profile: UserProfile, include_public: bool=True,
include_subscribed: bool=True, include_all_active: bool=False,
include_default: bool=False, include_owner_subscribed: bool=False
) -> List[Dict[str, Any]]:
if include_all_active and not user_profile.is_api_super_user:
raise JsonableError(_("User not authorized for this query"))
include_public = include_public and user_profile.can_access_public_streams()
# Start out with all streams in the realm with subscribers
query = get_occupied_streams(user_profile.realm)
if include_all_active:
streams = Stream.get_client_data(query)
else:
# We construct a query as the or (|) of the various sources
# this user requested streams from.
query_filter = None # type: Optional[Q]
def add_filter_option(option: Q) -> None:
nonlocal query_filter
if query_filter is None:
query_filter = option
else:
query_filter |= option
if include_subscribed:
subscribed_stream_ids = get_subscribed_stream_ids_for_user(user_profile)
recipient_check = Q(id__in=set(subscribed_stream_ids))
add_filter_option(recipient_check)
if include_public:
invite_only_check = Q(invite_only=False)
add_filter_option(invite_only_check)
if include_owner_subscribed and user_profile.is_bot:
bot_owner = user_profile.bot_owner
assert bot_owner is not None
owner_stream_ids = get_subscribed_stream_ids_for_user(bot_owner)
owner_subscribed_check = Q(id__in=set(owner_stream_ids))
add_filter_option(owner_subscribed_check)
if query_filter is not None:
query = query.filter(query_filter)
streams = Stream.get_client_data(query)
else:
# Don't bother going to the database with no valid sources
streams = []
streams.sort(key=lambda elt: elt["name"])
if include_default:
is_default = {}
default_streams = get_default_streams_for_realm(user_profile.realm_id)
for default_stream in default_streams:
is_default[default_stream.id] = True
for stream in streams:
stream['is_default'] = is_default.get(stream["stream_id"], False)
return streams
def notify_attachment_update(user_profile: UserProfile, op: str,
attachment_dict: Dict[str, Any]) -> None:
event = {
'type': 'attachment',
'op': op,
'attachment': attachment_dict,
"upload_space_used": user_profile.realm.currently_used_upload_space_bytes(),
}
send_event(user_profile.realm, event, [user_profile.id])
def do_claim_attachments(message: Message, potential_path_ids: List[str]) -> bool:
claimed = False
for path_id in potential_path_ids:
user_profile = message.sender
is_message_realm_public = False
if message.is_stream_message():
is_message_realm_public = Stream.objects.get(id=message.recipient.type_id).is_public()
if not validate_attachment_request(user_profile, path_id):
# Technically, there are 2 cases here:
# * The user put something in their message that has the form
            #   of an upload, but doesn't correspond to a file that actually
            #   exists. validate_attachment_request will return None.
# * The user is trying to send a link to a file they don't have permission to
# access themselves. validate_attachment_request will return False.
#
# Either case is unusual and suggests a UI bug that got
# the user in this situation, so we log in these cases.
logging.warning("User %s tried to share upload %s in message %s, but lacks permission" % (
user_profile.id, path_id, message.id))
continue
claimed = True
attachment = claim_attachment(user_profile, path_id, message, is_message_realm_public)
notify_attachment_update(user_profile, "update", attachment.to_dict())
return claimed
def do_delete_old_unclaimed_attachments(weeks_ago: int) -> None:
old_unclaimed_attachments = get_old_unclaimed_attachments(weeks_ago)
for attachment in old_unclaimed_attachments:
delete_message_image(attachment.path_id)
attachment.delete()
def check_attachment_reference_change(message: Message) -> bool:
# For an unsaved message edit (message.* has been updated, but not
# saved to the database), adjusts Attachment data to correspond to
# the new content.
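# For example, if the edit swaps one upload link for another, the old
# Attachment row is detached from the message below and the new one is
# claimed via do_claim_attachments.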
prev_attachments = {a.path_id for a in message.attachment_set.all()}
new_attachments = set(message.potential_attachment_path_ids)
if new_attachments == prev_attachments:
return bool(prev_attachments)
to_remove = list(prev_attachments - new_attachments)
if len(to_remove) > 0:
attachments_to_update = Attachment.objects.filter(path_id__in=to_remove).select_for_update()
message.attachment_set.remove(*attachments_to_update)
to_add = list(new_attachments - prev_attachments)
if len(to_add) > 0:
do_claim_attachments(message, to_add)
return message.attachment_set.exists()
def notify_realm_custom_profile_fields(realm: Realm, operation: str) -> None:
fields = custom_profile_fields_for_realm(realm.id)
event = dict(type="custom_profile_fields",
op=operation,
fields=[f.as_dict() for f in fields])
send_event(realm, event, active_user_ids(realm.id))
def try_add_realm_default_custom_profile_field(realm: Realm,
field_subtype: str) -> CustomProfileField:
field_data = DEFAULT_EXTERNAL_ACCOUNTS[field_subtype]
field = CustomProfileField(realm=realm, name=field_data['name'],
field_type=CustomProfileField.EXTERNAL_ACCOUNT,
hint=field_data['hint'],
field_data=ujson.dumps(dict(subtype=field_subtype)))
field.save()
field.order = field.id
field.save(update_fields=['order'])
notify_realm_custom_profile_fields(realm, 'add')
return field
def try_add_realm_custom_profile_field(realm: Realm, name: str, field_type: int,
hint: str='',
field_data: Optional[ProfileFieldData]=None) -> CustomProfileField:
field = CustomProfileField(realm=realm, name=name, field_type=field_type)
field.hint = hint
if (field.field_type == CustomProfileField.CHOICE or
field.field_type == CustomProfileField.EXTERNAL_ACCOUNT):
field.field_data = ujson.dumps(field_data or {})
field.save()
field.order = field.id
field.save(update_fields=['order'])
notify_realm_custom_profile_fields(realm, 'add')
return field
def do_remove_realm_custom_profile_field(realm: Realm, field: CustomProfileField) -> None:
"""
Deleting a field will also delete the user profile data
associated with it in CustomProfileFieldValue model.
"""
field.delete()
notify_realm_custom_profile_fields(realm, 'delete')
def do_remove_realm_custom_profile_fields(realm: Realm) -> None:
CustomProfileField.objects.filter(realm=realm).delete()
def try_update_realm_custom_profile_field(realm: Realm, field: CustomProfileField,
name: str, hint: str='',
field_data: Optional[ProfileFieldData]=None) -> None:
field.name = name
field.hint = hint
if (field.field_type == CustomProfileField.CHOICE or
field.field_type == CustomProfileField.EXTERNAL_ACCOUNT):
field.field_data = ujson.dumps(field_data or {})
field.save()
notify_realm_custom_profile_fields(realm, 'update')
def try_reorder_realm_custom_profile_fields(realm: Realm, order: List[int]) -> None:
order_mapping = {field_id: order_index for order_index, field_id in enumerate(order)}
fields = CustomProfileField.objects.filter(realm=realm)
for field in fields:
if field.id not in order_mapping:
raise JsonableError(_("Invalid order mapping."))
for field in fields:
field.order = order_mapping[field.id]
field.save(update_fields=['order'])
notify_realm_custom_profile_fields(realm, 'update')
def notify_user_update_custom_profile_data(user_profile: UserProfile,
field: Dict[str, Union[int, str, List[int], None]]) -> None:
data = dict(id=field['id'])
if field['type'] == CustomProfileField.USER:
data["value"] = ujson.dumps(field['value'])
else:
data['value'] = field['value']
if field['rendered_value']:
data['rendered_value'] = field['rendered_value']
payload = dict(user_id=user_profile.id, custom_profile_field=data)
event = dict(type="realm_user", op="update", person=payload)
send_event(user_profile.realm, event, active_user_ids(user_profile.realm.id))
def do_update_user_custom_profile_data_if_changed(user_profile: UserProfile,
data: List[Dict[str, Union[int, str, List[int]]]]
) -> None:
with transaction.atomic():
for field in data:
field_value, created = CustomProfileFieldValue.objects.get_or_create(
user_profile=user_profile,
field_id=field['id'])
if not created and field_value.value == str(field['value']):
# If the field value isn't actually being changed to a different one,
# we have nothing to do here for this field.
# Note: field_value.value is a TextField() so we need to cast field['value']
# to a string for the comparison in this if.
continue
field_value.value = field['value']
if field_value.field.is_renderable():
field_value.rendered_value = render_stream_description(str(field['value']))
field_value.save(update_fields=['value', 'rendered_value'])
else:
field_value.save(update_fields=['value'])
notify_user_update_custom_profile_data(user_profile, {
"id": field_value.field_id,
"value": field_value.value,
"rendered_value": field_value.rendered_value,
"type": field_value.field.field_type})
def check_remove_custom_profile_field_value(user_profile: UserProfile,
field_id: Union[int, str, List[int]]
) -> None:
try:
field = CustomProfileField.objects.get(realm=user_profile.realm, id=field_id)
field_value = CustomProfileFieldValue.objects.get(field=field, user_profile=user_profile)
field_value.delete()
notify_user_update_custom_profile_data(user_profile, {'id': field_id,
'value': None,
'rendered_value': None,
'type': field.field_type})
except CustomProfileField.DoesNotExist:
raise JsonableError(_('Field id {id} not found.').format(id=field_id))
except CustomProfileFieldValue.DoesNotExist:
pass
def do_send_create_user_group_event(user_group: UserGroup, members: List[UserProfile]) -> None:
event = dict(type="user_group",
op="add",
group=dict(name=user_group.name,
members=[member.id for member in members],
description=user_group.description,
id=user_group.id,
),
)
send_event(user_group.realm, event, active_user_ids(user_group.realm_id))
def check_add_user_group(realm: Realm, name: str, initial_members: List[UserProfile],
description: str) -> None:
try:
user_group = create_user_group(name, initial_members, realm, description=description)
do_send_create_user_group_event(user_group, initial_members)
except django.db.utils.IntegrityError:
raise JsonableError(_("User group '%s' already exists.") % (name,))
def do_send_user_group_update_event(user_group: UserGroup, data: Dict[str, Any]) -> None:
event = dict(type="user_group", op='update', group_id=user_group.id, data=data)
send_event(user_group.realm, event, active_user_ids(user_group.realm_id))
def do_update_user_group_name(user_group: UserGroup, name: str) -> None:
try:
user_group.name = name
user_group.save(update_fields=['name'])
except django.db.utils.IntegrityError:
raise JsonableError(_("User group '%s' already exists.") % (name,))
do_send_user_group_update_event(user_group, dict(name=name))
def do_update_user_group_description(user_group: UserGroup, description: str) -> None:
user_group.description = description
user_group.save(update_fields=['description'])
do_send_user_group_update_event(user_group, dict(description=description))
def do_update_outgoing_webhook_service(bot_profile: UserProfile,
service_interface: int,
service_payload_url: str) -> None:
# TODO: First service is chosen because currently one bot can only have one service.
# Update this once multiple services are supported.
service = get_bot_services(bot_profile.id)[0]
service.base_url = service_payload_url
service.interface = service_interface
service.save()
send_event(bot_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(email=bot_profile.email,
user_id=bot_profile.id,
services = [dict(base_url=service.base_url,
interface=service.interface,
token=service.token,)],
),
),
bot_owner_user_ids(bot_profile))
def do_update_bot_config_data(bot_profile: UserProfile,
config_data: Dict[str, str]) -> None:
for key, value in config_data.items():
set_bot_config(bot_profile, key, value)
updated_config_data = get_bot_config(bot_profile)
send_event(bot_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(email=bot_profile.email,
user_id=bot_profile.id,
services = [dict(config_data=updated_config_data)],
),
),
bot_owner_user_ids(bot_profile))
def get_service_dicts_for_bot(user_profile_id: int) -> List[Dict[str, Any]]:
user_profile = get_user_profile_by_id(user_profile_id)
services = get_bot_services(user_profile_id)
service_dicts = [] # type: List[Dict[str, Any]]
if user_profile.bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
service_dicts = [{'base_url': service.base_url,
'interface': service.interface,
'token': service.token,
}
for service in services]
elif user_profile.bot_type == UserProfile.EMBEDDED_BOT:
try:
service_dicts = [{'config_data': get_bot_config(user_profile),
'service_name': services[0].name
}]
# A ConfigError just means that there are no config entries for user_profile.
except ConfigError:
pass
return service_dicts
def get_service_dicts_for_bots(bot_dicts: List[Dict[str, Any]],
realm: Realm) -> Dict[int, List[Dict[str, Any]]]:
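# Bulk variant of get_service_dicts_for_bot: builds a mapping from bot user
# id to that bot's service dicts, fetching Services and embedded-bot configs
# up front to avoid per-bot queries.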
bot_profile_ids = [bot_dict['id'] for bot_dict in bot_dicts]
bot_services_by_uid = defaultdict(list) # type: Dict[int, List[Service]]
for service in Service.objects.filter(user_profile_id__in=bot_profile_ids):
bot_services_by_uid[service.user_profile_id].append(service)
embedded_bot_ids = [bot_dict['id'] for bot_dict in bot_dicts
if bot_dict['bot_type'] == UserProfile.EMBEDDED_BOT]
embedded_bot_configs = get_bot_configs(embedded_bot_ids)
service_dicts_by_uid = {} # type: Dict[int, List[Dict[str, Any]]]
for bot_dict in bot_dicts:
bot_profile_id = bot_dict["id"]
bot_type = bot_dict["bot_type"]
services = bot_services_by_uid[bot_profile_id]
service_dicts = [] # type: List[Dict[str, Any]]
if bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
service_dicts = [{'base_url': service.base_url,
'interface': service.interface,
'token': service.token,
}
for service in services]
elif bot_type == UserProfile.EMBEDDED_BOT:
if bot_profile_id in embedded_bot_configs.keys():
bot_config = embedded_bot_configs[bot_profile_id]
service_dicts = [{'config_data': bot_config,
'service_name': services[0].name
}]
service_dicts_by_uid[bot_profile_id] = service_dicts
return service_dicts_by_uid
def get_owned_bot_dicts(user_profile: UserProfile,
include_all_realm_bots_if_admin: bool=True) -> List[Dict[str, Any]]:
if user_profile.is_realm_admin and include_all_realm_bots_if_admin:
result = get_bot_dicts_in_realm(user_profile.realm)
else:
result = UserProfile.objects.filter(realm=user_profile.realm, is_bot=True,
bot_owner=user_profile).values(*bot_dict_fields)
services_by_ids = get_service_dicts_for_bots(result, user_profile.realm)
return [{'email': botdict['email'],
'user_id': botdict['id'],
'full_name': botdict['full_name'],
'bot_type': botdict['bot_type'],
'is_active': botdict['is_active'],
'api_key': botdict['api_key'],
'default_sending_stream': botdict['default_sending_stream__name'],
'default_events_register_stream': botdict['default_events_register_stream__name'],
'default_all_public_streams': botdict['default_all_public_streams'],
'owner': botdict['bot_owner__email'],
'avatar_url': avatar_url_from_dict(botdict),
'services': services_by_ids[botdict['id']],
}
for botdict in result]
def do_send_user_group_members_update_event(event_name: str,
user_group: UserGroup,
user_ids: List[int]) -> None:
event = dict(type="user_group",
op=event_name,
group_id=user_group.id,
user_ids=user_ids)
send_event(user_group.realm, event, active_user_ids(user_group.realm_id))
def bulk_add_members_to_user_group(user_group: UserGroup,
user_profiles: List[UserProfile]) -> None:
memberships = [UserGroupMembership(user_group_id=user_group.id,
user_profile=user_profile)
for user_profile in user_profiles]
UserGroupMembership.objects.bulk_create(memberships)
user_ids = [up.id for up in user_profiles]
do_send_user_group_members_update_event('add_members', user_group, user_ids)
def remove_members_from_user_group(user_group: UserGroup,
user_profiles: List[UserProfile]) -> None:
UserGroupMembership.objects.filter(
user_group_id=user_group.id,
user_profile__in=user_profiles).delete()
user_ids = [up.id for up in user_profiles]
do_send_user_group_members_update_event('remove_members', user_group, user_ids)
def do_send_delete_user_group_event(realm: Realm, user_group_id: int,
realm_id: int) -> None:
event = dict(type="user_group",
op="remove",
group_id=user_group_id)
send_event(realm, event, active_user_ids(realm_id))
def check_delete_user_group(user_group_id: int, user_profile: UserProfile) -> None:
user_group = access_user_group_by_id(user_group_id, user_profile)
user_group.delete()
do_send_delete_user_group_event(user_profile.realm, user_group_id, user_profile.realm.id)
def missing_any_realm_internal_bots() -> bool:
bot_emails = [bot['email_template'] % (settings.INTERNAL_BOT_DOMAIN,)
for bot in settings.REALM_INTERNAL_BOTS]
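# Count UserProfile rows per internal bot email across all realms; if any
# bot has fewer rows than there are realms, at least one realm is missing it.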
bot_counts = dict(UserProfile.objects.filter(email__in=bot_emails)
.values_list('email')
.annotate(Count('id')))
realm_count = Realm.objects.count()
return any(bot_counts.get(email, 0) < realm_count for email in bot_emails)
def do_send_realm_reactivation_email(realm: Realm) -> None:
url = create_confirmation_link(realm, realm.host, Confirmation.REALM_REACTIVATION)
context = {'confirmation_url': url,
'realm_uri': realm.uri,
'realm_name': realm.name}
send_email_to_admins(
'zerver/emails/realm_reactivation', realm,
from_address=FromAddress.tokenized_no_reply_address(),
from_name="Zulip Account Security", context=context)
def get_zoom_video_call_url(realm: Realm) -> str:
response = request_zoom_video_call_url(
realm.zoom_user_id,
realm.zoom_api_key,
realm.zoom_api_secret
)
if response is None:
return ''
return response['join_url']
def notify_realm_export(user_profile: UserProfile) -> None:
# In the future, we may want to send this event to all realm admins.
event = dict(type='realm_export',
exports=get_realm_exports_serialized(user_profile))
send_event(user_profile.realm, event, [user_profile.id])
def do_delete_realm_export(user_profile: UserProfile, export: RealmAuditLog) -> None:
# Give mypy a hint so it knows `ujson.loads`
# isn't being passed an `Optional[str]`.
export_extra_data = export.extra_data
assert export_extra_data is not None
export_data = ujson.loads(export_extra_data)
delete_export_tarball(export_data.get('export_path'))
export_data.update({'deleted_timestamp': timezone_now().timestamp()})
export.extra_data = ujson.dumps(export_data)
export.save(update_fields=['extra_data'])
notify_realm_export(user_profile)
|
# import things
from flask_table import Table, Col
# Declare your table
class ItemTable(Table):
name = Col('Name')
description = Col('Description')
# Get some objects
class Item(object):
def __init__(self, name, description):
self.name = name
self.description = description
items = [Item('Name1', 'Description1'),
Item('Name2', 'Description2'),
Item('Name3', 'Description3')]
# Or, equivalently, some dicts
# items = [dict(name='Name1', description='Description1'),
# dict(name='Name2', description='Description2'),
# dict(name='Name3', description='Description3')]
# Or, more likely, load items from your database with something like
# items = ItemModel.query.all()
# Populate the table
table = ItemTable(items)
# Print the html
print(table.__html__())
# or just {{ table }} from within a Jinja template
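# A minimal sketch of serving the same table from a Flask view (an addition
# to the snippet above; the app, route, and inline template are illustrative
# assumptions, not part of flask_table itself).
from flask import Flask, render_template_string

app = Flask(__name__)

@app.route('/items')
def show_items():
    # ItemTable implements __html__, so Jinja2 renders {{ table }} as HTML.
    return render_template_string('<h1>Items</h1>{{ table }}', table=ItemTable(items))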
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""A `Network` is way to compose layers: the topological form of a `Model`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import itertools
import json
import os
import numpy as np
import six
from six.moves import zip # pylint: disable=redefined-builtin
from tensorflow.python.eager import context
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.engine import input_layer as input_layer_module
from tensorflow.python.keras.engine import node as node_module
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.saving import hdf5_format
from tensorflow.python.keras.saving import save
from tensorflow.python.keras.saving.saved_model import network_serialization
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.keras.utils.io_utils import ask_to_proceed_with_overwrite
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import py_checkpoint_reader
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.training.tracking import layer_utils as trackable_layer_utils
from tensorflow.python.training.tracking import tracking
from tensorflow.python.training.tracking import util as trackable_utils
from tensorflow.python.util import nest
from tensorflow.python.util import serialization
from tensorflow.python.util import tf_inspect
# pylint: disable=g-import-not-at-top
try:
import h5py
except ImportError:
h5py = None
try:
import yaml
except ImportError:
yaml = None
# pylint: enable=g-import-not-at-top
class Network(base_layer.Layer):
"""A `Network` is a composition of layers.
`Network` is the topological form of a "model". A `Model`
is simply a `Network` with added training routines.
Two types of `Networks` exist: Graph Networks and Subclass Networks. Graph
networks are used in the Keras Functional and Sequential APIs. Subclassed
networks are used when a user subclasses the `Model` class. In general,
more Keras features are supported with Graph Networks than with Subclassed
Networks, specifically:
- Model cloning (`keras.models.clone`)
- Serialization (`model.get_config()/from_config`, `model.to_json()/to_yaml()`)
- Whole-model saving (`model.save()`)
A Graph Network can be instantiated by passing two arguments to `__init__`.
The first argument is the `keras.Input` Tensors that represent the inputs
to the Network. The second argument specifies the output Tensors that
represent the outputs of this Network. Both arguments can be a nested
structure of Tensors.
Example:
```
inputs = {'x1': keras.Input(shape=(10,)), 'x2': keras.Input(shape=(1,))}
t = keras.layers.Dense(1, activation='relu')(inputs['x1'])
outputs = keras.layers.Add()([t, inputs['x2']])
network = Network(inputs, outputs)
```
A Graph Network constructed using the Functional API can also include raw
TensorFlow functions, with the exception of functions that create Variables
or assign ops.
Example:
```
inputs = keras.Input(shape=(10,))
x = keras.layers.Dense(1)(inputs)
outputs = tf.nn.relu(x)
network = Network(inputs, outputs)
```
Subclassed Networks can be instantiated via `name` and (optional) `dynamic`
keyword arguments. Subclassed Networks keep track of their Layers, and their
`call` method can be overridden. Subclassed Networks are typically created
indirectly, by subclassing the `Model` class.
Example:
```
class MyModel(keras.Model):
def __init__(self):
super(MyModel, self).__init__(name='my_model', dynamic=False)
self.layer1 = keras.layers.Dense(10, activation='relu')
def call(self, inputs):
return self.layer1(inputs)
```
Allowed args in `super().__init__`:
name: String name of the model.
dynamic: (Subclassed models only) Set this to `True` if your model should
only be run eagerly, and should not be used to generate a static
computation graph. This attribute is automatically set for Functional API
models.
trainable: Boolean, whether the model's variables should be trainable.
dtype: (Subclassed models only) Default dtype of the model's weights (
default of `None` means use the type of the first input). This attribute
has no effect on Functional API models, which do not have weights of their
own.
"""
# See tf.Module for the usage of this property.
# The key of _layer_call_argspecs is a layer. tf.Module._flatten will fail to
# flatten the key since it is trying to convert Trackable/Layer to a string.
_TF_MODULE_IGNORED_PROPERTIES = frozenset(itertools.chain(
('_layer_call_argspecs', '_compiled_trainable_state'),
base_layer.Layer._TF_MODULE_IGNORED_PROPERTIES
))
def __init__(self, *args, **kwargs): # pylint: disable=super-init-not-called
# Signature detection
if (len(args) == 2 or
len(args) == 1 and 'outputs' in kwargs or
'inputs' in kwargs and 'outputs' in kwargs):
# Graph network
self._init_graph_network(*args, **kwargs)
else:
# Subclassed network
self._init_subclassed_network(**kwargs)
tf_utils.assert_no_legacy_layers(self.layers)
# Several Network methods have "no_automatic_dependency_tracking"
# annotations. Since Network does automatic dependency tracking on attribute
# assignment, including for common data structures such as lists, by default
# we'd have quite a few empty dependencies which users don't care about (or
# would need some way to ignore dependencies automatically, which is confusing
# when applied to user code). Some attributes, such as _layers, would cause
# structural issues (_layers being the place where Layers assigned to tracked
# attributes are stored).
#
# Aside from these aesthetic and structural issues, useless dependencies on
# empty lists shouldn't cause issues; adding or removing them will not break
# checkpoints, but may cause "all Python objects matched" assertions to fail
# (in which case less strict assertions may be substituted if necessary).
@trackable.no_automatic_dependency_tracking
def _base_init(self, name=None, **kwargs):
# The following are implemented as property functions:
# self.trainable_weights
# self.non_trainable_weights
# self.input_spec
# self.losses
# self.updates
generic_utils.validate_kwargs(kwargs, {'trainable', 'dtype', 'dynamic',
'autocast'})
super(Network, self).__init__(name=name, **kwargs)
self._is_compiled = False
# This is True for Sequential networks and Functional networks.
self._compute_output_and_mask_jointly = False
if not hasattr(self, 'optimizer'):
# Don't reset optimizer if already set.
self.optimizer = None
self._scope = None # Never used.
self._reuse = None # Never used.
if context.executing_eagerly():
self._graph = None
else:
self._graph = ops.get_default_graph() # Used in symbolic mode only.
self._trackable_saver = (
trackable_utils.saver_with_op_caching(self))
@trackable.no_automatic_dependency_tracking
def _init_graph_network(self, inputs, outputs, name=None, **kwargs):
generic_utils.validate_kwargs(
kwargs, {'trainable'},
'Functional models may only specify `name` and `trainable` keyword '
'arguments during initialization. Got an unexpected argument:')
# Normalize and set self.inputs, self.outputs.
if isinstance(inputs, list) and len(nest.flatten(inputs)) == 1:
inputs = inputs[0]
if isinstance(outputs, list) and len(nest.flatten(outputs)) == 1:
outputs = outputs[0]
self._nested_outputs = outputs
self._nested_inputs = inputs
self.inputs = nest.flatten(inputs)
self.outputs = nest.flatten(outputs)
if any(not hasattr(tensor, '_keras_history') for tensor in self.outputs):
base_layer_utils.create_keras_history(self._nested_outputs)
self._base_init(name=name, **kwargs)
self._validate_graph_inputs_and_outputs()
# A Network does not create weights of its own, thus it is already
# built.
self.built = True
self._build_input_shape = nest.map_structure(lambda x: x.shape, inputs)
self._compute_output_and_mask_jointly = True
self._is_graph_network = True
# `_expects_training_arg` is True since the `training` argument is always
# present in the signature of the `call` method of a graph network.
self._expects_training_arg = True
self._expects_mask_arg = True
# A graph network does not autocast inputs, as its layers will cast them
# instead.
self._autocast = False
self._input_layers = []
self._output_layers = []
self._input_coordinates = []
self._output_coordinates = []
self._supports_ragged_inputs = None
# This is for performance optimization when calling the Network on new
# inputs. Every time the Network is called on a set on input tensors,
# we compute the output tensors, output masks and output shapes in one pass,
# then cache them here. When any of these outputs is queried later, we
# retrieve it from there instead of recomputing it.
self._output_mask_cache = {}
self._output_tensor_cache = {}
self._output_shape_cache = {}
# Build self._output_layers:
for x in self.outputs:
layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access
self._output_layers.append(layer)
self._output_coordinates.append((layer, node_index, tensor_index))
# Build self._input_layers:
for x in self.inputs:
layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access
# It's supposed to be an input layer, so only one node
# and one tensor output.
assert node_index == 0
assert tensor_index == 0
self._input_layers.append(layer)
self._input_coordinates.append((layer, node_index, tensor_index))
# Keep track of the network's nodes and layers.
nodes, nodes_by_depth, layers, _ = _map_graph_network(
self.inputs, self.outputs)
self._network_nodes = nodes
self._nodes_by_depth = nodes_by_depth
self._layers = layers
self._layer_call_argspecs = {}
for layer in self._layers:
self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call)
layer._attribute_sentinel.add_parent(self._attribute_sentinel)
# Create the node linking internal inputs to internal outputs.
node_module.Node(
outbound_layer=self,
inbound_layers=[],
node_indices=[],
tensor_indices=[],
input_tensors=self._nested_inputs,
output_tensors=self._nested_outputs)
# Build self.input_names and self.output_names.
self._set_output_names()
self.input_names = []
self._feed_input_names = []
self._feed_inputs = []
self._feed_input_shapes = []
for layer in self._input_layers:
self.input_names.append(layer.name)
if layer.is_placeholder:
self._feed_input_names.append(layer.name)
# Use batch_input_shape here because non-eager composite tensors may not
# have a shape attribute that's meaningful (sparse, for instance, has
# a tensor that's non-constant and needs to be fed). This means that
# input layers that create placeholders will need to have the
# batch_input_shape attr to allow for input shape validation.
self._feed_input_shapes.append(layer._batch_input_shape)
self._feed_inputs.append(layer.input)
self._compute_tensor_usage_count()
def _set_output_names(self):
"""Assigns unique names to the Network's outputs.
Output layers with multiple output tensors would otherwise lead to duplicate
names in self.output_names.
"""
uniquified = []
output_names = set()
prefix_count = {}
for layer in self._output_layers:
proposal = layer.name
while proposal in output_names:
existing_count = prefix_count.get(layer.name, 1)
proposal = '{}_{}'.format(layer.name, existing_count)
prefix_count[layer.name] = existing_count + 1
output_names.add(proposal)
uniquified.append(proposal)
self.output_names = uniquified
@trackable.no_automatic_dependency_tracking
def _init_subclassed_network(self, name=None, **kwargs):
self._base_init(name=name, **kwargs)
self._is_graph_network = False
self._init_call_fn_args()
self._autocast = kwargs.get('autocast',
base_layer_utils.v2_dtype_behavior_enabled())
self._supports_ragged_inputs = None
self.outputs = []
self.inputs = []
self.built = False
self._build_input_shape = None
@property
@trackable_layer_utils.cache_recursive_attribute('dynamic')
def dynamic(self):
if self._is_graph_network:
return any(layer.dynamic for layer in self.layers)
return self._dynamic or any(layer.dynamic for layer in self.layers)
@property
def _layer_checkpoint_dependencies(self):
"""Dictionary of layer dependencies to be included in the checkpoint."""
# Use getattr because this function can be called from __setattr__, at which
# point the _is_graph_network attribute has not been created.
if (not getattr(self, '_is_graph_network', False) and
base_layer_utils.is_subclassed(self)):
return {} # Only add layer dependencies for graph networks
weight_layer_index = 0
dependencies = {}
for layer_index, layer in enumerate(self.layers):
try:
if layer.weights:
# Keep a separate index for layers which have weights. This allows
# users to insert Layers without weights anywhere in the network
# without breaking checkpoints.
dependencies['layer_with_weights-%d' % weight_layer_index] = layer
weight_layer_index += 1
except ValueError:
# The layer might have weights, but may not be built yet. We just treat
# it as layer without weight.
pass
# Even if it doesn't have weights, we should still track everything in
# case it has/will have Trackable dependencies.
dependencies['layer-%d' % layer_index] = layer
return dependencies
@property
def _checkpoint_dependencies(self):
dependencies = [
trackable.TrackableReference(name=name, ref=layer)
for name, layer in self._layer_checkpoint_dependencies.items()]
dependencies.extend(super(Network, self)._checkpoint_dependencies)
return dependencies
def _lookup_dependency(self, name):
layer_dependencies = self._layer_checkpoint_dependencies
if name in layer_dependencies:
return layer_dependencies[name]
return super(Network, self)._lookup_dependency(name)
def _handle_deferred_layer_dependencies(self, layers):
"""Handles layer checkpoint dependencies that are added after init."""
layer_checkpoint_dependencies = self._layer_checkpoint_dependencies
layer_to_name = {v: k for k, v in layer_checkpoint_dependencies.items()}
for layer in layers:
if layer in layer_to_name:
self._handle_deferred_dependencies(name=layer_to_name[layer],
trackable=layer)
def __setattr__(self, name, value):
if not getattr(self, '_self_setattr_tracking', True):
super(Network, self).__setattr__(name, value)
return
if all(
isinstance(v, (base_layer.Layer,
data_structures.TrackableDataStructure)) or
trackable_layer_utils.has_weights(v) for v in nest.flatten(value)):
try:
self._is_graph_network
except AttributeError:
# six.raise_from suppresses the original AttributeError from being raised
six.raise_from(
RuntimeError('It looks like you are subclassing `Model` and you '
'forgot to call `super(YourClass, self).__init__()`.'
' Always start with this line.'), None)
super(Network, self).__setattr__(name, value)
# Keep track of metric instance created in subclassed model/layer.
# We do this so that we can maintain the correct order of metrics by adding
# the instance to the `metrics` list as soon as it is created.
from tensorflow.python.keras import metrics as metrics_module # pylint: disable=g-import-not-at-top
if isinstance(value, metrics_module.Metric):
self._metrics.append(value)
@property
@trackable_layer_utils.cache_recursive_attribute('stateful')
def stateful(self):
return any(getattr(layer, 'stateful', False) for layer in self.layers)
def reset_states(self):
for layer in self.layers:
if hasattr(layer, 'reset_states') and getattr(layer, 'stateful', False):
layer.reset_states()
@property
def state_updates(self):
"""Returns the `updates` from all layers that are stateful.
This is useful for separating training updates and
state updates, e.g. when we need to update a layer's internal state
during prediction.
Returns:
A list of update ops.
"""
state_updates = []
for layer in self.layers:
if getattr(layer, 'stateful', False):
if hasattr(layer, 'updates'):
state_updates += layer.updates
return state_updates
@property
def weights(self):
"""Returns the list of all layer variables/weights.
Returns:
A list of variables.
"""
return self._dedup_weights(self._undeduplicated_weights)
@property
def _undeduplicated_weights(self):
"""Returns the undeduplicated list of all layer variables/weights."""
self._assert_weights_created()
weights = []
for layer in self._layers:
weights += layer.weights
weights += (self._trainable_weights + self._non_trainable_weights)
return weights
@property
@tracking.cached_per_instance
def _should_compute_mask(self):
return self._is_graph_network and super(Network, self)._should_compute_mask
def compute_mask(self, inputs, mask):
if not self._is_graph_network:
return None
# TODO(omalleyt): b/123540974 This function is not really safe to call
# by itself because it will duplicate any updates and losses in graph
# mode by `call`ing the Layers again.
output_tensors = self._run_internal_graph(inputs, mask=mask)
return nest.map_structure(lambda t: t._keras_mask, output_tensors)
@property
def layers(self):
return list(
trackable_layer_utils.filter_empty_layer_containers(self._layers))
def get_layer(self, name=None, index=None):
"""Retrieves a layer based on either its name (unique) or index.
If `name` and `index` are both provided, `index` will take precedence.
Indices are based on order of horizontal graph traversal (bottom-up).
Arguments:
name: String, name of layer.
index: Integer, index of layer.
Returns:
A layer instance.
Raises:
ValueError: In case of invalid layer name or index.
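Example (illustrative; the layer name 'dense_1' is an assumption):
```python
first_layer = model.get_layer(index=0)
dense_layer = model.get_layer(name='dense_1')
```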
"""
# TODO(fchollet): We could build a dictionary based on layer names
# since they are constant, but we have not done that yet.
if index is not None:
if len(self.layers) <= index:
raise ValueError('Was asked to retrieve layer at index ' + str(index) +
' but model only has ' + str(len(self.layers)) +
' layers.')
else:
return self.layers[index]
else:
if not name:
raise ValueError('Provide either a layer name or layer index.')
for layer in self.layers:
if layer.name == name:
return layer
raise ValueError('No such layer: ' + name)
@property
def trainable_weights(self):
self._assert_weights_created()
return self._dedup_weights(
trackable_layer_utils.gather_trainable_weights(
trainable=self.trainable,
sub_layers=self._layers,
extra_variables=self._trainable_weights))
@property
def non_trainable_weights(self):
self._assert_weights_created()
return self._dedup_weights(
trackable_layer_utils.gather_non_trainable_weights(
trainable=self.trainable,
sub_layers=self._layers,
extra_variables=self._non_trainable_weights +
self._trainable_weights))
@property
def input_spec(self):
"""Gets the network's input specs.
Returns:
A list of `InputSpec` instances (one per input to the model)
or a single instance if the model has only one input.
"""
# If subclassed model, can't assume anything.
if not self._is_graph_network:
return None
specs = []
for layer in self._input_layers:
if layer.input_spec is None:
specs.append(None)
else:
if not isinstance(layer.input_spec, list):
raise TypeError('Layer ' + layer.name +
' has an input_spec attribute that '
'is not a list. We expect a list. '
'Found input_spec = ' + str(layer.input_spec))
specs += layer.input_spec
if len(specs) == 1:
return specs[0]
return specs
@base_layer_utils.default
def build(self, input_shape):
"""Builds the model based on input shapes received.
This is to be used for subclassed models, which do not know at instantiation
time what their inputs look like.
This method only exists for users who want to call `model.build()` in a
standalone way (as a substitute for calling the model on real data to
build it). It will never be called by the framework (and thus it will
never throw unexpected errors in an unrelated workflow).
Args:
input_shape: Single tuple, TensorShape, or list of shapes, where shapes
are tuples, integers, or TensorShapes.
Raises:
ValueError:
1. In case of invalid user-provided data (not of type tuple,
list, or TensorShape).
2. If the model requires call arguments that are agnostic
to the input shapes (positional or kwarg in call signature).
3. If not all layers were properly built.
4. If float type inputs are not supported within the layers.
In each of these cases, the user should build their model by calling it
on real tensor data.
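Example (an illustrative sketch; assumes `MyModel` is a subclassed model
whose `call` expects a single 2-D input):
```python
model = MyModel()
model.build(input_shape=(None, 10))  # batch dimension left unspecified
model.summary()
```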
"""
if self._is_graph_network:
super(Network, self).build(input_shape)
return
# If subclass network
if input_shape is None:
raise ValueError('Input shape must be defined when calling build on a '
'model subclass network.')
valid_types = (tuple, list, tensor_shape.TensorShape)
if not isinstance(input_shape, valid_types):
raise ValueError('Specified input shape is not one of the valid types. '
'Please specify a batch input shape of type tuple or '
'list of input shapes. User provided '
'input type: {}'.format(type(input_shape)))
if input_shape and not self.inputs:
# We create placeholders for the `None`s in the shape and build the model
# in a Graph. Since tf.Variable is compatible with both eager execution
# and graph building, the variables created after building the model in
# a Graph are still valid when executing eagerly.
if context.executing_eagerly():
graph = func_graph.FuncGraph('build_graph')
else:
graph = backend.get_graph()
with graph.as_default():
if isinstance(input_shape, list):
x = [base_layer_utils.generate_placeholders_from_shape(shape)
for shape in input_shape]
else:
x = base_layer_utils.generate_placeholders_from_shape(input_shape)
kwargs = {}
call_signature = self._call_full_argspec
call_args = call_signature.args
# Exclude `self`, `inputs`, and any argument with a default value.
if len(call_args) > 2:
if call_signature.defaults:
call_args = call_args[2:-len(call_signature.defaults)]
else:
call_args = call_args[2:]
for arg in call_args:
if arg == 'training':
# Case where `training` is a positional arg with no default.
kwargs['training'] = False
else:
# Has invalid call signature with unknown positional arguments.
raise ValueError(
'Currently, you cannot build your model if it has '
'positional or keyword arguments that are not '
'inputs to the model, but are required for its '
'`call` method. Instead, in order to instantiate '
'and build your model, `call` your model on real '
'tensor data with all expected call arguments.')
elif len(call_args) < 2:
# Signature without `inputs`.
raise ValueError('You can only call `build` on a model if its `call` '
'method accepts an `inputs` argument.')
try:
self.call(x, **kwargs)
except (errors.InvalidArgumentError, TypeError):
raise ValueError('You cannot build your model by calling `build` '
'if your layers do not support float type inputs. '
'Instead, in order to instantiate and build your '
'model, `call` your model on real tensor data (of '
'the correct dtype).')
super(Network, self).build(input_shape)
def call(self, inputs, training=None, mask=None):
"""Calls the model on new inputs.
In this case `call` just reapplies
all ops in the graph to the new inputs
(e.g. build a new computational graph from the provided inputs).
Arguments:
inputs: A tensor or list of tensors.
training: Boolean or boolean scalar tensor, indicating whether to run
the `Network` in training mode or inference mode.
mask: A mask or list of masks. A mask can be
either a tensor or None (no mask).
Returns:
A tensor if there is a single output, or
a list of tensors if there are more than one outputs.
"""
if not self._is_graph_network:
raise NotImplementedError('When subclassing the `Model` class, you should'
' implement a `call` method.')
return self._run_internal_graph(
inputs, training=training, mask=mask,
convert_kwargs_to_constants=base_layer_utils.call_context().saving)
def compute_output_shape(self, input_shape):
if not self._is_graph_network:
return super(Network, self).compute_output_shape(input_shape)
# Convert any shapes in tuple format to TensorShapes.
input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)
if len(nest.flatten(input_shape)) != len(nest.flatten(self._input_layers)):
raise ValueError('Invalid input_shape argument ' + str(input_shape) +
': model has ' + str(len(self._input_layers)) +
' tensor inputs.')
cache_key = generic_utils.object_list_uid(input_shape)
if cache_key in self._output_shape_cache:
# Cache hit. Return shapes as TensorShapes.
return self._output_shape_cache[cache_key]
layers_to_output_shapes = {}
for layer, shape in zip(self._input_layers, nest.flatten(input_shape)):
# It's an input layer: then `compute_output_shape` is identity,
# and there is only one node and one tensor.
shape_key = layer.name + '_0_0'
layers_to_output_shapes[shape_key] = shape
depth_keys = list(self._nodes_by_depth.keys())
depth_keys.sort(reverse=True)
# Iterate over nodes, by depth level.
if len(depth_keys) > 1:
for depth in depth_keys:
nodes = self._nodes_by_depth[depth]
for node in nodes:
# This is always a single layer, never a list.
layer = node.outbound_layer
if layer in self._input_layers:
# We've already covered the input layers
# a few lines above.
continue
# Potentially redundant list,
# same size as node.input_tensors.
layer_input_shapes = []
for inbound_layer, node_id, tensor_id, _ in node.iterate_inbound():
input_layer_key = inbound_layer.name + '_%s_%s' % (node_id,
tensor_id)
layer_input_shapes.append(layers_to_output_shapes[input_layer_key])
layer_input_shapes = nest.pack_sequence_as(node.inbound_layers,
layer_input_shapes)
# Layers expect shapes to be tuples for `compute_output_shape`.
layer_input_shapes = tf_utils.convert_shapes(
layer_input_shapes, to_tuples=True)
layer_output_shapes = layer.compute_output_shape(layer_input_shapes)
# Convert back to TensorShapes.
layer_output_shapes = tf_utils.convert_shapes(
layer_output_shapes, to_tuples=False)
node_index = layer._inbound_nodes.index(node) # pylint: disable=protected-access
for j, shape in enumerate(nest.flatten(layer_output_shapes)):
shape_key = layer.name + '_%s_%s' % (node_index, j)
layers_to_output_shapes[shape_key] = shape
# Read final output shapes from layers_to_output_shapes.
output_shapes = []
for i in range(len(self._output_layers)):
layer, node_index, tensor_index = self._output_coordinates[i]
shape_key = layer.name + '_%s_%s' % (node_index, tensor_index)
output_shapes.append(layers_to_output_shapes[shape_key])
output_shapes = nest.pack_sequence_as(self._nested_outputs, output_shapes)
# Store in cache.
self._output_shape_cache[cache_key] = output_shapes
# Return shapes as TensorShapes.
return output_shapes
def _run_internal_graph(self, inputs, training=None, mask=None,
convert_kwargs_to_constants=False):
"""Computes output tensors for new inputs.
# Note:
- Can be run on non-Keras tensors.
Arguments:
inputs: Tensor or nested structure of Tensors.
training: Boolean learning phase.
mask: (Optional) Tensor or nested structure of Tensors.
convert_kwargs_to_constants: Whether to convert Tensor kwargs to
constants. This is used when tracing the model call function during
saving to ensure that external tensors aren't captured.
Returns:
Two lists: output_tensors, output_masks
"""
# Note: masking support is relevant mainly for Keras.
# It cannot be factored out without having to fully reimplement the network
# calling logic on the Keras side. We choose to incorporate it in
# Network because 1) it may be useful to fully support in tf.layers in
# the future and 2) Keras is a major user of Network. If you don't
# use masking, it does not interfere with regular behavior at all and you
# can ignore it.
if isinstance(inputs, dict) and isinstance(self._nested_inputs,
(list, tuple)):
# Backwards compat: Allows passing a dict to a Model constructed with a
# list. Matches dict keys to input names.
inputs = [
inputs[inp._keras_history.layer.name] for inp in self._nested_inputs
]
else:
inputs = nest.flatten(inputs)
if mask is None:
masks = [None for _ in range(len(inputs))]
else:
masks = nest.flatten(mask)
for input_t, mask in zip(inputs, masks):
input_t._keras_mask = mask
# Dictionary mapping reference tensors to computed tensors.
tensor_dict = {}
for x, y in zip(self.inputs, inputs):
x_id = str(id(x))
tensor_dict[x_id] = [y] * self._tensor_usage_count[x_id]
if isinstance(x, ops.Tensor) and isinstance(y, ops.Tensor):
try:
y.set_shape(y.shape.merge_with(x.shape))
except ValueError:
logging.warning(
'Model was constructed with shape {} for input {}, but it was '
're-called on a Tensor with incompatible shape {}.'
.format(x.shape, x, y.shape))
depth_keys = list(self._nodes_by_depth.keys())
depth_keys.sort(reverse=True)
# Ignore the InputLayers when computing the graph.
depth_keys = depth_keys[1:]
for depth in depth_keys:
nodes = self._nodes_by_depth[depth]
for node in nodes:
# This is always a single layer, never a list.
layer = node.outbound_layer
if all(
str(id(tensor)) in tensor_dict
for tensor in nest.flatten(node.input_tensors)):
# Call layer (reapplying ops to new inputs).
computed_tensors = nest.map_structure(
lambda t: tensor_dict[str(id(t))].pop(), node.input_tensors)
# Ensure `training` arg propagation if applicable.
kwargs = copy.copy(node.arguments) if node.arguments else {}
if convert_kwargs_to_constants:
kwargs = _map_tensors_to_constants(kwargs)
argspec = self._layer_call_argspecs[layer].args
if 'training' in argspec:
kwargs.setdefault('training', training)
if (type(kwargs['training']) is ops.Tensor and # pylint: disable=unidiomatic-typecheck
any([kwargs['training'] is x
for x in backend._GRAPH_LEARNING_PHASES.values()])):
kwargs['training'] = training # Materialize placeholder.
# Map Keras tensors in kwargs to their computed value.
def _map_tensor_if_from_keras_layer(t):
if (isinstance(t,
(ops.Tensor, composite_tensor.CompositeTensor)) and
hasattr(t, '_keras_history')):
t_id = str(id(t))
return tensor_dict[t_id].pop()
return t
kwargs = nest.map_structure(_map_tensor_if_from_keras_layer, kwargs)
# Compute outputs.
output_tensors = layer(computed_tensors, **kwargs)
# Update tensor_dict.
for x, y in zip(
nest.flatten(node.output_tensors), nest.flatten(output_tensors)):
x_id = str(id(x))
tensor_dict[x_id] = [y] * self._tensor_usage_count[x_id]
output_tensors = []
output_shapes = []
for x in self.outputs:
assert str(id(x)) in tensor_dict, 'Could not compute output ' + str(x)
tensor = tensor_dict[str(id(x))].pop()
output_shapes.append(x.shape)
output_tensors.append(tensor)
if output_shapes is not None:
input_shapes = [x.shape for x in inputs]
cache_key = generic_utils.object_list_uid(input_shapes)
self._output_shape_cache[cache_key] = nest.pack_sequence_as(
self._nested_outputs, output_shapes)
output_tensors = nest.pack_sequence_as(self._nested_outputs, output_tensors)
return output_tensors
def get_config(self):
if not self._is_graph_network:
raise NotImplementedError
return copy.deepcopy(get_network_config(self))
@classmethod
def from_config(cls, config, custom_objects=None):
"""Instantiates a Model from its config (output of `get_config()`).
Arguments:
config: Model config dictionary.
custom_objects: Optional dictionary mapping names
(strings) to custom classes or functions to be
considered during deserialization.
Returns:
A model instance.
Raises:
ValueError: In case of improperly formatted config dict.
"""
input_tensors, output_tensors, created_layers = reconstruct_from_config(
config, custom_objects)
model = cls(inputs=input_tensors, outputs=output_tensors,
name=config.get('name'))
connect_ancillary_layers(model, created_layers)
return model
def save(self,
filepath,
overwrite=True,
include_optimizer=True,
save_format=None,
signatures=None,
options=None):
"""Saves the model to Tensorflow SavedModel or a single HDF5 file.
The savefile includes:
- The model architecture, allowing to re-instantiate the model.
- The model weights.
- The state of the optimizer, allowing to resume training
exactly where you left off.
This allows you to save the entirety of the state of a model
in a single file.
Saved models can be reinstantiated via `keras.models.load_model`.
The model returned by `load_model` is a compiled model ready to be used
(unless the saved model was never compiled in the first place).
Models built with the Sequential and Functional API can be saved to both the
HDF5 and SavedModel formats. Subclassed models can only be saved with the
SavedModel format.
Note that the model weights may have different scoped names after being
loaded. Scoped names include the model/layer names, such as
"dense_1/kernel:0"`. It is recommended that you use the layer properties to
access specific variables, e.g. `model.get_layer("dense_1").kernel`.
Arguments:
filepath: String, path to SavedModel or H5 file to save the model.
overwrite: Whether to silently overwrite any existing file at the
target location, or provide the user with a manual prompt.
include_optimizer: If True, save optimizer's state together.
save_format: Either 'tf' or 'h5', indicating whether to save the model
to Tensorflow SavedModel or HDF5. Defaults to 'tf' in TF 2.X, and
'h5' in TF 1.X.
signatures: Signatures to save with the SavedModel. Applicable to the
'tf' format only. Please see the `signatures` argument in
`tf.saved_model.save` for details.
options: Optional `tf.saved_model.SaveOptions` object that specifies
options for saving to SavedModel.
Example:
```python
from keras.models import load_model
model.save('my_model.h5') # creates a HDF5 file 'my_model.h5'
del model # deletes the existing model
# returns a compiled model
# identical to the previous one
model = load_model('my_model.h5')
```
"""
save.save_model(self, filepath, overwrite, include_optimizer, save_format,
signatures, options)
def save_weights(self, filepath, overwrite=True, save_format=None):
"""Saves all layer weights.
Either saves in HDF5 or in TensorFlow format based on the `save_format`
argument.
When saving in HDF5 format, the weight file has:
- `layer_names` (attribute), a list of strings
(ordered names of model layers).
- For every layer, a `group` named `layer.name`
- For every such layer group, a group attribute `weight_names`,
a list of strings
(ordered names of weights tensor of the layer).
- For every weight in the layer, a dataset
storing the weight value, named after the weight tensor.
When saving in TensorFlow format, all objects referenced by the network are
saved in the same format as `tf.train.Checkpoint`, including any `Layer`
instances or `Optimizer` instances assigned to object attributes. For
networks constructed from inputs and outputs using `tf.keras.Model(inputs,
outputs)`, `Layer` instances used by the network are tracked/saved
automatically. For user-defined classes which inherit from `tf.keras.Model`,
`Layer` instances must be assigned to object attributes, typically in the
constructor. See the documentation of `tf.train.Checkpoint` and
`tf.keras.Model` for details.
While the formats are the same, do not mix `save_weights` and
`tf.train.Checkpoint`. Checkpoints saved by `Model.save_weights` should be
loaded using `Model.load_weights`. Checkpoints saved using
`tf.train.Checkpoint.save` should be restored using the corresponding
`tf.train.Checkpoint.restore`. Prefer `tf.train.Checkpoint` over
`save_weights` for training checkpoints.
The TensorFlow format matches objects and variables by starting at a root
object, `self` for `save_weights`, and greedily matching attribute
names. For `Model.save` this is the `Model`, and for `Checkpoint.save` this
is the `Checkpoint` even if the `Checkpoint` has a model attached. This
means saving a `tf.keras.Model` using `save_weights` and loading into a
`tf.train.Checkpoint` with a `Model` attached (or vice versa) will not match
the `Model`'s variables. See the [guide to training
checkpoints](https://www.tensorflow.org/guide/checkpoint) for details
on the TensorFlow format.
Arguments:
filepath: String, path to the file to save the weights to. When saving
in TensorFlow format, this is the prefix used for checkpoint files
(multiple files are generated). Note that the '.h5' suffix causes
weights to be saved in HDF5 format.
overwrite: Whether to silently overwrite any existing file at the
target location, or provide the user with a manual prompt.
save_format: Either 'tf' or 'h5'. A `filepath` ending in '.h5' or
'.keras' will default to HDF5 if `save_format` is `None`. Otherwise
`None` defaults to 'tf'.
Raises:
ImportError: If h5py is not available when attempting to save in HDF5
format.
ValueError: For invalid/unknown format arguments.
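Example (illustrative sketch; file paths are placeholders):
```python
model.save_weights('my_checkpoint')   # TensorFlow checkpoint format
model.save_weights('my_weights.h5')   # HDF5 format (requires h5py)
```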
"""
self._assert_weights_created()
filepath_is_h5 = _is_hdf5_filepath(filepath)
if save_format is None:
if filepath_is_h5:
save_format = 'h5'
else:
save_format = 'tf'
else:
user_format = save_format.lower().strip()
if user_format in ('tensorflow', 'tf'):
save_format = 'tf'
elif user_format in ('hdf5', 'h5', 'keras'):
save_format = 'h5'
else:
raise ValueError(
'Unknown format "%s". Was expecting one of {"tf", "h5"}.' % (
save_format,))
if save_format == 'tf' and filepath_is_h5:
raise ValueError(
('save_weights got save_format="tf"/"tensorflow", but the '
'filepath ("%s") looks like an HDF5 file. Omit the ".h5"/".keras" '
'when saving in TensorFlow format.')
% filepath)
if save_format == 'h5' and h5py is None:
raise ImportError(
'`save_weights` requires h5py when saving in hdf5.')
if save_format == 'tf':
check_filepath = filepath + '.index'
else:
check_filepath = filepath
# If file exists and should not be overwritten:
if not overwrite and os.path.isfile(check_filepath):
proceed = ask_to_proceed_with_overwrite(check_filepath)
if not proceed:
return
if save_format == 'h5':
with h5py.File(filepath, 'w') as f:
hdf5_format.save_weights_to_hdf5_group(f, self.layers)
else:
if context.executing_eagerly():
session = None
else:
session = backend.get_session()
optimizer = getattr(self, 'optimizer', None)
if (optimizer
and not isinstance(optimizer, trackable.Trackable)):
logging.warning(
('This model was compiled with a Keras optimizer (%s) but is being '
'saved in TensorFlow format with `save_weights`. The model\'s '
'weights will be saved, but unlike with TensorFlow optimizers in '
'the TensorFlow format the optimizer\'s state will not be '
'saved.\n\nConsider using a TensorFlow optimizer from `tf.train`.')
% (optimizer,))
self._trackable_saver.save(filepath, session=session)
# Record this checkpoint so it's visible from tf.train.latest_checkpoint.
checkpoint_management.update_checkpoint_state_internal(
save_dir=os.path.dirname(filepath),
model_checkpoint_path=filepath,
save_relative_paths=True,
all_model_checkpoint_paths=[filepath])
def load_weights(self, filepath, by_name=False, skip_mismatch=False):
"""Loads all layer weights, either from a TensorFlow or an HDF5 weight file.
If `by_name` is False weights are loaded based on the network's
topology. This means the architecture should be the same as when the weights
were saved. Note that layers that don't have weights are not taken into
account in the topological ordering, so adding or removing layers is fine as
long as they don't have weights.
If `by_name` is True, weights are loaded into layers only if they share the
same name. This is useful for fine-tuning or transfer-learning models where
some of the layers have changed.
Only topological loading (`by_name=False`) is supported when loading weights
from the TensorFlow format. Note that topological loading differs slightly
between TensorFlow and HDF5 formats for user-defined classes inheriting from
`tf.keras.Model`: HDF5 loads based on a flattened list of weights, while the
TensorFlow format loads based on the object-local names of attributes to
which layers are assigned in the `Model`'s constructor.
Arguments:
filepath: String, path to the weights file to load. For weight files in
TensorFlow format, this is the file prefix (the same as was passed
to `save_weights`).
by_name: Boolean, whether to load weights by name or by topological
order. Only topological loading is supported for weight files in
TensorFlow format.
skip_mismatch: Boolean, whether to skip loading of layers where there is
a mismatch in the number of weights, or a mismatch in the shape of
the weight (only valid when `by_name=True`).
Returns:
When loading a weight file in TensorFlow format, returns the same status
object as `tf.train.Checkpoint.restore`. When graph building, restore
ops are run automatically as soon as the network is built (on first call
for user-defined classes inheriting from `Model`, immediately if it is
already built).
When loading weights in HDF5 format, returns `None`.
Raises:
ImportError: If h5py is not available and the weight file is in HDF5
format.
ValueError: If `skip_mismatch` is set to `True` when `by_name` is
`False`.
"""
if skip_mismatch and not by_name:
raise ValueError(
'When calling model.load_weights, skip_mismatch can only be set to '
'True when by_name is True.')
if _is_hdf5_filepath(filepath):
save_format = 'h5'
else:
try:
py_checkpoint_reader.NewCheckpointReader(filepath)
save_format = 'tf'
except errors_impl.DataLossError:
# The checkpoint is not readable in TensorFlow format. Try HDF5.
save_format = 'h5'
if save_format == 'tf':
status = self._trackable_saver.restore(filepath)
if by_name:
raise NotImplementedError(
'Weights may only be loaded based on topology into Models when '
'loading TensorFlow-formatted weights (got by_name=True to '
'load_weights).')
if not context.executing_eagerly():
session = backend.get_session()
# Restore existing variables (if any) immediately, and set up a
# streaming restore for any variables created in the future.
trackable_utils.streaming_restore(status=status, session=session)
status.assert_nontrivial_match()
return status
if h5py is None:
raise ImportError(
'`load_weights` requires h5py when loading weights from HDF5.')
if self._is_graph_network and not self.built:
raise NotImplementedError(
'Unable to load weights saved in HDF5 format into a subclassed '
'Model which has not created its variables yet. Call the Model '
'first, then load the weights.')
self._assert_weights_created()
with h5py.File(filepath, 'r') as f:
if 'layer_names' not in f.attrs and 'model_weights' in f:
f = f['model_weights']
if by_name:
hdf5_format.load_weights_from_hdf5_group_by_name(
f, self.layers, skip_mismatch=skip_mismatch)
else:
hdf5_format.load_weights_from_hdf5_group(f, self.layers)
def _updated_config(self):
"""Util shared between different serialization methods.
Returns:
Model config with Keras version information added.
"""
from tensorflow.python.keras import __version__ as keras_version # pylint: disable=g-import-not-at-top
config = self.get_config()
model_config = {
'class_name': self.__class__.__name__,
'config': config,
'keras_version': keras_version,
'backend': backend.backend()
}
return model_config
def to_json(self, **kwargs):
"""Returns a JSON string containing the network configuration.
To load a network from a JSON save file, use
`keras.models.model_from_json(json_string, custom_objects={})`.
Arguments:
**kwargs: Additional keyword arguments
to be passed to `json.dumps()`.
Returns:
A JSON string.
"""
model_config = self._updated_config()
return json.dumps(
model_config, default=serialization.get_json_type, **kwargs)
def to_yaml(self, **kwargs):
"""Returns a yaml string containing the network configuration.
To load a network from a yaml save file, use
`keras.models.model_from_yaml(yaml_string, custom_objects={})`.
`custom_objects` should be a dictionary mapping
the names of custom losses / layers / etc to the corresponding
functions / classes.
Arguments:
**kwargs: Additional keyword arguments
to be passed to `yaml.dump()`.
Returns:
A YAML string.
Raises:
ImportError: if yaml module is not found.
"""
if yaml is None:
raise ImportError(
'Requires yaml module installed (`pip install pyyaml`).')
return yaml.dump(self._updated_config(), **kwargs)
def summary(self, line_length=None, positions=None, print_fn=None):
"""Prints a string summary of the network.
Arguments:
line_length: Total length of printed lines
(e.g. set this to adapt the display to different
terminal window sizes).
positions: Relative or absolute positions of log elements
in each line. If not provided,
defaults to `[.33, .55, .67, 1.]`.
print_fn: Print function to use. Defaults to `print`.
It will be called on each line of the summary.
You can set it to a custom function
in order to capture the string summary.
Raises:
ValueError: if `summary()` is called before the model is built.
"""
if not self.built:
raise ValueError('This model has not yet been built. '
'Build the model first by calling `build()` or calling '
'`fit()` with some data, or specify '
'an `input_shape` argument in the first layer(s) for '
'automatic build.')
layer_utils.print_summary(self,
line_length=line_length,
positions=positions,
print_fn=print_fn)
def _validate_graph_inputs_and_outputs(self):
"""Validates the inputs and outputs of a Graph Network."""
# Check for redundancy in inputs.
if len({id(i) for i in self.inputs}) != len(self.inputs):
raise ValueError('The list of inputs passed to the model '
'is redundant. '
'All inputs should only appear once.'
' Found: ' + str(self.inputs))
for x in self.inputs:
# Check that x has appropriate `_keras_history` metadata.
if not hasattr(x, '_keras_history'):
cls_name = self.__class__.__name__
raise ValueError('Input tensors to a ' + cls_name + ' ' +
'must come from `tf.keras.Input`. '
'Received: ' + str(x) +
' (missing previous layer metadata).')
# Check that x is an input tensor.
# pylint: disable=protected-access
layer = x._keras_history.layer
if len(layer._inbound_nodes) > 1 or (
layer._inbound_nodes and layer._inbound_nodes[0].inbound_layers):
cls_name = self.__class__.__name__
logging.warning(cls_name + ' inputs must come from '
'`tf.keras.Input` (thus holding past layer metadata), '
'they cannot be the output of '
'a previous non-Input layer. '
'Here, a tensor specified as '
'input to "' + self.name + '" was not an Input tensor, '
'it was generated by layer ' + layer.name + '.\n'
'Note that input tensors are '
'instantiated via `tensor = tf.keras.Input(shape)`.\n'
'The tensor that caused the issue was: ' + str(x.name))
if isinstance(x, ragged_tensor.RaggedTensor):
self._supports_ragged_inputs = True
# Check compatibility of batch sizes of Input Layers.
input_batch_sizes = [
training_utils.get_static_batch_size(x._keras_history.layer)
for x in self.inputs
]
consistent_batch_size = None
for batch_size in input_batch_sizes:
if batch_size is not None:
if (consistent_batch_size is not None and
batch_size != consistent_batch_size):
raise ValueError('The specified batch sizes of the Input Layers'
' are incompatible. Found batch sizes: {}'.format(
input_batch_sizes))
consistent_batch_size = batch_size
for x in self.outputs:
if not hasattr(x, '_keras_history'):
cls_name = self.__class__.__name__
raise ValueError('Output tensors to a ' + cls_name + ' must be '
'the output of a TensorFlow `Layer` '
'(thus holding past layer metadata). Found: ' + str(x))
def _insert_layers(self, layers, relevant_nodes=None):
"""Inserts Layers into the Network after Network creation.
This is only valid for Keras Graph Networks. Layers added via this function
will be included in the `call` computation and `get_config` of this Network.
They will not be added to the Network's outputs.
Arguments:
layers: Arbitrary nested structure of Layers. Layers must be reachable
from one or more of the `keras.Input` Tensors that correspond to this
Network's inputs.
relevant_nodes: Nodes from the Layers that should be considered part of
this Network. If `None`, all Nodes will be considered part of this
Network.
Raises:
ValueError: If the layers depend on `Input`s not found in this Model.
"""
layers = nest.flatten(layers)
tf_utils.assert_no_legacy_layers(layers)
node_to_depth = {}
for depth, nodes in self._nodes_by_depth.items():
node_to_depth.update({node: depth for node in nodes})
# The nodes of these Layers that are relevant to this Network. If not
# provided, assume all Nodes are relevant
if not relevant_nodes:
relevant_nodes = nest.flatten([layer._inbound_nodes for layer in layers])
network_nodes = set(relevant_nodes + list(node_to_depth.keys()))
def _get_min_depth(node):
"""Gets the minimum depth at which node can be computed."""
min_depth = 0
for layer, node_id, _, _ in node.iterate_inbound(include_arguments=True):
inbound_node = layer._inbound_nodes[node_id]
if inbound_node in node_to_depth:
min_depth = min(min_depth, node_to_depth[inbound_node])
elif inbound_node not in network_nodes:
continue
else:
# Previous relevant nodes haven't been processed yet.
return None
# New node is one shallower than its shallowest input.
return min_depth - 1
# Insert nodes into `_nodes_by_depth` and other node attrs.
unprocessed_nodes = copy.copy(relevant_nodes)
i = 0
while unprocessed_nodes:
i += 1
# Do a sanity check. This can occur if `Input`s from outside this Model
# are being relied on.
if i > 10000:
raise ValueError('Layers could not be added due to missing '
'dependencies.')
node = unprocessed_nodes.pop(0)
depth = _get_min_depth(node)
if depth is None: # Defer until inbound nodes are processed.
unprocessed_nodes.append(node)
continue
node_key = _make_node_key(node.outbound_layer.name,
node.outbound_layer._inbound_nodes.index(node))
if node_key not in self._network_nodes:
node_to_depth[node] = depth
self._network_nodes.add(node_key)
self._nodes_by_depth[depth].append(node)
# Insert layers and update other layer attrs.
layer_set = set(self._layers)
deferred_layers = []
for layer in layers:
if layer not in layer_set:
self._layers.append(layer)
deferred_layers.append(layer)
self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call)
# This allows the added layer to broadcast mutations to the current
# layer, which is necessary to ensure cache correctness.
layer._attribute_sentinel.add_parent(self._attribute_sentinel)
layer_set.add(layer)
self._handle_deferred_layer_dependencies(deferred_layers)
self._compute_tensor_usage_count()
def _compute_tensor_usage_count(self):
"""Compute the #. of tensor usages for all the output tensors of layers.
The computed tensor usage count is saved as `self._tensor_usage_count`. This
is later used for saving memory in eager computation by releasing
no-longer-needed tensors as early as possible.
"""
tensor_usage_count = collections.Counter()
available_tensors = set(str(id(tensor)) for tensor in self.inputs)
depth_keys = list(self._nodes_by_depth.keys())
depth_keys.sort(reverse=True)
depth_keys = depth_keys[1:]
for depth in depth_keys:
for node in self._nodes_by_depth[depth]:
input_tensors = {
str(id(tensor)) for tensor in nest.flatten(node.input_tensors)
}
if input_tensors.issubset(available_tensors):
kwargs = copy.copy(node.arguments) if node.arguments else {}
for tensor in nest.flatten(kwargs):
if (isinstance(tensor,
(ops.Tensor, composite_tensor.CompositeTensor)) and
hasattr(tensor, '_keras_history')):
tensor_usage_count[str(id(tensor))] += 1
for tensor in nest.flatten(node.input_tensors):
tensor_usage_count[str(id(tensor))] += 1
for output_tensor in nest.flatten(node.output_tensors):
available_tensors.add(str(id(output_tensor)))
for tensor in self.outputs:
tensor_usage_count[str(id(tensor))] += 1
self._tensor_usage_count = tensor_usage_count
def _assert_weights_created(self):
"""Asserts that all the weights for the network have been created.
For a non-dynamic network, the weights must already be created after the
layer has been called. For a dynamic network, the exact list of weights can
never be known for certain since it may change at any time during execution.
We run this check right before accessing weights or getting the Numpy value
for the current weights. Otherwise, if the layer has never been called,
the user would just get an empty list, which is misleading.
Raises:
ValueError: if the weights of the network have not yet been created.
"""
if self.dynamic:
return
if (not self._is_graph_network and
'build' in self.__class__.__dict__ and
not self.built):
# For any model that has customized build() method but hasn't
# been invoked yet, this will cover both sequential and subclass model.
raise ValueError('Weights for model %s have not yet been created. '
'Weights are created when the Model is first called on '
'inputs or `build()` is called with an `input_shape`.' %
self.name)
def _graph_network_add_loss(self, symbolic_loss):
new_nodes, new_layers = _map_subgraph_network(self.inputs, [symbolic_loss])
# Losses must be keyed on inputs no matter what in order to be supported in
# DistributionStrategy.
add_loss_layer = base_layer.AddLoss(
unconditional=False, dtype=symbolic_loss.dtype)
add_loss_layer(symbolic_loss)
new_nodes.extend(add_loss_layer.inbound_nodes)
new_layers.append(add_loss_layer)
self._insert_layers(new_layers, new_nodes)
def _graph_network_add_metric(self, value, aggregation, name):
new_nodes, new_layers = _map_subgraph_network(self.inputs, [value])
add_metric_layer = base_layer.AddMetric(
aggregation, name, dtype=value.dtype)
add_metric_layer(value)
new_nodes.extend(add_metric_layer.inbound_nodes)
new_layers.append(add_metric_layer)
self._insert_layers(new_layers, new_nodes)
@property
def _trackable_saved_model_saver(self):
return network_serialization.NetworkSavedModelSaver(self)
def _is_hdf5_filepath(filepath):
return (filepath.endswith('.h5') or filepath.endswith('.keras') or
filepath.endswith('.hdf5'))
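# --- Hedged usage sketch (not part of the original module) ---
# Illustrates the save_format inference documented in `save_weights`: a filepath
# ending in '.h5'/'.keras'/'.hdf5' defaults to the HDF5 format, anything else to
# the TensorFlow checkpoint format. Model shape and file names are illustrative.
def _sketch_weight_format_inference():  # pragma: no cover
    import tensorflow as tf
    model = tf.keras.Sequential([tf.keras.layers.Dense(2, input_shape=(3,))])
    model.save_weights('weights.h5')       # _is_hdf5_filepath -> True, single HDF5 file
    model.save_weights('./ckpt/weights')   # prefix only -> TensorFlow checkpoint files
    model.load_weights('weights.h5')       # requires h5py, returns None
    status = model.load_weights('./ckpt/weights')  # returns a checkpoint status object
    status.assert_existing_objects_matched()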
def _make_node_key(layer_name, node_index):
return layer_name + '_ib-' + str(node_index)
def _map_graph_network(inputs, outputs):
"""Validates a network's topology and gather its layers and nodes.
Arguments:
inputs: List of input tensors.
outputs: List of outputs tensors.
Returns:
A tuple `(nodes, nodes_by_depth, layers, layers_by_depth)`.
- nodes: list of Node instances.
- nodes_by_depth: dict mapping ints (depth) to lists of node instances.
- layers: list of Layer instances.
- layers_by_depth: dict mapping ints (depth) to lists of layer instances.
Raises:
ValueError: In case the network is not valid (e.g. disconnected graph).
"""
# Network_nodes: set of nodes included in the graph of layers
# (not all nodes included in the layers are relevant to the current graph).
network_nodes = set() # ids of all nodes relevant to the Network
nodes_depths = {} # dict {node: depth value}
layers_depths = {} # dict {layer: depth value}
layer_indices = {} # dict {layer: index in traversal}
nodes_in_decreasing_depth = []
def build_map(tensor,
finished_nodes,
nodes_in_progress,
layer,
node_index,
tensor_index):
"""Builds a map of the graph of layers.
This recursively updates the map `layer_indices`,
the list `nodes_in_decreasing_depth` and the set `network_nodes`.
Arguments:
tensor: Some tensor in a graph.
finished_nodes: Set of nodes whose subgraphs have been traversed
completely. Useful to prevent duplicated work.
nodes_in_progress: Set of nodes that are currently active on the
recursion stack. Useful to detect cycles.
layer: Layer from which `tensor` comes. If not provided,
will be obtained from `tensor._keras_history`.
node_index: Node index from which `tensor` comes.
tensor_index: Tensor index from which `tensor` comes.
Raises:
ValueError: if a cycle is detected.
"""
node = layer._inbound_nodes[node_index] # pylint: disable=protected-access
# Prevent cycles.
if node in nodes_in_progress:
raise ValueError('The tensor ' + str(tensor) + ' at layer "' +
layer.name + '" is part of a cycle.')
# Don't repeat work for shared subgraphs
if node in finished_nodes:
return
node_key = _make_node_key(layer.name, node_index)
# Update network_nodes.
network_nodes.add(node_key)
# Store the traversal order for layer sorting.
if layer not in layer_indices:
layer_indices[layer] = len(layer_indices)
nodes_in_progress.add(node)
# Propagate to all previous tensors connected to this node.
for layer, node_index, tensor_index, tensor in node.iterate_inbound(
include_arguments=True):
build_map(tensor, finished_nodes, nodes_in_progress, layer, node_index,
tensor_index)
finished_nodes.add(node)
nodes_in_progress.remove(node)
nodes_in_decreasing_depth.append(node)
finished_nodes = set()
nodes_in_progress = set()
for x in outputs:
layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access
build_map(x, finished_nodes, nodes_in_progress,
layer=layer,
node_index=node_index,
tensor_index=tensor_index)
for node in reversed(nodes_in_decreasing_depth):
# If the depth is not set, the node has no outbound nodes (depth 0).
depth = nodes_depths.setdefault(node, 0)
# Update the depth of the corresponding layer
previous_depth = layers_depths.get(node.outbound_layer, 0)
# If we've seen this layer before at a higher depth,
# we should use that depth instead of the node depth.
# This is necessary for shared layers that have inputs at different
# depth levels in the graph.
depth = max(depth, previous_depth)
layers_depths[node.outbound_layer] = depth
nodes_depths[node] = depth
# Update the depth of inbound nodes.
# The "depth" of a node is the max of the depths
# of all nodes it is connected to + 1.
for node_dep in node._get_all_node_dependencies():
previous_depth = nodes_depths.get(node_dep, 0)
nodes_depths[node_dep] = max(depth + 1, previous_depth)
# Handle inputs that are not connected to outputs.
# We do not error out here because the inputs may be used to compute losses
# and metrics.
for input_t in inputs:
input_layer = input_t._keras_history[0]
if input_layer not in layers_depths:
layers_depths[input_layer] = 0
layer_indices[input_layer] = -1
nodes_depths[input_layer._inbound_nodes[0]] = 0
network_nodes.add(_make_node_key(input_layer.name, 0))
# Build a dict {depth: list of nodes with this depth}
nodes_by_depth = collections.defaultdict(list)
for node, depth in nodes_depths.items():
nodes_by_depth[depth].append(node)
# Build a dict {depth: list of layers with this depth}
layers_by_depth = collections.defaultdict(list)
for layer, depth in layers_depths.items():
layers_by_depth[depth].append(layer)
# Get sorted list of layer depths.
depth_keys = list(layers_by_depth.keys())
depth_keys.sort(reverse=True)
# Set self.layers ordered by depth.
layers = []
for depth in depth_keys:
layers_for_depth = layers_by_depth[depth]
# Network.layers needs to have a deterministic order:
# here we order them by traversal order.
layers_for_depth.sort(key=lambda x: layer_indices[x])
layers.extend(layers_for_depth)
# Get sorted list of node depths.
depth_keys = list(nodes_by_depth.keys())
depth_keys.sort(reverse=True)
# Check that all tensors required are computable.
# computable_tensors: all tensors in the graph
# that can be computed from the inputs provided.
computable_tensors = set()
for x in inputs:
computable_tensors.add(id(x))
layers_with_complete_input = [] # To provide a better error msg.
for depth in depth_keys:
for node in nodes_by_depth[depth]:
layer = node.outbound_layer
if layer:
for x in nest.flatten(node.input_tensors):
if id(x) not in computable_tensors:
raise ValueError('Graph disconnected: '
'cannot obtain value for tensor ' + str(x) +
' at layer "' + layer.name + '". '
'The following previous layers '
'were accessed without issue: ' +
str(layers_with_complete_input))
for x in nest.flatten(node.output_tensors):
computable_tensors.add(id(x))
layers_with_complete_input.append(layer.name)
# Ensure name unicity, which will be crucial for serialization
# (since serialized nodes refer to layers by their name).
all_names = [layer.name for layer in layers]
for name in all_names:
if all_names.count(name) != 1:
raise ValueError('The name "' + name + '" is used ' +
str(all_names.count(name)) + ' times in the model. '
'All layer names should be unique.')
return network_nodes, nodes_by_depth, layers, layers_by_depth
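# --- Hedged usage sketch (not part of the original module) ---
# Shows what _map_graph_network returns for a tiny functional graph. Layer names
# and shapes are illustrative; the expected values in the comments assume this
# exact three-layer chain.
def _sketch_map_graph_network():  # pragma: no cover
    import tensorflow as tf
    inp = tf.keras.Input(shape=(4,), name='inp')
    hidden = tf.keras.layers.Dense(8, name='hidden')(inp)
    out = tf.keras.layers.Dense(1, name='out')(hidden)
    node_keys, nodes_by_depth, layers, layers_by_depth = _map_graph_network([inp], [out])
    # Layers are ordered from the inputs (highest depth) down to the outputs (depth 0).
    print([layer.name for layer in layers])              # e.g. ['inp', 'hidden', 'out']
    print(sorted(nodes_by_depth.keys(), reverse=True))   # e.g. [2, 1, 0]
    print(sorted(node_keys))                             # node keys like 'hidden_ib-0'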
def _map_subgraph_network(inputs, outputs):
"""Returns the nodes and layers in the topology from `inputs` to `outputs`.
Args:
inputs: List of input tensors.
outputs: List of output tensors.
Returns:
A tuple of (List[Node], List[Layer]).
"""
base_layer_utils.create_keras_history(outputs)
# Keep only nodes and layers in the topology between inputs and outputs.
_, nodes_by_depth, layers, _ = _map_graph_network(inputs, outputs)
return nest.flatten([nodes for nodes in nodes_by_depth.values()]), layers
def _should_skip_first_node(layer):
"""Returns True if the first layer node should not be saved or loaded."""
# Networks start with a pre-existing node linking their input to output.
return issubclass(layer.__class__, Network) and layer._is_graph_network
def _serialize_tensors(kwargs):
"""Serializes Tensors passed to `call`."""
def _serialize_keras_tensor(t):
"""Serializes a single Tensor passed to `call`."""
if hasattr(t, '_keras_history'):
kh = t._keras_history
return [kh.layer.name, kh.node_index, kh.tensor_index]
if isinstance(t, np.ndarray):
return t.tolist()
if isinstance(t, ops.Tensor):
return backend.get_value(t).tolist()
return t
return nest.map_structure(_serialize_keras_tensor, kwargs)
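# --- Hedged usage sketch (not part of the original module) ---
# Shows what _serialize_tensors does to typical `call` kwargs: numpy arrays become
# nested lists, Keras tensors become [layer_name, node_index, tensor_index]
# triples, and plain Python values pass through unchanged. Values are illustrative.
def _sketch_serialize_tensors():  # pragma: no cover
    import numpy as np
    kwargs = {'training': True, 'mask': np.array([[1, 0]])}
    print(_serialize_tensors(kwargs))  # e.g. {'training': True, 'mask': [[1, 0]]}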
def _map_tensors_to_constants(kwargs):
def _map_to_constants(t):
if not hasattr(t, '_keras_history') and isinstance(t, ops.Tensor):
return constant_op.constant(backend.get_value(t))
return t
return nest.map_structure(_map_to_constants, kwargs)
def _deserialize_keras_tensors(kwargs, layer_map):
"""Deserializes Keras Tensors passed to `call`.."""
def _deserialize_keras_tensor(t):
"""Deserializes a single Keras Tensor passed to `call`."""
if isinstance(t, tf_utils.ListWrapper):
t = t.as_list()
layer_name = t[0]
node_index = t[1]
tensor_index = t[2]
layer = layer_map[layer_name]
node = layer._inbound_nodes[node_index]
return nest.flatten(node.output_tensors)[tensor_index]
return t
kwargs = tf_utils.convert_inner_node_data(kwargs, wrap=True)
return nest.map_structure(_deserialize_keras_tensor, kwargs)
def connect_ancillary_layers(model, created_layers):
"""Adds layers that are not connected to the outputs to the model."""
# Layers not connected to outputs, such as those added in `add_loss`.
ancillary_layers = [
layer for layer in created_layers.values() if layer not in model.layers
]
if ancillary_layers:
relevant_nodes = nest.flatten([
layer.inbound_nodes[1:]
if _should_skip_first_node(layer) else layer.inbound_nodes
for layer in created_layers.values()
])
model._insert_layers(ancillary_layers, relevant_nodes)
return model
def reconstruct_from_config(config, custom_objects=None, created_layers=None):
"""Reconstructs graph from config object.
Args:
config: Dictionary returned from Network.get_config()
custom_objects: Optional dictionary mapping names (strings) to custom
classes or functions to be considered during deserialization.
created_layers: Optional dictionary mapping names to Layer objects. Any
layer not in this dictionary will be created and added to the dict.
This function will add new nodes to all layers (excluding InputLayers),
instead of re-using pre-existing nodes in the layers.
Returns:
Tuple of (input tensors, output tensors, dictionary of created layers)
"""
# Layer instances created during the graph reconstruction process.
created_layers = created_layers or collections.OrderedDict()
# Maps input data (tuple of inbound layer name, node index) from the config
# to node indices in the newly generated model. The node indices may be
# different if the layers have already been called previously.
node_index_map = {}
node_count_by_layer = {}
# Dictionary mapping layer instances to
# node data that specifies a layer call.
# It acts as a queue that maintains any unprocessed
# layer call until it becomes possible to process it
# (i.e. until the input tensors to the call all exist).
unprocessed_nodes = {}
def add_unprocessed_node(layer, node_data):
if layer not in unprocessed_nodes:
unprocessed_nodes[layer] = [node_data]
else:
unprocessed_nodes[layer].append(node_data)
def get_node_index(layer, config_node_index):
"""Returns node index in layer (might differ from config_node_index)."""
if isinstance(layer, input_layer_module.InputLayer):
return 0
return node_index_map.get((layer.name, config_node_index), None)
def process_node(layer, node_data):
"""Deserialize a node.
Arguments:
layer: layer instance.
node_data: Nested structure of `ListWrapper`.
Raises:
ValueError: In case of improperly formatted `node_data`.
"""
input_tensors = []
for input_data in nest.flatten(node_data):
input_data = input_data.as_list()
inbound_layer_name = input_data[0]
inbound_node_index = input_data[1]
inbound_tensor_index = input_data[2]
if len(input_data) == 3:
kwargs = {}
elif len(input_data) == 4:
kwargs = input_data[3]
kwargs = _deserialize_keras_tensors(kwargs, created_layers)
else:
raise ValueError('Improperly formatted model config.')
inbound_layer = created_layers[inbound_layer_name]
inbound_node_index = get_node_index(inbound_layer, inbound_node_index)
if inbound_node_index is None:
add_unprocessed_node(layer, node_data)
return
inbound_node = inbound_layer._inbound_nodes[inbound_node_index]
input_tensors.append(
nest.flatten(inbound_node.output_tensors)[inbound_tensor_index])
input_tensors = nest.pack_sequence_as(node_data, input_tensors)
# Call layer on its inputs, thus creating the node
# and building the layer if needed.
if input_tensors is not None:
input_tensors = base_layer_utils.unnest_if_single_tensor(input_tensors)
output_tensors = layer(input_tensors, **kwargs)
# Update node index map.
output_index = nest.flatten(output_tensors)[0]._keras_history.node_index
node_index_map[(layer.name, node_count_by_layer[layer])] = output_index
node_count_by_layer[layer] += 1
def process_layer(layer_data):
"""Deserializes a layer, then call it on appropriate inputs.
Arguments:
layer_data: layer config dict.
Raises:
ValueError: In case of improperly formatted `layer_data` dict.
"""
layer_name = layer_data['name']
if layer_name in created_layers:
layer = created_layers[layer_name]
else:
# Instantiate layer.
from tensorflow.python.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top
layer = deserialize_layer(layer_data, custom_objects=custom_objects)
created_layers[layer_name] = layer
node_count_by_layer[layer] = int(_should_skip_first_node(layer))
# Gather layer inputs and convert to `ListWrapper` objects.
inbound_nodes_data = layer_data['inbound_nodes']
inbound_nodes_data = tf_utils.convert_inner_node_data(
inbound_nodes_data, wrap=True)
for node_data in inbound_nodes_data:
# We don't process nodes (i.e. make layer calls)
# on the fly because the inbound node may not yet exist,
# in case of layer shared at different topological depths
# (e.g. a model such as A(B(A(B(x)))))
add_unprocessed_node(layer, node_data)
# First, we create all layers and enqueue nodes to be processed
for layer_data in config['layers']:
process_layer(layer_data)
# Then we process nodes in order of layer depth.
# Nodes that cannot yet be processed (if the inbound node
# does not yet exist) are re-enqueued, and the process
# is repeated until all nodes are processed.
while unprocessed_nodes:
for layer_data in config['layers']:
layer = created_layers[layer_data['name']]
if layer in unprocessed_nodes:
for node_data in unprocessed_nodes.pop(layer):
process_node(layer, node_data)
input_tensors = []
output_tensors = []
input_layers = tf_utils.convert_inner_node_data(
config['input_layers'], wrap=True)
for layer_data in nest.flatten(input_layers):
layer_name, node_index, tensor_index = layer_data.as_list()
assert layer_name in created_layers
layer = created_layers[layer_name]
node_index = get_node_index(layer, node_index)
layer_output_tensors = layer._inbound_nodes[node_index].output_tensors
input_tensors.append(nest.flatten(layer_output_tensors)[tensor_index])
output_layers = tf_utils.convert_inner_node_data(
config['output_layers'], wrap=True)
for layer_data in nest.flatten(output_layers):
layer_name, node_index, tensor_index = layer_data.as_list()
assert layer_name in created_layers
layer = created_layers[layer_name]
node_index = get_node_index(layer, node_index)
layer_output_tensors = layer._inbound_nodes[node_index].output_tensors
output_tensors.append(nest.flatten(layer_output_tensors)[tensor_index])
input_tensors = nest.pack_sequence_as(input_layers, input_tensors)
output_tensors = nest.pack_sequence_as(output_layers, output_tensors)
return input_tensors, output_tensors, created_layers
def get_network_config(network, serialize_layer_fn=None):
"""Builds the config, which consists of the node graph and serialized layers.
Args:
network: A Network object.
serialize_layer_fn: Function used to serialize layers.
Returns:
Config dictionary.
"""
serialize_layer_fn = (
serialize_layer_fn or generic_utils.serialize_keras_object)
config = {
'name': network.name,
}
node_conversion_map = {}
for layer in network.layers:
kept_nodes = 1 if _should_skip_first_node(layer) else 0
for original_node_index, node in enumerate(layer._inbound_nodes):
node_key = _make_node_key(layer.name, original_node_index)
if node_key in network._network_nodes:
node_conversion_map[node_key] = kept_nodes
kept_nodes += 1
layer_configs = []
for layer in network.layers: # From the earliest layers on.
filtered_inbound_nodes = []
for original_node_index, node in enumerate(layer._inbound_nodes):
node_key = _make_node_key(layer.name, original_node_index)
if node_key in network._network_nodes:
# The node is relevant to the model:
# add to filtered_inbound_nodes.
if node.arguments:
kwargs = _serialize_tensors(node.arguments)
try:
json.dumps(kwargs)
except TypeError:
logging.warning(
'Layer ' + layer.name +
' was passed non-serializable keyword arguments: ' +
str(node.arguments) + '. They will not be included '
'in the serialized model (and thus will be missing '
'at deserialization time).')
kwargs = {}
else:
kwargs = {}
if node.inbound_layers:
node_data = []
for inbound_layer, node_id, tensor_id, _ in node.iterate_inbound():
node_key = _make_node_key(inbound_layer.name, node_id)
new_node_index = node_conversion_map.get(node_key, 0)
node_data.append(
tf_utils.ListWrapper(
[inbound_layer.name, new_node_index, tensor_id, kwargs]))
node_data = nest.pack_sequence_as(node.input_tensors, node_data)
if not nest.is_sequence(node_data):
node_data = [node_data]
# Convert ListWrapper to list for backwards compatible configs.
node_data = tf_utils.convert_inner_node_data(node_data)
filtered_inbound_nodes.append(node_data)
layer_config = serialize_layer_fn(layer)
layer_config['name'] = layer.name
layer_config['inbound_nodes'] = filtered_inbound_nodes
layer_configs.append(layer_config)
config['layers'] = layer_configs
# Gather info about inputs and outputs.
model_inputs = []
for i in range(len(network._input_layers)):
layer, node_index, tensor_index = network._input_coordinates[i]
node_key = _make_node_key(layer.name, node_index)
if node_key not in network._network_nodes:
continue
new_node_index = node_conversion_map[node_key]
model_inputs.append(
tf_utils.ListWrapper([layer.name, new_node_index, tensor_index]))
model_inputs = nest.pack_sequence_as(network._nested_inputs, model_inputs)
# Preserve external Keras compat for Models with single input.
if not nest.is_sequence(model_inputs):
model_inputs = [model_inputs]
model_inputs = tf_utils.convert_inner_node_data(model_inputs)
config['input_layers'] = model_inputs
model_outputs = []
for i in range(len(network._output_layers)):
layer, node_index, tensor_index = network._output_coordinates[i]
node_key = _make_node_key(layer.name, node_index)
if node_key not in network._network_nodes:
continue
new_node_index = node_conversion_map[node_key]
model_outputs.append(
tf_utils.ListWrapper([layer.name, new_node_index, tensor_index]))
model_outputs = nest.pack_sequence_as(network._nested_outputs, model_outputs)
# Preserve external Keras compat for Models with single output.
if not nest.is_sequence(model_outputs):
model_outputs = [model_outputs]
model_outputs = tf_utils.convert_inner_node_data(model_outputs)
config['output_layers'] = model_outputs
return config
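# --- Hedged usage sketch (not part of the original module) ---
# Round-trips a small functional model through get_network_config and
# reconstruct_from_config. Layer names and shapes are illustrative; normal user
# code would go through Model.get_config()/Model.from_config() instead of these
# module-level helpers.
def _sketch_config_round_trip():  # pragma: no cover
    import tensorflow as tf
    inp = tf.keras.Input(shape=(4,), name='inp')
    out = tf.keras.layers.Dense(1, name='out')(inp)
    model = tf.keras.Model(inp, out, name='tiny')
    config = get_network_config(model)
    # config carries 'name', 'layers', 'input_layers' and 'output_layers' entries.
    inputs, outputs, created_layers = reconstruct_from_config(config)
    rebuilt = tf.keras.Model(inputs, outputs, name=config['name'])
    print([layer.name for layer in rebuilt.layers])  # e.g. ['inp', 'out'], as in `model`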
|
import roundedCorners from '../../src/macros/roundedCorners.es6'
describe('roundedCorners', () => {
it('should provide a default curvature of 10', () => {
expect(roundedCorners()).toEqual({'-moz-border-radius': 10, 'border-radius': 10, '-webkit-border-radius': 10});
});
it('should respect input radius', () => {
expect(roundedCorners(5)).toEqual({'-moz-border-radius': 5, 'border-radius': 5, '-webkit-border-radius': 5});
});
it('should curve all corners when asked explicitly', () => {
expect(roundedCorners('all', 7)).toEqual({
'-moz-border-radius': 7,
'border-radius': 7,
'-webkit-border-radius': 7
});
});
it('should curve just top left corner', () => {
expect(roundedCorners('tl', 7)).toEqual({
'-moz-border-radius-topleft': 7,
'border-top-left-radius': 7,
'-webkit-border-top-left-radius': 7
});
});
it('should curve both top side corners', () => {
expect(roundedCorners('top', 7)).toEqual({
'-moz-border-radius-topleft': 7, 'border-top-left-radius': 7, '-webkit-border-top-left-radius': 7,
'-moz-border-radius-topright': 7, 'border-top-right-radius': 7, '-webkit-border-top-right-radius': 7
});
});
});
|
/* eslint-disable no-console */
import { mergeDeepRight, propOr, pathOr } from 'ramda'
import { json } from '@podlove/utils/request'
import * as playerConfig from '@podlove/player-config'
const fetchChapters = async config => {
try {
return await json(playerConfig.chapters(config))
} catch (err) {
console.warn(`Couldn't parse chapters "${playerConfig.chapters(config)}", falling back to empty list`)
return []
}
}
const fetchTranscripts = async config => {
try {
return await json(playerConfig.transcripts(config))
} catch (err) {
console.warn(`Couldn't parse transcripts "${playerConfig.transcripts(config)}", falling back to empty list`)
return []
}
}
const fetchPlaylist = async config => {
try {
return await json(playerConfig.playlist(config))
} catch (err) {
console.warn(`Couldn't parse playlist "${playerConfig.playlist(config)}", falling back to empty list`)
return []
}
}
const reference = ({ episode, config }, resolved) => ({
episode: typeof episode === 'string' ? episode : null,
config: typeof config === 'string' ? config : null,
base: propOr(null, 'base', resolved.config),
share: pathOr(null, ['share', 'outlet'], resolved.config)
})
const resolve = async url => {
try {
return await json(url)
} catch (err) {
throw new Error(`Couldn't parse configuration "${url}"`)
}
}
export const parseConfig = (episode, config) =>
Promise.all([resolve(episode), resolve(config)]).then(
async ([resolvedEpisode, resolvedConfig]) => {
const [transcripts, chapters, playlist] = await Promise.all([
fetchTranscripts(resolvedEpisode),
fetchChapters(resolvedEpisode),
fetchPlaylist(resolvedConfig)
])
return mergeDeepRight(
Object.assign({}, resolvedEpisode, {
transcripts,
chapters
}),
Object.assign({}, resolvedConfig, {
playlist,
reference: reference(
{ episode, config },
{ episode: resolvedEpisode, config: resolvedConfig }
)
})
)
}
)
|
module.exports = [
{
fileName: 'no-favicon.html',
response: [
{
href: 'http://www.example.com/favicon.ico',
name: 'favicon.ico',
active: true
}
]
},
{
fileName: 'all-favicon.html',
predicates: [
(i) => i.name === 'apple-touch-icon-precomposed',
(i) => i.name === 'apple-touch-icon',
(i) => i.name === 'favicon.ico'
],
response: [{
href: 'http://example.com/apple-touch-icon.png',
name: 'apple-touch-icon'
}, {
href: 'http://example.com/apple-touch-icon-precomposed.png',
name: 'apple-touch-icon-precomposed',
active: true
}, {
href: 'http://example.com/icon.png',
name: 'icon'
}, {
href: 'http://example.com/shortcut_icon.png',
name: 'shortcut icon'
}, {
href: 'http://example.com/msapplication-TileImage.png',
name: 'msapplication-TileImage'
}, {
href: 'http://example.com/og_image.png',
name: 'og:image'
}, {
href: 'http://example.com/twitter_image.png',
name: 'twitter:image'
}, {
href: 'http://www.example.com/favicon.ico',
name: 'favicon.ico'
}]
},
{
fileName: 'all-favicon.html',
predicates: [
(i) => i.name === 'apple-touch-icon',
(i) => i.name === 'apple-touch-icon-precomposed',
(i) => i.name === 'favicon.ico'
],
response: [{
href: 'http://example.com/apple-touch-icon.png',
name: 'apple-touch-icon',
active: true
}, {
href: 'http://example.com/apple-touch-icon-precomposed.png',
name: 'apple-touch-icon-precomposed'
}, {
href: 'http://example.com/icon.png',
name: 'icon'
}, {
href: 'http://example.com/shortcut_icon.png',
name: 'shortcut icon'
}, {
href: 'http://example.com/msapplication-TileImage.png',
name: 'msapplication-TileImage'
}, {
href: 'http://example.com/og_image.png',
name: 'og:image'
}, {
href: 'http://example.com/twitter_image.png',
name: 'twitter:image'
}, {
href: 'http://www.example.com/favicon.ico',
name: 'favicon.ico'
}]
},
{
fileName: 'all-favicon.html',
predicates: [
(i) => i.name === 'favicon.ico',
(i) => i.name === 'apple-touch-icon',
(i) => i.name === 'apple-touch-icon-precomposed'
],
response: [{
href: 'http://example.com/apple-touch-icon.png',
name: 'apple-touch-icon'
}, {
href: 'http://example.com/apple-touch-icon-precomposed.png',
name: 'apple-touch-icon-precomposed'
}, {
href: 'http://example.com/icon.png',
name: 'icon'
}, {
href: 'http://example.com/shortcut_icon.png',
name: 'shortcut icon'
}, {
href: 'http://example.com/msapplication-TileImage.png',
name: 'msapplication-TileImage'
}, {
href: 'http://example.com/og_image.png',
name: 'og:image'
}, {
href: 'http://example.com/twitter_image.png',
name: 'twitter:image'
}, {
href: 'http://www.example.com/favicon.ico',
name: 'favicon.ico',
active: true
}]
}
]
|
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from profiles_api import views
router = DefaultRouter()
router.register('hello-viewset',views.HelloViewSet,base_name='hello-viewset')
router.register('profile',views.UserProfileViewSet)
router.register('feed',views.UserProfileFeedViewSet)
urlpatterns = [
path('hello-view/',views.HelloApiView.as_view()),
path('login/', views.UserLoginApiView.as_view()),
path('',include(router.urls))
]
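# Hedged note (illustrative only): with the registrations above, DefaultRouter
# generates list and detail routes roughly like
#   /hello-viewset/   /hello-viewset/{pk}/
#   /profile/         /profile/{pk}/
#   /feed/            /feed/{pk}/
# plus a browsable API root at '', while the explicit path() entries add
# /hello-view/ and /login/.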
|
""" run test suites written for nose. """
import sys
import pytest
from _pytest import python
from _pytest import runner
from _pytest import unittest
from _pytest.config import hookimpl
def get_skip_exceptions():
skip_classes = set()
for module_name in ("unittest", "unittest2", "nose"):
mod = sys.modules.get(module_name)
if hasattr(mod, "SkipTest"):
skip_classes.add(mod.SkipTest)
return tuple(skip_classes)
def pytest_runtest_makereport(item, call):
if call.excinfo and call.excinfo.errisinstance(get_skip_exceptions()):
# let's substitute the excinfo with a pytest.skip one
call2 = runner.CallInfo.from_call(
lambda: pytest.skip(str(call.excinfo.value)), call.when
)
call.excinfo = call2.excinfo
@hookimpl(trylast=True)
def pytest_runtest_setup(item):
if is_potential_nosetest(item):
if not call_optional(item.obj, "setup"):
# call module level setup if there is no object level one
call_optional(item.parent.obj, "setup")
# XXX this implies we only call teardown when setup worked
item.session._setupstate.addfinalizer((lambda: teardown_nose(item)), item)
def teardown_nose(item):
if is_potential_nosetest(item):
if not call_optional(item.obj, "teardown"):
call_optional(item.parent.obj, "teardown")
# if hasattr(item.parent, '_nosegensetup'):
# #call_optional(item._nosegensetup, 'teardown')
# del item.parent._nosegensetup
def is_potential_nosetest(item):
# extra check needed since we do not do nose style setup/teardown
# on direct unittest style classes
return isinstance(item, python.Function) and not isinstance(
item, unittest.TestCaseFunction
)
def call_optional(obj, name):
method = getattr(obj, name, None)
isfixture = hasattr(method, "_pytestfixturefunction")
if method is not None and not isfixture and callable(method):
# If there's any problems allow the exception to raise rather than
# silently ignoring them
method()
return True
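# --- Hedged usage sketch (not part of the plugin) ---
# A nose-style test module this plugin would handle: module-level setup/teardown
# are run around plain test functions via call_optional, and nose.SkipTest raised
# inside a test is rewritten into a pytest skip by pytest_runtest_makereport.
# The module below is illustrative only:
#
#     import nose
#
#     def setup():
#         print("module-level setup, invoked by pytest_runtest_setup")
#
#     def teardown():
#         print("module-level teardown, invoked by teardown_nose")
#
#     def test_skipped_like_nose():
#         raise nose.SkipTest("reported as a pytest skip")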
|
import React from 'react';
import ReactDOM from 'react-dom';
import './index.css';
import App from './App';
import reportWebVitals from './reportWebVitals';
ReactDOM.render(
<React.StrictMode>
<App />
</React.StrictMode>,
document.getElementById('root')
);
// If you want to start measuring performance in your app, pass a function
// to log results (for example: reportWebVitals(console.log))
// or send to an analytics endpoint. Learn more: https://bit.ly/CRA-vitals
reportWebVitals(console.log);
|
'use strict'
/** @typedef {import('@adonisjs/framework/src/Request')} Request */
/** @typedef {import('@adonisjs/framework/src/Response')} Response */
/** @typedef {import('@adonisjs/framework/src/View')} View */
/** @typedef {import('@adonisjs/auth/src/Auth')} Auth */
const { HttpException } = use('@adonisjs/generic-exceptions')
const Database = use('Database')
const Schedule = use('App/Models/Schedule')
const { daysDiff } = use('App/helpers')
/**
* Resourceful controller for interacting with schedules
*/
class ScheduleController {
/**
* Show a list of all schedules.
* GET schedules
*
* @param {object} ctx
* @param {Request} ctx.request
* @param {Response} ctx.response
* @param {Auth} ctx.auth
*/
async index({ request, response, auth }) {
return await auth.user.schedules().with('services').orderBy('id').fetch()
}
/**
* Create/save a new schedule.
* POST schedules
*
* @param {object} ctx
* @param {Request} ctx.request
* @param {Response} ctx.response
* @param {Auth} ctx.auth
*/
async store({ request, response, auth }) {
const trx = await Database.beginTransaction()
try {
const data = request.only(['date', 'note'])
const schedule = await Schedule.create({ ...data, user_id: auth.user.id }, trx)
await schedule.services().sync(request.only(['services']).services, null, trx)
await trx.commit()
await schedule.load('services')
return schedule
} catch (e) {
await trx.rollback()
}
}
/**
* Display a single schedule.
* GET schedules/:id
*
* @param {object} ctx
* @param {Request} ctx.request
* @param {Response} ctx.response
* @param {Auth} ctx.auth
*/
async show({ params, request, response, auth }) {
const schedule = await auth.user.schedules().with('services').where('id', '=', params.id).firstOrFail()
return schedule
}
/**
* Update schedule details.
* PUT or PATCH schedules/:id
*
* @param {object} ctx
* @param {Request} ctx.request
* @param {Response} ctx.response
* @param {Auth} ctx.auth
*/
async update({ params, request, response, auth }) {
const schedule = await auth.user.schedules().where('id', '=', params.id).firstOrFail()
// Requirement: the client may only change a schedule through the system up to 2 days before the scheduled date
if (daysDiff(new Date(), schedule.date) < 2) {
throw new HttpException('Unauthorized', 403)
}
const trx = await Database.beginTransaction()
try {
const data = request.only(['date', 'note'])
schedule.merge(data)
await schedule.save(trx)
await schedule.services().sync(request.only(['services']).services, null, trx)
await trx.commit()
await schedule.load('services')
return schedule
} catch (e) {
await trx.rollback()
}
}
/**
* Delete a schedule with id.
* DELETE schedules/:id
*
* @param {object} ctx
* @param {Request} ctx.request
* @param {Response} ctx.response
* @param {Auth} ctx.auth
*/
async destroy({ params, request, response, auth }) {
const schedule = await auth.user.schedules().where('id', '=', params.id).firstOrFail()
// Requirement: the client may only change a schedule through the system up to 2 days before the scheduled date
if (daysDiff(new Date(), schedule.date) < 2) {
throw new HttpException('Unauthorized', 403)
}
const trx = await Database.beginTransaction()
try {
await schedule.delete(trx)
await trx.commit()
return schedule
} catch (e) {
await trx.rollback()
}
}
}
module.exports = ScheduleController
|
/*var crypto = require('crypto');
exports.encode = function(payload, secret) {
var algorithm = 'HS256';
var header = {
type: 'JWT', alg: algorithm
};
var jwt = base64Encode(JSON.stringify(header)) + '.' + base64Encode(JSON.stringify(payload));
console.log();
return jwt + '.'+ sign (jwt, secret);
};
exports.decode = function(token, secret){
var segments = token.split('.');
if(segments.length !== 3)
throw new Error('Token structure incorrect');
var header = JSON.parse(base64Decode(segments[0]));
var payload = JSON.parse(base64Decode(segments[1]));
var rawSignature = segments[0] + '.' + segments[1];
if(!verify(rawSignature, secret, segments[2])) {
throw new Error('Verification failed');
};
return payload;
};
function verify(raw, secret, signature){
return signature === sign(raw, secret);
}
function sign(str, key) {
return crypto.createHmac('sha256', key).update(str).digest('base64');
}
function base64Encode(str) {
return new Buffer(str).toString('base64');
}
function base64Decode(str) {
return new Buffer(str, 'base64').toString();
}*/
var jwt = require('jwt-simple');
var moment = require('moment');
module.exports = function (user, response){
var payload = {
sub: user.id,
exp: moment().add(10, 'days').unix()
};
var token = jwt.encode(payload, 'sh...');
response.status(200).send({
user: user.toJSON(),
token: token
});
};
|
// via: https://github.com/diafygi/webrtc-ips
function DetectLocalIPAddress(callback, stream) {
if (!DetectRTC.isWebRTCSupported) {
return;
}
getIPs(function(ip) {
if (ip.match(/^(192\.168\.|169\.254\.|10\.|172\.(1[6-9]|2\d|3[01]))/)) {
callback('Local: ' + ip);
} else {
callback('Public: ' + ip);
}
}, stream);
}
function getIPs(callback, stream) {
if (typeof document === 'undefined' || typeof document.getElementById !== 'function') {
return;
}
var ipDuplicates = {};
var RTCPeerConnection = window.RTCPeerConnection || window.mozRTCPeerConnection || window.webkitRTCPeerConnection;
if (!RTCPeerConnection) {
var iframe = document.getElementById('iframe');
if (!iframe) {
return;
}
var win = iframe.contentWindow;
RTCPeerConnection = win.RTCPeerConnection || win.mozRTCPeerConnection || win.webkitRTCPeerConnection;
}
if (!RTCPeerConnection) {
return;
}
var peerConfig = null;
if (DetectRTC.browser.name === 'Chrome' && DetectRTC.browser.version < 58) {
// todo: add support for older Opera
peerConfig = {
optional: [{
RtpDataChannels: true
}]
};
}
var servers = {
iceServers: [{
urls: 'stun:stun.l.google.com:19302'
}]
};
var pc = new RTCPeerConnection(servers, peerConfig);
if (stream) {
if (pc.addStream) {
pc.addStream(stream);
} else if (pc.addTrack && stream.getTracks()[0]) {
pc.addTrack(stream.getTracks()[0], stream);
}
}
function handleCandidate(candidate) {
var ipRegex = /([0-9]{1,3}(\.[0-9]{1,3}){3})/;
var match = ipRegex.exec(candidate);
if (!match) {
return;
}
var ipAddress = match[1];
if (ipDuplicates[ipAddress] === undefined) {
callback(ipAddress);
}
ipDuplicates[ipAddress] = true;
}
// listen for candidate events
pc.onicecandidate = function(ice) {
if (ice.candidate) {
handleCandidate(ice.candidate.candidate);
}
};
// create data channel
if (!stream) {
try {
pc.createDataChannel('sctp', {});
} catch (e) {}
}
// create an offer sdp
if (DetectRTC.isPromisesSupported) {
pc.createOffer().then(function(result) {
pc.setLocalDescription(result).then(afterCreateOffer);
});
} else {
pc.createOffer(function(result) {
pc.setLocalDescription(result, afterCreateOffer, function() {});
}, function() {});
}
function afterCreateOffer() {
var lines = pc.localDescription.sdp.split('\n');
lines.forEach(function(line) {
if (line.indexOf('a=candidate:') === 0) {
handleCandidate(line);
}
});
}
}
|
(function () {
'use strict';
const userNameInput = document.getElementById('user-name');
const assessmentButton = document.getElementById('assessment');
const resultDivided = document.getElementById('result-area');
const tweetDivided = document.getElementById('tweet-area');
/**
* Removes all child nodes of the given element
* @param {HTMLElement} element HTML element
*/
function removeAllChildren(element) {
while (element.firstChild) { // keep removing while child nodes remain
element.removeChild(element.firstChild);
}
}
assessmentButton.onclick = () => {
const userName = userNameInput.value;
if (userName.length === 0) { // stop here when the name is empty
return;
}
// Build the assessment result area
removeAllChildren(resultDivided);
const header = document.createElement('h3');
header.innerText = '診断結果';
resultDivided.appendChild(header);
const paragraph = document.createElement('p');
const result = assessment(userName);
paragraph.innerText = result;
resultDivided.appendChild(paragraph);
// Build the tweet area
removeAllChildren(tweetDivided);
const anchor = document.createElement('a');
const hrefValue = 'https://twitter.com/intent/tweet?button_hashtag=%E3%81%82%E3%81%AA%E3%81%9F%E3%81%AE%E3%81%84%E3%81%84%E3%81%A8%E3%81%93%E3%82%8D&text='
+ encodeURIComponent(result);
anchor.setAttribute('href', hrefValue);
anchor.className = 'twitter-hashtag-button';
anchor.innerText = 'Tweet #%E3%81%82%E3%81%AA%E3%81%9F%E3%81%AE%E3%81%84%E3%81%84%E3%81%A8%E3%81%93%E3%82%8D';
tweetDivided.appendChild(anchor);
twttr.widgets.load();
};
userNameInput.onkeydown = (event) => {
if (event.keyCode === 13) {
assessmentButton.onclick();
}
};
const answers = [
'{userName}のいいところは声です。{userName}の特徴的な声はみなを惹きつけ、心に残ります。',
'{userName}のいいところはまなざしです。{userName}に見つめられた人は、気になって仕方がないでしょう。',
'{userName}のいいところは情熱です。{userName}の情熱に周りの人は感化されます。',
'{userName}のいいところは厳しさです。{userName}の厳しさがものごとをいつも成功に導きます。',
'{userName}のいいところは知識です。博識な{userName}を多くの人が頼りにしています。',
'{userName}のいいところはユニークさです。{userName}だけのその特徴が皆を楽しくさせます。',
'{userName}のいいところは用心深さです。{userName}の洞察に、多くの人が助けられます。',
'{userName}のいいところは見た目です。内側から溢れ出る{userName}の良さに皆が気を惹かれます。',
'{userName}のいいところは決断力です。{userName}がする決断にいつも助けられる人がいます。',
'{userName}のいいところは思いやりです。{userName}に気をかけてもらった多くの人が感謝しています。',
'{userName}のいいところは感受性です。{userName}が感じたことに皆が共感し、わかりあうことができます。',
'{userName}のいいところは節度です。強引すぎない{userName}の考えに皆が感謝しています。',
'{userName}のいいところは好奇心です。新しいことに向かっていく{userName}の心構えが多くの人に魅力的に映ります。',
'{userName}のいいところは気配りです。{userName}の配慮が多くの人を救っています。',
'{userName}のいいところはその全てです。ありのままの{userName}自身がいいところなのです。',
'{userName}のいいところは自制心です。やばいと思ったときにしっかりと衝動を抑えられる{userName}が皆から評価されています。',
'{userName}のいいところは優しさです。{userName}の優しい雰囲気や立ち振る舞いに多くの人が癒されています。★',
];
/**
* Returns an assessment result for the given name string
* @param {string} userName the user's name
* @return {string} assessment result
*/
function assessment(userName) {
// Sum the character codes of every character in the name
let sumOfcharCode = 0;
for (let i = 0; i < userName.length; i++) {
sumOfcharCode = sumOfcharCode + userName.charCodeAt(i);
}
// Use the sum of character codes modulo the number of answers as the index
const index = sumOfcharCode % answers.length;
let result = answers[index];
result = result.replace(/{userName}/g, userName);
return result;
}
// Test code
console.assert(
assessment('太郎') === '太郎のいいところは決断力です。太郎がする決断にいつも助けられる人がいます。',
'診断結果の文言の特定の部分を名前に置き換える処理が正しくありません。'
);
console.assert(
assessment('太郎') === assessment('太郎'),
'入力が同じ名前なら同じ診断結果を出力する処理が正しくありません。'
);
})();
|
/*
Author: Seth Spawn ([email protected])
Date: Mar 20, 2019 [last modified]
Purpose: Defines aggregate300m() function that aggregates biomass and error maps to 300m spatial
resolution and exports them as an asset.
Usage: Source the function and provide parameters.
Parameters: image: the map to be aggregated
assetId: name to be assigned to the new asset
err: whether map includes an error band (true/false)
*/
// ==========================================================================================================
// ----------------------------------------------------------------------------------------------------------
// source exportAsset300m function
var exportAsset300m = require('users/spawnwisc/globalBiomass:finalGlobal/commonFunctions/exportAsset300m.js')
exportAsset300m = exportAsset300m.exportAsset300m
// function to aggregate error using summation in quadrature [e.g. sqrt((sum(err^2)/count)]
var aggregateError = function(errImage){
var err2 = errImage.updateMask(errImage.gt(0)).pow(2)
var errAgg = err2.reduceResolution(ee.Reducer.mean()).sqrt()
return errAgg.unmask(0).mask(1).rename('err')
}
// define function for aggregating mean and error images to 300m resolution and exporting as asset
var aggregate300m = function(image, assetId, err){
// get extent and projection info from ESA CCI landcover map
var esaLandcover = ee.Image('users/spawnwisc/ESACCI-LC-L4-LCCS-Map-300m-P1Y-1992_2015-v207')
var proj = esaLandcover.projection()
var bounds = esaLandcover.geometry()
// get agb image and reduce resolution with mean reducer
var meanImg = image.select(0).float()
var meanImg300 = meanImg.reduceResolution(ee.Reducer.mean())
// if there is an error band, aggregate it in accordance with summation in quadrature
if (err){
meanImg = meanImg.addBands(aggregateError(image.select(1).float()))
}
meanImg = meanImg.reproject({crs:proj})
// implementation of export method for global data from Matt Hancher:
// https://code.earthengine.google.com/842bb3b598dcb9b1e8d6c6e33123943c
exportAsset300m(meanImg, assetId)
}
exports.aggregate300m = aggregate300m
|
# This file is part of the GhostDriver project from Neustar inc.
#
# Copyright (c) 2012, Ivan De Marino <[email protected] / [email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#import sys
#sys.path.insert(0, "utils")
import properties
import unittest
from selenium import webdriver
from settings import *
class BaseTest(unittest.TestCase):
def __init__(self, arguments):
super(BaseTest, self).__init__(arguments)
# Reading configuration
self.config = properties.Properties()
#self.config.load(open("../config.ini"))
self.config.load(open("headless_tests/config.ini"))
# Preparing Capabilities
self.caps = {
'takeScreenshot': False,
'javascriptEnabled': True
}
def setUp(self):
driver = self.config.getProperty("driver")
# TODO Use/Make a PhantomJSDriver for Python
# TODO Handle the case where "driver" is a URL to a RemoteWebDriver instance
# Decide the Driver to use
if driver == "firefox":
self.driver = webdriver.Firefox()
else:
self.driver = webdriver.Remote(
command_executor="http://{0}:{1}/wd/hub".format(DEVELOPMENT_SERVER_HOST,PHANTOMJS_GHOSTDRIVER_PORT),
desired_capabilities=self.caps)
self.driver.implicitly_wait(30)
self.driver.set_window_size(800,600)
self.base_url = "http://{0}:{1}".format(DEVELOPMENT_SERVER_HOST,DEVELOPMENT_SERVER_PORT)
self.verificationErrors = []
self.accept_next_alert = True
def tearDown(self):
self.driver.close()
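# --- Hedged usage sketch (not part of the original suite) ---
# A minimal concrete test built on BaseTest: it reuses the driver configured in
# setUp() and the base_url pointing at the development server. The "/" route is an
# assumption for illustration only.
class ExampleSmokeTest(BaseTest):
    def test_homepage_loads(self):
        self.driver.get(self.base_url + "/")
        self.assertTrue(self.driver.current_url.startswith(self.base_url))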
|
from pysc2.agents import base_agent
from pysc2.env import sc2_env
from pysc2.lib import actions, features, units
from absl import app
import random
import time
import logging
# Functions
_BUILD_BARRACKS = actions.FUNCTIONS.Build_Barracks_screen.id
_BUILD_SUPPLYDEPOT = actions.FUNCTIONS.Build_SupplyDepot_screen.id
_NOOP = actions.FUNCTIONS.no_op.id
_SELECT_POINT = actions.FUNCTIONS.select_point.id
_TRAIN_MARINE = actions.FUNCTIONS.Train_Marine_quick.id
_RALLY_UNITS_MINIMAP = actions.FUNCTIONS.Rally_Units_minimap.id
# Features
_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index
_UNIT_TYPE = features.SCREEN_FEATURES.unit_type.index
# Unit IDs
_TERRAN_BARRACKS = 21
_TERRAN_COMMANDCENTER = 18
_TERRAN_SUPPLYDEPOT = 19
_TERRAN_SCV = 45
# Parameters
_PLAYER_SELF = 1
_SUPPLY_USED = 3
_SUPPLY_MAX = 4
_NOT_QUEUED = [0]
_QUEUED = [1]
class SimpleAgent(base_agent.BaseAgent):
def __init__(self):
super(SimpleAgent, self).__init__()
self.attack_coordinates = None
self.supply_depot_built = None
self.barracks_built = None
self.scv_selected = None
self.base_top_left = None
self.commandcenter_selected = None
def unit_type_is_selected(self, obs, unit_type):
if (len(obs.observation.single_select) > 0 and
obs.observation.single_select[0].unit_type == unit_type):
return True
if (len(obs.observation.multi_select) > 0 and
obs.observation.multi_select[0].unit_type == unit_type):
return True
return False
def get_units_by_type(self, obs, unit_type):
return [unit for unit in obs.observation.feature_units
if unit.unit_type == unit_type]
def can_do(self, obs, action):
return action in obs.observation.available_actions
def transformLocation(self, x, x_distance, y, y_distance):
if not self.base_top_left:
return [x - x_distance, y - y_distance]
return [x + x_distance, y + y_distance]
def step(self, obs):
super(SimpleAgent, self).step(obs)
time.sleep(0.25)
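# On the first observation, infer which corner our base occupies from the mean
# position of friendly units on the minimap, and store attack coordinates pointing
# at the opposite corner (values below assume the Simple64 map used in main()).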
if obs.first():
player_y, player_x = (obs.observation.feature_minimap.player_relative ==
features.PlayerRelative.SELF).nonzero()
xmean = player_x.mean()
ymean = player_y.mean()
if xmean <= 31 and ymean <= 31:
self.attack_coordinates = (49, 49)
else:
self.attack_coordinates = (12, 16)
if not self.supply_depot_built:
if not self.scv_selected:
unit_type_scv = self.get_units_by_type(obs, units.Terran.SCV)
#logging.warning(unit_type_scv)
if len(unit_type_scv) > 0:
if self.unit_type_is_selected(obs, units.Terran.SCV):
if (actions.FUNCTIONS.Build_SupplyDepot_screen.id in
obs.observation.available_actions):
x = random.randint(0, 83)
y = random.randint(0, 83)
logging.warning("unit type selected scv buidling supply depot" )
#self.supply_depot_built = True #flag
#self.scv_selected = True #flag
return(actions.FUNCTIONS.Build_SupplyDepot_screen("now", (x , y)))
if not self.commandcenter_selected:
if self.can_do(obs, actions.FUNCTIONS.Train_SCV_quick.id):
return(actions.FUNCTIONS.Train_SCV_quick("now"))
if not self.barracks_built: #TODO add condition count > 3
if not self.scv_selected:
unit_type_scv = self.get_units_by_type(obs, units.Terran.SCV)
if len(unit_type_scv) > 0:
logging.warning("barracks point 1" )
if self.unit_type_is_selected(obs, units.Terran.SCV):
if (actions.FUNCTIONS.Build_Barracks_screen.id in
obs.observation.available_actions):
logging.warning("barracks point 2" )
x = random.randint(0, 83)
y = random.randint(0, 83)
return(actions.FUNCTIONS.Build_Barracks_screen("now", (x , y)))
return actions.FUNCTIONS.no_op()
def main(unused_argv):
logging.basicConfig(filename='pysc2.log',level=logging.DEBUG , format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
agent = SimpleAgent()
try:
while True:
with sc2_env.SC2Env(
map_name="Simple64",
players=[sc2_env.Agent(sc2_env.Race.terran),
sc2_env.Bot(sc2_env.Race.random,
sc2_env.Difficulty.very_easy)],
agent_interface_format=features.AgentInterfaceFormat(
feature_dimensions=features.Dimensions(screen=84, minimap=64),
use_feature_units=True),
step_mul=16,
game_steps_per_episode=0,
visualize=True) as env:
agent.setup(env.observation_spec(), env.action_spec())
timesteps = env.reset()
agent.reset()
while True:
step_actions = [agent.step(timesteps[0])]
if timesteps[0].last():
break
timesteps = env.step(step_actions)
except KeyboardInterrupt:
pass
if __name__ == "__main__":
app.run(main)
|
define("Types/_entity/format/RealField",["require","exports","tslib","Types/_entity/format/Field"],function(e,t,i,o){"use strict";Object.defineProperty(t,"__esModule",{value:true});var r=function(e){function t(){return null!==e&&e.apply(this,arguments)||this}return i.__extends(t,e),t.prototype.getPrecision=function(){return this._$precision},t.prototype.setPrecision=function(e){this._$precision=e},t}(o.default);(t.default=r).prototype["[Types/_entity/format/RealField]"]=true,r.prototype._moduleName="Types/entity:format.RealField",r.prototype._typeName="Real",r.prototype._$defaultValue=0,r.prototype._$precision=16}); |
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
$provide.value("$locale", {
"DATETIME_FORMATS": {
"AMPMS": {
"0": "\u062f\u0646",
"1": "\u0631\u0627\u062a"
},
"DAY": {
"0": "\u0627\u062a\u0648\u0627\u0631",
"1": "\u067e\u064a\u0631",
"2": "\u0645\u0646\u06af\u0644",
"3": "\u0628\u062f\u0647",
"4": "\u062c\u0645\u0639\u0631\u0627\u062a",
"5": "\u062c\u0645\u0639\u06c1",
"6": "\u06c1\u0641\u062a\u06c1"
},
"MONTH": {
"0": "\u062c\u0646\u0648\u0631\u06cc",
"1": "\u0641\u0631\u0648\u0631\u06cc",
"2": "\u0645\u0627\u0631\u0686",
"3": "\u0627\u067e\u0631\u064a\u0644",
"4": "\u0645\u0626",
"5": "\u062c\u0648\u0646",
"6": "\u062c\u0648\u0644\u0627\u0626",
"7": "\u0627\u06af\u0633\u062a",
"8": "\u0633\u062a\u0645\u0628\u0631",
"9": "\u0627\u06a9\u062a\u0648\u0628\u0631",
"10": "\u0646\u0648\u0645\u0628\u0631",
"11": "\u062f\u0633\u0645\u0628\u0631"
},
"SHORTDAY": {
"0": "\u0627\u062a\u0648\u0627\u0631",
"1": "\u067e\u064a\u0631",
"2": "\u0645\u0646\u06af\u0644",
"3": "\u0628\u062f\u0647",
"4": "\u062c\u0645\u0639\u0631\u0627\u062a",
"5": "\u062c\u0645\u0639\u06c1",
"6": "\u06c1\u0641\u062a\u06c1"
},
"SHORTMONTH": {
"0": "\u062c\u0646\u0648\u0631\u06cc",
"1": "\u0641\u0631\u0648\u0631\u06cc",
"2": "\u0645\u0627\u0631\u0686",
"3": "\u0627\u067e\u0631\u064a\u0644",
"4": "\u0645\u0626",
"5": "\u062c\u0648\u0646",
"6": "\u062c\u0648\u0644\u0627\u0626",
"7": "\u0627\u06af\u0633\u062a",
"8": "\u0633\u062a\u0645\u0628\u0631",
"9": "\u0627\u06a9\u062a\u0648\u0628\u0631",
"10": "\u0646\u0648\u0645\u0628\u0631",
"11": "\u062f\u0633\u0645\u0628\u0631"
},
"fullDate": "EEEE\u060d d\u060d MMMM y",
"longDate": "d\u060d MMMM y",
"medium": "d\u060d MMM y h:mm:ss a",
"mediumDate": "d\u060d MMM y",
"mediumTime": "h:mm:ss a",
"short": "d/M/yy h:mm a",
"shortDate": "d/M/yy",
"shortTime": "h:mm a"
},
"NUMBER_FORMATS": {
"CURRENCY_SYM": "Rs",
"DECIMAL_SEP": ".",
"GROUP_SEP": ",",
"PATTERNS": {
"0": {
"gSize": 3,
"lgSize": 3,
"macFrac": 0,
"maxFrac": 3,
"minFrac": 0,
"minInt": 1,
"negPre": "-",
"negSuf": "",
"posPre": "",
"posSuf": ""
},
"1": {
"gSize": 3,
"lgSize": 3,
"macFrac": 0,
"maxFrac": 2,
"minFrac": 2,
"minInt": 1,
"negPre": "\u00a4-",
"negSuf": "",
"posPre": "\u00a4",
"posSuf": ""
}
}
},
"id": "ur-pk",
"pluralCat": function (n) { if (n == 1) { return PLURAL_CATEGORY.ONE; } return PLURAL_CATEGORY.OTHER;}
});
}]); |
/**
* Stacked 3D columns are column charts where categories are stacked on top of each other.
* This is typically done to visually represent the total of all categories
* for a given period or value.
*/
Ext.define('KitchenSink.view.charts.column3d.Stacked', {
extend: 'Ext.panel.Panel',
xtype: 'column-stacked-3d',
controller: 'column-stacked-3d',
//<example>
otherContent: [{
type: 'Controller',
path: 'classic/samples/view/charts/column3d/StackedController.js'
}, {
type: 'Store',
path: 'app/store/EconomySectors.js'
}],
//</example>
requires: [
'Ext.chart.CartesianChart',
'Ext.chart.axis.Numeric',
'Ext.chart.axis.Time',
'Ext.draw.modifier.Highlight',
'Ext.chart.interactions.ItemHighlight',
'Ext.chart.theme.*'
],
layout: 'vbox',
width: '${width}',
profiles: {
classic: {
width: 650
},
neptune: {
width: 650
},
graphite: {
width: 900
}
},
tbar: [
'->',
{
text: 'Switch Theme',
handler: 'onThemeSwitch'
},
{
xtype: 'segmentedbutton',
width: 200,
items: [{
text: 'Stack',
pressed: true
}, {
text: 'Group'
}],
listeners: {
toggle: 'onStackedToggle'
}
}
],
items: [{
xtype: 'cartesian',
reference: 'chart',
captions: {
title: 'Major economies by GDP sector composition (2011)'
},
store: {type: 'economy-sectors'},
theme: 'Muted',
width: '100%',
height: 500,
interactions: ['itemhighlight'],
series: {
type: 'bar3d',
xField: 'country',
yField: ['agr', 'ind', 'ser'],
title: ['Agriculture', 'Industry', 'Services'],
style: {
maxBarWidth: 80
},
highlight: true,
tooltip: {
trackMouse: true,
renderer: 'onTooltipRender'
}
},
legend: {
docked: 'bottom'
},
axes: [{
type: 'numeric3d',
position: 'left',
grid: {
odd: {
fillStyle: 'rgba(255, 255, 255, 0.06)'
},
even: {
fillStyle: 'rgba(0, 0, 0, 0.03)'
}
},
title: 'Billions of USD',
renderer: 'onAxisLabelRender',
listeners: {
rangechange: 'onAxisRangeChange'
}
}, {
type: 'category3d',
position: 'bottom',
grid: true
}]
}, {
xtype: 'container',
width: '100%',
padding: 10,
layout: {
type: 'hbox',
pack: 'center'
},
items: {
xtype: 'form',
defaults: {
labelAlign: 'right',
labelPad: 15,
width: 400
},
items: [{
xtype: 'sliderfield',
fieldLabel: 'Saturation',
value: 1,
maxValue: 1.5,
increment: 0.05,
decimalPrecision: 2,
listeners: {
change: 'onSaturationChange',
dragstart: 'onSliderDragStart',
dragend: 'onSliderDragEnd'
}
}, {
xtype: 'sliderfield',
fieldLabel: 'Brightness',
value: 1,
maxValue: 1.5,
increment: 0.05,
decimalPrecision: 2,
listeners: {
change: 'onBrightnessChange',
dragstart: 'onSliderDragStart',
dragend: 'onSliderDragEnd'
}
}, {
xtype: 'sliderfield',
fieldLabel: 'Color Spread',
value: 1,
maxValue: 1.5,
increment: 0.05,
decimalPrecision: 2,
listeners: {
change: 'onColorSpreadChange',
dragstart: 'onSliderDragStart',
dragend: 'onSliderDragEnd'
}
}]
}
}]
});
|
#!/usr/bin/env python
class Node:
def __init__(self, data):
self.vertex = data
self.next = None
class Graph:
"""Adjacency list representation by a Graph."""
def __init__(self, vertices):
self.v = vertices
self.graph = [None] * self.v
def add_edge(self, src, dest):
"""Add and edge to undirected graph."""
node = Node(dest) # Adding node to the source node
node.next = self.graph[src]
self.graph[src] = node
node = Node(src) # Adding source node to the destination
node.next = self.graph[dest]
self.graph[dest] = node
def print(self):
for i in range(self.v):
print("Adjacency list of vertex{}\n head.".format(i), end="")
temp = self.graph[i]
while temp:
print(" -> {}".format(temp.vertex), end="")
temp = temp.next
print(" \n")
|
/**
* @jest-environment node
*/
// @flow
const puppeteer = require('puppeteer');
const urlSingleList = 'http://localhost:9002/iframe.html?selectedKind=single%20vertical%20list&selectedStory=basic';
const timeout = 30000;
const returnPositionAndText = async (page, elem) =>
page.$$eval(elem, el =>
({
height: el[0].offsetHeight,
left: el[0].offsetLeft,
top: el[0].offsetTop,
text: el[0].innerText,
}));
/* Css selectors used */
const singleListContainer = '[data-react-beautiful-dnd-droppable="0"]';
const firstCard = '#root div > div:nth-child(1) > a';
const secondCard = '#root div > div:nth-child(2) > a';
describe('Single List > ', () => {
let browser;
let page;
beforeAll(async () => {
/* args are supplied to avoid issues in Travis CI to launch chromium:
https://github.com/GoogleChrome/puppeteer/issues/807 */
browser = await puppeteer.launch({ headless: true, slowMo: 100, args: ['--no-sandbox', '--disable-setuid-sandbox'] });
page = await browser.newPage();
await page.goto(urlSingleList);
await page.waitForSelector(singleListContainer);
}, timeout);
afterAll(async () => {
await browser.close();
});
it('should be able to drag the first card under the second', async () => {
/* Before drag */
let firstPosition = await returnPositionAndText(page, firstCard);
let secondPosition = await returnPositionAndText(page, secondCard);
const beforeDragFirstCardContent = firstPosition.text;
const beforeDragSecondCardContent = secondPosition.text;
// Check
// - Cards should not be at the same position
// - Texts content should not be the same
expect(firstPosition.top).toBeLessThan(secondPosition.top);
expect(firstPosition.text).not.toBe(secondPosition.text);
// Select first element and move to the second place
await page.keyboard.press('Tab');
await page.keyboard.press('Space');
await page.keyboard.press('ArrowDown');
await page.keyboard.press('Space');
/* After drag first and second cards are swapped */
firstPosition = await returnPositionAndText(page, firstCard);
secondPosition = await returnPositionAndText(page, secondCard);
const afterDragFirstCardContent = firstPosition.text;
const afterDragSecondCardContent = secondPosition.text;
// Check
// - Texts content should not be the same after drag
expect(beforeDragFirstCardContent).toEqual(afterDragSecondCardContent);
expect(beforeDragSecondCardContent).toEqual(afterDragFirstCardContent);
});
});
|
import asyncio
import logging
import pathlib
import time
import traceback
from asyncio import Task
from math import floor
from typing import Dict, Optional, Set, List, Tuple, Callable
import os
import yaml
from blspy import AugSchemeMPL, G1Element
from spare.consensus.block_rewards import calculate_pool_reward
from spare.pools.pool_wallet_info import PoolState, PoolSingletonState
from spare.protocols.pool_protocol import (
PoolErrorCode,
PostPartialRequest,
PostPartialResponse,
PostFarmerRequest,
PostFarmerResponse,
PutFarmerRequest,
PutFarmerResponse,
POOL_PROTOCOL_VERSION,
)
from spare.rpc.wallet_rpc_client import WalletRpcClient
from spare.types.blockchain_format.coin import Coin
from spare.types.coin_record import CoinRecord
from spare.types.coin_solution import CoinSolution
from spare.util.bech32m import decode_puzzle_hash
from spare.consensus.constants import ConsensusConstants
from spare.util.ints import uint8, uint16, uint32, uint64
from spare.util.byte_types import hexstr_to_bytes
from spare.util.default_root import DEFAULT_ROOT_PATH
from spare.rpc.full_node_rpc_client import FullNodeRpcClient
from spare.full_node.signage_point import SignagePoint
from spare.types.end_of_slot_bundle import EndOfSubSlotBundle
from spare.types.blockchain_format.sized_bytes import bytes32
from spare.consensus.pot_iterations import calculate_iterations_quality
from spare.util.lru_cache import LRUCache
from spare.util.spare_logging import initialize_logging
from spare.wallet.transaction_record import TransactionRecord
from spare.pools.pool_puzzles import (
get_most_recent_singleton_coin_from_coin_solution,
get_delayed_puz_info_from_launcher_spend,
launcher_id_to_p2_puzzle_hash,
)
from .difficulty_adjustment import get_new_difficulty
from .singleton import create_absorb_transaction, get_singleton_state, get_coin_spend, get_farmed_height
from .store.abstract import AbstractPoolStore
from .store.sqlite_store import SqlitePoolStore
from .record import FarmerRecord
from .util import error_dict, RequestMetadata
class Pool:
def __init__(
self,
config: Dict,
constants: ConsensusConstants,
pool_store: Optional[AbstractPoolStore] = None,
difficulty_function: Callable = get_new_difficulty,
):
self.follow_singleton_tasks: Dict[bytes32, asyncio.Task] = {}
self.log = logging
# If you want to log to a file: use filename='example.log', encoding='utf-8'
self.log.basicConfig(level=logging.INFO)
# We load our configurations from here
with open(os.getcwd() + "/config.yaml") as f:
pool_config: Dict = yaml.safe_load(f)
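# config.yaml is read from the current working directory; it must define the keys
# referenced below (pool_info, logging, pool_fee, relative_lock_height, pool_url,
# difficulty and partial settings, payout addresses, wallet and RPC ports, etc.).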
initialize_logging("pool", pool_config["logging"], pathlib.Path(pool_config["logging"]["log_path"]))
# Set our pool info here
self.info_default_res = pool_config["pool_info"]["default_res"]
self.info_name = pool_config["pool_info"]["name"]
self.info_logo_url = pool_config["pool_info"]["logo_url"]
self.info_description = pool_config["pool_info"]["description"]
self.welcome_message = pool_config["welcome_message"]
self.config = config
self.constants = constants
self.store: AbstractPoolStore = pool_store or SqlitePoolStore()
self.pool_fee = pool_config["pool_fee"]
# This number should be held constant and be consistent for every pool in the network. DO NOT CHANGE
self.iters_limit = self.constants.POOL_SUB_SLOT_ITERS // 64
# This number should not be changed, since users will put this into their singletons
self.relative_lock_height = uint32(pool_config["relative_lock_height"])
# TODO(pool): potentially tweak these numbers for security and performance
# This is what the user enters into the input field. This exact value will be stored on the blockchain
self.pool_url = pool_config["pool_url"]
self.min_difficulty = uint64(pool_config["min_difficulty"]) # 10 difficulty is about 1 proof a day per plot
self.default_difficulty: uint64 = uint64(pool_config["default_difficulty"])
self.difficulty_function: Callable = difficulty_function
self.pending_point_partials: Optional[asyncio.Queue] = None
self.recent_points_added: LRUCache = LRUCache(20000)
# The time in minutes for an authentication token to be valid. See "Farmer authentication" in SPECIFICATION.md
self.authentication_token_timeout: uint8 = pool_config["authentication_token_timeout"]
# This is where the block rewards will get paid out to. The pool needs to support this address forever,
# since the farmers will encode it into their singleton on the blockchain. WARNING: the default pool code
# completely spends this wallet and distributes it to users, so don't put any additional funds in here
# that you do not want to distribute. Even if the funds are in a different address than this one, they WILL
# be spent by this code! So only put funds that you want to distribute to pool members here.
# Using 2164248527
self.default_target_puzzle_hash: bytes32 = bytes32(decode_puzzle_hash(pool_config["default_target_address"]))
# The pool fees will be sent to this address. This MUST be on a different key than the target_puzzle_hash,
# otherwise, the fees will be sent to the users. Using 690783650
self.pool_fee_puzzle_hash: bytes32 = bytes32(decode_puzzle_hash(pool_config["pool_fee_address"]))
# This is the wallet fingerprint and ID for the wallet spending the funds from `self.default_target_puzzle_hash`
self.wallet_fingerprint = pool_config["wallet_fingerprint"]
self.wallet_id = pool_config["wallet_id"]
# We need to check for slow farmers. If farmers cannot submit proofs in time, they won't be able to win
# any rewards either. This number can be tweaked to be more or less strict. More strict ensures everyone
# gets high rewards, but it might cause some of the slower farmers to not be able to participate in the pool.
self.partial_time_limit: int = pool_config["partial_time_limit"]
# There is always a risk of a reorg, in which case we cannot reward farmers that submitted partials in that
# reorg. That is why we have a time delay before changing any account points.
self.partial_confirmation_delay: int = pool_config["partial_confirmation_delay"]
# Only allow PUT /farmer per launcher_id every n seconds to prevent difficulty change attacks.
self.farmer_update_blocked: set = set()
self.farmer_update_cooldown_seconds: int = 600
# These are the phs that we want to look for on chain, that we can claim to our pool
self.scan_p2_singleton_puzzle_hashes: Set[bytes32] = set()
# Don't scan anything before this height, for efficiency (for example pool start date)
self.scan_start_height: uint32 = uint32(pool_config["scan_start_height"])
# Interval for scanning and collecting the pool rewards
self.collect_pool_rewards_interval = pool_config["collect_pool_rewards_interval"]
# After this many confirmations, a transaction is considered final and irreversible
self.confirmation_security_threshold = pool_config["confirmation_security_threshold"]
# Interval for making payout transactions to farmers
self.payment_interval = pool_config["payment_interval"]
# We will not make transactions with more targets than this, to ensure our transaction gets into the blockchain
# faster.
self.max_additions_per_transaction = pool_config["max_additions_per_transaction"]
# This is the list of payments that we have not sent yet, to farmers
self.pending_payments: Optional[asyncio.Queue] = None
# Keeps track of the latest state of our node
self.blockchain_state = {"peak": None}
# Whether or not the wallet is synced (required to make payments)
self.wallet_synced = False
# We target these many partials for this number of seconds. We adjust after receiving this many partials.
self.number_of_partials_target: int = pool_config["number_of_partials_target"]
self.time_target: int = pool_config["time_target"]
# Tasks (infinite While loops) for different purposes
self.confirm_partials_loop_task: Optional[asyncio.Task] = None
self.collect_pool_rewards_loop_task: Optional[asyncio.Task] = None
self.create_payment_loop_task: Optional[asyncio.Task] = None
self.submit_payment_loop_task: Optional[asyncio.Task] = None
self.get_peak_loop_task: Optional[asyncio.Task] = None
self.node_rpc_client: Optional[FullNodeRpcClient] = None
self.node_rpc_port = pool_config["node_rpc_port"]
self.wallet_rpc_client: Optional[WalletRpcClient] = None
self.wallet_rpc_port = pool_config["wallet_rpc_port"]
async def start(self):
await self.store.connect()
self.pending_point_partials = asyncio.Queue()
self_hostname = self.config["self_hostname"]
self.node_rpc_client = await FullNodeRpcClient.create(
self_hostname, uint16(self.node_rpc_port), DEFAULT_ROOT_PATH, self.config
)
self.wallet_rpc_client = await WalletRpcClient.create(
self.config["self_hostname"], uint16(self.wallet_rpc_port), DEFAULT_ROOT_PATH, self.config
)
self.blockchain_state = await self.node_rpc_client.get_blockchain_state()
res = await self.wallet_rpc_client.log_in_and_skip(fingerprint=self.wallet_fingerprint)
if not res["success"]:
raise ValueError(f"Error logging in: {res['error']}. Make sure your config fingerprint is correct.")
self.log.info(f"Logging in: {res}")
res = await self.wallet_rpc_client.get_wallet_balance(self.wallet_id)
self.log.info(f"Obtaining balance: {res}")
self.scan_p2_singleton_puzzle_hashes = await self.store.get_pay_to_singleton_phs()
self.confirm_partials_loop_task = asyncio.create_task(self.confirm_partials_loop())
self.collect_pool_rewards_loop_task = asyncio.create_task(self.collect_pool_rewards_loop())
self.create_payment_loop_task = asyncio.create_task(self.create_payment_loop())
self.submit_payment_loop_task = asyncio.create_task(self.submit_payment_loop())
self.get_peak_loop_task = asyncio.create_task(self.get_peak_loop())
self.pending_payments = asyncio.Queue()
async def stop(self):
if self.confirm_partials_loop_task is not None:
self.confirm_partials_loop_task.cancel()
if self.collect_pool_rewards_loop_task is not None:
self.collect_pool_rewards_loop_task.cancel()
if self.create_payment_loop_task is not None:
self.create_payment_loop_task.cancel()
if self.submit_payment_loop_task is not None:
self.submit_payment_loop_task.cancel()
if self.get_peak_loop_task is not None:
self.get_peak_loop_task.cancel()
self.wallet_rpc_client.close()
await self.wallet_rpc_client.await_closed()
self.node_rpc_client.close()
await self.node_rpc_client.await_closed()
await self.store.connection.close()
async def get_peak_loop(self):
"""
Periodically contacts the full node to get the latest state of the blockchain
"""
while True:
try:
self.blockchain_state = await self.node_rpc_client.get_blockchain_state()
self.wallet_synced = await self.wallet_rpc_client.get_synced()
await asyncio.sleep(30)
except asyncio.CancelledError:
self.log.info("Cancelled get_peak_loop, closing")
return
except Exception as e:
self.log.error(f"Unexpected error in get_peak_loop: {e}")
await asyncio.sleep(30)
async def collect_pool_rewards_loop(self):
"""
Iterates through the blockchain, looking for pool rewards, and claims them, creating a transaction to the
pool's puzzle_hash.
"""
while True:
try:
if not self.blockchain_state["sync"]["synced"]:
await asyncio.sleep(60)
continue
scan_phs: List[bytes32] = list(self.scan_p2_singleton_puzzle_hashes)
peak_height = self.blockchain_state["peak"].height
# Only get puzzle hashes with a certain number of confirmations or more, to avoid reorg issues
coin_records: List[CoinRecord] = await self.node_rpc_client.get_coin_records_by_puzzle_hashes(
scan_phs,
include_spent_coins=False,
start_height=self.scan_start_height,
)
self.log.info(
f"Scanning for block rewards from {self.scan_start_height} to {peak_height}. "
f"Found: {len(coin_records)}"
)
ph_to_amounts: Dict[bytes32, int] = {}
ph_to_coins: Dict[bytes32, List[CoinRecord]] = {}
not_buried_amounts = 0
for cr in coin_records:
if not cr.coinbase:
self.log.info(f"Non coinbase coin: {cr.coin}, ignoring")
continue
if cr.confirmed_block_index > peak_height - self.confirmation_security_threshold:
not_buried_amounts += cr.coin.amount
continue
if cr.coin.puzzle_hash not in ph_to_amounts:
ph_to_amounts[cr.coin.puzzle_hash] = 0
ph_to_coins[cr.coin.puzzle_hash] = []
ph_to_amounts[cr.coin.puzzle_hash] += cr.coin.amount
ph_to_coins[cr.coin.puzzle_hash].append(cr)
# For each p2sph, get the FarmerRecords
farmer_records = await self.store.get_farmer_records_for_p2_singleton_phs(
set([ph for ph in ph_to_amounts.keys()])
)
# For each singleton, create, submit, and save a claim transaction
claimable_amounts = 0
not_claimable_amounts = 0
for rec in farmer_records:
if rec.is_pool_member:
claimable_amounts += ph_to_amounts[rec.p2_singleton_puzzle_hash]
else:
not_claimable_amounts += ph_to_amounts[rec.p2_singleton_puzzle_hash]
if len(coin_records) > 0:
self.log.info(f"Claimable amount: {claimable_amounts / (10**12)}")
self.log.info(f"Not claimable amount: {not_claimable_amounts / (10**12)}")
self.log.info(f"Not buried amounts: {not_buried_amounts / (10**12)}")
for rec in farmer_records:
if rec.is_pool_member:
singleton_tip: Optional[Coin] = get_most_recent_singleton_coin_from_coin_solution(
rec.singleton_tip
)
if singleton_tip is None:
continue
singleton_coin_record: Optional[
CoinRecord
] = await self.node_rpc_client.get_coin_record_by_name(singleton_tip.name())
if singleton_coin_record is None:
continue
if singleton_coin_record.spent:
self.log.warning(
f"Singleton coin {singleton_coin_record.coin.name()} is spent, will not "
f"claim rewards"
)
continue
spend_bundle = await create_absorb_transaction(
self.node_rpc_client,
rec,
self.blockchain_state["peak"].height,
ph_to_coins[rec.p2_singleton_puzzle_hash],
self.constants.GENESIS_CHALLENGE,
)
if spend_bundle is None:
continue
push_tx_response: Dict = await self.node_rpc_client.push_tx(spend_bundle)
if push_tx_response["status"] == "SUCCESS":
# TODO(pool): save transaction in records
self.log.info(f"Submitted transaction successfully: {spend_bundle.name().hex()}")
else:
self.log.error(f"Error submitting transaction: {push_tx_response}")
await asyncio.sleep(self.collect_pool_rewards_interval)
except asyncio.CancelledError:
self.log.info("Cancelled collect_pool_rewards_loop, closing")
return
except Exception as e:
error_stack = traceback.format_exc()
self.log.error(f"Unexpected error in collect_pool_rewards_loop: {e} {error_stack}")
await asyncio.sleep(self.collect_pool_rewards_interval)
async def create_payment_loop(self):
"""
Calculates the points of each farmer, and splits the total funds received into coins for each farmer.
Saves the transactions that we should make, to `amount_to_distribute`.
"""
while True:
try:
if not self.blockchain_state["sync"]["synced"]:
self.log.warning("Not synced, waiting")
await asyncio.sleep(60)
continue
if self.pending_payments.qsize() != 0:
self.log.warning(f"Pending payments ({self.pending_payments.qsize()}), waiting")
await asyncio.sleep(60)
continue
self.log.info("Starting to create payment")
coin_records: List[CoinRecord] = await self.node_rpc_client.get_coin_records_by_puzzle_hash(
self.default_target_puzzle_hash, include_spent_coins=False
)
if len(coin_records) == 0:
self.log.info("No funds to distribute.")
await asyncio.sleep(120)
continue
total_amount_claimed = sum([c.coin.amount for c in coin_records])
pool_coin_amount = int(total_amount_claimed * self.pool_fee)
amount_to_distribute = total_amount_claimed - pool_coin_amount
if total_amount_claimed < calculate_pool_reward(uint32(1)): # 1.75 SPARE
self.log.info(f"Do not have enough funds to distribute: {total_amount_claimed}, skipping payout")
continue
self.log.info(f"Total amount claimed: {total_amount_claimed / (10 ** 12)}")
self.log.info(f"Pool coin amount (includes blockchain fee) {pool_coin_amount / (10 ** 12)}")
self.log.info(f"Total amount to distribute: {amount_to_distribute / (10 ** 12)}")
async with self.store.lock:
# Get the points of each farmer, as well as payout instructions. Here a spare address is used,
# but other blockchain addresses can also be used.
points_and_ph: List[
Tuple[uint64, bytes]
] = await self.store.get_farmer_points_and_payout_instructions()
total_points = sum([pt for (pt, ph) in points_and_ph])
if total_points > 0:
mojo_per_point = floor(amount_to_distribute / total_points)
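# The pool fee was already carved out above, so each farmer simply receives
# points * mojo_per_point; flooring means any small remainder stays in the pool wallet.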
self.log.info(f"Paying out {mojo_per_point} mojo / point")
additions_sub_list: List[Dict] = [
{"puzzle_hash": self.pool_fee_puzzle_hash, "amount": pool_coin_amount}
]
for points, ph in points_and_ph:
if points > 0:
additions_sub_list.append({"puzzle_hash": ph, "amount": points * mojo_per_point})
if len(additions_sub_list) == self.max_additions_per_transaction:
await self.pending_payments.put(additions_sub_list.copy())
self.log.info(f"Will make payments: {additions_sub_list}")
additions_sub_list = []
if len(additions_sub_list) > 0:
self.log.info(f"Will make payments: {additions_sub_list}")
await self.pending_payments.put(additions_sub_list.copy())
# Subtract the points from each farmer
await self.store.clear_farmer_points()
else:
self.log.info(f"No points for any farmer. Waiting {self.payment_interval}")
await asyncio.sleep(self.payment_interval)
except asyncio.CancelledError:
self.log.info("Cancelled create_payments_loop, closing")
return
except Exception as e:
error_stack = traceback.format_exc()
self.log.error(f"Unexpected error in create_payments_loop: {e} {error_stack}")
await asyncio.sleep(self.payment_interval)
async def submit_payment_loop(self):
while True:
try:
peak_height = self.blockchain_state["peak"].height
await self.wallet_rpc_client.log_in_and_skip(fingerprint=self.wallet_fingerprint)
if not self.blockchain_state["sync"]["synced"] or not self.wallet_synced:
self.log.warning("Waiting for wallet sync")
await asyncio.sleep(60)
continue
payment_targets = await self.pending_payments.get()
assert len(payment_targets) > 0
self.log.info(f"Submitting a payment: {payment_targets}")
# TODO(pool): make sure you have enough to pay the blockchain fee, this will be taken out of the pool
# fee itself. Alternatively you can set it to 0 and wait longer
# blockchain_fee = 0.00001 * (10 ** 12) * len(payment_targets)
blockchain_fee = 0
try:
transaction: TransactionRecord = await self.wallet_rpc_client.send_transaction_multi(
self.wallet_id, payment_targets, fee=blockchain_fee
)
except ValueError as e:
self.log.error(f"Error making payment: {e}")
await asyncio.sleep(10)
await self.pending_payments.put(payment_targets)
continue
self.log.info(f"Transaction: {transaction}")
while (
not transaction.confirmed
or not (peak_height - transaction.confirmed_at_height) > self.confirmation_security_threshold
):
transaction = await self.wallet_rpc_client.get_transaction(self.wallet_id, transaction.name)
peak_height = self.blockchain_state["peak"].height
self.log.info(
f"Waiting for transaction to obtain {self.confirmation_security_threshold} confirmations"
)
if not transaction.confirmed:
self.log.info(f"Not confirmed. In mempool? {transaction.is_in_mempool()}")
else:
self.log.info(f"Confirmations: {peak_height - transaction.confirmed_at_height}")
await asyncio.sleep(10)
# TODO(pool): persist in DB
self.log.info(f"Successfully confirmed payments {payment_targets}")
except asyncio.CancelledError:
self.log.info("Cancelled submit_payment_loop, closing")
return
except Exception as e:
# TODO(pool): retry transaction if failed
self.log.error(f"Unexpected error in submit_payment_loop: {e}")
await asyncio.sleep(60)
async def confirm_partials_loop(self):
"""
Pulls things from the queue of partials one at a time, and adjusts balances.
"""
while True:
try:
# The points are based on the difficulty at the time of partial submission, not at the time of
# confirmation
partial, time_received, points_received = await self.pending_point_partials.get()
# Wait a few minutes to check if partial is still valid in the blockchain (no reorgs)
await asyncio.sleep((max(0, time_received + self.partial_confirmation_delay - time.time() - 5)))
# Starts a task to check the remaining things for this partial and optionally update points
asyncio.create_task(self.check_and_confirm_partial(partial, points_received))
except asyncio.CancelledError:
self.log.info("Cancelled confirm partials loop, closing")
return
except Exception as e:
self.log.error(f"Unexpected error: {e}")
async def check_and_confirm_partial(self, partial: PostPartialRequest, points_received: uint64) -> None:
try:
# TODO(pool): these lookups to the full node are not efficient and can be cached, especially for
# scaling to many users
if partial.payload.end_of_sub_slot:
response = await self.node_rpc_client.get_recent_signage_point_or_eos(None, partial.payload.sp_hash)
if response is None or response["reverted"]:
self.log.info(f"Partial EOS reverted: {partial.payload.sp_hash}")
return
else:
response = await self.node_rpc_client.get_recent_signage_point_or_eos(partial.payload.sp_hash, None)
if response is None or response["reverted"]:
self.log.info(f"Partial SP reverted: {partial.payload.sp_hash}")
return
# Now we know that the partial came on time, but also that the signage point / EOS is still in the
# blockchain. We need to check for double submissions.
pos_hash = partial.payload.proof_of_space.get_hash()
if self.recent_points_added.get(pos_hash):
self.log.info(f"Double signage point submitted for proof: {partial.payload}")
return
self.recent_points_added.put(pos_hash, uint64(1))
# Now we need to check to see that the singleton in the blockchain is still assigned to this pool
singleton_state_tuple: Optional[
Tuple[CoinSolution, PoolState, bool]
] = await self.get_and_validate_singleton_state(partial.payload.launcher_id)
if singleton_state_tuple is None:
self.log.info(f"Invalid singleton {partial.payload.launcher_id}")
return
_, _, is_member = singleton_state_tuple
if not is_member:
self.log.info(f"Singleton is not assigned to this pool")
return
async with self.store.lock:
farmer_record: Optional[FarmerRecord] = await self.store.get_farmer_record(partial.payload.launcher_id)
assert (
partial.payload.proof_of_space.pool_contract_puzzle_hash == farmer_record.p2_singleton_puzzle_hash
)
if farmer_record.is_pool_member:
await self.store.add_partial(partial.payload.launcher_id, uint64(int(time.time())), points_received)
self.log.info(
f"Farmer {farmer_record.launcher_id} updated points to: "
f"{farmer_record.points + points_received}"
)
except Exception as e:
error_stack = traceback.format_exc()
self.log.error(f"Exception in confirming partial: {e} {error_stack}")
async def add_farmer(self, request: PostFarmerRequest, metadata: RequestMetadata) -> Dict:
async with self.store.lock:
farmer_record: Optional[FarmerRecord] = await self.store.get_farmer_record(request.payload.launcher_id)
if farmer_record is not None:
return error_dict(
PoolErrorCode.FARMER_ALREADY_KNOWN,
f"Farmer with launcher_id {request.payload.launcher_id} already known.",
)
singleton_state_tuple: Optional[
Tuple[CoinSolution, PoolState, bool]
] = await self.get_and_validate_singleton_state(request.payload.launcher_id)
if singleton_state_tuple is None:
return error_dict(PoolErrorCode.INVALID_SINGLETON, f"Invalid singleton {request.payload.launcher_id}")
last_spend, last_state, is_member = singleton_state_tuple
if is_member is None:
return error_dict(PoolErrorCode.INVALID_SINGLETON, f"Singleton is not assigned to this pool")
if (
request.payload.suggested_difficulty is None
or request.payload.suggested_difficulty < self.min_difficulty
):
difficulty: uint64 = self.default_difficulty
else:
difficulty = request.payload.suggested_difficulty
if len(hexstr_to_bytes(request.payload.payout_instructions)) != 32:
return error_dict(
PoolErrorCode.INVALID_PAYOUT_INSTRUCTIONS,
f"Payout instructions must be an SPARE address for this pool.",
)
if not AugSchemeMPL.verify(last_state.owner_pubkey, request.payload.get_hash(), request.signature):
return error_dict(PoolErrorCode.INVALID_SIGNATURE, f"Invalid signature")
launcher_coin: Optional[CoinRecord] = await self.node_rpc_client.get_coin_record_by_name(
request.payload.launcher_id
)
assert launcher_coin is not None and launcher_coin.spent
launcher_solution: Optional[CoinSolution] = await get_coin_spend(self.node_rpc_client, launcher_coin)
delay_time, delay_puzzle_hash = get_delayed_puz_info_from_launcher_spend(launcher_solution)
if delay_time < 3600:
return error_dict(PoolErrorCode.DELAY_TIME_TOO_SHORT, f"Delay time too short, must be at least 1 hour")
p2_singleton_puzzle_hash = launcher_id_to_p2_puzzle_hash(
request.payload.launcher_id, delay_time, delay_puzzle_hash
)
farmer_record = FarmerRecord(
request.payload.launcher_id,
p2_singleton_puzzle_hash,
delay_time,
delay_puzzle_hash,
request.payload.authentication_public_key,
last_spend,
last_state,
uint64(0),
difficulty,
request.payload.payout_instructions,
True,
)
self.scan_p2_singleton_puzzle_hashes.add(p2_singleton_puzzle_hash)
await self.store.add_farmer_record(farmer_record, metadata)
return PostFarmerResponse(self.welcome_message).to_json_dict()
async def update_farmer(self, request: PutFarmerRequest) -> Dict:
launcher_id = request.payload.launcher_id
# First check if this launcher_id is currently blocked for farmer updates, if so there is no reason to validate
# all the stuff below
if launcher_id in self.farmer_update_blocked:
return error_dict(PoolErrorCode.REQUEST_FAILED, f"Cannot update farmer yet.")
farmer_record: Optional[FarmerRecord] = await self.store.get_farmer_record(launcher_id)
if farmer_record is None:
return error_dict(PoolErrorCode.FARMER_NOT_KNOWN, f"Farmer with launcher_id {launcher_id} not known.")
singleton_state_tuple: Optional[
Tuple[CoinSolution, PoolState, bool]
] = await self.get_and_validate_singleton_state(launcher_id)
if singleton_state_tuple is None:
return error_dict(PoolErrorCode.INVALID_SINGLETON, f"Invalid singleton {request.payload.launcher_id}")
last_spend, last_state, is_member = singleton_state_tuple
if is_member is None:
return error_dict(PoolErrorCode.INVALID_SINGLETON, f"Singleton is not assigned to this pool")
if not AugSchemeMPL.verify(last_state.owner_pubkey, request.payload.get_hash(), request.signature):
return error_dict(PoolErrorCode.INVALID_SIGNATURE, f"Invalid signature")
farmer_dict = farmer_record.to_json_dict()
response_dict = {}
if request.payload.authentication_public_key is not None:
is_new_value = farmer_record.authentication_public_key != request.payload.authentication_public_key
response_dict["authentication_public_key"] = is_new_value
if is_new_value:
farmer_dict["authentication_public_key"] = request.payload.authentication_public_key
if request.payload.payout_instructions is not None:
is_new_value = (
farmer_record.payout_instructions != request.payload.payout_instructions
and request.payload.payout_instructions is not None
and len(hexstr_to_bytes(request.payload.payout_instructions)) == 32
)
response_dict["payout_instructions"] = is_new_value
if is_new_value:
farmer_dict["payout_instructions"] = request.payload.payout_instructions
if request.payload.suggested_difficulty is not None:
is_new_value = (
farmer_record.difficulty != request.payload.suggested_difficulty
and request.payload.suggested_difficulty is not None
and request.payload.suggested_difficulty >= self.min_difficulty
)
response_dict["suggested_difficulty"] = is_new_value
if is_new_value:
farmer_dict["difficulty"] = request.payload.suggested_difficulty
async def update_farmer_later():
await asyncio.sleep(self.farmer_update_cooldown_seconds)
await self.store.add_farmer_record(FarmerRecord.from_json_dict(farmer_dict))
self.farmer_update_blocked.remove(launcher_id)
self.log.info(f"Updated farmer: {response_dict}")
self.farmer_update_blocked.add(launcher_id)
asyncio.create_task(update_farmer_later())
# TODO Fix spare-blockchain's Streamable implementation to support Optional in `from_json_dict`, then use
# PutFarmerResponse here and in the trace up.
return response_dict
async def get_and_validate_singleton_state(
self, launcher_id: bytes32
) -> Optional[Tuple[CoinSolution, PoolState, bool]]:
"""
:return: the state of the singleton, if it currently exists in the blockchain, and if it is assigned to
our pool, with the correct parameters. Otherwise, None. Note that this state must be buried (recent state
changes are not returned)
"""
singleton_task: Optional[Task] = self.follow_singleton_tasks.get(launcher_id, None)
remove_after = False
farmer_rec = None
if singleton_task is None or singleton_task.done():
farmer_rec: Optional[FarmerRecord] = await self.store.get_farmer_record(launcher_id)
singleton_task = asyncio.create_task(
get_singleton_state(
self.node_rpc_client,
launcher_id,
farmer_rec,
self.blockchain_state["peak"].height,
self.confirmation_security_threshold,
self.constants.GENESIS_CHALLENGE,
)
)
self.follow_singleton_tasks[launcher_id] = singleton_task
remove_after = True
optional_result: Optional[Tuple[CoinSolution, PoolState, PoolState]] = await singleton_task
if remove_after and launcher_id in self.follow_singleton_tasks:
await self.follow_singleton_tasks.pop(launcher_id)
if optional_result is None:
return None
buried_singleton_tip, buried_singleton_tip_state, singleton_tip_state = optional_result
# Validate state of the singleton
is_pool_member = True
if singleton_tip_state.target_puzzle_hash != self.default_target_puzzle_hash:
self.log.info(
f"Wrong target puzzle hash: {singleton_tip_state.target_puzzle_hash} for launcher_id {launcher_id}"
)
is_pool_member = False
elif singleton_tip_state.relative_lock_height != self.relative_lock_height:
self.log.info(
f"Wrong relative lock height: {singleton_tip_state.relative_lock_height} for launcher_id {launcher_id}"
)
is_pool_member = False
elif singleton_tip_state.version != POOL_PROTOCOL_VERSION:
self.log.info(f"Wrong version {singleton_tip_state.version} for launcher_id {launcher_id}")
is_pool_member = False
elif singleton_tip_state.state == PoolSingletonState.SELF_POOLING.value:
self.log.info(f"Invalid singleton state {singleton_tip_state.state} for launcher_id {launcher_id}")
is_pool_member = False
elif singleton_tip_state.state == PoolSingletonState.LEAVING_POOL.value:
coin_record: Optional[CoinRecord] = await self.node_rpc_client.get_coin_record_by_name(
buried_singleton_tip.coin.name()
)
assert coin_record is not None
if self.blockchain_state["peak"].height - coin_record.confirmed_block_index > self.relative_lock_height:
self.log.info(f"launcher_id {launcher_id} got enough confirmations to leave the pool")
is_pool_member = False
self.log.info(f"Is {launcher_id} pool member: {is_pool_member}")
if farmer_rec is not None and (
farmer_rec.singleton_tip != buried_singleton_tip
or farmer_rec.singleton_tip_state != buried_singleton_tip_state
):
# This means the singleton has been changed in the blockchain (either by us or someone else). We
# still keep track of this singleton if the farmer has changed to a different pool, in case they
# switch back.
self.log.info(f"Updating singleton state for {launcher_id}")
await self.store.update_singleton(
launcher_id, buried_singleton_tip, buried_singleton_tip_state, is_pool_member
)
return buried_singleton_tip, buried_singleton_tip_state, is_pool_member
async def process_partial(
self,
partial: PostPartialRequest,
farmer_record: FarmerRecord,
time_received_partial: uint64,
) -> Dict:
# Validate signatures
message: bytes32 = partial.payload.get_hash()
pk1: G1Element = partial.payload.proof_of_space.plot_public_key
pk2: G1Element = farmer_record.authentication_public_key
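# A valid partial must be signed by both the plot key and the farmer's stored
# authentication key over the same payload hash (BLS aggregate signature).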
valid_sig = AugSchemeMPL.aggregate_verify([pk1, pk2], [message, message], partial.aggregate_signature)
if not valid_sig:
return error_dict(
PoolErrorCode.INVALID_SIGNATURE,
f"The aggregate signature is invalid {partial.aggregate_signature}",
)
if partial.payload.proof_of_space.pool_contract_puzzle_hash != farmer_record.p2_singleton_puzzle_hash:
return error_dict(
PoolErrorCode.INVALID_P2_SINGLETON_PUZZLE_HASH,
f"Invalid pool contract puzzle hash {partial.payload.proof_of_space.pool_contract_puzzle_hash}",
)
async def get_signage_point_or_eos():
if partial.payload.end_of_sub_slot:
return await self.node_rpc_client.get_recent_signage_point_or_eos(None, partial.payload.sp_hash)
else:
return await self.node_rpc_client.get_recent_signage_point_or_eos(partial.payload.sp_hash, None)
response = await get_signage_point_or_eos()
if response is None:
# Try again after 10 seconds in case we just didn't yet receive the signage point
await asyncio.sleep(10)
response = await get_signage_point_or_eos()
if response is None or response["reverted"]:
return error_dict(
PoolErrorCode.NOT_FOUND, f"Did not find signage point or EOS {partial.payload.sp_hash}, {response}"
)
node_time_received_sp = response["time_received"]
signage_point: Optional[SignagePoint] = response.get("signage_point", None)
end_of_sub_slot: Optional[EndOfSubSlotBundle] = response.get("eos", None)
if time_received_partial - node_time_received_sp > self.partial_time_limit:
return error_dict(
PoolErrorCode.TOO_LATE,
f"Received partial in {time_received_partial - node_time_received_sp}. "
f"Make sure your proof of space lookups are fast, and network connectivity is good."
f"Response must happen in less than {self.partial_time_limit} seconds. NAS or network"
f" farming can be an issue",
)
# Validate the proof
if signage_point is not None:
challenge_hash: bytes32 = signage_point.cc_vdf.challenge
else:
challenge_hash = end_of_sub_slot.challenge_chain.get_hash()
quality_string: Optional[bytes32] = partial.payload.proof_of_space.verify_and_get_quality_string(
self.constants, challenge_hash, partial.payload.sp_hash
)
if quality_string is None:
return error_dict(PoolErrorCode.INVALID_PROOF, f"Invalid proof of space {partial.payload.sp_hash}")
current_difficulty = farmer_record.difficulty
required_iters: uint64 = calculate_iterations_quality(
self.constants.DIFFICULTY_CONSTANT_FACTOR,
quality_string,
partial.payload.proof_of_space.size,
current_difficulty,
partial.payload.sp_hash,
)
if required_iters >= self.iters_limit:
return error_dict(
PoolErrorCode.PROOF_NOT_GOOD_ENOUGH,
f"Proof of space has required iters {required_iters}, too high for difficulty " f"{current_difficulty}",
)
await self.pending_point_partials.put((partial, time_received_partial, current_difficulty))
async with self.store.lock:
# Obtains the new record in case we just updated difficulty
farmer_record: Optional[FarmerRecord] = await self.store.get_farmer_record(partial.payload.launcher_id)
if farmer_record is not None:
current_difficulty = farmer_record.difficulty
# Decide whether to update the difficulty
recent_partials = await self.store.get_recent_partials(
partial.payload.launcher_id, self.number_of_partials_target
)
# Only update the difficulty if we meet certain conditions
new_difficulty: uint64 = self.difficulty_function(
recent_partials,
int(self.number_of_partials_target),
int(self.time_target),
current_difficulty,
time_received_partial,
self.min_difficulty,
)
if current_difficulty != new_difficulty:
await self.store.update_difficulty(partial.payload.launcher_id, new_difficulty)
current_difficulty = new_difficulty
return PostPartialResponse(current_difficulty).to_json_dict()
|
// Copyright 2020 Phyronnaz
#pragma once
#include "CoreMinimal.h"
#include "InputCoreTypes.h"
#include "Templates/SubclassOf.h"
#include "Framework/Commands/InputChord.h"
#include "VoxelGraphShortcuts.generated.h"
class UVoxelNode;
USTRUCT()
struct FVoxelGraphEditorKeyBinding
{
GENERATED_BODY()
UPROPERTY(EditAnywhere, Category = "Voxel")
bool bCtrlDown = false;
UPROPERTY(EditAnywhere, Category = "Voxel")
bool bAltDown = false;
UPROPERTY(EditAnywhere, Category = "Voxel")
bool bShiftDown = false;
UPROPERTY(EditAnywhere, Category = "Voxel")
FKey Key;
UPROPERTY(EditAnywhere, Category = "Voxel")
TSubclassOf<UVoxelNode> Class;
FVoxelGraphEditorKeyBinding() = default;
FVoxelGraphEditorKeyBinding(FKey Key, TSubclassOf<UVoxelNode> Class)
: Key(Key)
, Class(Class)
{
}
inline bool IsSameAs(const FInputChord& Chord)
{
return bCtrlDown == Chord.bCtrl && bAltDown == Chord.bAlt && bShiftDown == Chord.bShift && Key == Chord.Key;
}
};
UCLASS(Config = EditorKeyBindings)
class UVoxelGraphShortcuts : public UObject
{
GENERATED_BODY()
public:
UVoxelGraphShortcuts();
UPROPERTY(Config, EditAnywhere, Category = "Voxel")
TArray<FVoxelGraphEditorKeyBinding> Shortcuts;
//~ Begin UObject Interface
#if WITH_EDITOR
virtual void PostEditChangeProperty(FPropertyChangedEvent& PropertyChangedEvent) override;
#endif
}; |
import subprocess
from sys import platform
from time import sleep, time
import bitstring
import numpy as np
import pygatt
from . import helper
from .constants import *
class Muse():
"""Muse 2016 headband"""
def __init__(self, address, callback_eeg=None, callback_control=None,
callback_telemetry=None, callback_acc=None, callback_gyro=None,
backend='auto', interface=None, time_func=time, name=None):
"""Initialize
callback_eeg -- callback for eeg data, function(data, timestamps)
callback_control -- function(message)
callback_telemetry -- function(timestamp, battery, fuel_gauge,
adc_volt, temperature)
callback_acc -- function(timestamp, samples)
callback_gyro -- function(timestamp, samples)
- samples is a list of 3 samples, where each sample is [x, y, z]
"""
self.address = address
self.name = name
self.callback_eeg = callback_eeg
self.callback_telemetry = callback_telemetry
self.callback_control = callback_control
self.callback_acc = callback_acc
self.callback_gyro = callback_gyro
self.enable_eeg = callback_eeg is not None
self.enable_control = callback_control is not None
self.enable_telemetry = callback_telemetry is not None
self.enable_acc = callback_acc is not None
self.enable_gyro = callback_gyro is not None
self.interface = interface
self.time_func = time_func
self.backend = helper.resolve_backend(backend)
def connect(self, interface=None, backend='auto'):
"""Connect to the device"""
try:
if self.backend == 'bluemuse':
print('Starting BlueMuse.')
subprocess.call('start bluemuse:', shell=True)
else:
print('Connecting to %s : %s...' %
(self.name if self.name else 'Muse', self.address))
if self.backend == 'gatt':
self.interface = self.interface or 'hci0'
self.adapter = pygatt.GATTToolBackend(self.interface)
else:
self.adapter = pygatt.BGAPIBackend(
serial_port=self.interface)
self.adapter.start()
self.device = self.adapter.connect(self.address)
# subscribes to EEG stream
if self.enable_eeg:
self._subscribe_eeg()
if self.enable_control:
self._subscribe_control()
if self.enable_telemetry:
self._subscribe_telemetry()
if self.enable_acc:
self._subscribe_acc()
if self.enable_gyro:
self._subscribe_gyro()
self.last_timestamp = self.time_func()
return True
except pygatt.exceptions.BLEError as error:
print(error)
if("characteristic" in str(error)):
self.ask_reset()
sleep(2)
self.device = self.adapter.connect(self.address)
self.select_preset(preset=21)
# subscribes to EEG stream
if self.enable_eeg:
self._subscribe_eeg()
if self.enable_control:
self._subscribe_control()
if self.enable_telemetry:
self._subscribe_telemetry()
if self.enable_acc:
self._subscribe_acc()
if self.enable_gyro:
self._subscribe_gyro()
self.last_timestamp = self.time_func()
return True
elif 'Timed out connecting' in str(error):
return self.connect(interface, backend)
else:
print('Connection to', self.address, 'failed')
return False
def _write_cmd(self, cmd):
"""Wrapper to write a command to the Muse device.
cmd -- list of bytes"""
self.device.char_write_handle(0x000e, cmd, False)
def ask_control(self):
"""Send a message to Muse to ask for the control status.
Only useful if control is enabled (to receive the answer!)
The message received is a dict with the following keys:
"hn": device name
"sn": serial number
"ma": MAC address
"id":
"bp": battery percentage
"ts":
"ps": preset selected
"rc": return status, if 0 is OK
"""
if self.backend == 'bluemuse':
helper.warn_bluemuse_not_supported()
return
self._write_cmd([0x02, 0x73, 0x0a])
def ask_device_info(self):
"""Send a message to Muse to ask for the device info.
The message received is a dict with the following keys:
"ap":
"sp":
"tp": firmware type, e.g: "consumer"
"hw": hardware version?
"bn": build number?
"fw": firmware version?
"bl":
"pv": protocol version?
"rc": return status, if 0 is OK
"""
if self.backend == 'bluemuse':
helper.warn_bluemuse_not_supported()
return
self._write_cmd([0x03, 0x76, 0x31, 0x0a])
def ask_reset(self):
"""Undocumented command reset for '*1'
The message received is a singleton with:
"rc": return status, if 0 is OK
"""
self._write_cmd([0x03, 0x2a, 0x31, 0x0a])
def start(self):
"""Start streaming."""
if self.backend == 'bluemuse':
address = self.address if self.address is not None else self.name
if address is None:
subprocess.call(
'start bluemuse://start?streamfirst=true', shell=True)
else:
subprocess.call(
'start bluemuse://start?addresses={0}'.format(address), shell=True)
return
self._init_timestamp_correction()
self._init_sample()
self.last_tm = 0
self._init_control()
self.resume()
def resume(self):
"""Resume streaming, sending 'd' command"""
self._write_cmd([0x02, 0x64, 0x0a])
def stop(self):
"""Stop streaming."""
if self.backend == 'bluemuse':
address = self.address if self.address is not None else self.name
if address is None:
subprocess.call('start bluemuse://stopall', shell=True)
else:
subprocess.call(
'start bluemuse://stop?addresses={0}'.format(address), shell=True)
return
self._write_cmd([0x02, 0x68, 0x0a])
def keep_alive(self):
"""Keep streaming, sending 'k' command"""
self._write_cmd([0x02, 0x6b, 0x0a])
def select_preset(self, preset=21):
"""Setting preset for headband configuration
See details on https://goo.gl/FPN1ib
For 2016 headband, possible choice are 'p20' and 'p21'.
Untested but possible values are 'p22' and 'p23'
Default is 'p21'."""
if preset == 20:
self._write_cmd([0x04, 0x70, 0x32, 0x30, 0x0a])
elif preset == 22:
self._write_cmd([0x04, 0x70, 0x32, 0x32, 0x0a])
elif preset == 23:
self._write_cmd([0x04, 0x70, 0x32, 0x33, 0x0a])
else:
self._write_cmd([0x04, 0x70, 0x32, 0x31, 0x0a])
def disconnect(self):
"""disconnect."""
if self.backend == 'bluemuse':
subprocess.call('start bluemuse://shutdown', shell=True)
return
self.device.disconnect()
if self.adapter:
self.adapter.stop()
def _subscribe_eeg(self):
"""subscribe to eeg stream."""
self.device.subscribe(MUSE_GATT_ATTR_TP9,
callback=self._handle_eeg)
self.device.subscribe(MUSE_GATT_ATTR_AF7,
callback=self._handle_eeg)
self.device.subscribe(MUSE_GATT_ATTR_AF8,
callback=self._handle_eeg)
self.device.subscribe(MUSE_GATT_ATTR_TP10,
callback=self._handle_eeg)
self.device.subscribe(MUSE_GATT_ATTR_RIGHTAUX,
callback=self._handle_eeg)
def _unpack_eeg_channel(self, packet):
"""Decode data packet of one EEG channel.
Each packet is encoded with a 16bit timestamp followed by 12 time
samples with a 12 bit resolution.
"""
aa = bitstring.Bits(bytes=packet)
pattern = "uint:16,uint:12,uint:12,uint:12,uint:12,uint:12,uint:12, \
uint:12,uint:12,uint:12,uint:12,uint:12,uint:12"
res = aa.unpack(pattern)
packetIndex = res[0]
data = res[1:]
# 12 bits on a 2 mVpp range
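# 2000 uV / 4096 levels = 0.48828125 uV per bit, with 2048 as the zero offset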
data = 0.48828125 * (np.array(data) - 2048)
return packetIndex, data
def _init_sample(self):
"""initialize array to store the samples"""
self.timestamps = np.zeros(5)
self.data = np.zeros((5, 12))
def _init_timestamp_correction(self):
"""Init IRLS params"""
# initial params for the timestamp correction
# the time it started + the inverse of sampling rate
self.sample_index = 0
self.reg_params = np.array([self.time_func(), 1. / MUSE_SAMPLING_RATE])
def _update_timestamp_correction(self, x, y):
"""Update regression for dejittering
use stochastic gradient descent
"""
pass
def _handle_eeg(self, handle, data):
"""Callback for receiving a sample.
samples are received in this order : 44, 41, 38, 32, 35
wait until we get 35 and call the data callback
"""
timestamp = self.time_func()
index = int((handle - 32) / 3)
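# EEG characteristics use GATT handles 32, 35, 38, 41 and 44, spaced 3 apart,
# so this maps each handle to a row index 0-4 in self.data / self.timestamps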
tm, d = self._unpack_eeg_channel(data)
if self.last_tm == 0:
self.last_tm = tm - 1
self.data[index] = d
self.timestamps[index] = timestamp
# last data received
if handle == 35:
if tm != self.last_tm + 1:
print("missing sample %d : %d" % (tm, self.last_tm))
self.last_tm = tm
# calculate index of time samples
idxs = np.arange(0, 12) + self.sample_index
self.sample_index += 12
# timestamps are extrapolated backwards based on sampling rate and current time
timestamps = self.reg_params[1] * idxs + self.reg_params[0]
# push data
self.callback_eeg(self.data, timestamps)
# save last timestamp for disconnection timer
self.last_timestamp = timestamps[-1]
# reset sample
self._init_sample()
def _init_control(self):
"""Variable to store the current incoming message."""
self._current_msg = ""
def _subscribe_control(self):
self.device.subscribe(
MUSE_GATT_ATTR_STREAM_TOGGLE, callback=self._handle_control)
self._init_control()
def _handle_control(self, handle, packet):
"""Handle the incoming messages from the 0x000e handle.
Each message is 20 bytes
The first byte, call it n, is the length of the incoming string.
The rest of the bytes are in ASCII, and only n chars are useful
Multiple messages together are a json object (or dictionary in python)
If a message has a '}' then the whole dict is finished.
Example:
{'key': 'value',
'key2': 'really-long
-value',
'key3': 'value3'}
each line is a message, the 4 messages are a json object.
"""
if handle != 14:
return
# Decode data
bit_decoder = bitstring.Bits(bytes=packet)
pattern = "uint:8,uint:8,uint:8,uint:8,uint:8,uint:8,uint:8,uint:8,uint:8,uint:8, \
uint:8,uint:8,uint:8,uint:8,uint:8,uint:8,uint:8,uint:8,uint:8,uint:8"
chars = bit_decoder.unpack(pattern)
# Length of the string
n_incoming = chars[0]
# Parse as chars, only useful bytes
incoming_message = "".join(map(chr, chars[1:]))[:n_incoming]
# Add to current message
self._current_msg += incoming_message
if incoming_message[-1] == '}': # Message ended completely
self.callback_control(self._current_msg)
self._init_control()
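# Illustrative walk-through (values made up): a packet whose first byte is 9,
# followed by the ASCII text '{"key": "' padded out to 20 bytes, contributes
# the 9-character fragment '{"key": "'; fragments accumulate in _current_msg
# until one of them ends with '}', at which point the assembled JSON string is
# passed to callback_control and the buffer is reset.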
def _subscribe_telemetry(self):
self.device.subscribe(MUSE_GATT_ATTR_TELEMETRY,
callback=self._handle_telemetry)
def _handle_telemetry(self, handle, packet):
"""Handle the telemetry (battery, temperature and stuff) incoming data
"""
if handle != 26: # handle 0x1a
return
timestamp = self.time_func()
bit_decoder = bitstring.Bits(bytes=packet)
pattern = "uint:16,uint:16,uint:16,uint:16,uint:16" # The rest is 0 padding
data = bit_decoder.unpack(pattern)
battery = data[1] / 512
fuel_gauge = data[2] * 2.2
adc_volt = data[3]
temperature = data[4]
self.callback_telemetry(
timestamp, battery, fuel_gauge, adc_volt, temperature)
def _unpack_imu_channel(self, packet, scale=1):
"""Decode data packet of the accelerometer and gyro (imu) channels.
Each packet is encoded with a 16-bit timestamp followed by 9 samples
with 16-bit resolution.
"""
bit_decoder = bitstring.Bits(bytes=packet)
pattern = "uint:16,int:16,int:16,int:16,int:16, \
int:16,int:16,int:16,int:16,int:16"
data = bit_decoder.unpack(pattern)
packet_index = data[0]
samples = [[
scale * data[index], # x
scale * data[index + 1], # y
scale * data[index + 2] # z
] for index in [1, 4, 7]]
# samples is a list with 3 samples
# each sample is a list with [x, y, z]
return packet_index, samples
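# Layout note: after the 16-bit packet index, the nine signed 16-bit values
# form three consecutive (x, y, z) triplets, hence the starting offsets
# 1, 4 and 7 above.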
def _subscribe_acc(self):
self.device.subscribe(MUSE_GATT_ATTR_ACCELEROMETER,
callback=self._handle_acc)
def _handle_acc(self, handle, packet):
"""Handle incoming accelerometer data.
sampling rate: ~17 messages per second, 3 samples per message (roughly 50 Hz)"""
if handle != 23: # handle 0x17
return
timestamp = self.time_func()
packet_index, samples = self._unpack_imu_channel(
packet, scale=MUSE_ACCELEROMETER_SCALE_FACTOR)
self.callback_acc(timestamp, samples)
def _subscribe_gyro(self):
self.device.subscribe(MUSE_GATT_ATTR_GYRO,
callback=self._handle_gyro)
def _handle_gyro(self, handle, packet):
"""Handle incoming gyroscope data.
sampling rate: ~17 messages per second, 3 samples per message (roughly 50 Hz)"""
if handle != 20: # handle 0x14
return
timestamp = self.time_func()
packet_index, samples = self._unpack_imu_channel(
packet, scale=MUSE_GYRO_SCALE_FACTOR)
self.callback_gyro(timestamp, samples)
|
from django.apps import AppConfig
class LojaConfig(AppConfig):
name = 'loja'
|
//import './FriendListApp.scss';
import React, { Component, PropTypes } from 'react';
import { connect } from 'react-redux';
import { bindActionCreators } from 'redux';
import * as Actions from '../actions/index';
import { Panel, Congratulation, LevelsList } from '../components';
import Levels from '../constants/Levels';
class App extends Component {
static propTypes = {
currentBlockId: PropTypes.number,
actions: PropTypes.object.isRequired,
panelItems: PropTypes.array.isRequired,
isFinish: PropTypes.bool.isRequired,
levelId: PropTypes.number.isRequired
};
render () {
const { actions, currentBlockId, panelItems, isFinish, levelId } = this.props;
return (
<div className="panel-wrap">
<h1>level #{levelId+1}</h1>
<Panel key={levelId} currentBlockId={currentBlockId} items={panelItems} actions={actions}/>
<Congratulation show={isFinish} isLast={levelId === Levels.length - 1} onNext={actions.nextLevel}/>
<LevelsList activeLevel={levelId} onChangeLevel={actions.changeLevel}/>
</div>
);
}
}
function mapStateToProps(state) {
return {
currentBlockId: state.currentBlockId,
panelItems: state.panelItems,
isFinish: state.isFinish,
levelId: state.levelId,
};
}
function mapDispatchToProps(dispatch) {
return {
actions: bindActionCreators(Actions, dispatch)
};
}
export default connect(
mapStateToProps,
mapDispatchToProps
)(App);
|
#pragma once
#include "utils/widgets.h"
#ifdef __APPLE__
#include <SDL2_ttf/SDL_ttf.h>
#else
#if defined(__ANDROID__) || defined(_WIN32) || defined(APPIMAGE)
#include <SDL_ttf.h>
#else
#include <SDL2/SDL_ttf.h>
#endif
#endif
// for drawing
class Shader {
public:
static constexpr GLuint vpos = 0, normal = 1, uvloc = 2;
protected:
GLuint program;
public:
Shader(const string& srcVert, const string& srcFrag);
#ifndef OPENGLES
Shader(const string& srcVert, const string& srcGeom, const string& srcFrag);
#endif
~Shader();
operator GLuint() const;
private:
static GLuint loadShader(const string& source, GLenum type);
template <class C, class I> static void checkStatus(GLuint id, GLenum stat, C check, I info);
};
inline Shader::~Shader() {
glDeleteProgram(program);
}
inline Shader::operator GLuint() const {
return program;
}
class ShaderGeometry : public Shader {
public:
GLint pview, model, normat;
GLint viewPos, farPlane, texsamp, depthMap;
GLint materialDiffuse, materialSpecular, materialShininess;
GLint lightPos, lightAmbient, lightDiffuse, lightLinear, lightQuadratic;
ShaderGeometry(const string& srcVert, const string& srcFrag, const Settings* sets);
private:
static string editShadowAlg(string src, bool calc, bool soft);
};
#ifndef OPENGLES
class ShaderDepth : public Shader {
public:
GLint model;
GLint shadowMats;
GLint lightPos, farPlane;
ShaderDepth(const string& srcVert, const string& srcGeom, const string& srcFrag);
};
#endif
class ShaderGui : public Shader {
public:
GLint pview, rect, uvrc, zloc;
GLint color, texsamp;
Quad wrect;
ShaderGui(const string& srcVert, const string& srcFrag);
};
// loads different font sizes from one font and handles basic log display
class FontSet {
private:
static constexpr char fontTestString[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ`~!@#$%^&*()_+-=[]{}'\\\"|;:,.<>/?";
static constexpr int fontTestHeight = 100;
static constexpr float fallbackScale = 0.9f;
#ifdef OPENGLES
static constexpr SDL_Color textColor = { 37, 193, 255, 255 }; // R and B need to be switched
#else
static constexpr SDL_Color textColor = { 255, 193, 37, 255 };
#endif
umap<int, TTF_Font*> fonts;
vector<uint8> fontData;
float heightScale; // for scaling down font size to fit requested height
public:
~FontSet();
string init(string name); // returns name or a chosen fallback name
void clear();
int length(const char* text, int height);
bool hasGlyph(uint16 ch);
Texture render(const char* text, int height);
Texture render(const char* text, int height, uint length);
private:
TTF_Font* load(const string& name);
TTF_Font* findFile(const string& name, const vector<string>& available);
TTF_Font* getFont(int height);
};
inline FontSet::~FontSet() {
clear();
}
inline Texture FontSet::render(const char* text, int height) {
return TTF_RenderUTF8_Blended(getFont(height), text, textColor);
}
inline Texture FontSet::render(const char* text, int height, uint length) {
return TTF_RenderUTF8_Blended_Wrapped(getFont(height), text, textColor, length);
}
// handles window events and contains video settings
class WindowSys {
public:
static constexpr char title[] = "Thrones";
private:
static constexpr char fileCursor[] = "cursor.png";
static constexpr char fileGeometryVert[] = "geometry.vert";
static constexpr char fileGeometryFrag[] = "geometry.frag";
static constexpr char fileDepthVert[] = "depth.vert";
static constexpr char fileDepthGeom[] = "depth.geom";
static constexpr char fileDepthFrag[] = "depth.frag";
static constexpr char fileGuiVert[] = "gui.vert";
static constexpr char fileGuiFrag[] = "gui.frag";
static constexpr uint32 eventCheckTimeout = 50;
static constexpr float ticksPerSec = 1000.f;
#ifdef OPENGLES
static constexpr int imgInitFlags = imgInitFull;
#else
static constexpr int imgInitFlags = IMG_INIT_PNG;
#endif
static constexpr float minimumRatio = 1.4f;
static constexpr array<ivec2, 26> resolutions = {
ivec2(640, 360),
ivec2(768, 432),
ivec2(800, 450),
ivec2(848, 480),
ivec2(854, 480),
ivec2(960, 540),
ivec2(1024, 576),
ivec2(1280, 720),
ivec2(1366, 768),
ivec2(1280, 800),
ivec2(1440, 900),
ivec2(1600, 900),
ivec2(1680, 1050),
ivec2(1920, 1080),
ivec2(2048, 1152),
ivec2(1920, 1200),
ivec2(2560, 1440),
ivec2(2560, 1600),
ivec2(2880, 1620),
ivec2(3200, 1800),
ivec2(3840, 2160),
ivec2(4096, 2304),
ivec2(3840, 2400),
ivec2(5120, 2880),
ivec2(7680, 4320),
ivec2(15360, 8640)
};
struct Loader {
static constexpr int logSize = 18;
enum class State : uint8 {
start,
audio,
objects,
textures,
program,
done
};
string logStr;
State state = State::start;
void addLine(const string& str, WindowSys* win);
};
AudioSys* audio;
InputSys* inputSys;
Program* program;
Scene* scene;
Settings* sets;
SDL_Window* window;
SDL_GLContext context;
ShaderGeometry* geom;
#ifndef OPENGLES
ShaderDepth* depth;
#endif
ShaderGui* gui;
FontSet* fonts;
ivec2 screenView, guiView;
uint32 oldTime;
float dSec; // delta time in seconds between iterations of the main loop
bool run; // whether the main loop should keep running
uint8 cursorHeight;
#ifdef EMSCRIPTEN
Loader loader;
void (WindowSys::*loopFunc)();
#endif
public:
int start(const Arguments& args);
void close();
const ivec2& getScreenView() const;
const ivec2& getGuiView() const;
uint8 getCursorHeight() const;
vector<ivec2> windowSizes() const;
vector<SDL_DisplayMode> displayModes() const;
int displayID() const;
void setTextCapture(bool on);
void setScreen();
void setVsync(Settings::VSync vsync);
void setGamma(float gamma);
void resetSettings();
void reloadGeom();
AudioSys* getAudio();
FontSet* getFonts();
InputSys* getInput();
Program* getProgram();
Scene* getScene();
Settings* getSets();
const ShaderGeometry* getGeom() const;
#ifndef OPENGLES
const ShaderDepth* getDepth() const;
#endif
const ShaderGui* getGui() const;
SDL_Window* getWindow();
float getDeltaSec() const;
private:
void init(const Arguments& args);
void exec();
#ifdef EMSCRIPTEN
void load();
#else
void load(Loader* loader);
#endif
void createWindow();
void destroyWindow();
void handleEvent(const SDL_Event& event); // pass events to their specific handlers
void eventWindow(const SDL_WindowEvent& winEvent);
void setSwapInterval();
bool trySetSwapInterval();
void setWindowMode();
void updateView();
bool checkCurDisplay();
template <class T> static void checkResolution(T& val, const vector<T>& modes);
};
inline void WindowSys::close() {
run = false;
}
inline const ivec2& WindowSys::getScreenView() const {
return screenView;
}
inline const ivec2& WindowSys::getGuiView() const {
return guiView;
}
inline uint8 WindowSys::getCursorHeight() const {
return cursorHeight;
}
inline AudioSys* WindowSys::getAudio() {
return audio;
}
inline FontSet* WindowSys::getFonts() {
return fonts;
}
inline InputSys* WindowSys::getInput() {
return inputSys;
}
inline Program* WindowSys::getProgram() {
return program;
}
inline Scene* WindowSys::getScene() {
return scene;
}
inline Settings* WindowSys::getSets() {
return sets;
}
inline const ShaderGeometry* WindowSys::getGeom() const {
return geom;
}
#ifndef OPENGLES
inline const ShaderDepth* WindowSys::getDepth() const {
return depth;
}
#endif
inline const ShaderGui* WindowSys::getGui() const {
return gui;
}
inline SDL_Window* WindowSys::getWindow() {
return window;
}
inline float WindowSys::getDeltaSec() const {
return dSec;
}
inline int WindowSys::displayID() const {
return SDL_GetWindowDisplayIndex(window);
}
|
define({
root: ({
_widgetLabel: "Zoom Slider"
}),
"ar": 1,
"bs": 1,
"cs": 1,
"da": 1,
"de": 1,
"el": 1,
"es": 1,
"et": 1,
"fi": 1,
"fr": 1,
"he": 1,
"hi": 1,
"hr": 1,
"it": 1,
"id": 1,
"ja": 1,
"ko": 1,
"lt": 1,
"lv": 1,
"nb": 1,
"nl": 1,
"pl": 1,
"pt-br": 1,
"pt-pt": 1,
"ro": 1,
"ru": 1,
"sl": 1,
"sr": 1,
"sv": 1,
"th": 1,
"tr": 1,
"vi": 1,
"zh-cn": 1,
"zh-hk": 1,
"zh-tw": 1
}); |
import React from 'react';
import { ButtonGroup, Button, Glyphicon, Navbar, Nav, NavItem } from 'react-bootstrap';
import { Switch, Route, Link } from 'react-router-dom';
import * as routes from './routes.js';
import { Helmet } from 'react-helmet';
export default class index extends React.Component {
render() {
return (
<div>
<Helmet>
<title>Dr. Notes | Templates management</title>
</Helmet>
<Switch>
<Route exact={true} path="/template/create" component={routes.form} />
<Route exact={true} path="/template/:id/delete" component={routes.del} />
<Route exact={true} path="/template/:id/edit" component={routes.form} />
<Route exact={true} path="/template/:id" component={routes.details} />
<Route exact={true} path="/template" component={routes.index} />
</Switch>
</div>
);
}
}
|
import {
prepareFinalObject,
handleScreenConfigurationFieldChange as handleField
} from "egov-ui-framework/ui-redux/screen-configuration/actions";
import { updatePFOforSearchResults } from "../../../../ui-utils/commons";
import get from "lodash/get";
import set from "lodash/set";
import { footer } from "../bpastakeholder/applyResource/footer";
import { getQueryArg } from "egov-ui-framework/ui-utils/commons";
import {
header,
formwizardFirstStep,
formwizardSecondStep,
formwizardThirdStep,
formwizardFourthStep,
stepper,
getMdmsData
} from "../bpastakeholder/apply";
import { getLocale, getTenantId, setModule } from "egov-ui-kit/utils/localStorageUtils";
import { addressDestruct, setMobileNoField, setNameOfUser } from "../utils";
import { getModuleName } from "egov-ui-kit/utils/commons";
import { fetchLocalizationLabel } from "egov-ui-kit/redux/app/actions";
const getData = async (action, state, dispatch, tenantId) => {
await getMdmsData(action, state, dispatch);
};
const updateSearchResults = async (
action,
state,
dispatch,
queryValue,
tenantId
) => {
await getData(action, state, dispatch, tenantId);
let aa = await updatePFOforSearchResults(
action,
state,
dispatch,
queryValue,
"",
tenantId
);
addressDestruct(action, state, dispatch);
const subOwnerShipCategory = get(
state.screenConfiguration.preparedFinalObject,
"Licenses[0].tradeLicenseDetail.subOwnerShipCategory"
);
// setOrganizationVisibility(action, state, dispatch, subOwnerShipCategory);
const queryValueFromUrl = getQueryArg(
window.location.href,
"applicationNumber"
);
if (!queryValueFromUrl) {
dispatch(
prepareFinalObject(
"Licenses[0].oldLicenseNumber",
get(
state.screenConfiguration.preparedFinalObject,
"Licenses[0].applicationNumber",
""
)
)
);
dispatch(prepareFinalObject("Licenses[0].applicationNumber", ""));
dispatch(
handleField(
"apply",
"components.div.children.headerDiv.children.header.children.applicationNumber",
"visible",
false
)
);
}
};
const screenConfig = {
uiFramework: "material-ui",
name: "apply",
beforeInitScreen: (action, state, dispatch) => {
if (window.location.pathname.includes("openlink")) {
set(state, "screenConfiguration.moduleName", "BPAREG");
set(action.screenConfig, "components.div.children.footer.props.style", {
width: "100vw"
});
setModule(getModuleName());
const tenantId = getTenantId();
const locale = getLocale() || "en_IN";
dispatch(fetchLocalizationLabel(locale, tenantId, tenantId));
}
const queryValue = getQueryArg(window.location.href, "applicationNumber");
const tenantId = getQueryArg(window.location.href, "tenantId");
const applicationNo = queryValue;
dispatch(prepareFinalObject("Licenses[0]", {}));
dispatch(prepareFinalObject("LicensesTemp[0]", {}));
if (applicationNo) {
updateSearchResults(action, state, dispatch, applicationNo, tenantId);
} else {
getData(action, state, dispatch, tenantId);
// setOrganizationVisibility(action, state, dispatch, "INDIVIDUAL");
dispatch(
prepareFinalObject(
"Licenses[0].tradeLicenseDetail.owners[0].gender",
"MALE"
)
);
dispatch(
prepareFinalObject(
"Licenses[0].tradeLicenseDetail.subOwnerShipCategory",
"INDIVIDUAL"
)
);
if (!window.location.pathname.includes("openlink")) {
setMobileNoField(action, state, dispatch);
setNameOfUser(action, state, dispatch);
}
}
return action;
},
components: {
div: {
uiFramework: "custom-atoms",
componentPath: "Div",
props: {
className: "common-div-css"
},
children: {
headerDiv: {
uiFramework: "custom-atoms",
componentPath: "Container",
children: {
header: {
gridDefination: {
xs: 12,
sm: 10
},
...header
}
}
},
stepper,
formwizardFirstStep,
formwizardSecondStep,
formwizardThirdStep,
formwizardFourthStep,
footer
}
},
breakUpDialog: {
uiFramework: "custom-containers-local",
moduleName: "egov-tradelicence",
componentPath: "ViewBreakupContainer",
props: {
open: false,
maxWidth: "md",
screenKey: "apply"
}
}
}
};
export default screenConfig;
|
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import operator
import numpy as np
import pandas as pd
from .. import opcodes as OperandDef
from ..operands import OperandStage, OutputType
from ..serialize import ValueType, AnyField, BoolField, Int32Field, KeyField, ListField
from ..utils import get_shuffle_input_keys_idxes
from .core import SERIES_CHUNK_TYPE
from .utils import hash_dtypes, filter_dtypes
from .operands import DataFrameMapReduceOperand, DataFrameOperandMixin, \
DataFrameShuffleProxy
from .utils import parse_index, split_monotonic_index_min_max, \
build_split_idx_to_origin_idx, filter_index_value, hash_index
class DataFrameIndexAlign(DataFrameMapReduceOperand, DataFrameOperandMixin):
_op_type_ = OperandDef.DATAFRAME_INDEX_ALIGN
_index_min = AnyField('index_min')
_index_min_close = BoolField('index_min_close')
_index_max = AnyField('index_max')
_index_max_close = BoolField('index_max_close')
_index_shuffle_size = Int32Field('index_shuffle_size')
_column_min = AnyField('column_min')
_column_min_close = BoolField('column_min_close')
_column_max = AnyField('column_max')
_column_max_close = BoolField('column_max_close')
_column_shuffle_size = Int32Field('column_shuffle_size')
_column_shuffle_segments = ListField('column_shuffle_segments', ValueType.series)
_input = KeyField('input')
def __init__(self, index_min_max=None, index_shuffle_size=None, column_min_max=None,
column_shuffle_size=None, column_shuffle_segments=None,
sparse=None, dtype=None, dtypes=None, gpu=None, stage=None, shuffle_key=None,
output_types=None, **kw):
if index_min_max is not None:
kw.update(dict(_index_min=index_min_max[0], _index_min_close=index_min_max[1],
_index_max=index_min_max[2], _index_max_close=index_min_max[3]))
if column_min_max is not None:
kw.update(dict(_column_min=column_min_max[0], _column_min_close=column_min_max[1],
_column_max=column_min_max[2], _column_max_close=column_min_max[3]))
super().__init__(
_index_shuffle_size=index_shuffle_size, _column_shuffle_size=column_shuffle_size,
_column_shuffle_segments=column_shuffle_segments, _sparse=sparse,
_dtype=dtype, _dtypes=dtypes, _gpu=gpu, _stage=stage, _shuffle_key=shuffle_key,
_output_types=output_types, **kw)
@property
def index_min(self):
return self._index_min
@property
def index_min_close(self):
return self._index_min_close
@property
def index_max(self):
return self._index_max
@property
def index_max_close(self):
return self._index_max_close
@property
def index_min_max(self):
if getattr(self, '_index_min', None) is None:
return None
return self._index_min, self._index_min_close, \
self._index_max, self._index_max_close
@property
def index_shuffle_size(self):
return self._index_shuffle_size
@property
def column_min(self):
return self._column_min
@property
def column_min_close(self):
return self._column_min_close
@property
def column_max(self):
return self._column_max
@property
def column_max_close(self):
return self._column_max_close
@property
def column_min_max(self):
if getattr(self, '_column_min', None) is None:
return None
return self._column_min, self._column_min_close, \
self._column_max, self._column_max_close
@property
def column_shuffle_size(self):
return self._column_shuffle_size
@property
def column_shuffle_segments(self):
return self._column_shuffle_segments
def _set_inputs(self, inputs):
super()._set_inputs(inputs)
self._input = self._inputs[0]
def _build_map_chunk_kw(self, **kw):
inputs = self.inputs
if kw.get('index_value', None) is None and inputs[0].index_value is not None:
input_index_value = inputs[0].index_value
index_min_max = self.index_min_max
if index_min_max is not None:
kw['index_value'] = filter_index_value(input_index_value, index_min_max)
else:
kw['index_value'] = parse_index(inputs[0].index_value.to_pandas(),
input_index_value, type(self).__name__)
if kw.get('columns_value', None) is None and getattr(inputs[0], 'columns_value', None) is not None:
input_columns_value = inputs[0].columns_value
input_dtypes = inputs[0].dtypes
column_min_max = self.column_min_max
if column_min_max is not None:
kw['columns_value'] = filter_index_value(input_columns_value, column_min_max,
store_data=True)
else:
kw['columns_value'] = parse_index(inputs[0].columns_value.to_pandas(), input_columns_value,
type(self).__name__)
kw['dtypes'] = input_dtypes[kw['columns_value'].to_pandas()]
column_shuffle_size = self.column_shuffle_size
if column_shuffle_size is not None:
self._column_shuffle_segments = hash_dtypes(input_dtypes, column_shuffle_size)
if kw.get('dtype', None) and getattr(inputs[0], 'dtype', None) is not None:
kw['dtype'] = inputs[0].dtype
if kw.get('name', None) and getattr(inputs[0], 'name', None) is not None:
kw['name'] = inputs[0].name
return kw
def _build_reduce_chunk_kw(self, index, **kw):
inputs = self.inputs
if kw.get('index_value', None) is None and inputs[0].inputs[0].index_value is not None:
index_align_map_chunks = inputs[0].inputs
if index_align_map_chunks[0].op.index_min_max is not None:
# shuffle on columns, all the DataFrameIndexAlignMap has the same index
kw['index_value'] = filter_index_value(index_align_map_chunks[0].index_value,
index_align_map_chunks[0].op.index_min_max)
else:
# shuffle on index
kw['index_value'] = parse_index(index_align_map_chunks[0].index_value.to_pandas(),
[c.key for c in index_align_map_chunks], type(self).__name__)
if kw.get('columns_value', None) is None and getattr(inputs[0].inputs[0], 'columns_value', None) is not None:
index_align_map_chunks = inputs[0].inputs
if index_align_map_chunks[0].op.column_min_max is not None:
# shuffle on index
kw['columns_value'] = filter_index_value(index_align_map_chunks[0].columns_value,
index_align_map_chunks[0].op.column_min_max,
store_data=True)
kw['dtypes'] = index_align_map_chunks[0].dtypes[kw['columns_value'].to_pandas()]
else:
# shuffle on columns
all_dtypes = [c.op.column_shuffle_segments[index[1]] for c in index_align_map_chunks
if c.index[0] == index_align_map_chunks[0].index[0]]
kw['dtypes'] = pd.concat(all_dtypes)
kw['columns_value'] = parse_index(kw['dtypes'].index, store_data=True)
if kw.get('dtype', None) and getattr(inputs[0].inputs[0], 'dtype', None) is not None:
kw['dtype'] = inputs[0].inputs[0].dtype
if kw.get('name', None) and getattr(inputs[0].inputs[0], 'name', None) is not None:
kw['name'] = inputs[0].inputs[0].name
return kw
def _create_chunk(self, output_idx, index, **kw):
if self.stage == OperandStage.map:
kw = self._build_map_chunk_kw(**kw)
else:
kw = self._build_reduce_chunk_kw(index, **kw)
return super()._create_chunk(output_idx, index, **kw)
@classmethod
def execute_map(cls, ctx, op):
# TODO(QIN): add GPU support here
df = ctx[op.inputs[0].key]
filters = [[], []]
chunk = op.outputs[0]
if op.index_shuffle_size == -1:
# no shuffle and no min-max filter on index
filters[0].append(slice(None, None, None))
elif op.index_shuffle_size is None:
# no shuffle on index
comp_op = operator.ge if op.index_min_close else operator.gt
index_cond = comp_op(df.index, op.index_min)
comp_op = operator.le if op.index_max_close else operator.lt
index_cond = index_cond & comp_op(df.index, op.index_max)
filters[0].append(index_cond)
else:
# shuffle on index
shuffle_size = op.index_shuffle_size
filters[0].extend(hash_index(df.index, shuffle_size))
if chunk.ndim == 1:
if len(filters[0]) == 1:
# no shuffle
ctx[chunk.key] = df.loc[filters[0][0]]
else:
for index_idx, index_filter in enumerate(filters[0]):
group_key = str(index_idx)
ctx[(chunk.key, group_key)] = df.loc[index_filter]
return
if op.column_shuffle_size == -1:
# no shuffle and no min-max filter on columns
filters[1].append(slice(None, None, None))
if op.column_shuffle_size is None:
# no shuffle on columns
comp_op = operator.ge if op.column_min_close else operator.gt
columns_cond = comp_op(df.columns, op.column_min)
comp_op = operator.le if op.column_max_close else operator.lt
columns_cond = columns_cond & comp_op(df.columns, op.column_max)
filters[1].append(columns_cond)
else:
# shuffle on columns
shuffle_size = op.column_shuffle_size
filters[1].extend(hash_index(df.columns, shuffle_size))
if all(len(it) == 1 for it in filters):
# no shuffle
ctx[chunk.key] = df.loc[filters[0][0], filters[1][0]]
elif len(filters[0]) == 1:
# shuffle on columns
for column_idx, column_filter in enumerate(filters[1]):
group_key = ','.join([str(chunk.index[0]), str(column_idx)])
ctx[(chunk.key, group_key)] = df.loc[filters[0][0], column_filter]
elif len(filters[1]) == 1:
# shuffle on index
for index_idx, index_filter in enumerate(filters[0]):
group_key = ','.join([str(index_idx), str(chunk.index[1])])
ctx[(chunk.key, group_key)] = df.loc[index_filter, filters[1][0]]
else:
# full shuffle
shuffle_index_size = op.index_shuffle_size
shuffle_column_size = op.column_shuffle_size
out_idxes = itertools.product(range(shuffle_index_size), range(shuffle_column_size))
out_index_columns = itertools.product(*filters)
for out_idx, out_index_column in zip(out_idxes, out_index_columns):
index_filter, column_filter = out_index_column
group_key = ','.join(str(i) for i in out_idx)
ctx[(chunk.key, group_key)] = df.loc[index_filter, column_filter]
@classmethod
def execute_reduce(cls, ctx, op):
chunk = op.outputs[0]
input_keys, input_idxes = get_shuffle_input_keys_idxes(op.inputs[0])
input_idx_to_df = {idx: ctx[inp_key, ','.join(str(ix) for ix in chunk.index)]
for inp_key, idx in zip(input_keys, input_idxes)}
row_idxes = sorted({idx[0] for idx in input_idx_to_df})
if chunk.ndim == 2:
col_idxes = sorted({idx[1] for idx in input_idx_to_df})
ress = []
for row_idx in row_idxes:
if chunk.ndim == 2:
row_dfs = []
for col_idx in col_idxes:
row_dfs.append(input_idx_to_df[row_idx, col_idx])
row_df = pd.concat(row_dfs, axis=1)
else:
row_df = input_idx_to_df[(row_idx,)]
ress.append(row_df)
ctx[chunk.key] = pd.concat(ress, axis=0)
@classmethod
def execute(cls, ctx, op):
if op.stage == OperandStage.map:
cls.execute_map(ctx, op)
else:
cls.execute_reduce(ctx, op)
class _AxisMinMaxSplitInfo(object):
def __init__(self, left_split, left_increase, right_split, right_increase, dummy=False):
self._left_split = left_split
self._right_split = right_split
self._dummy = dummy
self._left_split_idx_to_origin_idx = \
build_split_idx_to_origin_idx(self._left_split, left_increase)
self._right_split_idx_to_origin_idx = \
build_split_idx_to_origin_idx(self._right_split, right_increase)
def isdummy(self):
return self._dummy
def get_origin_left_idx(self, idx):
return self._left_split_idx_to_origin_idx[idx][0]
def get_origin_left_split(self, idx):
left_idx, left_inner_idx = \
self._left_split_idx_to_origin_idx[idx]
return self._left_split[left_idx][left_inner_idx]
def get_origin_right_idx(self, idx):
return self._right_split_idx_to_origin_idx[idx][0]
def get_origin_right_split(self, idx):
right_idx, right_inner_idx = \
self._right_split_idx_to_origin_idx[idx]
return self._right_split[right_idx][right_inner_idx]
class _MinMaxSplitInfo(object):
def __init__(self, row_min_max_split_info=None, col_min_max_split_info=None):
self.row_min_max_split_info = row_min_max_split_info
self.col_min_max_split_info = col_min_max_split_info
def all_axes_can_split(self):
return self.row_min_max_split_info is not None and \
self.col_min_max_split_info is not None
def one_axis_can_split(self):
return (self.row_min_max_split_info is None) ^ \
(self.col_min_max_split_info is None)
def no_axis_can_split(self):
return self.row_min_max_split_info is None and \
self.col_min_max_split_info is None
def __getitem__(self, i):
return [self.row_min_max_split_info, self.col_min_max_split_info][i]
def __setitem__(self, axis, axis_min_max_split_info):
assert axis in {0, 1}
if axis == 0:
self.row_min_max_split_info = axis_min_max_split_info
else:
self.col_min_max_split_info = axis_min_max_split_info
def get_row_left_idx(self, out_idx):
return self.row_min_max_split_info.get_origin_left_idx(out_idx)
def get_row_left_split(self, out_idx):
return self.row_min_max_split_info.get_origin_left_split(out_idx)
def get_col_left_idx(self, out_idx):
return self.col_min_max_split_info.get_origin_left_idx(out_idx)
def get_col_left_split(self, out_idx):
return self.col_min_max_split_info.get_origin_left_split(out_idx)
def get_row_right_idx(self, out_idx):
return self.row_min_max_split_info.get_origin_right_idx(out_idx)
def get_row_right_split(self, out_idx):
return self.row_min_max_split_info.get_origin_right_split(out_idx)
def get_col_right_idx(self, out_idx):
return self.col_min_max_split_info.get_origin_right_idx(out_idx)
def get_col_right_split(self, out_idx):
return self.col_min_max_split_info.get_origin_right_split(out_idx)
def get_axis_idx(self, axis, left_or_right, out_idx):
if axis == 0:
if left_or_right == 0:
return self.get_row_left_idx(out_idx)
else:
assert left_or_right == 1
return self.get_row_right_idx(out_idx)
else:
assert axis == 1
if left_or_right == 0:
return self.get_col_left_idx(out_idx)
else:
assert left_or_right == 1
return self.get_col_right_idx(out_idx)
def get_axis_split(self, axis, left_or_right, out_idx):
if axis == 0:
if left_or_right == 0:
return self.get_row_left_split(out_idx)
else:
assert left_or_right == 1
return self.get_row_right_split(out_idx)
else:
assert axis == 1
if left_or_right == 0:
return self.get_col_left_split(out_idx)
else:
assert left_or_right == 1
return self.get_col_right_split(out_idx)
def _get_chunk_index_min_max(index_chunks):
chunk_index_min_max = []
for chunk in index_chunks:
min_val = chunk.min_val
min_val_close = chunk.min_val_close
max_val = chunk.max_val
max_val_close = chunk.max_val_close
if min_val is None or max_val is None:
chunk_index_min_max.append((None, True, None, True))
else:
chunk_index_min_max.append((min_val, min_val_close, max_val, max_val_close))
return chunk_index_min_max
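# Each entry appended above is a 4-tuple (min, min_is_closed, max, max_is_closed)
# describing the index range covered by one chunk; (None, True, None, True)
# marks a chunk whose bounds are unknown.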
def _get_monotonic_chunk_index_min_max(index, index_chunks):
chunk_index_min_max = _get_chunk_index_min_max(index_chunks)
if index.is_monotonic_decreasing:
return list(reversed(chunk_index_min_max)), False
for j in range(len(chunk_index_min_max) - 1):
# overlap only if the prev max is close and curr min is close
# and they are identical
prev_max, prev_max_close = chunk_index_min_max[j][2:]
curr_min, curr_min_close = chunk_index_min_max[j + 1][:2]
if prev_max_close and curr_min_close and prev_max == curr_min:
return
return chunk_index_min_max, True
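# Example: for per-chunk ranges [(0, True, 5, True), (5, True, 9, True)] the
# boundary value 5 is closed on both sides, so adjacent chunks overlap and the
# function returns None to signal that a clean monotonic split is not possible;
# with [(0, True, 5, False), (5, True, 9, True)] there is no overlap and the
# ranges are returned together with the increasing flag.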
def _need_align_map(input_chunk, index_min_max, column_min_max,
dummy_index_splits=False, dummy_column_splits=False):
if isinstance(input_chunk, SERIES_CHUNK_TYPE):
if input_chunk.index_value is None:
return True
if input_chunk.index_value.min_max != index_min_max:
return True
else:
if not dummy_index_splits:
if input_chunk.index_value is None or input_chunk.index_value.min_max != index_min_max:
return True
if not dummy_column_splits:
if input_chunk.columns_value is None or input_chunk.columns_value.min_max != column_min_max:
return True
return False
def _is_index_identical(left, right):
if len(left) != len(right):
return False
for left_item, right_item in zip(left, right):
if left_item.key != right_item.key:
return False
return True
def _axis_need_shuffle(left_axis, right_axis, left_axis_chunks, right_axis_chunks):
if _is_index_identical(left_axis_chunks, right_axis_chunks):
return False
if not left_axis.is_monotonic_increasing_or_decreasing and len(left_axis_chunks) > 1:
return True
if not right_axis.is_monotonic_increasing_or_decreasing and len(right_axis_chunks) > 1:
return True
return False
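# In short: identical chunk indexes never need a shuffle, and a shuffle is only
# required when a side is both non-monotonic and split across more than one
# chunk, since a single chunk (or a monotonic index) can still be realigned
# with range-based splits.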
def _calc_axis_splits(left_axis, right_axis, left_axis_chunks, right_axis_chunks):
if _axis_need_shuffle(left_axis, right_axis, left_axis_chunks, right_axis_chunks):
# do shuffle
out_chunk_size = max(len(left_axis_chunks), len(right_axis_chunks))
return None, [np.nan for _ in range(out_chunk_size)]
else:
# no need to do shuffle on this axis
if _is_index_identical(left_axis_chunks, right_axis_chunks):
left_chunk_index_min_max = _get_chunk_index_min_max(left_axis_chunks)
right_splits = left_splits = [[c] for c in left_chunk_index_min_max]
right_increase = left_increase = None
elif len(left_axis_chunks) == 1 and len(right_axis_chunks) == 1:
left_splits = [_get_chunk_index_min_max(left_axis_chunks)]
left_increase = left_axis_chunks[0].is_monotonic_decreasing
right_splits = [_get_chunk_index_min_max(right_axis_chunks)]
right_increase = right_axis_chunks[0].is_monotonic_decreasing
else:
left_chunk_index_min_max, left_increase = _get_monotonic_chunk_index_min_max(left_axis,
left_axis_chunks)
right_chunk_index_min_max, right_increase = _get_monotonic_chunk_index_min_max(right_axis,
right_axis_chunks)
left_splits, right_splits = split_monotonic_index_min_max(
left_chunk_index_min_max, left_increase, right_chunk_index_min_max, right_increase)
splits = _AxisMinMaxSplitInfo(left_splits, left_increase, right_splits, right_increase)
nsplits = [np.nan for _ in itertools.chain(*left_splits)]
return splits, nsplits
def _build_dummy_axis_split(chunk_shape):
axis_index_min_max, axis_increase = [(i, True, i + 1, True) for i in range(chunk_shape)], True
if len(axis_index_min_max) == 1:
left_splits, right_splits = [axis_index_min_max], [axis_index_min_max]
else:
left_splits, right_splits = split_monotonic_index_min_max(
axis_index_min_max, axis_increase, axis_index_min_max, axis_increase)
return _AxisMinMaxSplitInfo(left_splits, axis_increase,
right_splits, axis_increase, dummy=True)
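# A "dummy" split assigns chunk i the artificial closed range [i, i + 1], so
# purely positional alignment along that axis can reuse the same min-max split
# machinery without inspecting real index values.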
def _gen_series_chunks(splits, out_shape, left_or_right, series):
out_chunks = []
if splits[0] is not None:
# need no shuffle
for out_idx in range(out_shape[0]):
idx = splits.get_axis_idx(0, left_or_right, out_idx)
index_min_max = splits.get_axis_split(0, left_or_right, out_idx)
chunk = series.cix[(idx,)]
if _need_align_map(chunk, index_min_max, None):
align_op = DataFrameIndexAlign(
stage=OperandStage.map, index_min_max=index_min_max, column_min_max=None,
dtype=chunk.dtype, sparse=series.issparse(), output_types=[OutputType.series])
out_chunk = align_op.new_chunk([chunk], shape=(np.nan,), index=(out_idx,))
else:
out_chunk = chunk
out_chunks.append(out_chunk)
else:
# gen map chunks
map_chunks = []
for chunk in series.chunks:
map_op = DataFrameIndexAlign(
stage=OperandStage.map, sparse=chunk.issparse(), index_shuffle_size=out_shape[0],
output_types=[OutputType.series])
map_chunks.append(map_op.new_chunk([chunk], shape=(np.nan,), index=chunk.index))
proxy_chunk = DataFrameShuffleProxy(output_types=[OutputType.series]).new_chunk(
map_chunks, shape=())
# gen reduce chunks
for out_idx in range(out_shape[0]):
reduce_op = DataFrameIndexAlign(stage=OperandStage.reduce, i=out_idx,
sparse=proxy_chunk.issparse(), shuffle_key=str(out_idx),
output_types=[OutputType.series])
out_chunks.append(
reduce_op.new_chunk([proxy_chunk], shape=(np.nan,), index=(out_idx,)))
return out_chunks
def _gen_dataframe_chunks(splits, out_shape, left_or_right, df):
out_chunks = []
if splits.all_axes_can_split():
# no shuffle for all axes
kw = {
'index_shuffle_size': -1 if splits[0].isdummy() else None,
'column_shuffle_size': -1 if splits[1].isdummy() else None,
}
for out_idx in itertools.product(*(range(s) for s in out_shape)):
row_idx = splits.get_axis_idx(0, left_or_right, out_idx[0])
col_idx = splits.get_axis_idx(1, left_or_right, out_idx[1])
index_min_max = splits.get_axis_split(0, left_or_right, out_idx[0])
column_min_max = splits.get_axis_split(1, left_or_right, out_idx[1])
chunk = df.cix[row_idx, col_idx]
if _need_align_map(chunk, index_min_max, column_min_max,
splits[0].isdummy(), splits[1].isdummy()):
if splits[1].isdummy():
dtypes = chunk.dtypes
else:
dtypes = filter_dtypes(chunk.dtypes, column_min_max)
chunk_kw = {
'index_value': chunk.index_value if splits[0].isdummy() else None,
'columns_value': chunk.columns_value if splits[1].isdummy() else None,
'dtypes': chunk.dtypes if splits[1].isdummy() else None
}
align_op = DataFrameIndexAlign(
stage=OperandStage.map, index_min_max=index_min_max,
column_min_max=column_min_max, dtypes=dtypes, sparse=chunk.issparse(),
output_types=[OutputType.dataframe], **kw)
out_chunk = align_op.new_chunk([chunk], shape=(np.nan, np.nan), index=out_idx, **chunk_kw)
else:
out_chunk = chunk
out_chunks.append(out_chunk)
elif splits.one_axis_can_split():
# one axis needs shuffle
shuffle_axis = 0 if splits[0] is None else 1
align_axis = 1 - shuffle_axis
for align_axis_idx in range(out_shape[align_axis]):
if align_axis == 0:
kw = {
'index_min_max': splits.get_axis_split(align_axis, left_or_right, align_axis_idx),
'index_shuffle_size': -1 if splits[0].isdummy() else None,
'column_shuffle_size': out_shape[shuffle_axis],
}
input_idx = splits.get_axis_idx(align_axis, left_or_right, align_axis_idx)
else:
kw = {
'column_min_max': splits.get_axis_split(align_axis, left_or_right, align_axis_idx),
'index_shuffle_size': out_shape[shuffle_axis],
'column_shuffle_size': -1 if splits[1].isdummy() else None,
}
input_idx = splits.get_axis_idx(align_axis, left_or_right, align_axis_idx)
input_chunks = [c for c in df.chunks if c.index[align_axis] == input_idx]
map_chunks = []
for j, input_chunk in enumerate(input_chunks):
chunk_kw = dict()
if align_axis == 0:
chunk_kw['index_value'] = input_chunk.index_value if splits[0].isdummy() else None
else:
chunk_kw['columns_value'] = input_chunk.columns_value if splits[1].isdummy() else None
map_op = DataFrameIndexAlign(stage=OperandStage.map, sparse=input_chunk.issparse(),
output_types=[OutputType.dataframe], **kw)
idx = [None, None]
idx[align_axis] = align_axis_idx
idx[shuffle_axis] = j
map_chunks.append(map_op.new_chunk([input_chunk], shape=(np.nan, np.nan), index=tuple(idx), **chunk_kw))
proxy_chunk = DataFrameShuffleProxy(
sparse=df.issparse(), output_types=[OutputType.dataframe]).new_chunk(map_chunks, shape=())
for j in range(out_shape[shuffle_axis]):
chunk_kw = dict()
if align_axis == 0:
chunk_kw['index_value'] = proxy_chunk.inputs[0].inputs[0].index_value \
if splits[0].isdummy() else None
else:
chunk_kw['columns_value'] = proxy_chunk.inputs[0].inputs[0].columns_value \
if splits[1].isdummy() else None
reduce_idx = (align_axis_idx, j) if align_axis == 0 else (j, align_axis_idx)
reduce_op = DataFrameIndexAlign(stage=OperandStage.reduce, i=j, sparse=proxy_chunk.issparse(),
shuffle_key=','.join(str(idx) for idx in reduce_idx),
output_types=[OutputType.dataframe])
out_chunks.append(
reduce_op.new_chunk([proxy_chunk], shape=(np.nan, np.nan), index=reduce_idx, **chunk_kw))
out_chunks.sort(key=lambda c: c.index)
else:
# all axes need shuffle
assert splits.no_axis_can_split()
# gen map chunks
map_chunks = []
for chunk in df.chunks:
map_op = DataFrameIndexAlign(
stage=OperandStage.map, sparse=chunk.issparse(), index_shuffle_size=out_shape[0],
column_shuffle_size=out_shape[1], output_types=[OutputType.dataframe])
map_chunks.append(map_op.new_chunk([chunk], shape=(np.nan, np.nan), index=chunk.index))
proxy_chunk = DataFrameShuffleProxy(output_types=[OutputType.dataframe]).new_chunk(
map_chunks, shape=())
# gen reduce chunks
for out_idx in itertools.product(*(range(s) for s in out_shape)):
reduce_op = DataFrameIndexAlign(stage=OperandStage.reduce, i=out_idx,
sparse=proxy_chunk.issparse(),
shuffle_key=','.join(str(idx) for idx in out_idx),
output_types=[OutputType.dataframe])
out_chunks.append(
reduce_op.new_chunk([proxy_chunk], shape=(np.nan, np.nan), index=out_idx))
return out_chunks
def align_dataframe_dataframe(left, right):
left_index_chunks = [c.index_value for c in left.cix[:, 0]]
left_columns_chunks = [c.columns_value for c in left.cix[0, :]]
right_index_chunks = [c.index_value for c in right.cix[:, 0]]
right_columns_chunks = [c.columns_value for c in right.cix[0, :]]
index_splits, index_nsplits = _calc_axis_splits(left.index_value, right.index_value,
left_index_chunks, right_index_chunks)
if _is_index_identical(left_index_chunks, right_index_chunks):
index_nsplits = left.nsplits[0]
columns_splits, columns_nsplits = _calc_axis_splits(left.columns_value, right.columns_value,
left_columns_chunks, right_columns_chunks)
if _is_index_identical(left_columns_chunks, right_columns_chunks):
columns_nsplits = left.nsplits[1]
nsplits = [index_nsplits, columns_nsplits]
out_chunk_shape = tuple(len(ns) for ns in nsplits)
splits = _MinMaxSplitInfo(index_splits, columns_splits)
left_chunks = _gen_dataframe_chunks(splits, out_chunk_shape, 0, left)
right_chunks = _gen_dataframe_chunks(splits, out_chunk_shape, 1, right)
return nsplits, out_chunk_shape, left_chunks, right_chunks
def align_dataframe_series(left, right, axis='columns'):
if axis == 'columns' or axis == 1:
left_columns_chunks = [c.columns_value for c in left.cix[0, :]]
right_index_chunks = [c.index_value for c in right.chunks]
index_splits, index_nsplits = _calc_axis_splits(left.columns_value, right.index_value,
left_columns_chunks, right_index_chunks)
if _is_index_identical(left_columns_chunks, right_index_chunks):
index_nsplits = left.nsplits[1]
dummy_splits, dummy_nsplits = _build_dummy_axis_split(left.chunk_shape[0]), left.nsplits[0]
nsplits = [dummy_nsplits, index_nsplits]
out_chunk_shape = tuple(len(ns) for ns in nsplits)
left_chunks = _gen_dataframe_chunks(_MinMaxSplitInfo(dummy_splits, index_splits), out_chunk_shape, 0, left)
right_chunks = _gen_series_chunks(_MinMaxSplitInfo(index_splits, None), (out_chunk_shape[1],), 1, right)
else:
assert axis == 'index' or axis == 0
left_index_chunks = [c.index_value for c in left.cix[:, 0]]
right_index_chunks = [c.index_value for c in right.chunks]
index_splits, index_nsplits = _calc_axis_splits(left.index_value, right.index_value,
left_index_chunks, right_index_chunks)
if _is_index_identical(left_index_chunks, right_index_chunks):
index_nsplits = left.nsplits[0]
dummy_splits, dummy_nsplits = _build_dummy_axis_split(left.chunk_shape[1]), left.nsplits[1]
nsplits = [index_nsplits, dummy_nsplits]
out_chunk_shape = tuple(len(ns) for ns in nsplits)
left_chunks = _gen_dataframe_chunks(_MinMaxSplitInfo(index_splits, dummy_splits), out_chunk_shape, 0, left)
right_chunks = _gen_series_chunks(_MinMaxSplitInfo(index_splits, None), (out_chunk_shape[0],), 1, right)
return nsplits, out_chunk_shape, left_chunks, right_chunks
def align_series_series(left, right):
left_index_chunks = [c.index_value for c in left.chunks]
right_index_chunks = [c.index_value for c in right.chunks]
index_splits, index_nsplits = _calc_axis_splits(left.index_value, right.index_value,
left_index_chunks, right_index_chunks)
if _is_index_identical(left_index_chunks, right_index_chunks):
index_nsplits = left.nsplits[0]
nsplits = [index_nsplits]
out_chunk_shape = (len(index_nsplits),)
splits = _MinMaxSplitInfo(index_splits, None)
left_chunks = _gen_series_chunks(splits, out_chunk_shape, 0, left)
right_chunks = _gen_series_chunks(splits, out_chunk_shape, 1, right)
return nsplits, out_chunk_shape, left_chunks, right_chunks
|
(window["webpackJsonp"] = window["webpackJsonp"] || []).push([[0],{
/***/ "./node_modules/@ionic/core/dist/esm/legacy/index-3a9dcfed.js":
/*!********************************************************************!*\
!*** ./node_modules/@ionic/core/dist/esm/legacy/index-3a9dcfed.js ***!
\********************************************************************/
/*! exports provided: GESTURE_CONTROLLER, createGesture */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
__webpack_require__.r(__webpack_exports__);
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "GESTURE_CONTROLLER", function() { return GESTURE_CONTROLLER; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "createGesture", function() { return createGesture; });
/* harmony import */ var _chunk_09ec7fc0_js__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! ./chunk-09ec7fc0.js */ "./node_modules/@ionic/core/dist/esm/legacy/chunk-09ec7fc0.js");
/* harmony import */ var _chunk_1074393c_js__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! ./chunk-1074393c.js */ "./node_modules/@ionic/core/dist/esm/legacy/chunk-1074393c.js");
var GestureController = /** @class */ (function () {
function GestureController() {
this.gestureId = 0;
this.requestedStart = new Map();
this.disabledGestures = new Map();
this.disabledScroll = new Set();
}
/**
* Creates a gesture delegate based on the GestureConfig passed
*/
GestureController.prototype.createGesture = function (config) {
return new GestureDelegate(this, this.newID(), config.name, config.priority || 0, !!config.disableScroll);
};
/**
* Creates a blocker that will block any other gesture events from firing. Set in the ion-gesture component.
*/
GestureController.prototype.createBlocker = function (opts) {
if (opts === void 0) { opts = {}; }
return new BlockerDelegate(this, this.newID(), opts.disable, !!opts.disableScroll);
};
GestureController.prototype.start = function (gestureName, id, priority) {
if (!this.canStart(gestureName)) {
this.requestedStart.delete(id);
return false;
}
this.requestedStart.set(id, priority);
return true;
};
GestureController.prototype.capture = function (gestureName, id, priority) {
if (!this.start(gestureName, id, priority)) {
return false;
}
var requestedStart = this.requestedStart;
var maxPriority = -10000;
requestedStart.forEach(function (value) {
maxPriority = Math.max(maxPriority, value);
});
if (maxPriority === priority) {
this.capturedId = id;
requestedStart.clear();
var event = new CustomEvent('ionGestureCaptured', { detail: { gestureName: gestureName } });
document.dispatchEvent(event);
return true;
}
requestedStart.delete(id);
return false;
};
GestureController.prototype.release = function (id) {
this.requestedStart.delete(id);
if (this.capturedId === id) {
this.capturedId = undefined;
}
};
GestureController.prototype.disableGesture = function (gestureName, id) {
var set = this.disabledGestures.get(gestureName);
if (set === undefined) {
set = new Set();
this.disabledGestures.set(gestureName, set);
}
set.add(id);
};
GestureController.prototype.enableGesture = function (gestureName, id) {
var set = this.disabledGestures.get(gestureName);
if (set !== undefined) {
set.delete(id);
}
};
GestureController.prototype.disableScroll = function (id) {
this.disabledScroll.add(id);
if (this.disabledScroll.size === 1) {
document.body.classList.add(BACKDROP_NO_SCROLL);
}
};
GestureController.prototype.enableScroll = function (id) {
this.disabledScroll.delete(id);
if (this.disabledScroll.size === 0) {
document.body.classList.remove(BACKDROP_NO_SCROLL);
}
};
GestureController.prototype.canStart = function (gestureName) {
if (this.capturedId !== undefined) {
// a gesture already captured
return false;
}
if (this.isDisabled(gestureName)) {
return false;
}
return true;
};
GestureController.prototype.isCaptured = function () {
return this.capturedId !== undefined;
};
GestureController.prototype.isScrollDisabled = function () {
return this.disabledScroll.size > 0;
};
GestureController.prototype.isDisabled = function (gestureName) {
var disabled = this.disabledGestures.get(gestureName);
if (disabled && disabled.size > 0) {
return true;
}
return false;
};
GestureController.prototype.newID = function () {
this.gestureId++;
return this.gestureId;
};
return GestureController;
}());
var GestureDelegate = /** @class */ (function () {
function GestureDelegate(ctrl, id, name, priority, disableScroll) {
this.id = id;
this.name = name;
this.disableScroll = disableScroll;
this.priority = priority * 1000000 + id;
this.ctrl = ctrl;
}
GestureDelegate.prototype.canStart = function () {
if (!this.ctrl) {
return false;
}
return this.ctrl.canStart(this.name);
};
GestureDelegate.prototype.start = function () {
if (!this.ctrl) {
return false;
}
return this.ctrl.start(this.name, this.id, this.priority);
};
GestureDelegate.prototype.capture = function () {
if (!this.ctrl) {
return false;
}
var captured = this.ctrl.capture(this.name, this.id, this.priority);
if (captured && this.disableScroll) {
this.ctrl.disableScroll(this.id);
}
return captured;
};
GestureDelegate.prototype.release = function () {
if (this.ctrl) {
this.ctrl.release(this.id);
if (this.disableScroll) {
this.ctrl.enableScroll(this.id);
}
}
};
GestureDelegate.prototype.destroy = function () {
this.release();
this.ctrl = undefined;
};
return GestureDelegate;
}());
var BlockerDelegate = /** @class */ (function () {
function BlockerDelegate(ctrl, id, disable, disableScroll) {
this.id = id;
this.disable = disable;
this.disableScroll = disableScroll;
this.ctrl = ctrl;
}
BlockerDelegate.prototype.block = function () {
if (!this.ctrl) {
return;
}
if (this.disable) {
for (var _i = 0, _a = this.disable; _i < _a.length; _i++) {
var gesture = _a[_i];
this.ctrl.disableGesture(gesture, this.id);
}
}
if (this.disableScroll) {
this.ctrl.disableScroll(this.id);
}
};
BlockerDelegate.prototype.unblock = function () {
if (!this.ctrl) {
return;
}
if (this.disable) {
for (var _i = 0, _a = this.disable; _i < _a.length; _i++) {
var gesture = _a[_i];
this.ctrl.enableGesture(gesture, this.id);
}
}
if (this.disableScroll) {
this.ctrl.enableScroll(this.id);
}
};
BlockerDelegate.prototype.destroy = function () {
this.unblock();
this.ctrl = undefined;
};
return BlockerDelegate;
}());
var BACKDROP_NO_SCROLL = 'backdrop-no-scroll';
var GESTURE_CONTROLLER = new GestureController();
var addEventListener = function (el, eventName, callback, opts) {
// use event listener options when supported
// otherwise it's just a boolean for the "capture" arg
var listenerOpts = supportsPassive(el) ? {
'capture': !!opts.capture,
'passive': !!opts.passive,
} : !!opts.capture;
var add;
var remove;
if (el['__zone_symbol__addEventListener']) {
add = '__zone_symbol__addEventListener';
remove = '__zone_symbol__removeEventListener';
}
else {
add = 'addEventListener';
remove = 'removeEventListener';
}
el[add](eventName, callback, listenerOpts);
return function () {
el[remove](eventName, callback, listenerOpts);
};
};
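// Note: the '__zone_symbol__' variants are the unpatched DOM methods saved by
// zone.js (as used by Angular); preferring them keeps high-frequency gesture
// listeners from triggering change detection on every pointer event.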
var supportsPassive = function (node) {
if (_sPassive === undefined) {
try {
var opts = Object.defineProperty({}, 'passive', {
get: function () {
_sPassive = true;
}
});
node.addEventListener('optsTest', function () { return; }, opts);
}
catch (e) {
_sPassive = false;
}
}
return !!_sPassive;
};
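// Feature-detection trick: the 'passive' getter only runs (setting _sPassive)
// in browsers that actually read passive event-listener options; elsewhere the
// options object is ignored and the flag stays falsy.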
var _sPassive;
var MOUSE_WAIT = 2000;
var createPointerEvents = function (el, pointerDown, pointerMove, pointerUp, options) {
var rmTouchStart;
var rmTouchMove;
var rmTouchEnd;
var rmTouchCancel;
var rmMouseStart;
var rmMouseMove;
var rmMouseUp;
var lastTouchEvent = 0;
var handleTouchStart = function (ev) {
lastTouchEvent = Date.now() + MOUSE_WAIT;
if (!pointerDown(ev)) {
return;
}
if (!rmTouchMove && pointerMove) {
rmTouchMove = addEventListener(el, 'touchmove', pointerMove, options);
}
if (!rmTouchEnd) {
rmTouchEnd = addEventListener(el, 'touchend', handleTouchEnd, options);
}
if (!rmTouchCancel) {
rmTouchCancel = addEventListener(el, 'touchcancel', handleTouchEnd, options);
}
};
var handleMouseDown = function (ev) {
if (lastTouchEvent > Date.now()) {
return;
}
if (!pointerDown(ev)) {
return;
}
if (!rmMouseMove && pointerMove) {
rmMouseMove = addEventListener(getDocument(el), 'mousemove', pointerMove, options);
}
if (!rmMouseUp) {
rmMouseUp = addEventListener(getDocument(el), 'mouseup', handleMouseUp, options);
}
};
var handleTouchEnd = function (ev) {
stopTouch();
if (pointerUp) {
pointerUp(ev);
}
};
var handleMouseUp = function (ev) {
stopMouse();
if (pointerUp) {
pointerUp(ev);
}
};
var stopTouch = function () {
if (rmTouchMove) {
rmTouchMove();
}
if (rmTouchEnd) {
rmTouchEnd();
}
if (rmTouchCancel) {
rmTouchCancel();
}
rmTouchMove = rmTouchEnd = rmTouchCancel = undefined;
};
var stopMouse = function () {
if (rmMouseMove) {
rmMouseMove();
}
if (rmMouseUp) {
rmMouseUp();
}
rmMouseMove = rmMouseUp = undefined;
};
var stop = function () {
stopTouch();
stopMouse();
};
var setDisabled = function (disabled) {
if (disabled) {
if (rmTouchStart) {
rmTouchStart();
}
if (rmMouseStart) {
rmMouseStart();
}
rmTouchStart = rmMouseStart = undefined;
stop();
}
else {
if (!rmTouchStart) {
rmTouchStart = addEventListener(el, 'touchstart', handleTouchStart, options);
}
if (!rmMouseStart) {
rmMouseStart = addEventListener(el, 'mousedown', handleMouseDown, options);
}
}
};
var destroy = function () {
setDisabled(true);
pointerUp = pointerMove = pointerDown = undefined;
};
return {
setDisabled: setDisabled,
stop: stop,
destroy: destroy
};
};
var getDocument = function (node) {
return node instanceof Document ? node : node.ownerDocument;
};
var createPanRecognizer = function (direction, thresh, maxAngle) {
var radians = maxAngle * (Math.PI / 180);
var isDirX = direction === 'x';
var maxCosine = Math.cos(radians);
var threshold = thresh * thresh;
var startX = 0;
var startY = 0;
var dirty = false;
var isPan = 0;
return {
start: function (x, y) {
startX = x;
startY = y;
isPan = 0;
dirty = true;
},
detect: function (x, y) {
if (!dirty) {
return false;
}
var deltaX = (x - startX);
var deltaY = (y - startY);
var distance = deltaX * deltaX + deltaY * deltaY;
if (distance < threshold) {
return false;
}
var hypotenuse = Math.sqrt(distance);
var cosine = (isDirX ? deltaX : deltaY) / hypotenuse;
if (cosine > maxCosine) {
isPan = 1;
}
else if (cosine < -maxCosine) {
isPan = -1;
}
else {
isPan = 0;
}
dirty = false;
return true;
},
isGesture: function () {
return isPan !== 0;
},
getDirection: function () {
return isPan;
}
};
};
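// Geometry of the recognizer: motion is only classified once its squared
// length exceeds threshold^2 (avoiding a sqrt per event), and it counts as a
// pan along the configured axis only when the cosine of the angle between the
// movement vector and that axis exceeds cos(maxAngle) (e.g. maxAngle = 40deg
// requires a cosine above ~0.766), with the sign giving the pan direction.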
var createGesture = function (config) {
var hasCapturedPan = false;
var hasStartedPan = false;
var hasFiredStart = true;
var isMoveQueued = false;
var finalConfig = Object.assign({ disableScroll: false, direction: 'x', gesturePriority: 0, passive: true, maxAngle: 40, threshold: 10 }, config);
var canStart = finalConfig.canStart;
var onWillStart = finalConfig.onWillStart;
var onStart = finalConfig.onStart;
var onEnd = finalConfig.onEnd;
var notCaptured = finalConfig.notCaptured;
var onMove = finalConfig.onMove;
var threshold = finalConfig.threshold;
var detail = {
type: 'pan',
startX: 0,
startY: 0,
startTimeStamp: 0,
currentX: 0,
currentY: 0,
velocityX: 0,
velocityY: 0,
deltaX: 0,
deltaY: 0,
timeStamp: 0,
event: undefined,
data: undefined
};
var pan = createPanRecognizer(finalConfig.direction, finalConfig.threshold, finalConfig.maxAngle);
var gesture = GESTURE_CONTROLLER.createGesture({
name: config.gestureName,
priority: config.gesturePriority,
disableScroll: config.disableScroll
});
var pointerDown = function (ev) {
var timeStamp = now(ev);
if (hasStartedPan || !hasFiredStart) {
return false;
}
updateDetail(ev, detail);
detail.startX = detail.currentX;
detail.startY = detail.currentY;
detail.startTimeStamp = detail.timeStamp = timeStamp;
detail.velocityX = detail.velocityY = detail.deltaX = detail.deltaY = 0;
detail.event = ev;
// Check if gesture can start
if (canStart && canStart(detail) === false) {
return false;
}
// Release fallback
gesture.release();
// Start gesture
if (!gesture.start()) {
return false;
}
hasStartedPan = true;
if (threshold === 0) {
return tryToCapturePan();
}
pan.start(detail.startX, detail.startY);
return true;
};
var pointerMove = function (ev) {
// fast path, if gesture is currently captured
// do the minimum work to get the user-land event dispatched
if (hasCapturedPan) {
if (!isMoveQueued && hasFiredStart) {
isMoveQueued = true;
calcGestureData(detail, ev);
Object(_chunk_09ec7fc0_js__WEBPACK_IMPORTED_MODULE_0__["w"])(fireOnMove);
}
return;
}
// gesture is currently being detected
calcGestureData(detail, ev);
if (pan.detect(detail.currentX, detail.currentY)) {
if (!pan.isGesture() || !tryToCapturePan()) {
abortGesture();
}
}
};
var fireOnMove = function () {
// Since fireOnMove is called inside a requestAnimationFrame callback, onEnd()
// might already have been called, so we must double-check hasCapturedPan
if (!hasCapturedPan) {
return;
}
isMoveQueued = false;
if (onMove) {
onMove(detail);
}
};
var tryToCapturePan = function () {
if (gesture && !gesture.capture()) {
return false;
}
hasCapturedPan = true;
hasFiredStart = false;
// Reset the start position, since the real user-land gesture starts here.
// If the pan detector threshold is large, not resetting the start position
// would cause a jump in the animation equal to that threshold.
// The positions used to calculate the gesture velocity do not need to be
// cleared; more points always yield a more accurate velocity estimate.
detail.startX = detail.currentX;
detail.startY = detail.currentY;
detail.startTimeStamp = detail.timeStamp;
if (onWillStart) {
onWillStart(detail).then(fireOnStart);
}
else {
fireOnStart();
}
return true;
};
var fireOnStart = function () {
if (onStart) {
onStart(detail);
}
hasFiredStart = true;
};
var reset = function () {
hasCapturedPan = false;
hasStartedPan = false;
isMoveQueued = false;
hasFiredStart = true;
gesture.release();
};
var pointerUp = function (ev) {
var tmpHasCaptured = hasCapturedPan;
var tmpHasFiredStart = hasFiredStart;
reset();
if (!tmpHasFiredStart) {
return;
}
calcGestureData(detail, ev);
// Try to capture press
if (tmpHasCaptured) {
if (onEnd) {
onEnd(detail);
}
return;
}
// Not captured any event
if (notCaptured) {
notCaptured(detail);
}
};
var pointerEvents = createPointerEvents(finalConfig.el, pointerDown, pointerMove, pointerUp, {
capture: false,
});
var abortGesture = function () {
reset();
pointerEvents.stop();
if (notCaptured) {
notCaptured(detail);
}
};
return {
setDisabled: function (disabled) {
if (disabled && hasCapturedPan) {
pointerUp(undefined);
}
pointerEvents.setDisabled(disabled);
},
destroy: function () {
gesture.destroy();
pointerEvents.destroy();
}
};
};
var calcGestureData = function (detail, ev) {
if (!ev) {
return;
}
var prevX = detail.currentX;
var prevY = detail.currentY;
var prevT = detail.timeStamp;
updateDetail(ev, detail);
var currentX = detail.currentX;
var currentY = detail.currentY;
var timestamp = detail.timeStamp = now(ev);
var timeDelta = timestamp - prevT;
if (timeDelta > 0 && timeDelta < 100) {
var velocityX = (currentX - prevX) / timeDelta;
var velocityY = (currentY - prevY) / timeDelta;
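            // Exponentially smooth the velocity: weight the newest sample at
            // 0.7 and the previous estimate at 0.3 to damp sample-to-sample jitter.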
detail.velocityX = velocityX * 0.7 + detail.velocityX * 0.3;
detail.velocityY = velocityY * 0.7 + detail.velocityY * 0.3;
}
detail.deltaX = currentX - detail.startX;
detail.deltaY = currentY - detail.startY;
detail.event = ev;
};
var updateDetail = function (ev, detail) {
// get X coordinates for either a mouse click
// or a touch depending on the given event
var x = 0;
var y = 0;
if (ev) {
var changedTouches = ev.changedTouches;
if (changedTouches && changedTouches.length > 0) {
var touch = changedTouches[0];
x = touch.clientX;
y = touch.clientY;
}
else if (ev.pageX !== undefined) {
x = ev.pageX;
y = ev.pageY;
}
}
detail.currentX = x;
detail.currentY = y;
};
var now = function (ev) {
return ev.timeStamp || Date.now();
};
/***/ })
}]);
//# sourceMappingURL=0.js.map |
#!/usr/bin/env python
"""
run_1yr_tt_benchmark.py: Driver script for creating benchmark plots and
testing gcpy 1-year TransportTracers benchmark capability.
Run this script to generate benchmark comparisons between:
(1) GCC (aka GEOS-Chem "Classic") vs. GCC
(2) GCHP vs GCC (not yet tested)
(3) GCHP vs GCHP (not yet tested)
You can customize this by editing the settings in the corresponding yaml
config file (e.g. 1yr_tt_benchmark.yml).
Calling sequence:
./run_1yr_tt_benchmark.py <path-to-configuration-file>
To test gcpy, copy this script and the corresponding yaml config file
anywhere you want to run the test. Set gcpy_test to True in the yaml
config file. Benchmark artifacts will be created locally in a new folder
called Plots.
Remarks:
By default, matplotlib will try to open an X window for plotting.
If you are running this script in an environment where you do not have
an active X display (such as in a computational queue), then you will
need to use these commands to disable the X-window functionality.
import os
os.environ["QT_QPA_PLATFORM"]="offscreen"
For more information, please see this issue posted at the ipython site:
https://github.com/ipython/ipython/issues/10627
This script corresponds with GCPy 1.1.0. Edit this version ID if releasing
a new version of GCPy.
"""
# ======================================================================
# Imports and global settings (you should not need to edit these)
# ======================================================================
import os
import sys
import warnings
from os.path import join, exists
from shutil import copyfile
from calendar import monthrange
import numpy as np
from gcpy.util import get_filepaths, read_config_file
from gcpy import benchmark as bmk
import gcpy.budget_tt as ttbdg
import gcpy.ste_flux as ste
# Tell matplotlib not to look for an X-window
os.environ["QT_QPA_PLATFORM"] = "offscreen"
# Suppress annoying warning messages
warnings.filterwarnings("ignore", category=RuntimeWarning)
warnings.filterwarnings("ignore", category=UserWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
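# ----------------------------------------------------------------------
# Illustrative sketch (not an authoritative template) of the yaml config
# structure this script reads. Keys mirror the config[...] lookups used
# below; the values shown are placeholders, not defaults:
#
#   paths:
#     main_dir:    /path/to/benchmark/data
#     results_dir: BenchmarkResults
#     weights_dir: /path/to/regridding/weights
#   data:
#     ref:
#       gcc:  { version: GCC_ref_version,  subdir: OutputDir }
#       gchp: { version: GCHP_ref_version, subdir: OutputDir,
#               is_legacy: false, prior_to_13: false }
#     dev:
#       gcc:  { version: GCC_dev_version,  subdir: OutputDir }
#       gchp: { version: GCHP_dev_version, subdir: OutputDir,
#               is_legacy: false, prior_to_13: false }
#   options:
#     gcpy_test: true
#     comparisons:
#       gcc_vs_gcc:   { run: true, dir: GCC_version_comparison,  tables_subdir: Tables }
#       gchp_vs_gcc:  { run: true, dir: GCHP_GCC_comparison,     tables_subdir: Tables }
#       gchp_vs_gchp: { run: true, dir: GCHP_version_comparison, tables_subdir: Tables }
#     outputs:
#       plot_conc: true
#       plot_wetdep: true
#       rnpbbe_budget: true
#       operations_budget: true
#       ste_table: true
#       cons_table: true
# ----------------------------------------------------------------------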
def gchp_metname(prior_to_13):
"""
    Determines the correct collection name for GCHP StateMet data.
"""
if prior_to_13:
return "StateMet_avg"
return "StateMet"
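    # Example: gchp_metname(True) returns "StateMet_avg";
    # gchp_metname(False) returns "StateMet".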
def run_benchmark(config):
# This script has a fixed benchmark type, year, and months
bmk_type = "TransportTracersBenchmark"
bmk_year_ref = "2019"
bmk_year_dev = "2019"
bmk_mon_strs = ["Jan", "Apr", "Jul", "Oct"]
bmk_mon_inds = [0, 3, 6, 9]
bmk_n_months = len(bmk_mon_strs)
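    # i.e. one representative month per season: Jan, Apr, Jul, Oct, at
    # indices 0, 3, 6, 9 of the all-months arrays built below.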
########################################################################
### CONFIGURABLE SETTINGS: ***EDIT AS NEEDED*** ###
########################################################################
# ======================================================================
# Benchmark information
    # Note: When doing GCHP vs GCC comparisons, gchp_dev will be compared
# to gcc_dev (not gcc_ref!).
# ======================================================================
    # Path to species_database.yml
spcdb_dir = join(
config["paths"]["main_dir"], config["data"]["dev"]["gcc"]["version"]
)
# ======================================================================
# Data directories
# For gchp_vs_gcc_refdir use config["data"]["dev"]["gcc"]["version"], not ref (mps, 6/27/19)
# ======================================================================
# Diagnostic file directory paths
gcc_vs_gcc_refdir = join(
config["paths"]["main_dir"],
config["data"]["ref"]["gcc"]["version"],
config["data"]["ref"]["gcc"]["subdir"],
)
gcc_vs_gcc_devdir = join(
config["paths"]["main_dir"],
config["data"]["dev"]["gcc"]["version"],
config["data"]["dev"]["gcc"]["subdir"],
)
gchp_vs_gcc_refdir = join(
config["paths"]["main_dir"],
config["data"]["dev"]["gcc"]["version"],
config["data"]["dev"]["gcc"]["subdir"],
)
gchp_vs_gcc_devdir = join(
config["paths"]["main_dir"],
config["data"]["dev"]["gchp"]["version"],
config["data"]["dev"]["gchp"]["subdir"],
)
gchp_vs_gchp_refdir = join(
config["paths"]["main_dir"],
config["data"]["ref"]["gchp"]["version"],
config["data"]["ref"]["gchp"]["subdir"],
)
gchp_vs_gchp_devdir = join(
config["paths"]["main_dir"],
config["data"]["dev"]["gchp"]["version"],
config["data"]["dev"]["gchp"]["subdir"],
)
# Restart file directory paths
gcc_vs_gcc_refrstdir = join(
config["paths"]["main_dir"], config["data"]["ref"]["gcc"]["version"], "restarts"
)
gcc_vs_gcc_devrstdir = join(
config["paths"]["main_dir"], config["data"]["dev"]["gcc"]["version"], "restarts"
)
gchp_vs_gcc_refrstdir = join(
config["paths"]["main_dir"], config["data"]["dev"]["gcc"]["version"], "restarts"
)
gchp_vs_gcc_devrstdir = join(
config["paths"]["main_dir"], config["data"]["dev"]["gchp"]["version"]
)
gchp_vs_gchp_refrstdir = join(
config["paths"]["main_dir"], config["data"]["ref"]["gchp"]["version"]
)
gchp_vs_gchp_devrstdir = join(
config["paths"]["main_dir"], config["data"]["dev"]["gchp"]["version"]
)
# Plots directories
if config["options"]["gcpy_test"]:
mainresultsdir = join(".", config["paths"]["results_dir"])
gcc_vs_gcc_resultsdir = join(
mainresultsdir, config["options"]["comparisons"]["gcc_vs_gcc"]["dir"]
)
gchp_vs_gcc_resultsdir = join(
mainresultsdir, config["options"]["comparisons"]["gchp_vs_gcc"]["dir"]
)
gchp_vs_gchp_resultsdir = join(
            mainresultsdir, config["options"]["comparisons"]["gchp_vs_gchp"]["dir"]
)
if not exists(mainresultsdir):
os.mkdir(mainresultsdir)
# Make copy of benchmark script in results directory
curfile = os.path.realpath(__file__)
dest = join(mainresultsdir, curfile.split("/")[-1])
if not exists(dest):
copyfile(curfile, dest)
else:
gcc_vs_gcc_resultsdir = join(
config["paths"]["main_dir"],
config["data"]["dev"]["gcc"]["version"],
config["paths"]["results_dir"],
)
gchp_vs_gchp_resultsdir = join(
config["paths"]["main_dir"],
config["data"]["dev"]["gchp"]["version"],
config["paths"]["results_dir"],
            config["options"]["comparisons"]["gchp_vs_gchp"]["dir"],
)
gchp_vs_gcc_resultsdir = join(
config["paths"]["main_dir"],
config["data"]["dev"]["gchp"]["version"],
config["paths"]["results_dir"],
config["options"]["comparisons"]["gchp_vs_gcc"]["dir"],
)
base_gchp_resultsdir = join(
config["paths"]["main_dir"],
config["data"]["dev"]["gchp"]["version"],
config["paths"]["results_dir"],
)
# make results directories that don't exist
for resdir, plotting_type in zip(
[
gcc_vs_gcc_resultsdir,
base_gchp_resultsdir,
gchp_vs_gchp_resultsdir,
gchp_vs_gcc_resultsdir,
],
[
config["options"]["comparisons"]["gcc_vs_gcc"]["run"],
config["options"]["comparisons"]["gchp_vs_gcc"]["run"]
or config["options"]["comparisons"]["gchp_vs_gchp"]["run"],
config["options"]["comparisons"]["gchp_vs_gchp"]["run"],
config["options"]["comparisons"]["gchp_vs_gcc"]["run"],
],
):
if plotting_type and not exists(resdir):
os.mkdir(resdir)
if resdir in [gcc_vs_gcc_resultsdir, base_gchp_resultsdir]:
# Make copy of benchmark script in results directory
curfile = os.path.realpath(__file__)
dest = join(resdir, curfile.split("/")[-1])
if not exists(dest):
copyfile(curfile, dest)
# Tables directories
gcc_vs_gcc_tablesdir = join(
gcc_vs_gcc_resultsdir,
config["options"]["comparisons"]["gcc_vs_gcc"]["tables_subdir"],
)
gchp_vs_gcc_tablesdir = join(
gchp_vs_gcc_resultsdir,
config["options"]["comparisons"]["gchp_vs_gcc"]["tables_subdir"],
)
gchp_vs_gchp_tablesdir = join(
gchp_vs_gchp_resultsdir,
config["options"]["comparisons"]["gchp_vs_gchp"]["tables_subdir"],
)
# ======================================================================
# Plot title strings
# For gchp_vs_gcc_refstr use config["data"]["dev"]["gcc"]["version"], not ref (mps, 6/27/19)
# ======================================================================
gcc_vs_gcc_refstr = config["data"]["ref"]["gcc"]["version"]
gcc_vs_gcc_devstr = config["data"]["dev"]["gcc"]["version"]
gchp_vs_gcc_refstr = config["data"]["dev"]["gcc"]["version"]
gchp_vs_gcc_devstr = config["data"]["dev"]["gchp"]["version"]
gchp_vs_gchp_refstr = config["data"]["ref"]["gchp"]["version"]
gchp_vs_gchp_devstr = config["data"]["dev"]["gchp"]["version"]
########################################################################
### THE REST OF THESE SETTINGS SHOULD NOT NEED TO BE CHANGED ###
########################################################################
# ======================================================================
# Dates and times -- Ref data
# ======================================================================
    # Month/year strings for use in table subdirectories (e.g. Jan2016)
bmk_mon_yr_strs_ref = [v + bmk_year_ref for v in bmk_mon_strs]
    # Build an array of monthly start datetimes for the benchmark year (Ref)
bmk_start_ref = np.datetime64(bmk_year_ref + "-01-01")
bmk_end_ref = np.datetime64("{}-01-01".format(int(bmk_year_ref) + 1))
all_months_ref = np.arange(
bmk_start_ref, bmk_end_ref, step=np.timedelta64(1, "M"), dtype="datetime64[M]"
)
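    # For bmk_year_ref = "2019" this gives twelve datetime64[M] values:
    # 2019-01, 2019-02, ..., 2019-12 (one per month of the benchmark year).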
    all_months_gchp_ref = all_months_ref
    # Seconds in the Ref benchmark year (needed later for the operations
    # budget tables, regardless of the GCHP filename format)
    sec_per_yr_ref = 0
    for t in range(12):
        days_in_mon = monthrange(int(bmk_year_ref), t + 1)[1]
        sec_per_yr_ref += days_in_mon * 86400.0
    # Overwrite all_months_gchp_ref if the GCHP Ref data uses the legacy
    # filename format, which stamps files with the time-averaging period
    # mid-point rather than its start.
    if config["data"]["ref"]["gchp"]["is_legacy"]:
        all_months_gchp_ref = np.zeros(12, dtype="datetime64[h]")
        for t in range(12):
            days_in_mon = monthrange(int(bmk_year_ref), t + 1)[1]
            middle_hr = int(days_in_mon * 24 / 2)
            delta = np.timedelta64(middle_hr, "h")
            all_months_gchp_ref[t] = all_months_ref[t].astype("datetime64[h]") + delta
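    # Worked example of the legacy mid-point offset: January 2019 has 31 days,
    # so middle_hr = int(31 * 24 / 2) = 372 h, shifting 2019-01-01T00 to
    # 2019-01-16T12, the middle of the averaging period.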
# ======================================================================
# Dates and times -- Dev data
# ======================================================================
# Month/year strings for use in table subdirectories (e.g. Jan2016)
bmk_mon_yr_strs_dev = [v + bmk_year_dev for v in bmk_mon_strs]
    # Build an array of monthly start datetimes for the benchmark year (Dev)
bmk_start_dev = np.datetime64(bmk_year_dev + "-01-01")
bmk_end_dev = np.datetime64("{}-01-01".format(int(bmk_year_dev) + 1))
all_months_dev = np.arange(
bmk_start_dev, bmk_end_dev, step=np.timedelta64(1, "M"), dtype="datetime64[M]"
)
    all_months_gchp_dev = all_months_dev
    # Seconds in the Dev benchmark year (needed later for the operations
    # budget tables, regardless of the GCHP filename format)
    sec_per_yr_dev = 0
    for t in range(12):
        days_in_mon = monthrange(int(bmk_year_dev), t + 1)[1]
        sec_per_yr_dev += days_in_mon * 86400.0
    # Overwrite all_months_gchp_dev if the GCHP Dev data uses the legacy
    # filename format, which stamps files with the time-averaging period
    # mid-point rather than its start.
    if config["data"]["dev"]["gchp"]["is_legacy"]:
        all_months_gchp_dev = np.zeros(12, dtype="datetime64[h]")
        for t in range(12):
            days_in_mon = monthrange(int(bmk_year_dev), t + 1)[1]
            middle_hr = int(days_in_mon * 24 / 2)
            delta = np.timedelta64(middle_hr, "h")
            all_months_gchp_dev[t] = all_months_dev[t].astype("datetime64[h]") + delta
# =======================================================================
# Print the list of plots & tables to the screen
# =======================================================================
print("The following plots and tables will be created for {}:".format(bmk_type))
if config["options"]["outputs"]["plot_conc"]:
print(" - Concentration plots")
if config["options"]["outputs"]["plot_wetdep"]:
print(" - Convective and large-scale wet deposition plots")
if config["options"]["outputs"]["rnpbbe_budget"]:
print(" - Radionuclides budget table")
if config["options"]["outputs"]["operations_budget"]:
print(" - Operations budget table")
if config["options"]["outputs"]["ste_table"]:
print(" - Table of strat-trop exchange")
if config["options"]["outputs"]["cons_table"]:
print(" - Table of mass conservation")
print("Comparisons will be made for the following combinations:")
if config["options"]["comparisons"]["gcc_vs_gcc"]["run"]:
print(" - GCC vs GCC")
if config["options"]["comparisons"]["gchp_vs_gcc"]["run"]:
print(" - GCHP vs GCC")
if config["options"]["comparisons"]["gchp_vs_gchp"]["run"]:
print(" - GCHP vs GCHP")
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Create GCC vs GCC benchmark plots and tables
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if config["options"]["comparisons"]["gcc_vs_gcc"]["run"]:
# ==================================================================
# GCC vs GCC filepaths for StateMet collection data
# ==================================================================
refmet = get_filepaths(gcc_vs_gcc_refdir, "StateMet", all_months_ref)[0]
devmet = get_filepaths(gcc_vs_gcc_devdir, "StateMet", all_months_dev)[0]
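        # Element [0] of the get_filepaths return value is the list of 12
        # monthly StateMet files; it is indexed by mon_ind below for the
        # seasonal plots.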
# ==================================================================
# GCC vs GCC species concentration plots
# ==================================================================
if config["options"]["outputs"]["plot_conc"]:
print("\n%%% Creating GCC vs. GCC concentration plots %%%")
# Only plot concentration categories for TransportTracers
restrict_cats = ["RnPbBeTracers", "PassiveTracers"]
# --------------------------------------------------------------
# GCC vs GCC species concentration plots: Annual mean
# --------------------------------------------------------------
# Filepaths
ref = get_filepaths(gcc_vs_gcc_refdir, "SpeciesConc", all_months_ref)[0]
dev = get_filepaths(gcc_vs_gcc_devdir, "SpeciesConc", all_months_dev)[0]
# Create plots
bmk.make_benchmark_conc_plots(
ref,
gcc_vs_gcc_refstr,
dev,
gcc_vs_gcc_devstr,
refmet=refmet,
devmet=devmet,
dst=gcc_vs_gcc_resultsdir,
subdst="AnnualMean",
time_mean=True,
weightsdir=config["paths"]["weights_dir"],
benchmark_type=bmk_type,
restrict_cats=restrict_cats,
overwrite=True,
spcdb_dir=spcdb_dir,
)
# --------------------------------------------------------------
# GCC vs GCC species concentration plots: Seasonal
# --------------------------------------------------------------
for t in range(bmk_n_months):
mon_ind = bmk_mon_inds[t]
bmk.make_benchmark_conc_plots(
ref[mon_ind],
gcc_vs_gcc_refstr,
dev[mon_ind],
gcc_vs_gcc_devstr,
refmet=refmet[mon_ind],
devmet=devmet[mon_ind],
dst=gcc_vs_gcc_resultsdir,
subdst=bmk_mon_yr_strs_dev[t],
weightsdir=config["paths"]["weights_dir"],
benchmark_type=bmk_type,
restrict_cats=restrict_cats,
overwrite=True,
spcdb_dir=spcdb_dir,
)
# ==================================================================
# GCC vs GCC wet deposition plots
# ==================================================================
if config["options"]["outputs"]["plot_wetdep"]:
print("\n%%% Creating GCC vs. GCC wet deposition plots %%%")
# Diagnostic collection files to read
cols = ["WetLossConv", "WetLossLS"]
# Loop over collections
for col in cols:
# ----------------------------------------------------------
# GCC vs. GCC wet deposition plots: Annual mean
# ----------------------------------------------------------
# Filepaths
ref = get_filepaths(gcc_vs_gcc_refdir, col, all_months_ref)[0]
dev = get_filepaths(gcc_vs_gcc_devdir, col, all_months_dev)[0]
# Create plots
bmk.make_benchmark_wetdep_plots(
ref,
gcc_vs_gcc_refstr,
dev,
gcc_vs_gcc_devstr,
refmet=refmet,
devmet=devmet,
dst=gcc_vs_gcc_resultsdir,
datestr="AnnualMean",
time_mean=True,
weightsdir=config["paths"]["weights_dir"],
benchmark_type=bmk_type,
collection=col,
overwrite=True,
spcdb_dir=spcdb_dir,
)
# ----------------------------------------------------------
# GCC vs GCC wet deposition plots: Seasonal
# ----------------------------------------------------------
for t in range(bmk_n_months):
mon_ind = bmk_mon_inds[t]
bmk.make_benchmark_wetdep_plots(
ref[mon_ind],
gcc_vs_gcc_refstr,
dev[mon_ind],
gcc_vs_gcc_devstr,
refmet=refmet[mon_ind],
devmet=devmet[mon_ind],
dst=gcc_vs_gcc_resultsdir,
datestr=bmk_mon_yr_strs_dev[t],
weightsdir=config["paths"]["weights_dir"],
benchmark_type=bmk_type,
collection=col,
overwrite=True,
spcdb_dir=spcdb_dir,
)
# ==================================================================
# GCC vs GCC radionuclides budget tables
# ==================================================================
if config["options"]["outputs"]["rnpbbe_budget"]:
print("\n%%% Creating GCC vs. GCC radionuclides budget table %%%")
ttbdg.transport_tracers_budgets(
config["data"]["dev"]["gcc"]["version"],
gcc_vs_gcc_devdir,
gcc_vs_gcc_devrstdir,
int(bmk_year_dev),
dst=gcc_vs_gcc_tablesdir,
overwrite=True,
spcdb_dir=spcdb_dir,
)
# ==================================================================
# GCC vs GCC operations budgets tables
# ==================================================================
if config["options"]["outputs"]["operations_budget"]:
print("\n%%% Creating GCC vs. GCC operations budget tables %%%")
# Filepaths
refs = get_filepaths(gcc_vs_gcc_refdir, "Budget", all_months_ref)[0]
devs = get_filepaths(gcc_vs_gcc_devdir, "Budget", all_months_dev)[0]
# Create table
bmk.make_benchmark_operations_budget(
config["data"]["ref"]["gcc"]["version"],
refs,
config["data"]["dev"]["gcc"]["version"],
devs,
sec_per_yr_ref,
sec_per_yr_dev,
benchmark_type=bmk_type,
label=bmk_year_dev,
operations=[
"Chemistry",
"Convection",
"EmisDryDep",
"Mixing",
"WetDep",
],
compute_accum=False,
dst=gcc_vs_gcc_tablesdir,
)
# ==================================================================
# GCC dev strat-trop exchange table
# ==================================================================
if config["options"]["outputs"]["ste_table"]:
print("\n%%% Creating GCC vs. GCC Strat-Trop Exchange table %%%")
# Diagnostic collection files to read (all 12 months)
devs = get_filepaths(gcc_vs_gcc_devdir, "AdvFluxVert", all_months_dev)[0]
            # Make strat-trop exchange table for subset of species
ste.make_benchmark_ste_table(
config["data"]["dev"]["gcc"]["version"],
devs,
int(bmk_year_dev),
dst=gcc_vs_gcc_tablesdir,
bmk_type=bmk_type,
species=["Pb210", "Be7", "Be10"],
overwrite=True,
)
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Create GCHP vs GCC benchmark plots and tables
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if config["options"]["comparisons"]["gchp_vs_gcc"]["run"]:
# ==================================================================
# GCHP vs GCC filepaths for StateMet collection data
#
# (1) GCHP (Dev) and GCC (Ref) use the same benchmark year.
# (2) The GCC version in "GCHP vs GCC" is the Dev of "GCC vs GCC".
# ==================================================================
refmet = get_filepaths(gchp_vs_gcc_refdir, "StateMet", all_months_dev)[0]
devmet = get_filepaths(
gchp_vs_gcc_devdir,
gchp_metname(config["data"]["dev"]["gchp"]["prior_to_13"]),
all_months_gchp_dev,
is_gchp=True,
gchp_format_is_legacy=config["data"]["dev"]["gchp"]["is_legacy"],
)[0]
# ==================================================================
# GCHP vs GCC species concentration plots
# ==================================================================
if config["options"]["outputs"]["plot_conc"]:
print("\n%%% Creating GCHP vs. GCC concentration plots %%%")
# Only plot concentration categories for TransportTracers
restrict_cats = ["RnPbBeTracers", "PassiveTracers"]
# --------------------------------------------------------------
# GCHP vs GCC species concentration plots: Annual Mean
# --------------------------------------------------------------
# Filepaths
ref = get_filepaths(gchp_vs_gcc_refdir, "SpeciesConc", all_months_dev)[0]
dev = get_filepaths(
gchp_vs_gcc_devdir,
"SpeciesConc",
all_months_gchp_dev,
is_gchp=True,
gchp_format_is_legacy=config["data"]["dev"]["gchp"]["is_legacy"],
)[0]
# Create plots
bmk.make_benchmark_conc_plots(
ref,
gchp_vs_gcc_refstr,
dev,
gchp_vs_gcc_devstr,
refmet=refmet,
devmet=devmet,
dst=gchp_vs_gcc_resultsdir,
subdst="AnnualMean",
time_mean=True,
weightsdir=config["paths"]["weights_dir"],
benchmark_type=bmk_type,
restrict_cats=restrict_cats,
overwrite=True,
spcdb_dir=spcdb_dir,
)
# --------------------------------------------------------------
# GCHP vs GCC species concentration plots: Seasonal
# --------------------------------------------------------------
for t in range(bmk_n_months):
mon_ind = bmk_mon_inds[t]
bmk.make_benchmark_conc_plots(
ref[mon_ind],
gchp_vs_gcc_refstr,
dev[mon_ind],
gchp_vs_gcc_devstr,
refmet=refmet[mon_ind],
devmet=devmet[mon_ind],
dst=gchp_vs_gcc_resultsdir,
subdst=bmk_mon_yr_strs_dev[t],
weightsdir=config["paths"]["weights_dir"],
benchmark_type=bmk_type,
restrict_cats=restrict_cats,
overwrite=True,
spcdb_dir=spcdb_dir,
)
# ==================================================================
# GCHP vs GCC wet deposition plots
# ==================================================================
if config["options"]["outputs"]["plot_wetdep"]:
print("\n%%% Creating GCHP vs. GCC wet deposition plots %%%")
# Create separate set of plots for each wetdep collection
cols = ["WetLossConv", "WetLossLS"]
# Create plots for each collection and benchmark month
for col in cols:
# ----------------------------------------------------------
# GCHP vs GCC wet deposition plots: Annual mean
# ----------------------------------------------------------
# Filepaths
ref = get_filepaths(gchp_vs_gcc_refdir, col, all_months_dev)[0]
dev = get_filepaths(
gchp_vs_gcc_devdir,
col,
all_months_gchp_dev,
is_gchp=True,
gchp_format_is_legacy=config["data"]["dev"]["gchp"]["is_legacy"],
)[0]
# Create plots
bmk.make_benchmark_wetdep_plots(
ref,
gchp_vs_gcc_refstr,
dev,
gchp_vs_gcc_devstr,
devmet=devmet,
collection=col,
dst=gchp_vs_gcc_resultsdir,
datestr="AnnualMean",
time_mean=True,
weightsdir=config["paths"]["weights_dir"],
overwrite=True,
benchmark_type=bmk_type,
normalize_by_area=True,
spcdb_dir=spcdb_dir,
)
# ----------------------------------------------------------
# GCHP vs GCC wet deposition plots: Seasonal
# ----------------------------------------------------------
for t in range(bmk_n_months):
mon_ind = bmk_mon_inds[t]
bmk.make_benchmark_wetdep_plots(
ref[mon_ind],
gchp_vs_gcc_refstr,
dev[mon_ind],
gchp_vs_gcc_devstr,
refmet=refmet[mon_ind],
devmet=devmet[mon_ind],
collection=col,
dst=gchp_vs_gcc_resultsdir,
datestr=bmk_mon_yr_strs_dev[t],
weightsdir=config["paths"]["weights_dir"],
overwrite=True,
benchmark_type=bmk_type,
normalize_by_area=True,
spcdb_dir=spcdb_dir,
)
# ==================================================================
# GCHP vs GCC radionuclides budget tables
# ==================================================================
if config["options"]["outputs"]["rnpbbe_budget"]:
print("\n%%% Creating GCHP vs. GCC radionuclides budget table %%%")
ttbdg.transport_tracers_budgets(
config["data"]["dev"]["gchp"]["version"],
gchp_vs_gcc_devdir,
gchp_vs_gcc_devrstdir,
int(bmk_year_dev),
dst=gchp_vs_gcc_tablesdir,
is_gchp=True,
overwrite=True,
spcdb_dir=spcdb_dir,
)
# ==================================================================
# GCHP vs GCC operations budgets tables
# ==================================================================
if config["options"]["outputs"]["operations_budget"]:
print("\n%%% Creating GCHP vs. GCC operations budget tables %%%")
# Filepaths
col = "Budget"
refs = get_filepaths(gchp_vs_gcc_refdir, "Budget", all_months_dev)[0]
devs = get_filepaths(
gchp_vs_gcc_devdir,
col,
all_months_gchp_dev,
is_gchp=True,
gchp_format_is_legacy=config["data"]["dev"]["gchp"]["is_legacy"],
)[0]
# Make operations budget table
bmk.make_benchmark_operations_budget(
config["data"]["dev"]["gcc"]["version"],
refs,
config["data"]["dev"]["gchp"]["version"],
devs,
sec_per_yr_ref,
sec_per_yr_dev,
benchmark_type=bmk_type,
label=bmk_year_dev,
operations=[
"Chemistry",
"Convection",
"EmisDryDep",
"Mixing",
"WetDep",
],
compute_accum=False,
dst=gchp_vs_gcc_tablesdir,
)
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Create GCHP vs GCHP benchmark plots and tables
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if config["options"]["comparisons"]["gchp_vs_gchp"]["run"]:
# ==================================================================
# GCHP vs GCHP filepaths for StateMet collection data
# ==================================================================
refmet = get_filepaths(
gchp_vs_gchp_refdir,
gchp_metname(config["data"]["ref"]["gchp"]["prior_to_13"]),
all_months_gchp_ref,
is_gchp=True,
gchp_format_is_legacy=config["data"]["ref"]["gchp"]["is_legacy"],
)[0]
devmet = get_filepaths(
gchp_vs_gchp_devdir,
gchp_metname(config["data"]["dev"]["gchp"]["prior_to_13"]),
all_months_gchp_dev,
is_gchp=True,
gchp_format_is_legacy=config["data"]["dev"]["gchp"]["is_legacy"],
)[0]
# ==================================================================
# GCHP vs GCHP species concentration plots
# ==================================================================
if config["options"]["outputs"]["plot_conc"]:
print("\n%%% Creating GCHP vs. GCHP concentration plots %%%")
# Only plot concentration categories for TransportTracers
restrict_cats = ["RnPbBeTracers", "PassiveTracers"]
# --------------------------------------------------------------
# GCHP vs GCHP species concentration plots: Annual Mean
# --------------------------------------------------------------
# Filepaths
ref = get_filepaths(
gchp_vs_gchp_refdir,
"SpeciesConc",
all_months_gchp_ref,
is_gchp=True,
gchp_format_is_legacy=config["data"]["ref"]["gchp"]["is_legacy"],
)[0]
dev = get_filepaths(
gchp_vs_gchp_devdir,
"SpeciesConc",
all_months_gchp_dev,
is_gchp=True,
gchp_format_is_legacy=config["data"]["dev"]["gchp"]["is_legacy"],
)[0]
# Make concentration plots
bmk.make_benchmark_conc_plots(
ref,
gchp_vs_gchp_refstr,
dev,
gchp_vs_gchp_devstr,
refmet=refmet,
devmet=devmet,
dst=gchp_vs_gchp_resultsdir,
subdst="AnnualMean",
time_mean=True,
weightsdir=config["paths"]["weights_dir"],
benchmark_type=bmk_type,
restrict_cats=restrict_cats,
overwrite=True,
spcdb_dir=spcdb_dir,
)
# --------------------------------------------------------------
# GCHP vs GCHP species concentration plots: Seasonal
# --------------------------------------------------------------
for t in range(bmk_n_months):
mon_ind = bmk_mon_inds[t]
bmk.make_benchmark_conc_plots(
ref[mon_ind],
gchp_vs_gchp_refstr,
dev[mon_ind],
gchp_vs_gchp_devstr,
refmet=refmet[mon_ind],
devmet=devmet[mon_ind],
dst=gchp_vs_gchp_resultsdir,
subdst=bmk_mon_yr_strs_dev[t],
weightsdir=config["paths"]["weights_dir"],
benchmark_type=bmk_type,
restrict_cats=restrict_cats,
overwrite=True,
spcdb_dir=spcdb_dir,
)
# ==================================================================
# GCHP vs GCHP wet deposition plots
# ==================================================================
if config["options"]["outputs"]["plot_wetdep"]:
print("\n%%% Creating GCHP vs. GCHP wet deposition plots %%%")
# Create separate set of plots for each wetdep collection
cols = ["WetLossConv", "WetLossLS"]
# Create plots for each collection and benchmark month
for col in cols:
# ----------------------------------------------------------
# GCHP vs GCHP wet deposition plots: Annual Mean
# ----------------------------------------------------------
# Filepaths
ref = get_filepaths(
gchp_vs_gchp_refdir,
col,
all_months_gchp_ref,
is_gchp=True,
gchp_format_is_legacy=config["data"]["ref"]["gchp"]["is_legacy"],
)[0]
dev = get_filepaths(
gchp_vs_gchp_devdir,
col,
all_months_gchp_dev,
is_gchp=True,
gchp_format_is_legacy=config["data"]["dev"]["gchp"]["is_legacy"],
)[0]
# Create plots
bmk.make_benchmark_wetdep_plots(
ref,
gchp_vs_gchp_refstr,
dev,
gchp_vs_gchp_devstr,
refmet=refmet,
devmet=devmet,
collection=col,
dst=gchp_vs_gchp_resultsdir,
datestr="AnnualMean",
time_mean=True,
weightsdir=config["paths"]["weights_dir"],
overwrite=True,
benchmark_type=bmk_type,
normalize_by_area=True,
spcdb_dir=spcdb_dir,
)
# ----------------------------------------------------------
# GCHP vs GCHP wet deposition plots: Seasonal
# ----------------------------------------------------------
for t in range(bmk_n_months):
mon_ind = bmk_mon_inds[t]
bmk.make_benchmark_wetdep_plots(
ref[mon_ind],
gchp_vs_gchp_refstr,
dev[mon_ind],
gchp_vs_gchp_devstr,
refmet=refmet[mon_ind],
devmet=devmet[mon_ind],
collection=col,
dst=gchp_vs_gchp_resultsdir,
datestr=bmk_mon_yr_strs_dev[t],
weightsdir=config["paths"]["weights_dir"],
overwrite=True,
benchmark_type=bmk_type,
normalize_by_area=True,
spcdb_dir=spcdb_dir,
)
# ==================================================================
# GCHP vs GCHP radionuclides budget table
# ==================================================================
if config["options"]["outputs"]["rnpbbe_budget"]:
print("\n%%% Creating GCHP vs. GCHP radionuclides budget table %%%")
ttbdg.transport_tracers_budgets(
config["data"]["dev"]["gchp"]["version"],
gchp_vs_gchp_devdir,
gchp_vs_gchp_devrstdir,
int(bmk_year_dev),
dst=gchp_vs_gchp_tablesdir,
is_gchp=True,
overwrite=True,
spcdb_dir=spcdb_dir,
)
# ==================================================================
# GCHP vs GCHP operations budgets tables
# ==================================================================
if config["options"]["outputs"]["operations_budget"]:
print("\n%%% Creating GCHP vs. GCHP operations budget tables %%%")
# Filepaths
refs = get_filepaths(
gchp_vs_gchp_refdir,
"Budget",
all_months_gchp_ref,
is_gchp=True,
gchp_format_is_legacy=config["data"]["ref"]["gchp"]["is_legacy"],
)[0]
devs = get_filepaths(
gchp_vs_gchp_devdir,
"Budget",
all_months_gchp_dev,
is_gchp=True,
gchp_format_is_legacy=config["data"]["dev"]["gchp"]["is_legacy"],
)[0]
# Create table
bmk.make_benchmark_operations_budget(
config["data"]["dev"]["gchp"]["version"],
refs,
config["data"]["dev"]["gchp"]["version"],
devs,
sec_per_yr_ref,
sec_per_yr_dev,
benchmark_type=bmk_type,
label=bmk_year_dev,
operations=[
"Chemistry",
"Convection",
"EmisDryDep",
"Mixing",
"WetDep",
],
compute_accum=False,
dst=gchp_vs_gchp_tablesdir,
)
    # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    # Create mass conservation tables for GCC and GCHP
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if config["options"]["outputs"]["cons_table"]:
# ======================================================================
# Create mass conservation table for GCC_dev
# ======================================================================
if (
config["options"]["comparisons"]["gcc_vs_gcc"]["run"]
or config["options"]["comparisons"]["gchp_vs_gcc"]["run"]
):
print("\n%%% Creating GCC dev mass conservation table %%%")
# Filepaths
datafiles = get_filepaths(gcc_vs_gcc_devrstdir, "Restart", all_months_dev)[
0
]
# Pick output folder
if config["options"]["comparisons"]["gchp_vs_gcc"]["run"]:
tablesdir = gchp_vs_gcc_tablesdir
else:
tablesdir = gcc_vs_gcc_tablesdir
# Create table
bmk.make_benchmark_mass_conservation_table(
datafiles,
config["data"]["dev"]["gcc"]["version"],
dst=tablesdir,
overwrite=True,
spcdb_dir=spcdb_dir,
)
# ======================================================================
# Create mass conservation table for GCHP_dev
# ======================================================================
if (
config["options"]["comparisons"]["gchp_vs_gcc"]["run"]
or config["options"]["comparisons"]["gchp_vs_gchp"]["run"]
):
print("\n%%% Creating GCHP dev mass conservation table %%%")
# Filepaths
datafiles = get_filepaths(
gchp_vs_gcc_devrstdir,
"Restart",
all_months_dev,
is_gchp=True,
gchp_format_is_legacy=config["data"]["dev"]["gchp"]["is_legacy"],
)[0]
# Pick output folder
if config["options"]["comparisons"]["gchp_vs_gcc"]["run"]:
tablesdir = gchp_vs_gcc_tablesdir
else:
tablesdir = gchp_vs_gchp_tablesdir
# Create table
bmk.make_benchmark_mass_conservation_table(
datafiles,
config["data"]["dev"]["gchp"]["version"],
dst=tablesdir,
overwrite=True,
spcdb_dir=spcdb_dir,
)
def main():
"""
    Driver for extracting config information and running the 1-yr
    TransportTracers benchmark.
    Args:
        accepts one optional argument pointing to the configuration file.
        Defaults to 1yr_tt_benchmark.yml.
"""
config_filename = sys.argv[1] if len(sys.argv) == 2 else "1yr_tt_benchmark.yml"
config = read_config_file(config_filename)
run_benchmark(config)
if __name__ == "__main__":
main()
|
var searchData=
[
['saveload',['SaveLoad',['../automation_2_save_load_8js.html#aab1ff378b1a0571b96d4751a56d5d51a',1,'SaveLoad(): SaveLoad.js'],['../tests_2_save_load_8js.html#a3eb444dd4c395fc7928597dc81aff236',1,'SaveLoad(): SaveLoad.js'],['../regression_8js.html#a2206c837077e2683d87db783e7d49dfb',1,'saveload(): regression.js']]],
['score',['Score',['../_cache_8js.html#a5a7271332d2445f674b50552ad4c33e6',1,'Score(): Cache.js'],['../_score_8js.html#a3c930391dffbe8b20e232dca6e02bd79',1,'Score(): Score.js'],['../_confidence_score_8js.html#a5a7271332d2445f674b50552ad4c33e6',1,'Score(): ConfidenceScore.js'],['../tests_2_group_by_8js.html#a5a7271332d2445f674b50552ad4c33e6',1,'Score(): GroupBy.js'],['../tests_2_save_load_8js.html#a5a7271332d2445f674b50552ad4c33e6',1,'Score(): SaveLoad.js'],['../_three_hop_8js.html#a5a7271332d2445f674b50552ad4c33e6',1,'Score(): ThreeHop.js'],['../_undo_redo_8js.html#a5a7271332d2445f674b50552ad4c33e6',1,'Score(): UndoRedo.js']]],
['screenshot',['ScreenShot',['../lib_2ui_2automation_2_screen_shot_8js.html#a95148ddc1d7466eadffcacfa46566ba3',1,'ScreenShot.js']]],
['searchdata',['searchData',['../docs_2html_2search_2all__0_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_0.js'],['../docs_2html_2search_2all__1_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_1.js'],['../docs_2html_2search_2all__10_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_10.js'],['../docs_2html_2search_2all__11_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_11.js'],['../docs_2html_2search_2all__12_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_12.js'],['../docs_2html_2search_2all__13_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_13.js'],['../docs_2html_2search_2all__14_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_14.js'],['../docs_2html_2search_2all__15_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_15.js'],['../docs_2html_2search_2all__16_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_16.js'],['../docs_2html_2search_2all__17_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_17.js'],['../docs_2html_2search_2all__18_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_18.js'],['../docs_2html_2search_2all__2_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_2.js'],['../docs_2html_2search_2all__3_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_3.js'],['../docs_2html_2search_2all__4_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_4.js'],['../docs_2html_2search_2all__5_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_5.js'],['../docs_2html_2search_2all__6_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_6.js'],['../docs_2html_2search_2all__7_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_7.js'],['../docs_2html_2search_2all__8_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_8.js'],['../docs_2html_2search_2all__9_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_9.js'],['../docs_2html_2search_2all__a_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_a.js'],['../docs_2html_2search_2all__b_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_b.js'],['../docs_2html_2search_2all__c_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_c.js'],['../docs_2html_2search_2all__d_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_d.js'],['../docs_2html_2search_2all__e_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_e.js'],['../docs_2html_2search_2all__f_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_f.js'],['../docs_2html_2search_2classes__0_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): classes_0.js'],['../docs_2html_2search_2classes__1_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): classes_1.js'],['../docs_2html_2search_2classes__2_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): classes_2.js'],['../docs_2html_2search_2classes__3_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): classes_3.js'],['../docs_2html_2search_2classes__4_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): classes_4.js'],['../docs_2html_2search_2classes__5_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): classes_5.js'],['../docs_2html_2search_2classes__6_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): classes_6.js'],['../docs_2html_2search_2classes__7_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): 
classes_7.js'],['../docs_2html_2search_2classes__8_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): classes_8.js'],['../docs_2html_2search_2classes__9_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): classes_9.js'],['../docs_2html_2search_2classes__a_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): classes_a.js'],['../docs_2html_2search_2classes__b_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): classes_b.js'],['../docs_2html_2search_2classes__c_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): classes_c.js'],['../docs_2html_2search_2classes__d_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): classes_d.js'],['../docs_2html_2search_2classes__e_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): classes_e.js'],['../docs_2html_2search_2files__0_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_0.js'],['../docs_2html_2search_2files__1_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_1.js'],['../docs_2html_2search_2files__10_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_10.js'],['../docs_2html_2search_2files__11_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_11.js'],['../docs_2html_2search_2files__12_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_12.js'],['../docs_2html_2search_2files__2_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_2.js'],['../docs_2html_2search_2files__3_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_3.js'],['../docs_2html_2search_2files__4_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_4.js'],['../docs_2html_2search_2files__5_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_5.js'],['../docs_2html_2search_2files__6_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_6.js'],['../docs_2html_2search_2files__7_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_7.js'],['../docs_2html_2search_2files__8_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_8.js'],['../docs_2html_2search_2files__9_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_9.js'],['../docs_2html_2search_2files__a_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_a.js'],['../docs_2html_2search_2files__b_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_b.js'],['../docs_2html_2search_2files__c_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_c.js'],['../docs_2html_2search_2files__d_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_d.js'],['../docs_2html_2search_2files__e_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_e.js'],['../docs_2html_2search_2files__f_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_f.js'],['../docs_2html_2search_2functions__0_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_0.js'],['../docs_2html_2search_2functions__1_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_1.js'],['../docs_2html_2search_2functions__10_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_10.js'],['../docs_2html_2search_2functions__11_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_11.js'],['../docs_2html_2search_2functions__12_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_12.js'],['../docs_2html_2search_2functions__13_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): 
functions_13.js'],['../docs_2html_2search_2functions__14_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_14.js'],['../docs_2html_2search_2functions__15_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_15.js'],['../docs_2html_2search_2functions__16_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_16.js'],['../docs_2html_2search_2functions__17_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_17.js'],['../docs_2html_2search_2functions__2_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_2.js'],['../docs_2html_2search_2functions__3_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_3.js'],['../docs_2html_2search_2functions__4_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_4.js'],['../docs_2html_2search_2functions__5_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_5.js'],['../docs_2html_2search_2functions__6_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_6.js'],['../docs_2html_2search_2functions__7_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_7.js'],['../docs_2html_2search_2functions__8_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_8.js'],['../docs_2html_2search_2functions__9_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_9.js'],['../docs_2html_2search_2functions__a_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_a.js'],['../docs_2html_2search_2functions__b_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_b.js'],['../docs_2html_2search_2functions__c_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_c.js'],['../docs_2html_2search_2functions__d_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_d.js'],['../docs_2html_2search_2functions__e_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_e.js'],['../docs_2html_2search_2functions__f_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_f.js'],['../docs_2html_2search_2groups__0_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): groups_0.js'],['../docs_2html_2search_2groups__1_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): groups_1.js'],['../docs_2html_2search_2namespaces__0_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): namespaces_0.js'],['../docs_2html_2search_2pages__0_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): pages_0.js'],['../docs_2html_2search_2pages__1_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): pages_1.js'],['../docs_2html_2search_2pages__2_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): pages_2.js'],['../docs_2html_2search_2pages__3_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): pages_3.js'],['../docs_2html_2search_2pages__4_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): pages_4.js'],['../docs_2html_2search_2variables__0_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_0.js'],['../docs_2html_2search_2variables__1_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_1.js'],['../docs_2html_2search_2variables__10_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_10.js'],['../docs_2html_2search_2variables__11_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): 
variables_11.js'],['../docs_2html_2search_2variables__12_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_12.js'],['../docs_2html_2search_2variables__13_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_13.js'],['../docs_2html_2search_2variables__2_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_2.js'],['../docs_2html_2search_2variables__3_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_3.js'],['../docs_2html_2search_2variables__4_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_4.js'],['../docs_2html_2search_2variables__5_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_5.js'],['../docs_2html_2search_2variables__6_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_6.js'],['../docs_2html_2search_2variables__7_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_7.js'],['../docs_2html_2search_2variables__8_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_8.js'],['../docs_2html_2search_2variables__9_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_9.js'],['../docs_2html_2search_2variables__a_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_a.js'],['../docs_2html_2search_2variables__b_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_b.js'],['../docs_2html_2search_2variables__c_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_c.js'],['../docs_2html_2search_2variables__d_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_d.js'],['../docs_2html_2search_2variables__e_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_e.js'],['../docs_2html_2search_2variables__f_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_f.js'],['../frontend_2docs_2html_2search_2all__0_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_0.js'],['../frontend_2docs_2html_2search_2all__1_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_1.js'],['../frontend_2docs_2html_2search_2all__10_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_10.js'],['../frontend_2docs_2html_2search_2all__11_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_11.js'],['../frontend_2docs_2html_2search_2all__12_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_12.js'],['../frontend_2docs_2html_2search_2all__13_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_13.js'],['../frontend_2docs_2html_2search_2all__14_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_14.js'],['../frontend_2docs_2html_2search_2all__15_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_15.js'],['../frontend_2docs_2html_2search_2all__16_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_16.js'],['../frontend_2docs_2html_2search_2all__17_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_17.js'],['../frontend_2docs_2html_2search_2all__18_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_18.js'],['../all__19_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_19.js'],['../all__1a_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_1a.js'],['../all__1b_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_1b.js'],['../frontend_2docs_2html_2search_2all__2_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): 
all_2.js'],['../frontend_2docs_2html_2search_2all__3_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_3.js'],['../frontend_2docs_2html_2search_2all__4_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_4.js'],['../frontend_2docs_2html_2search_2all__5_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_5.js'],['../frontend_2docs_2html_2search_2all__6_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_6.js'],['../frontend_2docs_2html_2search_2all__7_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_7.js'],['../frontend_2docs_2html_2search_2all__8_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_8.js'],['../frontend_2docs_2html_2search_2all__9_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_9.js'],['../frontend_2docs_2html_2search_2all__a_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_a.js'],['../frontend_2docs_2html_2search_2all__b_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_b.js'],['../frontend_2docs_2html_2search_2all__c_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_c.js'],['../frontend_2docs_2html_2search_2all__d_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_d.js'],['../frontend_2docs_2html_2search_2all__e_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_e.js'],['../frontend_2docs_2html_2search_2all__f_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): all_f.js'],['../frontend_2docs_2html_2search_2classes__0_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): classes_0.js'],['../frontend_2docs_2html_2search_2classes__1_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): classes_1.js'],['../frontend_2docs_2html_2search_2classes__2_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): classes_2.js'],['../frontend_2docs_2html_2search_2classes__3_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): classes_3.js'],['../frontend_2docs_2html_2search_2classes__4_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): classes_4.js'],['../frontend_2docs_2html_2search_2classes__5_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): classes_5.js'],['../frontend_2docs_2html_2search_2classes__6_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): classes_6.js'],['../frontend_2docs_2html_2search_2classes__7_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): classes_7.js'],['../frontend_2docs_2html_2search_2classes__8_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): classes_8.js'],['../frontend_2docs_2html_2search_2classes__9_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): classes_9.js'],['../frontend_2docs_2html_2search_2classes__a_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): classes_a.js'],['../frontend_2docs_2html_2search_2classes__b_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): classes_b.js'],['../frontend_2docs_2html_2search_2classes__c_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): classes_c.js'],['../frontend_2docs_2html_2search_2classes__d_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): classes_d.js'],['../frontend_2docs_2html_2search_2classes__e_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): classes_e.js'],['../frontend_2docs_2html_2search_2files__0_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_0.js'],['../frontend_2docs_2html_2search_2files__1_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): 
files_1.js'],['../frontend_2docs_2html_2search_2files__10_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_10.js'],['../frontend_2docs_2html_2search_2files__11_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_11.js'],['../frontend_2docs_2html_2search_2files__12_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_12.js'],['../files__13_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_13.js'],['../frontend_2docs_2html_2search_2files__2_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_2.js'],['../frontend_2docs_2html_2search_2files__3_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_3.js'],['../frontend_2docs_2html_2search_2files__4_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_4.js'],['../frontend_2docs_2html_2search_2files__5_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_5.js'],['../frontend_2docs_2html_2search_2files__6_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_6.js'],['../frontend_2docs_2html_2search_2files__7_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_7.js'],['../frontend_2docs_2html_2search_2files__8_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_8.js'],['../frontend_2docs_2html_2search_2files__9_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_9.js'],['../frontend_2docs_2html_2search_2files__a_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_a.js'],['../frontend_2docs_2html_2search_2files__b_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_b.js'],['../frontend_2docs_2html_2search_2files__c_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_c.js'],['../frontend_2docs_2html_2search_2files__d_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_d.js'],['../frontend_2docs_2html_2search_2files__e_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_e.js'],['../frontend_2docs_2html_2search_2files__f_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): files_f.js'],['../frontend_2docs_2html_2search_2functions__0_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_0.js'],['../frontend_2docs_2html_2search_2functions__1_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_1.js'],['../frontend_2docs_2html_2search_2functions__10_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_10.js'],['../frontend_2docs_2html_2search_2functions__11_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_11.js'],['../frontend_2docs_2html_2search_2functions__12_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_12.js'],['../frontend_2docs_2html_2search_2functions__13_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_13.js'],['../frontend_2docs_2html_2search_2functions__14_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_14.js'],['../frontend_2docs_2html_2search_2functions__15_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_15.js'],['../frontend_2docs_2html_2search_2functions__16_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_16.js'],['../frontend_2docs_2html_2search_2functions__17_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_17.js'],['../frontend_2docs_2html_2search_2functions__2_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): 
functions_2.js'],['../frontend_2docs_2html_2search_2functions__3_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_3.js'],['../frontend_2docs_2html_2search_2functions__4_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_4.js'],['../frontend_2docs_2html_2search_2functions__5_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_5.js'],['../frontend_2docs_2html_2search_2functions__6_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_6.js'],['../frontend_2docs_2html_2search_2functions__7_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_7.js'],['../frontend_2docs_2html_2search_2functions__8_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_8.js'],['../frontend_2docs_2html_2search_2functions__9_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_9.js'],['../frontend_2docs_2html_2search_2functions__a_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_a.js'],['../frontend_2docs_2html_2search_2functions__b_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_b.js'],['../frontend_2docs_2html_2search_2functions__c_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_c.js'],['../frontend_2docs_2html_2search_2functions__d_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_d.js'],['../frontend_2docs_2html_2search_2functions__e_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_e.js'],['../frontend_2docs_2html_2search_2functions__f_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): functions_f.js'],['../frontend_2docs_2html_2search_2groups__0_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): groups_0.js'],['../frontend_2docs_2html_2search_2groups__1_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): groups_1.js'],['../frontend_2docs_2html_2search_2namespaces__0_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): namespaces_0.js'],['../frontend_2docs_2html_2search_2pages__0_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): pages_0.js'],['../frontend_2docs_2html_2search_2pages__1_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): pages_1.js'],['../frontend_2docs_2html_2search_2pages__2_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): pages_2.js'],['../frontend_2docs_2html_2search_2pages__3_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): pages_3.js'],['../frontend_2docs_2html_2search_2pages__4_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): pages_4.js'],['../frontend_2docs_2html_2search_2variables__0_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_0.js'],['../frontend_2docs_2html_2search_2variables__1_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_1.js'],['../frontend_2docs_2html_2search_2variables__10_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_10.js'],['../frontend_2docs_2html_2search_2variables__11_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_11.js'],['../frontend_2docs_2html_2search_2variables__12_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_12.js'],['../frontend_2docs_2html_2search_2variables__13_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_13.js'],['../variables__14_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_14.js'],['../variables__15_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): 
variables_15.js'],['../variables__16_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_16.js'],['../variables__17_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_17.js'],['../variables__18_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_18.js'],['../frontend_2docs_2html_2search_2variables__2_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_2.js'],['../frontend_2docs_2html_2search_2variables__3_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_3.js'],['../frontend_2docs_2html_2search_2variables__4_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_4.js'],['../frontend_2docs_2html_2search_2variables__5_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_5.js'],['../frontend_2docs_2html_2search_2variables__6_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_6.js'],['../frontend_2docs_2html_2search_2variables__7_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_7.js'],['../frontend_2docs_2html_2search_2variables__8_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_8.js'],['../frontend_2docs_2html_2search_2variables__9_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_9.js'],['../frontend_2docs_2html_2search_2variables__a_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_a.js'],['../frontend_2docs_2html_2search_2variables__b_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_b.js'],['../frontend_2docs_2html_2search_2variables__c_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_c.js'],['../frontend_2docs_2html_2search_2variables__d_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_d.js'],['../frontend_2docs_2html_2search_2variables__e_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_e.js'],['../frontend_2docs_2html_2search_2variables__f_8js.html#ad01a7523f103d6242ef9b0451861231e',1,'searchData(): variables_f.js']]],
['severity',['severity',['../_test_8js.html#a711528ece1dcb2611b6d477ad5abe547',1,'Test.js']]],
['shiftclickgraph',['shiftClickGraph',['../regression_8js.html#a02e4be97a02a1188952877484e322047',1,'regression.js']]],
['smtpauth',['SMTPAuth',['../sendresetlink_8php.html#ade0e27026bbf88bca46c0868cc91fee7',1,'sendresetlink.php']]],
['smtpsecure',['SMTPSecure',['../sendresetlink_8php.html#a2f8d1507d5bc89e70a62441d67603a3c',1,'sendresetlink.php']]],
['sorting',['sorting',['../regression_8js.html#aee14a3a39d3a80d818c387b3972e1cc2',1,'regression.js']]],
['state',['state',['../docs_2html_2svgpan_8js.html#aa3135e476e0928738bc75343eca76948',1,'state(): svgpan.js'],['../frontend_2docs_2html_2svgpan_8js.html#aa3135e476e0928738bc75343eca76948',1,'state(): svgpan.js']]],
['stateorigin',['stateOrigin',['../docs_2html_2svgpan_8js.html#aab84d4fa19edd654b2f3a50888066672',1,'stateOrigin(): svgpan.js'],['../frontend_2docs_2html_2svgpan_8js.html#aab84d4fa19edd654b2f3a50888066672',1,'stateOrigin(): svgpan.js']]],
['statetf',['stateTf',['../docs_2html_2svgpan_8js.html#aff9fd0c5b8d5071161aaa9a662204d75',1,'stateTf(): svgpan.js'],['../frontend_2docs_2html_2svgpan_8js.html#aff9fd0c5b8d5071161aaa9a662204d75',1,'stateTf(): svgpan.js']]],
['std_5fline_5flength',['STD_LINE_LENGTH',['../class_p_h_p_mailer_1_1_p_h_p_mailer_1_1_p_h_p_mailer.html#abf7634fad6fbcfeec5aa10eb9ef36f7b',1,'PHPMailer::PHPMailer::PHPMailer']]],
['stop_5fcontinue',['STOP_CONTINUE',['../class_p_h_p_mailer_1_1_p_h_p_mailer_1_1_p_h_p_mailer.html#aa6009cc8b3707f7d1e2831825c5f6299',1,'PHPMailer::PHPMailer::PHPMailer']]],
['stop_5fcritical',['STOP_CRITICAL',['../class_p_h_p_mailer_1_1_p_h_p_mailer_1_1_p_h_p_mailer.html#a33c7245a420133b3edfb170dcbce1ad8',1,'PHPMailer::PHPMailer::PHPMailer']]],
['stop_5fmessage',['STOP_MESSAGE',['../class_p_h_p_mailer_1_1_p_h_p_mailer_1_1_p_h_p_mailer.html#a6ef5ce0777a7aa87bf05e0c1fda2e1ab',1,'PHPMailer::PHPMailer::PHPMailer']]],
['subject',['Subject',['../sendresetlink_8php.html#a0e9638a8d3ac1704b1f52a0238ba4666',1,'sendresetlink.php']]],
['svgdoc',['svgDoc',['../docs_2html_2svgpan_8js.html#ab846d2bc8037ae845847c6ec14c0c8b6',1,'svgDoc(): svgpan.js'],['../frontend_2docs_2html_2svgpan_8js.html#ab846d2bc8037ae845847c6ec14c0c8b6',1,'svgDoc(): svgpan.js']]],
['switch',['switch',['../cache_8php.html#acc3beb8ea9c74d993abd41b69360fbf5',1,'cache.php']]]
];
|
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
//# sourceMappingURL=AdminUsersSessionGetSettingsResponse.js.map |
/**
 GridStoreAdapter
 Stores files in MongoDB using GridStore.
 Requires the database adapter to be based on mongoclient.
 @flow weak
 */
import { MongoClient, GridStore, Db } from 'mongodb';
import { FilesAdapter } from './FilesAdapter';
import defaults from '../../defaults';

export class GridStoreAdapter extends FilesAdapter {
  _databaseURI: string;
  _connectionPromise: Promise<Db>;

  constructor(mongoDatabaseURI = defaults.DefaultMongoURI) {
    super();
    this._databaseURI = mongoDatabaseURI;
  }

  _connect() {
    if (!this._connectionPromise) {
      this._connectionPromise = MongoClient.connect(this._databaseURI);
    }
    return this._connectionPromise;
  }
  // Store a file under the given filename. Returns a promise that resolves
  // once the GridStore file has been written and closed.
  createFile(filename: string, data) {
    return this._connect().then(database => {
      let gridStore = new GridStore(database, filename, 'w');
      return gridStore.open();
    }).then(gridStore => {
      return gridStore.write(data);
    }).then(gridStore => {
      return gridStore.close();
    });
  }
  // Delete the file with the given filename. Returns a promise that resolves
  // once the GridStore entry has been unlinked and closed.
  deleteFile(filename: string) {
    return this._connect().then(database => {
      let gridStore = new GridStore(database, filename, 'r');
      return gridStore.open();
    }).then((gridStore) => {
      return gridStore.unlink();
    }).then((gridStore) => {
      return gridStore.close();
    });
  }

  // Read the full contents of a stored file. Returns a promise that resolves
  // with the file data as a Buffer.
  getFileData(filename: string) {
    return this._connect().then(database => {
      return GridStore.exist(database, filename)
        .then(() => {
          let gridStore = new GridStore(database, filename, 'r');
          return gridStore.open();
        });
    }).then(gridStore => {
      return gridStore.read();
    });
  }

  // Build the public URL under which a stored file is served.
  getFileLocation(config, filename) {
    return (config.mount + '/files/' + config.applicationId + '/' + encodeURIComponent(filename));
  }

  // Return an opened GridStore handle for streaming reads of an existing file.
  getFileStream(filename: string) {
    return this._connect().then(database => {
      return GridStore.exist(database, filename).then(() => {
        let gridStore = new GridStore(database, filename, 'r');
        return gridStore.open();
      });
    });
  }
}
export default GridStoreAdapter;
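// A minimal usage sketch, not part of the original module: it assumes a local
// MongoDB instance and a hypothetical `config` object shaped like the one this
// adapter expects in getFileLocation ({ mount, applicationId }).
//
//   const adapter = new GridStoreAdapter('mongodb://localhost:27017/files');
//   adapter.createFile('hello.txt', Buffer.from('hello world'))
//     .then(() => adapter.getFileData('hello.txt'))
//     .then(data => console.log(data.toString()))   // -> 'hello world'
//     .then(() => console.log(
//       adapter.getFileLocation({ mount: '/parse', applicationId: 'appId' }, 'hello.txt')
//     ));                                            // -> '/parse/files/appId/hello.txt'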
|
# encoding: utf-8
# Copyright 2012 California Institute of Technology. ALL RIGHTS
# RESERVED. U.S. Government Sponsorship acknowledged.
'''IPDA Site views: documentation tests.'''
from ipdasite.views.testing import IPDA_SITE_VIEWS_FUNCTIONAL_TESTING as LAYER
from plone.testing import layered
import doctest
import unittest
optionFlags = (doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_ONLY_FIRST_FAILURE)
def test_suite():
    return unittest.TestSuite([
        layered(doctest.DocFileSuite('README.txt', package='ipdasite.views', optionflags=optionFlags), LAYER),
    ])


if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
|
import sys
from .base import *
# Move BASE_DIR one level up so the SQLite database file lives at the project root instead of inside the settings package.
BASE_DIR = os.path.dirname(BASE_DIR)
DEBUG = True
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
THEME_DIR = 'themes'
THEME_TYPE = 'html5up'
# THEME = THEME_DIR + '/' + THEME_TYPE
THEME = os.path.join(THEME_DIR, THEME_TYPE)
# First site-packages directory on sys.path, used below to reference templates and static files bundled with third-party packages.
SITE_PACKAGES = [s_p for s_p in sys.path if s_p.endswith('site-packages')][0]
INSTALLED_APPS += [
    'debug_toolbar',
    'silk',
]
MIDDLEWARE += [
    'debug_toolbar.middleware.DebugToolbarMiddleware',
    'silk.middleware.SilkyMiddleware',
]
INTERNAL_IPS = ['127.0.0.1']
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, 'templates', THEME),
            os.path.join(SITE_PACKAGES, 'xadmin/templates'),
            os.path.join(SITE_PACKAGES, 'crispy_forms/templates'),
            os.path.join(SITE_PACKAGES, 'reversion/templates'),
            os.path.join(SITE_PACKAGES, 'ckeditor/templates'),
            os.path.join(SITE_PACKAGES, 'ckeditor_uploader/templates'),
            os.path.join(SITE_PACKAGES, 'rest_framework/templates'),
            os.path.join(SITE_PACKAGES, 'debug_toolbar/templates'),
            os.path.join(SITE_PACKAGES, 'silk/templates'),
        ],
        'APP_DIRS': False,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
            'libraries': {
                'filters': 'templatetags.filters',
            },
        },
    },
]
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static'),
    os.path.join(SITE_PACKAGES, 'rest_framework/static'),
]
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
        'LOCATION': '/tmp/django_cache',
    }
}
CKEDITOR_CONFIGS = {
    'awesome_ckeditor': {  # the name of this CKEditor config
        'toolbar': 'Full',
        'height': 300,
        # 'width': 1200,
        'tabSpaces': 4,
    },
}
DEFAULT_FILE_STORAGE = 'blog.storage.MyStorage'
SILKY_PYTHON_PROFILER = True
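# Note: debug_toolbar and silk also need URL routes; the settings above only add
# the apps and middleware. A minimal sketch of the corresponding project urls.py
# wiring (not part of this settings module; the exact urls.py layout is an
# assumption):
#
#   from django.conf import settings
#   from django.urls import include, path
#
#   if settings.DEBUG:
#       import debug_toolbar
#       urlpatterns += [
#           path('__debug__/', include(debug_toolbar.urls)),
#           path('silk/', include('silk.urls', namespace='silk')),
#       ]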
|