Unnamed: 0
int64 0
0
| repo_id
stringlengths 5
186
| file_path
stringlengths 15
223
| content
stringlengths 1
32.8M
⌀ |
---|---|---|---|
0 | repos/libcamera/utils/tuning/libtuning | repos/libcamera/utils/tuning/libtuning/generators/generator.py | # SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2022, Paul Elder <[email protected]>
#
# Base class for a generator to convert dict to tuning file
from pathlib import Path
class Generator(object):
    """Abstract base class for tuning-file generators.

    A generator converts a dictionary of tuning results into a tuning file
    for a specific output format.
    """

    def __init__(self):
        pass

    def write(self, output_path: Path, output_dict: dict, output_order: list):
        """Write output_dict to output_path, ordering modules as in output_order.

        Subclasses must override this method.
        """
        raise NotImplementedError
|
0 | repos/libcamera/utils/tuning | repos/libcamera/utils/tuning/raspberrypi/__init__.py | # SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2022, Paul Elder <[email protected]>
|
0 | repos/libcamera/utils/tuning | repos/libcamera/utils/tuning/raspberrypi/alsc.py | # SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2022, Paul Elder <[email protected]>
#
# ALSC module instance for Raspberry Pi tuning scripts
import libtuning as lt
from libtuning.modules.lsc import ALSCRaspberryPi
# Pre-configured ALSC (Auto Lens Shading Correction) module instance used by
# the Raspberry Pi tuning script.
ALSC = \
    ALSCRaspberryPi(do_color=lt.Param('do_alsc_colour', lt.Param.Mode.Optional, True),
                    luminance_strength=lt.Param('luminance_strength', lt.Param.Mode.Optional, 0.5),
                    # Enable debug plotting
                    debug=[lt.Debug.Plot],
                    # Shading tables are computed on a 16x12 grid of sectors
                    sector_shape=(16, 12),
                    sector_x_gradient=lt.gradient.Linear(lt.Remainder.DistributeFront),
                    sector_y_gradient=lt.gradient.Linear(lt.Remainder.DistributeFront),
                    sector_average_function=lt.average.Mean(),
                    smoothing_function=lt.smoothing.MedianBlur(3),
                    )
|
0 | repos/libcamera/utils | repos/libcamera/utils/rkisp1/gen-csc-table.py | #!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright (C) 2022, Ideas on Board Oy
#
# Generate color space conversion table coefficients with configurable
# fixed-point precision
import argparse
import enum
import numpy as np
import sys
# RGB to YCbCr conversion matrices, indexed by encoding name. Each matrix row
# holds the R, G and B coefficients for Y', Cb and Cr respectively, derived
# from the luma coefficients of the corresponding standard.
encodings = {
    'rec601': [
        [ 0.299, 0.587, 0.114 ],
        [ -0.299 / 1.772, -0.587 / 1.772, 0.886 / 1.772 ],
        [ 0.701 / 1.402, -0.587 / 1.402, -0.114 / 1.402 ]
    ],
    'rec709': [
        [ 0.2126, 0.7152, 0.0722 ],
        [ -0.2126 / 1.8556, -0.7152 / 1.8556, 0.9278 / 1.8556 ],
        [ 0.7874 / 1.5748, -0.7152 / 1.5748, -0.0722 / 1.5748 ]
    ],
    'rec2020': [
        [ 0.2627, 0.6780, 0.0593 ],
        [ -0.2627 / 1.8814, -0.6780 / 1.8814, 0.9407 / 1.8814 ],
        [ 0.7373 / 1.4746, -0.6780 / 1.4746, -0.0593 / 1.4746 ],
    ],
    'smpte240m': [
        [ 0.2122, 0.7013, 0.0865 ],
        [ -0.2122 / 1.8270, -0.7013 / 1.8270, 0.9135 / 1.8270 ],
        [ 0.7878 / 1.5756, -0.7013 / 1.5756, -0.0865 / 1.5756 ],
    ],
}
class Precision(object):
    """Fixed-point precision parsed from Q notation (e.g. 'Q1.7').

    The sign bit is excluded from the notation and accounted for in total.
    Raises RuntimeError if the string is not of the form 'Q<int>.<int>'.
    """

    def __init__(self, precision):
        if precision[0].upper() != 'Q':
            raise RuntimeError(f'Invalid precision `{precision}`')

        fields = precision[1:].split('.')
        if len(fields) != 2:
            raise RuntimeError(f'Invalid precision `{precision}`')

        self.__int_bits, self.__frac_bits = (int(v) for v in fields)

    @property
    def integer(self):
        """Number of integer bits."""
        return self.__int_bits

    @property
    def fractional(self):
        """Number of fractional bits."""
        return self.__frac_bits

    @property
    def total(self):
        """Total number of bits, sign bit included."""
        return self.__int_bits + self.__frac_bits + 1
class Quantization(enum.Enum):
    # Output quantization range: FULL uses the whole 8-bit [0, 255] range,
    # LIMITED uses [16, 235] for luma and [16, 240] for chroma.
    FULL = 0
    LIMITED = 1
def scale_coeff(coeff, quantization, luma):
    """Scale a coefficient to the output range dictated by the quantization.

    Parameters
    ----------
    coeff : float
        The CSC matrix coefficient to scale
    quantization : Quantization
        The quantization, either FULL or LIMITED
    luma : bool
        True if the coefficient corresponds to a luma value, False otherwise
    """
    # The input range is assumed to be 8 bits. For limited range output, the
    # luma and chroma components span different sub-ranges.
    in_range = 255
    if quantization == Quantization.FULL:
        out_range = 255
    else:
        out_range = (235 - 16) if luma else (240 - 16)

    return coeff * out_range / in_range
def round_array(values):
"""Round a list of signed floating point values to the closest integer while
preserving the (rounded) value of the sum of all elements.
"""
# Calculate the rounding error as the difference between the rounded sum of
# values and the sum of rounded values. This is by definition an integer
# (positive or negative), which indicates how many values will need to be
# 'flipped' to the opposite rounding.
rounded_values = [round(value) for value in values]
sum_values = round(sum(values))
sum_error = sum_values - sum(rounded_values)
if sum_error == 0:
return rounded_values
# The next step is to distribute the error among the values, in a way that
# will minimize the relative error introduced in individual values. We
# extend the values list with the rounded value and original index for each
# element, and sort by rounding error. Then we modify the elements with the
# highest or lowest error, depending on whether the sum error is negative
# or positive.
values = [[value, round(value), index] for index, value in enumerate(values)]
values.sort(key=lambda v: v[1] - v[0])
# It could also be argued that the key for the sort order should not be the
# absolute rouding error but the relative error, as the impact of identical
# rounding errors will differ for coefficients with widely different values.
# This is a topic for further research.
#
# values.sort(key=lambda v: (v[1] - v[0]) / abs(v[0]))
if sum_error > 0:
for i in range(sum_error):
values[i][1] += 1
else:
for i in range(-sum_error):
values[len(values) - i - 1][1] -= 1
# Finally, sort back by index, make sure the total rounding error is now 0,
# and return the rounded values.
values.sort(key=lambda v: v[2])
values = [value[1] for value in values]
assert(sum(values) == sum_values)
return values
def main(argv):
    """Generate CSC coefficient tables for the requested encoding and print
    them as C code on standard output.

    Returns 0 on success, 1 on invalid arguments.
    """

    # Parse command line arguments.
    parser = argparse.ArgumentParser(
        description='Generate color space conversion table coefficients with '
                    'configurable fixed-point precision.'
    )
    parser.add_argument('--invert', '-i', action='store_true',
                        help='Invert the color space conversion (YUV -> RGB)')
    parser.add_argument('--precision', '-p', default='Q1.7',
                        help='The output fixed point precision in Q notation (sign bit excluded)')
    parser.add_argument('--quantization', '-q', choices=['full', 'limited'],
                        default='limited', help='Quantization range')
    parser.add_argument('encoding', choices=encodings.keys(), help='YCbCr encoding')
    args = parser.parse_args(argv[1:])

    try:
        precision = Precision(args.precision)
    except Exception:
        print(f'Invalid precision `{args.precision}`')
        return 1

    encoding = encodings[args.encoding]
    quantization = Quantization[args.quantization.upper()]

    # Scale and round the encoding coefficients based on the precision and
    # quantization range. Only the first matrix row holds luma coefficients.
    luma = True
    scaled_coeffs = []
    for line in encoding:
        line = [scale_coeff(coeff, quantization, luma) for coeff in line]
        scaled_coeffs.append(line)
        luma = False

    # Invert the matrix for the YUV -> RGB direction.
    if args.invert:
        scaled_coeffs = np.linalg.inv(scaled_coeffs)

    rounded_coeffs = []
    for line in scaled_coeffs:
        line = [coeff * (1 << precision.fractional) for coeff in line]
        # For the RGB to YUV conversion, use a rounding method that preserves
        # the rounded sum of each line to avoid biases and overflow, as the sum
        # of luma and chroma coefficients should be 1.0 and 0.0 respectively
        # (in full range). For the YUV to RGB conversion, there is no such
        # constraint, so use simple rounding.
        if args.invert:
            line = [round(coeff) for coeff in line]
        else:
            line = round_array(line)

        # Convert coefficients to the number of bits selected by the precision.
        # Negative values will be turned into positive integers using 2's
        # complement.
        line = [coeff & ((1 << precision.total) - 1) for coeff in line]
        rounded_coeffs.append(line)

    # Print the result as C code, using the smallest power-of-two integer
    # width that fits the coefficients. nbytes is the number of hex digits
    # needed to print an nbits-wide value.
    nbits = 1 << (precision.total - 1).bit_length()
    nbytes = nbits // 4
    print(f'static const u{nbits} {"yuv2rgb" if args.invert else "rgb2yuv"}_{args.encoding}_{quantization.name.lower()}_coeffs[] = {{')

    for line in rounded_coeffs:
        line = [f'0x{coeff:0{nbytes}x}' for coeff in line]
        print(f'\t{", ".join(line)},')

    print('};')

    return 0


if __name__ == '__main__':
    sys.exit(main(sys.argv))
|
0 | repos/libcamera/utils | repos/libcamera/utils/rkisp1/rkisp1-capture.sh | #!/bin/sh
# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright (C) 2019, Google Inc.
#
# Author: Laurent Pinchart <[email protected]>
#
# Capture processed frames from cameras based on the Rockchip ISP1
#
# The script makes use of the following tools, which are expected to be
# executable from the system-wide path or from the local directory:
#
# - media-ctl (from v4l-utils git://linuxtv.org/v4l-utils.git)
# - raw2rgbpnm (from git://git.retiisi.org.uk/~sailus/raw2rgbpnm.git)
# - yavta (from git://git.ideasonboard.org/yavta.git)
# Return the entity connected to a given pad
# $1: The pad, expressed as "entity":index
mc_remote_entity() {
	local entity="${1%:*}"
	local pad="${1#*:}"

	# Walk the media-ctl topology dump: track whether we are inside the
	# requested entity and pad, then print the remote entity name from the
	# first link line ("->" or "<-") found there.
	${mediactl} -p | awk '
	/^- entity / {
		in_entity=0
	}

	/^- entity [0-9]+: '"${entity}"' / {
		in_entity=1
	}

	/^[ \t]+pad/ {
		in_pad=0
	}

	/^[ \t]+pad'"${pad}"': / {
		in_pad=1
	}

	/^[ \t]+(<-|->) "[^"]+"/ {
		if (in_entity && in_pad) {
			print gensub(/^[^"]+"([^"]+)":([0-9]+).*$/, "\\1", "g")
			exit
		}
	}'
}
# Locate the sensor entity
# $1: The sensor name
# Prints "<name> <bus>" on success, exits the script with an error otherwise
find_sensor() {
	local bus
	local sensor_name=$1

	bus=$(grep "$sensor_name " /sys/class/video4linux/v4l-subdev*/name | cut -d ' ' -f 2)
	# Use a POSIX [ ] test instead of the bash-only [[ ]], as the script
	# declares #!/bin/sh and may run under dash
	if [ -z "$bus" ]; then
		echo "Sensor '$sensor_name' not found." >&2
		exit 1
	fi

	echo "$sensor_name $bus"
}
# Locate the CSI-2 receiver
# $1: The sensor name
# Prints the CSI-2 receiver entity name, or nothing when the sensor is
# connected directly to the ISP
find_csi2_rx() {
	local sensor_name=$1
	local csi2_rx

	csi2_rx=$(mc_remote_entity "$sensor_name:0")
	if [ "$csi2_rx" != rkisp1_isp ] ; then
		echo "$csi2_rx"
	fi
}
# Locate the media device
# $1: The driver name to look for
# Prints the media device node path, exits the script with an error otherwise
find_media_device() {
	local mdev
	local name=$1

	for mdev in /dev/media* ; do
		media-ctl -d "$mdev" -p | grep -q "^driver[ \t]*$name$" && break
		mdev=
	done

	# Use a POSIX [ ] test instead of the bash-only [[ ]], as the script
	# declares #!/bin/sh and may run under dash
	if [ -z "$mdev" ] ; then
		echo "$name media device not found." >&2
		exit 1
	fi

	echo "$mdev"
}
# Get the sensor format
# $1: The sensor entity name
# Sets the sensor_mbus_code and sensor_size globals from the current format
# of the sensor's source pad
get_sensor_format() {
	local format
	local sensor=$1

	format=$($mediactl --get-v4l2 "'$sensor':0" | grep 'fmt:' | sed 's/.*\(fmt:\S*\).*/\1/')
	sensor_mbus_code=$(echo $format | sed 's/fmt:\([A-Z0-9_]*\).*/\1/')
	sensor_size=$(echo $format | sed 's/[^\/]*\/\([0-9x]*\).*/\1/')

	echo "Capturing ${sensor_size} from sensor $sensor in ${sensor_mbus_code}"
}
# Configure the pipeline
# $1: The capture media bus code
# $2: The capture size
# Uses the sensor, sensor_mbus_code and sensor_size globals
configure_pipeline() {
	local format="fmt:$sensor_mbus_code/$sensor_size"
	local capture_mbus_code=$1
	local capture_size=$2
	local csi2_rx

	echo "Configuring pipeline for $sensor in $format"

	csi2_rx=$(find_csi2_rx "$sensor")

	# Reset all links, then connect the sensor (optionally through the CSI-2
	# receiver) to the ISP, and the ISP to the main path resizer
	$mediactl -r

	if [ -n "$csi2_rx" ] ; then
		$mediactl -l "'$sensor':0 -> '$csi2_rx':0 [1]"
		$mediactl -l "'$csi2_rx':1 -> 'rkisp1_isp':0 [1]"
	else
		$mediactl -l "'$sensor':0 -> 'rkisp1_isp':0 [1]"
	fi
	$mediactl -l "'rkisp1_isp':2 -> 'rkisp1_resizer_mainpath':0 [1]"

	# Propagate the formats down the pipeline
	$mediactl -V "\"$sensor\":0 [$format]"
	if [ -n "$csi2_rx" ] ; then
		$mediactl -V "'$csi2_rx':0 [$format]"
		$mediactl -V "'$csi2_rx':1 [$format]"
	fi
	$mediactl -V "'rkisp1_isp':0 [$format crop:(0,0)/$sensor_size]"
	$mediactl -V "'rkisp1_isp':2 [fmt:$capture_mbus_code/$sensor_size crop:(0,0)/$sensor_size]"
	$mediactl -V "'rkisp1_resizer_mainpath':0 [fmt:$capture_mbus_code/$sensor_size crop:(0,0)/$sensor_size]"
	$mediactl -V "'rkisp1_resizer_mainpath':1 [fmt:$capture_mbus_code/$capture_size]"
}
# Capture frames
# $1: The capture pixel format
# $2: The capture size
# $3: The number of frames to capture
# $4: Whether to save frames to disk (1) or not (0)
capture_frames() {
	local file_op
	local capture_format=$1
	local capture_size=$2
	local frame_count=$3
	local save_file=$4

	# Use a POSIX [ ] test instead of the bash-only [[ ]], as the script
	# declares #!/bin/sh and may run under dash
	if [ "$save_file" -eq 1 ]; then
		file_op="--file=/tmp/frame-#.bin"
		rm -f /tmp/frame-*.bin
	fi

	yavta -c$frame_count -n5 -I -f $capture_format -s $capture_size \
		$file_op $($mediactl -e "rkisp1_mainpath")
}
# Convert captured files to ppm
# $1: The capture pixel format
# $2: The capture size
# $3: The number of frames to convert
convert_files() {
	local format=$1
	local size=$2
	local frame_count=$3

	echo "Converting ${frame_count} frames (${size})"

	# Prefer $(...) command substitution over legacy backticks
	for i in $(seq 0 $((frame_count - 1))); do
		i=$(printf %06u "$i")
		raw2rgbpnm -f "$format" -s "$size" /tmp/frame-$i.bin /tmp/frame-$i.ppm
	done
}
# Print usage message
# $1: The script name
usage() {
	echo "Usage: $1 [options] sensor-name"
	echo "Supported options:"
	echo "-c,--count n Number of frame to capture"
	echo "--no-save Do not save captured frames to disk"
	echo "-r, --raw Capture RAW frames"
	echo "-s, --size wxh Frame size"
}
# Parse command line arguments
capture_size=1024x768
frame_count=10
raw=false
save_file=1

# Use POSIX [ ] tests throughout, as the script declares #!/bin/sh and may
# run under dash, where the bash-only [[ ]] is unavailable
while [ $# -ne 0 ] ; do
	case $1 in
	-c|--count)
		frame_count=$2
		shift 2
		;;
	--no-save)
		save_file=0
		shift
		;;
	-r|--raw)
		raw=true
		shift
		;;
	-s|--size)
		capture_size=$2
		shift 2
		;;
	-*)
		echo "Unsupported option $1" >&2
		usage $0
		exit 1
		;;
	*)
		break
		;;
	esac
done

if [ $# -ne 1 ] ; then
	usage $0
	exit 1
fi

sensor_name=$1

# Load the Rockchip ISP1 driver modules
modprobe phy_rockchip_dphy_rx0
modprobe rockchip_isp1

sensor=$(find_sensor $sensor_name) || exit
mdev=$(find_media_device rkisp1) || exit
mediactl="media-ctl -d $mdev"
get_sensor_format "$sensor"

if [ "$raw" = true ] ; then
	# Capture in the sensor's native format, stripping the bus width suffix
	capture_format=$(echo $sensor_mbus_code | sed 's/_[0-9X]*$//')
	capture_mbus_code=$sensor_mbus_code
else
	capture_format=YUYV
	capture_mbus_code=YUYV8_2X8
fi

configure_pipeline $capture_mbus_code $capture_size
capture_frames $capture_format $capture_size $frame_count $save_file
[ "$save_file" -eq 1 ] && convert_files $capture_format $capture_size $frame_count
|
0 | repos/libcamera/utils | repos/libcamera/utils/ipc/parser.py | #!/usr/bin/env python3
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (C) 2020, Google Inc.
#
# Author: Paul Elder <[email protected]>
#
# Run mojo parser with python3
import os
import sys

# TODO set sys.pycache_prefix for >= python3.8
sys.dont_write_bytecode = True

# Make sure that mojom_parser.py can import mojom
sys.path.insert(0, f'{os.path.dirname(__file__)}/mojo/public/tools/mojom')

import mojo.public.tools.mojom.mojom_parser as parser

# Forward all command line arguments (minus the script name) to the parser
parser.Run(sys.argv[1:])
|
0 | repos/libcamera/utils | repos/libcamera/utils/ipc/generate.py | #!/usr/bin/env python3
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (C) 2020, Google Inc.
#
# Author: Paul Elder <[email protected]>
#
# Run mojo code generator for generating libcamera IPC files
import os
import sys
# TODO set sys.pycache_prefix for >= python3.8
sys.dont_write_bytecode = True
sys.path.insert(0, f'{os.path.dirname(__file__)}/mojo/public/tools/bindings')
import mojo.public.tools.bindings.mojom_bindings_generator as generator
def _GetModulePath(path, output_dir):
return os.path.join(output_dir, path.relative_path())
# Disable the attribute checker to support our custom attributes. Ideally we
# should add the attributes to the list of allowed attributes in
# utils/ipc/mojo/public/tools/bindings/checks/mojom_attributes_check.py, but
# we're trying hard to use the upstream mojom as-is.
if hasattr(generator, '_BUILTIN_CHECKS'):
    del generator._BUILTIN_CHECKS['attributes']

# Override the mojo code generator's generator list to only contain our
# libcamera generator
generator._BUILTIN_GENERATORS = {'libcamera': 'mojom_libcamera_generator'}

# Override the mojo code generator's _GetModulePath method to not add
# the '-module' suffix when searching for mojo modules, so that we can
# pass the path to the mojom module without having to trim the '-module' suffix
generator._GetModulePath = _GetModulePath

# Run the bindings generator with the overrides in place
generator.main()
|
0 | repos/libcamera/utils | repos/libcamera/utils/ipc/extract-docs.py | #!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright (C) 2021, Google Inc.
#
# Author: Paul Elder <[email protected]>
#
# Extract doxygen documentation from mojom files
import argparse
import re
import sys
# Start of a doxygen comment block
regex_block_start = re.compile(r'^/\*\*$')
# End of a doxygen comment block
regex_block_end = re.compile(r'^ \*/$')
# SPDX license header in C comment style
regex_spdx = re.compile(r'^/\* SPDX-License-Identifier: .* \*/$')
def main(argv):
    """Extract doxygen comment blocks from a mojom file and emit them as a
    C++ docs file wrapped in the libcamera namespace.

    Returns 0 on success. Raises if the input lacks an SPDX header or
    contains an unterminated comment block.
    """

    # Parse command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('-o', dest='output', metavar='file',
                        type=argparse.FileType('w', encoding='utf-8'),
                        default=sys.stdout,
                        help='Output file name (default: standard output)')
    parser.add_argument('input', type=str,
                        help='Input file name.')
    args = parser.parse_args(argv[1:])

    lines = open(args.input, 'r').readlines()
    # Derive the pipeline name from the input file name
    pipeline = args.input.split('/')[-1].replace('.mojom', '')

    if not regex_spdx.match(lines[0]):
        raise Exception(f'Missing SPDX license header in {args.input}')

    # Start the output with the SPDX header followed by a fixed preamble
    data = lines[0]
    data += f'''\
/*
 * Copyright (C) 2021, Google Inc.
 *
 * Docs file for generated {pipeline}.mojom
 *
 * This file is auto-generated. Do not edit.
 */

namespace libcamera {{

'''

    # Copy every /** ... */ comment block verbatim into the output
    in_block = False
    comment = ''
    for lineno, line in enumerate(lines, start=1):
        if regex_block_start.match(line):
            if in_block:
                raise SyntaxError('Expected end of comment',
                                  (args.input, lineno, 1, line))
            in_block = True
            comment = line
            continue

        if regex_block_end.match(line):
            if in_block:
                comment += line
                data += comment + '\n'
            in_block = False
            continue

        if in_block:
            comment += line

    data += '} /* namespace libcamera */\n'

    args.output.write(data)

    return 0


if __name__ == '__main__':
    sys.exit(main(sys.argv))
|
0 | repos/libcamera/utils/ipc/mojo/public | repos/libcamera/utils/ipc/mojo/public/tools/run_all_python_unittests.py | #!/usr/bin/env python3
# Copyright 2020 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os.path
import sys

# Directory layout: this script lives in mojo/public/tools, with the mojom
# library and bindings generator in sibling subdirectories.
_TOOLS_DIR = os.path.dirname(__file__)
_MOJOM_DIR = os.path.join(_TOOLS_DIR, 'mojom')
_BINDINGS_DIR = os.path.join(_TOOLS_DIR, 'bindings')
_SRC_DIR = os.path.join(_TOOLS_DIR, os.path.pardir, os.path.pardir,
                        os.path.pardir)

# Ensure that the mojom library is discoverable.
sys.path.append(_MOJOM_DIR)
sys.path.append(_BINDINGS_DIR)

# Help Python find typ in //third_party/catapult/third_party/typ/
sys.path.append(
    os.path.join(_SRC_DIR, 'third_party', 'catapult', 'third_party', 'typ'))
import typ


def Main():
    # Discover and run all unit tests under the mojom and bindings trees.
    return typ.main(top_level_dirs=[_MOJOM_DIR, _BINDINGS_DIR])


if __name__ == '__main__':
    sys.exit(Main())
|
0 | repos/libcamera/utils/ipc/mojo/public/tools | repos/libcamera/utils/ipc/mojo/public/tools/bindings/gen_data_files_list.py | # Copyright 2017 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates a list of all files in a directory.
This script takes in a directory and an output file name as input.
It then reads the directory and creates a list of all file names
in that directory. The list is written to the output file.
There is also an option to pass in '-p' or '--pattern'
which will check each file name against a regular expression
pattern that is passed in. Only files which match the regex
will be written to the list.
"""
from __future__ import print_function
import os
import re
import sys
from optparse import OptionParser
sys.path.insert(
0,
os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "mojom"))
from mojom.generate.generator import WriteFile
def main():
  """Write the list of files in --directory matching --pattern to --output."""
  parser = OptionParser()
  parser.add_option('-d', '--directory', help='Read files from DIRECTORY')
  parser.add_option('-o', '--output', help='Write list to FILE')
  parser.add_option('-p',
                    '--pattern',
                    help='Only reads files that name matches PATTERN',
                    default=".")
  (options, _) = parser.parse_args()
  pattern = re.compile(options.pattern)
  # Non-recursive: only direct children of the directory are considered.
  files = [f for f in os.listdir(options.directory) if pattern.match(f)]
  contents = '\n'.join(f for f in files) + '\n'
  WriteFile(contents, options.output)


if __name__ == '__main__':
  sys.exit(main())
|
0 | repos/libcamera/utils/ipc/mojo/public/tools | repos/libcamera/utils/ipc/mojo/public/tools/bindings/concatenate_and_replace_closure_exports.py | #!/usr/bin/env python
# Copyright 2018 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Simple utility which concatenates a set of files into a single output file
while also stripping any goog.provide or goog.require lines. This allows us to
provide a very primitive sort of "compilation" without any extra toolchain
support and without having to modify otherwise compilable sources in the tree
which use these directives.
goog.provide lines are replaced with an equivalent invocation of
mojo.internal.exportModule, which accomplishes essentially the same thing in an
uncompiled context. A singular exception is made for the 'mojo.internal' export,
which is instead replaced with an inlined assignment to initialize the
namespace.
"""
from __future__ import print_function
import optparse
import re
import sys
_MOJO_INTERNAL_MODULE_NAME = "mojo.internal"
_MOJO_EXPORT_MODULE_SYMBOL = "mojo.internal.exportModule"


def FilterLine(filename, line, output):
  """Copy one source line to output, rewriting Closure export directives.

  goog.require lines are dropped, goog.provide lines are replaced with an
  equivalent mojo.internal.exportModule() call (or an inline namespace
  initialization for 'mojo.internal' itself), and all other lines are copied
  through unchanged.
  """
  if line.startswith("goog.require"):
    return

  if not line.startswith("goog.provide"):
    output.write(line)
    return

  match = re.match(r"goog.provide\('([^']+)'\);", line)
  if not match:
    # Malformed directive: report and abort, as upstream does.
    print("Invalid goog.provide line in %s:\n%s" % (filename, line))
    sys.exit(1)

  module_name = match.group(1)
  if module_name == _MOJO_INTERNAL_MODULE_NAME:
    output.write("self.mojo = { internal: {} };")
  else:
    output.write("%s('%s');\n" % (_MOJO_EXPORT_MODULE_SYMBOL, module_name))
def ConcatenateAndReplaceExports(filenames):
  """Concatenate all but the last filename into the last one, filtering each
  line through FilterLine.

  Returns True on success, False on a usage or I/O error.
  """
  if (len(filenames) < 2):
    print("At least two filenames (one input and the output) are required.")
    return False

  try:
    # The last filename is the output; all preceding ones are inputs.
    with open(filenames[-1], "w") as target:
      for filename in filenames[:-1]:
        with open(filename, "r") as current:
          for line in current.readlines():
            FilterLine(filename, line, target)
    return True
  except IOError as e:
    print("Error generating %s\n: %s" % (filenames[-1], e))
    return False
def main():
  # All positional arguments are filenames; the last one is the output.
  parser = optparse.OptionParser()
  parser.set_usage("""file1 [file2...] outfile
Concatenate several files into one, stripping Closure provide and
require directives along the way.""")
  (_, args) = parser.parse_args()
  sys.exit(0 if ConcatenateAndReplaceExports(args) else 1)


if __name__ == "__main__":
  main()
|
0 | repos/libcamera/utils/ipc/mojo/public/tools | repos/libcamera/utils/ipc/mojo/public/tools/bindings/mojom_bindings_generator_unittest.py | # Copyright 2014 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from mojom_bindings_generator import MakeImportStackMessage
from mojom_bindings_generator import ScrambleMethodOrdinals
class FakeIface:
  """Test double for a mojom interface definition."""

  def __init__(self):
    # Interface name and method list, filled in by individual tests.
    self.mojom_name = None
    self.methods = None


class FakeMethod:
  """Test double for a mojom method definition."""

  def __init__(self, explicit_ordinal=None):
    self.explicit_ordinal = explicit_ordinal
    # The effective ordinal starts as the explicit one and may be scrambled.
    self.ordinal = explicit_ordinal
    self.ordinal_comment = None
class MojoBindingsGeneratorTest(unittest.TestCase):
  """Tests mojo_bindings_generator."""

  def testMakeImportStackMessage(self):
    """Tests MakeImportStackMessage()."""
    # A single-element stack produces no message.
    self.assertEqual(MakeImportStackMessage(["x"]), "")
    self.assertEqual(MakeImportStackMessage(["x", "y"]),
                     "\n y was imported by x")
    self.assertEqual(MakeImportStackMessage(["x", "y", "z"]),
                     "\n z was imported by y\n y was imported by x")

  def testScrambleMethodOrdinals(self):
    """Tests ScrambleMethodOrdinals()."""
    interface = FakeIface()
    interface.mojom_name = 'RendererConfiguration'
    interface.methods = [
        FakeMethod(),
        FakeMethod(),
        FakeMethod(),
        FakeMethod(explicit_ordinal=42)
    ]
    ScrambleMethodOrdinals([interface], "foo".encode('utf-8'))

    # These next three values are hard-coded. If the generation algorithm
    # changes from being based on sha256(seed + interface.name + str(i)) then
    # these numbers will obviously need to change too.
    #
    # Note that hashlib.sha256('fooRendererConfiguration1').digest()[:4] is
    # '\xa5\xbc\xf9\xca' and that hex(1257880741) = '0x4af9bca5'. The
    # difference in 0x4a vs 0xca is because we only take 31 bits.
    self.assertEqual(interface.methods[0].ordinal, 1257880741)
    self.assertEqual(interface.methods[1].ordinal, 631133653)
    self.assertEqual(interface.methods[2].ordinal, 549336076)

    # Explicit method ordinals should not be scrambled.
    self.assertEqual(interface.methods[3].ordinal, 42)


if __name__ == "__main__":
  unittest.main()
|
0 | repos/libcamera/utils/ipc/mojo/public/tools | repos/libcamera/utils/ipc/mojo/public/tools/bindings/minify_with_terser.py | #!/usr/bin/env python3
# Copyright 2023 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# This utility minifies JS files with terser.
#
# Instance of 'node' has no 'RunNode' member (no-member)
# pylint: disable=no-member
import argparse
import os
import sys
_HERE_PATH = os.path.dirname(__file__)
_SRC_PATH = os.path.normpath(os.path.join(_HERE_PATH, '..', '..', '..', '..'))
_CWD = os.getcwd()
sys.path.append(os.path.join(_SRC_PATH, 'third_party', 'node'))
import node
import node_modules
def MinifyFile(input_file, output_file):
  # Run terser via node to mangle and compress the input JS file, stripping
  # comments from the output.
  node.RunNode([
      node_modules.PathToTerser(), input_file, '--mangle', '--compress',
      '--comments', 'false', '--output', output_file
  ])
def main(argv):
  """Minify the --input JS file with terser, writing the result to --output."""
  parser = argparse.ArgumentParser()
  parser.add_argument('--input', required=True)
  parser.add_argument('--output', required=True)
  args = parser.parse_args(argv)

  # Delete the output file if it already exists. It may be a sym link to the
  # input, because in non-optimized/pre-Terser builds the input file is copied
  # to the output location with gn copy().
  out_path = os.path.join(_CWD, args.output)
  if (os.path.exists(out_path)):
    os.remove(out_path)

  MinifyFile(os.path.join(_CWD, args.input), out_path)


if __name__ == '__main__':
  main(sys.argv[1:])
|
0 | repos/libcamera/utils/ipc/mojo/public/tools | repos/libcamera/utils/ipc/mojo/public/tools/bindings/validate_typemap_config.py | #!/usr/bin/env python
# Copyright 2020 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import os
import re
import sys
def CheckCppTypemapConfigs(target_name, config_filename, out_filename):
  """Validate a C++ typemap JSON config, then truncate out_filename as a stamp.

  Raises ValueError for an unknown config-level key or a missing 'types' list,
  and IOError for an unknown per-type key.
  """
  _SUPPORTED_CONFIG_KEYS = set([
      'types', 'traits_headers', 'traits_private_headers', 'traits_sources',
      'traits_deps', 'traits_public_deps'
  ])
  _SUPPORTED_TYPE_KEYS = set([
      'mojom', 'cpp', 'copyable_pass_by_value', 'force_serialize', 'hashable',
      'move_only', 'nullable_is_same_type', 'forward_declaration',
      'default_constructible'
  ])
  with open(config_filename, 'r') as f:
    for config in json.load(f):
      for key in config.keys():
        if key in _SUPPORTED_CONFIG_KEYS:
          continue
        raise ValueError('Invalid typemap property "%s" when processing %s' %
                         (key, target_name))

      types = config.get('types')
      if not types:
        raise ValueError('Typemap for %s must specify at least one type to map'
                         % target_name)

      for entry in types:
        for key in entry.keys():
          if key in _SUPPORTED_TYPE_KEYS:
            continue
          raise IOError(
              'Invalid type property "%s" in typemap for "%s" on target %s' %
              (key, entry.get('mojom', '(unknown)'), target_name))

  # An empty stamp file records that validation succeeded.
  with open(out_filename, 'w') as f:
    f.truncate(0)
def main():
  parser = argparse.ArgumentParser()
  _, args = parser.parse_known_args()
  # Expect exactly: target_name, config_filename, stamp_filename
  if len(args) != 3:
    print('Usage: validate_typemap_config.py target_name config_filename '
          'stamp_filename')
    sys.exit(1)

  CheckCppTypemapConfigs(args[0], args[1], args[2])


if __name__ == '__main__':
  main()
|
0 | repos/libcamera/utils/ipc/mojo/public/tools | repos/libcamera/utils/ipc/mojo/public/tools/bindings/generate_type_mappings.py | #!/usr/bin/env python
# Copyright 2016 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates a JSON typemap from its command-line arguments and dependencies.
Each typemap should be specified in an command-line argument of the form
key=value, with an argument of "--start-typemap" preceding each typemap.
For example,
generate_type_mappings.py --output=foo.typemap --start-typemap \\
public_headers=foo.h traits_headers=foo_traits.h \\
type_mappings=mojom.Foo=FooImpl
generates a foo.typemap containing
{
"c++": {
"mojom.Foo": {
"typename": "FooImpl",
"traits_headers": [
"foo_traits.h"
],
"public_headers": [
"foo.h"
]
}
}
}
Then,
generate_type_mappings.py --dependency foo.typemap --output=bar.typemap \\
--start-typemap public_headers=bar.h traits_headers=bar_traits.h \\
type_mappings=mojom.Bar=BarImpl
generates a bar.typemap containing
{
"c++": {
"mojom.Bar": {
"typename": "BarImpl",
"traits_headers": [
"bar_traits.h"
],
"public_headers": [
"bar.h"
]
},
"mojom.Foo": {
"typename": "FooImpl",
"traits_headers": [
"foo_traits.h"
],
"public_headers": [
"foo.h"
]
}
}
}
"""
import argparse
import json
import os
import re
import sys
sys.path.insert(
0,
os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "mojom"))
from mojom.generate.generator import WriteFile
def ReadTypemap(path):
  """Load a typemap JSON file and return its 'c++' mapping."""
  with open(path) as typemap_file:
    contents = json.load(typemap_file)
  return contents['c++']
def LoadCppTypemapConfig(path):
  """Parse a GN-emitted typemap config file into per-mojom-type dicts.

  Returns a dict keyed by mojom type name, each value describing how that
  type maps onto its C++ counterpart.
  """
  configs = {}
  with open(path) as config_file:
    for config in json.load(config_file):
      # Header lists are shared by every type entry of the same config.
      public_headers = config.get('traits_headers', [])
      private_headers = config.get('traits_private_headers', [])
      for entry in config['types']:
        configs[entry['mojom']] = {
            'typename': entry['cpp'],
            'forward_declaration': entry.get('forward_declaration', None),
            'public_headers': public_headers,
            'traits_headers': private_headers,
            'copyable_pass_by_value': entry.get('copyable_pass_by_value',
                                                False),
            'default_constructible': entry.get('default_constructible', True),
            'force_serialize': entry.get('force_serialize', False),
            'hashable': entry.get('hashable', False),
            'move_only': entry.get('move_only', False),
            'nullable_is_same_type': entry.get('nullable_is_same_type', False),
            'non_copyable_non_movable': False,
        }
  return configs
def main():
  """Parse arguments, merge typemaps and write the combined JSON output."""
  parser = argparse.ArgumentParser(
      description=__doc__,
      formatter_class=argparse.RawDescriptionHelpFormatter)
  parser.add_argument(
      '--dependency',
      type=str,
      action='append',
      default=[],
      help=('A path to another JSON typemap to merge into the output. '
            'This may be repeated to merge multiple typemaps.'))
  parser.add_argument(
      '--cpp-typemap-config',
      type=str,
      action='store',
      dest='cpp_config_path',
      help=('A path to a single JSON-formatted typemap config as emitted by'
            'GN when processing a mojom_cpp_typemap build rule.'))
  parser.add_argument('--output',
                      type=str,
                      required=True,
                      help='The path to which to write the generated JSON.')
  params, _ = parser.parse_known_args()
  typemaps = {}
  if params.cpp_config_path:
    typemaps = LoadCppTypemapConfig(params.cpp_config_path)
  # All dependency typemaps must exist before they are merged in.
  missing = [path for path in params.dependency if not os.path.exists(path)]
  if missing:
    raise IOError('Missing dependencies: %s' % ', '.join(missing))
  for path in params.dependency:
    typemaps.update(ReadTypemap(path))
  WriteFile(json.dumps({'c++': typemaps}, indent=2), params.output)


if __name__ == '__main__':
  main()
|
0 | repos/libcamera/utils/ipc/mojo/public/tools | repos/libcamera/utils/ipc/mojo/public/tools/bindings/README.md | # Mojom Interface Definition Language (IDL)
This document is a subset of the [Mojo documentation](/mojo/README.md).
[TOC]
## Overview
Mojom is the IDL for Mojo interfaces. Given a `.mojom` file, the
[bindings
generator](https://cs.chromium.org/chromium/src/mojo/public/tools/bindings/) can
output bindings for any supported language: **C++**, **JavaScript**, or
**Java**.
For a trivial example consider the following hypothetical Mojom file we write to
`//services/widget/public/mojom/frobinator.mojom`:
```
module widget.mojom;
interface Frobinator {
Frobinate();
};
```
This defines a single [interface](#Interfaces) named `Frobinator` in a
[module](#Modules) named `widget.mojom` (and thus fully qualified in Mojom as
`widget.mojom.Frobinator`.) Note that many interfaces and/or other types of
definitions (structs, enums, *etc.*) may be included in a single Mojom file.
If we add a corresponding GN target to
`//services/widget/public/mojom/BUILD.gn`:
```
import("mojo/public/tools/bindings/mojom.gni")
mojom("mojom") {
sources = [
"frobinator.mojom",
]
}
```
and then build this target:
```
ninja -C out/r services/widget/public/mojom
```
we'll find several generated sources in our output directory:
```
out/r/gen/services/widget/public/mojom/frobinator.mojom.cc
out/r/gen/services/widget/public/mojom/frobinator.mojom.h
out/r/gen/services/widget/public/mojom/frobinator.mojom-shared.h
etc...
```
Each of these generated source modules includes a set of definitions
representing the Mojom contents in C++. You can also build or depend on suffixed
target names to get bindings for other languages. For example,
```
ninja -C out/r services/widget/public/mojom:mojom_js
ninja -C out/r services/widget/public/mojom:mojom_java
```
would generate JavaScript and Java bindings respectively, in the same generated
output directory.
For more details regarding the generated
outputs please see
[documentation for individual target languages](#Generated-Code-For-Target-Languages).
## Mojom Syntax
Mojom IDL allows developers to define **structs**, **unions**, **interfaces**,
**constants**, and **enums**, all within the context of a **module**. These
definitions are used to generate code in the supported target languages at build
time.
Mojom files may **import** other Mojom files in order to reference their
definitions.
### Primitive Types
Mojom supports a few basic data types which may be composed into structs or used
for message parameters.
| Type | Description
|-------------------------------|-------------------------------------------------------|
| `bool` | Boolean type (`true` or `false`.)
| `int8`, `uint8` | Signed or unsigned 8-bit integer.
| `int16`, `uint16` | Signed or unsigned 16-bit integer.
| `int32`, `uint32` | Signed or unsigned 32-bit integer.
| `int64`, `uint64` | Signed or unsigned 64-bit integer.
| `float`, `double` | 32- or 64-bit floating point number.
| `string` | UTF-8 encoded string.
| `array<T>` | Array of any Mojom type *T*; for example, `array<uint8>` or `array<array<string>>`.
| `array<T, N>` | Fixed-length array of any Mojom type *T*. The parameter *N* must be an integral constant.
| `map<S, T>` | Associated array mapping values of type *S* to values of type *T*. *S* may be a `string`, `enum`, or numeric type.
| `handle` | Generic Mojo handle. May be any type of handle, including a wrapped native platform handle.
| `handle<message_pipe>` | Generic message pipe handle.
| `handle<shared_buffer>` | Shared buffer handle.
| `handle<data_pipe_producer>` | Data pipe producer handle.
| `handle<data_pipe_consumer>` | Data pipe consumer handle.
| `handle<platform>` | A native platform/OS handle.
| *`pending_remote<InterfaceType>`* | Any user-defined Mojom interface type. This is sugar for a strongly-typed message pipe handle which should eventually be used to make outgoing calls on the interface.
| *`pending_receiver<InterfaceType>`* | A pending receiver for any user-defined Mojom interface type. This is sugar for a more strongly-typed message pipe handle which is expected to receive request messages and should therefore eventually be bound to an implementation of the interface.
| *`pending_associated_remote<InterfaceType>`* | An associated interface handle. See [Associated Interfaces](#Associated-Interfaces)
| *`pending_associated_receiver<InterfaceType>`* | A pending associated receiver. See [Associated Interfaces](#Associated-Interfaces)
| *T*? | An optional (nullable) value. Primitive numeric types (integers, floats, booleans, and enums) are not nullable. All other types are nullable.
### Modules
Every Mojom file may optionally specify a single **module** to which it belongs.
This is used strictly for aggregating all defined symbols therein within a
common Mojom namespace. The specific impact this has on generated bindings code
varies for each target language. For example, if the following Mojom is used to
generate bindings:
```
module business.stuff;
interface MoneyGenerator {
GenerateMoney();
};
```
Generated C++ bindings will define a class interface `MoneyGenerator` in the
`business::stuff` namespace, while Java bindings will define an interface
`MoneyGenerator` in the `org.chromium.business.stuff` package. JavaScript
bindings at this time are unaffected by module declarations.
**NOTE:** By convention in the Chromium codebase, **all** Mojom files should
declare a module name with at least (and preferably exactly) one top-level name
as well as an inner `mojom` module suffix. *e.g.*, `chrome.mojom`,
`business.mojom`, *etc.*
This convention makes it easy to tell which symbols are generated by Mojom when
reading non-Mojom code, and it also avoids namespace collisions in the fairly
common scenario where you have a real C++ or Java `Foo` along with a
corresponding Mojom `Foo` for its serialized representation.
### Imports
If your Mojom references definitions from other Mojom files, you must **import**
those files. Import syntax is as follows:
```
import "services/widget/public/mojom/frobinator.mojom";
```
Import paths are always relative to the top-level directory.
Note that circular imports are **not** supported.
### Structs
Structs are defined using the **struct** keyword, and they provide a way to
group related fields together:
``` cpp
struct StringPair {
string first;
string second;
};
```
Struct fields may be comprised of any of the types listed above in the
[Primitive Types](#Primitive-Types) section.
Default values may be specified as long as they are constant:
``` cpp
struct Request {
int32 id = -1;
string details;
};
```
What follows is a fairly
comprehensive example using the supported field types:
``` cpp
struct StringPair {
string first;
string second;
};
enum AnEnum {
kYes,
kNo
};
interface SampleInterface {
DoStuff();
};
struct AllTheThings {
// Note that these types can never be marked nullable!
bool boolean_value;
int8 signed_8bit_value = 42;
uint8 unsigned_8bit_value;
int16 signed_16bit_value;
uint16 unsigned_16bit_value;
int32 signed_32bit_value;
uint32 unsigned_32bit_value;
int64 signed_64bit_value;
uint64 unsigned_64bit_value;
float float_value_32bit;
double float_value_64bit;
AnEnum enum_value = AnEnum.kYes;
// Strings may be nullable.
string? maybe_a_string_maybe_not;
// Structs may contain other structs. These may also be nullable.
StringPair some_strings;
StringPair? maybe_some_more_strings;
// In fact structs can also be nested, though in practice you must always make
// such fields nullable -- otherwise messages would need to be infinitely long
// in order to pass validation!
AllTheThings? more_things;
// Arrays may be templated over any Mojom type, and are always nullable:
array<int32> numbers;
array<int32>? maybe_more_numbers;
// Arrays of arrays of arrays... are fine.
array<array<array<AnEnum>>> this_works_but_really_plz_stop;
// The element type may be nullable if it's a type which is allowed to be
// nullable.
array<AllTheThings?> more_maybe_things;
// Fixed-size arrays get some extra validation on the receiving end to ensure
// that the correct number of elements is always received.
array<uint64, 2> uuid;
// Maps follow many of the same rules as arrays. Key types may be any
// non-handle, non-collection type, and value types may be any supported
// struct field type. Maps may also be nullable.
map<string, int32> one_map;
map<AnEnum, string>? maybe_another_map;
map<StringPair, AllTheThings?>? maybe_a_pretty_weird_but_valid_map;
map<StringPair, map<int32, array<map<string, string>?>?>?> ridiculous;
// And finally, all handle types are valid as struct fields and may be
// nullable. Note that interfaces and interface requests (the "Foo" and
// "Foo&" type syntax respectively) are just strongly-typed message pipe
// handles.
handle generic_handle;
handle<data_pipe_consumer> reader;
handle<data_pipe_producer>? maybe_writer;
handle<shared_buffer> dumping_ground;
handle<message_pipe> raw_message_pipe;
pending_remote<SampleInterface>? maybe_a_sample_interface_client_pipe;
pending_receiver<SampleInterface> non_nullable_sample_pending_receiver;
pending_receiver<SampleInterface>? nullable_sample_pending_receiver;
pending_associated_remote<SampleInterface> associated_interface_client;
pending_associated_receiver<SampleInterface> associated_pending_receiver;
pending_associated_receiver<SampleInterface>? maybe_another_pending_receiver;
};
```
For details on how all of these different types translate to usable generated
code, see
[documentation for individual target languages](#Generated-Code-For-Target-Languages).
### Unions
Mojom supports tagged unions using the **union** keyword. A union is a
collection of fields which may take the value of any single one of those fields
at a time. Thus they provide a way to represent a variant value type while
minimizing storage requirements.
Union fields may be of any type supported by [struct](#Structs) fields. For
example:
```cpp
union ExampleUnion {
string str;
StringPair pair;
int64 id;
array<uint64, 2> guid;
SampleInterface iface;
};
```
For details on how unions like this translate to generated bindings code, see
[documentation for individual target languages](#Generated-Code-For-Target-Languages).
### Enumeration Types
Enumeration types may be defined using the **enum** keyword either directly
within a module or nested within the namespace of some struct or interface:
```
module business.mojom;
enum Department {
kSales = 0,
kDev,
};
struct Employee {
enum Type {
kFullTime,
kPartTime,
};
Type type;
// ...
};
```
C++ constant-style enum value names are preferred as specified in the
[Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html#Enumerator_Names).
Similar to C-style enums, individual values may be explicitly assigned within an
enum definition. By default, values are based at zero and increment by
1 sequentially.
The effect of nested definitions on generated bindings varies depending on the
target language. See [documentation for individual target languages](#Generated-Code-For-Target-Languages).
### Constants
Constants may be defined using the **const** keyword either directly within a
module or nested within the namespace of some struct or interface:
```
module business.mojom;
const string kServiceName = "business";
struct Employee {
const uint64 kInvalidId = 0;
enum Type {
kFullTime,
kPartTime,
};
uint64 id = kInvalidId;
Type type;
};
```
The effect of nested definitions on generated bindings varies depending on the
target language. See [documentation for individual target languages](#Generated-Code-For-Target-Languages).
### Features
Features can be declared with a `name` and `default_state` and can be attached
in mojo to interfaces or methods using the `RuntimeFeature` attribute. If the
feature is disabled at runtime, the method will crash and the interface will
refuse to be bound / instantiated. Features cannot be serialized to be sent over
IPC at this time.
```
module experimental.mojom;
feature kUseElevators {
const string name = "UseElevators";
const bool default_state = false;
}
[RuntimeFeature=kUseElevators]
interface Elevator {
// This interface cannot be bound or called if the feature is disabled.
}
interface Building {
// This method cannot be called if the feature is disabled.
[RuntimeFeature=kUseElevators]
CallElevator(int floor);
// This method can be called.
RingDoorbell(int volume);
}
```
### Interfaces
An **interface** is a logical bundle of parameterized request messages. Each
request message may optionally define a parameterized response message. Here's
an example to define an interface `Foo` with various kinds of requests:
```
interface Foo {
// A request which takes no arguments and expects no response.
MyMessage();
// A request which has some arguments and expects no response.
MyOtherMessage(string name, array<uint8> bytes);
// A request which expects a single-argument response.
MyMessageWithResponse(string command) => (bool success);
// A request which expects a response with multiple arguments.
MyMessageWithMoarResponse(string a, string b) => (int8 c, int8 d);
};
```
Anything which is a valid struct field type (see [Structs](#Structs)) is also a
valid request or response argument type. The type notation is the same for both.
### Attributes
Mojom definitions may have their meaning altered by **attributes**, specified
with a syntax similar to Java or C# attributes. There are a handful of
interesting attributes supported today.
* **`[Sync]`**:
The `Sync` attribute may be specified for any interface method which expects a
response. This makes it so that callers of the method can wait synchronously
for a response. See [Synchronous
Calls](/mojo/public/cpp/bindings/README.md#Synchronous-Calls) in the C++
bindings documentation. Note that sync methods are only actually synchronous
when called from C++.
* **`[NoInterrupt]`**:
When a thread is waiting for a reply to a `Sync` message, it's possible to be
woken up to dispatch other unrelated incoming `Sync` messages. This measure
helps to avoid deadlocks. If a `Sync` message is also marked as `NoInterrupt`
however, this behavior is disabled: instead the calling thread will only wake
up for the precise message being waited upon. This attribute must be used with
extreme caution, because it can lead to deadlocks otherwise.
* **`[Default]`**:
The `Default` attribute may be used to specify an enumerator value or union
field that will be used if an `Extensible` enumeration or union does not
deserialize to a known value on the receiver side, i.e. the sender is using a
newer version of the enum or union. This allows unknown values to be mapped to
a well-defined value that can be appropriately handled.
Note: The `Default` field for a union must be of nullable or integral type.
When a union is defaulted to this field, the field takes on the default value
for its type: null for nullable types, and zero/false for integral types.
* **`[Extensible]`**:
The `Extensible` attribute may be specified for any enum or union definition.
For enums, this essentially disables builtin range validation when receiving
values of the enum type in a message, allowing older bindings to tolerate
unrecognized values from newer versions of the enum.
If an enum value within an extensible enum definition is affixed with the
`Default` attribute, out-of-range values for the enum will deserialize to that
default value. Only one enum value may be designated as the `Default`.
Similarly, a union marked `Extensible` will deserialize to its `Default` field
when an unrecognized field is received. Extensible unions MUST specify exactly
one `Default` field, and the field must be of nullable or integral type. When
defaulted to this field, the value is always null/zero/false as appropriate.
An `Extensible` enumeration REQUIRES that a `Default` value be specified,
so all new extensible enums should specify one.
* **`[Native]`**:
The `Native` attribute may be specified for an empty struct declaration to
provide a nominal bridge between Mojo IPC and legacy `IPC::ParamTraits` or
`IPC_STRUCT_TRAITS*` macros. See [Repurposing Legacy IPC
Traits](/docs/mojo_ipc_conversion.md#repurposing-and-invocations) for more
details. Note support for this attribute is strictly limited to C++ bindings
generation.
* **`[MinVersion=N]`**:
The `MinVersion` attribute is used to specify the version at which a given
field, enum value, interface method, or method parameter was introduced.
See [Versioning](#Versioning) for more details. `MinVersion` does not apply
to interfaces, structs or enums, but to the fields of those types.
`MinVersion` is not a module-global value, but it is ok to pretend it is by
skipping versions when adding fields or parameters.
* **`[Stable]`**:
The `Stable` attribute specifies that a given mojom type or interface
definition can be considered stable over time, meaning it is safe to use for
things like persistent storage or communication between independent
version-skewed binaries. Stable definitions may only depend on builtin mojom
types or other stable definitions, and changes to such definitions MUST
preserve backward-compatibility through appropriate use of versioning.
Backward-compatibility of changes is enforced in the Chromium tree using a
strict presubmit check. See [Versioning](#Versioning) for more details on
backward-compatibility constraints.
* **`[Uuid=<UUID>]`**:
Specifies a UUID to be associated with a given interface. The UUID is intended
to remain stable across all changes to the interface definition, including
name changes. The value given for this attribute should be a standard UUID
string representation as specified by RFC 4122. New UUIDs can be generated
with common tools such as `uuidgen`.
* **`[RuntimeFeature=feature]`**
The `RuntimeFeature` attribute should reference a mojo `feature`. If this
feature is enabled (e.g. using `--enable-features={feature.name}`) then the
interface behaves entirely as expected. If the feature is not enabled the
interface cannot be bound to a concrete receiver or remote - attempting to do
so will result in the receiver or remote being reset() to an unbound state.
Note that this is a different concept to the build-time `EnableIf` directive.
`RuntimeFeature` is currently only supported for C++ bindings and has no
effect for, say, Java or TypeScript bindings (see https://crbug.com/1278253).
* **`[EnableIf=value]`**:
The `EnableIf` attribute is used to conditionally enable definitions when the
mojom is parsed. If the `mojom` target in the GN file does not include the
matching `value` in the list of `enabled_features`, the definition will be
disabled. This is useful for mojom definitions that only make sense on one
platform. Note that the `EnableIf` attribute can only be set once per
definition and cannot be set at the same time as `EnableIfNot`. Also be aware
that only one condition can be tested, `EnableIf=value,xyz` introduces a new
`xyz` attribute. `xyz` is not part of the `EnableIf` condition that depends
only on the feature `value`. Complex conditions can be introduced via
enabled_features in `build.gn` files.
* **`[EnableIfNot=value]`**:
The `EnableIfNot` attribute is used to conditionally enable definitions when
the mojom is parsed. If the `mojom` target in the GN file includes the
matching `value` in the list of `enabled_features`, the definition will be
disabled. This is useful for mojom definitions that only make sense on all but
one platform. Note that the `EnableIfNot` attribute can only be set once per
definition and cannot be set at the same time as `EnableIf`.
* **`[ServiceSandbox=value]`**:
The `ServiceSandbox` attribute is used in Chromium to tag which sandbox a
service hosting an implementation of interface will be launched in. This only
applies to `C++` bindings. `value` should match a constant defined in an
imported `sandbox.mojom.Sandbox` enum (for Chromium this is
`//sandbox/policy/mojom/sandbox.mojom`), such as `kService`.
* **`[RequireContext=enum]`**:
The `RequireContext` attribute is used in Chromium to tag interfaces that
should be passed (as remotes or receivers) only to privileged process
contexts. The process context must be an enum that is imported into the
mojom that defines the tagged interface. `RequireContext` may be used in
future to DCHECK or CHECK if remotes are made available in contexts that
conflict with the one provided in the interface definition. Process contexts
are not the same as the sandbox a process is running in, but will reflect
the set of capabilities provided to the service.
* **`[AllowedContext=enum]`**:
The `AllowedContext` attribute is used in Chromium to tag methods that pass
remotes or receivers of interfaces that are marked with a `RequireContext`
attribute. The enum provided on the method must be equal or better (lower
numerically) than the one required on the interface being passed. At present
failing to specify an adequate `AllowedContext` value will cause mojom
generation to fail at compile time. In future DCHECKs or CHECKs might be
added to enforce that method is only called from a process context that meets
the given `AllowedContext` value. The enum must be of the same type as that
specified in the interface's `RequireContext` attribute. Adding an
`AllowedContext` attribute to a method is a strong indication that you need
a detailed security review of your design - please reach out to the security
team.
* **`[SupportsUrgent]`**:
The `SupportsUrgent` attribute is used in conjunction with
`mojo::UrgentMessageScope` in Chromium to tag messages as having high
priority. The IPC layer notifies the underlying scheduler upon both receiving
and processing an urgent message. At present, this attribute only affects
channel associated messages in the renderer process.
## Generated Code For Target Languages
When the bindings generator successfully processes an input Mojom file, it emits
corresponding code for each supported target language. For more details on how
Mojom concepts translate to a given target language, please refer to the
bindings API documentation for that language:
* [C++ Bindings](/mojo/public/cpp/bindings/README.md)
* [JavaScript Bindings](/mojo/public/js/README.md)
* [Java Bindings](/mojo/public/java/bindings/README.md)
## Message Validation
Regardless of target language, all interface messages are validated during
deserialization before they are dispatched to a receiving implementation of the
interface. This helps to ensure consistent validation across interfaces without
leaving the burden to developers and security reviewers every time a new message
is added.
If a message fails validation, it is never dispatched. Instead a **connection
error** is raised on the binding object (see
[C++ Connection Errors](/mojo/public/cpp/bindings/README.md#Connection-Errors),
[Java Connection Errors](/mojo/public/java/bindings/README.md#Connection-Errors),
or
[JavaScript Connection Errors](/mojo/public/js/README.md#Connection-Errors) for
details.)
Some baseline level of validation is done automatically for primitive Mojom
types.
### Non-Nullable Objects
Mojom fields or parameter values (*e.g.*, structs, interfaces, arrays, *etc.*)
may be marked nullable in Mojom definitions (see
[Primitive Types](#Primitive-Types).) If a field or parameter is **not** marked
nullable but a message is received with a null value in its place, that message
will fail validation.
### Enums
Enums declared in Mojom are automatically validated against the range of legal
values. For example if a Mojom declares the enum:
``` cpp
enum AdvancedBoolean {
kTrue = 0,
kFalse = 1,
kFileNotFound = 2,
};
```
and a message is received with the integral value 3 (or anything other than 0,
1, or 2) in place of some `AdvancedBoolean` field or parameter, the message will
fail validation.
*** note
NOTE: It's possible to avoid this type of validation error by explicitly marking
an enum as [Extensible](#Attributes) if you anticipate your enum being exchanged
between two different versions of the binding interface. See
[Versioning](#Versioning).
***
### Other failures
There are a host of internal validation errors that may occur when a malformed
message is received, but developers should not be concerned with these
specifically; in general they can only result from internal bindings bugs,
compromised processes, or some remote endpoint making a dubious effort to
manually encode their own bindings messages.
### Custom Validation
It's also possible for developers to define custom validation logic for specific
Mojom struct types by exploiting the
[type mapping](/mojo/public/cpp/bindings/README.md#Type-Mapping) system for C++
bindings. Messages rejected by custom validation logic trigger the same
validation failure behavior as the built-in type validation routines.
## Associated Interfaces
As mentioned in the [Primitive Types](#Primitive-Types) section above, pending_remote
and pending_receiver fields and parameters may be marked as `associated`. This
essentially means that they are piggy-backed on some other interface's message
pipe.
Because individual interface message pipes operate independently there can be no
relative ordering guarantees among them. Associated interfaces are useful when
one interface needs to guarantee strict FIFO ordering with respect to one or
more other interfaces, as they allow interfaces to share a single pipe.
Currently associated interfaces are only supported in generated C++ bindings.
See the documentation for
[C++ Associated Interfaces](/mojo/public/cpp/bindings/README.md#Associated-Interfaces).
## Versioning
### Overview
*** note
**NOTE:** You don't need to worry about versioning if you don't care about
backwards compatibility. Today, all parts of the Chrome browser are
updated atomically and there is not yet any possibility of any two
Chrome processes communicating with two different versions of any given Mojom
interface. On Chrome OS, there are several places where versioning is required.
For example,
[ARC++](https://developer.android.com/chrome-os/intro)
uses versioned mojo to send IPC to the Android container.
Likewise, the
[Lacros](/docs/lacros.md)
browser uses versioned mojo to talk to the ash system UI.
***
Services extend their interfaces to support new features over time, and clients
want to use those new features when they are available. If services and clients
are not updated at the same time, it's important for them to be able to
communicate with each other using different snapshots (versions) of their
interfaces.
This document shows how to extend Mojom interfaces in a backwards-compatible
way. Changing interfaces in a non-backwards-compatible way is not discussed,
because in that case communication between different interface versions is
impossible anyway.
### Versioned Structs
You can use the `MinVersion` [attribute](#Attributes) to indicate from which
version a struct field is introduced. Assume you have the following struct:
``` cpp
struct Employee {
uint64 employee_id;
string name;
};
```
and you would like to add birthday and nickname fields. You can add them as
optional types with a `MinVersion` like so:
``` cpp
struct Employee {
uint64 employee_id;
string name;
[MinVersion=1] Date? birthday;
[MinVersion=1] string? nickname;
};
```
*** note
**NOTE:** Mojo object or handle types added with a `MinVersion` **MUST** be
optional (nullable) or primitive. See [Primitive Types](#Primitive-Types) for
details on nullable values.
***
By default, fields belong to version 0. New fields must be appended to the
struct definition (*i.e*., existing fields must not change **ordinal value**)
with the `MinVersion` attribute set to a number greater than any previous
existing versions.
The value of `MinVersion` is unrelated to ordinals. The choice of a particular
version number is arbitrary. All its usage means is that a field isn't present
before the numbered version.
*** note
**NOTE:** do not change existing fields in versioned structs, as this is
not backwards-compatible. Instead, rename the old field to make its
deprecation clear and add a new field with a new `MinVersion` number.
***
**Ordinal value** refers to the relative positional layout of a struct's fields
(and an interface's methods) when encoded in a message. Implicitly, ordinal
numbers are assigned to fields according to lexical position. In the example
above, `employee_id` has an ordinal value of 0 and `name` has an ordinal value
of 1.
Ordinal values can be specified explicitly using `**@**` notation, subject to
the following hard constraints:
* For any given struct or interface, if any field or method explicitly specifies
an ordinal value, all fields or methods must explicitly specify an ordinal
value.
* For an *N*-field struct, the set of explicitly assigned ordinal values must be
limited to the range *[0, N-1]*. Structs should include placeholder fields
to fill the ordinal positions of removed fields (for example "Unused_Field"
or "RemovedField", etc).
You may reorder fields, but you must ensure that the ordinal values of existing
fields remain unchanged. For example, the following struct remains
backwards-compatible:
``` cpp
struct Employee {
uint64 employee_id@0;
[MinVersion=1] Date? birthday@2;
string name@1;
[MinVersion=1] string? nickname@3;
};
```
### Versioned Interfaces
There are two dimensions on which an interface can be extended:
**Appending New Parameters To Existing Methods**
: Parameter lists are treated as structs internally, so all the rules of
versioned structs apply to method parameter lists. The only difference is
that the version number is scoped to the whole interface rather than to any
individual parameter list.
``` cpp
// Old version:
interface HumanResourceDatabase {
QueryEmployee(uint64 id) => (Employee? employee);
};
// New version:
interface HumanResourceDatabase {
QueryEmployee(uint64 id, [MinVersion=1] bool retrieve_finger_print)
=> (Employee? employee,
[MinVersion=1] array<uint8>? finger_print);
};
```
Similar to [versioned structs](#Versioned-Structs), when you pass the parameter
list of a request or response method to a destination using an older version of
an interface, unrecognized fields are silently discarded.
Please note that adding a response to a message which did not previously
expect a response is not a backwards-compatible change.
**Appending New Methods**
: Similarly, you can reorder methods with explicit ordinal values as long as
the ordinal values of existing methods are unchanged.
For example:
``` cpp
// Old version:
interface HumanResourceDatabase {
QueryEmployee(uint64 id) => (Employee? employee);
};
// New version:
interface HumanResourceDatabase {
QueryEmployee(uint64 id) => (Employee? employee);
[MinVersion=1]
AttachFingerPrint(uint64 id, array<uint8> finger_print)
=> (bool success);
};
```
If a method call is not recognized, it is considered a validation error and the
receiver will close its end of the interface pipe. For example, if a client on
version 1 of the above interface sends an `AttachFingerPrint` request to an
implementation of version 0, the client will be disconnected.
Bindings target languages that support versioning expose means to query or
assert the remote version from a client handle (*e.g.*, an
`mojo::Remote<T>` in C++ bindings.)
See
[C++ Versioning Considerations](/mojo/public/cpp/bindings/README.md#Versioning-Considerations)
and
[Java Versioning Considerations](/mojo/public/java/bindings/README.md#Versioning-Considerations)
### Versioned Enums
**By default, enums are non-extensible**, which means that generated message
validation code does not expect to see new values in the future. When an unknown
value is seen for a non-extensible enum field or parameter, a validation error
is raised.
If you want an enum to be extensible in the future, you can apply the
`[Extensible]` [attribute](#Attributes):
``` cpp
[Extensible]
enum Department {
kSales,
kDev,
};
```
And later you can extend this enum without breaking backwards compatibility:
``` cpp
[Extensible]
enum Department {
kSales,
kDev,
[MinVersion=1] kResearch,
};
```
*** note
**NOTE:** For versioned enum definitions, the use of a `[MinVersion]` attribute
is strictly for documentation purposes. It has no impact on the generated code.
***
With extensible enums, bound interface implementations may receive unknown enum
values and will need to deal with them gracefully. See
[C++ Versioning Considerations](/mojo/public/cpp/bindings/README.md#Versioning-Considerations)
for details.
### Renaming versioned structs
It's possible to rename versioned structs by using the `[RenamedFrom]`
attribute.
``` cpp
module asdf.mojom;
// Old version:
[Stable]
struct OldStruct {
};
// New version:
[Stable, RenamedFrom="asdf.mojom.OldStruct"]
struct NewStruct {
};
```
## Component targets
If there are multiple components depending on the same mojom target within one binary,
the target will need to be defined as `mojom_component` instead of `mojom`.
Since `mojom` targets are generated `source_set` targets and `mojom_component` targets
are generated `component` targets, you would use `mojom_component` in the same cases
where you would use `component` for non-mojom files.
*** note
**NOTE**: by default, components for both blink and non-blink bindings are generated.
Use the `disable_variants` target parameter to generate only non-blink bindings.
You can also generate a `source_set` for one of the variants by defining
[export_*](https://source.chromium.org/chromium/chromium/src/+/main:mojo/public/tools/bindings/mojom.gni;drc=739b9fbce50310c1dd2b59c279cd90a9319cb6e8;l=318)
parameters for the `mojom_component` target.
***
## Grammar Reference
Below is the (BNF-ish) context-free grammar of the Mojom language:
```
MojomFile = StatementList
StatementList = Statement StatementList | Statement
Statement = ModuleStatement | ImportStatement | Definition
ModuleStatement = AttributeSection "module" Identifier ";"
ImportStatement = "import" StringLiteral ";"
Definition = Struct Union Interface Enum Feature Const
AttributeSection = <empty> | "[" AttributeList "]"
AttributeList = <empty> | NonEmptyAttributeList
NonEmptyAttributeList = Attribute
| Attribute "," NonEmptyAttributeList
Attribute = Name
| Name "=" Name
| Name "=" Literal
Struct = AttributeSection "struct" Name "{" StructBody "}" ";"
| AttributeSection "struct" Name ";"
StructBody = <empty>
| StructBody Const
| StructBody Enum
| StructBody StructField
StructField = AttributeSection TypeSpec Name Ordinal Default ";"
Union = AttributeSection "union" Name "{" UnionBody "}" ";"
UnionBody = <empty> | UnionBody UnionField
UnionField = AttributeSection TypeSpec Name Ordinal ";"
Interface = AttributeSection "interface" Name "{" InterfaceBody "}" ";"
InterfaceBody = <empty>
| InterfaceBody Const
| InterfaceBody Enum
| InterfaceBody Method
Method = AttributeSection Name Ordinal "(" ParameterList ")" Response ";"
ParameterList = <empty> | NonEmptyParameterList
NonEmptyParameterList = Parameter
| Parameter "," NonEmptyParameterList
Parameter = AttributeSection TypeSpec Name Ordinal
Response = <empty> | "=>" "(" ParameterList ")"
TypeSpec = TypeName "?" | TypeName
TypeName = BasicTypeName
| Array
| FixedArray
| Map
| InterfaceRequest
BasicTypeName = Identifier | "associated" Identifier | HandleType | NumericType
NumericType = "bool" | "int8" | "uint8" | "int16" | "uint16" | "int32"
| "uint32" | "int64" | "uint64" | "float" | "double"
HandleType = "handle" | "handle" "<" SpecificHandleType ">"
SpecificHandleType = "message_pipe"
| "shared_buffer"
| "data_pipe_consumer"
| "data_pipe_producer"
| "platform"
Array = "array" "<" TypeSpec ">"
FixedArray = "array" "<" TypeSpec "," IntConstDec ">"
Map = "map" "<" Identifier "," TypeSpec ">"
InterfaceRequest = Identifier "&" | "associated" Identifier "&"
Ordinal = <empty> | OrdinalValue
Default = <empty> | "=" Constant
Enum = AttributeSection "enum" Name "{" NonEmptyEnumValueList "}" ";"
| AttributeSection "enum" Name "{" NonEmptyEnumValueList "," "}" ";"
NonEmptyEnumValueList = EnumValue | NonEmptyEnumValueList "," EnumValue
EnumValue = AttributeSection Name
| AttributeSection Name "=" Integer
| AttributeSection Name "=" Identifier
; Note: `feature` is a weak keyword and can appear as, say, a struct field name.
Feature = AttributeSection "feature" Name "{" FeatureBody "}" ";"
| AttributeSection "feature" Name ";"
FeatureBody = <empty>
| FeatureBody FeatureField
FeatureField = AttributeSection TypeSpec Name Default ";"
Const = "const" TypeSpec Name "=" Constant ";"
Constant = Literal | Identifier ";"
Identifier = Name | Name "." Identifier
Literal = Integer | Float | "true" | "false" | "default" | StringLiteral
Integer = IntConst | "+" IntConst | "-" IntConst
IntConst = IntConstDec | IntConstHex
Float = FloatConst | "+" FloatConst | "-" FloatConst
; The rules below are for tokens matched strictly according to the given regexes
Identifier = /[a-zA-Z_][0-9a-zA-Z_]*/
IntConstDec = /0|(1-9[0-9]*)/
IntConstHex = /0[xX][0-9a-fA-F]+/
OrdinalValue = /@(0|(1-9[0-9]*))/
FloatConst = ... # Imagine it's close enough to C-style float syntax.
StringLiteral = ... # Imagine it's close enough to C-style string literals, including escapes.
```
## Additional Documentation
[Mojom Message Format](https://docs.google.com/document/d/13pv9cFh5YKuBggDBQ1-AL8VReF-IYpFOFpRfvWFrwio/edit)
: Describes the wire format used by Mojo bindings interfaces over message
pipes.
[Input Format of Mojom Message Validation Tests](https://docs.google.com/document/d/1-y-2IYctyX2NPaLxJjpJfzVNWCC2SR2MJAD9MpIytHQ/edit)
: Describes a text format used to facilitate bindings message validation
tests.
|
0 | repos/libcamera/utils/ipc/mojo/public/tools | repos/libcamera/utils/ipc/mojo/public/tools/bindings/concatenate-files.py | #!/usr/bin/env python
# Copyright 2019 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# This utility concatenates several files into one. On Unix-like systems
# it is equivalent to:
# cat file1 file2 file3 ...files... > target
#
# The reason for writing a separate utility is that 'cat' is not available
# on all supported build platforms, but Python is, and hence this provides
# us with an easy and uniform way of doing this on all platforms.
# for py2/py3 compatibility
from __future__ import print_function
import optparse
import sys
def Concatenate(filenames):
  """Concatenate files.

  Args:
    filenames: List of file names. The last name is the target; all
      earlier ones are sources.

  Returns:
    True, if the operation was successful.
  """
  if len(filenames) < 2:
    print("An error occurred generating %s:\nNothing to do." % filenames[-1])
    return False

  try:
    with open(filenames[-1], "wb") as target:
      for filename in filenames[:-1]:
        with open(filename, "rb") as current:
          # Copy in fixed-size chunks so arbitrarily large source files
          # never need to fit into memory all at once.
          while True:
            chunk = current.read(1024 * 1024)
            if not chunk:
              break
            target.write(chunk)
    return True
  except IOError as e:
    print("An error occurred when writing %s:\n%s" % (filenames[-1], e))
    return False
def main():
  """Command-line entry point: concatenate all argv files into the last one.

  Exits with status 0 on success, 1 on failure.
  """
  # optparse is used only so that --help prints the usage text below.
  parser = optparse.OptionParser()
  parser.set_usage("""Concatenate several files into one.
      Equivalent to: cat file1 ... > target.""")
  (_options, args) = parser.parse_args()
  sys.exit(0 if Concatenate(args) else 1)
if __name__ == "__main__":
main()
|
0 | repos/libcamera/utils/ipc/mojo/public/tools | repos/libcamera/utils/ipc/mojo/public/tools/bindings/mojom_bindings_generator.py | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The frontend for the Mojo bindings system."""
from __future__ import print_function
import argparse
import hashlib
import importlib
import json
import os
import pprint
import re
import struct
import sys
# Disable lint check for finding modules:
# pylint: disable=F0401
def _GetDirAbove(dirname):
  """Walk upward from this file's location until a path component named
  |dirname| is found, and return the directory containing that component.

  |dirname| must actually be an ancestor component of this file; the
  assert fires once the filesystem root is reached otherwise.
  """
  current = os.path.abspath(__file__)
  while True:
    current, component = os.path.split(current)
    # An empty component means we have walked off the top of the tree.
    assert component
    if component == dirname:
      return current
sys.path.insert(
0,
os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "mojom"))
from mojom.error import Error
import mojom.fileutil as fileutil
from mojom.generate.module import Module
from mojom.generate import template_expander
from mojom.generate import translate
from mojom.generate.generator import WriteFile
sys.path.append(
os.path.join(_GetDirAbove("mojo"), "tools", "diagnosis"))
import crbug_1001171
# Maps each supported output language to the generator module (under
# generators/) implementing it.
_BUILTIN_GENERATORS = {
    "c++": "mojom_cpp_generator",
    "javascript": "mojom_js_generator",
    "java": "mojom_java_generator",
    "mojolpm": "mojom_mojolpm_generator",
    "typescript": "mojom_ts_generator",
}

# Maps each named validation pass to the module (under checks/) implementing
# it; passes run against every module before generation.
_BUILTIN_CHECKS = {
    "attributes": "mojom_attributes_check",
    "definitions": "mojom_definitions_check",
    "features": "mojom_interface_feature_check",
    "restrictions": "mojom_restrictions_check",
}
def LoadGenerators(generators_string):
  """Import and return the generator modules named in |generators_string|.

  |generators_string| is a comma-separated list of language names. Returns
  a dict mapping each lower-cased language to its imported module; an
  empty/falsy string yields an empty dict. Unknown names abort the process.
  """
  if not generators_string:
    return {}  # No generators.

  loaded = {}
  for name in (entry.strip() for entry in generators_string.split(",")):
    language = name.lower()
    if language not in _BUILTIN_GENERATORS:
      print("Unknown generator name %s" % name)
      sys.exit(1)
    loaded[language] = importlib.import_module(
        "generators.%s" % _BUILTIN_GENERATORS[language])
  return loaded
def LoadChecks(checks_string):
  """Import and return the check modules named in |checks_string|.

  |checks_string| is a comma-separated list of check names. Returns a dict
  mapping each lower-cased name to its imported module; an empty/falsy
  string yields an empty dict. Unknown names abort the process.
  """
  if not checks_string:
    return {}  # No checks.

  loaded = {}
  for entry in checks_string.split(","):
    name = entry.strip().lower()
    if name not in _BUILTIN_CHECKS:
      print("Unknown check name %s" % entry.strip())
      sys.exit(1)
    loaded[name] = importlib.import_module("checks.%s" % _BUILTIN_CHECKS[name])
  return loaded
def MakeImportStackMessage(imported_filename_stack):
  """Render a chain of imports as a human-readable message.

  The returned string begins with a newline when non-empty and never ends
  with one; an empty or single-element stack yields the empty string.
  """
  pairs = zip(imported_filename_stack[1:], imported_filename_stack)
  lines = ["\n  %s was imported by %s" % pair for pair in pairs]
  return ''.join(reversed(lines))
class RelativePath:
  """Represents a path relative to the source tree or generated output dir."""

  def __init__(self, path, source_root, output_dir):
    # Record which of the two known roots the path lives under; anything
    # else is a caller error.
    self.path = path
    if path.startswith(source_root):
      self.root = source_root
    elif path.startswith(output_dir):
      self.root = output_dir
    else:
      raise Exception("Invalid input path %s" % path)

  def relative_path(self):
    """Return self.path expressed relative to its root."""
    absolute = os.path.abspath(self.path)
    return os.path.relpath(absolute, os.path.abspath(self.root))
def _GetModulePath(path, output_dir):
  """Return the location under |output_dir| of the serialized Module AST
  for |path| (a RelativePath); the parser stores it with a '-module'
  suffix next to the generated output."""
  return os.path.join(output_dir, path.relative_path() + '-module')
def ScrambleMethodOrdinals(interfaces, salt):
  """Assign pseudo-random ordinals to methods that lack explicit ones.

  Ordinals derive from sha256(salt + interface name + counter), so they
  are stable for a given salt but non-trivial to predict without it. The
  same ordinal is never assigned twice within one interface.
  """
  used_ordinals = set()
  for interface in interfaces:
    attempt = 0
    used_ordinals.clear()
    for method in interface.methods:
      if method.explicit_ordinal is not None:
        continue
      while True:
        attempt = attempt + 1
        if attempt == 1000000:
          raise Exception("Could not generate %d method ordinals for %s" %
              (len(interface.methods), interface.mojom_name))
        # The hash doesn't need cryptographic strength; it only has to be
        # hard to guess without the secret salt, so a compromised process
        # can't trivially forge Mojo messages.
        digest = hashlib.sha256(salt)
        digest.update(interface.mojom_name.encode('utf-8'))
        digest.update(str(attempt).encode('utf-8'))
        # First 4 digest bytes as a little-endian uint32, trimmed to 31
        # bits so the value always fits a (signed) Java int.
        candidate = struct.unpack('<L', digest.digest()[:4])[0] & 0x7fffffff
        if candidate in used_ordinals:
          continue
        used_ordinals.add(candidate)
        method.ordinal = candidate
        method.ordinal_comment = (
            'The %s value is based on sha256(salt + "%s%d").' %
            (candidate, interface.mojom_name, attempt))
        break
def ReadFileContents(filename):
  """Return the entire contents of |filename| as bytes."""
  with open(filename, 'rb') as source:
    contents = source.read()
  return contents
class MojomProcessor:
  """Takes parsed mojom modules and generates language bindings from them.

  Attributes:
    _processed_files: {Dict[str, mojom.generate.module.Module]} Mapping from
        relative mojom filename paths to the module AST for that mojom file.
  """
  def __init__(self, should_generate):
    # |should_generate| is a predicate deciding, per relative path, whether
    # bindings should actually be emitted (imports are loaded but skipped).
    self._should_generate = should_generate
    self._processed_files = {}
    self._typemap = {}

  def LoadTypemaps(self, typemaps):
    """Merge the JSON typemap files in |typemaps| into self._typemap,
    keyed by language. Later files override earlier entries per key."""
    # Support some very simple single-line comments in typemap JSON.
    comment_expr = r"^\s*//.*$"
    def no_comments(line):
      return not re.match(comment_expr, line)
    for filename in typemaps:
      with open(filename) as f:
        typemaps = json.loads("".join(filter(no_comments, f.readlines())))
        for language, typemap in typemaps.items():
          language_map = self._typemap.get(language, {})
          language_map.update(typemap)
          self._typemap[language] = language_map
    if 'c++' in self._typemap:
      # mojolpm reuses the C++ typemap verbatim.
      self._typemap['mojolpm'] = self._typemap['c++']

  def _GenerateModule(self, args, remaining_args, check_modules,
                      generator_modules, rel_filename, imported_filename_stack):
    """Load the precompiled Module for |rel_filename|, run checks, and emit
    bindings for every requested language. Returns the Module; results are
    memoized in self._processed_files."""
    # Return the already-generated module.
    if rel_filename.path in self._processed_files:
      return self._processed_files[rel_filename.path]
    if rel_filename.path in imported_filename_stack:
      print("%s: Error: Circular dependency" % rel_filename.path + \
          MakeImportStackMessage(imported_filename_stack + [rel_filename.path]))
      sys.exit(1)
    module_path = _GetModulePath(rel_filename, args.output_dir)
    with open(module_path, 'rb') as f:
      module = Module.Load(f)
    if args.scrambled_message_id_salt_paths:
      # Concatenated salt-file contents form the scrambling secret.
      salt = b''.join(
          map(ReadFileContents, args.scrambled_message_id_salt_paths))
      ScrambleMethodOrdinals(module.interfaces, salt)
    if self._should_generate(rel_filename.path):
      # Run checks on module first.
      for check_module in check_modules.values():
        checker = check_module.Check(module)
        checker.CheckModule()
      # Then run generation.
      for language, generator_module in generator_modules.items():
        generator = generator_module.Generator(
            module, args.output_dir, typemap=self._typemap.get(language, {}),
            variant=args.variant, bytecode_path=args.bytecode_path,
            for_blink=args.for_blink,
            js_generate_struct_deserializers=\
                args.js_generate_struct_deserializers,
            export_attribute=args.export_attribute,
            export_header=args.export_header,
            generate_non_variant_code=args.generate_non_variant_code,
            support_lazy_serialization=args.support_lazy_serialization,
            disallow_native_types=args.disallow_native_types,
            disallow_interfaces=args.disallow_interfaces,
            generate_message_ids=args.generate_message_ids,
            generate_fuzzing=args.generate_fuzzing,
            enable_kythe_annotations=args.enable_kythe_annotations,
            extra_cpp_template_paths=args.extra_cpp_template_paths,
            generate_extra_cpp_only=args.generate_extra_cpp_only)
        filtered_args = []
        if hasattr(generator_module, 'GENERATOR_PREFIX'):
          # Forward only the unparsed flags addressed to this generator.
          prefix = '--' + generator_module.GENERATOR_PREFIX + '_'
          filtered_args = [arg for arg in remaining_args
                           if arg.startswith(prefix)]
        generator.GenerateFiles(filtered_args)
    # Save result.
    self._processed_files[rel_filename.path] = module
    return module
def _Generate(args, remaining_args):
  """Handler for the 'generate' subcommand: generate bindings for every
  mojom listed in args.filename (plus args.filelist). Returns 0."""
  if args.variant == "none":
    args.variant = None
  # Each -I entry may carry its own depth after a colon; normalize all of
  # them into RelativePath objects.
  for idx, import_dir in enumerate(args.import_directories):
    tokens = import_dir.split(":")
    if len(tokens) >= 2:
      args.import_directories[idx] = RelativePath(tokens[0], tokens[1],
                                                  args.output_dir)
    else:
      args.import_directories[idx] = RelativePath(tokens[0], args.depth,
                                                  args.output_dir)
  generator_modules = LoadGenerators(args.generators_string)
  check_modules = LoadChecks(args.checks_string)
  fileutil.EnsureDirectoryExists(args.output_dir)
  processor = MojomProcessor(lambda filename: filename in args.filename)
  processor.LoadTypemaps(set(args.typemaps))
  # A filelist supplements the positional filenames (whitespace-separated).
  if args.filelist:
    with open(args.filelist) as f:
      args.filename.extend(f.read().split())
  for filename in args.filename:
    processor._GenerateModule(
        args, remaining_args, check_modules, generator_modules,
        RelativePath(filename, args.depth, args.output_dir), [])
  return 0
def _Precompile(args, _):
  """Handler for the 'precompile' subcommand: precompile the templates of
  every built-in generator into args.output_dir. Returns 0."""
  generator_modules = LoadGenerators(",".join(_BUILTIN_GENERATORS.keys()))
  template_expander.PrecompileTemplates(generator_modules, args.output_dir)
  return 0
def main():
  """Parse the command line and dispatch to the 'generate' or 'precompile'
  subcommand handler; returns that handler's exit status."""
  parser = argparse.ArgumentParser(
      description="Generate bindings from mojom files.")
  parser.add_argument("--use_bundled_pylibs", action="store_true",
                      help="use Python modules bundled in the SDK")
  parser.add_argument(
      "-o",
      "--output_dir",
      dest="output_dir",
      default=".",
      help="output directory for generated files")
  subparsers = parser.add_subparsers()
  # --- 'generate' subcommand ---
  generate_parser = subparsers.add_parser(
      "generate", description="Generate bindings from mojom files.")
  generate_parser.add_argument("filename", nargs="*",
                               help="mojom input file")
  generate_parser.add_argument("--filelist", help="mojom input file list")
  generate_parser.add_argument("-d", "--depth", dest="depth", default=".",
                               help="depth from source root")
  generate_parser.add_argument("-g",
                               "--generators",
                               dest="generators_string",
                               metavar="GENERATORS",
                               default="c++,javascript,java,mojolpm",
                               help="comma-separated list of generators")
  generate_parser.add_argument("-c",
                               "--checks",
                               dest="checks_string",
                               metavar="CHECKS",
                               default=",".join(_BUILTIN_CHECKS.keys()),
                               help="comma-separated list of checks")
  generate_parser.add_argument(
      "--gen_dir", dest="gen_directories", action="append", metavar="directory",
      default=[], help="add a directory to be searched for the syntax trees.")
  generate_parser.add_argument(
      "-I", dest="import_directories", action="append", metavar="directory",
      default=[],
      help="add a directory to be searched for import files. The depth from "
      "source root can be specified for each import by appending it after "
      "a colon")
  generate_parser.add_argument("--typemap", action="append", metavar="TYPEMAP",
                               default=[], dest="typemaps",
                               help="apply TYPEMAP to generated output")
  generate_parser.add_argument("--variant", dest="variant", default=None,
                               help="output a named variant of the bindings")
  generate_parser.add_argument(
      "--bytecode_path", required=True, help=(
          "the path from which to load template bytecode; to generate template "
          "bytecode, run %s precompile BYTECODE_PATH" % os.path.basename(
              sys.argv[0])))
  generate_parser.add_argument("--for_blink", action="store_true",
                               help="Use WTF types as generated types for mojo "
                               "string/array/map.")
  generate_parser.add_argument(
      "--js_generate_struct_deserializers", action="store_true",
      help="Generate javascript deserialize methods for structs in "
      "mojom-lite.js file")
  generate_parser.add_argument(
      "--export_attribute", default="",
      help="Optional attribute to specify on class declaration to export it "
      "for the component build.")
  generate_parser.add_argument(
      "--export_header", default="",
      help="Optional header to include in the generated headers to support the "
      "component build.")
  generate_parser.add_argument(
      "--generate_non_variant_code", action="store_true",
      help="Generate code that is shared by different variants.")
  generate_parser.add_argument(
      "--scrambled_message_id_salt_path",
      dest="scrambled_message_id_salt_paths",
      help="If non-empty, the path to a file whose contents should be used as"
      "a salt for generating scrambled message IDs. If this switch is specified"
      "more than once, the contents of all salt files are concatenated to form"
      "the salt value.", default=[], action="append")
  generate_parser.add_argument(
      "--support_lazy_serialization",
      help="If set, generated bindings will serialize lazily when possible.",
      action="store_true")
  generate_parser.add_argument(
      "--extra_cpp_template_paths",
      dest="extra_cpp_template_paths",
      action="append",
      metavar="path_to_template",
      default=[],
      help="Provide a path to a new template (.tmpl) that is used to generate "
      "additional C++ source/header files ")
  generate_parser.add_argument(
      "--generate_extra_cpp_only",
      help="If set and extra_cpp_template_paths provided, will only generate"
      "extra_cpp_template related C++ bindings",
      action="store_true")
  generate_parser.add_argument(
      "--disallow_native_types",
      help="Disallows the [Native] attribute to be specified on structs or "
      "enums within the mojom file.", action="store_true")
  generate_parser.add_argument(
      "--disallow_interfaces",
      help="Disallows interface definitions within the mojom file. It is an "
      "error to specify this flag when processing a mojom file which defines "
      "any interface.", action="store_true")
  generate_parser.add_argument(
      "--generate_message_ids",
      help="Generates only the message IDs header for C++ bindings. Note that "
      "this flag only matters if --generate_non_variant_code is also "
      "specified.", action="store_true")
  generate_parser.add_argument(
      "--generate_fuzzing",
      action="store_true",
      help="Generates additional bindings for fuzzing in JS.")
  generate_parser.add_argument(
      "--enable_kythe_annotations",
      action="store_true",
      help="Adds annotations for kythe metadata generation.")
  generate_parser.set_defaults(func=_Generate)
  # --- 'precompile' subcommand ---
  precompile_parser = subparsers.add_parser("precompile",
      description="Precompile templates for the mojom bindings generator.")
  precompile_parser.set_defaults(func=_Precompile)
  # Unknown flags are forwarded to the individual generators.
  args, remaining_args = parser.parse_known_args()
  return args.func(args, remaining_args)
if __name__ == "__main__":
  # DumpStateOnLookupError dumps diagnostic state if a LookupError escapes
  # (see crbug.com/1001171).
  with crbug_1001171.DumpStateOnLookupError():
    ret = main()
  # Exit without running GC, which can save multiple seconds due to the large
  # number of objects created. But flush is necessary as os._exit doesn't do
  # that.
  sys.stdout.flush()
  sys.stderr.flush()
  os._exit(ret)
|
0 | repos/libcamera/utils/ipc/mojo/public/tools/bindings | repos/libcamera/utils/ipc/mojo/public/tools/bindings/checks/mojom_restrictions_check.py | # Copyright 2022 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Validate RequireContext and AllowedContext annotations before generation."""
import mojom.generate.check as check
import mojom.generate.module as module
class Check(check.Check):
  """Validates RequireContext/AllowedContext annotations on interfaces that
  are passed over other interfaces' methods (directly or nested inside
  structs/unions)."""

  def __init__(self, *args, **kwargs):
    # Memoizes, per struct/union kind spec, the set of interface-passing
    # fields reachable from it (see _GatherReferencedInterfaces).
    self.kind_to_interfaces = dict()
    super(Check, self).__init__(*args, **kwargs)

  def _IsPassedInterface(self, candidate):
    """Return True if |candidate| is a field/param whose kind passes an
    interface endpoint (pending_{associated_,}remote/receiver)."""
    if isinstance(
        candidate.kind,
        (module.PendingReceiver, module.PendingRemote,
         module.PendingAssociatedReceiver, module.PendingAssociatedRemote)):
      return True
    return False

  def _CheckInterface(self, method, param):
    """Raise CheckException unless |method|'s AllowedContext annotation
    satisfies the RequireContext annotation of the interface passed via
    |param|."""
    # |param| is a pending_x<Interface> so need .kind.kind to get Interface.
    interface = param.kind.kind
    if interface.require_context:
      if method.allowed_context is None:
        raise check.CheckException(
            self.module, "method `{}` has parameter `{}` which passes interface"
            " `{}` that requires an AllowedContext annotation but none exists.".
            format(
                method.mojom_name,
                param.mojom_name,
                interface.mojom_name,
            ))
      # If a string was provided, or if an enum was not imported, this will
      # be a string and we cannot validate that it is in range.
      if not isinstance(method.allowed_context, module.EnumValue):
        raise check.CheckException(
            self.module,
            "method `{}` has AllowedContext={} which is not a valid enum value."
            .format(method.mojom_name, method.allowed_context))
      # EnumValue must be from the same enum to be compared.
      if interface.require_context.enum != method.allowed_context.enum:
        raise check.CheckException(
            self.module, "method `{}` has parameter `{}` which passes interface"
            " `{}` that requires AllowedContext={} but one of kind `{}` was "
            "provided.".format(
                method.mojom_name,
                param.mojom_name,
                interface.mojom_name,
                interface.require_context.enum,
                method.allowed_context.enum,
            ))
      # RestrictContext enums have most privileged field first (lowest value).
      interface_value = interface.require_context.field.numeric_value
      method_value = method.allowed_context.field.numeric_value
      if interface_value < method_value:
        raise check.CheckException(
            self.module, "RequireContext={} > AllowedContext={} for method "
            "`{}` which passes interface `{}`.".format(
                interface.require_context.GetSpec(),
                method.allowed_context.GetSpec(), method.mojom_name,
                interface.mojom_name))
    return True

  def _GatherReferencedInterfaces(self, field):
    """Return (memoized) the set of interface-passing fields reachable from
    |field|'s struct/union kind, recursing through nested structs/unions."""
    key = field.kind.spec
    # structs/unions can nest themselves so we need to bookkeep.
    if not key in self.kind_to_interfaces:
      # Might reference ourselves so have to create the list first.
      self.kind_to_interfaces[key] = set()
      for param in field.kind.fields:
        if self._IsPassedInterface(param):
          self.kind_to_interfaces[key].add(param)
        elif isinstance(param.kind, (module.Struct, module.Union)):
          for iface in self._GatherReferencedInterfaces(param):
            self.kind_to_interfaces[key].add(iface)
    return self.kind_to_interfaces[key]

  def _CheckParams(self, method, params):
    """Check every interface passed by |params|, directly or nested."""
    # Note: we have to repeat _CheckParams for each method as each might have
    # different AllowedContext= attributes. We cannot memoize this function,
    # but can do so for gathering referenced interfaces as their RequireContext
    # attributes do not change.
    for param in params:
      if self._IsPassedInterface(param):
        self._CheckInterface(method, param)
      elif isinstance(param.kind, (module.Struct, module.Union)):
        for interface in self._GatherReferencedInterfaces(param):
          self._CheckInterface(method, interface)

  def _CheckMethod(self, method):
    """Check both request and response parameter lists of |method|."""
    if method.parameters:
      self._CheckParams(method, method.parameters)
    if method.response_parameters:
      self._CheckParams(method, method.response_parameters)

  def CheckModule(self):
    """Entry point: validate every method of every interface in the module."""
    for interface in self.module.interfaces:
      for method in interface.methods:
        self._CheckMethod(method)
|
0 | repos/libcamera/utils/ipc/mojo/public/tools/bindings | repos/libcamera/utils/ipc/mojo/public/tools/bindings/checks/mojom_attributes_check.py | # Copyright 2022 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Validate mojo attributes are allowed in Chrome before generation."""
import mojom.generate.check as check
import mojom.generate.module as module
# Allowlists of mojom attributes accepted on each kind of declaration.
# Attributes valid on every declaration kind.
_COMMON_ATTRIBUTES = {
    'EnableIf',
    'EnableIfNot',
}

# For struct, union & parameter lists.
_COMMON_FIELD_ATTRIBUTES = _COMMON_ATTRIBUTES | {
    'MinVersion',
    'RenamedFrom',
}

# Note: `Default`` goes on the default _value_, not on the enum.
# Note: [Stable] without [Extensible] is not allowed.
_ENUM_ATTRIBUTES = _COMMON_ATTRIBUTES | {
    'Extensible',
    'Native',
    'Stable',
    'RenamedFrom',
    'Uuid',
}

# TODO(crbug.com/1234883) MinVersion is not needed for EnumVal.
_ENUMVAL_ATTRIBUTES = _COMMON_ATTRIBUTES | {
    'Default',
    'MinVersion',
}

_INTERFACE_ATTRIBUTES = _COMMON_ATTRIBUTES | {
    'RenamedFrom',
    'RequireContext',
    'RuntimeFeature',
    'ServiceSandbox',
    'Stable',
    'Uuid',
}

_METHOD_ATTRIBUTES = _COMMON_ATTRIBUTES | {
    'AllowedContext',
    'MinVersion',
    'NoInterrupt',
    'RuntimeFeature',
    'SupportsUrgent',
    'Sync',
    'UnlimitedSize',
}

_MODULE_ATTRIBUTES = _COMMON_ATTRIBUTES | {
    'JavaConstantsClassName',
    'JavaPackage',
}

_PARAMETER_ATTRIBUTES = _COMMON_FIELD_ATTRIBUTES

_STRUCT_ATTRIBUTES = _COMMON_ATTRIBUTES | {
    'CustomSerializer',
    'JavaClassName',
    'Native',
    'Stable',
    'RenamedFrom',
    'Uuid',
}

_STRUCT_FIELD_ATTRIBUTES = _COMMON_FIELD_ATTRIBUTES

_UNION_ATTRIBUTES = _COMMON_ATTRIBUTES | {
    'Extensible',
    'Stable',
    'RenamedFrom',
    'Uuid',
}

_UNION_FIELD_ATTRIBUTES = _COMMON_FIELD_ATTRIBUTES | {
    'Default',
}

# Enums grandfathered in before the [Stable]-requires-[Extensible] rule.
# TODO(https://crbug.com/1193875) empty this set and remove the allowlist.
_STABLE_ONLY_ALLOWLISTED_ENUMS = {
    'crosapi.mojom.OptionalBool',
    'crosapi.mojom.TriState',
}
class Check(check.Check):
  """Validates that every attribute in the module appears only on a
  declaration kind where it is allowed (see the allowlists above)."""

  def __init__(self, *args, **kwargs):
    super(Check, self).__init__(*args, **kwargs)

  def _Respell(self, allowed, attribute):
    """Return a ' - Did you mean: X?' hint if |attribute| is a
    case-insensitive match for an allowed attribute, else ''."""
    for a in allowed:
      if a.lower() == attribute.lower():
        return f" - Did you mean: {a}?"
    return ""

  def _CheckAttributes(self, context, allowed, attributes):
    """Raise CheckException for any attribute not in |allowed|;
    |context| names the declaration kind for the error message."""
    if not attributes:
      return
    for attribute in attributes:
      if not attribute in allowed:
        # Is there a close misspelling?
        hint = self._Respell(allowed, attribute)
        raise check.CheckException(
            self.module,
            f"attribute {attribute} not allowed on {context}{hint}")

  def _CheckEnumAttributes(self, enum):
    """Check an enum and its values; also enforces that [Stable] enums are
    [Extensible] unless allowlisted."""
    if enum.attributes:
      self._CheckAttributes("enum", _ENUM_ATTRIBUTES, enum.attributes)
      if 'Stable' in enum.attributes and not 'Extensible' in enum.attributes:
        full_name = f"{self.module.mojom_namespace}.{enum.mojom_name}"
        if full_name not in _STABLE_ONLY_ALLOWLISTED_ENUMS:
          raise check.CheckException(
              self.module,
              f"[Extensible] required on [Stable] enum {full_name}")
    for enumval in enum.fields:
      self._CheckAttributes("enum value", _ENUMVAL_ATTRIBUTES,
                            enumval.attributes)

  def _CheckInterfaceAttributes(self, interface):
    """Check an interface, its methods, their parameters and nested enums."""
    self._CheckAttributes("interface", _INTERFACE_ATTRIBUTES,
                          interface.attributes)
    for method in interface.methods:
      self._CheckAttributes("method", _METHOD_ATTRIBUTES, method.attributes)
      for param in method.parameters:
        self._CheckAttributes("parameter", _PARAMETER_ATTRIBUTES,
                              param.attributes)
      if method.response_parameters:
        for param in method.response_parameters:
          self._CheckAttributes("parameter", _PARAMETER_ATTRIBUTES,
                                param.attributes)
    for enum in interface.enums:
      self._CheckEnumAttributes(enum)

  def _CheckModuleAttributes(self):
    self._CheckAttributes("module", _MODULE_ATTRIBUTES, self.module.attributes)

  def _CheckStructAttributes(self, struct):
    """Check a struct, its fields and nested enums."""
    self._CheckAttributes("struct", _STRUCT_ATTRIBUTES, struct.attributes)
    for field in struct.fields:
      self._CheckAttributes("struct field", _STRUCT_FIELD_ATTRIBUTES,
                            field.attributes)
    for enum in struct.enums:
      self._CheckEnumAttributes(enum)

  def _CheckUnionAttributes(self, union):
    """Check a union and its fields."""
    self._CheckAttributes("union", _UNION_ATTRIBUTES, union.attributes)
    for field in union.fields:
      self._CheckAttributes("union field", _UNION_FIELD_ATTRIBUTES,
                            field.attributes)

  def CheckModule(self):
    """Note that duplicate attributes are forbidden at the parse phase.
    We also do not need to look at the types of any parameters, as they will be
    checked where they are defined. Consts do not have attributes so can be
    skipped."""
    self._CheckModuleAttributes()
    for interface in self.module.interfaces:
      self._CheckInterfaceAttributes(interface)
    for enum in self.module.enums:
      self._CheckEnumAttributes(enum)
    for struct in self.module.structs:
      self._CheckStructAttributes(struct)
    for union in self.module.unions:
      self._CheckUnionAttributes(union)
|
0 | repos/libcamera/utils/ipc/mojo/public/tools/bindings | repos/libcamera/utils/ipc/mojo/public/tools/bindings/checks/mojom_interface_feature_check_unittest.py | # Copyright 2023 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import mojom.generate.check as check
from mojom_bindings_generator import LoadChecks, _Generate
from mojom_parser_test_case import MojomParserTestCase
class FakeArgs:
  """Fakes args to _Generate - intention is to do just enough to run checks"""

  def __init__(self, tester, files=None):
    """ `tester` is MojomParserTestCase for paths.
        `files` will have tester path added."""
    self.checks_string = 'features'
    self.depth = tester.GetPath('')
    self.filelist = None
    # Guard the None default: iterating None raises TypeError.
    self.filename = [tester.GetPath(x) for x in (files or [])]
    self.gen_directories = tester.GetPath('gen')
    self.generators_string = ''
    self.import_directories = []
    self.output_dir = tester.GetPath('out')
    self.scrambled_message_id_salt_paths = None
    self.typemaps = []
    self.variant = 'none'
class MojoBindingsCheckTest(MojomParserTestCase):
  """Exercises the 'features' check end-to-end via _Generate."""

  def _ParseAndGenerate(self, mojoms):
    self.ParseMojoms(mojoms)
    args = FakeArgs(self, files=mojoms)
    _Generate(args, {})

  def assertValid(self, filename, content):
    self.WriteFile(filename, content)
    self._ParseAndGenerate([filename])

  def assertThrows(self, filename, content, regexp):
    mojoms = []
    self.WriteFile(filename, content)
    mojoms.append(filename)
    # assertRaisesRegexp is a deprecated alias removed in Python 3.12;
    # use the canonical assertRaisesRegex.
    with self.assertRaisesRegex(check.CheckException, regexp):
      self._ParseAndGenerate(mojoms)

  def testLoads(self):
    """Validate that the check is registered under the expected name."""
    check_modules = LoadChecks('features')
    self.assertTrue(check_modules['features'])

  def testNullableOk(self):
    self.assertValid(
        "a.mojom", """
          module a;
          // Scaffolding.
          feature kFeature {
            const string name = "Hello";
            const bool enabled_state = false;
          };
          [RuntimeFeature=kFeature]
          interface Guarded {
          };

          // Unguarded interfaces should be ok everywhere.
          interface NotGuarded { };

          // Optional (nullable) interfaces should be ok everywhere:
          struct Bar {
            pending_remote<Guarded>? remote;
            pending_receiver<Guarded>? receiver;
          };
          union Thingy {
            pending_remote<Guarded>? remote;
            pending_receiver<Guarded>? receiver;
          };
          interface Foo {
            Foo(
              pending_remote<Guarded>? remote,
              pending_receiver<Guarded>? receiver,
              pending_associated_remote<Guarded>? a_remote,
              pending_associated_receiver<Guarded>? a_receiver,
              // Unguarded interfaces do not have to be nullable.
              pending_remote<NotGuarded> remote,
              pending_receiver<NotGuarded> receiver,
              pending_associated_remote<NotGuarded> a_remote,
              pending_associated_receiver<NotGuarded> a_receiver
            ) => (
              pending_remote<Guarded>? remote,
              pending_receiver<Guarded>? receiver
            );
            Bar(array<pending_remote<Guarded>?> remote)
                => (map<string, pending_receiver<Guarded>?> a);
          };
        """)

  def testMethodParamsMustBeNullable(self):
    prelude = """
      module a;
      // Scaffolding.
      feature kFeature {
        const string name = "Hello";
        const bool enabled_state = false;
      };
      [RuntimeFeature=kFeature]
      interface Guarded { };
    """
    self.assertThrows(
        'a.mojom', prelude + """
          interface Trial {
            Method(pending_remote<Guarded> a) => ();
          };
        """, 'interface Guarded has a RuntimeFeature')
    self.assertThrows(
        'a.mojom', prelude + """
          interface Trial {
            Method(bool foo) => (pending_receiver<Guarded> a);
          };
        """, 'interface Guarded has a RuntimeFeature')
    self.assertThrows(
        'a.mojom', prelude + """
          interface Trial {
            Method(pending_receiver<Guarded> a) => ();
          };
        """, 'interface Guarded has a RuntimeFeature')
    self.assertThrows(
        'a.mojom', prelude + """
          interface Trial {
            Method(pending_associated_remote<Guarded> a) => ();
          };
        """, 'interface Guarded has a RuntimeFeature')
    self.assertThrows(
        'a.mojom', prelude + """
          interface Trial {
            Method(pending_associated_receiver<Guarded> a) => ();
          };
        """, 'interface Guarded has a RuntimeFeature')
    self.assertThrows(
        'a.mojom', prelude + """
          interface Trial {
            Method(array<pending_associated_receiver<Guarded>> a) => ();
          };
        """, 'interface Guarded has a RuntimeFeature')
    self.assertThrows(
        'a.mojom', prelude + """
          interface Trial {
            Method(map<string, pending_associated_receiver<Guarded>> a) => ();
          };
        """, 'interface Guarded has a RuntimeFeature')

  def testStructUnionMembersMustBeNullable(self):
    prelude = """
      module a;
      // Scaffolding.
      feature kFeature {
        const string name = "Hello";
        const bool enabled_state = false;
      };
      [RuntimeFeature=kFeature]
      interface Guarded { };
    """
    self.assertThrows(
        'a.mojom', prelude + """
          struct Trial {
            pending_remote<Guarded> a;
          };
        """, 'interface Guarded has a RuntimeFeature')
    self.assertThrows(
        'a.mojom', prelude + """
          union Trial {
            pending_remote<Guarded> a;
          };
        """, 'interface Guarded has a RuntimeFeature')
|
0 | repos/libcamera/utils/ipc/mojo/public/tools/bindings | repos/libcamera/utils/ipc/mojo/public/tools/bindings/checks/mojom_attributes_check_unittest.py | # Copyright 2022 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import mojom.generate.check as check
from mojom_bindings_generator import LoadChecks, _Generate
from mojom_parser_test_case import MojomParserTestCase
class FakeArgs:
  """Fakes args to _Generate - intention is to do just enough to run checks"""

  def __init__(self, tester, files=None):
    """ `tester` is MojomParserTestCase for paths.
        `files` will have tester path added."""
    self.checks_string = 'attributes'
    self.depth = tester.GetPath('')
    self.filelist = None
    # Guard the None default: iterating None raises TypeError.
    self.filename = [tester.GetPath(x) for x in (files or [])]
    self.gen_directories = tester.GetPath('gen')
    self.generators_string = ''
    self.import_directories = []
    self.output_dir = tester.GetPath('out')
    self.scrambled_message_id_salt_paths = None
    self.typemaps = []
    self.variant = 'none'
class MojoBindingsCheckTest(MojomParserTestCase):
def _ParseAndGenerate(self, mojoms):
self.ParseMojoms(mojoms)
args = FakeArgs(self, files=mojoms)
_Generate(args, {})
def _testValid(self, filename, content):
self.WriteFile(filename, content)
self._ParseAndGenerate([filename])
def _testThrows(self, filename, content, regexp):
mojoms = []
self.WriteFile(filename, content)
mojoms.append(filename)
with self.assertRaisesRegexp(check.CheckException, regexp):
self._ParseAndGenerate(mojoms)
def testLoads(self):
"""Validate that the check is registered under the expected name."""
check_modules = LoadChecks('attributes')
self.assertTrue(check_modules['attributes'])
def testNoAnnotations(self):
# Undecorated mojom should be fine.
self._testValid(
"a.mojom", """
module a;
struct Bar { int32 a; };
enum Hello { kValue };
union Thingy { Bar b; Hello hi; };
interface Foo {
Foo(int32 a, Hello hi, Thingy t) => (Bar b);
};
""")
def testValidAnnotations(self):
# Obviously this is meaningless and won't generate, but it should pass
# the attribute check's validation.
self._testValid(
"a.mojom", """
[JavaConstantsClassName="FakeClass",JavaPackage="org.chromium.Fake"]
module a;
[Stable, Extensible]
enum Hello { [Default] kValue, kValue2, [MinVersion=2] kValue3 };
[Native]
enum NativeEnum {};
[Stable,Extensible]
union Thingy { Bar b; [Default]int32 c; Hello hi; };
[Stable,RenamedFrom="module.other.Foo",
Uuid="4C178401-4B07-4C2E-9255-5401A943D0C7"]
struct Structure { Hello hi; };
[ServiceSandbox=Hello.kValue,RequireContext=Hello.kValue,Stable,
Uuid="2F17D7DD-865A-4B1C-9394-9C94E035E82F"]
interface Foo {
[AllowedContext=Hello.kValue]
Foo@0(int32 a) => (int32 b);
[MinVersion=2,Sync,UnlimitedSize,NoInterrupt]
Bar@1(int32 b, [MinVersion=2]Structure? s) => (bool c);
};
[RuntimeFeature=test.mojom.FeatureName]
interface FooFeatureControlled {};
interface FooMethodFeatureControlled {
[RuntimeFeature=test.mojom.FeatureName]
MethodWithFeature() => (bool c);
};
""")
def testWrongModuleStable(self):
contents = """
// err: module cannot be Stable
[Stable]
module a;
enum Hello { kValue, kValue2, kValue3 };
enum NativeEnum {};
struct Structure { Hello hi; };
interface Foo {
Foo(int32 a) => (int32 b);
Bar(int32 b, Structure? s) => (bool c);
};
"""
self._testThrows('b.mojom', contents,
'attribute Stable not allowed on module')
def testWrongEnumDefault(self):
contents = """
module a;
// err: default should go on EnumValue not Enum.
[Default=kValue]
enum Hello { kValue, kValue2, kValue3 };
enum NativeEnum {};
struct Structure { Hello hi; };
interface Foo {
Foo(int32 a) => (int32 b);
Bar(int32 b, Structure? s) => (bool c);
};
"""
self._testThrows('b.mojom', contents,
'attribute Default not allowed on enum')
  def testWrongStructMinVersion(self):
    """[MinVersion] is only legal on fields/values, not on a struct."""
    contents = """
    module a;
    enum Hello { kValue, kValue2, kValue3 };
    enum NativeEnum {};
    // err: struct cannot have MinVersion.
    [MinVersion=2]
    struct Structure { Hello hi; };
    interface Foo {
      Foo(int32 a) => (int32 b);
      Bar(int32 b, Structure? s) => (bool c);
    };
    """
    self._testThrows('b.mojom', contents,
                     'attribute MinVersion not allowed on struct')
  def testWrongMethodRequireContext(self):
    """[RequireContext] is only legal on interfaces, not on methods."""
    contents = """
    module a;
    enum Hello { kValue, kValue2, kValue3 };
    enum NativeEnum {};
    struct Structure { Hello hi; };
    interface Foo {
      // err: RequireContext is for interfaces.
      [RequireContext=Hello.kValue]
      Foo(int32 a) => (int32 b);
      Bar(int32 b, Structure? s) => (bool c);
    };
    """
    self._testThrows('b.mojom', contents,
                     'RequireContext not allowed on method')
def testWrongMethodRequireContext(self):
# crbug.com/1230122
contents = """
module a;
interface Foo {
// err: sync not Sync.
[sync]
Foo(int32 a) => (int32 b);
};
"""
self._testThrows('b.mojom', contents,
'attribute sync not allowed.*Did you mean: Sync')
  def testStableExtensibleEnum(self):
    """A [Stable] enum must also be [Extensible]."""
    # crbug.com/1193875
    contents = """
    module a;
    [Stable]
    enum Foo {
      kDefaultVal,
      kOtherVal = 2,
    };
    """
    self._testThrows('a.mojom', contents,
                     'Extensible.*?required.*?Stable.*?enum')
|
0 | repos/libcamera/utils/ipc/mojo/public/tools/bindings | repos/libcamera/utils/ipc/mojo/public/tools/bindings/checks/mojom_definitions_check.py | # Copyright 2022 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Ensure no duplicate type definitions before generation."""
import mojom.generate.check as check
import mojom.generate.module as module
class Check(check.Check):
  """Verifies that no enum/struct/union type is defined more than once across
  this module and its imports."""

  def __init__(self, *args, **kwargs):
    super(Check, self).__init__(*args, **kwargs)

  def CheckModule(self):
    """Raises check.CheckException on the first duplicate type definition.

    Returns True when no duplicates exist.
    """
    kinds = dict()
    # Scan imported modules first so a clash with this module's own
    # definitions is reported against the importing module below.
    # (The loop variable was previously named `module`, shadowing the
    # imported mojom.generate.module module.)
    for imported in self.module.imports:
      for kind in imported.enums + imported.structs + imported.unions:
        kind_name = f'{kind.module.mojom_namespace}.{kind.mojom_name}'
        if kind_name in kinds:
          previous_module = kinds[kind_name]
          # The same type may legitimately be seen twice via diamond imports
          # of one file; only a definition in a *different* file is an error.
          if previous_module.path != imported.path:
            raise check.CheckException(
                self.module, f"multiple-definition for type {kind_name} " +
                f"(defined in both {previous_module} and {imported})")
        kinds[kind_name] = kind.module
    for kind in self.module.enums + self.module.structs + self.module.unions:
      kind_name = f'{kind.module.mojom_namespace}.{kind.mojom_name}'
      if kind_name in kinds:
        previous_module = kinds[kind_name]
        raise check.CheckException(
            self.module, f"multiple-definition for type {kind_name} " +
            f"(previous definition in {previous_module})")
    return True
|
0 | repos/libcamera/utils/ipc/mojo/public/tools/bindings | repos/libcamera/utils/ipc/mojo/public/tools/bindings/checks/mojom_restrictions_checks_unittest.py | # Copyright 2022 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import mojom.generate.check as check
from mojom_bindings_generator import LoadChecks, _Generate
from mojom_parser_test_case import MojomParserTestCase
# Mojoms that we will use in multiple tests.
basic_mojoms = {
'level.mojom':
"""
module level;
enum Level {
kHighest,
kMiddle,
kLowest,
};
""",
'interfaces.mojom':
"""
module interfaces;
import "level.mojom";
struct Foo {int32 bar;};
[RequireContext=level.Level.kHighest]
interface High {
DoFoo(Foo foo);
};
[RequireContext=level.Level.kMiddle]
interface Mid {
DoFoo(Foo foo);
};
[RequireContext=level.Level.kLowest]
interface Low {
DoFoo(Foo foo);
};
"""
}
class FakeArgs:
  """Fakes args to _Generate - intention is to do just enough to run checks"""

  def __init__(self, tester, files=None):
    """`tester` is MojomParserTestCase for paths.
    `files` is an optional list of filenames; each has the tester path added.
    """
    self.checks_string = 'restrictions'
    self.depth = tester.GetPath('')
    self.filelist = None
    # Guard against files=None: iterating None in the comprehension would
    # raise TypeError when no file list is supplied.
    self.filename = [tester.GetPath(x) for x in (files or [])]
    self.gen_directories = tester.GetPath('gen')
    self.generators_string = ''
    self.import_directories = []
    self.output_dir = tester.GetPath('out')
    self.scrambled_message_id_salt_paths = None
    self.typemaps = []
    self.variant = 'none'
class MojoBindingsCheckTest(MojomParserTestCase):
  """End-to-end coverage of the 'restrictions' check: writes fixture mojoms,
  parses them, then runs generation (which executes the enabled checks)."""

  def _WriteBasicMojoms(self):
    """Writes the shared fixture mojoms to disk; returns their filenames."""
    for filename, contents in basic_mojoms.items():
      self.WriteFile(filename, contents)
    return list(basic_mojoms.keys())

  def _ParseAndGenerate(self, mojoms):
    """Parses `mojoms` and runs _Generate over them with faked args."""
    self.ParseMojoms(mojoms)
    args = FakeArgs(self, files=mojoms)
    _Generate(args, {})

  def testLoads(self):
    """Validate that the check is registered under the expected name."""
    check_modules = LoadChecks('restrictions')
    self.assertTrue(check_modules['restrictions'])

  def testValidAnnotations(self):
    """Correctly annotated passing of restricted interfaces generates fine."""
    mojoms = self._WriteBasicMojoms()
    a = 'a.mojom'
    self.WriteFile(
        a, """
      module a;
      import "level.mojom";
      import "interfaces.mojom";
      interface PassesHigh {
        [AllowedContext=level.Level.kHighest]
        DoHigh(pending_receiver<interfaces.High> hi);
      };
      interface PassesMedium {
        [AllowedContext=level.Level.kMiddle]
        DoMedium(pending_receiver<interfaces.Mid> hi);
        [AllowedContext=level.Level.kMiddle]
        DoMediumRem(pending_remote<interfaces.Mid> hi);
        [AllowedContext=level.Level.kMiddle]
        DoMediumAssoc(pending_associated_receiver<interfaces.Mid> hi);
        [AllowedContext=level.Level.kMiddle]
        DoMediumAssocRem(pending_associated_remote<interfaces.Mid> hi);
      };
      interface PassesLow {
        [AllowedContext=level.Level.kLowest]
        DoLow(pending_receiver<interfaces.Low> hi);
      };
      struct One { pending_receiver<interfaces.High> hi; };
      struct Two { One one; };
      interface PassesNestedHigh {
        [AllowedContext=level.Level.kHighest]
        DoNestedHigh(Two two);
      };
      // Allowed as PassesHigh is not itself restricted.
      interface PassesPassesHigh {
        DoPass(pending_receiver<PassesHigh> hiho);
      };
      """)
    mojoms.append(a)
    self._ParseAndGenerate(mojoms)

  def _testThrows(self, filename, content, regexp):
    """Asserts that generating `content` raises a CheckException whose message
    matches `regexp`."""
    mojoms = self._WriteBasicMojoms()
    self.WriteFile(filename, content)
    mojoms.append(filename)
    # assertRaisesRegexp was a deprecated alias, removed in Python 3.12.
    with self.assertRaisesRegex(check.CheckException, regexp):
      self._ParseAndGenerate(mojoms)

  def testMissingAnnotation(self):
    """Passing a restricted interface without AllowedContext is rejected."""
    contents = """
      module b;
      import "level.mojom";
      import "interfaces.mojom";
      interface PassesHigh {
        // err: missing annotation.
        DoHigh(pending_receiver<interfaces.High> hi);
      };
      """
    self._testThrows('b.mojom', contents, 'require.*?AllowedContext')

  def testAllowTooLow(self):
    """An AllowedContext below the interface's RequireContext is rejected."""
    contents = """
      module b;
      import "level.mojom";
      import "interfaces.mojom";
      interface PassesHigh {
        // err: level is worse than required.
        [AllowedContext=level.Level.kMiddle]
        DoHigh(pending_receiver<interfaces.High> hi);
      };
      """
    self._testThrows('b.mojom', contents,
                     'RequireContext=.*?kHighest > AllowedContext=.*?kMiddle')

  def testWrongEnumInAllow(self):
    """AllowedContext must use the same enum as the RequireContext it meets."""
    contents = """
      module b;
      import "level.mojom";
      import "interfaces.mojom";
      enum Blah {
        kZero,
      };
      interface PassesHigh {
        // err: different enums.
        [AllowedContext=Blah.kZero]
        DoHigh(pending_receiver<interfaces.High> hi);
      };
      """
    self._testThrows('b.mojom', contents, 'but one of kind')

  def testNotAnEnumInAllow(self):
    """AllowedContext must name a resolvable enum value."""
    contents = """
      module b;
      import "level.mojom";
      import "interfaces.mojom";
      interface PassesHigh {
        // err: not an enum.
        [AllowedContext=doopdedoo.mojom.kWhatever]
        DoHigh(pending_receiver<interfaces.High> hi);
      };
      """
    self._testThrows('b.mojom', contents, 'not a valid enum value')

  def testMissingAllowedForNestedStructs(self):
    """Restricted interfaces nested inside struct fields still need
    AllowedContext."""
    contents = """
      module b;
      import "level.mojom";
      import "interfaces.mojom";
      struct One { pending_receiver<interfaces.High> hi; };
      struct Two { One one; };
      interface PassesNestedHigh {
        // err: missing annotation.
        DoNestedHigh(Two two);
      };
      """
    self._testThrows('b.mojom', contents, 'require.*?AllowedContext')

  def testMissingAllowedForNestedUnions(self):
    """Restricted interfaces nested inside union fields still need
    AllowedContext."""
    contents = """
      module b;
      import "level.mojom";
      import "interfaces.mojom";
      struct One { pending_receiver<interfaces.High> hi; };
      struct Two { One one; };
      union Three {One one; Two two; };
      interface PassesNestedHigh {
        // err: missing annotation.
        DoNestedHigh(Three three);
      };
      """
    self._testThrows('b.mojom', contents, 'require.*?AllowedContext')

  def testMultipleInterfacesThrows(self):
    """One under-privileged parameter fails the method even when others are
    fine."""
    contents = """
      module b;
      import "level.mojom";
      import "interfaces.mojom";
      struct One { pending_receiver<interfaces.High> hi; };
      interface PassesMultipleInterfaces {
        [AllowedContext=level.Level.kMiddle]
        DoMultiple(
          pending_remote<interfaces.Mid> mid,
          pending_receiver<interfaces.High> hi,
          One one
        );
      };
      """
    self._testThrows('b.mojom', contents,
                     'RequireContext=.*?kHighest > AllowedContext=.*?kMiddle')

  def testMultipleInterfacesAllowed(self):
    """Multiple interfaces can be passed, all satisfy the level."""
    mojoms = self._WriteBasicMojoms()
    b = "b.mojom"
    self.WriteFile(
        b, """
      module b;
      import "level.mojom";
      import "interfaces.mojom";
      struct One { pending_receiver<interfaces.High> hi; };
      interface PassesMultipleInterfaces {
        [AllowedContext=level.Level.kHighest]
        DoMultiple(
          pending_receiver<interfaces.High> hi,
          pending_remote<interfaces.Mid> mid,
          One one
        );
      };
      """)
    mojoms.append(b)
    self._ParseAndGenerate(mojoms)
|
0 | repos/libcamera/utils/ipc/mojo/public/tools/bindings | repos/libcamera/utils/ipc/mojo/public/tools/bindings/checks/mojom_interface_feature_check.py | # Copyright 2023 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Validate mojo runtime feature guarded interfaces are nullable."""
import mojom.generate.check as check
import mojom.generate.module as module
class Check(check.Check):
  """Validates that any runtime-feature-guarded interface endpoint that might
  be passed over mojo is declared nullable (optional)."""

  def __init__(self, *args, **kwargs):
    super(Check, self).__init__(*args, **kwargs)

  # `kind` wraps an interface (e.g. a remote/receiver kind).
  def _CheckNonNullableFeatureGuardedInterface(self, kind):
    # Only need to validate interface if it has a RuntimeFeature
    if not kind.kind.runtime_feature:
      return
    # Nullable (optional) is ok as the interface expects they might not be sent.
    if kind.is_nullable:
      return
    interface = kind.kind.mojom_name
    raise check.CheckException(
        self.module,
        f"interface {interface} has a RuntimeFeature but is not nullable")

  # `kind` can be a lot of things so check if it is a remote/receiver.
  # Array/Map must be recursed into.
  def _CheckFieldOrParam(self, kind):
    if module.IsAnyInterfaceKind(kind):
      self._CheckNonNullableFeatureGuardedInterface(kind)
    if module.IsArrayKind(kind):
      self._CheckFieldOrParam(kind.kind)
    if module.IsMapKind(kind):
      self._CheckFieldOrParam(kind.key_kind)
      self._CheckFieldOrParam(kind.value_kind)

  # Checks every request and response parameter of every method.
  def _CheckInterfaceFeatures(self, interface):
    for method in interface.methods:
      for param in method.parameters:
        self._CheckFieldOrParam(param.kind)
      if method.response_parameters:
        for param in method.response_parameters:
          self._CheckFieldOrParam(param.kind)

  def _CheckStructFeatures(self, struct):
    for field in struct.fields:
      self._CheckFieldOrParam(field.kind)

  def _CheckUnionFeatures(self, union):
    for field in union.fields:
      self._CheckFieldOrParam(field.kind)

  def CheckModule(self):
    """Validate that any runtime feature guarded interfaces that might be passed
    over mojo are nullable."""
    for interface in self.module.interfaces:
      self._CheckInterfaceFeatures(interface)
    for struct in self.module.structs:
      self._CheckStructFeatures(struct)
    for union in self.module.unions:
      self._CheckUnionFeatures(union)
|
0 | repos/libcamera/utils/ipc/mojo/public/tools | repos/libcamera/utils/ipc/mojo/public/tools/mojom/check_stable_mojom_compatibility_unittest.py | #!/usr/bin/env python3
# Copyright 2020 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import os.path
import shutil
import tempfile
import unittest
import check_stable_mojom_compatibility
from mojom.generate import module
class Change:
  """Describes one mojom file delta for the compatibility checker."""

  def __init__(self, filename, old=None, new=None):
    """A None `old` marks an added file and a None `new` a deleted file;
    otherwise the file's contents changed from `old` to `new`."""
    self.filename, self.old, self.new = filename, old, new
class UnchangedFile(Change):
  """A Change whose old and new contents are identical (no-op delta)."""

  def __init__(self, filename, contents):
    super().__init__(filename, contents, contents)
class CheckStableMojomCompatibilityTest(unittest.TestCase):
  """Tests covering the behavior of the compatibility checking tool. Note that
  details of different compatibility checks and relevant failure modes are NOT
  covered by these tests. Those are instead covered by unittests in
  version_compatibility_unittest.py. Additionally, the tests which ensure a
  given set of [Stable] mojom definitions are indeed plausibly stable (i.e. they
  have no unstable dependencies) are covered by stable_attribute_unittest.py.
  These tests cover higher-level concerns of the compatibility checking tool,
  like file or symbol renames, changes spread over multiple files, etc."""

  def verifyBackwardCompatibility(self, changes):
    """Helper for implementing assertBackwardCompatible and
    assertNotBackwardCompatible.

    Materializes the 'old' side of every change in a temporary fake source
    root, then runs the checker over the computed delta; the checker raises
    when the delta is not backward-compatible.
    """
    temp_dir = tempfile.mkdtemp()
    # NOTE(review): files are written before the try/finally below, so an
    # I/O failure here would leak temp_dir — confirm acceptable for tests.
    for change in changes:
      if change.old:
        # Populate the old file on disk in our temporary fake source root
        file_path = os.path.join(temp_dir, change.filename)
        dir_path = os.path.dirname(file_path)
        if not os.path.exists(dir_path):
          os.makedirs(dir_path)
        with open(file_path, 'w') as f:
          f.write(change.old)
    # Only genuinely changed files form the delta; UnchangedFile entries are
    # present on disk but excluded here.
    delta = []
    for change in changes:
      if change.old != change.new:
        delta.append({
            'filename': change.filename,
            'old': change.old,
            'new': change.new
        })
    try:
      check_stable_mojom_compatibility.Run(['--src-root', temp_dir],
                                           delta=delta)
    finally:
      shutil.rmtree(temp_dir)

  def assertBackwardCompatible(self, changes):
    """Fails (by propagating the checker's exception) if `changes` is not
    backward-compatible."""
    self.verifyBackwardCompatibility(changes)

  def assertNotBackwardCompatible(self, changes):
    """Fails unless the checker rejects `changes`."""
    # NOTE(review): any Exception — not only compatibility failures — makes
    # this assertion pass, so an unrelated crash would go unnoticed.
    try:
      self.verifyBackwardCompatibility(changes)
    except Exception:
      return
    raise Exception('Change unexpectedly passed a backward-compatibility check')

  def testBasicCompatibility(self):
    """Minimal smoke test to verify acceptance of a simple valid change."""
    self.assertBackwardCompatible([
        Change('foo/foo.mojom',
               old='[Stable] struct S {};',
               new='[Stable] struct S { [MinVersion=1] int32 x; };')
    ])

  def testBasicIncompatibility(self):
    """Minimal smoke test to verify rejection of a simple invalid change."""
    self.assertNotBackwardCompatible([
        Change('foo/foo.mojom',
               old='[Stable] struct S {};',
               new='[Stable] struct S { int32 x; };')
    ])

  def testIgnoreIfNotStable(self):
    """We don't care about types not marked [Stable]"""
    self.assertBackwardCompatible([
        Change('foo/foo.mojom',
               old='struct S {};',
               new='struct S { int32 x; };')
    ])

  def testRename(self):
    """We can do checks for renamed types."""
    self.assertBackwardCompatible([
        Change('foo/foo.mojom',
               old='[Stable] struct S {};',
               new='[Stable, RenamedFrom="S"] struct T {};')
    ])
    self.assertNotBackwardCompatible([
        Change('foo/foo.mojom',
               old='[Stable] struct S {};',
               new='[Stable, RenamedFrom="S"] struct T { int32 x; };')
    ])
    self.assertBackwardCompatible([
        Change('foo/foo.mojom',
               old='[Stable] struct S {};',
               new="""\
               [Stable, RenamedFrom="S"]
               struct T { [MinVersion=1] int32 x; };
               """)
    ])

  def testNewlyStable(self):
    """We don't care about types newly marked as [Stable]."""
    self.assertBackwardCompatible([
        Change('foo/foo.mojom',
               old='struct S {};',
               new='[Stable] struct S { int32 x; };')
    ])

  def testFileRename(self):
    """Make sure we can still do compatibility checks after a file rename."""
    self.assertBackwardCompatible([
        Change('foo/foo.mojom', old='[Stable] struct S {};', new=None),
        Change('bar/bar.mojom',
               old=None,
               new='[Stable] struct S { [MinVersion=1] int32 x; };')
    ])
    self.assertNotBackwardCompatible([
        Change('foo/foo.mojom', old='[Stable] struct S {};', new=None),
        Change('bar/bar.mojom', old=None, new='[Stable] struct S { int32 x; };')
    ])

  def testWithImport(self):
    """Ensure that cross-module dependencies do not break the compatibility
    checking tool."""
    self.assertBackwardCompatible([
        Change('foo/foo.mojom',
               old="""\
               module foo;
               [Stable] struct S {};
               """,
               new="""\
               module foo;
               [Stable] struct S { [MinVersion=2] int32 x; };
               """),
        Change('bar/bar.mojom',
               old="""\
               module bar;
               import "foo/foo.mojom";
               [Stable] struct T { foo.S s; };
               """,
               new="""\
               module bar;
               import "foo/foo.mojom";
               [Stable] struct T { foo.S s; [MinVersion=1] int32 y; };
               """)
    ])

  def testWithMovedDefinition(self):
    """If a definition moves from one file to another, we should still be able
    to check compatibility accurately."""
    self.assertBackwardCompatible([
        Change('foo/foo.mojom',
               old="""\
               module foo;
               [Stable] struct S {};
               """,
               new="""\
               module foo;
               """),
        Change('bar/bar.mojom',
               old="""\
               module bar;
               import "foo/foo.mojom";
               [Stable] struct T { foo.S s; };
               """,
               new="""\
               module bar;
               import "foo/foo.mojom";
               [Stable, RenamedFrom="foo.S"] struct S {
                 [MinVersion=2] int32 x;
               };
               [Stable] struct T { S s; [MinVersion=1] int32 y; };
               """)
    ])
    self.assertNotBackwardCompatible([
        Change('foo/foo.mojom',
               old="""\
               module foo;
               [Stable] struct S {};
               """,
               new="""\
               module foo;
               """),
        Change('bar/bar.mojom',
               old="""\
               module bar;
               import "foo/foo.mojom";
               [Stable] struct T { foo.S s; };
               """,
               new="""\
               module bar;
               import "foo/foo.mojom";
               [Stable, RenamedFrom="foo.S"] struct S { int32 x; };
               [Stable] struct T { S s; [MinVersion=1] int32 y; };
               """)
    ])

  def testWithUnmodifiedImport(self):
    """Unchanged files in the filesystem are still parsed by the compatibility
    checking tool if they're imported by a changed file."""
    self.assertBackwardCompatible([
        UnchangedFile('foo/foo.mojom', 'module foo; [Stable] struct S {};'),
        Change('bar/bar.mojom',
               old="""\
               module bar;
               import "foo/foo.mojom";
               [Stable] struct T { foo.S s; };
               """,
               new="""\
               module bar;
               import "foo/foo.mojom";
               [Stable] struct T { foo.S s; [MinVersion=1] int32 x; };
               """)
    ])
    self.assertNotBackwardCompatible([
        UnchangedFile('foo/foo.mojom', 'module foo; [Stable] struct S {};'),
        Change('bar/bar.mojom',
               old="""\
               module bar;
               import "foo/foo.mojom";
               [Stable] struct T { foo.S s; };
               """,
               new="""\
               module bar;
               import "foo/foo.mojom";
               [Stable] struct T { foo.S s; int32 x; };
               """)
    ])

  def testWithPartialImport(self):
    """The compatibility checking tool correctly parses imports with partial
    paths."""
    self.assertBackwardCompatible([
        UnchangedFile('foo/foo.mojom', 'module foo; [Stable] struct S {};'),
        Change('foo/bar.mojom',
               old="""\
               module bar;
               import "foo/foo.mojom";
               [Stable] struct T { foo.S s; };
               """,
               new="""\
               module bar;
               import "foo.mojom";
               [Stable] struct T { foo.S s; };
               """)
    ])
    self.assertBackwardCompatible([
        UnchangedFile('foo/foo.mojom', 'module foo; [Stable] struct S {};'),
        Change('foo/bar.mojom',
               old="""\
               module bar;
               import "foo.mojom";
               [Stable] struct T { foo.S s; };
               """,
               new="""\
               module bar;
               import "foo/foo.mojom";
               [Stable] struct T { foo.S s; };
               """)
    ])
    self.assertNotBackwardCompatible([
        UnchangedFile('foo/foo.mojom', 'module foo; [Stable] struct S {};'),
        Change('bar/bar.mojom',
               old="""\
               module bar;
               import "foo/foo.mojom";
               [Stable] struct T { foo.S s; };
               """,
               new="""\
               module bar;
               import "foo.mojom";
               [Stable] struct T { foo.S s; };
               """)
    ])
    self.assertNotBackwardCompatible([
        UnchangedFile('foo/foo.mojom', 'module foo; [Stable] struct S {};'),
        Change('bar/bar.mojom',
               old="""\
               module bar;
               import "foo.mojom";
               [Stable] struct T { foo.S s; };
               """,
               new="""\
               module bar;
               import "foo/foo.mojom";
               [Stable] struct T { foo.S s; };
               """)
    ])

  def testNewEnumDefault(self):
    # Should be backwards compatible since it does not affect the wire format.
    # This specific case also checks that the backwards compatibility checker
    # does not throw an error due to the older version of the enum not
    # specifying [Default].
    self.assertBackwardCompatible([
        Change('foo/foo.mojom',
               old='[Extensible] enum E { One };',
               new='[Extensible] enum E { [Default] One };')
    ])
    self.assertBackwardCompatible([
        Change('foo/foo.mojom',
               old='[Extensible] enum E { [Default] One, Two, };',
               new='[Extensible] enum E { One, [Default] Two, };')
    ])
|
0 | repos/libcamera/utils/ipc/mojo/public/tools | repos/libcamera/utils/ipc/mojo/public/tools/mojom/feature_unittest.py | # Copyright 2023 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from mojom_parser_test_case import MojomParserTestCase
class FeatureTest(MojomParserTestCase):
  """Tests feature parsing behavior."""

  def testFeatureOff(self):
    """Verifies parsing of a feature whose default_state is false."""
    types = self.ExtractTypes("""
      // e.g. BASE_DECLARE_FEATURE(kFeature);
      [AttributeOne=ValueOne]
      feature kFeature {
        // BASE_FEATURE(kFeature,"MyFeature",
        // base::FEATURE_DISABLED_BY_DEFAULT);
        const string name = "MyFeature";
        const bool default_state = false;
      };
    """)
    self.assertEqual('name', types['kFeature'].constants[0].mojom_name)
    self.assertEqual('"MyFeature"', types['kFeature'].constants[0].value)
    self.assertEqual('default_state', types['kFeature'].constants[1].mojom_name)
    self.assertEqual('false', types['kFeature'].constants[1].value)

  def testFeatureOn(self):
    """Verifies parsing of a feature whose default_state is true."""
    types = self.ExtractTypes("""
      // e.g. BASE_DECLARE_FEATURE(kFeature);
      feature kFeature {
        // BASE_FEATURE(kFeature,"MyFeature",
        // base::FEATURE_ENABLED_BY_DEFAULT);
        const string name = "MyFeature";
        const bool default_state = true;
      };
    """)
    self.assertEqual('name', types['kFeature'].constants[0].mojom_name)
    self.assertEqual('"MyFeature"', types['kFeature'].constants[0].value)
    self.assertEqual('default_state', types['kFeature'].constants[1].mojom_name)
    self.assertEqual('true', types['kFeature'].constants[1].value)

  def testFeatureWeakKeyword(self):
    """Verifies that `feature` is a weak keyword."""
    # `feature` may still be used as a field, parameter and response name.
    types = self.ExtractTypes("""
      // e.g. BASE_DECLARE_FEATURE(kFeature);
      [AttributeOne=ValueOne]
      feature kFeature {
        // BASE_FEATURE(kFeature,"MyFeature",
        // base::FEATURE_DISABLED_BY_DEFAULT);
        const string name = "MyFeature";
        const bool default_state = false;
      };
      struct MyStruct {
        bool feature = true;
      };
      interface InterfaceName {
        Method(string feature) => (int32 feature);
      };
    """)
    self.assertEqual('name', types['kFeature'].constants[0].mojom_name)
    self.assertEqual('"MyFeature"', types['kFeature'].constants[0].value)
    self.assertEqual('default_state', types['kFeature'].constants[1].mojom_name)
    self.assertEqual('false', types['kFeature'].constants[1].value)

  def testFeatureAttributesAreFeatures(self):
    """Verifies that feature values in attributes are really feature types."""
    a_mojom = 'a.mojom'
    self.WriteFile(
        a_mojom, 'module a;'
        'feature F { const string name = "f";'
        'const bool default_state = false; };')
    b_mojom = 'b.mojom'
    self.WriteFile(
        b_mojom, 'module b;'
        'import "a.mojom";'
        'feature G'
        '{const string name = "g"; const bool default_state = false;};'
        '[Attri=a.F] interface Foo { Foo(); };'
        '[Boink=G] interface Bar {};')
    self.ParseMojoms([a_mojom, b_mojom])
    b = self.LoadModule(b_mojom)
    self.assertEqual(b.interfaces[0].attributes['Attri'].mojom_name, 'F')
    self.assertEqual(b.interfaces[1].attributes['Boink'].mojom_name, 'G')
|
0 | repos/libcamera/utils/ipc/mojo/public/tools | repos/libcamera/utils/ipc/mojo/public/tools/mojom/version_compatibility_unittest.py | # Copyright 2020 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from mojom.generate import module
from mojom_parser_test_case import MojomParserTestCase
class VersionCompatibilityTest(MojomParserTestCase):
"""Tests covering compatibility between two versions of the same mojom type
definition. This coverage ensures that we can reliably detect unsafe changes
to definitions that are expected to tolerate version skew in production
environments."""
  def _GetTypeCompatibilityMap(self, old_mojom, new_mojom):
    """Helper to support the implementation of assertBackwardCompatible and
    assertNotBackwardCompatible.

    Returns a dict mapping each shared type name to a bool indicating whether
    its new definition is backward-compatible with its old one.
    """
    old = self.ExtractTypes(old_mojom)
    new = self.ExtractTypes(new_mojom)
    self.assertEqual(set(old.keys()), set(new.keys()),
                     'Old and new test mojoms should use the same type names.')
    checker = module.BackwardCompatibilityChecker()
    compatibility_map = {}
    for name in old:
      try:
        compatibility_map[name] = checker.IsBackwardCompatible(
            new[name], old[name])
      except Exception:
        # A checker crash is treated the same as an incompatible change.
        compatibility_map[name] = False
    return compatibility_map
  def assertBackwardCompatible(self, old_mojom, new_mojom):
    """Fails unless every type shared by the two mojoms is deemed
    backward-compatible by the checker."""
    compatibility_map = self._GetTypeCompatibilityMap(old_mojom, new_mojom)
    for name, compatible in compatibility_map.items():
      if not compatible:
        raise AssertionError(
            'Given the old mojom:\n\n  %s\n\nand the new mojom:\n\n  %s\n\n'
            'The new definition of %s should pass a backward-compatibiity '
            'check, but it does not.' % (old_mojom, new_mojom, name))
  def assertNotBackwardCompatible(self, old_mojom, new_mojom):
    """Fails unless at least one shared type is deemed incompatible."""
    compatibility_map = self._GetTypeCompatibilityMap(old_mojom, new_mojom)
    if all(compatibility_map.values()):
      raise AssertionError(
          'Given the old mojom:\n\n  %s\n\nand the new mojom:\n\n  %s\n\n'
          'The new mojom should fail a backward-compatibility check, but it '
          'does not.' % (old_mojom, new_mojom))
def testNewNonExtensibleEnumValue(self):
"""Adding a value to a non-extensible enum breaks backward-compatibility."""
self.assertNotBackwardCompatible('enum E { kFoo, kBar };',
'enum E { kFoo, kBar, kBaz };')
def testNewNonExtensibleEnumValueWithMinVersion(self):
"""Adding a value to a non-extensible enum breaks backward-compatibility,
even with a new [MinVersion] specified for the value."""
self.assertNotBackwardCompatible(
'enum E { kFoo, kBar };', 'enum E { kFoo, kBar, [MinVersion=1] kBaz };')
def testNewValueInExistingVersion(self):
"""Adding a value to an existing version is not allowed, even if the old
enum was marked [Extensible]. Note that it is irrelevant whether or not the
new enum is marked [Extensible]."""
self.assertNotBackwardCompatible(
'[Extensible] enum E { [Default] kFoo, kBar };',
'enum E { kFoo, kBar, kBaz };')
self.assertNotBackwardCompatible(
'[Extensible] enum E { [Default] kFoo, kBar };',
'[Extensible] enum E { [Default] kFoo, kBar, kBaz };')
self.assertNotBackwardCompatible(
'[Extensible] enum E { [Default] kFoo, [MinVersion=1] kBar };',
'enum E { kFoo, [MinVersion=1] kBar, [MinVersion=1] kBaz };')
def testEnumValueRemoval(self):
"""Removal of an enum value is never valid even for [Extensible] enums."""
self.assertNotBackwardCompatible('enum E { kFoo, kBar };',
'enum E { kFoo };')
self.assertNotBackwardCompatible(
'[Extensible] enum E { [Default] kFoo, kBar };',
'[Extensible] enum E { [Default] kFoo };')
self.assertNotBackwardCompatible(
'[Extensible] enum E { [Default] kA, [MinVersion=1] kB };',
'[Extensible] enum E { [Default] kA, };')
self.assertNotBackwardCompatible(
"""[Extensible] enum E {
[Default] kA,
[MinVersion=1] kB,
[MinVersion=1] kZ };""",
'[Extensible] enum E { [Default] kA, [MinVersion=1] kB };')
def testNewExtensibleEnumValueWithMinVersion(self):
"""Adding a new and properly [MinVersion]'d value to an [Extensible] enum
is a backward-compatible change. Note that it is irrelevant whether or not
the new enum is marked [Extensible]."""
self.assertBackwardCompatible('[Extensible] enum E { [Default] kA, kB };',
'enum E { kA, kB, [MinVersion=1] kC };')
self.assertBackwardCompatible(
'[Extensible] enum E { [Default] kA, kB };',
'[Extensible] enum E { [Default] kA, kB, [MinVersion=1] kC };')
self.assertBackwardCompatible(
'[Extensible] enum E { [Default] kA, [MinVersion=1] kB };',
"""[Extensible] enum E {
[Default] kA,
[MinVersion=1] kB,
[MinVersion=2] kC };""")
def testRenameEnumValue(self):
"""Renaming an enum value does not affect backward-compatibility. Only
numeric value is relevant."""
self.assertBackwardCompatible('enum E { kA, kB };', 'enum E { kX, kY };')
def testAddEnumValueAlias(self):
"""Adding new enum fields does not affect backward-compatibility if it does
not introduce any new numeric values."""
self.assertBackwardCompatible(
'enum E { kA, kB };', 'enum E { kA, kB, kC = kA, kD = 1, kE = kD };')
def testEnumIdentity(self):
"""An unchanged enum is obviously backward-compatible."""
self.assertBackwardCompatible('enum E { kA, kB, kC };',
'enum E { kA, kB, kC };')
def testNewStructFieldUnversioned(self):
"""Adding a new field to a struct without a new (i.e. higher than any
existing version) [MinVersion] tag breaks backward-compatibility."""
self.assertNotBackwardCompatible('struct S { string a; };',
'struct S { string a; string b; };')
def testStructFieldRemoval(self):
"""Removing a field from a struct breaks backward-compatibility."""
self.assertNotBackwardCompatible('struct S { string a; string b; };',
'struct S { string a; };')
def testStructFieldTypeChange(self):
"""Changing the type of an existing field always breaks
backward-compatibility."""
self.assertNotBackwardCompatible('struct S { string a; };',
'struct S { array<int32> a; };')
def testStructFieldBecomingOptional(self):
"""Changing a field from non-optional to optional breaks
backward-compatibility."""
self.assertNotBackwardCompatible('struct S { string a; };',
'struct S { string? a; };')
def testStructFieldBecomingNonOptional(self):
"""Changing a field from optional to non-optional breaks
backward-compatibility."""
self.assertNotBackwardCompatible('struct S { string? a; };',
'struct S { string a; };')
def testStructFieldOrderChange(self):
"""Changing the order of fields breaks backward-compatibility."""
self.assertNotBackwardCompatible('struct S { string a; bool b; };',
'struct S { bool b; string a; };')
self.assertNotBackwardCompatible('struct S { string a@0; bool b@1; };',
'struct S { string a@1; bool b@0; };')
def testStructFieldMinVersionChange(self):
"""Changing the MinVersion of a field breaks backward-compatibility."""
self.assertNotBackwardCompatible(
'struct S { string a; [MinVersion=1] string? b; };',
'struct S { string a; [MinVersion=2] string? b; };')
def testStructFieldTypeChange(self):
"""If a struct field's own type definition changes, the containing struct
is backward-compatible if and only if the field type's change is
backward-compatible."""
self.assertBackwardCompatible(
'struct S {}; struct T { S s; };',
'struct S { [MinVersion=1] int32 x; }; struct T { S s; };')
self.assertBackwardCompatible(
'[Extensible] enum E { [Default] kA }; struct S { E e; };',
"""[Extensible] enum E {
[Default] kA,
[MinVersion=1] kB };
struct S { E e; };""")
self.assertNotBackwardCompatible(
'struct S {}; struct T { S s; };',
'struct S { int32 x; }; struct T { S s; };')
self.assertNotBackwardCompatible(
'[Extensible] enum E { [Default] kA }; struct S { E e; };',
'[Extensible] enum E { [Default] kA, kB }; struct S { E e; };')
def testNewStructFieldWithInvalidMinVersion(self):
"""Adding a new field using an existing MinVersion breaks backward-
compatibility."""
self.assertNotBackwardCompatible(
"""\
struct S {
string a;
[MinVersion=1] string? b;
};
""", """\
struct S {
string a;
[MinVersion=1] string? b;
[MinVersion=1] string? c;
};""")
def testNewStructFieldWithValidMinVersion(self):
"""Adding a new field is safe if tagged with a MinVersion greater than any
previously used MinVersion in the struct."""
self.assertBackwardCompatible(
'struct S { int32 a; };',
'struct S { int32 a; [MinVersion=1] int32 b; };')
self.assertBackwardCompatible(
'struct S { int32 a; [MinVersion=1] int32 b; };',
'struct S { int32 a; [MinVersion=1] int32 b; [MinVersion=2] bool c; };')
def testNewStructFieldNullableReference(self):
"""Adding a new nullable reference-typed field is fine if versioned
properly."""
self.assertBackwardCompatible(
'struct S { int32 a; };',
'struct S { int32 a; [MinVersion=1] string? b; };')
def testStructFieldRename(self):
"""Renaming a field has no effect on backward-compatibility."""
self.assertBackwardCompatible('struct S { int32 x; bool b; };',
'struct S { int32 a; bool b; };')
def testStructFieldReorderWithExplicitOrdinals(self):
"""Reordering fields has no effect on backward-compatibility when field
ordinals are explicitly labeled and remain unchanged."""
self.assertBackwardCompatible('struct S { bool b@1; int32 a@0; };',
'struct S { int32 a@0; bool b@1; };')
def testNewUnionFieldUnversioned(self):
"""Adding a new field to a union without a new (i.e. higher than any
existing version) [MinVersion] tag breaks backward-compatibility."""
self.assertNotBackwardCompatible('union U { string a; };',
'union U { string a; string b; };')
def testUnionFieldRemoval(self):
"""Removing a field from a union breaks backward-compatibility."""
self.assertNotBackwardCompatible('union U { string a; string b; };',
'union U { string a; };')
def testUnionFieldTypeChange(self):
"""Changing the type of an existing field always breaks
backward-compatibility."""
self.assertNotBackwardCompatible('union U { string a; };',
'union U { array<int32> a; };')
def testUnionFieldBecomingOptional(self):
"""Changing a field from non-optional to optional breaks
backward-compatibility."""
self.assertNotBackwardCompatible('union U { string a; };',
'union U { string? a; };')
  def testFieldNestedTypeChanged(self):
    """Changing the definition of a nested type within a field (such as an array
    element or interface endpoint type) should only break backward-compatibility
    if the changes to that type are not backward-compatible."""
    # S gains a properly versioned field, so array<S> remains compatible.
    self.assertBackwardCompatible(
        """\
        struct S { string a; };
        struct T { array<S> ss; };
        """, """\
        struct S {
          string a;
          [MinVersion=1] string? b;
        };
        struct T { array<S> ss; };
        """)
    # F gains a properly versioned method, so pending_receiver<F> remains
    # compatible.
    self.assertBackwardCompatible(
        """\
        interface F { Do(); };
        struct S { pending_receiver<F> r; };
        """, """\
        interface F {
          Do();
          [MinVersion=1] Say();
        };
        struct S { pending_receiver<F> r; };
        """)
  def testRecursiveTypeChange(self):
    """Recursive types do not break the compatibility checker."""
    # S refers to itself via array<S>; the checker must terminate while
    # validating the versioned addition of 'b' rather than recurse forever.
    self.assertBackwardCompatible(
        """\
        struct S {
          string a;
          array<S> others;
        };""", """\
        struct S {
          string a;
          array<S> others;
          [MinVersion=1] string? b;
        };""")
def testUnionFieldBecomingNonOptional(self):
"""Changing a field from optional to non-optional breaks
backward-compatibility."""
self.assertNotBackwardCompatible('union U { string? a; };',
'union U { string a; };')
def testUnionFieldOrderChange(self):
"""Changing the order of fields breaks backward-compatibility."""
self.assertNotBackwardCompatible('union U { string a; bool b; };',
'union U { bool b; string a; };')
self.assertNotBackwardCompatible('union U { string a@0; bool b@1; };',
'union U { string a@1; bool b@0; };')
def testUnionFieldMinVersionChange(self):
"""Changing the MinVersion of a field breaks backward-compatibility."""
self.assertNotBackwardCompatible(
'union U { string a; [MinVersion=1] string b; };',
'union U { string a; [MinVersion=2] string b; };')
def testUnionFieldTypeChange(self):
"""If a union field's own type definition changes, the containing union
is backward-compatible if and only if the field type's change is
backward-compatible."""
self.assertBackwardCompatible(
'struct S {}; union U { S s; };',
'struct S { [MinVersion=1] int32 x; }; union U { S s; };')
self.assertBackwardCompatible(
'[Extensible] enum E { [Default] kA }; union U { E e; };',
"""[Extensible] enum E {
[Default] kA,
[MinVersion=1] kB };
union U { E e; };""")
self.assertNotBackwardCompatible(
'struct S {}; union U { S s; };',
'struct S { int32 x; }; union U { S s; };')
self.assertNotBackwardCompatible(
'[Extensible] enum E { [Default] kA }; union U { E e; };',
'[Extensible] enum E { [Default] kA, kB }; union U { E e; };')
  def testNewUnionFieldWithInvalidMinVersion(self):
    """Adding a new field using an existing MinVersion breaks backward-
    compatibility."""
    # 'c' reuses MinVersion=1 instead of introducing a new version, so the
    # addition must be rejected.
    self.assertNotBackwardCompatible(
        """\
        union U {
          string a;
          [MinVersion=1] string b;
        };
        """, """\
        union U {
          string a;
          [MinVersion=1] string b;
          [MinVersion=1] string c;
        };""")
def testNewUnionFieldWithValidMinVersion(self):
"""Adding a new field is safe if tagged with a MinVersion greater than any
previously used MinVersion in the union."""
self.assertBackwardCompatible(
'union U { int32 a; };',
'union U { int32 a; [MinVersion=1] int32 b; };')
self.assertBackwardCompatible(
'union U { int32 a; [MinVersion=1] int32 b; };',
'union U { int32 a; [MinVersion=1] int32 b; [MinVersion=2] bool c; };')
def testUnionFieldRename(self):
"""Renaming a field has no effect on backward-compatibility."""
self.assertBackwardCompatible('union U { int32 x; bool b; };',
'union U { int32 a; bool b; };')
def testUnionFieldReorderWithExplicitOrdinals(self):
"""Reordering fields has no effect on backward-compatibility when field
ordinals are explicitly labeled and remain unchanged."""
self.assertBackwardCompatible('union U { bool b@1; int32 a@0; };',
'union U { int32 a@0; bool b@1; };')
def testNewInterfaceMethodUnversioned(self):
"""Adding a new method to an interface without a new (i.e. higher than any
existing version) [MinVersion] tag breaks backward-compatibility."""
self.assertNotBackwardCompatible('interface F { A(); };',
'interface F { A(); B(); };')
def testInterfaceMethodRemoval(self):
"""Removing a method from an interface breaks backward-compatibility."""
self.assertNotBackwardCompatible('interface F { A(); B(); };',
'interface F { A(); };')
  def testInterfaceMethodParamsChanged(self):
    """Changes to the parameter list are only backward-compatible if they meet
    backward-compatibility requirements of an equivalent struct definition."""
    # Adding, retyping, or version-reusing parameters is incompatible.
    self.assertNotBackwardCompatible('interface F { A(); };',
                                     'interface F { A(int32 x); };')
    self.assertNotBackwardCompatible('interface F { A(int32 x); };',
                                     'interface F { A(bool x); };')
    self.assertNotBackwardCompatible(
        'interface F { A(int32 x, [MinVersion=1] string? s); };', """\
        interface F {
          A(int32 x, [MinVersion=1] string? s, [MinVersion=1] int32 y);
        };""")
    # Renames and properly versioned additions are compatible.
    self.assertBackwardCompatible('interface F { A(int32 x); };',
                                  'interface F { A(int32 a); };')
    self.assertBackwardCompatible(
        'interface F { A(int32 x); };',
        'interface F { A(int32 x, [MinVersion=1] string? s); };')
    self.assertBackwardCompatible(
        'struct S {}; interface F { A(S s); };',
        'struct S { [MinVersion=1] int32 x; }; interface F { A(S s); };')
    # Swapping to a structurally identical type is fine; swapping to a
    # structurally different one is not.
    self.assertBackwardCompatible(
        'struct S {}; struct T {}; interface F { A(S s); };',
        'struct S {}; struct T {}; interface F { A(T s); };')
    self.assertNotBackwardCompatible(
        'struct S {}; struct T { int32 x; }; interface F { A(S s); };',
        'struct S {}; struct T { int32 x; }; interface F { A(T t); };')
def testInterfaceMethodReplyAdded(self):
"""Adding a reply to a message breaks backward-compatibilty."""
self.assertNotBackwardCompatible('interface F { A(); };',
'interface F { A() => (); };')
def testInterfaceMethodReplyRemoved(self):
"""Removing a reply from a message breaks backward-compatibility."""
self.assertNotBackwardCompatible('interface F { A() => (); };',
'interface F { A(); };')
def testInterfaceMethodReplyParamsChanged(self):
"""Similar to request parameters, a change to reply parameters is considered
backward-compatible if it meets the same backward-compatibility
requirements imposed on equivalent struct changes."""
self.assertNotBackwardCompatible('interface F { A() => (); };',
'interface F { A() => (int32 x); };')
self.assertNotBackwardCompatible('interface F { A() => (int32 x); };',
'interface F { A() => (); };')
self.assertNotBackwardCompatible('interface F { A() => (bool x); };',
'interface F { A() => (int32 x); };')
self.assertBackwardCompatible('interface F { A() => (int32 a); };',
'interface F { A() => (int32 x); };')
self.assertBackwardCompatible(
'interface F { A() => (int32 x); };',
'interface F { A() => (int32 x, [MinVersion] string? s); };')
  def testNewInterfaceMethodWithInvalidMinVersion(self):
    """Adding a new method to an existing version is not backward-compatible."""
    # C() reuses MinVersion=1 instead of introducing a new version.
    self.assertNotBackwardCompatible(
        """\
        interface F {
          A();
          [MinVersion=1] B();
        };
        """, """\
        interface F {
          A();
          [MinVersion=1] B();
          [MinVersion=1] C();
        };
        """)
def testNewInterfaceMethodWithValidMinVersion(self):
"""Adding a new method is fine as long as its MinVersion exceeds that of any
method on the old interface definition."""
self.assertBackwardCompatible('interface F { A(); };',
'interface F { A(); [MinVersion=1] B(); };')
|
0 | repos/libcamera/utils/ipc/mojo/public/tools | repos/libcamera/utils/ipc/mojo/public/tools/mojom/const_unittest.py | # Copyright 2020 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from mojom_parser_test_case import MojomParserTestCase
from mojom.generate import module as mojom
class ConstTest(MojomParserTestCase):
  """Tests constant parsing behavior."""

  def _ParseSingle(self, filename, contents):
    """Writes, parses, and loads a single mojom file."""
    self.WriteFile(filename, contents)
    self.ParseMojoms([filename])
    return self.LoadModule(filename)

  def _CheckConstant(self, constant, name, value):
    """Asserts a parsed constant's name and resolved value."""
    self.assertEqual(name, constant.mojom_name)
    self.assertEqual(value, constant.value)

  def testLiteralInt(self):
    a = self._ParseSingle('a.mojom', 'const int32 k = 42;')
    self.assertEqual(1, len(a.constants))
    self._CheckConstant(a.constants[0], 'k', '42')

  def testLiteralFloat(self):
    a = self._ParseSingle('a.mojom', 'const float k = 42.5;')
    self.assertEqual(1, len(a.constants))
    self._CheckConstant(a.constants[0], 'k', '42.5')

  def testLiteralString(self):
    a = self._ParseSingle('a.mojom', 'const string k = "woot";')
    self.assertEqual(1, len(a.constants))
    self._CheckConstant(a.constants[0], 'k', '"woot"')

  def testEnumConstant(self):
    a_mojom = 'a.mojom'
    self.WriteFile(a_mojom, 'module a; enum E { kA = 41, kB };')
    b_mojom = 'b.mojom'
    self.WriteFile(
        b_mojom, """\
        import "a.mojom";
        const a.E kE1 = a.E.kB;
        // We also allow value names to be unqualified, implying scope from the
        // constant's type.
        const a.E kE2 = kB;
        """)
    self.ParseMojoms([a_mojom, b_mojom])
    a = self.LoadModule(a_mojom)
    b = self.LoadModule(b_mojom)
    self.assertEqual(1, len(a.enums))
    self.assertEqual('E', a.enums[0].mojom_name)
    self.assertEqual(2, len(b.constants))
    # Both constants resolve to E.kB, whose implicit numeric value is 42.
    kb_field = a.enums[0].fields[1]
    self.assertEqual('kE1', b.constants[0].mojom_name)
    self.assertEqual(a.enums[0], b.constants[0].kind)
    self.assertEqual(kb_field, b.constants[0].value.field)
    self.assertEqual(42, b.constants[0].value.field.numeric_value)
    self.assertEqual('kE2', b.constants[1].mojom_name)
    self.assertEqual(kb_field, b.constants[1].value.field)
    self.assertEqual(42, b.constants[1].value.field.numeric_value)

  def testConstantReference(self):
    a = self._ParseSingle('a.mojom', 'const int32 kA = 42; const int32 kB = kA;')
    self.assertEqual(2, len(a.constants))
    self._CheckConstant(a.constants[0], 'kA', '42')
    self._CheckConstant(a.constants[1], 'kB', '42')

  def testImportedConstantReference(self):
    # A constant initialized from another mojom's constant resolves to the
    # imported constant's value.
    a_mojom = 'a.mojom'
    b_mojom = 'b.mojom'
    self.WriteFile(a_mojom, 'const int32 kA = 42;')
    self.WriteFile(b_mojom, 'import "a.mojom"; const int32 kB = kA;')
    self.ParseMojoms([a_mojom, b_mojom])
    a = self.LoadModule(a_mojom)
    b = self.LoadModule(b_mojom)
    self.assertEqual(1, len(a.constants))
    self.assertEqual(1, len(b.constants))
    self._CheckConstant(a.constants[0], 'kA', '42')
    self._CheckConstant(b.constants[0], 'kB', '42')
|
0 | repos/libcamera/utils/ipc/mojo/public/tools | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom_parser_test_case.py | # Copyright 2020 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import os.path
import shutil
import tempfile
import unittest
import mojom_parser
from mojom.generate import module
class MojomParserTestCase(unittest.TestCase):
  """Tests covering the behavior defined by the main mojom_parser.py script.
  This includes behavior around input and output path manipulation, dependency
  resolution, and module serialization and deserialization."""
  def __init__(self, method_name):
    super().__init__(method_name)
    # Per-test scratch directory; created in setUp() and removed in tearDown().
    self._temp_dir = None
  def setUp(self):
    self._temp_dir = tempfile.mkdtemp()
  def tearDown(self):
    shutil.rmtree(self._temp_dir)
    self._temp_dir = None
  def GetPath(self, path):
    """Returns the absolute path for a path relative to the scratch dir."""
    assert not os.path.isabs(path)
    return os.path.join(self._temp_dir, path)
  def GetModulePath(self, path):
    """Returns the absolute path of the compiled '-module' output for a
    relative mojom path, under the 'out' subdirectory."""
    assert not os.path.isabs(path)
    return os.path.join(self.GetPath('out'), path) + '-module'
  def WriteFile(self, path, contents):
    """Writes contents at the given relative path, creating parent
    directories as needed."""
    full_path = self.GetPath(path)
    dirname = os.path.dirname(full_path)
    if not os.path.exists(dirname):
      os.makedirs(dirname)
    with open(full_path, 'w') as f:
      f.write(contents)
  def LoadModule(self, mojom_path):
    """Deserializes the compiled Module previously emitted for mojom_path."""
    with open(self.GetModulePath(mojom_path), 'rb') as f:
      return module.Module.Load(f)
  def ParseMojoms(self, mojoms, metadata=None):
    """Parse all input mojoms relative the temp dir."""
    out_dir = self.GetPath('out')
    # The out dir doubles as an input root so pre-built modules are found.
    args = [
        '--input-root', self._temp_dir, '--input-root', out_dir,
        '--output-root', out_dir, '--mojoms'
    ] + list(map(lambda mojom: os.path.join(self._temp_dir, mojom), mojoms))
    if metadata:
      args.extend(['--check-imports', self.GetPath(metadata)])
    mojom_parser.Run(args)
  def ExtractTypes(self, mojom):
    """Parses an inline mojom snippet and returns its top-level definitions
    keyed by name."""
    filename = 'test.mojom'
    self.WriteFile(filename, mojom)
    self.ParseMojoms([filename])
    m = self.LoadModule(filename)
    definitions = {}
    for kinds in (m.enums, m.structs, m.unions, m.interfaces, m.features):
      for kind in kinds:
        definitions[kind.mojom_name] = kind
    return definitions
|
0 | repos/libcamera/utils/ipc/mojo/public/tools | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom_parser.py | #!/usr/bin/env python3
# Copyright 2020 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Parses mojom IDL files.
This script parses one or more input mojom files and produces corresponding
module files fully describing the definitions contained within each mojom. The
module data is pickled and can be easily consumed by other tools to, e.g.,
generate usable language bindings.
"""
import argparse
import builtins
import codecs
import errno
import json
import logging
import multiprocessing
import os
import os.path
import sys
import traceback
from collections import defaultdict
from mojom.generate import module
from mojom.generate import translate
from mojom.parse import parser
from mojom.parse import conditional_features
# Disable this for easier debugging.
_ENABLE_MULTIPROCESSING = True

# https://docs.python.org/3/library/multiprocessing.html#:~:text=bpo-33725
# Force the fork start method on macOS (see the linked CPython issue); later
# code keys its behavior off whether fork — and thus inherited globals — is
# available.
if __name__ == '__main__' and sys.platform == 'darwin':
  multiprocessing.set_start_method('fork')
_MULTIPROCESSING_USES_FORK = multiprocessing.get_start_method() == 'fork'
def _ResolveRelativeImportPath(path, roots):
  """Attempts to resolve a relative import path against a set of possible roots.

  Args:
    path: The relative import path to resolve.
    roots: A list of absolute paths which will be checked in descending length
        order for a match against path.

  Returns:
    A normalized absolute path combining one of the roots with the input path if
    and only if such a file exists.

  Raises:
    ValueError: The path could not be resolved against any of the given roots.
  """
  # Longer roots are tried first so the most specific match wins.
  for root in reversed(sorted(roots, key=len)):
    candidate = os.path.join(root, path)
    if not os.path.isfile(candidate):
      continue
    return os.path.normcase(os.path.normpath(candidate))
  raise ValueError('"%s" does not exist in any of %s' % (path, roots))
def RebaseAbsolutePath(path, roots):
  """Rewrites an absolute file path as relative to an absolute directory path in
  roots.

  Args:
    path: The absolute path of an existing file.
    roots: A list of absolute directory paths. The given path argument must fall
        within one of these directories.

  Returns:
    A path equivalent to the input path, but relative to one of the provided
    roots. If the input path falls within multiple roots, the longest root is
    chosen (and thus the shortest relative path is returned). Returned paths
    always use forward slashes as a separator to mirror mojom import syntax.

  Raises:
    ValueError: The given path does not fall within any of the listed roots.
  """
  assert os.path.isabs(path)
  assert os.path.isfile(path)
  assert all(map(os.path.isabs, roots))
  # Longest roots first, so the shortest relative path is produced.
  sorted_roots = list(reversed(sorted(roots, key=len)))

  def rebase_onto(target, root):
    # Walk upward from the file, accumulating components, until the current
    # ancestor is exactly `root`; None when `root` is never reached.
    ancestor, accumulated = os.path.split(target)
    while ancestor != root:
      ancestor, component = os.path.split(ancestor)
      if not component:
        return None
      accumulated = os.path.join(component, accumulated)
    return accumulated

  for root in sorted_roots:
    rebased = rebase_onto(path, root)
    if rebased:
      # TODO(crbug.com/953884): Use pathlib for this kind of thing once we're
      # fully migrated to Python 3.
      return rebased.replace('\\', '/')
  raise ValueError('%s does not fall within any of %s' % (path, sorted_roots))
def _GetModuleFilename(mojom_filename):
  """Returns the filename of the serialized module for a given mojom file."""
  return '%s-module' % mojom_filename
def _EnsureInputLoaded(mojom_abspath, module_path, abs_paths, asts,
                       dependencies, loaded_modules, module_metadata):
  """Recursively ensures that a module and its dependencies are loaded.

  Args:
    mojom_abspath: An absolute file path pointing to a mojom file to load.
    module_path: The relative path used to identify mojom_abspath.
    abs_paths: A mapping from module paths to absolute file paths for all
        inputs given to this execution of the script.
    asts: A map from each input mojom's absolute path to its parsed AST.
    dependencies: A mapping of which input mojoms depend on each other, indexed
        by absolute file path.
    loaded_modules: A mapping of all modules loaded so far, including non-input
        modules that were pulled in as transitive dependencies of the inputs.
    module_metadata: Metadata to be attached to every module loaded by this
        helper.

  Returns:
    None

    On return, loaded_modules will be populated with the loaded input mojom's
    Module as well as the Modules of all of its transitive dependencies."""
  if mojom_abspath in loaded_modules:
    # Already done.
    return
  # Depth-first: every dependency must be translated before this module,
  # since translation below needs the dependencies' Module objects.
  for dep_abspath, dep_path in sorted(dependencies[mojom_abspath]):
    if dep_abspath not in loaded_modules:
      _EnsureInputLoaded(dep_abspath, dep_path, abs_paths, asts, dependencies,
                         loaded_modules, module_metadata)
  # Map each import's relative filename to its (now loaded) Module.
  imports = {}
  for imp in asts[mojom_abspath].import_list:
    path = imp.import_filename
    imports[path] = loaded_modules[abs_paths[path]]
  loaded_modules[mojom_abspath] = translate.OrderedModule(
      asts[mojom_abspath], module_path, imports)
  loaded_modules[mojom_abspath].metadata = dict(module_metadata)
def _CollectAllowedImportsFromBuildMetadata(build_metadata_filename):
  """Walks the tree of build metadata JSON files rooted at the given file and
  returns the set of mojom source paths (normcased absolute paths) that inputs
  may legally import."""
  allowed_imports = set()
  visited = set()
  pending = [build_metadata_filename]
  while pending:
    metadata_filename = pending.pop()
    if metadata_filename in visited:
      continue
    visited.add(metadata_filename)
    # Paths in a metadata file are relative to that file's own directory.
    metadata_dir = os.path.abspath(os.path.dirname(metadata_filename))
    with open(metadata_filename) as f:
      metadata = json.load(f)
    for source in metadata['sources']:
      allowed_imports.add(
          os.path.normcase(os.path.normpath(os.path.join(metadata_dir,
                                                         source))))
    for dep_metadata in metadata['deps']:
      pending.append(os.path.normpath(os.path.join(metadata_dir,
                                                   dep_metadata)))
  return allowed_imports
# multiprocessing helper.
def _ParseAstHelper(mojom_abspath, enabled_features):
  """Parses one mojom into an AST and strips [EnableIf]-disabled definitions.

  Returns a (mojom_abspath, ast) tuple so results from an unordered pool map
  can be matched back to their input file.
  """
  with codecs.open(mojom_abspath, encoding='utf-8') as f:
    ast = parser.Parse(f.read(), mojom_abspath)
    conditional_features.RemoveDisabledDefinitions(ast, enabled_features)
    return mojom_abspath, ast
# multiprocessing helper.
def _SerializeHelper(mojom_abspath, mojom_path):
  """Dumps one already-loaded module under the output root.

  Reads its inputs from the function attributes loaded_modules and
  output_root_path, which _ParseMojoms assigns before sharding; with
  fork-based multiprocessing the worker processes inherit them.
  """
  module_path = os.path.join(_SerializeHelper.output_root_path,
                             _GetModuleFilename(mojom_path))
  module_dir = os.path.dirname(module_path)
  if not os.path.exists(module_dir):
    try:
      # Python 2 doesn't support exist_ok on makedirs(), so we just ignore
      # that failure if it happens. It's possible during build due to races
      # among build steps with module outputs in the same directory.
      os.makedirs(module_dir)
    except OSError as e:
      if e.errno != errno.EEXIST:
        raise
  with open(module_path, 'wb') as f:
    _SerializeHelper.loaded_modules[mojom_abspath].Dump(f)
class _ExceptionWrapper:
  """Pickle-friendly carrier for an exception raised in a worker process."""
  def __init__(self):
    # Do not capture exception object to ensure pickling works.
    self.formatted_trace = traceback.format_exc()
class _FuncWrapper:
  """Marshals exceptions and spreads args."""
  def __init__(self, func):
    self._func = func
  def __call__(self, args):
    # multiprocessing does not gracefully handle exceptions.
    # https://crbug.com/1219044
    try:
      return self._func(*args)
    except:  # pylint: disable=bare-except
      # Return (rather than raise) the wrapper so the parent process can
      # detect and report the failure.
      return _ExceptionWrapper()
def _Shard(target_func, arg_list, processes=None):
  """Yields target_func(*args) for each args tuple in arg_list, fanning the
  work out to a process pool when the workload is large enough to merit it.

  Results are yielded in arbitrary completion order. If a worker raised, its
  formatted traceback is written to stderr and the program exits with 1.
  """
  arg_list = list(arg_list)
  if processes is None:
    processes = multiprocessing.cpu_count()
  # Seems optimal to have each process perform at least 2 tasks.
  processes = min(processes, len(arg_list) // 2)
  if sys.platform == 'win32':
    # TODO(crbug.com/1190269) - we can't use more than 56
    # cores on Windows or Python3 may hang.
    processes = min(processes, 56)
  # Don't spin up processes unless there is enough work to merit doing so.
  if not _ENABLE_MULTIPROCESSING or processes < 2:
    for arg_tuple in arg_list:
      yield target_func(*arg_tuple)
    return
  pool = multiprocessing.Pool(processes=processes)
  try:
    wrapped_func = _FuncWrapper(target_func)
    for result in pool.imap_unordered(wrapped_func, arg_list):
      if isinstance(result, _ExceptionWrapper):
        sys.stderr.write(result.formatted_trace)
        sys.exit(1)
      yield result
  finally:
    pool.close()
    pool.join()  # Needed on Windows to avoid WindowsError during terminate.
    pool.terminate()
def _ParseMojoms(mojom_files,
                 input_root_paths,
                 output_root_path,
                 module_root_paths,
                 enabled_features,
                 module_metadata,
                 allowed_imports=None):
  """Parses a set of mojom files and produces serialized module outputs.

  Args:
    mojom_files: A list of mojom files to process. Paths must be absolute paths
        which fall within one of the input or output root paths.
    input_root_paths: A list of absolute filesystem paths which may be used to
        resolve relative mojom file paths.
    output_root_path: An absolute filesystem path which will service as the root
        for all emitted artifacts. Artifacts produced from a given mojom file
        are based on the mojom's relative path, rebased onto this path.
        Additionally, the script expects this root to contain already-generated
        modules for any transitive dependencies not listed in mojom_files.
    module_root_paths: A list of absolute filesystem paths which contain
        already-generated modules for any non-transitive dependencies.
    enabled_features: A list of enabled feature names, controlling which AST
        nodes are filtered by [EnableIf] or [EnableIfNot] attributes.
    module_metadata: A list of 2-tuples representing metadata key-value pairs to
        attach to each compiled module output.
    allowed_imports: Optional set of normcased absolute mojom paths; when
        provided, any import resolving outside the set raises ValueError.

  Returns:
    None.

    Upon completion, a mojom-module file will be saved for each input mojom.
  """
  assert input_root_paths
  assert output_root_path
  loaded_mojom_asts = {}
  loaded_modules = {}
  input_dependencies = defaultdict(set)
  # Maps each input's normcased absolute path to its root-relative path.
  mojom_files_to_parse = dict((os.path.normcase(abs_path),
                               RebaseAbsolutePath(abs_path, input_root_paths))
                              for abs_path in mojom_files)
  # Reverse mapping: relative path back to absolute path.
  abs_paths = dict(
      (path, abs_path) for abs_path, path in mojom_files_to_parse.items())
  logging.info('Parsing %d .mojom into ASTs', len(mojom_files_to_parse))
  map_args = ((mojom_abspath, enabled_features)
              for mojom_abspath in mojom_files_to_parse)
  for mojom_abspath, ast in _Shard(_ParseAstHelper, map_args):
    loaded_mojom_asts[mojom_abspath] = ast
  logging.info('Processing dependencies')
  # sorted() keeps the iteration (and any error output) deterministic.
  for mojom_abspath, ast in sorted(loaded_mojom_asts.items()):
    invalid_imports = []
    for imp in ast.import_list:
      import_abspath = _ResolveRelativeImportPath(imp.import_filename,
                                                  input_root_paths)
      if allowed_imports and import_abspath not in allowed_imports:
        invalid_imports.append(imp.import_filename)
      abs_paths[imp.import_filename] = import_abspath
      if import_abspath in mojom_files_to_parse:
        # This import is in the input list, so we're going to translate it
        # into a module below; however it's also a dependency of another input
        # module. We retain record of dependencies to help with input
        # processing later.
        input_dependencies[mojom_abspath].add(
            (import_abspath, imp.import_filename))
      elif import_abspath not in loaded_modules:
        # We have an import that isn't being parsed right now. It must already
        # be parsed and have a module file sitting in a corresponding output
        # location.
        module_path = _GetModuleFilename(imp.import_filename)
        module_abspath = _ResolveRelativeImportPath(
            module_path, module_root_paths + [output_root_path])
        with open(module_abspath, 'rb') as module_file:
          loaded_modules[import_abspath] = module.Module.Load(module_file)
    if invalid_imports:
      raise ValueError(
          '\nThe file %s imports the following files not allowed by build '
          'dependencies:\n\n%s\n' % (mojom_abspath, '\n'.join(invalid_imports)))
  logging.info('Loaded %d modules from dependencies', len(loaded_modules))
  # At this point all transitive imports not listed as inputs have been loaded
  # and we have a complete dependency tree of the unprocessed inputs. Now we can
  # load all the inputs, resolving dependencies among them recursively as we go.
  logging.info('Ensuring inputs are loaded')
  num_existing_modules_loaded = len(loaded_modules)
  for mojom_abspath, mojom_path in mojom_files_to_parse.items():
    _EnsureInputLoaded(mojom_abspath, mojom_path, abs_paths, loaded_mojom_asts,
                       input_dependencies, loaded_modules, module_metadata)
  assert (num_existing_modules_loaded +
          len(mojom_files_to_parse) == len(loaded_modules))
  # Now we have fully translated modules for every input and every transitive
  # dependency. We can dump the modules to disk for other tools to use.
  logging.info('Serializing %d modules', len(mojom_files_to_parse))
  # Windows does not use fork() for multiprocessing, so we'd need to pass
  # loaded_module via IPC rather than via globals. Doing so is slower than not
  # using multiprocessing.
  _SerializeHelper.loaded_modules = loaded_modules
  _SerializeHelper.output_root_path = output_root_path
  # Doesn't seem to help past 4. Perhaps IO bound here?
  processes = 4 if _MULTIPROCESSING_USES_FORK else 0
  map_args = mojom_files_to_parse.items()
  for _ in _Shard(_SerializeHelper, map_args, processes=processes):
    pass
def Run(command_line):
  """Parses command-line flags and runs the mojom parser over the inputs.

  Args:
    command_line: The argument list (excluding argv[0]) to parse.
  """
  # Verbose logging is opt-in via the MOJOM_PARSER_DEBUG environment variable.
  debug_logging = os.environ.get('MOJOM_PARSER_DEBUG', '0') != '0'
  logging.basicConfig(level=logging.DEBUG if debug_logging else logging.WARNING,
                      format='%(levelname).1s %(relativeCreated)6d %(message)s')
  logging.info('Started (%s)', os.path.basename(sys.argv[0]))
  arg_parser = argparse.ArgumentParser(
      description="""
Parses one or more mojom files and produces corresponding module outputs fully
describing the definitions therein. The output is exhaustive, stable, and
sufficient for another tool to consume and emit e.g. usable language
bindings based on the original mojoms.""",
      epilog="""
Note that each transitive import dependency reachable from the input mojoms must
either also be listed as an input or must have its corresponding compiled module
already present in the provided output root.""")
  arg_parser.add_argument(
      '--input-root',
      default=[],
      action='append',
      metavar='ROOT',
      dest='input_root_paths',
      help='Adds ROOT to the set of root paths against which relative input '
      'paths should be resolved. Provided root paths are always searched '
      'in order from longest absolute path to shortest.')
  arg_parser.add_argument(
      '--output-root',
      action='store',
      required=True,
      dest='output_root_path',
      metavar='ROOT',
      help='Use ROOT as the root path in which the parser should emit compiled '
      'modules for each processed input mojom. The path of emitted module is '
      'based on the relative input path, rebased onto this root. Note that '
      'ROOT is also searched for existing modules of any transitive imports '
      'which were not included in the set of inputs.')
  arg_parser.add_argument(
      '--module-root',
      default=[],
      action='append',
      metavar='ROOT',
      dest='module_root_paths',
      help='Adds ROOT to the set of root paths to search for existing modules '
      'of non-transitive imports. Provided root paths are always searched in '
      'order from longest absolute path to shortest.')
  arg_parser.add_argument(
      '--mojoms',
      nargs='+',
      dest='mojom_files',
      default=[],
      metavar='MOJOM_FILE',
      help='Input mojom filename(s). Each filename must be either an absolute '
      'path which falls within one of the given input or output roots, or a '
      'relative path the parser will attempt to resolve using each of those '
      'roots in unspecified order.')
  arg_parser.add_argument(
      '--mojom-file-list',
      action='store',
      metavar='LIST_FILENAME',
      help='Input file whose contents are a list of mojoms to process. This '
      'may be provided in lieu of --mojoms to avoid hitting command line '
      'length limtations')
  arg_parser.add_argument(
      '--enable-feature',
      dest='enabled_features',
      default=[],
      action='append',
      metavar='FEATURE',
      help='Enables a named feature when parsing the given mojoms. Features '
      'are identified by arbitrary string values. Specifying this flag with a '
      'given FEATURE name will cause the parser to process any syntax elements '
      'tagged with an [EnableIf=FEATURE] or [EnableIfNot] attribute. If this '
      'flag is not provided for a given FEATURE, such tagged elements are '
      'discarded by the parser and will not be present in the compiled output.')
  arg_parser.add_argument(
      '--check-imports',
      dest='build_metadata_filename',
      action='store',
      metavar='METADATA_FILENAME',
      help='Instructs the parser to check imports against a set of allowed '
      'imports. Allowed imports are based on build metadata within '
      'METADATA_FILENAME. This is a JSON file with a `sources` key listing '
      'paths to the set of input mojom files being processed by this parser '
      'run, and a `deps` key listing paths to metadata files for any '
      'dependencies of these inputs. This feature can be used to implement '
      'build-time dependency checking for mojom imports, where each build '
      'metadata file corresponds to a build target in the dependency graph of '
      'a typical build system.')
  arg_parser.add_argument(
      '--add-module-metadata',
      dest='module_metadata',
      default=[],
      action='append',
      metavar='KEY=VALUE',
      help='Adds a metadata key-value pair to the output module. This can be '
      'used by build toolchains to augment parsed mojom modules with product-'
      'specific metadata for later extraction and use by custom bindings '
      'generators.')
  # parse_known_args: unrecognized flags are tolerated rather than fatal, so
  # wrapping toolchains may pass through extra arguments.
  args, _ = arg_parser.parse_known_args(command_line)
  if args.mojom_file_list:
    with open(args.mojom_file_list) as f:
      args.mojom_files.extend(f.read().split())
  if not args.mojom_files:
    raise ValueError(
        'Must list at least one mojom file via --mojoms or --mojom-file-list')
  mojom_files = list(map(os.path.abspath, args.mojom_files))
  input_roots = list(map(os.path.abspath, args.input_root_paths))
  output_root = os.path.abspath(args.output_root_path)
  module_roots = list(map(os.path.abspath, args.module_root_paths))
  if args.build_metadata_filename:
    allowed_imports = _CollectAllowedImportsFromBuildMetadata(
        args.build_metadata_filename)
  else:
    allowed_imports = None
  # Each KEY=VALUE argument becomes a (key, value) tuple.
  module_metadata = list(
      map(lambda kvp: tuple(kvp.split('=')), args.module_metadata))
  _ParseMojoms(mojom_files, input_roots, output_root, module_roots,
               args.enabled_features, module_metadata, allowed_imports)
  logging.info('Finished')
if __name__ == '__main__':
  Run(sys.argv[1:])
  # Exit without running GC, which can save multiple seconds due to the large
  # number of objects created. But flush is necessary as os._exit doesn't do
  # that.
  sys.stdout.flush()
  sys.stderr.flush()
  os._exit(0)
|
0 | repos/libcamera/utils/ipc/mojo/public/tools | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom_parser_unittest.py | # Copyright 2020 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
from mojom_parser_test_case import MojomParserTestCase
class MojomParserTest(MojomParserTestCase):
"""Tests covering the behavior defined by the main mojom_parser.py script.
This includes behavior around input and output path manipulation, dependency
resolution, and module serialization and deserialization."""
  def testBasicParse(self):
    """Basic test to verify that we can parse a mojom file and get a module."""
    mojom = 'foo/bar.mojom'
    self.WriteFile(
        mojom, """\
        module test;
        enum TestEnum { kFoo };
        """)
    self.ParseMojoms([mojom])
    # The loaded module should reflect the input's relative path, its declared
    # namespace, and the single enum definition.
    m = self.LoadModule(mojom)
    self.assertEqual('foo/bar.mojom', m.path)
    self.assertEqual('test', m.mojom_namespace)
    self.assertEqual(1, len(m.enums))
def testBasicParseWithAbsolutePaths(self):
"""Verifies that we can parse a mojom file given an absolute path input."""
mojom = 'foo/bar.mojom'
self.WriteFile(
mojom, """\
module test;
enum TestEnum { kFoo };
""")
self.ParseMojoms([self.GetPath(mojom)])
m = self.LoadModule(mojom)
self.assertEqual('foo/bar.mojom', m.path)
self.assertEqual('test', m.mojom_namespace)
self.assertEqual(1, len(m.enums))
def testImport(self):
"""Verify imports within the same set of mojom inputs."""
a = 'a.mojom'
b = 'b.mojom'
self.WriteFile(
a, """\
module a;
import "b.mojom";
struct Foo { b.Bar bar; };""")
self.WriteFile(b, """\
module b;
struct Bar {};""")
self.ParseMojoms([a, b])
ma = self.LoadModule(a)
mb = self.LoadModule(b)
self.assertEqual('a.mojom', ma.path)
self.assertEqual('b.mojom', mb.path)
self.assertEqual(1, len(ma.imports))
self.assertEqual(mb, ma.imports[0])
def testPreProcessedImport(self):
"""Verify imports processed by a previous parser execution can be loaded
properly when parsing a dependent mojom."""
a = 'a.mojom'
self.WriteFile(a, """\
module a;
struct Bar {};""")
self.ParseMojoms([a])
b = 'b.mojom'
self.WriteFile(
b, """\
module b;
import "a.mojom";
struct Foo { a.Bar bar; };""")
self.ParseMojoms([b])
def testMissingImport(self):
"""Verify that an import fails if the imported mojom does not exist."""
a = 'a.mojom'
self.WriteFile(
a, """\
module a;
import "non-existent.mojom";
struct Bar {};""")
with self.assertRaisesRegexp(ValueError, "does not exist"):
self.ParseMojoms([a])
def testUnparsedImport(self):
"""Verify that an import fails if the imported mojom is not in the set of
mojoms provided to the parser on this execution AND there is no pre-existing
parsed output module already on disk for it."""
a = 'a.mojom'
b = 'b.mojom'
self.WriteFile(a, """\
module a;
struct Bar {};""")
self.WriteFile(
b, """\
module b;
import "a.mojom";
struct Foo { a.Bar bar; };""")
# a.mojom has not been parsed yet, so its import will fail when processing
# b.mojom here.
with self.assertRaisesRegexp(ValueError, "does not exist"):
self.ParseMojoms([b])
def testCheckImportsBasic(self):
"""Verify that the parser can handle --check-imports with a valid set of
inputs, including support for transitive dependency resolution."""
a = 'a.mojom'
a_metadata = 'out/a.build_metadata'
b = 'b.mojom'
b_metadata = 'out/b.build_metadata'
c = 'c.mojom'
c_metadata = 'out/c.build_metadata'
self.WriteFile(a_metadata,
json.dumps({
"sources": [self.GetPath(a)],
"deps": []
}))
self.WriteFile(
b_metadata,
json.dumps({
"sources": [self.GetPath(b)],
"deps": [self.GetPath(a_metadata)]
}))
self.WriteFile(
c_metadata,
json.dumps({
"sources": [self.GetPath(c)],
"deps": [self.GetPath(b_metadata)]
}))
self.WriteFile(a, """\
module a;
struct Bar {};""")
self.WriteFile(
b, """\
module b;
import "a.mojom";
struct Foo { a.Bar bar; };""")
self.WriteFile(
c, """\
module c;
import "a.mojom";
import "b.mojom";
struct Baz { b.Foo foo; };""")
self.ParseMojoms([a], metadata=a_metadata)
self.ParseMojoms([b], metadata=b_metadata)
self.ParseMojoms([c], metadata=c_metadata)
def testCheckImportsMissing(self):
"""Verify that the parser rejects valid input mojoms when imports don't
agree with build metadata given via --check-imports."""
a = 'a.mojom'
a_metadata = 'out/a.build_metadata'
b = 'b.mojom'
b_metadata = 'out/b.build_metadata'
self.WriteFile(a_metadata,
json.dumps({
"sources": [self.GetPath(a)],
"deps": []
}))
self.WriteFile(b_metadata,
json.dumps({
"sources": [self.GetPath(b)],
"deps": []
}))
self.WriteFile(a, """\
module a;
struct Bar {};""")
self.WriteFile(
b, """\
module b;
import "a.mojom";
struct Foo { a.Bar bar; };""")
self.ParseMojoms([a], metadata=a_metadata)
with self.assertRaisesRegexp(ValueError, "not allowed by build"):
self.ParseMojoms([b], metadata=b_metadata)
|
0 | repos/libcamera/utils/ipc/mojo/public/tools | repos/libcamera/utils/ipc/mojo/public/tools/mojom/README.md | # The Mojom Parser
The Mojom format is an interface definition language (IDL) for describing
interprocess communication (IPC) messages and data types for use with the
low-level cross-platform
[Mojo IPC library](https://chromium.googlesource.com/chromium/src/+/main/mojo/public/c/system/README.md).
This directory consists of a `mojom` Python module, its tests, and supporting
command-line tools. The Python module implements the parser used by the
command-line tools and exposes an API to help external bindings generators emit
useful code from the parser's outputs.
TODO(https://crbug.com/1060464): Fill out this documentation once the library
and tools have stabilized.
|
0 | repos/libcamera/utils/ipc/mojo/public/tools | repos/libcamera/utils/ipc/mojo/public/tools/mojom/check_stable_mojom_compatibility.py | #!/usr/bin/env python3
# Copyright 2020 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Verifies backward-compatibility of mojom type changes.
Given a set of pre- and post-diff mojom file contents, and a root directory
for a project, this tool verifies that any changes to [Stable] mojom types are
backward-compatible with the previous version.
This can be used e.g. by a presubmit check to prevent developers from making
breaking changes to stable mojoms."""
import argparse
import io
import json
import os
import os.path
import sys
from mojom.generate import module
from mojom.generate import translate
from mojom.parse import parser
# pylint: disable=raise-missing-from
class ParseError(Exception):
pass
def _ValidateDelta(root, delta):
  """Parses all modified mojoms (including all transitive mojom dependencies,
  even if unmodified) to perform backward-compatibility checks on any types
  marked with the [Stable] attribute.

  Note that unlike the normal build-time parser in mojom_parser.py, this does
  not produce or rely on cached module translations, but instead parses the full
  transitive closure of a mojom's input dependencies all at once.

  Args:
    root: Absolute path of the source tree root; mojom paths in |delta| are
        interpreted relative to this.
    delta: List of dicts with 'filename', 'old' and 'new' keys describing the
        pre- and post-change contents of each modified mojom file.

  Raises:
    ParseError: if any mojom in the closure fails to parse.
    Exception: if a [Stable] type was deleted or changed incompatibly.
  """
  translate.is_running_backwards_compatibility_check_hack = True

  # First build a map of all files covered by the delta
  affected_files = set()
  old_files = {}  # Maps filename -> pre-change contents (absent if added).
  new_files = {}  # Maps filename -> post-change contents (absent if deleted).
  for change in delta:
    # TODO(crbug.com/953884): Use pathlib once we're migrated fully to Python 3.
    filename = change['filename'].replace('\\', '/')
    affected_files.add(filename)
    if change['old']:
      old_files[filename] = change['old']
    if change['new']:
      new_files[filename] = change['new']

  # Parse and translate all mojoms relevant to the delta, including transitive
  # imports that weren't modified.
  unmodified_modules = {}

  def parseMojom(mojom, file_overrides, override_modules):
    # Recursively parses |mojom| and everything it imports. Contents come from
    # |file_overrides| when present (i.e. the file was modified by the delta),
    # otherwise from disk under |root|.
    if mojom in unmodified_modules or mojom in override_modules:
      return

    contents = file_overrides.get(mojom)
    if contents:
      modules = override_modules
    else:
      modules = unmodified_modules
      with io.open(os.path.join(root, mojom), encoding='utf-8') as f:
        contents = f.read()

    try:
      ast = parser.Parse(contents, mojom)
    except Exception as e:
      raise ParseError('encountered exception {0} while parsing {1}'.format(
          e, mojom))

    # Files which are generated at compile time can't be checked by this script
    # (at the moment) since they may not exist in the output directory.
    generated_files_to_skip = {
        ('third_party/blink/public/mojom/runtime_feature_state/'
         'runtime_feature.mojom'),
        ('third_party/blink/public/mojom/origin_trial_feature/'
         'origin_trial_feature.mojom'),
    }

    ast.import_list.items = [
        x for x in ast.import_list.items
        if x.import_filename not in generated_files_to_skip
    ]

    for imp in ast.import_list:
      if (not file_overrides.get(imp.import_filename)
          and not os.path.exists(os.path.join(root, imp.import_filename))):
        # Speculatively construct a path prefix to locate the import_filename
        mojom_path = os.path.dirname(os.path.normpath(mojom)).split(os.sep)
        test_prefix = ''
        for path_component in mojom_path:
          test_prefix = os.path.join(test_prefix, path_component)
          test_import_filename = os.path.join(test_prefix, imp.import_filename)
          if os.path.exists(os.path.join(root, test_import_filename)):
            imp.import_filename = test_import_filename
            break
      parseMojom(imp.import_filename, file_overrides, override_modules)

    # Now that the transitive set of dependencies has been imported and parsed
    # above, translate each mojom AST into a Module so that all types are fully
    # defined and can be inspected.
    all_modules = {}
    all_modules.update(unmodified_modules)
    all_modules.update(override_modules)
    modules[mojom] = translate.OrderedModule(ast, mojom, all_modules)

  old_modules = {}
  for mojom in old_files:
    parseMojom(mojom, old_files, old_modules)
  new_modules = {}
  for mojom in new_files:
    parseMojom(mojom, new_files, new_modules)

  # At this point we have a complete set of translated Modules from both the
  # pre- and post-diff mojom contents. Now we can analyze backward-compatibility
  # of the deltas.
  #
  # Note that for backward-compatibility checks we only care about types which
  # were marked [Stable] before the diff. Types newly marked as [Stable] are not
  # checked.
  def collectTypes(modules):
    # Flattens enums, structs, unions and interfaces from every module into a
    # single dict keyed by fully qualified type name.
    types = {}
    for m in modules.values():
      for kinds in (m.enums, m.structs, m.unions, m.interfaces):
        for kind in kinds:
          types[kind.qualified_name] = kind
    return types

  old_types = collectTypes(old_modules)
  new_types = collectTypes(new_modules)

  # Collect any renamed types so they can be compared accordingly.
  renamed_types = {}
  for name, kind in new_types.items():
    old_name = kind.attributes and kind.attributes.get('RenamedFrom')
    if old_name:
      renamed_types[old_name] = name

  for qualified_name, kind in old_types.items():
    if not kind.stable:
      continue

    new_name = renamed_types.get(qualified_name, qualified_name)
    if new_name not in new_types:
      raise Exception(
          'Stable type %s appears to be deleted by this change. If it was '
          'renamed, please add a [RenamedFrom] attribute to the new type. This '
          'can be deleted by a subsequent change.' % qualified_name)

    checker = module.BackwardCompatibilityChecker()
    try:
      if not checker.IsBackwardCompatible(new_types[new_name], kind):
        raise Exception(
            'Stable type %s appears to have changed in a way which '
            'breaks backward-compatibility. Please fix!\n\nIf you '
            'believe this assessment to be incorrect, please file a '
            'Chromium bug against the "Internals>Mojo>Bindings" '
            'component.' % qualified_name)
    except Exception as e:
      raise Exception(
          'Stable type %s appears to have changed in a way which '
          'breaks backward-compatibility: \n\n%s.\nPlease fix!\n\nIf you '
          'believe this assessment to be incorrect, please file a '
          'Chromium bug against the "Internals>Mojo>Bindings" '
          'component.' % (qualified_name, e))
def Run(command_line, delta=None):
  """Runs the backward-compatibility check with the given command_line.

  Normally the change description is read from stdin as a JSON-encoded list,
  but tests may supply a |delta| directly for convenience."""
  cli = argparse.ArgumentParser(
      description='Verifies backward-compatibility of mojom type changes.',
      epilog="""
This tool reads a change description from stdin and verifies that all modified
[Stable] mojom types will retain backward-compatibility. The change description
must be a JSON-encoded list of objects, each with a "filename" key (path to a
changed mojom file, relative to ROOT); an "old" key whose value is a string of
the full file contents before the change, or null if the file is being added;
and a "new" key whose value is a string of the full file contents after the
change, or null if the file is being deleted.""")
  cli.add_argument(
      '--src-root',
      required=True,
      action='store',
      metavar='ROOT',
      help='The root of the source tree in which the checked mojoms live.')

  parsed_args, _ = cli.parse_known_args(command_line)
  # Fall back to the stdin-provided change description when no delta was
  # passed in directly (the normal, non-test code path).
  if not delta:
    delta = json.load(sys.stdin)
  _ValidateDelta(parsed_args.src_root, delta)
# Command-line entry point: forward argv (minus the program name) to Run().
if __name__ == '__main__':
  Run(sys.argv[1:])
|
0 | repos/libcamera/utils/ipc/mojo/public/tools | repos/libcamera/utils/ipc/mojo/public/tools/mojom/enum_unittest.py | # Copyright 2020 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from mojom_parser_test_case import MojomParserTestCase
class EnumTest(MojomParserTestCase):
  """Tests enum parsing behavior."""

  def testExplicitValues(self):
    """Verifies basic parsing of assigned integral values."""
    types = self.ExtractTypes('enum E { kFoo=0, kBar=2, kBaz };')
    self.assertEqual('kFoo', types['E'].fields[0].mojom_name)
    self.assertEqual(0, types['E'].fields[0].numeric_value)
    self.assertEqual('kBar', types['E'].fields[1].mojom_name)
    self.assertEqual(2, types['E'].fields[1].numeric_value)
    # An unassigned field continues from the previous explicit value.
    self.assertEqual('kBaz', types['E'].fields[2].mojom_name)
    self.assertEqual(3, types['E'].fields[2].numeric_value)

  def testImplicitValues(self):
    """Verifies basic automatic assignment of integral values at parse time."""
    types = self.ExtractTypes('enum E { kFoo, kBar, kBaz };')
    self.assertEqual('kFoo', types['E'].fields[0].mojom_name)
    self.assertEqual(0, types['E'].fields[0].numeric_value)
    self.assertEqual('kBar', types['E'].fields[1].mojom_name)
    self.assertEqual(1, types['E'].fields[1].numeric_value)
    self.assertEqual('kBaz', types['E'].fields[2].mojom_name)
    self.assertEqual(2, types['E'].fields[2].numeric_value)

  def testSameEnumReference(self):
    """Verifies that an enum value can be assigned from the value of another
    field within the same enum."""
    types = self.ExtractTypes('enum E { kA, kB, kFirst=kA };')
    self.assertEqual('kA', types['E'].fields[0].mojom_name)
    self.assertEqual(0, types['E'].fields[0].numeric_value)
    self.assertEqual('kB', types['E'].fields[1].mojom_name)
    self.assertEqual(1, types['E'].fields[1].numeric_value)
    self.assertEqual('kFirst', types['E'].fields[2].mojom_name)
    self.assertEqual(0, types['E'].fields[2].numeric_value)

  def testSameModuleOtherEnumReference(self):
    """Verifies that an enum value can be assigned from the value of a field
    in another enum within the same module."""
    types = self.ExtractTypes('enum E { kA, kB }; enum F { kA = E.kB };')
    self.assertEqual(1, types['F'].fields[0].numeric_value)

  def testImportedEnumReference(self):
    """Verifies that an enum value can be assigned from the value of a field
    in another enum within a different module."""
    a_mojom = 'a.mojom'
    self.WriteFile(a_mojom, 'module a; enum E { kFoo=42, kBar };')
    b_mojom = 'b.mojom'
    self.WriteFile(b_mojom,
                   'module b; import "a.mojom"; enum F { kFoo = a.E.kBar };')
    self.ParseMojoms([a_mojom, b_mojom])
    b = self.LoadModule(b_mojom)

    self.assertEqual('F', b.enums[0].mojom_name)
    self.assertEqual('kFoo', b.enums[0].fields[0].mojom_name)
    self.assertEqual(43, b.enums[0].fields[0].numeric_value)

  def testConstantReference(self):
    """Verifies that an enum value can be assigned from the value of an
    integral constant within the same module."""
    types = self.ExtractTypes('const int32 kFoo = 42; enum E { kA = kFoo };')
    self.assertEqual(42, types['E'].fields[0].numeric_value)

  def testInvalidConstantReference(self):
    """Verifies that enum values cannot be assigned from the value of
    non-integral constants."""
    with self.assertRaisesRegex(ValueError, 'not an integer'):
      self.ExtractTypes('const float kFoo = 1.0; enum E { kA = kFoo };')
    with self.assertRaisesRegex(ValueError, 'not an integer'):
      self.ExtractTypes('const double kFoo = 1.0; enum E { kA = kFoo };')
    with self.assertRaisesRegex(ValueError, 'not an integer'):
      self.ExtractTypes('const string kFoo = "lol"; enum E { kA = kFoo };')

  def testImportedConstantReference(self):
    """Verifies that an enum value can be assigned from the value of an integral
    constant within an imported module."""
    a_mojom = 'a.mojom'
    self.WriteFile(a_mojom, 'module a; const int32 kFoo = 37;')
    b_mojom = 'b.mojom'
    self.WriteFile(b_mojom,
                   'module b; import "a.mojom"; enum F { kFoo = a.kFoo };')
    self.ParseMojoms([a_mojom, b_mojom])
    b = self.LoadModule(b_mojom)

    self.assertEqual('F', b.enums[0].mojom_name)
    self.assertEqual('kFoo', b.enums[0].fields[0].mojom_name)
    self.assertEqual(37, b.enums[0].fields[0].numeric_value)

  def testEnumAttributesAreEnums(self):
    """Verifies that enum values in attributes are really enum types."""
    a_mojom = 'a.mojom'
    self.WriteFile(a_mojom, 'module a; enum E { kFoo, kBar };')
    b_mojom = 'b.mojom'
    self.WriteFile(
        b_mojom, 'module b;'
        'import "a.mojom";'
        '[MooCow=a.E.kFoo]'
        'interface Foo { Foo(); };')
    self.ParseMojoms([a_mojom, b_mojom])
    b = self.LoadModule(b_mojom)
    self.assertEqual(b.interfaces[0].attributes['MooCow'].mojom_name, 'kFoo')

  def testConstantAttributes(self):
    """Verifies that constants as attributes are translated to the constant."""
    a_mojom = 'a.mojom'
    self.WriteFile(
        a_mojom, 'module a;'
        'enum E { kFoo, kBar };'
        'const E kB = E.kFoo;'
        '[Attr=kB] interface Hello { Foo(); };')
    self.ParseMojoms([a_mojom])
    a = self.LoadModule(a_mojom)
    self.assertEqual(a.interfaces[0].attributes['Attr'].mojom_name, 'kB')
    self.assertEqual(a.interfaces[0].attributes['Attr'].value.mojom_name,
                     'kFoo')
|
0 | repos/libcamera/utils/ipc/mojo/public/tools | repos/libcamera/utils/ipc/mojo/public/tools/mojom/stable_attribute_unittest.py | # Copyright 2020 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from mojom_parser_test_case import MojomParserTestCase
from mojom.generate import module
class StableAttributeTest(MojomParserTestCase):
  """Tests covering usage of the [Stable] attribute."""

  def testStableAttributeTagging(self):
    """Verify that we recognize the [Stable] attribute on relevant definitions
    and the resulting parser outputs are tagged accordingly."""
    mojom = 'test.mojom'
    self.WriteFile(
        mojom, """\
[Stable] enum TestEnum { kFoo };
enum UnstableEnum { kBar };
[Stable] struct TestStruct { TestEnum a; };
struct UnstableStruct { UnstableEnum a; };
[Stable] union TestUnion { TestEnum a; TestStruct b; };
union UnstableUnion { UnstableEnum a; UnstableStruct b; };
[Stable] interface TestInterface { Foo@0(TestUnion x) => (); };
interface UnstableInterface { Foo(UnstableUnion x) => (); };
""")
    self.ParseMojoms([mojom])

    m = self.LoadModule(mojom)
    # Each kind list holds the stable definition first, then the unstable one,
    # mirroring the declaration order above.
    self.assertEqual(2, len(m.enums))
    self.assertTrue(m.enums[0].stable)
    self.assertFalse(m.enums[1].stable)
    self.assertEqual(2, len(m.structs))
    self.assertTrue(m.structs[0].stable)
    self.assertFalse(m.structs[1].stable)
    self.assertEqual(2, len(m.unions))
    self.assertTrue(m.unions[0].stable)
    self.assertFalse(m.unions[1].stable)
    self.assertEqual(2, len(m.interfaces))
    self.assertTrue(m.interfaces[0].stable)
    self.assertFalse(m.interfaces[1].stable)

  def testStableStruct(self):
    """A [Stable] struct is valid if all its fields are also stable."""
    self.ExtractTypes('[Stable] struct S {};')
    self.ExtractTypes('[Stable] struct S { int32 x; bool b; };')
    self.ExtractTypes('[Stable] enum E { A }; [Stable] struct S { E e; };')
    self.ExtractTypes('[Stable] struct S {}; [Stable] struct T { S s; };')
    self.ExtractTypes(
        '[Stable] struct S {}; [Stable] struct T { array<S> ss; };')
    self.ExtractTypes(
        '[Stable] interface F {}; [Stable] struct T { pending_remote<F> f; };')

    with self.assertRaisesRegex(Exception, 'because it depends on E'):
      self.ExtractTypes('enum E { A }; [Stable] struct S { E e; };')
    with self.assertRaisesRegex(Exception, 'because it depends on X'):
      self.ExtractTypes('struct X {}; [Stable] struct S { X x; };')
    with self.assertRaisesRegex(Exception, 'because it depends on T'):
      self.ExtractTypes('struct T {}; [Stable] struct S { array<T> xs; };')
    with self.assertRaisesRegex(Exception, 'because it depends on T'):
      self.ExtractTypes('struct T {}; [Stable] struct S { map<int32, T> xs; };')
    with self.assertRaisesRegex(Exception, 'because it depends on T'):
      self.ExtractTypes('struct T {}; [Stable] struct S { map<T, int32> xs; };')
    with self.assertRaisesRegex(Exception, 'because it depends on F'):
      self.ExtractTypes(
          'interface F {}; [Stable] struct S { pending_remote<F> f; };')
    with self.assertRaisesRegex(Exception, 'because it depends on F'):
      self.ExtractTypes(
          'interface F {}; [Stable] struct S { pending_receiver<F> f; };')

  def testStableUnion(self):
    """A [Stable] union is valid if all its fields' types are also stable."""
    self.ExtractTypes('[Stable] union U {};')
    self.ExtractTypes('[Stable] union U { int32 x; bool b; };')
    self.ExtractTypes('[Stable] enum E { A }; [Stable] union U { E e; };')
    self.ExtractTypes('[Stable] struct S {}; [Stable] union U { S s; };')
    self.ExtractTypes(
        '[Stable] struct S {}; [Stable] union U { array<S> ss; };')
    self.ExtractTypes(
        '[Stable] interface F {}; [Stable] union U { pending_remote<F> f; };')

    with self.assertRaisesRegex(Exception, 'because it depends on E'):
      self.ExtractTypes('enum E { A }; [Stable] union U { E e; };')
    with self.assertRaisesRegex(Exception, 'because it depends on X'):
      self.ExtractTypes('struct X {}; [Stable] union U { X x; };')
    with self.assertRaisesRegex(Exception, 'because it depends on T'):
      self.ExtractTypes('struct T {}; [Stable] union U { array<T> xs; };')
    with self.assertRaisesRegex(Exception, 'because it depends on T'):
      self.ExtractTypes('struct T {}; [Stable] union U { map<int32, T> xs; };')
    with self.assertRaisesRegex(Exception, 'because it depends on T'):
      self.ExtractTypes('struct T {}; [Stable] union U { map<T, int32> xs; };')
    with self.assertRaisesRegex(Exception, 'because it depends on F'):
      self.ExtractTypes(
          'interface F {}; [Stable] union U { pending_remote<F> f; };')
    with self.assertRaisesRegex(Exception, 'because it depends on F'):
      self.ExtractTypes(
          'interface F {}; [Stable] union U { pending_receiver<F> f; };')

  def testStableInterface(self):
    """A [Stable] interface is valid if all its methods' parameter types are
    stable, including response parameters where applicable."""
    self.ExtractTypes('[Stable] interface F {};')
    self.ExtractTypes('[Stable] interface F { A@0(int32 x); };')
    self.ExtractTypes('[Stable] interface F { A@0(int32 x) => (bool b); };')
    self.ExtractTypes("""\
[Stable] enum E { A, B, C };
[Stable] struct S {};
[Stable] interface F { A@0(E e, S s) => (bool b, array<S> s); };
""")

    with self.assertRaisesRegex(Exception, 'because it depends on E'):
      self.ExtractTypes(
          'enum E { A, B, C }; [Stable] interface F { A@0(E e); };')
    with self.assertRaisesRegex(Exception, 'because it depends on E'):
      self.ExtractTypes(
          'enum E { A, B, C }; [Stable] interface F { A@0(int32 x) => (E e); };'
      )
    with self.assertRaisesRegex(Exception, 'because it depends on S'):
      self.ExtractTypes(
          'struct S {}; [Stable] interface F { A@0(int32 x) => (S s); };')
    with self.assertRaisesRegex(Exception, 'because it depends on S'):
      self.ExtractTypes(
          'struct S {}; [Stable] interface F { A@0(S s) => (bool b); };')

    # Stable interfaces must also assign explicit method ordinals.
    with self.assertRaisesRegex(Exception, 'explicit method ordinals'):
      self.ExtractTypes('[Stable] interface F { A() => (); };')
|
0 | repos/libcamera/utils/ipc/mojo/public/tools | repos/libcamera/utils/ipc/mojo/public/tools/mojom/union_unittest.py | # Copyright 2022 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from mojom_parser_test_case import MojomParserTestCase
class UnionTest(MojomParserTestCase):
  """Tests union parsing behavior."""

  def testExtensibleMustHaveDefault(self):
    """Verifies that extensible unions must have a default field."""
    mojom = 'foo.mojom'
    self.WriteFile(mojom, 'module foo; [Extensible] union U { bool x; };')
    # Raw string keeps the bracket escapes as regex metacharacter escapes.
    with self.assertRaisesRegex(Exception, r'must specify a \[Default\]'):
      self.ParseMojoms([mojom])

  def testExtensibleSingleDefault(self):
    """Verifies that extensible unions must not have multiple default fields."""
    mojom = 'foo.mojom'
    self.WriteFile(
        mojom, """\
module foo;
[Extensible] union U {
  [Default] bool x;
  [Default] bool y;
};
""")
    with self.assertRaisesRegex(Exception, r'Multiple \[Default\] fields'):
      self.ParseMojoms([mojom])

  def testExtensibleDefaultTypeValid(self):
    """Verifies that an extensible union's default field must be nullable or
    integral type."""
    mojom = 'foo.mojom'
    self.WriteFile(
        mojom, """\
module foo;
[Extensible] union U {
  [Default] handle<message_pipe> p;
};
""")
    with self.assertRaisesRegex(Exception, 'must be nullable or integral'):
      self.ParseMojoms([mojom])
|
0 | repos/libcamera/utils/ipc/mojo/public/tools/mojom | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom/fileutil.py | # Copyright 2015 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import errno
import os.path
import sys
def _GetDirAbove(dirname):
"""Returns the directory "above" this file containing |dirname| (which must
also be "above" this file)."""
path = os.path.abspath(__file__)
while True:
path, tail = os.path.split(path)
if not tail:
return None
if tail == dirname:
return path
def EnsureDirectoryExists(path, always_try_to_create=False):
  """A wrapper for os.makedirs that does not error if the directory already
  exists. A different process could be racing to create this directory."""
  if always_try_to_create or not os.path.exists(path):
    try:
      os.makedirs(path)
    except OSError as exc:
      # Another process may have won the race to create the directory; only
      # re-raise errors other than "already exists".
      if exc.errno != errno.EEXIST:
        raise
def AddLocalRepoThirdPartyDirToModulePath():
  """Helper function to find the top-level directory of this script's repository
  assuming the script falls somewhere within a 'mojo' directory, and insert the
  top-level 'third_party' directory early in the module search path. Used to
  ensure that third-party dependencies provided within the repository itself
  (e.g. Chromium sources include snapshots of jinja2 and ply) are preferred over
  locally installed system library packages."""
  repo_root = _GetDirAbove('mojo')
  if repo_root:
    # Index 1 keeps the script's own directory first but beats everything else.
    sys.path.insert(1, os.path.join(repo_root, 'third_party'))
|
0 | repos/libcamera/utils/ipc/mojo/public/tools/mojom | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom/fileutil_unittest.py | # Copyright 2015 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os.path
import shutil
import tempfile
import unittest
from mojom import fileutil
class FileUtilTest(unittest.TestCase):
  def testEnsureDirectoryExists(self):
    """Test that EnsureDirectoryExists functions correctly."""
    scratch_dir = tempfile.mkdtemp()
    try:
      self.assertTrue(os.path.exists(scratch_dir))

      # The nested path should not exist until we create it.
      nested = os.path.join(scratch_dir, "foo", "bar")
      self.assertFalse(os.path.exists(nested))

      # Create the directory.
      fileutil.EnsureDirectoryExists(nested)
      self.assertTrue(os.path.exists(nested))

      # A second call on an existing directory must be a silent no-op.
      fileutil.EnsureDirectoryExists(nested)
      self.assertTrue(os.path.exists(nested))

      # Bypass the existence check to exercise the EEXIST race handling.
      fileutil.EnsureDirectoryExists(nested, always_try_to_create=True)
      self.assertTrue(os.path.exists(nested))
    finally:
      shutil.rmtree(scratch_dir)
|
0 | repos/libcamera/utils/ipc/mojo/public/tools/mojom | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom/error.py | # Copyright 2014 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class Error(Exception):
  """Base class for Mojo IDL bindings parser/generator errors."""

  def __init__(self, filename, message, lineno=None, addenda=None, **kwargs):
    """|filename| is the (primary) file which caused the error, |message| is the
    error message, |lineno| is the 1-based line number (or |None| if not
    applicable/available), and |addenda| is a list of additional lines to append
    to the final error message."""
    super().__init__(**kwargs)
    self.filename = filename
    self.message = message
    self.lineno = lineno
    self.addenda = addenda

  def __str__(self):
    # Include the line number in the prefix only when one is known.
    if self.lineno:
      prefix = "%s:%d: Error: %s" % (self.filename, self.lineno, self.message)
    else:
      prefix = "%s: Error: %s" % (self.filename, self.message)
    if not self.addenda:
      return prefix
    return "\n".join([prefix] + self.addenda)

  def __repr__(self):
    return str(self)
|
0 | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom/generate/translate.py | # Copyright 2013 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Convert parse tree to AST.
This module converts the parse tree to the AST we use for code generation. The
main entry point is OrderedModule, which gets passed the parser
representation of a mojom file. When called it's assumed that all imports have
already been parsed and converted to ASTs before.
"""
import itertools
import os
import re
from collections import OrderedDict
from mojom.generate import generator
from mojom.generate import module as mojom
from mojom.parse import ast
# When True, _Enum() waives the requirement that [Extensible] enums declare a
# [Default] enumerator, because the older enum version being compared against
# may predate that rule. NOTE(review): presumably toggled by the backwards
# compatibility checker tooling — confirm against its caller.
is_running_backwards_compatibility_check_hack = False
### DO NOT ADD ENTRIES TO THIS LIST. ###
_EXTENSIBLE_ENUMS_MISSING_DEFAULT = (
'x:arc.keymaster.mojom.Algorithm',
'x:arc.keymaster.mojom.Digest',
'x:arc.keymaster.mojom.SignatureResult',
'x:arc.mojom.AccessibilityActionType',
'x:arc.mojom.AccessibilityBooleanProperty',
'x:arc.mojom.AccessibilityEventIntListProperty',
'x:arc.mojom.AccessibilityEventIntProperty',
'x:arc.mojom.AccessibilityEventStringProperty',
'x:arc.mojom.AccessibilityEventType',
'x:arc.mojom.AccessibilityFilterType',
'x:arc.mojom.AccessibilityIntListProperty',
'x:arc.mojom.AccessibilityIntProperty',
'x:arc.mojom.AccessibilityLiveRegionType',
'x:arc.mojom.AccessibilityNotificationStateType',
'x:arc.mojom.AccessibilityRangeType',
'x:arc.mojom.AccessibilitySelectionMode',
'x:arc.mojom.AccessibilityStringListProperty',
'x:arc.mojom.AccessibilityStringProperty',
'x:arc.mojom.AccessibilityWindowBooleanProperty',
'x:arc.mojom.AccessibilityWindowIntListProperty',
'x:arc.mojom.AccessibilityWindowIntProperty',
'x:arc.mojom.AccessibilityWindowStringProperty',
'x:arc.mojom.AccessibilityWindowType',
'x:arc.mojom.AccountCheckStatus',
'x:arc.mojom.AccountUpdateType',
'x:arc.mojom.ActionType',
'x:arc.mojom.Algorithm',
'x:arc.mojom.AndroidIdSource',
'x:arc.mojom.AnrSource',
'x:arc.mojom.AnrType',
'x:arc.mojom.AppDiscoveryRequestState',
'x:arc.mojom.AppKillType',
'x:arc.mojom.AppPermission',
'x:arc.mojom.AppPermissionGroup',
'x:arc.mojom.AppReinstallState',
'x:arc.mojom.AppShortcutItemType',
'x:arc.mojom.ArcAuthCodeStatus',
'x:arc.mojom.ArcClipboardDragDropEvent',
'x:arc.mojom.ArcCorePriAbiMigEvent',
'x:arc.mojom.ArcDnsQuery',
'x:arc.mojom.ArcImageCopyPasteCompatAction',
'x:arc.mojom.ArcNetworkError',
'x:arc.mojom.ArcNetworkEvent',
'x:arc.mojom.ArcNotificationEvent',
'x:arc.mojom.ArcNotificationExpandState',
'x:arc.mojom.ArcNotificationPriority',
'x:arc.mojom.ArcNotificationRemoteInputState',
'x:arc.mojom.ArcNotificationShownContents',
'x:arc.mojom.ArcNotificationStyle',
'x:arc.mojom.ArcNotificationType',
'x:arc.mojom.ArcPipEvent',
'x:arc.mojom.ArcResizeLockState',
'x:arc.mojom.ArcSignInSuccess',
'x:arc.mojom.ArcTimerResult',
'x:arc.mojom.AudioSwitch',
'x:arc.mojom.BluetoothAclState',
'x:arc.mojom.BluetoothAdapterState',
'x:arc.mojom.BluetoothAdvertisingDataType',
'x:arc.mojom.BluetoothBondState',
'x:arc.mojom.BluetoothDeviceType',
'x:arc.mojom.BluetoothDiscoveryState',
'x:arc.mojom.BluetoothGattDBAttributeType',
'x:arc.mojom.BluetoothGattStatus',
'x:arc.mojom.BluetoothPropertyType',
'x:arc.mojom.BluetoothScanMode',
'x:arc.mojom.BluetoothSdpAttributeType',
'x:arc.mojom.BluetoothSocketType',
'x:arc.mojom.BluetoothStatus',
'x:arc.mojom.BootType',
'x:arc.mojom.CaptionTextShadowType',
'x:arc.mojom.ChangeType',
'x:arc.mojom.ChromeAccountType',
'x:arc.mojom.ChromeApp',
'x:arc.mojom.ChromePage',
'x:arc.mojom.ClockId',
'x:arc.mojom.CloudProvisionFlowError',
'x:arc.mojom.CommandResultType',
'x:arc.mojom.CompanionLibApiId',
'x:arc.mojom.ConnectionStateType',
'x:arc.mojom.ContentChangeType',
'x:arc.mojom.CpuRestrictionState',
'x:arc.mojom.CursorCoordinateSpace',
'x:arc.mojom.DataRestoreStatus',
'x:arc.mojom.DecoderStatus',
'x:arc.mojom.DeviceType',
'x:arc.mojom.Digest',
'x:arc.mojom.DisplayWakeLockType',
'x:arc.mojom.EapMethod',
'x:arc.mojom.EapPhase2Method',
'x:arc.mojom.FileSelectorEventType',
'x:arc.mojom.GMSCheckInError',
'x:arc.mojom.GMSSignInError',
'x:arc.mojom.GeneralSignInError',
'x:arc.mojom.GetNetworksRequestType',
'x:arc.mojom.HalPixelFormat',
'x:arc.mojom.IPAddressType',
'x:arc.mojom.InstallErrorReason',
'x:arc.mojom.KeyFormat',
'x:arc.mojom.KeyManagement',
'x:arc.mojom.KeyPurpose',
'x:arc.mojom.KeymasterError',
'x:arc.mojom.MainAccountHashMigrationStatus',
'x:arc.mojom.MainAccountResolutionStatus',
'x:arc.mojom.ManagementChangeStatus',
'x:arc.mojom.ManagementState',
'x:arc.mojom.MessageCenterVisibility',
'x:arc.mojom.MetricsType',
'x:arc.mojom.MountEvent',
'x:arc.mojom.NativeBridgeType',
'x:arc.mojom.NetworkResult',
'x:arc.mojom.NetworkType',
'x:arc.mojom.OemCryptoAlgorithm',
'x:arc.mojom.OemCryptoCipherMode',
'x:arc.mojom.OemCryptoHdcpCapability',
'x:arc.mojom.OemCryptoLicenseType',
'x:arc.mojom.OemCryptoPrivateKey',
'x:arc.mojom.OemCryptoProvisioningMethod',
'x:arc.mojom.OemCryptoResult',
'x:arc.mojom.OemCryptoRsaPaddingScheme',
'x:arc.mojom.OemCryptoUsageEntryStatus',
'x:arc.mojom.Padding',
'x:arc.mojom.PaiFlowState',
'x:arc.mojom.PatternType',
'x:arc.mojom.PressureLevel',
'x:arc.mojom.PrintColorMode',
'x:arc.mojom.PrintContentType',
'x:arc.mojom.PrintDuplexMode',
'x:arc.mojom.PrinterStatus',
'x:arc.mojom.ProcessState',
'x:arc.mojom.PurchaseState',
'x:arc.mojom.ReauthReason',
'x:arc.mojom.ScaleFactor',
'x:arc.mojom.SecurityType',
'x:arc.mojom.SegmentStyle',
'x:arc.mojom.SelectFilesActionType',
'x:arc.mojom.SetNativeChromeVoxResponse',
'x:arc.mojom.ShowPackageInfoPage',
'x:arc.mojom.SpanType',
'x:arc.mojom.SupportedLinkChangeSource',
'x:arc.mojom.TetheringClientState',
'x:arc.mojom.TextInputType',
'x:arc.mojom.TtsEventType',
'x:arc.mojom.VideoCodecProfile',
'x:arc.mojom.VideoDecodeAccelerator.Result',
'x:arc.mojom.VideoEncodeAccelerator.Error',
'x:arc.mojom.VideoFrameStorageType',
'x:arc.mojom.VideoPixelFormat',
'x:arc.mojom.WakefulnessMode',
'x:arc.mojom.WebApkInstallResult',
'x:ash.ime.mojom.InputFieldType',
'x:ash.ime.mojom.PersonalizationMode',
'x:ash.language.mojom.FeatureId',
'x:blink.mojom.ScrollRestorationType',
'x:chromeos.cdm.mojom.CdmKeyStatus',
'x:chromeos.cdm.mojom.CdmMessageType',
'x:chromeos.cdm.mojom.CdmSessionType',
'x:chromeos.cdm.mojom.DecryptStatus',
'x:chromeos.cdm.mojom.EmeInitDataType',
'x:chromeos.cdm.mojom.EncryptionScheme',
'x:chromeos.cdm.mojom.HdcpVersion',
'x:chromeos.cdm.mojom.OutputProtection.LinkType',
'x:chromeos.cdm.mojom.OutputProtection.ProtectionType',
'x:chromeos.cdm.mojom.PromiseException',
'x:chromeos.cfm.mojom.EnqueuePriority',
'x:chromeos.cfm.mojom.LoggerErrorCode',
'x:chromeos.cfm.mojom.LoggerState',
'x:chromeos.cros_healthd.mojom.CryptoAlgorithm',
'x:chromeos.cros_healthd.mojom.EncryptionState',
'x:chromeos.machine_learning.mojom.AnnotationUsecase',
'x:chromeos.machine_learning.mojom.BuiltinModelId',
'x:chromeos.machine_learning.mojom.CreateGraphExecutorResult',
'x:chromeos.machine_learning.mojom.DocumentScannerResultStatus',
'x:chromeos.machine_learning.mojom.EndpointReason',
'x:chromeos.machine_learning.mojom.EndpointerType',
'x:chromeos.machine_learning.mojom.ExecuteResult',
'x:chromeos.machine_learning.mojom.GrammarCheckerResult.Status',
'x:chromeos.machine_learning.mojom.HandwritingRecognizerResult.Status',
'x:chromeos.machine_learning.mojom.LoadHandwritingModelResult',
'x:chromeos.machine_learning.mojom.LoadModelResult',
'x:chromeos.machine_learning.mojom.Rotation',
'x:chromeos.network_config.mojom.ConnectionStateType',
'x:chromeos.network_config.mojom.DeviceStateType',
'x:chromeos.network_config.mojom.IPConfigType',
'x:chromeos.network_config.mojom.NetworkType',
'x:chromeos.network_config.mojom.OncSource',
'x:chromeos.network_config.mojom.PolicySource',
'x:chromeos.network_config.mojom.PortalState',
'x:chromeos.wilco_dtc_supportd.mojom.WilcoDtcSupportdEvent',
'x:chromeos.wilco_dtc_supportd.mojom.WilcoDtcSupportdWebRequestHttpMethod',
'x:chromeos.wilco_dtc_supportd.mojom.WilcoDtcSupportdWebRequestStatus',
'x:cros.mojom.CameraClientType',
'x:cros.mojom.CameraMetadataSectionStart',
'x:cros.mojom.CameraMetadataTag',
'x:cros.mojom.HalPixelFormat',
'x:crosapi.mojom.AllowedPaths',
'x:crosapi.mojom.BrowserAppInstanceType',
'x:crosapi.mojom.CreationResult',
'x:crosapi.mojom.DeviceAccessResultCode',
'x:crosapi.mojom.DeviceMode',
'x:crosapi.mojom.DlpRestrictionLevel',
'x:crosapi.mojom.ExoImeSupport',
'x:crosapi.mojom.FullscreenVisibility',
'x:crosapi.mojom.GoogleServiceAuthError.State',
'x:crosapi.mojom.IsInstallableResult',
'x:crosapi.mojom.KeyTag',
'x:crosapi.mojom.KeystoreSigningAlgorithmName',
'x:crosapi.mojom.KeystoreType',
'x:crosapi.mojom.LacrosFeedbackSource',
'x:crosapi.mojom.MemoryPressureLevel',
'x:crosapi.mojom.MetricsReportingManaged',
'x:crosapi.mojom.NotificationType',
'x:crosapi.mojom.OndeviceHandwritingSupport',
'x:crosapi.mojom.OpenResult',
'x:crosapi.mojom.PolicyDomain',
'x:crosapi.mojom.RegistrationCodeType',
'x:crosapi.mojom.ScaleFactor',
'x:crosapi.mojom.SearchResult.OptionalBool',
'x:crosapi.mojom.SelectFileDialogType',
'x:crosapi.mojom.SelectFileResult',
'x:crosapi.mojom.SharesheetResult',
'x:crosapi.mojom.TouchEventType',
'x:crosapi.mojom.VideoRotation',
'x:crosapi.mojom.WallpaperLayout',
'x:crosapi.mojom.WebAppInstallResultCode',
'x:crosapi.mojom.WebAppUninstallResultCode',
'x:device.mojom.HidBusType',
'x:device.mojom.WakeLockReason',
'x:device.mojom.WakeLockType',
'x:drivefs.mojom.DialogReason.Type',
'x:drivefs.mojom.DriveError.Type',
'x:drivefs.mojom.DriveFsDelegate.ExtensionConnectionStatus',
'x:drivefs.mojom.FileMetadata.CanPinStatus',
'x:drivefs.mojom.FileMetadata.Type',
'x:drivefs.mojom.ItemEventReason',
'x:drivefs.mojom.MirrorPathStatus',
'x:drivefs.mojom.MirrorSyncStatus',
'x:drivefs.mojom.QueryParameters.SortField',
'x:fuzz.mojom.FuzzEnum',
'x:media.mojom.FillLightMode',
'x:media.mojom.MeteringMode',
'x:media.mojom.PowerLineFrequency',
'x:media.mojom.RedEyeReduction',
'x:media.mojom.ResolutionChangePolicy',
'x:media.mojom.VideoCaptureApi',
'x:media.mojom.VideoCaptureBufferType',
'x:media.mojom.VideoCaptureError',
'x:media.mojom.VideoCaptureFrameDropReason',
'x:media.mojom.VideoCapturePixelFormat',
'x:media.mojom.VideoCaptureTransportType',
'x:media.mojom.VideoFacingMode',
'x:media_session.mojom.AudioFocusType',
'x:media_session.mojom.CameraState',
'x:media_session.mojom.EnforcementMode',
'x:media_session.mojom.MediaAudioVideoState',
'x:media_session.mojom.MediaImageBitmapColorType',
'x:media_session.mojom.MediaPictureInPictureState',
'x:media_session.mojom.MediaPlaybackState',
'x:media_session.mojom.MediaSession.SuspendType',
'x:media_session.mojom.MediaSessionAction',
'x:media_session.mojom.MediaSessionImageType',
'x:media_session.mojom.MediaSessionInfo.SessionState',
'x:media_session.mojom.MicrophoneState',
'x:ml.model_loader.mojom.ComputeResult',
'x:ml.model_loader.mojom.CreateModelLoaderResult',
'x:ml.model_loader.mojom.LoadModelResult',
'x:mojo.test.AnExtensibleEnum',
'x:mojo.test.EnumB',
'x:mojo.test.ExtensibleEmptyEnum',
'x:mojo.test.enum_default_unittest.mojom.ExtensibleEnumWithoutDefault',
'x:network.mojom.WebSandboxFlags',
'x:payments.mojom.BillingResponseCode',
'x:payments.mojom.CreateDigitalGoodsResponseCode',
'x:payments.mojom.ItemType',
'x:printing.mojom.PrinterType',
'x:ui.mojom.KeyboardCode',
)
### DO NOT ADD ENTRIES TO THIS LIST. ###
def _DuplicateName(values):
  """Returns the 'mojom_name' of the first entry in |values| whose 'mojom_name'
  has already been encountered. If there are no duplicates, returns None."""
  seen = set()
  for value in values:
    name = value.mojom_name
    if name in seen:
      return name
    seen.add(name)
  return None
def _ElemsOfType(elems, elem_type, scope):
  """Find all elements of the given type.

  Args:
    elems: {Sequence[Any]} Sequence of elems.
    elem_type: {Type[C]} Extract all elems of this type.
    scope: {str} The name of the surrounding scope (e.g. struct
        definition). Used in error messages.

  Returns:
    {List[C]} All elems of matching type, in their original order.

  Raises:
    Exception: If two matching elements share the same mojom_name.
  """
  assert isinstance(elem_type, type)
  matches = [elem for elem in elems if isinstance(elem, elem_type)]
  repeated_name = _DuplicateName(matches)
  if repeated_name:
    raise Exception('Names in mojom must be unique within a scope. The name '
                    '"%s" is used more than once within the scope "%s".' %
                    (repeated_name, scope))
  return matches
def _ProcessElements(scope, elements, operations_by_type):
  """Iterates over the given elements, running a function from
  operations_by_type for any element that matches a key in that dict. The scope
  is the name of the surrounding scope, such as a filename or struct name, used
  only in error messages.

  Raises:
    Exception: If two processed elements share the same mojom_name.
  """
  names_in_this_scope = set()
  for element in elements:
    # pylint: disable=unidiomatic-typecheck
    element_type = type(element)
    if element_type in operations_by_type:
      if element.mojom_name in names_in_this_scope:
        # Bug fix: the original raise referenced an undefined variable
        # (duplicate_name), which would have produced a NameError instead of
        # this diagnostic.
        raise Exception('Names must be unique within a scope. The name "%s" is '
                        'used more than once within the scope "%s".' %
                        (element.mojom_name, scope))
      # Bug fix: the original never added names to names_in_this_scope, so the
      # duplicate check above could never trigger.
      names_in_this_scope.add(element.mojom_name)
      operations_by_type[element_type](element)
def _MapKind(kind):
  """Translates a mojom-syntax type name into its internal spec encoding.

  Examples: 'int32' -> 'i32', 'string?' -> '?s', 'uint8[]' -> 'a:u8',
  'int8[4]' -> 'a4:i8', 'string{int32}' -> 'm[i32][s]',
  'rmt<foo.Bar>' -> 'rmt:x:foo.Bar'. Unknown names map to 'x:<name>'.
  """
  simple_specs = {
      'bool': 'b',
      'int8': 'i8',
      'int16': 'i16',
      'int32': 'i32',
      'int64': 'i64',
      'uint8': 'u8',
      'uint16': 'u16',
      'uint32': 'u32',
      'uint64': 'u64',
      'float': 'f',
      'double': 'd',
      'string': 's',
      'handle': 'h',
      'handle<data_pipe_consumer>': 'h:d:c',
      'handle<data_pipe_producer>': 'h:d:p',
      'handle<message_pipe>': 'h:m',
      'handle<shared_buffer>': 'h:s',
      'handle<platform>': 'h:p'
  }
  # Nullable suffix wraps the underlying kind.
  if kind.endswith('?'):
    return '?' + _MapKind(kind[:-1])
  # Map syntax is 'ValueType{KeyType}'; the spec encodes key first.
  if kind.endswith('}'):
    brace = kind.rfind('{')
    return 'm[%s][%s]' % (_MapKind(kind[brace + 1:-1]), _MapKind(kind[:brace]))
  # Array syntax is 'ElemType[]' or 'ElemType[N]'.
  if kind.endswith(']'):
    bracket = kind.rfind('[')
    return 'a%s:%s' % (kind[bracket + 1:-1], _MapKind(kind[:bracket]))
  # Wrapper types: associated / remote / receiver / associated remote /
  # associated receiver, each delimited by angle brackets.
  for prefix, tag in (('asso<', 'asso:'), ('rmt<', 'rmt:'), ('rcv<', 'rcv:'),
                      ('rma<', 'rma:'), ('rca<', 'rca:')):
    if kind.startswith(prefix):
      assert kind.endswith('>')
      return tag + _MapKind(kind[len(prefix):-1])
  if kind in simple_specs:
    return simple_specs[kind]
  # Anything else is a user-defined type name.
  return 'x:' + kind
def _MapAttributeValue(module, kind, value):
  """Resolves a raw parsed attribute value to a richer representation.

  Non-string values (True/False/None, numbers) pass through unchanged. A
  string is tried, in order, as a feature name, then as a constant or enum
  value reference; if neither resolves it is returned as a plain string.
  """
  # True/False/None
  if value is None:
    return value
  if not isinstance(value, str):
    return value
  # Is the attribute value the name of a feature?
  try:
    # Features cannot be nested in other types, so lookup in the global scope.
    trial = _LookupKind(module.kinds, 'x:' + value,
                        _GetScopeForKind(module, kind))
    if isinstance(trial, mojom.Feature):
      return trial
  except ValueError:
    pass
  # Is the attribute value a constant or enum value?
  try:
    trial = _LookupValue(module, None, None, ('IDENTIFIER', value))
    if isinstance(trial, mojom.ConstantValue):
      # Constants resolve to the constant itself, not the wrapper value.
      return trial.constant
    if isinstance(trial, mojom.EnumValue):
      return trial
  except ValueError:
    pass
  # If not a referenceable mojo type - return as a string.
  return value
def _AttributeListToDict(module, kind, attribute_list):
  """Converts an ast.AttributeList into a {key: resolved value} dict.

  Returns None when no attribute list is present. Raises on duplicate keys.
  """
  if attribute_list is None:
    return None
  assert isinstance(attribute_list, ast.AttributeList)
  result = {}
  for attr in attribute_list:
    if attr.key in result:
      raise Exception("Duplicate key (%s) in attribute list" % attr.key)
    result[attr.key] = _MapAttributeValue(module, kind, attr.value)
  return result
# Fully-qualified names of numeric builtins that may appear as constant or
# enum values (e.g. "double.INFINITY") without being declared in any mojom.
builtin_values = frozenset([
    "double.INFINITY", "double.NEGATIVE_INFINITY", "double.NAN",
    "float.INFINITY", "float.NEGATIVE_INFINITY", "float.NAN"
])


def _IsBuiltinValue(value):
  """Returns True if |value| names one of the built-in numeric constants."""
  return value in builtin_values
def _LookupKind(kinds, spec, scope):
  """Tries to find which Kind a spec refers to, given the scope in which its
  referenced. Starts checking from the narrowest scope to most general. For
  example, given a struct field like
    Foo.Bar x;
  Foo.Bar could refer to the type 'Bar' in the 'Foo' namespace, or an inner
  type 'Bar' in the struct 'Foo' in the current namespace.

  |scope| is a tuple that looks like (namespace, struct/interface), referring
  to the location where the type is referenced."""
  # Non-user-defined specs (e.g. 'i32') are looked up verbatim.
  if not spec.startswith('x:'):
    return kinds.get(spec)
  mojom_name = spec[2:]
  # Qualify the name with progressively fewer scope components, innermost
  # scope first; depth 0 is the unqualified spec itself.
  for depth in reversed(range(len(scope) + 1)):
    prefix = '.'.join(scope[:depth])
    candidate = 'x:%s%s' % (prefix + '.' if depth else '', mojom_name)
    kind = kinds.get(candidate)
    if kind:
      return kind
  return None
def _GetScopeForKind(module, kind):
  """For a given kind, returns a tuple of progressively more specific names
  used to qualify the kind. For example if kind is an enum named Bar nested in a
  struct Foo within module 'foo', this would return ('foo', 'Foo', 'Bar').

  |kind| may be None, in which case only the module namespace qualifies the
  scope."""
  if isinstance(kind, mojom.Enum) and kind.parent_kind:
    # Enums may be nested in other kinds.
    return _GetScopeForKind(module, kind.parent_kind) + (kind.mojom_name, )
  module_fragment = (module.mojom_namespace, ) if module.mojom_namespace else ()
  kind_fragment = (kind.mojom_name, ) if kind else ()
  return module_fragment + kind_fragment
def _LookupValueInScope(module, kind, identifier):
  """Given a kind and an identifier, this attempts to resolve the given
  identifier to a concrete NamedValue within the scope of the given kind.
  Returns None if no match is found."""
  scope = _GetScopeForKind(module, kind)
  # Try the most fully qualified name first, stripping one scope component at
  # a time until a match is found in the module's value table.
  for i in reversed(range(len(scope) + 1)):
    qualified_name = '.'.join(scope[:i] + (identifier, ))
    value = module.values.get(qualified_name)
    if value:
      return value
  return None
def _LookupValue(module, parent_kind, implied_kind, ast_leaf_node):
  """Resolves a leaf node in the form ('IDENTIFIER', 'x') to a constant value
  identified by 'x' in some mojom definition. parent_kind is used as context
  when resolving the identifier. If the given leaf node is not an IDENTIFIER
  (e.g. already a constant value), it is returned as-is.

  If implied_kind is provided, the parsed identifier may also be resolved within
  its scope as fallback. This can be useful for more concise value references
  when assigning enum-typed constants or field values.

  Raises:
    ValueError: If the identifier cannot be resolved at all.
  """
  if not isinstance(ast_leaf_node, tuple) or ast_leaf_node[0] != 'IDENTIFIER':
    return ast_leaf_node

  # First look for a known user-defined identifier to resolve this within the
  # enclosing scope.
  identifier = ast_leaf_node[1]

  value = _LookupValueInScope(module, parent_kind, identifier)
  if value:
    return value

  # Next look in the scope of implied_kind, if provided.
  value = (implied_kind and implied_kind.module and _LookupValueInScope(
      implied_kind.module, implied_kind, identifier))
  if value:
    return value

  # Fall back on defined builtin symbols
  if _IsBuiltinValue(identifier):
    return mojom.BuiltinValue(identifier)

  raise ValueError('Unknown identifier %s' % identifier)
def _Kind(kinds, spec, scope):
  """Convert a type name into a mojom.Kind object.

  As a side-effect this function adds the result to 'kinds'.

  Args:
    kinds: {Dict[str, mojom.Kind]} All known kinds up to this point, indexed by
        their names.
    spec: {str} A name uniquely identifying a type.
    scope: {Tuple[str, str]} A tuple that looks like (namespace,
        struct/interface), referring to the location where the type is
        referenced.

  Returns:
    {mojom.Kind} The type corresponding to 'spec'.
  """
  kind = _LookupKind(kinds, spec, scope)
  if kind:
    return kind

  # Build composite kinds by recursively resolving the inner spec(s); the
  # leading token encodes the wrapper type (see _MapKind for the encoding).
  if spec.startswith('?'):
    kind = _Kind(kinds, spec[1:], scope)
    kind = kind.MakeNullableKind()
  elif spec.startswith('a:'):
    kind = mojom.Array(_Kind(kinds, spec[2:], scope))
  elif spec.startswith('asso:'):
    inner_kind = _Kind(kinds, spec[5:], scope)
    if isinstance(inner_kind, mojom.InterfaceRequest):
      kind = mojom.AssociatedInterfaceRequest(inner_kind)
    else:
      kind = mojom.AssociatedInterface(inner_kind)
  elif spec.startswith('a'):
    # Fixed-size array: 'a<N>:<element spec>'. Must come after the 'a:' and
    # 'asso:' checks above.
    colon = spec.find(':')
    length = int(spec[1:colon])
    kind = mojom.Array(_Kind(kinds, spec[colon + 1:], scope), length)
  elif spec.startswith('r:'):
    kind = mojom.InterfaceRequest(_Kind(kinds, spec[2:], scope))
  elif spec.startswith('rmt:'):
    kind = mojom.PendingRemote(_Kind(kinds, spec[4:], scope))
  elif spec.startswith('rcv:'):
    kind = mojom.PendingReceiver(_Kind(kinds, spec[4:], scope))
  elif spec.startswith('rma:'):
    kind = mojom.PendingAssociatedRemote(_Kind(kinds, spec[4:], scope))
  elif spec.startswith('rca:'):
    kind = mojom.PendingAssociatedReceiver(_Kind(kinds, spec[4:], scope))
  elif spec.startswith('m['):
    # Isolate the two types from their brackets.
    # It is not allowed to use map as key, so there shouldn't be nested ']'s
    # inside the key type spec.
    key_end = spec.find(']')
    assert key_end != -1 and key_end < len(spec) - 1
    assert spec[key_end + 1] == '[' and spec[-1] == ']'
    first_kind = spec[2:key_end]
    second_kind = spec[key_end + 2:-1]
    kind = mojom.Map(
        _Kind(kinds, first_kind, scope), _Kind(kinds, second_kind, scope))
  else:
    # Unresolved user-defined type; a placeholder Kind is recorded.
    kind = mojom.Kind(spec)

  # Memoize so later references to the same spec resolve to the same object.
  kinds[spec] = kind
  return kind
def _Import(module, import_module):
  """Exposes the importable symbols of |import_module| inside |module|.

  Only kinds and values defined directly in |import_module| (path matches),
  not ones it merely re-exposes from its own imports, are copied. Returns
  |import_module|."""
  # Copy the struct kinds from our imports into the current module.
  importable_kinds = (mojom.Struct, mojom.Union, mojom.Enum, mojom.Interface,
                      mojom.Feature)
  for kind in import_module.kinds.values():
    # The isinstance check must run first: non-importable placeholder kinds
    # may not have a usable .module attribute.
    if (isinstance(kind, importable_kinds)
        and kind.module.path == import_module.path):
      module.kinds[kind.spec] = kind
  # Ditto for values.
  for value in import_module.values.values():
    if value.module.path == import_module.path:
      module.values[value.GetSpec()] = value

  return import_module
def _Feature(module, parsed_feature):
  """
  Args:
    module: {mojom.Module} Module currently being constructed.
    parsed_feature: {ast.Feature} Parsed feature.

  Returns:
    {mojom.Feature} AST feature.
  """
  feature = mojom.Feature(module=module)
  feature.mojom_name = parsed_feature.mojom_name
  feature.spec = 'x:' + module.GetNamespacePrefix() + feature.mojom_name
  # Register in the module-wide kind table so other definitions can refer to
  # this feature by name.
  module.kinds[feature.spec] = feature
  feature.constants = []
  # Features may only contain constants.
  _ProcessElements(
      parsed_feature.mojom_name, parsed_feature.body, {
          ast.Const:
          lambda const: feature.constants.append(
              _Constant(module, const, feature)),
      })

  feature.attributes = _AttributeListToDict(module, feature,
                                            parsed_feature.attribute_list)
  return feature
def _Struct(module, parsed_struct):
  """
  Args:
    module: {mojom.Module} Module currently being constructed.
    parsed_struct: {ast.Struct} Parsed struct.

  Returns:
    {mojom.Struct} AST struct.
  """
  struct = mojom.Struct(module=module)
  struct.mojom_name = parsed_struct.mojom_name
  # A struct with no body (e.g. '[Native] struct Foo;') is a native-only stub.
  struct.native_only = parsed_struct.body is None
  struct.spec = 'x:' + module.GetNamespacePrefix() + struct.mojom_name
  module.kinds[struct.spec] = struct
  struct.enums = []
  struct.constants = []
  # Raw ast.StructField entries; translated to mojom fields in a later pass.
  struct.fields_data = []
  if not struct.native_only:
    _ProcessElements(
        parsed_struct.mojom_name, parsed_struct.body, {
            ast.Enum:
            lambda enum: struct.enums.append(_Enum(module, enum, struct)),
            ast.Const:
            lambda const: struct.constants.append(
                _Constant(module, const, struct)),
            ast.StructField:
            struct.fields_data.append,
        })

  struct.attributes = _AttributeListToDict(module, struct,
                                           parsed_struct.attribute_list)

  # Enforce that a [Native] attribute is set to make native-only struct
  # declarations more explicit.
  if struct.native_only:
    if not struct.attributes or not struct.attributes.get('Native', False):
      raise Exception("Native-only struct declarations must include a " +
                      "Native attribute.")

  if struct.attributes and struct.attributes.get('CustomSerializer', False):
    struct.custom_serializer = True

  return struct
def _Union(module, parsed_union):
  """
  Args:
    module: {mojom.Module} Module currently being constructed.
    parsed_union: {ast.Union} Parsed union.

  Returns:
    {mojom.Union} AST union.
  """
  union = mojom.Union(module=module)
  union.mojom_name = parsed_union.mojom_name
  union.spec = 'x:' + module.GetNamespacePrefix() + union.mojom_name
  module.kinds[union.spec] = union
  # Stash fields parsed_union here temporarily; they are translated to
  # mojom.UnionField objects in a later pass.
  union.fields_data = []
  _ProcessElements(parsed_union.mojom_name, parsed_union.body,
                   {ast.UnionField: union.fields_data.append})
  union.attributes = _AttributeListToDict(module, union,
                                          parsed_union.attribute_list)
  return union
def _StructField(module, parsed_field, struct):
  """
  Args:
    module: {mojom.Module} Module currently being constructed.
    parsed_field: {ast.StructField} Parsed struct field.
    struct: {mojom.Struct} Struct this field belongs to.

  Returns:
    {mojom.StructField} AST struct field.
  """
  field = mojom.StructField()
  field.mojom_name = parsed_field.mojom_name
  field.kind = _Kind(module.kinds, _MapKind(parsed_field.typename),
                     (module.mojom_namespace, struct.mojom_name))
  field.ordinal = parsed_field.ordinal.value if parsed_field.ordinal else None
  # field.kind is passed as the implied kind so enum-typed defaults can use
  # unqualified enumerator names.
  field.default = _LookupValue(module, struct, field.kind,
                               parsed_field.default_value)
  field.attributes = _AttributeListToDict(module, field,
                                          parsed_field.attribute_list)
  return field
def _UnionField(module, parsed_field, union):
  """
  Args:
    module: {mojom.Module} Module currently being constructed.
    parsed_field: {ast.UnionField} Parsed union field.
    union: {mojom.Union} Union this fields belong to.

  Returns:
    {mojom.UnionField} AST union field.
  """
  field = mojom.UnionField()
  field.mojom_name = parsed_field.mojom_name
  # Disallow unions from being self-recursive.
  parsed_typename = parsed_field.typename
  if parsed_typename.endswith('?'):
    parsed_typename = parsed_typename[:-1]
  assert parsed_typename != union.mojom_name
  field.kind = _Kind(module.kinds, _MapKind(parsed_field.typename),
                     (module.mojom_namespace, union.mojom_name))
  field.ordinal = parsed_field.ordinal.value if parsed_field.ordinal else None
  field.default = None
  field.attributes = _AttributeListToDict(module, field,
                                          parsed_field.attribute_list)
  # NOTE(review): field.is_default presumably reflects a [Default] attribute
  # resolved by mojom.UnionField — confirm in mojom/generate/module.py.
  if field.is_default and not mojom.IsNullableKind(field.kind) and \
      not mojom.IsIntegralKind(field.kind):
    raise Exception(
        '[Default] field for union %s must be nullable or integral type.' %
        union.mojom_name)
  return field
def _Parameter(module, parsed_param, interface):
  """
  Args:
    module: {mojom.Module} Module currently being constructed.
    parsed_param: {ast.Parameter} Parsed parameter.
    interface: {mojom.Interface} Interface this parameter belongs to.

  Returns:
    {mojom.Parameter} AST parameter.
  """
  parameter = mojom.Parameter()
  parameter.mojom_name = parsed_param.mojom_name
  parameter.kind = _Kind(module.kinds, _MapKind(parsed_param.typename),
                         (module.mojom_namespace, interface.mojom_name))
  parameter.ordinal = (parsed_param.ordinal.value
                       if parsed_param.ordinal else None)
  parameter.default = None  # TODO(tibell): We never have these. Remove field?
  parameter.attributes = _AttributeListToDict(module, parameter,
                                              parsed_param.attribute_list)
  return parameter
def _Method(module, parsed_method, interface):
  """
  Args:
    module: {mojom.Module} Module currently being constructed.
    parsed_method: {ast.Method} Parsed method.
    interface: {mojom.Interface} Interface this method belongs to.

  Returns:
    {mojom.Method} AST method.
  """
  method = mojom.Method(
      interface,
      parsed_method.mojom_name,
      ordinal=parsed_method.ordinal.value if parsed_method.ordinal else None)
  method.parameters = list(
      map(lambda parameter: _Parameter(module, parameter, interface),
          parsed_method.parameter_list))
  # A None response_parameter_list means the method has no response at all,
  # which is distinct from an empty response '=> ()'.
  if parsed_method.response_parameter_list is not None:
    method.response_parameters = list(
        map(lambda parameter: _Parameter(module, parameter, interface),
            parsed_method.response_parameter_list))
  method.attributes = _AttributeListToDict(module, method,
                                           parsed_method.attribute_list)

  # Enforce that only methods with response can have a [Sync] attribute.
  # NOTE(review): method.sync / method.allow_interrupt presumably derive from
  # the [Sync] / [NoInterrupt] attributes via mojom.Method — confirm.
  if method.sync and method.response_parameters is None:
    raise Exception("Only methods with response can include a [Sync] "
                    "attribute. If no response parameters are needed, you "
                    "could use an empty response parameter list, i.e., "
                    "\"=> ()\".")
  # And only methods with the [Sync] attribute can specify [NoInterrupt].
  if not method.allow_interrupt and not method.sync:
    raise Exception("Only [Sync] methods can be marked [NoInterrupt].")

  return method
def _Interface(module, parsed_iface):
  """
  Args:
    module: {mojom.Module} Module currently being constructed.
    parsed_iface: {ast.Interface} Parsed interface.

  Returns:
    {mojom.Interface} AST interface.
  """
  interface = mojom.Interface(module=module)
  interface.mojom_name = parsed_iface.mojom_name
  interface.spec = 'x:' + module.GetNamespacePrefix() + interface.mojom_name
  module.kinds[interface.spec] = interface
  interface.attributes = _AttributeListToDict(module, interface,
                                              parsed_iface.attribute_list)
  interface.enums = []
  interface.constants = []
  # Raw ast.Method entries; translated to mojom.Method objects in a later
  # pass, once all referenced types are known.
  interface.methods_data = []
  _ProcessElements(
      parsed_iface.mojom_name, parsed_iface.body, {
          ast.Enum:
          lambda enum: interface.enums.append(_Enum(module, enum, interface)),
          ast.Const:
          lambda const: interface.constants.append(
              _Constant(module, const, interface)),
          ast.Method:
          interface.methods_data.append,
      })
  return interface
def _EnumField(module, enum, parsed_field):
  """
  Args:
    module: {mojom.Module} Module currently being constructed.
    enum: {mojom.Enum} Enum this field belongs to.
    parsed_field: {ast.EnumValue} Parsed enum value.

  Returns:
    {mojom.EnumField} AST enum field.
  """
  field = mojom.EnumField()
  field.mojom_name = parsed_field.mojom_name
  # The raw value is resolved here; numeric assignment happens later in
  # _ResolveNumericEnumValues().
  field.value = _LookupValue(module, enum, None, parsed_field.value)
  field.attributes = _AttributeListToDict(module, field,
                                          parsed_field.attribute_list)
  # Register the enumerator in the module-wide value table so other
  # definitions can reference it by name.
  value = mojom.EnumValue(module, enum, field)
  module.values[value.GetSpec()] = value
  return field
def _ResolveNumericEnumValues(enum):
  """
  Given a reference to a mojom.Enum, resolves and assigns the numeric value of
  each field, and also computes the min_value and max_value of the enum.
  """
  # Implicit numbering starts at 0, i.e. one past this initial value.
  prev_value = -1
  min_value = None
  max_value = None
  for field in enum.fields:
    # This enum value is +1 the previous enum value (e.g: BEGIN).
    if field.value is None:
      prev_value += 1

    # Integral value (e.g: BEGIN = -0x1). Base 0 lets int() accept decimal,
    # hex, octal, and binary literals.
    elif isinstance(field.value, str):
      prev_value = int(field.value, 0)

    # Reference to a previous enum value (e.g: INIT = BEGIN).
    elif isinstance(field.value, mojom.EnumValue):
      prev_value = field.value.field.numeric_value
    elif isinstance(field.value, mojom.ConstantValue):
      constant = field.value.constant
      kind = constant.kind
      if not mojom.IsIntegralKind(kind) or mojom.IsBoolKind(kind):
        raise ValueError('Enum values must be integers. %s is not an integer.' %
                         constant.mojom_name)
      prev_value = int(constant.value, 0)
    else:
      raise Exception('Unresolved enum value for %s' % field.value.GetSpec())

    # -128/-127 are reserved (see the exception message for the rationale).
    if prev_value in (-128, -127):
      raise Exception(f'{field.mojom_name} in {enum.spec} has the value '
                      f'{prev_value}, which is reserved for WTF::HashTrait\'s '
                      'default enum specialization and may not be used.')
    field.numeric_value = prev_value
    if min_value is None or prev_value < min_value:
      min_value = prev_value
    if max_value is None or prev_value > max_value:
      max_value = prev_value

  enum.min_value = min_value
  enum.max_value = max_value
def _Enum(module, parsed_enum, parent_kind):
  """
  Args:
    module: {mojom.Module} Module currently being constructed.
    parsed_enum: {ast.Enum} Parsed enum.
    parent_kind: Enclosing struct/interface, or None for a top-level enum.

  Returns:
    {mojom.Enum} AST enum.
  """
  enum = mojom.Enum(module=module)
  enum.mojom_name = parsed_enum.mojom_name
  # An enum with no value list (e.g. '[Native] enum Foo;') is a native-only
  # stub.
  enum.native_only = parsed_enum.enum_value_list is None
  mojom_name = enum.mojom_name
  if parent_kind:
    mojom_name = parent_kind.mojom_name + '.' + mojom_name
  enum.spec = 'x:%s.%s' % (module.mojom_namespace, mojom_name)
  enum.parent_kind = parent_kind
  enum.attributes = _AttributeListToDict(module, enum,
                                         parsed_enum.attribute_list)

  if not enum.native_only:
    enum.fields = list(
        map(lambda field: _EnumField(module, enum, field),
            parsed_enum.enum_value_list))
    _ResolveNumericEnumValues(enum)
    # TODO(https://crbug.com/731893): Require a default value to be
    # specified.
    for field in enum.fields:
      if field.default:
        if not enum.extensible:
          raise Exception(
              f'Non-extensible enum {enum.spec} may not specify a default')
        if enum.default_field is not None:
          raise Exception(
              f'Multiple [Default] enumerators in enum {enum.spec}')
        enum.default_field = field
    # While running the backwards compatibility check, ignore errors because the
    # old version of the enum might not specify [Default].
    if (enum.extensible and enum.default_field is None
        and enum.spec not in _EXTENSIBLE_ENUMS_MISSING_DEFAULT
        and not is_running_backwards_compatibility_check_hack):
      raise Exception(
          f'Extensible enum {enum.spec} must specify a [Default] enumerator')

  module.kinds[enum.spec] = enum

  # Enforce that a [Native] attribute is set to make native-only enum
  # declarations more explicit.
  if enum.native_only:
    if not enum.attributes or not enum.attributes.get('Native', False):
      raise Exception("Native-only enum declarations must include a " +
                      "Native attribute.")

  return enum
def _Constant(module, parsed_const, parent_kind):
  """
  Args:
    module: {mojom.Module} Module currently being constructed.
    parsed_const: {ast.Const} Parsed constant.
    parent_kind: Enclosing kind, or None for a top-level constant.

  Returns:
    {mojom.Constant} AST constant.
  """
  constant = mojom.Constant()
  constant.mojom_name = parsed_const.mojom_name
  if parent_kind:
    scope = (module.mojom_namespace, parent_kind.mojom_name)
  else:
    scope = (module.mojom_namespace, )
  # TODO(mpcomplete): maybe we should only support POD kinds.
  constant.kind = _Kind(module.kinds, _MapKind(parsed_const.typename), scope)
  constant.parent_kind = parent_kind
  constant.value = _LookupValue(module, parent_kind, constant.kind,
                                parsed_const.value)

  # Iteratively resolve this constant reference to a concrete value
  while isinstance(constant.value, mojom.ConstantValue):
    constant.value = constant.value.constant.value

  # Register the constant in the module-wide value table so other definitions
  # can reference it by name.
  value = mojom.ConstantValue(module, parent_kind, constant)
  module.values[value.GetSpec()] = value
  return constant
def _CollectReferencedKinds(module, all_defined_kinds):
  """
  Takes a {mojom.Module} object and a list of all defined kinds within that
  module, and enumerates the complete dict of user-defined mojom types
  (as {mojom.Kind} objects) referenced by the module's own defined kinds (i.e.
  as types of struct or union or interface parameters. The returned dict is
  keyed by kind spec.
  """

  def extract_referenced_user_kinds(kind):
    # Containers don't count as user kinds themselves; recurse into the
    # element / key / value kinds they carry.
    if mojom.IsArrayKind(kind):
      return extract_referenced_user_kinds(kind.kind)
    if mojom.IsMapKind(kind):
      return (extract_referenced_user_kinds(kind.key_kind) +
              extract_referenced_user_kinds(kind.value_kind))
    # Request/remote/receiver wrappers reference the interface they wrap.
    if (mojom.IsInterfaceRequestKind(kind) or mojom.IsAssociatedKind(kind)
        or mojom.IsPendingRemoteKind(kind)
        or mojom.IsPendingReceiverKind(kind)):
      return [kind.kind]
    # Structs, interfaces, enums and unions are user-defined kinds proper.
    if mojom.IsStructKind(kind):
      return [kind]
    if (mojom.IsInterfaceKind(kind) or mojom.IsEnumKind(kind)
        or mojom.IsUnionKind(kind)):
      return [kind]
    # Anything else (primitives, handles, strings) references no user kind.
    return []

  def sanitize_kind(kind):
    """Removes nullability from a kind"""
    # A leading '?' in the spec marks nullability; re-resolve without it.
    if kind.spec.startswith('?'):
      return _Kind(module.kinds, kind.spec[1:], (module.mojom_namespace, ''))
    return kind

  referenced_user_kinds = {}
  # Scan struct and union field types.
  for defined_kind in all_defined_kinds:
    if mojom.IsStructKind(defined_kind) or mojom.IsUnionKind(defined_kind):
      for field in defined_kind.fields:
        for referenced_kind in extract_referenced_user_kinds(field.kind):
          sanitized_kind = sanitize_kind(referenced_kind)
          referenced_user_kinds[sanitized_kind.spec] = sanitized_kind

  # Also scan for references in parameter lists
  for interface in module.interfaces:
    for method in interface.methods:
      for param in itertools.chain(method.parameters or [],
                                   method.response_parameters or []):
        for referenced_kind in extract_referenced_user_kinds(param.kind):
          sanitized_kind = sanitize_kind(referenced_kind)
          referenced_user_kinds[sanitized_kind.spec] = sanitized_kind

  # Consts can reference imported enums.
  for const in module.constants:
    if not const.kind in mojom.PRIMITIVES:
      sanitized_kind = sanitize_kind(const.kind)
      referenced_user_kinds[sanitized_kind.spec] = sanitized_kind

  return referenced_user_kinds
def _AssignDefaultOrdinals(items):
"""Assigns default ordinal values to a sequence of items if necessary."""
next_ordinal = 0
for item in items:
if item.ordinal is not None:
next_ordinal = item.ordinal + 1
else:
item.ordinal = next_ordinal
next_ordinal += 1
def _AssertTypeIsStable(kind):
  """Raises an error if a type is not stable, meaning it is composed of at least
  one type that is not marked [Stable]."""

  def assertDependencyIsStable(dependency):
    # User-defined kinds must themselves carry the [Stable] marker.
    if (mojom.IsEnumKind(dependency) or mojom.IsStructKind(dependency)
        or mojom.IsUnionKind(dependency) or mojom.IsInterfaceKind(dependency)):
      if not dependency.stable:
        raise Exception(
            '%s is marked [Stable] but cannot be stable because it depends on '
            '%s, which is not marked [Stable].' %
            (kind.mojom_name, dependency.mojom_name))
    # Arrays and interface wrappers delegate to the single kind they carry.
    elif mojom.IsArrayKind(dependency) or mojom.IsAnyInterfaceKind(dependency):
      assertDependencyIsStable(dependency.kind)
    # Maps delegate to both their key and value kinds.
    elif mojom.IsMapKind(dependency):
      assertDependencyIsStable(dependency.key_kind)
      assertDependencyIsStable(dependency.value_kind)

  # Check every kind this type's fields or method signatures depend on.
  if mojom.IsStructKind(kind) or mojom.IsUnionKind(kind):
    for field in kind.fields:
      assertDependencyIsStable(field.kind)
  elif mojom.IsInterfaceKind(kind):
    for method in kind.methods:
      for param in method.param_struct.fields:
        assertDependencyIsStable(param.kind)
      if method.response_param_struct:
        for response_param in method.response_param_struct.fields:
          assertDependencyIsStable(response_param.kind)
def _AssertStructIsValid(kind):
expected_ordinals = set(range(0, len(kind.fields)))
ordinals = set(map(lambda field: field.ordinal, kind.fields))
if ordinals != expected_ordinals:
raise Exception(
'Structs must use contiguous ordinals starting from 0. ' +
'{} is missing the following ordinals: {}.'.format(
kind.mojom_name, ', '.join(map(str, expected_ordinals - ordinals))))
def _Module(tree, path, imports):
  """
  Args:
    tree: {ast.Mojom} The parse tree.
    path: {str} The path to the mojom file.
    imports: {Dict[str, mojom.Module]} Mapping from filenames, as they appear in
        the import list, to already processed modules. Used to process imports.

  Returns:
    {mojom.Module} An AST for the mojom.
  """
  module = mojom.Module(path=path)
  # Seed the kind table with the built-in primitive kinds.
  module.kinds = {}
  for kind in mojom.PRIMITIVES:
    module.kinds[kind.spec] = kind

  module.values = {}

  # Take the namespace name from the parsed module declaration, if any.
  module.mojom_namespace = tree.module.mojom_namespace[1] if tree.module else ''
  # Imports must come first, because they add to module.kinds which is used
  # by the others.
  module.imports = [
      _Import(module, imports[imp.import_filename]) for imp in tree.import_list
  ]
  if tree.module and tree.module.attribute_list:
    assert isinstance(tree.module.attribute_list, ast.AttributeList)
    # TODO(vtl): Check for duplicate keys here.
    module.attributes = dict((attribute.key, attribute.value)
                             for attribute in tree.module.attribute_list)

  filename = os.path.basename(path)
  # First pass collects kinds.
  module.constants = []
  module.enums = []
  module.structs = []
  module.unions = []
  module.interfaces = []
  module.features = []
  # Dispatch each top-level definition to the translator for its AST type.
  _ProcessElements(
      filename, tree.definition_list, {
          ast.Const:
          lambda const: module.constants.append(_Constant(module, const, None)),
          ast.Enum:
          lambda enum: module.enums.append(_Enum(module, enum, None)),
          ast.Struct:
          lambda struct: module.structs.append(_Struct(module, struct)),
          ast.Union:
          lambda union: module.unions.append(_Union(module, union)),
          ast.Interface:
          lambda interface: module.interfaces.append(
              _Interface(module, interface)),
          ast.Feature:
          lambda feature: module.features.append(_Feature(module, feature)),
      })

  # Second pass expands fields and methods. This allows fields and parameters
  # to refer to kinds defined anywhere in the mojom.
  all_defined_kinds = {}
  for struct in module.structs:
    struct.fields = list(
        map(lambda field: _StructField(module, field, struct),
            struct.fields_data))
    _AssignDefaultOrdinals(struct.fields)
    # fields_data was only needed to defer field translation to this pass.
    del struct.fields_data
    all_defined_kinds[struct.spec] = struct
    for enum in struct.enums:
      all_defined_kinds[enum.spec] = enum

  for feature in module.features:
    all_defined_kinds[feature.spec] = feature

  for union in module.unions:
    union.fields = list(
        map(lambda field: _UnionField(module, field, union), union.fields_data))
    _AssignDefaultOrdinals(union.fields)
    # At most one union field may be marked [Default].
    for field in union.fields:
      if field.is_default:
        if union.default_field is not None:
          raise Exception('Multiple [Default] fields in union %s.' %
                          union.mojom_name)
        union.default_field = field
    del union.fields_data
    all_defined_kinds[union.spec] = union
    if union.extensible and union.default_field is None:
      raise Exception('Extensible union %s must specify a [Default] field' %
                      union.mojom_name)

  for interface in module.interfaces:
    interface.methods = list(
        map(lambda method: _Method(module, method, interface),
            interface.methods_data))
    _AssignDefaultOrdinals(interface.methods)
    del interface.methods_data
    all_defined_kinds[interface.spec] = interface
    for enum in interface.enums:
      all_defined_kinds[enum.spec] = enum
  for enum in module.enums:
    all_defined_kinds[enum.spec] = enum

  # Anything referenced but not defined here must have come from an import.
  all_referenced_kinds = _CollectReferencedKinds(module,
                                                 all_defined_kinds.values())
  imported_kind_specs = set(all_referenced_kinds.keys()).difference(
      set(all_defined_kinds.keys()))
  module.imported_kinds = OrderedDict((spec, all_referenced_kinds[spec])
                                      for spec in sorted(imported_kind_specs))

  # Populate derived data on the module (see generator.AddComputedData), then
  # assign ordinals to the synthesized parameter structs it creates.
  generator.AddComputedData(module)
  for iface in module.interfaces:
    for method in iface.methods:
      if method.param_struct:
        _AssignDefaultOrdinals(method.param_struct.fields)
      if method.response_param_struct:
        _AssignDefaultOrdinals(method.response_param_struct.fields)

  # Ensure that all types marked [Stable] are actually stable. Enums are
  # automatically OK since they don't depend on other definitions.
  for kinds in (module.structs, module.unions, module.interfaces):
    for kind in kinds:
      if kind.stable:
        _AssertTypeIsStable(kind)

  for kind in module.structs:
    _AssertStructIsValid(kind)

  return module
def OrderedModule(tree, path, imports):
  """Convert parse tree to AST module.

  Args:
    tree: {ast.Mojom} The parse tree.
    path: {str} The path to the mojom file.
    imports: {Dict[str, mojom.Module]} Mapping from filenames, as they appear
        in the import list, to already processed modules. Used to process
        imports.

  Returns:
    {mojom.Module} An AST for the mojom.
  """
  return _Module(tree, path, imports)
|
0 | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom/generate/pack.py | # Copyright 2013 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
from mojom.generate import module as mojom
# This module provides a mechanism for determining the packed order and offsets
# of a mojom.Struct.
#
# ps = pack.PackedStruct(struct)
# ps.packed_fields will access a list of PackedField objects, each of which
# will have an offset, a size and a bit (for mojom.BOOLs).
# Size of struct header in bytes: num_bytes [4B] + version [4B].
HEADER_SIZE = 8
class PackedField:
  """A mojom.Field annotated with wire-layout information.

  Holds the field's serialized size, alignment, byte offset, bit position
  (for packed bools) and minimum struct version, as computed by PackedStruct.
  """

  # Serialized size, in bytes, of each fixed-size mojom kind.
  kind_to_size = {
      mojom.BOOL: 1,
      mojom.INT8: 1,
      mojom.UINT8: 1,
      mojom.INT16: 2,
      mojom.UINT16: 2,
      mojom.INT32: 4,
      mojom.UINT32: 4,
      mojom.FLOAT: 4,
      mojom.HANDLE: 4,
      mojom.MSGPIPE: 4,
      mojom.SHAREDBUFFER: 4,
      mojom.PLATFORMHANDLE: 4,
      mojom.DCPIPE: 4,
      mojom.DPPIPE: 4,
      mojom.NULLABLE_HANDLE: 4,
      mojom.NULLABLE_MSGPIPE: 4,
      mojom.NULLABLE_SHAREDBUFFER: 4,
      mojom.NULLABLE_PLATFORMHANDLE: 4,
      mojom.NULLABLE_DCPIPE: 4,
      mojom.NULLABLE_DPPIPE: 4,
      mojom.INT64: 8,
      mojom.UINT64: 8,
      mojom.DOUBLE: 8,
      mojom.STRING: 8,
      mojom.NULLABLE_STRING: 8
  }

  @classmethod
  def GetSizeForKind(cls, kind):
    """Returns the number of bytes a field of |kind| occupies on the wire."""
    if isinstance(kind, (mojom.Array, mojom.Map, mojom.Struct, mojom.Interface,
                         mojom.AssociatedInterface, mojom.PendingRemote,
                         mojom.PendingAssociatedRemote)):
      return 8
    if isinstance(kind, mojom.Union):
      return 16
    # Interface requests/receivers are serialized as their message pipe.
    if isinstance(kind, (mojom.InterfaceRequest, mojom.PendingReceiver)):
      kind = mojom.MSGPIPE
    if isinstance(
        kind,
        (mojom.AssociatedInterfaceRequest, mojom.PendingAssociatedReceiver)):
      return 4
    if isinstance(kind, mojom.Enum):
      # TODO(mpcomplete): what about big enums?
      return cls.kind_to_size[mojom.INT32]
    if not kind in cls.kind_to_size:
      raise Exception("Undefined type: %s. Did you forget to import the file "
                      "containing the definition?" % kind.spec)
    return cls.kind_to_size[kind]

  @classmethod
  def GetAlignmentForKind(cls, kind):
    """Returns the byte alignment required for a field of |kind|."""
    if isinstance(kind, (mojom.Interface, mojom.AssociatedInterface,
                         mojom.PendingRemote, mojom.PendingAssociatedRemote)):
      return 4
    if isinstance(kind, mojom.Union):
      return 8
    # All other kinds are naturally aligned to their size.
    return cls.GetSizeForKind(kind)

  def __init__(self,
               field,
               index,
               ordinal,
               original_field=None,
               sub_ordinal=None,
               linked_value_packed_field=None):
    """
    Args:
      field: the original field.
      index: the position of the original field in the struct.
      ordinal: the ordinal of the field for serialization.
      original_field: See below.
      sub_ordinal: See below.
      linked_value_packed_field: See below.

    original_field, sub_ordinal, and linked_value_packed_field are used to
    support nullable ValueKind fields. For legacy reasons, nullable ValueKind
    fields actually generate two PackedFields. This allows:

    - backwards compatibility prior to Mojo support for nullable ValueKinds.
    - correct packing of fields for the aforementioned backwards compatibility.

    When translating Fields to PackedFields, the original field is turned into
    two PackedFields: the first PackedField always has type mojom.BOOL, while
    the second PackedField has the non-nullable version of the field's kind.

    When constructing these PackedFields, original_field references the field
    as defined in the mojom; the name as defined in the mojom will be used for
    all layers above the wire/data layer.

    sub_ordinal is used to sort the two PackedFields correctly with respect to
    each other: the first mojom.BOOL field always has sub_ordinal 0, while the
    second field always has sub_ordinal 1.

    Finally, linked_value_packed_field is used by the serialization and
    deserialization helpers, which generally just iterate over a PackedStruct's
    PackedField's in ordinal order. This allows the helpers to easily reference
    any related PackedFields rather than having to lookup related PackedFields
    by index while iterating.
    """
    self.field = field
    self.index = index
    self.ordinal = ordinal
    self.original_field = original_field
    self.sub_ordinal = sub_ordinal
    self.linked_value_packed_field = linked_value_packed_field
    self.size = self.GetSizeForKind(self.field.kind)
    self.alignment = self.GetAlignmentForKind(self.field.kind)
    self.offset = None  # Byte offset into the payload; set by PackedStruct.
    self.bit = None  # Bit index within the byte, for packed bools.
    self.min_version = None  # Struct version introducing this field.
def GetPad(offset, alignment):
  """Returns the pad necessary to reserve space so that |offset + pad| equals
  to some multiple of |alignment|."""
  return (alignment - (offset % alignment)) % alignment


def GetFieldOffset(field, last_field):
  """Returns a 2-tuple of the field offset and bit (for BOOLs)."""
  # Consecutive bools share a byte until all eight of its bits are used.
  both_bools = (field.field.kind == mojom.BOOL
                and last_field.field.kind == mojom.BOOL)
  if both_bools and last_field.bit < 7:
    return (last_field.offset, last_field.bit + 1)

  unaligned = last_field.offset + last_field.size
  return (unaligned + GetPad(unaligned, field.alignment), 0)


def GetPayloadSizeUpToField(field):
  """Returns the payload size (not including struct header) if |field| is the
  last field.
  """
  if not field:
    return 0
  end = field.offset + field.size
  # Payloads are padded out to an 8-byte boundary.
  return end + GetPad(end, 8)
def IsNullableValueKindPackedField(field):
  """Returns true if `field` is derived from a nullable ValueKind field.

  Nullable ValueKind fields often require special handling in the bindings due
  to the way the implementation is constrained for wire compatibility.
  """
  assert isinstance(field, PackedField)
  # Only the two synthesized halves of a nullable value field carry a
  # sub_ordinal; every other PackedField leaves it as None.
  has_sub_ordinal = field.sub_ordinal is not None
  return has_sub_ordinal
def IsPrimaryNullableValueKindPackedField(field):
  """Returns true if `field` is derived from a nullable ValueKind mojom field
  and is the "primary" field.

  The primary field is a bool PackedField that controls if the field should be
  considered as present or not; it will have a reference to the PackedField that
  holds the actual value representation if considered present.

  Bindings code that translates between the wire protocol and the higher layers
  can use this to simplify mapping multiple PackedFields to the single field
  that is logically exposed to bindings consumers.
  """
  assert isinstance(field, PackedField)
  # Only the presence-flag half of a nullable value field links to its
  # companion value field.
  is_primary = field.linked_value_packed_field is not None
  return is_primary
class PackedStruct:
  """Computes and holds the packed (wire) field layout for a mojom.Struct."""

  def __init__(self, struct):
    self.struct = struct
    # |packed_fields| contains all the fields, in increasing offset order.
    self.packed_fields = []
    # |packed_fields_in_ordinal_order| refers to the same fields as
    # |packed_fields|, but in ordinal order.
    self.packed_fields_in_ordinal_order = []

    # No fields.
    if (len(struct.fields) == 0):
      return

    # Start by sorting by ordinal.
    src_fields = self.packed_fields_in_ordinal_order
    ordinal = 0
    for index, field in enumerate(struct.fields):
      if field.ordinal is not None:
        ordinal = field.ordinal
      # Nullable value types are a bit weird: they generate two PackedFields
      # despite being a single ValueKind. This is for wire compatibility to
      # ease the transition from legacy mojom syntax where nullable value types
      # were not supported.
      if isinstance(field.kind, mojom.ValueKind) and field.kind.is_nullable:
        # The suffixes intentionally use Unicode codepoints which are considered
        # valid C++/Java/JavaScript identifiers, yet are unlikely to be used in
        # actual user code.
        has_value_field = copy.copy(field)
        has_value_field.name = f'{field.mojom_name}_$flag'
        has_value_field.kind = mojom.BOOL

        value_field = copy.copy(field)
        value_field.name = f'{field.mojom_name}_$value'
        value_field.kind = field.kind.MakeUnnullableKind()

        value_packed_field = PackedField(value_field,
                                         index,
                                         ordinal,
                                         original_field=field,
                                         sub_ordinal=1,
                                         linked_value_packed_field=None)
        has_value_packed_field = PackedField(
            has_value_field,
            index,
            ordinal,
            original_field=field,
            sub_ordinal=0,
            linked_value_packed_field=value_packed_field)
        src_fields.append(has_value_packed_field)
        src_fields.append(value_packed_field)
      else:
        src_fields.append(PackedField(field, index, ordinal))
      ordinal += 1
    # sub_ordinal orders the flag/value halves of a nullable field; it is only
    # compared when two entries share an ordinal, so None elsewhere is safe.
    src_fields.sort(key=lambda field: (field.ordinal, field.sub_ordinal))

    # Set |min_version| for each field.
    next_min_version = 0
    for packed_field in src_fields:
      if packed_field.field.min_version is None:
        assert next_min_version == 0
      else:
        assert packed_field.field.min_version >= next_min_version
        next_min_version = packed_field.field.min_version
      packed_field.min_version = next_min_version

      if (packed_field.min_version != 0
          and mojom.IsReferenceKind(packed_field.field.kind)
          and not packed_field.field.kind.is_nullable):
        raise Exception(
            "Non-nullable reference fields are only allowed in version 0 of a "
            "struct. %s.%s is defined with [MinVersion=%d]." %
            (self.struct.name, packed_field.field.name,
             packed_field.min_version))

    src_field = src_fields[0]
    src_field.offset = 0
    src_field.bit = 0
    dst_fields = self.packed_fields
    dst_fields.append(src_field)

    # Then find first slot that each field will fit.
    # First-fit: try to place each subsequent field into the earliest hole
    # between already-placed fields; otherwise append it at the end.
    for src_field in src_fields[1:]:
      last_field = dst_fields[0]
      for i in range(1, len(dst_fields)):
        next_field = dst_fields[i]
        offset, bit = GetFieldOffset(src_field, last_field)
        if offset + src_field.size <= next_field.offset:
          # Found hole.
          src_field.offset = offset
          src_field.bit = bit
          dst_fields.insert(i, src_field)
          break
        last_field = next_field
      if src_field.offset is None:
        # Add to end
        src_field.offset, src_field.bit = GetFieldOffset(src_field, last_field)
        dst_fields.append(src_field)
class ByteInfo:
  """Describes one byte of a packed struct payload.

  Attributes:
    is_padding: True when no field occupies this byte.
    packed_fields: The PackedFields whose serialization starts at this byte.
  """

  def __init__(self):
    self.is_padding = False
    self.packed_fields = []
def GetByteLayout(packed_struct):
  """Returns a per-byte description (list of ByteInfo) of a struct payload."""
  fields = packed_struct.packed_fields
  final_field = fields[-1] if fields else None
  layout = [ByteInfo() for _ in range(GetPayloadSizeUpToField(final_field))]

  # Bytes between the end of one field and the start of the next are padding;
  # the byte at each field's offset records that field.
  prev_end = 0
  for pf in fields:
    for index in range(prev_end, pf.offset):
      layout[index].is_padding = True
    layout[pf.offset].packed_fields.append(pf)
    prev_end = pf.offset + pf.size

  # Trailing bytes up to the 8-byte-aligned payload end are padding too.
  for index in range(prev_end, len(layout)):
    layout[index].is_padding = True

  for info in layout:
    # A given byte cannot both be padding and have a field packed into it.
    assert not (info.is_padding and info.packed_fields)
  return layout
class VersionInfo:
  """Summarizes a struct's layout as of one [MinVersion] value.

  Attributes:
    version: The struct version being described.
    num_fields: Number of mojom-level fields present at this version.
    num_packed_fields: Number of wire-level packed fields at this version.
    num_bytes: Serialized size in bytes, including the struct header.
  """

  def __init__(self, version, num_fields, num_packed_fields, num_bytes):
    self.version = version
    self.num_fields = num_fields
    self.num_packed_fields = num_packed_fields
    self.num_bytes = num_bytes
def GetVersionInfo(packed_struct):
  """Get version information for a struct.

  Args:
    packed_struct: A PackedStruct instance.

  Returns:
    A non-empty list of VersionInfo instances, sorted by version in increasing
    order.

  Note: The version numbers may not be consecutive.
  """
  versions = []
  last_version = 0
  last_num_fields = 0
  last_num_packed_fields = 0
  last_payload_size = 0

  for packed_field in packed_struct.packed_fields_in_ordinal_order:
    if packed_field.min_version != last_version:
      # A newer version starts here: record the cumulative totals of the
      # version we just finished.
      versions.append(
          VersionInfo(last_version, last_num_fields, last_num_packed_fields,
                      last_payload_size + HEADER_SIZE))
      last_version = packed_field.min_version

    # Nullable numeric fields (e.g. `int32?`) expand to two packed fields, so to
    # avoid double-counting, only increment if the field is:
    # - not used for representing a nullable value kind field, or
    # - the primary field representing the nullable value kind field.
    last_num_fields += 1 if (
        not IsNullableValueKindPackedField(packed_field)
        or IsPrimaryNullableValueKindPackedField(packed_field)) else 0

    last_num_packed_fields += 1

    # The fields are iterated in ordinal order here. However, the size of a
    # version is determined by the last field of that version in pack order,
    # instead of ordinal order. Therefore, we need to calculate the max value.
    last_payload_size = max(GetPayloadSizeUpToField(packed_field),
                            last_payload_size)

  # The final version appended below must add at least one packed field over
  # the previously recorded version.
  assert len(
      versions) == 0 or last_num_packed_fields != versions[-1].num_packed_fields
  versions.append(
      VersionInfo(last_version, last_num_fields, last_num_packed_fields,
                  last_payload_size + HEADER_SIZE))
  return versions
|
0 | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom/generate/generator_unittest.py | # Copyright 2014 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import importlib.util
import os.path
import sys
import unittest
def _GetDirAbove(dirname):
"""Returns the directory "above" this file containing |dirname| (which must
also be "above" this file)."""
path = os.path.abspath(__file__)
while True:
path, tail = os.path.split(path)
assert tail
if tail == dirname:
return path
try:
importlib.util.find_spec("mojom")
except ImportError:
sys.path.append(os.path.join(_GetDirAbove("pylib"), "pylib"))
from mojom.generate import generator
class StringManipulationTest(unittest.TestCase):
  """generator contains some string utilities, this tests only those."""

  # Note: assertEqual is used rather than the assertEquals alias, which was
  # deprecated since Python 3.2 and removed entirely in Python 3.12.

  def testSplitCamelCase(self):
    """Verifies camel-case splitting, including acronym and digit handling."""
    self.assertEqual(["camel", "case"], generator.SplitCamelCase("CamelCase"))
    self.assertEqual(["url", "loader", "factory"],
                     generator.SplitCamelCase('URLLoaderFactory'))
    self.assertEqual(["get99", "entries"],
                     generator.SplitCamelCase('Get99Entries'))
    self.assertEqual(["get99entries"],
                     generator.SplitCamelCase('Get99entries'))

  def testToCamel(self):
    """Verifies snake-to-camel conversion and its keyword options."""
    self.assertEqual("CamelCase", generator.ToCamel("camel_case"))
    self.assertEqual("CAMELCASE", generator.ToCamel("CAMEL_CASE"))
    self.assertEqual("camelCase",
                     generator.ToCamel("camel_case", lower_initial=True))
    self.assertEqual("CamelCase",
                     generator.ToCamel("camel case", delimiter=' '))
    self.assertEqual("CaMelCaSe", generator.ToCamel("caMel_caSe"))
    self.assertEqual("L2Tp", generator.ToCamel("l2tp", digits_split=True))
    self.assertEqual("l2tp", generator.ToCamel("l2tp", lower_initial=True))

  def testToSnakeCase(self):
    """Verifies camel-to-snake conversion, lower and upper variants."""
    self.assertEqual("snake_case", generator.ToLowerSnakeCase("SnakeCase"))
    self.assertEqual("snake_case", generator.ToLowerSnakeCase("snakeCase"))
    self.assertEqual("snake_case", generator.ToLowerSnakeCase("SnakeCASE"))
    self.assertEqual("snake_d3d11_case",
                     generator.ToLowerSnakeCase("SnakeD3D11Case"))
    self.assertEqual("snake_d3d11_case",
                     generator.ToLowerSnakeCase("SnakeD3d11Case"))
    self.assertEqual("snake_d3d11_case",
                     generator.ToLowerSnakeCase("snakeD3d11Case"))
    self.assertEqual("SNAKE_CASE", generator.ToUpperSnakeCase("SnakeCase"))
    self.assertEqual("SNAKE_CASE", generator.ToUpperSnakeCase("snakeCase"))
    self.assertEqual("SNAKE_CASE", generator.ToUpperSnakeCase("SnakeCASE"))
    self.assertEqual("SNAKE_D3D11_CASE",
                     generator.ToUpperSnakeCase("SnakeD3D11Case"))
    self.assertEqual("SNAKE_D3D11_CASE",
                     generator.ToUpperSnakeCase("SnakeD3d11Case"))
    self.assertEqual("SNAKE_D3D11_CASE",
                     generator.ToUpperSnakeCase("snakeD3d11Case"))
if __name__ == "__main__":
unittest.main()
|
0 | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom/generate/translate_unittest.py | # Copyright 2014 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from mojom.generate import module as mojom
from mojom.generate import translate
from mojom.parse import ast
class TranslateTest(unittest.TestCase):
  """Tests |parser.Parse()|."""

  # Note: assertEqual is used rather than the assertEquals alias, which was
  # deprecated since Python 3.2 and removed entirely in Python 3.12.

  def testSimpleArray(self):
    """Tests a simple int32[]."""
    # pylint: disable=W0212
    self.assertEqual(translate._MapKind("int32[]"), "a:i32")

  def testAssociativeArray(self):
    """Tests a simple uint8{string}."""
    # pylint: disable=W0212
    self.assertEqual(translate._MapKind("uint8{string}"), "m[s][u8]")

  def testLeftToRightAssociativeArray(self):
    """Makes sure that parsing is done from right to left on the internal kinds
    in the presence of an associative array."""
    # pylint: disable=W0212
    self.assertEqual(translate._MapKind("uint8[]{string}"), "m[s][a:u8]")

  def testTranslateSimpleUnions(self):
    """Makes sure that a simple union is translated correctly."""
    tree = ast.Mojom(None, ast.ImportList(), [
        ast.Union(
            "SomeUnion", None,
            ast.UnionBody([
                ast.UnionField("a", None, None, "int32"),
                ast.UnionField("b", None, None, "string")
            ]))
    ])

    translation = translate.OrderedModule(tree, "mojom_tree", [])
    self.assertEqual(1, len(translation.unions))

    union = translation.unions[0]
    self.assertTrue(isinstance(union, mojom.Union))
    self.assertEqual("SomeUnion", union.mojom_name)
    self.assertEqual(2, len(union.fields))
    self.assertEqual("a", union.fields[0].mojom_name)
    self.assertEqual(mojom.INT32.spec, union.fields[0].kind.spec)
    self.assertEqual("b", union.fields[1].mojom_name)
    self.assertEqual(mojom.STRING.spec, union.fields[1].kind.spec)

  def testMapKindRaisesWithDuplicate(self):
    """Verifies _MapTreeForType() raises when passed two values with the same
    name."""
    methods = [
        ast.Method('dup', None, None, ast.ParameterList(), None),
        ast.Method('dup', None, None, ast.ParameterList(), None)
    ]
    with self.assertRaises(Exception):
      translate._ElemsOfType(methods, ast.Method, 'scope')

  def testAssociatedKinds(self):
    """Tests type spec translation of associated interfaces and requests."""
    # pylint: disable=W0212
    self.assertEqual(
        translate._MapKind("asso<SomeInterface>?"), "?asso:x:SomeInterface")
    self.assertEqual(translate._MapKind("rca<SomeInterface>?"),
                     "?rca:x:SomeInterface")

  def testSelfRecursiveUnions(self):
    """Verifies _UnionField() raises when a union is self-recursive."""
    tree = ast.Mojom(None, ast.ImportList(), [
        ast.Union("SomeUnion", None,
                  ast.UnionBody([ast.UnionField("a", None, None, "SomeUnion")]))
    ])
    with self.assertRaises(Exception):
      translate.OrderedModule(tree, "mojom_tree", [])

    # The nullable variant must be rejected as well.
    tree = ast.Mojom(None, ast.ImportList(), [
        ast.Union(
            "SomeUnion", None,
            ast.UnionBody([ast.UnionField("a", None, None, "SomeUnion?")]))
    ])
    with self.assertRaises(Exception):
      translate.OrderedModule(tree, "mojom_tree", [])

  def testDuplicateAttributesException(self):
    """Verifies that duplicate attribute keys raise during translation."""
    tree = ast.Mojom(None, ast.ImportList(), [
        ast.Union(
            "FakeUnion",
            ast.AttributeList([
                ast.Attribute("key1", "value"),
                ast.Attribute("key1", "value")
            ]),
            ast.UnionBody([
                ast.UnionField("a", None, None, "int32"),
                ast.UnionField("b", None, None, "string")
            ]))
    ])
    with self.assertRaises(Exception):
      translate.OrderedModule(tree, "mojom_tree", [])

  def testEnumWithReservedValues(self):
    """Verifies that assigning reserved values to enumerators fails."""
    # -128 is reserved for the empty representation in WTF::HashTraits.
    tree = ast.Mojom(None, ast.ImportList(), [
        ast.Enum(
            "MyEnum", None,
            ast.EnumValueList([
                ast.EnumValue('kReserved', None, '-128'),
            ]))
    ])
    with self.assertRaises(Exception) as context:
      translate.OrderedModule(tree, "mojom_tree", [])
    self.assertIn("reserved for WTF::HashTrait", str(context.exception))

    # -127 is reserved for the deleted representation in WTF::HashTraits.
    tree = ast.Mojom(None, ast.ImportList(), [
        ast.Enum(
            "MyEnum", None,
            ast.EnumValueList([
                ast.EnumValue('kReserved', None, '-127'),
            ]))
    ])
    with self.assertRaises(Exception) as context:
      translate.OrderedModule(tree, "mojom_tree", [])
    self.assertIn("reserved for WTF::HashTrait", str(context.exception))

    # Implicitly assigning a reserved value should also fail.
    tree = ast.Mojom(None, ast.ImportList(), [
        ast.Enum(
            "MyEnum", None,
            ast.EnumValueList([
                ast.EnumValue('kNotReserved', None, '-129'),
                ast.EnumValue('kImplicitlyReserved', None, None),
            ]))
    ])
    with self.assertRaises(Exception) as context:
      translate.OrderedModule(tree, "mojom_tree", [])
    self.assertIn("reserved for WTF::HashTrait", str(context.exception))
|
0 | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom/generate/pack_unittest.py | # Copyright 2013 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import unittest
from mojom.generate import module as mojom
from mojom.generate import pack
class PackTest(unittest.TestCase):
def testOrdinalOrder(self):
struct = mojom.Struct('test')
struct.AddField('testfield1', mojom.INT32, 2)
struct.AddField('testfield2', mojom.INT32, 1)
ps = pack.PackedStruct(struct)
self.assertEqual(2, len(ps.packed_fields))
self.assertEqual('testfield2', ps.packed_fields[0].field.mojom_name)
self.assertEqual('testfield1', ps.packed_fields[1].field.mojom_name)
def testZeroFields(self):
struct = mojom.Struct('test')
ps = pack.PackedStruct(struct)
self.assertEqual(0, len(ps.packed_fields))
def testOneField(self):
struct = mojom.Struct('test')
struct.AddField('testfield1', mojom.INT8)
ps = pack.PackedStruct(struct)
self.assertEqual(1, len(ps.packed_fields))
def _CheckPackSequence(self, kinds, fields, offsets):
"""Checks the pack order and offsets of a sequence of mojom.Kinds.
Args:
kinds: A sequence of mojom.Kinds that specify the fields that are to be
created.
fields: The expected order of the resulting fields, with the integer "1"
first.
offsets: The expected order of offsets, with the integer "0" first.
"""
struct = mojom.Struct('test')
index = 1
for kind in kinds:
struct.AddField('%d' % index, kind)
index += 1
ps = pack.PackedStruct(struct)
num_fields = len(ps.packed_fields)
self.assertEqual(len(kinds), num_fields)
for i in range(num_fields):
self.assertEqual('%d' % fields[i], ps.packed_fields[i].field.mojom_name)
self.assertEqual(offsets[i], ps.packed_fields[i].offset)
def testPaddingPackedInOrder(self):
return self._CheckPackSequence((mojom.INT8, mojom.UINT8, mojom.INT32),
(1, 2, 3), (0, 1, 4))
def testPaddingPackedOutOfOrder(self):
return self._CheckPackSequence((mojom.INT8, mojom.INT32, mojom.UINT8),
(1, 3, 2), (0, 1, 4))
def testPaddingPackedOverflow(self):
kinds = (mojom.INT8, mojom.INT32, mojom.INT16, mojom.INT8, mojom.INT8)
# 2 bytes should be packed together first, followed by short, then by int.
fields = (1, 4, 3, 2, 5)
offsets = (0, 1, 2, 4, 8)
return self._CheckPackSequence(kinds, fields, offsets)
def testNullableTypes(self):
kinds = (mojom.STRING.MakeNullableKind(), mojom.HANDLE.MakeNullableKind(),
mojom.Struct('test_struct').MakeNullableKind(),
mojom.DCPIPE.MakeNullableKind(), mojom.Array().MakeNullableKind(),
mojom.DPPIPE.MakeNullableKind(),
mojom.Array(length=5).MakeNullableKind(),
mojom.MSGPIPE.MakeNullableKind(),
mojom.Interface('test_interface').MakeNullableKind(),
mojom.SHAREDBUFFER.MakeNullableKind(),
mojom.InterfaceRequest().MakeNullableKind())
fields = (1, 2, 4, 3, 5, 6, 8, 7, 9, 10, 11)
offsets = (0, 8, 12, 16, 24, 32, 36, 40, 48, 56, 60)
return self._CheckPackSequence(kinds, fields, offsets)
def testAllTypes(self):
return self._CheckPackSequence(
(mojom.BOOL, mojom.INT8, mojom.STRING, mojom.UINT8, mojom.INT16,
mojom.DOUBLE, mojom.UINT16, mojom.INT32, mojom.UINT32, mojom.INT64,
mojom.FLOAT, mojom.STRING, mojom.HANDLE, mojom.UINT64,
mojom.Struct('test'), mojom.Array(), mojom.STRING.MakeNullableKind()),
(1, 2, 4, 5, 7, 3, 6, 8, 9, 10, 11, 13, 12, 14, 15, 16, 17, 18),
(0, 1, 2, 4, 6, 8, 16, 24, 28, 32, 40, 44, 48, 56, 64, 72, 80, 88))
def testPaddingPackedOutOfOrderByOrdinal(self):
struct = mojom.Struct('test')
struct.AddField('testfield1', mojom.INT8)
struct.AddField('testfield3', mojom.UINT8, 3)
struct.AddField('testfield2', mojom.INT32, 2)
ps = pack.PackedStruct(struct)
self.assertEqual(3, len(ps.packed_fields))
# Second byte should be packed in behind first, altering order.
self.assertEqual('testfield1', ps.packed_fields[0].field.mojom_name)
self.assertEqual('testfield3', ps.packed_fields[1].field.mojom_name)
self.assertEqual('testfield2', ps.packed_fields[2].field.mojom_name)
# Second byte should be packed with first.
self.assertEqual(0, ps.packed_fields[0].offset)
self.assertEqual(1, ps.packed_fields[1].offset)
self.assertEqual(4, ps.packed_fields[2].offset)
def testBools(self):
struct = mojom.Struct('test')
struct.AddField('bit0', mojom.BOOL)
struct.AddField('bit1', mojom.BOOL)
struct.AddField('int', mojom.INT32)
struct.AddField('bit2', mojom.BOOL)
struct.AddField('bit3', mojom.BOOL)
struct.AddField('bit4', mojom.BOOL)
struct.AddField('bit5', mojom.BOOL)
struct.AddField('bit6', mojom.BOOL)
struct.AddField('bit7', mojom.BOOL)
struct.AddField('bit8', mojom.BOOL)
ps = pack.PackedStruct(struct)
self.assertEqual(10, len(ps.packed_fields))
# First 8 bits packed together.
for i in range(8):
pf = ps.packed_fields[i]
self.assertEqual(0, pf.offset)
self.assertEqual("bit%d" % i, pf.field.mojom_name)
self.assertEqual(i, pf.bit)
# Ninth bit goes into second byte.
self.assertEqual("bit8", ps.packed_fields[8].field.mojom_name)
self.assertEqual(1, ps.packed_fields[8].offset)
self.assertEqual(0, ps.packed_fields[8].bit)
# int comes last.
self.assertEqual("int", ps.packed_fields[9].field.mojom_name)
self.assertEqual(4, ps.packed_fields[9].offset)
def testMinVersion(self):
"""Tests that |min_version| is properly set for packed fields."""
struct = mojom.Struct('test')
struct.AddField('field_2', mojom.BOOL, 2)
struct.AddField('field_0', mojom.INT32, 0)
struct.AddField('field_1', mojom.INT64, 1)
ps = pack.PackedStruct(struct)
self.assertEqual('field_0', ps.packed_fields[0].field.mojom_name)
self.assertEqual('field_2', ps.packed_fields[1].field.mojom_name)
self.assertEqual('field_1', ps.packed_fields[2].field.mojom_name)
self.assertEqual(0, ps.packed_fields[0].min_version)
self.assertEqual(0, ps.packed_fields[1].min_version)
self.assertEqual(0, ps.packed_fields[2].min_version)
struct.fields[0].attributes = {'MinVersion': 1}
ps = pack.PackedStruct(struct)
self.assertEqual(0, ps.packed_fields[0].min_version)
self.assertEqual(1, ps.packed_fields[1].min_version)
self.assertEqual(0, ps.packed_fields[2].min_version)
def testGetVersionInfoEmptyStruct(self):
"""Tests that pack.GetVersionInfo() never returns an empty list, even for
empty structs.
"""
struct = mojom.Struct('test')
ps = pack.PackedStruct(struct)
versions = pack.GetVersionInfo(ps)
self.assertEqual(1, len(versions))
self.assertEqual(0, versions[0].version)
self.assertEqual(0, versions[0].num_fields)
self.assertEqual(8, versions[0].num_bytes)
def testGetVersionInfoComplexOrder(self):
"""Tests pack.GetVersionInfo() using a struct whose definition order,
ordinal order and pack order for fields are all different.
"""
struct = mojom.Struct('test')
struct.AddField(
'field_3', mojom.BOOL, ordinal=3, attributes={'MinVersion': 3})
struct.AddField('field_0', mojom.INT32, ordinal=0)
struct.AddField(
'field_1', mojom.INT64, ordinal=1, attributes={'MinVersion': 2})
struct.AddField(
'field_2', mojom.INT64, ordinal=2, attributes={'MinVersion': 3})
ps = pack.PackedStruct(struct)
versions = pack.GetVersionInfo(ps)
self.assertEqual(3, len(versions))
self.assertEqual(0, versions[0].version)
self.assertEqual(1, versions[0].num_fields)
self.assertEqual(16, versions[0].num_bytes)
self.assertEqual(2, versions[1].version)
self.assertEqual(2, versions[1].num_fields)
self.assertEqual(24, versions[1].num_bytes)
self.assertEqual(3, versions[2].version)
self.assertEqual(4, versions[2].num_fields)
self.assertEqual(32, versions[2].num_bytes)
def testGetVersionInfoPackedStruct(self):
"""Tests that pack.GetVersionInfo() correctly sets version, num_fields,
and num_packed_fields for a packed struct.
"""
struct = mojom.Struct('test')
struct.AddField('field_0', mojom.BOOL, ordinal=0)
struct.AddField('field_1',
mojom.NULLABLE_BOOL,
ordinal=1,
attributes={'MinVersion': 1})
struct.AddField('field_2',
mojom.NULLABLE_BOOL,
ordinal=2,
attributes={'MinVersion': 2})
ps = pack.PackedStruct(struct)
versions = pack.GetVersionInfo(ps)
self.assertEqual(3, len(versions))
self.assertEqual(0, versions[0].version)
self.assertEqual(1, versions[1].version)
self.assertEqual(2, versions[2].version)
self.assertEqual(1, versions[0].num_fields)
self.assertEqual(2, versions[1].num_fields)
self.assertEqual(3, versions[2].num_fields)
self.assertEqual(1, versions[0].num_packed_fields)
self.assertEqual(3, versions[1].num_packed_fields)
self.assertEqual(5, versions[2].num_packed_fields)
def testInterfaceAlignment(self):
"""Tests that interfaces are aligned on 4-byte boundaries, although the size
of an interface is 8 bytes.
"""
kinds = (mojom.INT32, mojom.Interface('test_interface'))
fields = (1, 2)
offsets = (0, 4)
self._CheckPackSequence(kinds, fields, offsets)
def testAssociatedInterfaceAlignment(self):
"""Tests that associated interfaces are aligned on 4-byte boundaries,
although the size of an associated interface is 8 bytes.
"""
kinds = (mojom.INT32,
mojom.AssociatedInterface(mojom.Interface('test_interface')))
fields = (1, 2)
offsets = (0, 4)
self._CheckPackSequence(kinds, fields, offsets)
|
0 | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom/generate/template_expander.py | # Copyright 2013 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Based on third_party/WebKit/Source/build/scripts/template_expander.py.
import os.path
import sys
from mojom import fileutil
fileutil.AddLocalRepoThirdPartyDirToModulePath()
import jinja2
def ApplyTemplate(mojo_generator, path_to_template, params, **kwargs):
  """Renders |path_to_template| using the generator's precompiled bytecode.

  Templates come from the "<prefix>.zip" archive written by
  PrecompileTemplates(); |params| is the template context and |kwargs|
  overrides the generator's default Jinja environment parameters.
  """
  bytecode_archive = os.path.join(
      mojo_generator.bytecode_path,
      "%s.zip" % mojo_generator.GetTemplatePrefix())
  env_options = dict(mojo_generator.GetJinjaParameters())
  env_options.update(kwargs)
  env = jinja2.Environment(
      loader=jinja2.ModuleLoader(bytecode_archive),
      keep_trailing_newline=True,
      **env_options)
  env.globals.update(mojo_generator.GetGlobals())
  env.filters.update(mojo_generator.GetFilters())
  return env.get_template(path_to_template).render(params)
def UseJinja(path_to_template, **env_kwargs):
  """Decorator factory: wraps a generator method that returns a parameter
  dict so the decorated method instead renders |path_to_template| with it."""
  def decorate(params_fn):
    def render(*args, **call_kwargs):
      generator = args[0]
      return ApplyTemplate(generator, path_to_template,
                           params_fn(*args, **call_kwargs), **env_kwargs)
    render.__name__ = params_fn.__name__
    return render
  return decorate
def ApplyImportedTemplate(mojo_generator, path_to_template, filename, params,
                          **kwargs):
  """Renders |filename| loaded from the directory |path_to_template|,
  bypassing the precompiled bytecode archives."""
  env_options = dict(mojo_generator.GetJinjaParameters())
  env_options.update(kwargs)
  env = jinja2.Environment(
      loader=jinja2.FileSystemLoader(searchpath=path_to_template),
      keep_trailing_newline=True,
      **env_options)
  env.globals.update(mojo_generator.GetGlobals())
  env.filters.update(mojo_generator.GetFilters())
  return env.get_template(filename).render(params)
def UseJinjaForImportedTemplate(func):
  """Decorator for generator methods invoked as (self, template_dir,
  filename, ...); the method's return value becomes the template context."""
  def wrapper(*args, **kwargs):
    context = func(*args, **kwargs)
    mojo_generator, template_dir, filename = args[0], args[1], args[2]
    return ApplyImportedTemplate(mojo_generator, template_dir, filename,
                                 context)
  wrapper.__name__ = func.__name__
  return wrapper
def PrecompileTemplates(generator_modules, output_dir):
  """Compiles each generator's *.tmpl files into <output_dir>/<prefix>.zip
  bytecode archives, later consumed by ApplyTemplate()."""
  for module in generator_modules.values():
    generator = module.Generator(None)
    template_dir = os.path.join(
        os.path.dirname(module.__file__), generator.GetTemplatePrefix())
    env = jinja2.Environment(loader=jinja2.FileSystemLoader([template_dir]))
    env.filters.update(generator.GetFilters())
    env.compile_templates(
        os.path.join(output_dir, "%s.zip" % generator.GetTemplatePrefix()),
        extensions=["tmpl"],
        zip="stored",
        ignore_errors=False)
|
0 | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom/generate/module_unittest.py | # Copyright 2014 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import unittest
from mojom.generate import module as mojom
class ModuleTest(unittest.TestCase):
  def testNonInterfaceAsInterfaceRequest(self):
    """Tests that a non-interface cannot be used for interface requests."""
    module = mojom.Module('test_module', 'test_namespace')
    struct = mojom.Struct('TestStruct', module=module)
    with self.assertRaises(Exception) as e:
      mojom.InterfaceRequest(struct)
    # assertEqual, not the assertEquals alias: the alias was deprecated and
    # removed in Python 3.12.
    self.assertEqual(
        e.exception.__str__(),
        'Interface request requires \'x:TestStruct\' to be an interface.')

  def testNonInterfaceAsAssociatedInterface(self):
    """Tests that a non-interface type cannot be used for associated
    interfaces.
    """
    module = mojom.Module('test_module', 'test_namespace')
    struct = mojom.Struct('TestStruct', module=module)
    with self.assertRaises(Exception) as e:
      mojom.AssociatedInterface(struct)
    self.assertEqual(
        e.exception.__str__(),
        'Associated interface requires \'x:TestStruct\' to be an interface.')
|
0 | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom/generate/module.py | # Copyright 2013 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This module's classes provide an interface to mojo modules. Modules are
# collections of interfaces and structs to be used by mojo ipc clients and
# servers.
#
# A simple interface would be created this way:
# module = mojom.generate.module.Module('Foo')
# interface = module.AddInterface('Bar')
# method = interface.AddMethod('Tat', 0)
# method.AddParameter('baz', 0, mojom.INT32)
import pickle
from collections import OrderedDict
from uuid import UUID
# pylint: disable=raise-missing-from
class BackwardCompatibilityChecker:
  """Memoizes recursive backward-compatibility checks between kinds.

  The cache doubles as a cycle-breaker: while a (new, old) pair is being
  evaluated it is provisionally recorded as compatible, so self-referential
  structs and unions do not recurse forever.
  """

  def __init__(self):
    self._cache = {}

  def IsBackwardCompatible(self, new_kind, old_kind):
    """Returns whether |new_kind| is backward-compatible with |old_kind|."""
    key = (new_kind, old_kind)
    cached = self._cache.get(key)
    if cached is not None:
      return cached
    # Provisionally assume compatibility so recursive re-checks of this same
    # pair (e.g. a struct or union referencing itself in a field) terminate.
    self._cache[key] = True
    verdict = new_kind.IsBackwardCompatible(old_kind, self)
    self._cache[key] = verdict
    return verdict
# We use our own version of __repr__ when displaying the AST, as the
# AST currently doesn't capture which nodes are references (e.g. to
# types) and which nodes are definitions. This allows us to e.g. print
# the definition of a struct when it's defined inside a module, but
# only print its name when it's referenced in e.g. a method parameter.
def Repr(obj, as_ref=True):
  """A version of __repr__ that can distinguish references.

  Sometimes we like to print an object's full representation (e.g. with its
  fields) and sometimes we just want to reference an object that was printed
  in full elsewhere. Objects exposing a Repr() method are delegated to;
  lists and dicts are rendered recursively with two-space indentation;
  anything else falls back to repr().

  Args:
    obj: The object whose string representation we compute.
    as_ref: If True, use the short reference representation.

  Returns:
    A str representation of |obj|.
  """
  if hasattr(obj, 'Repr'):
    return obj.Repr(as_ref=as_ref)

  # Built-in containers cannot implement Repr, so they are rendered here.
  if isinstance(obj, list):
    if not obj:
      return '[]'
    lines = ['  ' + Repr(elem, as_ref).replace('\n', '\n  ') for elem in obj]
    return '[\n%s\n]' % ',\n'.join(lines)
  if isinstance(obj, dict):
    if not obj:
      return '{}'
    entries = []
    for key, val in obj.items():
      entries.append('  %s: %s' % (Repr(key, as_ref).replace('\n', '\n  '),
                                   Repr(val, as_ref).replace('\n', '\n  ')))
    return '{\n%s\n}' % ',\n'.join(entries)
  return repr(obj)
def GenericRepr(obj, names):
  """Compute generic Repr for |obj| based on the attributes in |names|.

  Args:
    obj: The object to compute a Repr for.
    names: A dict from attribute names to include, to booleans specifying
        whether those attributes should be shown as references or not.

  Returns:
    A str representation of |obj|.
  """
  rendered = []
  for attr, as_ref in names.items():
    value = Repr(getattr(obj, attr), as_ref).replace('\n', '\n  ')
    rendered.append('  %s=%s' % (attr, value))
  return '%s(\n%s\n)' % (obj.__class__.__name__, ',\n'.join(rendered))
class Kind:
  """Kind represents a type (e.g. int8, string).

  Attributes:
    spec: A string uniquely identifying the type. May be None.
    module: {Module} The defining module. Set to None for built-in types.
    parent_kind: The enclosing type. For example, an enum defined
        inside an interface has that interface as its parent. May be None.
    is_nullable: True if the type is nullable.
  """

  def __init__(self, spec=None, is_nullable=False, module=None):
    self.spec = spec
    self.module = module
    self.parent_kind = None
    self.is_nullable = is_nullable
    # Backing store for properties created via AddSharedProperty(); shared
    # between a kind and the nullable/unnullable variants made from it.
    self.shared_definition = {}

  @classmethod
  def AddSharedProperty(cls, name):
    """Adds a property |name| to |cls|, which accesses the corresponding item in
    |shared_definition|.

    The reason of adding such indirection is to enable sharing definition
    between a reference kind and its nullable variation. For example:
      a = Struct('test_struct_1')
      b = a.MakeNullableKind()
      a.name = 'test_struct_2'
      print(b.name)  # Outputs 'test_struct_2'.
    """
    def Get(self):
      try:
        return self.shared_definition[name]
      except KeyError:  # Must raise AttributeError if property doesn't exist.
        raise AttributeError

    def Set(self, value):
      self.shared_definition[name] = value

    setattr(cls, name, property(Get, Set))

  def Repr(self, as_ref=True):
    # pylint: disable=unused-argument
    return '<%s spec=%r is_nullable=%r>' % (self.__class__.__name__, self.spec,
                                            self.is_nullable)

  def __repr__(self):
    # Gives us a decent __repr__ for all kinds.
    return self.Repr()

  def __eq__(self, rhs):
    # Exact-type comparison (not isinstance): a subclass is never equal to
    # its base even with an identical spec.
    # pylint: disable=unidiomatic-typecheck
    return (type(self) == type(rhs)
            and (self.spec, self.parent_kind, self.is_nullable)
            == (rhs.spec, rhs.parent_kind, rhs.is_nullable))

  def __hash__(self):
    # TODO(crbug.com/1060471): Remove this and other __hash__ methods on Kind
    # and its subclasses. This is to support existing generator code which uses
    # some primitive Kinds as dict keys. The default hash (object identity)
    # breaks these dicts when a pickled Module instance is unpickled and used
    # during a subsequent run of the parser.
    return hash((self.spec, self.parent_kind, self.is_nullable))

  # pylint: disable=unused-argument
  def IsBackwardCompatible(self, rhs, checker):
    # Base kinds are backward-compatible only when strictly identical;
    # subclasses override this with structural rules.
    return self == rhs
class ValueKind(Kind):
  """ValueKind represents values that aren't reference kinds.

  The primary difference is the wire representation for nullable value kinds
  still reserves space for the value type itself, even if that value itself
  is logically null.
  """

  def __init__(self, spec=None, is_nullable=False, module=None):
    # A leading '?' in the spec is the canonical nullability marker; the
    # flag and the spec must agree.
    assert spec is None or is_nullable == spec.startswith('?')
    Kind.__init__(self, spec, is_nullable, module)

  def MakeNullableKind(self):
    """Returns the nullable variant of this kind.

    Primitive kinds map onto their NULLABLE_* module-level singletons; any
    other value kind gets a fresh instance sharing |shared_definition|.
    """
    assert not self.is_nullable

    if self == BOOL:
      return NULLABLE_BOOL
    if self == INT8:
      return NULLABLE_INT8
    if self == INT16:
      return NULLABLE_INT16
    if self == INT32:
      return NULLABLE_INT32
    if self == INT64:
      return NULLABLE_INT64
    if self == UINT8:
      return NULLABLE_UINT8
    if self == UINT16:
      return NULLABLE_UINT16
    if self == UINT32:
      return NULLABLE_UINT32
    if self == UINT64:
      return NULLABLE_UINT64
    if self == FLOAT:
      return NULLABLE_FLOAT
    if self == DOUBLE:
      return NULLABLE_DOUBLE

    nullable_kind = type(self)()
    nullable_kind.shared_definition = self.shared_definition
    if self.spec is not None:
      nullable_kind.spec = '?' + self.spec
    nullable_kind.is_nullable = True
    nullable_kind.parent_kind = self.parent_kind
    nullable_kind.module = self.module

    return nullable_kind

  def MakeUnnullableKind(self):
    """Returns the non-nullable variant of this kind (inverse of the above)."""
    assert self.is_nullable

    if self == NULLABLE_BOOL:
      return BOOL
    if self == NULLABLE_INT8:
      return INT8
    if self == NULLABLE_INT16:
      return INT16
    if self == NULLABLE_INT32:
      return INT32
    if self == NULLABLE_INT64:
      return INT64
    if self == NULLABLE_UINT8:
      return UINT8
    if self == NULLABLE_UINT16:
      return UINT16
    if self == NULLABLE_UINT32:
      return UINT32
    if self == NULLABLE_UINT64:
      return UINT64
    if self == NULLABLE_FLOAT:
      return FLOAT
    if self == NULLABLE_DOUBLE:
      return DOUBLE

    # Note: despite the variable name, this is the non-nullable copy.
    nullable_kind = type(self)()
    nullable_kind.shared_definition = self.shared_definition
    if self.spec is not None:
      # NOTE(review): strips the '?' prefix without asserting it is present,
      # unlike ReferenceKind.MakeUnnullableKind — the is_nullable assert
      # above presumably guarantees it; confirm.
      nullable_kind.spec = self.spec[1:]
    nullable_kind.is_nullable = False
    nullable_kind.parent_kind = self.parent_kind
    nullable_kind.module = self.module

    return nullable_kind

  def __eq__(self, rhs):
    return (isinstance(rhs, ValueKind) and super().__eq__(rhs))

  def __hash__(self):  # pylint: disable=useless-super-delegation
    return super().__hash__()
class ReferenceKind(Kind):
  """ReferenceKind represents pointer and handle types.

  A type is nullable if null (for pointer types) or invalid handle (for handle
  types) is a legal value for the type.
  """

  def __init__(self, spec=None, is_nullable=False, module=None):
    # A leading '?' in the spec is the canonical nullability marker; the
    # flag and the spec must agree.
    assert spec is None or is_nullable == spec.startswith('?')
    Kind.__init__(self, spec, is_nullable, module)

  def MakeNullableKind(self):
    """Returns the nullable variant of this kind.

    Built-in reference kinds map onto their NULLABLE_* module-level
    singletons; any other reference kind gets a fresh instance sharing
    |shared_definition|.
    """
    assert not self.is_nullable

    if self == STRING:
      return NULLABLE_STRING
    if self == HANDLE:
      return NULLABLE_HANDLE
    if self == DCPIPE:
      return NULLABLE_DCPIPE
    if self == DPPIPE:
      return NULLABLE_DPPIPE
    if self == MSGPIPE:
      return NULLABLE_MSGPIPE
    if self == SHAREDBUFFER:
      return NULLABLE_SHAREDBUFFER
    if self == PLATFORMHANDLE:
      return NULLABLE_PLATFORMHANDLE

    nullable_kind = type(self)()
    nullable_kind.shared_definition = self.shared_definition
    if self.spec is not None:
      nullable_kind.spec = '?' + self.spec
    nullable_kind.is_nullable = True
    nullable_kind.parent_kind = self.parent_kind
    nullable_kind.module = self.module

    return nullable_kind

  def MakeUnnullableKind(self):
    """Returns the non-nullable variant of this kind (inverse of the above)."""
    assert self.is_nullable

    if self == NULLABLE_STRING:
      return STRING
    if self == NULLABLE_HANDLE:
      return HANDLE
    if self == NULLABLE_DCPIPE:
      return DCPIPE
    if self == NULLABLE_DPPIPE:
      return DPPIPE
    if self == NULLABLE_MSGPIPE:
      return MSGPIPE
    if self == NULLABLE_SHAREDBUFFER:
      return SHAREDBUFFER
    if self == NULLABLE_PLATFORMHANDLE:
      return PLATFORMHANDLE

    unnullable_kind = type(self)()
    unnullable_kind.shared_definition = self.shared_definition
    if self.spec is not None:
      assert self.spec[0] == '?'
      unnullable_kind.spec = self.spec[1:]
    unnullable_kind.is_nullable = False
    unnullable_kind.parent_kind = self.parent_kind
    unnullable_kind.module = self.module

    return unnullable_kind

  def __eq__(self, rhs):
    return (isinstance(rhs, ReferenceKind) and super().__eq__(rhs))

  def __hash__(self):  # pylint: disable=useless-super-delegation
    return super().__hash__()
# Initialize the set of primitive types. These can be accessed by clients.
# Non-nullable primitive value kinds.
BOOL = ValueKind('b')
INT8 = ValueKind('i8')
INT16 = ValueKind('i16')
INT32 = ValueKind('i32')
INT64 = ValueKind('i64')
UINT8 = ValueKind('u8')
UINT16 = ValueKind('u16')
UINT32 = ValueKind('u32')
UINT64 = ValueKind('u64')
FLOAT = ValueKind('f')
DOUBLE = ValueKind('d')
# Nullable variants of the primitive value kinds ('?' prefix in the spec).
NULLABLE_BOOL = ValueKind('?b', True)
NULLABLE_INT8 = ValueKind('?i8', True)
NULLABLE_INT16 = ValueKind('?i16', True)
NULLABLE_INT32 = ValueKind('?i32', True)
NULLABLE_INT64 = ValueKind('?i64', True)
NULLABLE_UINT8 = ValueKind('?u8', True)
NULLABLE_UINT16 = ValueKind('?u16', True)
NULLABLE_UINT32 = ValueKind('?u32', True)
NULLABLE_UINT64 = ValueKind('?u64', True)
NULLABLE_FLOAT = ValueKind('?f', True)
NULLABLE_DOUBLE = ValueKind('?d', True)
# Built-in reference (pointer/handle) kinds.
STRING = ReferenceKind('s')
HANDLE = ReferenceKind('h')
DCPIPE = ReferenceKind('h:d:c')
DPPIPE = ReferenceKind('h:d:p')
MSGPIPE = ReferenceKind('h:m')
SHAREDBUFFER = ReferenceKind('h:s')
PLATFORMHANDLE = ReferenceKind('h:p')
# Nullable variants of the built-in reference kinds.
NULLABLE_STRING = ReferenceKind('?s', True)
NULLABLE_HANDLE = ReferenceKind('?h', True)
NULLABLE_DCPIPE = ReferenceKind('?h:d:c', True)
NULLABLE_DPPIPE = ReferenceKind('?h:d:p', True)
NULLABLE_MSGPIPE = ReferenceKind('?h:m', True)
NULLABLE_SHAREDBUFFER = ReferenceKind('?h:s', True)
NULLABLE_PLATFORMHANDLE = ReferenceKind('?h:p', True)

# Collection of all Primitive types
PRIMITIVES = (
    BOOL,
    INT8,
    INT16,
    INT32,
    INT64,
    UINT8,
    UINT16,
    UINT32,
    UINT64,
    FLOAT,
    DOUBLE,
    NULLABLE_BOOL,
    NULLABLE_INT8,
    NULLABLE_INT16,
    NULLABLE_INT32,
    NULLABLE_INT64,
    NULLABLE_UINT8,
    NULLABLE_UINT16,
    NULLABLE_UINT32,
    NULLABLE_UINT64,
    NULLABLE_FLOAT,
    NULLABLE_DOUBLE,
    STRING,
    HANDLE,
    DCPIPE,
    DPPIPE,
    MSGPIPE,
    SHAREDBUFFER,
    PLATFORMHANDLE,
    NULLABLE_STRING,
    NULLABLE_HANDLE,
    NULLABLE_DCPIPE,
    NULLABLE_DPPIPE,
    NULLABLE_MSGPIPE,
    NULLABLE_SHAREDBUFFER,
    NULLABLE_PLATFORMHANDLE,
)

# Names of mojom attributes recognized elsewhere in the generators.
ATTRIBUTE_MIN_VERSION = 'MinVersion'
ATTRIBUTE_DEFAULT = 'Default'
ATTRIBUTE_EXTENSIBLE = 'Extensible'
ATTRIBUTE_NO_INTERRUPT = 'NoInterrupt'
ATTRIBUTE_STABLE = 'Stable'
ATTRIBUTE_SUPPORTS_URGENT = 'SupportsUrgent'
ATTRIBUTE_SYNC = 'Sync'
ATTRIBUTE_UNLIMITED_SIZE = 'UnlimitedSize'
ATTRIBUTE_UUID = 'Uuid'
ATTRIBUTE_SERVICE_SANDBOX = 'ServiceSandbox'
ATTRIBUTE_REQUIRE_CONTEXT = 'RequireContext'
ATTRIBUTE_ALLOWED_CONTEXT = 'AllowedContext'
ATTRIBUTE_RUNTIME_FEATURE = 'RuntimeFeature'
class NamedValue:
  """A value referenced by name, scoped to a module and an optional
  enclosing parent kind."""

  def __init__(self, module, parent_kind, mojom_name):
    self.module = module
    self.parent_kind = parent_kind
    self.mojom_name = mojom_name

  def GetSpec(self):
    """Returns the fully-qualified spec string for this value."""
    scope = self.module.GetNamespacePrefix()
    if self.parent_kind:
      scope += self.parent_kind.mojom_name + '.'
    return scope + self.mojom_name

  def __eq__(self, rhs):
    # The defining module is deliberately not part of equality.
    return (isinstance(rhs, NamedValue)
            and (self.parent_kind, self.mojom_name) == (rhs.parent_kind,
                                                        rhs.mojom_name))

  def __hash__(self):
    return hash((self.parent_kind, self.mojom_name))
class BuiltinValue:
  """Wraps a built-in (unnamed) literal value."""

  def __init__(self, value):
    self.value = value

  def __eq__(self, rhs):
    if not isinstance(rhs, BuiltinValue):
      return False
    return self.value == rhs.value
class ConstantValue(NamedValue):
  """A NamedValue referring to a declared Constant."""

  def __init__(self, module, parent_kind, constant):
    super().__init__(module, parent_kind, constant.mojom_name)
    self.constant = constant

  @property
  def name(self):
    # Stylized name is delegated to the underlying constant.
    return self.constant.name
class EnumValue(NamedValue):
  """A NamedValue naming one field of an Enum."""

  def __init__(self, module, enum, field):
    super().__init__(module, enum.parent_kind, field.mojom_name)
    self.field = field
    self.enum = enum

  def GetSpec(self):
    """Qualifies the field name with the enum name as well as any parent."""
    scope = self.module.GetNamespacePrefix()
    if self.parent_kind:
      scope += self.parent_kind.mojom_name + '.'
    return scope + self.enum.mojom_name + '.' + self.mojom_name

  @property
  def name(self):
    # Stylized name is delegated to the underlying enum field.
    return self.field.name
class Constant:
  """A mojom constant declaration.

  |name| is the stylized name and remains None until Stylize() is called.
  """

  def __init__(self, mojom_name=None, kind=None, value=None, parent_kind=None):
    self.mojom_name = mojom_name
    self.name = None
    self.kind = kind
    self.value = value
    self.parent_kind = parent_kind

  def Stylize(self, stylizer):
    self.name = stylizer.StylizeConstant(self.mojom_name)

  def __eq__(self, rhs):
    if not isinstance(rhs, Constant):
      return False
    return ((self.mojom_name, self.kind, self.value, self.parent_kind) ==
            (rhs.mojom_name, rhs.kind, rhs.value, rhs.parent_kind))
class Field:
  """Abstract base for struct/union fields; instantiate a subclass instead.

  |name| is the stylized name and remains None until Stylize() is called.
  """

  def __init__(self,
               mojom_name=None,
               kind=None,
               ordinal=None,
               default=None,
               attributes=None):
    # Abstract by convention: refuse direct instantiation, by class name so
    # that every subclass remains constructible.
    if self.__class__.__name__ == 'Field':
      raise Exception()
    self.mojom_name = mojom_name
    self.name = None
    self.kind = kind
    self.ordinal = ordinal
    self.default = default
    self.attributes = attributes

  def Repr(self, as_ref=True):
    # pylint: disable=unused-argument
    # Fields are only referenced by the objects that define them, so they
    # are always rendered in full rather than as references.
    return GenericRepr(self, {'mojom_name': False, 'kind': True})

  def Stylize(self, stylizer):
    self.name = stylizer.StylizeField(self.mojom_name)

  @property
  def min_version(self):
    if not self.attributes:
      return None
    return self.attributes.get(ATTRIBUTE_MIN_VERSION)

  def __eq__(self, rhs):
    if not isinstance(rhs, Field):
      return False
    return ((self.mojom_name, self.kind, self.ordinal, self.default,
             self.attributes) == (rhs.mojom_name, rhs.kind, rhs.ordinal,
                                  rhs.default, rhs.attributes))

  def __hash__(self):
    # Attributes are intentionally excluded (dicts are unhashable).
    return hash((self.mojom_name, self.kind, self.ordinal, self.default))
class StructField(Field):
  # A struct member. Note: super(Field, self) skips Field.__hash__ and
  # resolves to object.__hash__, so StructFields hash by identity; identity
  # hashing stays stable even if the field is mutated after being placed in
  # a dict or set.
  def __hash__(self):
    return super(Field, self).__hash__()
class UnionField(Field):
  """A union member; may carry the [Default] attribute."""

  def __init__(self,
               mojom_name=None,
               kind=None,
               ordinal=None,
               default=None,
               attributes=None):
    super().__init__(mojom_name, kind, ordinal, default, attributes)

  @property
  def is_default(self):
    if not self.attributes:
      return False
    return self.attributes.get(ATTRIBUTE_DEFAULT, False)
def _IsFieldBackwardCompatible(new_field, old_field, checker):
if (new_field.min_version or 0) != (old_field.min_version or 0):
return False
return checker.IsBackwardCompatible(new_field.kind, old_field.kind)
class Feature(ReferenceKind):
  """A runtime enabled feature defined from mojom.

  Attributes:
    mojom_name: {str} The name of the feature type as defined in mojom.
    name: {str} The stylized name. (Note: not the "name" used by FeatureList.)
    constants: {List[Constant]} The constants defined in the feature scope.
    attributes: {dict} Additional information about the feature.
  """

  # Stored in |shared_definition| so a nullable variant created through
  # MakeNullableKind() observes later mutations of the original.
  Kind.AddSharedProperty('mojom_name')
  Kind.AddSharedProperty('name')
  Kind.AddSharedProperty('constants')
  Kind.AddSharedProperty('attributes')

  def __init__(self, mojom_name=None, module=None, attributes=None):
    spec = None if mojom_name is None else 'x:' + mojom_name
    ReferenceKind.__init__(self, spec, False, module)
    self.mojom_name = mojom_name
    self.name = None
    self.constants = []
    self.attributes = attributes

  def Stylize(self, stylizer):
    self.name = stylizer.StylizeFeature(self.mojom_name)
    for constant in self.constants:
      constant.Stylize(stylizer)
class Struct(ReferenceKind):
  """A struct with typed fields.

  Attributes:
    mojom_name: {str} The name of the struct type as defined in mojom.
    name: {str} The stylized name.
    native_only: {bool} Does the struct have a body (i.e. any fields) or is it
        purely a native struct.
    custom_serializer: {bool} Should we generate a serializer for the struct or
        will one be provided by non-generated code.
    fields: {List[StructField]} The members of the struct.
    enums: {List[Enum]} The enums defined in the struct scope.
    constants: {List[Constant]} The constants defined in the struct scope.
    attributes: {dict} Additional information about the struct, such as
        if it's a native struct.
  """

  # Stored in |shared_definition| so a nullable variant created through
  # MakeNullableKind() observes later mutations of the original.
  Kind.AddSharedProperty('mojom_name')
  Kind.AddSharedProperty('name')
  Kind.AddSharedProperty('native_only')
  Kind.AddSharedProperty('custom_serializer')
  Kind.AddSharedProperty('fields')
  Kind.AddSharedProperty('enums')
  Kind.AddSharedProperty('constants')
  Kind.AddSharedProperty('attributes')

  def __init__(self, mojom_name=None, module=None, attributes=None):
    if mojom_name is not None:
      spec = 'x:' + mojom_name
    else:
      spec = None
    ReferenceKind.__init__(self, spec, False, module)
    self.mojom_name = mojom_name
    self.name = None
    self.native_only = False
    self.custom_serializer = False
    self.fields = []
    self.enums = []
    self.constants = []
    self.attributes = attributes

  def Repr(self, as_ref=True):
    if as_ref:
      return '<%s mojom_name=%r module=%s>' % (self.__class__.__name__,
                                               self.mojom_name,
                                               Repr(self.module, as_ref=True))
    return GenericRepr(self, {
        'mojom_name': False,
        'fields': False,
        'module': True
    })

  def AddField(self,
               mojom_name,
               kind,
               ordinal=None,
               default=None,
               attributes=None):
    """Appends a new StructField and returns it."""
    field = StructField(mojom_name, kind, ordinal, default, attributes)
    self.fields.append(field)
    return field

  def Stylize(self, stylizer):
    self.name = stylizer.StylizeStruct(self.mojom_name)
    for field in self.fields:
      field.Stylize(stylizer)
    for enum in self.enums:
      enum.Stylize(stylizer)
    for constant in self.constants:
      constant.Stylize(stylizer)

  def IsBackwardCompatible(self, rhs, checker):
    """This struct is backward-compatible with rhs (older_struct) if and only if
    all of the following conditions hold:
      - Any newly added field is tagged with a [MinVersion] attribute specifying
        a version number greater than all previously used [MinVersion]
        attributes within the struct.
      - All fields present in rhs remain present in the new struct,
        with the same ordinal position, same optional or non-optional status,
        same (or backward-compatible) type and where applicable, the same
        [MinVersion] attribute value.
      - All [MinVersion] attributes must be non-decreasing in ordinal order.
      - All reference-typed (string, array, map, struct, or union) fields tagged
        with a [MinVersion] greater than zero must be optional.
    """

    def buildOrdinalFieldMap(struct):
      fields_by_ordinal = {}
      for field in struct.fields:
        if field.ordinal in fields_by_ordinal:
          raise Exception('Multiple fields with ordinal %s in struct %s.' %
                          (field.ordinal, struct.mojom_name))
        fields_by_ordinal[field.ordinal] = field
      return fields_by_ordinal

    new_fields = buildOrdinalFieldMap(self)
    old_fields = buildOrdinalFieldMap(rhs)
    if len(new_fields) < len(old_fields):
      # At least one field was removed, which is not OK.
      return False

    # If there are N fields, existing ordinal values must exactly cover the
    # range from 0 to N-1.
    num_old_ordinals = len(old_fields)
    max_old_min_version = 0
    for ordinal in range(num_old_ordinals):
      new_field = new_fields[ordinal]
      old_field = old_fields[ordinal]
      if (old_field.min_version or 0) > max_old_min_version:
        max_old_min_version = old_field.min_version
      if not _IsFieldBackwardCompatible(new_field, old_field, checker):
        # Type or min-version mismatch between old and new versions of the same
        # ordinal field.
        return False

    # At this point we know all old fields are intact in the new struct
    # definition. Now verify that all new fields have a high enough min version
    # and are appropriately optional where required.
    num_new_ordinals = len(new_fields)
    last_min_version = max_old_min_version
    for ordinal in range(num_old_ordinals, num_new_ordinals):
      new_field = new_fields[ordinal]
      min_version = new_field.min_version or 0
      if min_version <= max_old_min_version:
        # A new field is being added to an existing version, which is not OK.
        return False
      if min_version < last_min_version:
        # The [MinVersion] of a field cannot be lower than the [MinVersion] of
        # a field with lower ordinal value.
        return False
      if IsReferenceKind(new_field.kind) and not IsNullableKind(new_field.kind):
        # New fields whose type can be nullable MUST be nullable.
        return False
      # Bug fix: advance |last_min_version| so the "non-decreasing in ordinal
      # order" rule from the docstring is enforced among the newly added
      # fields themselves. Previously the variable was never updated, so e.g.
      # new fields tagged [MinVersion=5] then [MinVersion=3] were accepted.
      last_min_version = min_version
    return True

  @property
  def stable(self):
    return self.attributes.get(ATTRIBUTE_STABLE, False) \
        if self.attributes else False

  @property
  def qualified_name(self):
    if self.parent_kind:
      prefix = self.parent_kind.qualified_name + '.'
    else:
      prefix = self.module.GetNamespacePrefix()
    return '%s%s' % (prefix, self.mojom_name)

  def _tuple(self):
    return (self.mojom_name, self.native_only, self.fields, self.constants,
            self.attributes)

  def __eq__(self, rhs):
    return isinstance(rhs, Struct) and self._tuple() == rhs._tuple()

  def __lt__(self, rhs):
    if not isinstance(self, type(rhs)):
      return str(type(self)) < str(type(rhs))
    return self._tuple() < rhs._tuple()

  def __hash__(self):
    return id(self)
class Union(ReferenceKind):
  """A union of several kinds.

  Attributes:
    mojom_name: {str} The name of the union type as defined in mojom.
    name: {str} The stylized name.
    fields: {List[UnionField]} The members of the union.
    attributes: {dict} Additional information about the union, such as
        which Java class name to use to represent it in the generated
        bindings.
  """
  # Stored in |shared_definition| so a nullable variant created through
  # MakeNullableKind() observes later mutations of the original.
  Kind.AddSharedProperty('mojom_name')
  Kind.AddSharedProperty('name')
  Kind.AddSharedProperty('fields')
  Kind.AddSharedProperty('attributes')
  Kind.AddSharedProperty('default_field')

  def __init__(self, mojom_name=None, module=None, attributes=None):
    if mojom_name is not None:
      spec = 'x:' + mojom_name
    else:
      spec = None
    ReferenceKind.__init__(self, spec, False, module)
    self.mojom_name = mojom_name
    self.name = None
    self.fields = []
    self.attributes = attributes
    self.default_field = None

  def Repr(self, as_ref=True):
    if as_ref:
      return '<%s spec=%r is_nullable=%r fields=%s>' % (
          self.__class__.__name__, self.spec, self.is_nullable, Repr(
              self.fields))
    return GenericRepr(self, {'fields': True, 'is_nullable': False})

  def AddField(self, mojom_name, kind, ordinal=None, attributes=None):
    """Appends a new UnionField and returns it."""
    field = UnionField(mojom_name, kind, ordinal, None, attributes)
    self.fields.append(field)
    return field

  def Stylize(self, stylizer):
    self.name = stylizer.StylizeUnion(self.mojom_name)
    for field in self.fields:
      field.Stylize(stylizer)

  def IsBackwardCompatible(self, rhs, checker):
    """This union is backward-compatible with rhs (older_union) if and only if
    all of the following conditions hold:
      - Any newly added field is tagged with a [MinVersion] attribute specifying
        a version number greater than all previously used [MinVersion]
        attributes within the union.
      - All fields present in rhs remain present in the new union,
        with the same ordinal value, same optional or non-optional status,
        same (or backward-compatible) type, and where applicable, the same
        [MinVersion] attribute value.
    """

    def buildOrdinalFieldMap(union):
      fields_by_ordinal = {}
      for field in union.fields:
        if field.ordinal in fields_by_ordinal:
          raise Exception('Multiple fields with ordinal %s in union %s.' %
                          (field.ordinal, union.mojom_name))
        fields_by_ordinal[field.ordinal] = field
      return fields_by_ordinal

    new_fields = buildOrdinalFieldMap(self)
    old_fields = buildOrdinalFieldMap(rhs)
    if len(new_fields) < len(old_fields):
      # At least one field was removed, which is not OK.
      return False

    # Unlike structs, union ordinals need not be contiguous, so fields are
    # matched by ordinal key rather than by position.
    max_old_min_version = 0
    for ordinal, old_field in old_fields.items():
      new_field = new_fields.get(ordinal)
      if not new_field:
        # A field was removed, which is not OK.
        return False
      if not _IsFieldBackwardCompatible(new_field, old_field, checker):
        # A field changed its type or MinVersion, which is not OK.
        return False
      old_min_version = old_field.min_version or 0
      if old_min_version > max_old_min_version:
        max_old_min_version = old_min_version

    new_ordinals = set(new_fields.keys()) - set(old_fields.keys())
    for ordinal in new_ordinals:
      if (new_fields[ordinal].min_version or 0) <= max_old_min_version:
        # New fields must use a MinVersion greater than any old fields.
        return False

    return True

  @property
  def extensible(self):
    return self.attributes.get(ATTRIBUTE_EXTENSIBLE, False) \
        if self.attributes else False

  @property
  def stable(self):
    return self.attributes.get(ATTRIBUTE_STABLE, False) \
        if self.attributes else False

  @property
  def qualified_name(self):
    if self.parent_kind:
      prefix = self.parent_kind.qualified_name + '.'
    else:
      prefix = self.module.GetNamespacePrefix()
    return '%s%s' % (prefix, self.mojom_name)

  def _tuple(self):
    return (self.mojom_name, self.fields, self.attributes)

  def __eq__(self, rhs):
    return isinstance(rhs, Union) and self._tuple() == rhs._tuple()

  def __lt__(self, rhs):
    if not isinstance(self, type(rhs)):
      return str(type(self)) < str(type(rhs))
    return self._tuple() < rhs._tuple()

  def __hash__(self):
    # Identity hash (unlike __eq__): unions may be mutated after being used
    # as dict keys, so a value-based hash would be unstable.
    return id(self)
class Array(ReferenceKind):
  """An array.

  Attributes:
    kind: {Kind} The type of the elements. May be None.
    length: The number of elements. None if unknown.
  """

  # See Kind.AddSharedProperty().
  Kind.AddSharedProperty('kind')
  Kind.AddSharedProperty('length')

  def __init__(self, kind=None, length=None):
    if kind is not None:
      # Fixed-size arrays encode the length in the spec ('a<N>:...'); unsized
      # arrays use the plain 'a:...' form.
      if length is not None:
        spec = 'a%d:%s' % (length, kind.spec)
      else:
        spec = 'a:%s' % kind.spec
      ReferenceKind.__init__(self, spec)
    else:
      ReferenceKind.__init__(self)
    self.kind = kind
    self.length = length

  def Repr(self, as_ref=True):
    """Returns a debug-dump representation of this array kind."""
    if as_ref:
      return '<%s spec=%r is_nullable=%r kind=%s length=%r>' % (
          self.__class__.__name__, self.spec, self.is_nullable, Repr(
              self.kind), self.length)
    return GenericRepr(self, {
        'kind': True,
        'length': False,
        'is_nullable': False
    })

  def __eq__(self, rhs):
    return (isinstance(rhs, Array)
            and (self.kind, self.length) == (rhs.kind, rhs.length))

  def __hash__(self):
    # Identity-based hash: equal arrays may hash differently.
    return id(self)

  def IsBackwardCompatible(self, rhs, checker):
    """Compatible iff rhs is an Array with the same length and a
    backward-compatible element kind."""
    return (isinstance(rhs, Array) and self.length == rhs.length
            and checker.IsBackwardCompatible(self.kind, rhs.kind))
class Map(ReferenceKind):
  """A map.

  Attributes:
    key_kind: {Kind} The type of the keys. May be None.
    value_kind: {Kind} The type of the elements. May be None.
  """

  # See Kind.AddSharedProperty().
  Kind.AddSharedProperty('key_kind')
  Kind.AddSharedProperty('value_kind')

  def __init__(self, key_kind=None, value_kind=None):
    if (key_kind is not None and value_kind is not None):
      ReferenceKind.__init__(
          self, 'm[' + key_kind.spec + '][' + value_kind.spec + ']')
      # These kinds are rejected as map keys.
      if IsNullableKind(key_kind):
        raise Exception("Nullable kinds cannot be keys in maps.")
      if IsAnyHandleKind(key_kind):
        raise Exception("Handles cannot be keys in maps.")
      if IsAnyInterfaceKind(key_kind):
        raise Exception("Interfaces cannot be keys in maps.")
      if IsArrayKind(key_kind):
        raise Exception("Arrays cannot be keys in maps.")
    else:
      ReferenceKind.__init__(self)
    self.key_kind = key_kind
    self.value_kind = value_kind

  def Repr(self, as_ref=True):
    """Returns a debug-dump representation of this map kind."""
    if as_ref:
      return '<%s spec=%r is_nullable=%r key_kind=%s value_kind=%s>' % (
          self.__class__.__name__, self.spec, self.is_nullable,
          Repr(self.key_kind), Repr(self.value_kind))
    return GenericRepr(self, {'key_kind': True, 'value_kind': True})

  def __eq__(self, rhs):
    return (isinstance(rhs, Map) and
            (self.key_kind, self.value_kind) == (rhs.key_kind, rhs.value_kind))

  def __hash__(self):
    # Identity-based hash: equal maps may hash differently.
    return id(self)

  def IsBackwardCompatible(self, rhs, checker):
    """Compatible iff rhs is a Map with backward-compatible key and value
    kinds."""
    return (isinstance(rhs, Map)
            and checker.IsBackwardCompatible(self.key_kind, rhs.key_kind)
            and checker.IsBackwardCompatible(self.value_kind, rhs.value_kind))
class PendingRemote(ReferenceKind):
  """A pending_remote<T>; 'kind' is the Interface T, or None when unset."""

  # See Kind.AddSharedProperty().
  Kind.AddSharedProperty('kind')

  def __init__(self, kind=None):
    if kind is None:
      ReferenceKind.__init__(self)
      self.kind = None
      return
    if not isinstance(kind, Interface):
      raise Exception(
          'pending_remote<T> requires T to be an interface type. Got %r' %
          kind.spec)
    ReferenceKind.__init__(self, 'rmt:' + kind.spec)
    self.kind = kind

  def __eq__(self, rhs):
    if not isinstance(rhs, PendingRemote):
      return False
    return self.kind == rhs.kind

  def __hash__(self):
    # Identity-based hash: equal kinds may hash differently.
    return id(self)

  def IsBackwardCompatible(self, rhs, checker):
    """Compatible iff rhs is a PendingRemote over a compatible interface."""
    if not isinstance(rhs, PendingRemote):
      return False
    return checker.IsBackwardCompatible(self.kind, rhs.kind)
class PendingReceiver(ReferenceKind):
  """A pending_receiver<T>; 'kind' is the Interface T, or None when unset."""

  # See Kind.AddSharedProperty().
  Kind.AddSharedProperty('kind')

  def __init__(self, kind=None):
    if kind is None:
      ReferenceKind.__init__(self)
      self.kind = None
      return
    if not isinstance(kind, Interface):
      raise Exception(
          'pending_receiver<T> requires T to be an interface type. Got %r' %
          kind.spec)
    ReferenceKind.__init__(self, 'rcv:' + kind.spec)
    self.kind = kind

  def __eq__(self, rhs):
    if not isinstance(rhs, PendingReceiver):
      return False
    return self.kind == rhs.kind

  def __hash__(self):
    # Identity-based hash: equal kinds may hash differently.
    return id(self)

  def IsBackwardCompatible(self, rhs, checker):
    """Compatible iff rhs is a PendingReceiver over a compatible interface."""
    if not isinstance(rhs, PendingReceiver):
      return False
    return checker.IsBackwardCompatible(self.kind, rhs.kind)
class PendingAssociatedRemote(ReferenceKind):
  """A pending_associated_remote<T>; 'kind' is the Interface T, or None."""

  # See Kind.AddSharedProperty().
  Kind.AddSharedProperty('kind')

  def __init__(self, kind=None):
    if kind is None:
      ReferenceKind.__init__(self)
      self.kind = None
      return
    if not isinstance(kind, Interface):
      raise Exception(
          'pending_associated_remote<T> requires T to be an interface '
          'type. Got %r' % kind.spec)
    ReferenceKind.__init__(self, 'rma:' + kind.spec)
    self.kind = kind

  def __eq__(self, rhs):
    if not isinstance(rhs, PendingAssociatedRemote):
      return False
    return self.kind == rhs.kind

  def __hash__(self):
    # Identity-based hash: equal kinds may hash differently.
    return id(self)

  def IsBackwardCompatible(self, rhs, checker):
    """Compatible iff rhs is a PendingAssociatedRemote over a compatible
    interface."""
    if not isinstance(rhs, PendingAssociatedRemote):
      return False
    return checker.IsBackwardCompatible(self.kind, rhs.kind)
class PendingAssociatedReceiver(ReferenceKind):
  """A pending_associated_receiver<T>; 'kind' is the Interface T, or None."""

  # See Kind.AddSharedProperty().
  Kind.AddSharedProperty('kind')

  def __init__(self, kind=None):
    if kind is not None:
      if not isinstance(kind, Interface):
        # Fixed: the original message concatenated '...an interface' and
        # 'type...' without a separating space ("interfacetype").
        raise Exception(
            'pending_associated_receiver<T> requires T to be an interface '
            'type. Got %r' % kind.spec)
      ReferenceKind.__init__(self, 'rca:' + kind.spec)
    else:
      ReferenceKind.__init__(self)
    self.kind = kind

  def __eq__(self, rhs):
    return isinstance(rhs, PendingAssociatedReceiver) and self.kind == rhs.kind

  def __hash__(self):
    # Identity-based hash: equal kinds may hash differently.
    return id(self)

  def IsBackwardCompatible(self, rhs, checker):
    """Compatible iff rhs is a PendingAssociatedReceiver over a compatible
    interface."""
    return isinstance(
        rhs, PendingAssociatedReceiver) and checker.IsBackwardCompatible(
            self.kind, rhs.kind)
class InterfaceRequest(ReferenceKind):
  """A legacy interface request; 'kind' is the Interface, or None."""

  # See Kind.AddSharedProperty().
  Kind.AddSharedProperty('kind')

  def __init__(self, kind=None):
    if kind is None:
      ReferenceKind.__init__(self)
      self.kind = None
      return
    if not isinstance(kind, Interface):
      raise Exception(
          "Interface request requires %r to be an interface." % kind.spec)
    ReferenceKind.__init__(self, 'r:' + kind.spec)
    self.kind = kind

  def __eq__(self, rhs):
    if not isinstance(rhs, InterfaceRequest):
      return False
    return self.kind == rhs.kind

  def __hash__(self):
    # Identity-based hash: equal kinds may hash differently.
    return id(self)

  def IsBackwardCompatible(self, rhs, checker):
    """Compatible iff rhs is an InterfaceRequest over a compatible
    interface."""
    if not isinstance(rhs, InterfaceRequest):
      return False
    return checker.IsBackwardCompatible(self.kind, rhs.kind)
class AssociatedInterfaceRequest(ReferenceKind):
  """A legacy associated interface request.

  Constructed from an InterfaceRequest; 'kind' stores the underlying
  Interface (i.e. the request's own kind), or None when unset.
  """

  # See Kind.AddSharedProperty().
  Kind.AddSharedProperty('kind')

  def __init__(self, kind=None):
    if kind is None:
      ReferenceKind.__init__(self)
      self.kind = None
      return
    if not isinstance(kind, InterfaceRequest):
      raise Exception(
          "Associated interface request requires %r to be an interface "
          "request." % kind.spec)
    assert not kind.is_nullable
    ReferenceKind.__init__(self, 'asso:' + kind.spec)
    # Unwrap: keep the underlying Interface, not the InterfaceRequest.
    self.kind = kind.kind

  def __eq__(self, rhs):
    if not isinstance(rhs, AssociatedInterfaceRequest):
      return False
    return self.kind == rhs.kind

  def __hash__(self):
    # Identity-based hash: equal kinds may hash differently.
    return id(self)

  def IsBackwardCompatible(self, rhs, checker):
    """Compatible iff rhs is an AssociatedInterfaceRequest over a compatible
    interface."""
    if not isinstance(rhs, AssociatedInterfaceRequest):
      return False
    return checker.IsBackwardCompatible(self.kind, rhs.kind)
class Parameter:
  """A single request or response parameter of a mojom method."""

  def __init__(self,
               mojom_name=None,
               kind=None,
               ordinal=None,
               default=None,
               attributes=None):
    self.mojom_name = mojom_name
    self.name = None  # Stylized name, set by Stylize().
    self.ordinal = ordinal
    self.kind = kind
    self.default = default
    self.attributes = attributes

  def Repr(self, as_ref=True):
    # pylint: disable=unused-argument
    return '<%s mojom_name=%r kind=%s>' % (
        self.__class__.__name__, self.mojom_name, self.kind.Repr(as_ref=True))

  def Stylize(self, stylizer):
    """Applies the language-specific stylizer to this parameter."""
    self.name = stylizer.StylizeParameter(self.mojom_name)

  @property
  def min_version(self):
    """The [MinVersion] attribute value, or None if unset."""
    if not self.attributes:
      return None
    return self.attributes.get(ATTRIBUTE_MIN_VERSION)

  def __eq__(self, rhs):
    if not isinstance(rhs, Parameter):
      return False
    return ((self.mojom_name, self.ordinal, self.kind, self.default,
             self.attributes) == (rhs.mojom_name, rhs.ordinal, rhs.kind,
                                  rhs.default, rhs.attributes))
class Method:
  """A single method declared on a mojom Interface.

  response_parameters is None when the method expects no reply, and a
  (possibly empty) list when it does.
  """

  def __init__(self, interface, mojom_name, ordinal=None, attributes=None):
    self.interface = interface
    self.mojom_name = mojom_name
    self.name = None  # Stylized name, set by Stylize().
    self.explicit_ordinal = ordinal
    self.ordinal = ordinal
    self.parameters = []
    # Synthesized parameter structs; presumably populated by a later
    # translation pass (they are not built here) -- TODO confirm.
    self.param_struct = None
    self.response_parameters = None
    self.response_param_struct = None
    self.attributes = attributes

  def Repr(self, as_ref=True):
    """Returns a debug-dump representation of this method."""
    if as_ref:
      return '<%s mojom_name=%r>' % (self.__class__.__name__, self.mojom_name)
    return GenericRepr(self, {
        'mojom_name': False,
        'parameters': True,
        'response_parameters': True
    })

  def AddParameter(self,
                   mojom_name,
                   kind,
                   ordinal=None,
                   default=None,
                   attributes=None):
    """Appends a new request Parameter and returns it."""
    parameter = Parameter(mojom_name, kind, ordinal, default, attributes)
    self.parameters.append(parameter)
    return parameter

  def AddResponseParameter(self,
                           mojom_name,
                           kind,
                           ordinal=None,
                           default=None,
                           attributes=None):
    """Appends a new response Parameter and returns it, marking the method as
    expecting a reply if it wasn't already."""
    if self.response_parameters is None:  # Fixed idiom: was '== None'.
      self.response_parameters = []
    parameter = Parameter(mojom_name, kind, ordinal, default, attributes)
    self.response_parameters.append(parameter)
    return parameter

  def Stylize(self, stylizer):
    """Applies the language-specific stylizer to this method, its parameters
    and its synthesized parameter structs."""
    self.name = stylizer.StylizeMethod(self.mojom_name)
    for param in self.parameters:
      param.Stylize(stylizer)
    if self.response_parameters is not None:
      for param in self.response_parameters:
        param.Stylize(stylizer)
    if self.param_struct:
      self.param_struct.Stylize(stylizer)
    if self.response_param_struct:
      self.response_param_struct.Stylize(stylizer)

  @property
  def min_version(self):
    """The [MinVersion] attribute value, or None if unset."""
    return self.attributes.get(ATTRIBUTE_MIN_VERSION) \
        if self.attributes else None

  @property
  def sync(self):
    """The [Sync] attribute value, or None if unset."""
    return self.attributes.get(ATTRIBUTE_SYNC) \
        if self.attributes else None

  @property
  def allow_interrupt(self):
    """False when the method is tagged [NoInterrupt]; True otherwise."""
    return not self.attributes.get(ATTRIBUTE_NO_INTERRUPT) \
        if self.attributes else True

  @property
  def unlimited_message_size(self):
    """The [UnlimitedSize] attribute value; defaults to False."""
    return self.attributes.get(ATTRIBUTE_UNLIMITED_SIZE) \
        if self.attributes else False

  @property
  def allowed_context(self):
    """The [AllowedContext] attribute value, or None if unset."""
    return self.attributes.get(ATTRIBUTE_ALLOWED_CONTEXT) \
        if self.attributes else None

  @property
  def supports_urgent(self):
    """The [SupportsUrgent] attribute value, or None if unset."""
    return self.attributes.get(ATTRIBUTE_SUPPORTS_URGENT) \
        if self.attributes else None

  @property
  def runtime_feature(self):
    """The [RuntimeFeature] attribute as a Feature, or None if unset."""
    if not self.attributes:
      return None
    runtime_feature = self.attributes.get(ATTRIBUTE_RUNTIME_FEATURE, None)
    if runtime_feature is None:
      return None
    if not isinstance(runtime_feature, Feature):
      raise Exception("RuntimeFeature attribute on %s must be a feature." %
                      self.name)
    return runtime_feature

  def _tuple(self):
    # Distinguishing attributes, used by __eq__ and __lt__.
    return (self.mojom_name, self.ordinal, self.parameters,
            self.response_parameters, self.attributes)

  def __eq__(self, rhs):
    return isinstance(rhs, Method) and self._tuple() == rhs._tuple()

  def __lt__(self, rhs):
    # Methods of different Python types order by their type name.
    if not isinstance(self, type(rhs)):
      return str(type(self)) < str(type(rhs))
    return self._tuple() < rhs._tuple()
class Interface(ReferenceKind):
  """A mojom interface: a named collection of methods, plus any enums and
  constants declared in the interface's scope.

  Attributes:
    mojom_name: {str} The name of the interface as defined in mojom.
    name: {str} The stylized name, set by Stylize().
    methods: {List[Method]} The methods of the interface.
    enums: {List[Enum]} Enums declared in the interface scope.
    constants: {List[Constant]} Constants declared in the interface scope.
    attributes: {dict} Additional information about the interface.
  """

  # See Kind.AddSharedProperty().
  Kind.AddSharedProperty('mojom_name')
  Kind.AddSharedProperty('name')
  Kind.AddSharedProperty('methods')
  Kind.AddSharedProperty('enums')
  Kind.AddSharedProperty('constants')
  Kind.AddSharedProperty('attributes')

  def __init__(self, mojom_name=None, module=None, attributes=None):
    # Named interfaces get an 'x:<name>' spec; anonymous ones get no spec.
    if mojom_name is not None:
      spec = 'x:' + mojom_name
    else:
      spec = None
    ReferenceKind.__init__(self, spec, False, module)
    self.mojom_name = mojom_name
    self.name = None  # Stylized name, set by Stylize().
    self.methods = []
    self.enums = []
    self.constants = []
    self.attributes = attributes

  def Repr(self, as_ref=True):
    """Returns a debug-dump representation of this interface."""
    if as_ref:
      return '<%s mojom_name=%r>' % (self.__class__.__name__, self.mojom_name)
    return GenericRepr(self, {
        'mojom_name': False,
        'attributes': False,
        'methods': False
    })

  def AddMethod(self, mojom_name, ordinal=None, attributes=None):
    """Appends a new Method to this interface and returns it."""
    method = Method(self, mojom_name, ordinal, attributes)
    self.methods.append(method)
    return method

  def Stylize(self, stylizer):
    """Applies the language-specific stylizer to this interface and its
    methods, enums and constants."""
    self.name = stylizer.StylizeInterface(self.mojom_name)
    for method in self.methods:
      method.Stylize(stylizer)
    for enum in self.enums:
      enum.Stylize(stylizer)
    for constant in self.constants:
      constant.Stylize(stylizer)

  def IsBackwardCompatible(self, rhs, checker):
    """This interface is backward-compatible with rhs (older_interface) if and
    only if all of the following conditions hold:
      - All defined methods in rhs (when identified by ordinal) have
        backward-compatible definitions in this interface. For each method this
        means:
          - The parameter list is backward-compatible, according to backward-
            compatibility rules for structs, where each parameter is essentially
            a struct field.
          - If the old method definition does not specify a reply message, the
            new method definition must not specify a reply message.
          - If the old method definition specifies a reply message, the new
            method definition must also specify a reply message with a parameter
            list that is backward-compatible according to backward-compatibility
            rules for structs.
      - All newly introduced methods in this interface have a [MinVersion]
        attribute specifying a version greater than any method in
        rhs.
    """

    def buildOrdinalMethodMap(interface):
      # Maps ordinal -> method, raising on duplicate ordinals.
      methods_by_ordinal = {}
      for method in interface.methods:
        if method.ordinal in methods_by_ordinal:
          raise Exception('Multiple methods with ordinal %s in interface %s.' %
                          (method.ordinal, interface.mojom_name))
        methods_by_ordinal[method.ordinal] = method
      return methods_by_ordinal

    new_methods = buildOrdinalMethodMap(self)
    old_methods = buildOrdinalMethodMap(rhs)
    max_old_min_version = 0
    for ordinal, old_method in old_methods.items():
      new_method = new_methods.get(ordinal)
      if not new_method:
        # A method was removed, which is not OK.
        return False
      if not checker.IsBackwardCompatible(new_method.param_struct,
                                          old_method.param_struct):
        # The parameter list is not backward-compatible, which is not OK.
        return False
      if old_method.response_param_struct is None:
        if new_method.response_param_struct is not None:
          # A reply was added to a message which didn't have one before, and
          # this is not OK.
          return False
      else:
        if new_method.response_param_struct is None:
          # A reply was removed from a message, which is not OK.
          return False
        if not checker.IsBackwardCompatible(new_method.response_param_struct,
                                            old_method.response_param_struct):
          # The new message's reply is not backward-compatible with the old
          # message's reply, which is not OK.
          return False
      if (old_method.min_version or 0) > max_old_min_version:
        max_old_min_version = old_method.min_version
    # All the old methods are compatible with their new counterparts. Now verify
    # that newly added methods are properly versioned.
    new_ordinals = set(new_methods.keys()) - set(old_methods.keys())
    for ordinal in new_ordinals:
      new_method = new_methods[ordinal]
      if (new_method.min_version or 0) <= max_old_min_version:
        # A method was added to an existing version, which is not OK.
        return False
    return True

  @property
  def service_sandbox(self):
    """The [ServiceSandbox] attribute as an EnumValue, or None if unset."""
    if not self.attributes:
      return None
    service_sandbox = self.attributes.get(ATTRIBUTE_SERVICE_SANDBOX, None)
    if service_sandbox is None:
      return None
    # Constants are only allowed to refer to an enum here, so replace.
    if isinstance(service_sandbox, Constant):
      service_sandbox = service_sandbox.value
    if not isinstance(service_sandbox, EnumValue):
      raise Exception("ServiceSandbox attribute on %s must be an enum value." %
                      self.module.name)
    return service_sandbox

  @property
  def runtime_feature(self):
    """The [RuntimeFeature] attribute as a Feature, or None if unset."""
    if not self.attributes:
      return None
    runtime_feature = self.attributes.get(ATTRIBUTE_RUNTIME_FEATURE, None)
    if runtime_feature is None:
      return None
    if not isinstance(runtime_feature, Feature):
      raise Exception("RuntimeFeature attribute on %s must be a feature." %
                      self.name)
    return runtime_feature

  @property
  def require_context(self):
    """The [RequireContext] attribute value, or None if unset."""
    if not self.attributes:
      return None
    return self.attributes.get(ATTRIBUTE_REQUIRE_CONTEXT, None)

  @property
  def stable(self):
    # [Stable] attribute; defaults to False.
    return self.attributes.get(ATTRIBUTE_STABLE, False) \
        if self.attributes else False

  @property
  def qualified_name(self):
    # Fully qualified mojom name, prefixed by the enclosing kind or the
    # module namespace.
    if self.parent_kind:
      prefix = self.parent_kind.qualified_name + '.'
    else:
      prefix = self.module.GetNamespacePrefix()
    return '%s%s' % (prefix, self.mojom_name)

  def _tuple(self):
    # Distinguishing attributes, used by __eq__ and __lt__.
    return (self.mojom_name, self.methods, self.enums, self.constants,
            self.attributes)

  def __eq__(self, rhs):
    return isinstance(rhs, Interface) and self._tuple() == rhs._tuple()

  def __lt__(self, rhs):
    # Kinds of different Python types order by their type name.
    if not isinstance(self, type(rhs)):
      return str(type(self)) < str(type(rhs))
    return self._tuple() < rhs._tuple()

  @property
  def uuid(self):
    """The [Uuid] attribute parsed into a (high-64-bit, low-64-bit) int pair,
    or None when the attribute is absent.

    Raises:
      ValueError: if the attribute is not a valid RFC 4122 UUID string.
    """
    uuid_str = self.attributes.get(ATTRIBUTE_UUID) if self.attributes else None
    if uuid_str is None:
      return None
    try:
      u = UUID(uuid_str)
    # NOTE(review): a bare 'except' also swallows unrelated errors;
    # presumably 'except ValueError' was intended -- confirm before changing.
    except:
      raise ValueError('Invalid format for Uuid attribute on interface {}. '
                       'Expected standard RFC 4122 string representation of '
                       'a UUID.'.format(self.mojom_name))
    return (int(u.hex[:16], 16), int(u.hex[16:], 16))

  def __hash__(self):
    # Identity-based hash: equal interfaces may hash differently.
    return id(self)
class AssociatedInterface(ReferenceKind):
  """An associated interface; 'kind' is the underlying Interface, or None."""

  # See Kind.AddSharedProperty().
  Kind.AddSharedProperty('kind')

  def __init__(self, kind=None):
    if kind is None:
      ReferenceKind.__init__(self)
      self.kind = None
      return
    if not isinstance(kind, Interface):
      raise Exception(
          "Associated interface requires %r to be an interface." % kind.spec)
    assert not kind.is_nullable
    ReferenceKind.__init__(self, 'asso:' + kind.spec)
    self.kind = kind

  def __eq__(self, rhs):
    if not isinstance(rhs, AssociatedInterface):
      return False
    return self.kind == rhs.kind

  def __hash__(self):
    # Identity-based hash: equal kinds may hash differently.
    return id(self)

  def IsBackwardCompatible(self, rhs, checker):
    """Compatible iff rhs is an AssociatedInterface over a compatible
    interface."""
    if not isinstance(rhs, AssociatedInterface):
      return False
    return checker.IsBackwardCompatible(self.kind, rhs.kind)
class EnumField:
  """A single name/value entry of an Enum."""

  def __init__(self,
               mojom_name=None,
               value=None,
               attributes=None,
               numeric_value=None):
    self.mojom_name = mojom_name
    self.name = None  # Stylized name, set by Stylize().
    self.value = value
    self.attributes = attributes
    self.numeric_value = numeric_value

  def Stylize(self, stylizer):
    """Applies the language-specific stylizer to this field."""
    self.name = stylizer.StylizeEnumField(self.mojom_name)

  @property
  def default(self):
    """True when this field carries the [Default] attribute."""
    if not self.attributes:
      return False
    return self.attributes.get(ATTRIBUTE_DEFAULT, False)

  @property
  def min_version(self):
    """The [MinVersion] attribute value, or None if unset."""
    if not self.attributes:
      return None
    return self.attributes.get(ATTRIBUTE_MIN_VERSION)

  def __eq__(self, rhs):
    if not isinstance(rhs, EnumField):
      return False
    return ((self.mojom_name, self.value, self.attributes,
             self.numeric_value) == (rhs.mojom_name, rhs.value, rhs.attributes,
                                     rhs.numeric_value))
class Enum(ValueKind):
  """A mojom enum type.

  Attributes:
    mojom_name: {str} The name of the enum as defined in mojom.
    name: {str} The stylized name, set by Stylize().
    fields: {List[EnumField]} The values of the enum.
    attributes: {dict} Additional information about the enum.
    min_value/max_value: Smallest/largest numeric value; presumably filled in
      by a later translation pass (not computed here) -- TODO confirm.
    default_field: The field carrying the [Default] attribute, if any.
  """

  # See Kind.AddSharedProperty().
  Kind.AddSharedProperty('mojom_name')
  Kind.AddSharedProperty('name')
  Kind.AddSharedProperty('native_only')
  Kind.AddSharedProperty('fields')
  Kind.AddSharedProperty('attributes')
  Kind.AddSharedProperty('min_value')
  Kind.AddSharedProperty('max_value')
  Kind.AddSharedProperty('default_field')

  def __init__(self, mojom_name=None, module=None, attributes=None):
    # Named enums get an 'x:<name>' spec; anonymous ones get no spec.
    if mojom_name is not None:
      spec = 'x:' + mojom_name
    else:
      spec = None
    ValueKind.__init__(self, spec, False, module)
    self.mojom_name = mojom_name
    self.name = None  # Stylized name, set by Stylize().
    self.native_only = False
    self.fields = []
    self.attributes = attributes
    self.min_value = None
    self.max_value = None
    self.default_field = None

  def Repr(self, as_ref=True):
    """Returns a debug-dump representation of this enum."""
    if as_ref:
      return '<%s mojom_name=%r>' % (self.__class__.__name__, self.mojom_name)
    return GenericRepr(self, {'mojom_name': False, 'fields': False})

  def Stylize(self, stylizer):
    """Applies the language-specific stylizer to this enum and its fields."""
    self.name = stylizer.StylizeEnum(self.mojom_name)
    for field in self.fields:
      field.Stylize(stylizer)

  @property
  def extensible(self):
    # [Extensible] attribute; defaults to False.
    return self.attributes.get(ATTRIBUTE_EXTENSIBLE, False) \
        if self.attributes else False

  @property
  def stable(self):
    # [Stable] attribute; defaults to False.
    return self.attributes.get(ATTRIBUTE_STABLE, False) \
        if self.attributes else False

  @property
  def qualified_name(self):
    # Fully qualified mojom name, prefixed by the enclosing kind or the
    # module namespace.
    if self.parent_kind:
      prefix = self.parent_kind.qualified_name + '.'
    else:
      prefix = self.module.GetNamespacePrefix()
    return '%s%s' % (prefix, self.mojom_name)

  # pylint: disable=unused-argument
  def IsBackwardCompatible(self, rhs, checker):
    """This enum is backward-compatible with rhs (older_enum) if and only if one
    of the following conditions holds:
        - Neither enum is [Extensible] and both have the exact same set of valid
          numeric values. Field names and aliases for the same numeric value do
          not affect compatibility.
        - rhs is [Extensible], and for every version defined by
          rhs, this enum has the exact same set of valid numeric values.

    Note: unlike other kinds, incompatibilities here raise an Exception with a
    diagnostic message instead of returning False.
    """

    def buildVersionFieldMap(enum):
      # Maps MinVersion -> set of numeric values introduced at that version.
      fields_by_min_version = {}
      for field in enum.fields:
        if field.min_version not in fields_by_min_version:
          fields_by_min_version[field.min_version] = set()
        fields_by_min_version[field.min_version].add(field.numeric_value)
      return fields_by_min_version

    old_fields = buildVersionFieldMap(rhs)
    new_fields = buildVersionFieldMap(self)

    if new_fields.keys() != old_fields.keys() and not rhs.extensible:
      raise Exception("Non-extensible enum cannot be modified")

    for min_version, valid_values in old_fields.items():
      if min_version not in new_fields:
        raise Exception('New values added to an extensible enum '
                        'do not specify MinVersion: %s' % new_fields)
      if (new_fields[min_version] != valid_values):
        if (len(new_fields[min_version]) < len(valid_values)):
          raise Exception('Removing values for an existing MinVersion %s '
                          'is not allowed' % min_version)
        raise Exception(
            'New values don\'t match old values'
            'for an existing MinVersion %s,'
            ' please specify MinVersion equal to "Next version" '
            'in the enum description'
            ' for the following values:\n%s' %
            (min_version, new_fields[min_version].difference(valid_values)))

    return True

  def _tuple(self):
    # Distinguishing attributes, used by __eq__ and __lt__.
    return (self.mojom_name, self.native_only, self.fields, self.attributes,
            self.min_value, self.max_value, self.default_field)

  def __eq__(self, rhs):
    return isinstance(rhs, Enum) and self._tuple() == rhs._tuple()

  def __lt__(self, rhs):
    # Kinds of different Python types order by their type name.
    if not isinstance(self, type(rhs)):
      return str(type(self)) < str(type(rhs))
    return self._tuple() < rhs._tuple()

  def __hash__(self):
    # Identity-based hash: equal enums may hash differently.
    return id(self)
class Module:
  """The result of processing a single mojom file: its namespace plus all
  top-level definitions, imports and metadata."""

  def __init__(self, path=None, mojom_namespace=None, attributes=None):
    self.path = path
    self.mojom_namespace = mojom_namespace
    self.namespace = None  # Stylized namespace, set by Stylize().
    self.structs = []
    self.unions = []
    self.interfaces = []
    self.enums = []
    self.features = []
    self.constants = []
    # Kind registries -- presumably populated by a later translation pass;
    # TODO confirm.
    self.kinds = OrderedDict()
    self.attributes = attributes
    self.imports = []
    self.imported_kinds = OrderedDict()
    self.metadata = OrderedDict()

  def __repr__(self):
    # Gives us a decent __repr__ for modules.
    return self.Repr()

  def __eq__(self, rhs):
    return (isinstance(rhs, Module)
            and (self.path, self.attributes, self.mojom_namespace, self.imports,
                 self.constants, self.enums, self.structs, self.unions,
                 self.interfaces, self.features)
            == (rhs.path, rhs.attributes, rhs.mojom_namespace, rhs.imports,
                rhs.constants, rhs.enums, rhs.structs, rhs.unions,
                rhs.interfaces, rhs.features))

  def __hash__(self):
    # Identity-based hash: equal modules may hash differently.
    return id(self)

  def Repr(self, as_ref=True):
    """Returns a debug-dump representation of this module."""
    if as_ref:
      return '<%s path=%r mojom_namespace=%r>' % (
          self.__class__.__name__, self.path, self.mojom_namespace)
    return GenericRepr(
        self, {
            'path': False,
            'mojom_namespace': False,
            'attributes': False,
            'structs': False,
            'interfaces': False,
            'unions': False,
            'features': False,
        })

  def GetNamespacePrefix(self):
    """Returns '<namespace>.' for namespaced modules, '' otherwise."""
    return '%s.' % self.mojom_namespace if self.mojom_namespace else ''

  def AddInterface(self, mojom_name, attributes=None):
    """Creates an Interface in this module and returns it."""
    interface = Interface(mojom_name, self, attributes)
    self.interfaces.append(interface)
    return interface

  def AddStruct(self, mojom_name, attributes=None):
    """Creates a Struct in this module and returns it."""
    struct = Struct(mojom_name, self, attributes)
    self.structs.append(struct)
    return struct

  def AddUnion(self, mojom_name, attributes=None):
    """Creates a Union in this module and returns it."""
    union = Union(mojom_name, self, attributes)
    self.unions.append(union)
    return union

  def AddFeature(self, mojom_name, attributes=None):
    """Creates a Feature in this module and returns it."""
    feature = Feature(mojom_name, self, attributes)
    self.features.append(feature)
    return feature

  def Stylize(self, stylizer):
    """Recursively applies the language-specific stylizer to everything
    defined in (or imported by) this module."""
    self.namespace = stylizer.StylizeModule(self.mojom_namespace)
    for struct in self.structs:
      struct.Stylize(stylizer)
    for union in self.unions:
      union.Stylize(stylizer)
    for interface in self.interfaces:
      interface.Stylize(stylizer)
    for enum in self.enums:
      enum.Stylize(stylizer)
    for constant in self.constants:
      constant.Stylize(stylizer)
    for feature in self.features:
      feature.Stylize(stylizer)
    for imported_module in self.imports:
      imported_module.Stylize(stylizer)

  def Dump(self, f):
    """Pickles this module to the file object |f|."""
    pickle.dump(self, f)

  @classmethod
  def Load(cls, f):
    """Unpickles a Module previously written with Dump().

    Note: pickle.load() must only ever be used on trusted input.
    """
    result = pickle.load(f)
    assert isinstance(result, Module)
    return result
# The predicates below identify simple kinds by spec comparison, so that the
# nullable variant of a kind satisfies the same predicate as its non-nullable
# counterpart.


def IsBoolKind(kind):
  return kind.spec == BOOL.spec or kind.spec == NULLABLE_BOOL.spec


def IsFloatKind(kind):
  return kind.spec == FLOAT.spec or kind.spec == NULLABLE_FLOAT.spec


def IsDoubleKind(kind):
  return kind.spec == DOUBLE.spec or kind.spec == NULLABLE_DOUBLE.spec


def IsIntegralKind(kind):
  # Note: bool counts as an integral kind here.
  return (kind.spec == BOOL.spec or kind.spec == INT8.spec
          or kind.spec == INT16.spec or kind.spec == INT32.spec
          or kind.spec == INT64.spec or kind.spec == UINT8.spec
          or kind.spec == UINT16.spec or kind.spec == UINT32.spec
          or kind.spec == UINT64.spec or kind.spec == NULLABLE_BOOL.spec
          or kind.spec == NULLABLE_INT8.spec or kind.spec == NULLABLE_INT16.spec
          or kind.spec == NULLABLE_INT32.spec
          or kind.spec == NULLABLE_INT64.spec
          or kind.spec == NULLABLE_UINT8.spec
          or kind.spec == NULLABLE_UINT16.spec
          or kind.spec == NULLABLE_UINT32.spec
          or kind.spec == NULLABLE_UINT64.spec)


def IsStringKind(kind):
  return kind.spec == STRING.spec or kind.spec == NULLABLE_STRING.spec


def IsGenericHandleKind(kind):
  return kind.spec == HANDLE.spec or kind.spec == NULLABLE_HANDLE.spec


def IsDataPipeConsumerKind(kind):
  return kind.spec == DCPIPE.spec or kind.spec == NULLABLE_DCPIPE.spec


def IsDataPipeProducerKind(kind):
  return kind.spec == DPPIPE.spec or kind.spec == NULLABLE_DPPIPE.spec


def IsMessagePipeKind(kind):
  return kind.spec == MSGPIPE.spec or kind.spec == NULLABLE_MSGPIPE.spec


def IsSharedBufferKind(kind):
  return (kind.spec == SHAREDBUFFER.spec
          or kind.spec == NULLABLE_SHAREDBUFFER.spec)


def IsPlatformHandleKind(kind):
  return (kind.spec == PLATFORMHANDLE.spec
          or kind.spec == NULLABLE_PLATFORMHANDLE.spec)
# The predicates below identify user-defined and container kinds by their
# Python class.


def IsStructKind(kind):
  return isinstance(kind, Struct)


def IsUnionKind(kind):
  return isinstance(kind, Union)


def IsArrayKind(kind):
  return isinstance(kind, Array)


def IsFeatureKind(kind):
  return isinstance(kind, Feature)


def IsInterfaceKind(kind):
  return isinstance(kind, Interface)


def IsAssociatedInterfaceKind(kind):
  return isinstance(kind, AssociatedInterface)


def IsInterfaceRequestKind(kind):
  return isinstance(kind, InterfaceRequest)


def IsAssociatedInterfaceRequestKind(kind):
  return isinstance(kind, AssociatedInterfaceRequest)


def IsPendingRemoteKind(kind):
  return isinstance(kind, PendingRemote)


def IsPendingReceiverKind(kind):
  return isinstance(kind, PendingReceiver)


def IsPendingAssociatedRemoteKind(kind):
  return isinstance(kind, PendingAssociatedRemote)


def IsPendingAssociatedReceiverKind(kind):
  return isinstance(kind, PendingAssociatedReceiver)


def IsEnumKind(kind):
  return isinstance(kind, Enum)


def IsValueKind(kind):
  return isinstance(kind, ValueKind)


def IsReferenceKind(kind):
  return isinstance(kind, ReferenceKind)


def IsNullableKind(kind):
  # Nullability is a flag on the kind instance, not a distinct class.
  return kind.is_nullable


def IsMapKind(kind):
  return isinstance(kind, Map)
# Aggregate predicates built on top of the basic predicates above.


def IsObjectKind(kind):
  return IsPointerKind(kind) or IsUnionKind(kind)


def IsPointerKind(kind):
  return (IsStructKind(kind) or IsArrayKind(kind) or IsStringKind(kind)
          or IsMapKind(kind))


# Please note that it doesn't include any interface kind.
def IsAnyHandleKind(kind):
  return (IsGenericHandleKind(kind) or IsDataPipeConsumerKind(kind)
          or IsDataPipeProducerKind(kind) or IsMessagePipeKind(kind)
          or IsSharedBufferKind(kind) or IsPlatformHandleKind(kind))


def IsAnyInterfaceKind(kind):
  return (IsInterfaceKind(kind) or IsInterfaceRequestKind(kind)
          or IsAssociatedKind(kind) or IsPendingRemoteKind(kind)
          or IsPendingReceiverKind(kind))


def IsAnyHandleOrInterfaceKind(kind):
  return IsAnyHandleKind(kind) or IsAnyInterfaceKind(kind)


def IsAssociatedKind(kind):
  return (IsAssociatedInterfaceKind(kind)
          or IsAssociatedInterfaceRequestKind(kind)
          or IsPendingAssociatedRemoteKind(kind)
          or IsPendingAssociatedReceiverKind(kind))
def HasCallbacks(interface):
  """Returns True if any method of |interface| expects a response.

  A method's response_parameters is None when no response is declared and a
  (possibly empty) list when one is, so this must be an identity test rather
  than a truthiness test. (Fixed idiom: was '!= None'.)
  """
  for method in interface.methods:
    if method.response_parameters is not None:
      return True
  return False
# Finds out whether an interface passes associated interfaces and associated
# interface requests.
def PassesAssociatedKinds(interface):
  """Returns True if any method of |interface| carries an associated kind.

  A single visited set is shared across all methods so each kind is only
  examined once.
  """
  visited_kinds = set()
  for method in interface.methods:
    if MethodPassesAssociatedKinds(method, visited_kinds):
      return True
  return False
def _AnyMethodParameterRecursive(method, predicate, visited_kinds=None):
  """Returns True if any parameter kind of |method| (request or response),
  recursing through arrays, maps, structs and unions, satisfies |predicate|.

  |visited_kinds| is a set of kinds already examined; it is updated in place
  so callers can share it across several methods.
  """

  def _HasProperty(kind):
    if kind in visited_kinds:
      # No need to examine the kind again.
      return False
    visited_kinds.add(kind)
    if predicate(kind):
      return True
    if IsArrayKind(kind):
      return _HasProperty(kind.kind)
    if IsStructKind(kind) or IsUnionKind(kind):
      for field in kind.fields:
        if _HasProperty(field.kind):
          return True
    if IsMapKind(kind):
      if _HasProperty(kind.key_kind) or _HasProperty(kind.value_kind):
        return True
    return False

  if visited_kinds is None:
    visited_kinds = set()

  for param in method.parameters:
    if _HasProperty(param.kind):
      return True
  # Fixed idiom: was '!= None'. response_parameters is None when the method
  # has no reply, and a (possibly empty) list when it does.
  if method.response_parameters is not None:
    for param in method.response_parameters:
      if _HasProperty(param.kind):
        return True
  return False
# Finds out whether a method passes associated interfaces and associated
# interface requests.
def MethodPassesAssociatedKinds(method, visited_kinds=None):
  """Returns True if any parameter of |method| carries an associated kind."""
  return _AnyMethodParameterRecursive(
      method, IsAssociatedKind, visited_kinds=visited_kinds)
# Determines whether a method passes interfaces.
def MethodPassesInterfaces(method):
  """Returns True if any parameter of |method| carries an interface kind."""
  return _AnyMethodParameterRecursive(method, IsInterfaceKind)
def GetSyncMethodOrdinals(interface):
  """Returns the ordinals of all methods on |interface| whose sync attribute
  is set, in declaration order."""
  ordinals = []
  for method in interface.methods:
    if method.sync:
      ordinals.append(method.ordinal)
  return ordinals
def HasUninterruptableMethods(interface):
  """Returns True if any method of |interface| disallows interrupts (i.e. its
  allow_interrupt property is falsy)."""
  return any(not method.allow_interrupt for method in interface.methods)
def ContainsHandlesOrInterfaces(kind):
  """Check if the kind contains any handles.

  This check is recursive so it checks all struct fields, containers elements,
  etc.

  Args:
    kind: {Kind} The kind to check.

  Returns:
    {bool}: True if the kind contains handles.
  """
  # We remember the types we already checked to avoid infinite recursion when
  # checking recursive (or mutually recursive) types:
  checked = set()

  def Check(kind):
    # Kinds are deduplicated by spec rather than object identity.
    if kind.spec in checked:
      return False
    checked.add(kind.spec)
    if IsStructKind(kind):
      return any(Check(field.kind) for field in kind.fields)
    if IsUnionKind(kind):
      return any(Check(field.kind) for field in kind.fields)
    if IsAnyHandleKind(kind):
      return True
    if IsAnyInterfaceKind(kind):
      return True
    if IsArrayKind(kind):
      return Check(kind.kind)
    if IsMapKind(kind):
      return Check(kind.key_kind) or Check(kind.value_kind)
    return False

  return Check(kind)
def ContainsNativeTypes(kind):
  """Check if the kind contains any native type (struct or enum).

  This check is recursive so it checks all struct fields, scoped interface
  enums, etc.

  Args:
    kind: {Kind} The kind to check.

  Returns:
    {bool}: True if the kind contains native types.
  """
  # We remember the types we already checked to avoid infinite recursion when
  # checking recursive (or mutually recursive) types:
  checked = set()

  def Check(kind):
    # Kinds are deduplicated by spec rather than object identity.
    if kind.spec in checked:
      return False
    checked.add(kind.spec)
    if IsEnumKind(kind):
      return kind.native_only
    if IsStructKind(kind):
      if kind.native_only:
        return True
      if any(enum.native_only for enum in kind.enums):
        return True
      return any(Check(field.kind) for field in kind.fields)
    if IsUnionKind(kind):
      return any(Check(field.kind) for field in kind.fields)
    if IsInterfaceKind(kind):
      return any(enum.native_only for enum in kind.enums)
    if IsArrayKind(kind):
      return Check(kind.kind)
    if IsMapKind(kind):
      return Check(kind.key_kind) or Check(kind.value_kind)
    return False

  return Check(kind)
|
0 | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom/generate/generator.py | # Copyright 2013 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Code shared by the various language-specific code generators."""
from __future__ import print_function
from functools import partial
import os.path
import re
from mojom import fileutil
from mojom.generate import module as mojom
from mojom.generate import pack
def ExpectedArraySize(kind):
  """Returns the declared fixed length of |kind| when it is an array kind,
  otherwise None."""
  return kind.length if mojom.IsArrayKind(kind) else None
def SplitCamelCase(identifier):
  """Splits a camel-cased |identifier| and returns a list of lower-cased
  strings.
  """
  # Break after an uppercase (plus optional digits) that is followed by an
  # upper+lower pair, i.e. the last letter of an acronym:
  # URLLoaderFactory -> URL_LoaderFactory.
  broken_after_acronyms = re.sub('([A-Z][0-9]*)(?=[A-Z][0-9]*[a-z])', r'\1_',
                                 identifier)
  # Break after a lowercase (plus optional digits) followed by an uppercase:
  # URL_LoaderFactory -> URL_Loader_Factory.
  fully_broken = re.sub('([a-z][0-9]*)(?=[A-Z])', r'\1_', broken_after_acronyms)
  return [word.lower() for word in fully_broken.split('_')]
def ToCamel(identifier, lower_initial=False, digits_split=False, delimiter='_'):
  """Splits |identifier| using |delimiter|, makes the first character of each
  word uppercased (but makes the first character of the first word lowercased
  if |lower_initial| is set to True), and joins the words. Please note that for
  each word, all the characters except the first one are untouched.
  """
  pieces = []
  upper_next = True
  for ch in identifier:
    if ch == delimiter:
      # Delimiters are dropped; the next character starts a new word.
      upper_next = True
    elif digits_split and ch.isdigit():
      # A digit is kept as-is but also terminates the current word.
      upper_next = True
      pieces.append(ch)
    elif upper_next:
      upper_next = False
      pieces.append(ch.upper())
    else:
      pieces.append(ch)
  result = ''.join(pieces)
  if lower_initial and result:
    result = result[0].lower() + result[1:]
  return result
def _ToSnakeCase(identifier, upper=False):
  """Splits camel-cased |identifier| into lower case words, removes the first
  word if it's "k" and joins them using "_" e.g. for "URLLoaderFactory",
  returns "URL_LOADER_FACTORY" if upper, otherwise "url_loader_factory".
  """
  words = SplitCamelCase(identifier)
  # Drop a Hungarian-style 'k' constant prefix, but never the whole name.
  if len(words) > 1 and words[0] == 'k':
    words = words[1:]
  # Identifiers cannot start with a digit.
  if words[0][0].isdigit():
    words[0] = '_' + words[0]
  if upper:
    words = [word.upper() for word in words]
  return '_'.join(words)
def ToUpperSnakeCase(identifier):
  """Splits camel-cased |identifier| into lower case words, removes the first
  word if it's "k" and joins them using "_", upper-casing each word: e.g.
  "URLLoaderFactory" -> "URL_LOADER_FACTORY", "kMaxValue" -> "MAX_VALUE".
  """
  return _ToSnakeCase(identifier, upper=True)
def ToLowerSnakeCase(identifier):
  """Splits camel-cased |identifier| into lower case words, removes the first
  word if it's "k" and joins them using "_": e.g. "URLLoaderFactory" ->
  "url_loader_factory", "kMaxValue" -> "max_value".
  """
  return _ToSnakeCase(identifier, upper=False)
class Stylizer:
  """Stylizers specify naming rules to map mojom names to names in generated
  code. For example, if you would like method_name in mojom to be mapped to
  MethodName in the generated code, you need to define a subclass of Stylizer
  and override StylizeMethod to do the conversion.

  Every method of the base class returns its input unchanged; subclasses
  override only the hooks whose naming they need to customize."""

  def StylizeConstant(self, mojom_name):
    """Returns the generated-code name for a constant."""
    return mojom_name

  def StylizeField(self, mojom_name):
    """Returns the generated-code name for a struct/union field."""
    return mojom_name

  def StylizeStruct(self, mojom_name):
    """Returns the generated-code name for a struct."""
    return mojom_name

  def StylizeUnion(self, mojom_name):
    """Returns the generated-code name for a union."""
    return mojom_name

  def StylizeParameter(self, mojom_name):
    """Returns the generated-code name for a method parameter."""
    return mojom_name

  def StylizeMethod(self, mojom_name):
    """Returns the generated-code name for a method."""
    return mojom_name

  def StylizeInterface(self, mojom_name):
    """Returns the generated-code name for an interface."""
    return mojom_name

  def StylizeEnumField(self, mojom_name):
    """Returns the generated-code name for an enum value."""
    return mojom_name

  def StylizeEnum(self, mojom_name):
    """Returns the generated-code name for an enum."""
    return mojom_name

  def StylizeFeature(self, mojom_name):
    """Returns the generated-code name for a feature."""
    return mojom_name

  def StylizeModule(self, mojom_namespace):
    """Returns the generated-code name for a module namespace."""
    return mojom_namespace
def WriteFile(contents, full_path):
  """Writes |contents| (str or bytes) to |full_path| as bytes, creating the
  parent directory if needed. The write is skipped entirely when the file
  already holds identical bytes."""
  data = contents if isinstance(contents, bytes) else contents.encode('utf8')
  # If the on-disk content already matches, leave the file untouched.
  if os.path.isfile(full_path):
    with open(full_path, 'rb') as existing_file:
      if existing_file.read() == data:
        return
  # Make sure the containing directory exists, then dump the data to disk.
  fileutil.EnsureDirectoryExists(os.path.dirname(full_path))
  with open(full_path, 'wb') as output_file:
    output_file.write(data)
def AddComputedData(module):
  """Adds computed data to the given module. The data is computed once and
  used repeatedly in the generation process."""

  def _AddStructComputedData(exported, struct):
    # Attaches field packing, byte layout and version info to |struct|, and
    # records whether the struct is exported from the module.
    struct.packed = pack.PackedStruct(struct)
    struct.bytes = pack.GetByteLayout(struct.packed)
    struct.versions = pack.GetVersionInfo(struct.packed)
    struct.exported = exported

  def _AddInterfaceComputedData(interface):
    # Computes |interface.version| (the max of all method/param versions) and
    # attaches a synthesized param struct (and response-param struct, when the
    # method has a response) to each method.
    interface.version = 0
    for method in interface.methods:
      # This field always mirrors the declared ordinal; it is never scrambled.
      method.sequential_ordinal = method.ordinal
      if method.min_version is not None:
        interface.version = max(interface.version, method.min_version)
      method.param_struct = _GetStructFromMethod(method)
      if interface.stable:
        # Param structs of a [Stable] interface are themselves marked stable.
        method.param_struct.attributes[mojom.ATTRIBUTE_STABLE] = True
        if method.explicit_ordinal is None:
          raise Exception(
              'Stable interfaces must declare explicit method ordinals. The '
              'method %s on stable interface %s does not declare an explicit '
              'ordinal.' % (method.mojom_name, interface.qualified_name))
      interface.version = max(interface.version,
                              method.param_struct.versions[-1].version)
      if method.response_parameters is not None:
        method.response_param_struct = _GetResponseStructFromMethod(method)
        if interface.stable:
          method.response_param_struct.attributes[mojom.ATTRIBUTE_STABLE] = True
        interface.version = max(
            interface.version,
            method.response_param_struct.versions[-1].version)
      else:
        method.response_param_struct = None

  def _GetStructFromMethod(method):
    """Converts a method's parameters into the fields of a struct."""
    params_class = "%s_%s_Params" % (method.interface.mojom_name,
                                     method.mojom_name)
    struct = mojom.Struct(params_class,
                          module=method.interface.module,
                          attributes={})
    for param in method.parameters:
      struct.AddField(
          param.mojom_name,
          param.kind,
          param.ordinal,
          attributes=param.attributes)
    _AddStructComputedData(False, struct)
    return struct

  def _GetResponseStructFromMethod(method):
    """Converts a method's response_parameters into the fields of a struct."""
    params_class = "%s_%s_ResponseParams" % (method.interface.mojom_name,
                                             method.mojom_name)
    struct = mojom.Struct(params_class,
                          module=method.interface.module,
                          attributes={})
    for param in method.response_parameters:
      struct.AddField(
          param.mojom_name,
          param.kind,
          param.ordinal,
          attributes=param.attributes)
    _AddStructComputedData(False, struct)
    return struct

  for struct in module.structs:
    _AddStructComputedData(True, struct)
  for interface in module.interfaces:
    _AddInterfaceComputedData(interface)
class Generator:
  """Base class for language-specific bindings generators.

  Holds the parsed mojom |module| plus generation options and provides
  file-output helpers; subclasses implement GenerateFiles().

  Pass |output_dir| to emit files to disk. Omit |output_dir| to echo all
  files to stdout.
  """

  def __init__(self,
               module,
               output_dir=None,
               typemap=None,
               variant=None,
               bytecode_path=None,
               for_blink=False,
               js_generate_struct_deserializers=False,
               export_attribute=None,
               export_header=None,
               generate_non_variant_code=False,
               support_lazy_serialization=False,
               disallow_native_types=False,
               disallow_interfaces=False,
               generate_message_ids=False,
               generate_fuzzing=False,
               enable_kythe_annotations=False,
               extra_cpp_template_paths=None,
               generate_extra_cpp_only=False):
    self.module = module
    self.output_dir = output_dir
    self.typemap = typemap or {}
    self.variant = variant
    self.bytecode_path = bytecode_path
    self.for_blink = for_blink
    self.js_generate_struct_deserializers = js_generate_struct_deserializers
    self.export_attribute = export_attribute
    self.export_header = export_header
    self.generate_non_variant_code = generate_non_variant_code
    self.support_lazy_serialization = support_lazy_serialization
    self.disallow_native_types = disallow_native_types
    self.disallow_interfaces = disallow_interfaces
    self.generate_message_ids = generate_message_ids
    self.generate_fuzzing = generate_fuzzing
    self.enable_kythe_annotations = enable_kythe_annotations
    self.extra_cpp_template_paths = extra_cpp_template_paths
    self.generate_extra_cpp_only = generate_extra_cpp_only

  def Write(self, contents, filename):
    """Writes |contents| to |filename| under |output_dir|, or echoes them to
    stdout when no output directory was configured."""
    if self.output_dir is None:
      print(contents)
      return
    full_path = os.path.join(self.output_dir, filename)
    WriteFile(contents, full_path)

  def OptimizeEmpty(self, contents):
    """Strips #include lines from a .cc file that contains no actual code.

    There are many such generated files and they collectively take a while to
    compile; their includes are unneeded. Returns |contents| unchanged when
    any real code is present.
    """
    lines = contents.splitlines()
    for line in lines:
      # Comments, preprocessor lines and namespace open/close lines do not
      # count as code.
      if line.startswith('#') or line.startswith('//'):
        continue
      if re.match(r'namespace .* {', line) or re.match(r'}.*//.*namespace',
                                                       line):
        continue
      if line.strip():
        # There is some actual code - return the unmodified contents.
        return contents
    # We reach here only for a .cc file with no actual code. The includes are
    # therefore unneeded and can be removed.
    new_lines = [line for line in lines if not line.startswith('#include')]
    if len(new_lines) < len(lines):
      new_lines.append('')
      new_lines.append('// Includes removed due to no code being generated.')
    return '\n'.join(new_lines)

  def WriteWithComment(self, contents, filename):
    """Prepends a do-not-edit banner to |contents| and writes it out via
    Write(), shrinking empty .cc files first."""
    generator_name = "mojom_bindings_generator.py"
    comment = "// %s is auto generated by %s, do not edit" % (filename,
                                                              generator_name)
    contents = comment + '\n' + '\n' + contents
    if filename.endswith('.cc'):
      contents = self.OptimizeEmpty(contents)
    self.Write(contents, filename)

  def GenerateFiles(self, args):
    """Emits all output files for this generator; implemented per language."""
    raise NotImplementedError("Subclasses must override/implement this method")

  def GetJinjaParameters(self):
    """Returns default constructor parameters for the jinja environment."""
    return {}

  def GetGlobals(self):
    """Returns global mappings for the template generation."""
    return {}
|
0 | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom/generate/check.py | # Copyright 2022 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Code shared by the various pre-generation mojom checkers."""
class CheckException(Exception):
  """Raised when a pre-generation check fails for a mojom module."""

  def __init__(self, module, message):
    self.module = module
    self.message = message
    super().__init__(message)

  def __str__(self):
    return "Failed mojo pre-generation check for {}:\n{}".format(
        self.module.path, self.message)
class Check:
  """Base class for pre-generation mojom checkers.

  A subclass's CheckModule() is called immediately before
  mojom.generate.Generator.GenerateFiles(); it should return True when its
  checks pass and raise an exception otherwise."""

  def __init__(self, module):
    self.module = module

  def CheckModule(self):
    """Runs this checker against |self.module|; implemented by subclasses."""
    raise NotImplementedError("Subclasses must override/implement this method")
|
0 | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom/parse/conditional_features_unittest.py | # Copyright 2018 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import importlib.util
import os
import sys
import unittest
def _GetDirAbove(dirname):
"""Returns the directory "above" this file containing |dirname| (which must
also be "above" this file)."""
path = os.path.abspath(__file__)
while True:
path, tail = os.path.split(path)
assert tail
if tail == dirname:
return path
try:
importlib.util.find_spec("mojom")
except ImportError:
sys.path.append(os.path.join(_GetDirAbove('pylib'), 'pylib'))
import mojom.parse.ast as ast
import mojom.parse.conditional_features as conditional_features
import mojom.parse.parser as parser
ENABLED_FEATURES = frozenset({'red', 'green', 'blue'})
class ConditionalFeaturesTest(unittest.TestCase):
"""Tests |mojom.parse.conditional_features|."""
def parseAndAssertEqual(self, source, expected_source):
definition = parser.Parse(source, "my_file.mojom")
conditional_features.RemoveDisabledDefinitions(definition, ENABLED_FEATURES)
expected = parser.Parse(expected_source, "my_file.mojom")
self.assertEquals(definition, expected)
def testFilterConst(self):
"""Test that Consts are correctly filtered."""
const_source = """
[EnableIf=blue]
const int kMyConst1 = 1;
[EnableIf=orange]
const double kMyConst2 = 2;
const int kMyConst3 = 3;
"""
expected_source = """
[EnableIf=blue]
const int kMyConst1 = 1;
const int kMyConst3 = 3;
"""
self.parseAndAssertEqual(const_source, expected_source)
def testFilterIfNotConst(self):
"""Test that Consts are correctly filtered."""
const_source = """
[EnableIfNot=blue]
const int kMyConst1 = 1;
[EnableIfNot=orange]
const double kMyConst2 = 2;
[EnableIf=blue]
const int kMyConst3 = 3;
[EnableIfNot=blue]
const int kMyConst4 = 4;
[EnableIfNot=purple]
const int kMyConst5 = 5;
"""
expected_source = """
[EnableIfNot=orange]
const double kMyConst2 = 2;
[EnableIf=blue]
const int kMyConst3 = 3;
[EnableIfNot=purple]
const int kMyConst5 = 5;
"""
self.parseAndAssertEqual(const_source, expected_source)
def testFilterIfNotMultipleConst(self):
"""Test that Consts are correctly filtered."""
const_source = """
[EnableIfNot=blue]
const int kMyConst1 = 1;
[EnableIfNot=orange]
const double kMyConst2 = 2;
[EnableIfNot=orange]
const int kMyConst3 = 3;
"""
expected_source = """
[EnableIfNot=orange]
const double kMyConst2 = 2;
[EnableIfNot=orange]
const int kMyConst3 = 3;
"""
self.parseAndAssertEqual(const_source, expected_source)
def testFilterEnum(self):
"""Test that EnumValues are correctly filtered from an Enum."""
enum_source = """
enum MyEnum {
[EnableIf=purple]
VALUE1,
[EnableIf=blue]
VALUE2,
VALUE3,
};
"""
expected_source = """
enum MyEnum {
[EnableIf=blue]
VALUE2,
VALUE3
};
"""
self.parseAndAssertEqual(enum_source, expected_source)
def testFilterImport(self):
"""Test that imports are correctly filtered from a Mojom."""
import_source = """
[EnableIf=blue]
import "foo.mojom";
import "bar.mojom";
[EnableIf=purple]
import "baz.mojom";
"""
expected_source = """
[EnableIf=blue]
import "foo.mojom";
import "bar.mojom";
"""
self.parseAndAssertEqual(import_source, expected_source)
def testFilterIfNotImport(self):
"""Test that imports are correctly filtered from a Mojom."""
import_source = """
[EnableIf=blue]
import "foo.mojom";
[EnableIfNot=purple]
import "bar.mojom";
[EnableIfNot=green]
import "baz.mojom";
"""
expected_source = """
[EnableIf=blue]
import "foo.mojom";
[EnableIfNot=purple]
import "bar.mojom";
"""
self.parseAndAssertEqual(import_source, expected_source)
def testFilterInterface(self):
"""Test that definitions are correctly filtered from an Interface."""
interface_source = """
interface MyInterface {
[EnableIf=blue]
enum MyEnum {
[EnableIf=purple]
VALUE1,
VALUE2,
};
[EnableIf=blue]
const int32 kMyConst = 123;
[EnableIf=purple]
MyMethod();
};
"""
expected_source = """
interface MyInterface {
[EnableIf=blue]
enum MyEnum {
VALUE2,
};
[EnableIf=blue]
const int32 kMyConst = 123;
};
"""
self.parseAndAssertEqual(interface_source, expected_source)
def testFilterMethod(self):
"""Test that Parameters are correctly filtered from a Method."""
method_source = """
interface MyInterface {
[EnableIf=blue]
MyMethod([EnableIf=purple] int32 a) => ([EnableIf=red] int32 b);
};
"""
expected_source = """
interface MyInterface {
[EnableIf=blue]
MyMethod() => ([EnableIf=red] int32 b);
};
"""
self.parseAndAssertEqual(method_source, expected_source)
def testFilterStruct(self):
"""Test that definitions are correctly filtered from a Struct."""
struct_source = """
struct MyStruct {
[EnableIf=blue]
enum MyEnum {
VALUE1,
[EnableIf=purple]
VALUE2,
};
[EnableIf=yellow]
const double kMyConst = 1.23;
[EnableIf=green]
int32 a;
double b;
[EnableIf=purple]
int32 c;
[EnableIf=blue]
double d;
int32 e;
[EnableIf=orange]
double f;
};
"""
expected_source = """
struct MyStruct {
[EnableIf=blue]
enum MyEnum {
VALUE1,
};
[EnableIf=green]
int32 a;
double b;
[EnableIf=blue]
double d;
int32 e;
};
"""
self.parseAndAssertEqual(struct_source, expected_source)
def testFilterIfNotStruct(self):
"""Test that definitions are correctly filtered from a Struct."""
struct_source = """
struct MyStruct {
[EnableIf=blue]
enum MyEnum {
VALUE1,
[EnableIfNot=red]
VALUE2,
};
[EnableIfNot=yellow]
const double kMyConst = 1.23;
[EnableIf=green]
int32 a;
double b;
[EnableIfNot=purple]
int32 c;
[EnableIf=blue]
double d;
int32 e;
[EnableIfNot=red]
double f;
};
"""
expected_source = """
struct MyStruct {
[EnableIf=blue]
enum MyEnum {
VALUE1,
};
[EnableIfNot=yellow]
const double kMyConst = 1.23;
[EnableIf=green]
int32 a;
double b;
[EnableIfNot=purple]
int32 c;
[EnableIf=blue]
double d;
int32 e;
};
"""
self.parseAndAssertEqual(struct_source, expected_source)
def testFilterUnion(self):
"""Test that UnionFields are correctly filtered from a Union."""
union_source = """
union MyUnion {
[EnableIf=yellow]
int32 a;
[EnableIf=red]
bool b;
};
"""
expected_source = """
union MyUnion {
[EnableIf=red]
bool b;
};
"""
self.parseAndAssertEqual(union_source, expected_source)
def testSameNameFields(self):
mojom_source = """
enum Foo {
[EnableIf=red]
VALUE1 = 5,
[EnableIf=yellow]
VALUE1 = 6,
};
[EnableIf=red]
const double kMyConst = 1.23;
[EnableIf=yellow]
const double kMyConst = 4.56;
"""
expected_source = """
enum Foo {
[EnableIf=red]
VALUE1 = 5,
};
[EnableIf=red]
const double kMyConst = 1.23;
"""
self.parseAndAssertEqual(mojom_source, expected_source)
def testFeaturesWithEnableIf(self):
mojom_source = """
feature Foo {
const string name = "FooFeature";
[EnableIf=red]
const bool default_state = false;
[EnableIf=yellow]
const bool default_state = true;
};
"""
expected_source = """
feature Foo {
const string name = "FooFeature";
[EnableIf=red]
const bool default_state = false;
};
"""
self.parseAndAssertEqual(mojom_source, expected_source)
def testMultipleEnableIfs(self):
source = """
enum Foo {
[EnableIf=red,EnableIf=yellow]
kBarValue = 5,
};
"""
definition = parser.Parse(source, "my_file.mojom")
self.assertRaises(conditional_features.EnableIfError,
conditional_features.RemoveDisabledDefinitions,
definition, ENABLED_FEATURES)
def testMultipleEnableIfs(self):
source = """
enum Foo {
[EnableIf=red,EnableIfNot=yellow]
kBarValue = 5,
};
"""
definition = parser.Parse(source, "my_file.mojom")
self.assertRaises(conditional_features.EnableIfError,
conditional_features.RemoveDisabledDefinitions,
definition, ENABLED_FEATURES)
def testMultipleEnableIfs(self):
source = """
enum Foo {
[EnableIfNot=red,EnableIfNot=yellow]
kBarValue = 5,
};
"""
definition = parser.Parse(source, "my_file.mojom")
self.assertRaises(conditional_features.EnableIfError,
conditional_features.RemoveDisabledDefinitions,
definition, ENABLED_FEATURES)
if __name__ == '__main__':
unittest.main()
|
0 | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom/parse/parser_unittest.py | # Copyright 2014 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from mojom.parse import ast
from mojom.parse import lexer
from mojom.parse import parser
class ParserTest(unittest.TestCase):
"""Tests |parser.Parse()|."""
def testTrivialValidSource(self):
"""Tests a trivial, but valid, .mojom source."""
source = """\
// This is a comment.
module my_module;
"""
expected = ast.Mojom(
ast.Module(('IDENTIFIER', 'my_module'), None), ast.ImportList(), [])
self.assertEquals(parser.Parse(source, "my_file.mojom"), expected)
def testSourceWithCrLfs(self):
"""Tests a .mojom source with CR-LFs instead of LFs."""
source = "// This is a comment.\r\n\r\nmodule my_module;\r\n"
expected = ast.Mojom(
ast.Module(('IDENTIFIER', 'my_module'), None), ast.ImportList(), [])
self.assertEquals(parser.Parse(source, "my_file.mojom"), expected)
def testUnexpectedEOF(self):
"""Tests a "truncated" .mojom source."""
source = """\
// This is a comment.
module my_module
"""
with self.assertRaisesRegexp(
parser.ParseError, r"^my_file\.mojom: Error: Unexpected end of file$"):
parser.Parse(source, "my_file.mojom")
def testCommentLineNumbers(self):
"""Tests that line numbers are correctly tracked when comments are
present."""
source1 = """\
// Isolated C++-style comments.
// Foo.
asdf1
"""
with self.assertRaisesRegexp(
parser.ParseError,
r"^my_file\.mojom:4: Error: Unexpected 'asdf1':\n *asdf1$"):
parser.Parse(source1, "my_file.mojom")
source2 = """\
// Consecutive C++-style comments.
// Foo.
// Bar.
struct Yada { // Baz.
// Quux.
int32 x;
};
asdf2
"""
with self.assertRaisesRegexp(
parser.ParseError,
r"^my_file\.mojom:10: Error: Unexpected 'asdf2':\n *asdf2$"):
parser.Parse(source2, "my_file.mojom")
source3 = """\
/* Single-line C-style comments. */
/* Foobar. */
/* Baz. */
asdf3
"""
with self.assertRaisesRegexp(
parser.ParseError,
r"^my_file\.mojom:5: Error: Unexpected 'asdf3':\n *asdf3$"):
parser.Parse(source3, "my_file.mojom")
source4 = """\
/* Multi-line C-style comments.
*/
/*
Foo.
Bar.
*/
/* Baz
Quux. */
asdf4
"""
with self.assertRaisesRegexp(
parser.ParseError,
r"^my_file\.mojom:10: Error: Unexpected 'asdf4':\n *asdf4$"):
parser.Parse(source4, "my_file.mojom")
def testSimpleStruct(self):
"""Tests a simple .mojom source that just defines a struct."""
source = """\
module my_module;
struct MyStruct {
int32 a;
double b;
};
"""
expected = ast.Mojom(
ast.Module(('IDENTIFIER', 'my_module'), None), ast.ImportList(), [
ast.Struct(
'MyStruct', None,
ast.StructBody([
ast.StructField('a', None, None, 'int32', None),
ast.StructField('b', None, None, 'double', None)
]))
])
self.assertEquals(parser.Parse(source, "my_file.mojom"), expected)
def testSimpleStructWithoutModule(self):
"""Tests a simple struct without an explict module statement."""
source = """\
struct MyStruct {
int32 a;
double b;
};
"""
expected = ast.Mojom(None, ast.ImportList(), [
ast.Struct(
'MyStruct', None,
ast.StructBody([
ast.StructField('a', None, None, 'int32', None),
ast.StructField('b', None, None, 'double', None)
]))
])
self.assertEquals(parser.Parse(source, "my_file.mojom"), expected)
def testValidStructDefinitions(self):
"""Tests all types of definitions that can occur in a struct."""
source = """\
struct MyStruct {
enum MyEnum { VALUE };
const double kMyConst = 1.23;
int32 a;
SomeOtherStruct b; // Invalidity detected at another stage.
};
"""
expected = ast.Mojom(None, ast.ImportList(), [
ast.Struct(
'MyStruct', None,
ast.StructBody([
ast.Enum('MyEnum', None,
ast.EnumValueList(ast.EnumValue('VALUE', None, None))),
ast.Const('kMyConst', None, 'double', '1.23'),
ast.StructField('a', None, None, 'int32', None),
ast.StructField('b', None, None, 'SomeOtherStruct', None)
]))
])
self.assertEquals(parser.Parse(source, "my_file.mojom"), expected)
def testInvalidStructDefinitions(self):
"""Tests that definitions that aren't allowed in a struct are correctly
detected."""
source1 = """\
struct MyStruct {
MyMethod(int32 a);
};
"""
with self.assertRaisesRegexp(
parser.ParseError, r"^my_file\.mojom:2: Error: Unexpected '\(':\n"
r" *MyMethod\(int32 a\);$"):
parser.Parse(source1, "my_file.mojom")
source2 = """\
struct MyStruct {
struct MyInnerStruct {
int32 a;
};
};
"""
with self.assertRaisesRegexp(
parser.ParseError, r"^my_file\.mojom:2: Error: Unexpected 'struct':\n"
r" *struct MyInnerStruct {$"):
parser.Parse(source2, "my_file.mojom")
source3 = """\
struct MyStruct {
interface MyInterface {
MyMethod(int32 a);
};
};
"""
with self.assertRaisesRegexp(
parser.ParseError,
r"^my_file\.mojom:2: Error: Unexpected 'interface':\n"
r" *interface MyInterface {$"):
parser.Parse(source3, "my_file.mojom")
def testMissingModuleName(self):
"""Tests an (invalid) .mojom with a missing module name."""
source1 = """\
// Missing module name.
module ;
struct MyStruct {
int32 a;
};
"""
with self.assertRaisesRegexp(
parser.ParseError,
r"^my_file\.mojom:2: Error: Unexpected ';':\n *module ;$"):
parser.Parse(source1, "my_file.mojom")
# Another similar case, but make sure that line-number tracking/reporting
# is correct.
source2 = """\
module
// This line intentionally left unblank.
struct MyStruct {
int32 a;
};
"""
with self.assertRaisesRegexp(
parser.ParseError, r"^my_file\.mojom:4: Error: Unexpected 'struct':\n"
r" *struct MyStruct {$"):
parser.Parse(source2, "my_file.mojom")
def testMultipleModuleStatements(self):
"""Tests an (invalid) .mojom with multiple module statements."""
source = """\
module foo;
module bar;
"""
with self.assertRaisesRegexp(
parser.ParseError,
r"^my_file\.mojom:2: Error: Multiple \"module\" statements not "
r"allowed:\n *module bar;$"):
parser.Parse(source, "my_file.mojom")
def testModuleStatementAfterImport(self):
"""Tests an (invalid) .mojom with a module statement after an import."""
source = """\
import "foo.mojom";
module foo;
"""
with self.assertRaisesRegexp(
parser.ParseError,
r"^my_file\.mojom:2: Error: \"module\" statements must precede imports "
r"and definitions:\n *module foo;$"):
parser.Parse(source, "my_file.mojom")
def testModuleStatementAfterDefinition(self):
"""Tests an (invalid) .mojom with a module statement after a definition."""
source = """\
struct MyStruct {
int32 a;
};
module foo;
"""
with self.assertRaisesRegexp(
parser.ParseError,
r"^my_file\.mojom:4: Error: \"module\" statements must precede imports "
r"and definitions:\n *module foo;$"):
parser.Parse(source, "my_file.mojom")
def testImportStatementAfterDefinition(self):
"""Tests an (invalid) .mojom with an import statement after a definition."""
source = """\
struct MyStruct {
int32 a;
};
import "foo.mojom";
"""
with self.assertRaisesRegexp(
parser.ParseError,
r"^my_file\.mojom:4: Error: \"import\" statements must precede "
r"definitions:\n *import \"foo.mojom\";$"):
parser.Parse(source, "my_file.mojom")
def testEnums(self):
"""Tests that enum statements are correctly parsed."""
source = """\
module my_module;
enum MyEnum1 { VALUE1, VALUE2 }; // No trailing comma.
enum MyEnum2 {
VALUE1 = -1,
VALUE2 = 0,
VALUE3 = + 987, // Check that space is allowed.
VALUE4 = 0xAF12,
VALUE5 = -0x09bcd,
VALUE6 = VALUE5,
VALUE7, // Leave trailing comma.
};
"""
expected = ast.Mojom(
ast.Module(('IDENTIFIER', 'my_module'), None), ast.ImportList(), [
ast.Enum(
'MyEnum1', None,
ast.EnumValueList([
ast.EnumValue('VALUE1', None, None),
ast.EnumValue('VALUE2', None, None)
])),
ast.Enum(
'MyEnum2', None,
ast.EnumValueList([
ast.EnumValue('VALUE1', None, '-1'),
ast.EnumValue('VALUE2', None, '0'),
ast.EnumValue('VALUE3', None, '+987'),
ast.EnumValue('VALUE4', None, '0xAF12'),
ast.EnumValue('VALUE5', None, '-0x09bcd'),
ast.EnumValue('VALUE6', None, ('IDENTIFIER', 'VALUE5')),
ast.EnumValue('VALUE7', None, None)
]))
])
self.assertEquals(parser.Parse(source, "my_file.mojom"), expected)
def testInvalidEnumInitializers(self):
"""Tests that invalid enum initializers are correctly detected."""
# Floating point value.
source2 = "enum MyEnum { VALUE = 0.123 };"
with self.assertRaisesRegexp(
parser.ParseError, r"^my_file\.mojom:1: Error: Unexpected '0\.123':\n"
r"enum MyEnum { VALUE = 0\.123 };$"):
parser.Parse(source2, "my_file.mojom")
# Boolean value.
source2 = "enum MyEnum { VALUE = true };"
with self.assertRaisesRegexp(
parser.ParseError, r"^my_file\.mojom:1: Error: Unexpected 'true':\n"
r"enum MyEnum { VALUE = true };$"):
parser.Parse(source2, "my_file.mojom")
def testConsts(self):
"""Tests some constants and struct members initialized with them."""
source = """\
module my_module;
struct MyStruct {
const int8 kNumber = -1;
int8 number@0 = kNumber;
};
"""
expected = ast.Mojom(
ast.Module(('IDENTIFIER', 'my_module'), None), ast.ImportList(), [
ast.Struct(
'MyStruct', None,
ast.StructBody([
ast.Const('kNumber', None, 'int8', '-1'),
ast.StructField('number', None, ast.Ordinal(0), 'int8',
('IDENTIFIER', 'kNumber'))
]))
])
self.assertEquals(parser.Parse(source, "my_file.mojom"), expected)
def testNoConditionals(self):
"""Tests that ?: is not allowed."""
source = """\
module my_module;
enum MyEnum {
MY_ENUM_1 = 1 ? 2 : 3
};
"""
with self.assertRaisesRegexp(
parser.ParseError, r"^my_file\.mojom:4: Error: Unexpected '\?':\n"
r" *MY_ENUM_1 = 1 \? 2 : 3$"):
parser.Parse(source, "my_file.mojom")
def testSimpleOrdinals(self):
"""Tests that (valid) ordinal values are scanned correctly."""
source = """\
module my_module;
// This isn't actually valid .mojom, but the problem (missing ordinals)
// should be handled at a different level.
struct MyStruct {
int32 a0@0;
int32 a1@1;
int32 a2@2;
int32 a9@9;
int32 a10 @10;
int32 a11 @11;
int32 a29 @29;
int32 a1234567890 @1234567890;
};
"""
expected = ast.Mojom(
ast.Module(('IDENTIFIER', 'my_module'), None), ast.ImportList(), [
ast.Struct(
'MyStruct', None,
ast.StructBody([
ast.StructField('a0', None, ast.Ordinal(0), 'int32', None),
ast.StructField('a1', None, ast.Ordinal(1), 'int32', None),
ast.StructField('a2', None, ast.Ordinal(2), 'int32', None),
ast.StructField('a9', None, ast.Ordinal(9), 'int32', None),
ast.StructField('a10', None, ast.Ordinal(10), 'int32',
None),
ast.StructField('a11', None, ast.Ordinal(11), 'int32',
None),
ast.StructField('a29', None, ast.Ordinal(29), 'int32',
None),
ast.StructField('a1234567890', None,
ast.Ordinal(1234567890), 'int32', None)
]))
])
self.assertEquals(parser.Parse(source, "my_file.mojom"), expected)
def testInvalidOrdinals(self):
"""Tests that (lexically) invalid ordinals are correctly detected."""
source1 = """\
module my_module;
struct MyStruct {
int32 a_missing@;
};
"""
with self.assertRaisesRegexp(
lexer.LexError, r"^my_file\.mojom:4: Error: Missing ordinal value$"):
parser.Parse(source1, "my_file.mojom")
source2 = """\
module my_module;
struct MyStruct {
int32 a_octal@01;
};
"""
with self.assertRaisesRegexp(
lexer.LexError, r"^my_file\.mojom:4: Error: "
r"Octal and hexadecimal ordinal values not allowed$"):
parser.Parse(source2, "my_file.mojom")
source3 = """\
module my_module; struct MyStruct { int32 a_invalid_octal@08; };
"""
with self.assertRaisesRegexp(
lexer.LexError, r"^my_file\.mojom:1: Error: "
r"Octal and hexadecimal ordinal values not allowed$"):
parser.Parse(source3, "my_file.mojom")
source4 = "module my_module; struct MyStruct { int32 a_hex@0x1aB9; };"
with self.assertRaisesRegexp(
lexer.LexError, r"^my_file\.mojom:1: Error: "
r"Octal and hexadecimal ordinal values not allowed$"):
parser.Parse(source4, "my_file.mojom")
source5 = "module my_module; struct MyStruct { int32 a_hex@0X0; };"
with self.assertRaisesRegexp(
lexer.LexError, r"^my_file\.mojom:1: Error: "
r"Octal and hexadecimal ordinal values not allowed$"):
parser.Parse(source5, "my_file.mojom")
source6 = """\
struct MyStruct {
int32 a_too_big@999999999999;
};
"""
with self.assertRaisesRegexp(
parser.ParseError, r"^my_file\.mojom:2: Error: "
r"Ordinal value 999999999999 too large:\n"
r" *int32 a_too_big@999999999999;$"):
parser.Parse(source6, "my_file.mojom")
def testNestedNamespace(self):
  """Tests that "nested" namespaces work."""
  source = """\
      module my.mod;

      struct MyStruct {
        int32 a;
      };
      """
  expected = ast.Mojom(
      ast.Module(('IDENTIFIER', 'my.mod'), None), ast.ImportList(), [
          ast.Struct(
              'MyStruct', None,
              ast.StructBody(ast.StructField('a', None, None, 'int32', None)))
      ])
  # assertEqual: assertEquals is a deprecated alias removed in Python 3.12.
  self.assertEqual(parser.Parse(source, "my_file.mojom"), expected)

def testValidHandleTypes(self):
  """Tests (valid) handle types."""
  source = """\
      struct MyStruct {
        handle a;
        handle<data_pipe_consumer> b;
        handle <data_pipe_producer> c;
        handle < message_pipe > d;
        handle
            < shared_buffer
            > e;
        handle
            <platform
            > f;
      };
      """
  expected = ast.Mojom(None, ast.ImportList(), [
      ast.Struct(
          'MyStruct', None,
          ast.StructBody([
              ast.StructField('a', None, None, 'handle', None),
              ast.StructField('b', None, None, 'handle<data_pipe_consumer>',
                              None),
              ast.StructField('c', None, None, 'handle<data_pipe_producer>',
                              None),
              ast.StructField('d', None, None, 'handle<message_pipe>', None),
              ast.StructField('e', None, None, 'handle<shared_buffer>', None),
              ast.StructField('f', None, None, 'handle<platform>', None)
          ]))
  ])
  self.assertEqual(parser.Parse(source, "my_file.mojom"), expected)

def testInvalidHandleType(self):
  """Tests an invalid (unknown) handle type."""
  source = """\
      struct MyStruct {
        handle<wtf_is_this> foo;
      };
      """
  # assertRaisesRegex: assertRaisesRegexp was removed in Python 3.12.
  with self.assertRaisesRegex(
      parser.ParseError, r"^my_file\.mojom:2: Error: "
      r"Invalid handle type 'wtf_is_this':\n"
      r" *handle<wtf_is_this> foo;$"):
    parser.Parse(source, "my_file.mojom")
def testValidDefaultValues(self):
  """Tests default values that are valid (to the parser)."""
  source = """\
      struct MyStruct {
        int16 a0 = 0;
        uint16 a1 = 0x0;
        uint16 a2 = 0x00;
        uint16 a3 = 0x01;
        uint16 a4 = 0xcd;
        int32 a5 = 12345;
        int64 a6 = -12345;
        int64 a7 = +12345;
        uint32 a8 = 0x12cd3;
        uint32 a9 = -0x12cD3;
        uint32 a10 = +0x12CD3;
        bool a11 = true;
        bool a12 = false;
        float a13 = 1.2345;
        float a14 = -1.2345;
        float a15 = +1.2345;
        float a16 = 123.;
        float a17 = .123;
        double a18 = 1.23E10;
        double a19 = 1.E-10;
        double a20 = .5E+10;
        double a21 = -1.23E10;
        double a22 = +.123E10;
      };
      """
  expected = ast.Mojom(None, ast.ImportList(), [
      ast.Struct(
          'MyStruct', None,
          ast.StructBody([
              ast.StructField('a0', None, None, 'int16', '0'),
              ast.StructField('a1', None, None, 'uint16', '0x0'),
              ast.StructField('a2', None, None, 'uint16', '0x00'),
              ast.StructField('a3', None, None, 'uint16', '0x01'),
              ast.StructField('a4', None, None, 'uint16', '0xcd'),
              ast.StructField('a5', None, None, 'int32', '12345'),
              ast.StructField('a6', None, None, 'int64', '-12345'),
              ast.StructField('a7', None, None, 'int64', '+12345'),
              ast.StructField('a8', None, None, 'uint32', '0x12cd3'),
              ast.StructField('a9', None, None, 'uint32', '-0x12cD3'),
              ast.StructField('a10', None, None, 'uint32', '+0x12CD3'),
              ast.StructField('a11', None, None, 'bool', 'true'),
              ast.StructField('a12', None, None, 'bool', 'false'),
              ast.StructField('a13', None, None, 'float', '1.2345'),
              ast.StructField('a14', None, None, 'float', '-1.2345'),
              ast.StructField('a15', None, None, 'float', '+1.2345'),
              ast.StructField('a16', None, None, 'float', '123.'),
              ast.StructField('a17', None, None, 'float', '.123'),
              ast.StructField('a18', None, None, 'double', '1.23E10'),
              ast.StructField('a19', None, None, 'double', '1.E-10'),
              ast.StructField('a20', None, None, 'double', '.5E+10'),
              ast.StructField('a21', None, None, 'double', '-1.23E10'),
              ast.StructField('a22', None, None, 'double', '+.123E10')
          ]))
  ])
  # assertEqual: assertEquals is a deprecated alias removed in Python 3.12.
  self.assertEqual(parser.Parse(source, "my_file.mojom"), expected)

def testValidFixedSizeArray(self):
  """Tests parsing a fixed size array."""
  source = """\
      struct MyStruct {
        array<int32> normal_array;
        array<int32, 1> fixed_size_array_one_entry;
        array<int32, 10> fixed_size_array_ten_entries;
        array<array<array<int32, 1>>, 2> nested_arrays;
      };
      """
  expected = ast.Mojom(None, ast.ImportList(), [
      ast.Struct(
          'MyStruct', None,
          ast.StructBody([
              ast.StructField('normal_array', None, None, 'int32[]', None),
              ast.StructField('fixed_size_array_one_entry', None, None,
                              'int32[1]', None),
              ast.StructField('fixed_size_array_ten_entries', None, None,
                              'int32[10]', None),
              ast.StructField('nested_arrays', None, None, 'int32[1][][2]',
                              None)
          ]))
  ])
  self.assertEqual(parser.Parse(source, "my_file.mojom"), expected)
def testValidNestedArray(self):
  """Tests parsing a nested array."""
  source = "struct MyStruct { array<array<int32>> nested_array; };"
  expected = ast.Mojom(None, ast.ImportList(), [
      ast.Struct(
          'MyStruct', None,
          ast.StructBody(
              ast.StructField('nested_array', None, None, 'int32[][]', None)))
  ])
  # assertEqual: assertEquals is a deprecated alias removed in Python 3.12.
  self.assertEqual(parser.Parse(source, "my_file.mojom"), expected)

def testInvalidFixedArraySize(self):
  """Tests that invalid fixed array bounds are correctly detected."""
  source1 = """\
      struct MyStruct {
        array<int32, 0> zero_size_array;
      };
      """
  # assertRaisesRegex: assertRaisesRegexp was removed in Python 3.12.
  with self.assertRaisesRegex(
      parser.ParseError,
      r"^my_file\.mojom:2: Error: Fixed array size 0 invalid:\n"
      r" *array<int32, 0> zero_size_array;$"):
    parser.Parse(source1, "my_file.mojom")
  source2 = """\
      struct MyStruct {
        array<int32, 999999999999> too_big_array;
      };
      """
  with self.assertRaisesRegex(
      parser.ParseError,
      r"^my_file\.mojom:2: Error: Fixed array size 999999999999 invalid:\n"
      r" *array<int32, 999999999999> too_big_array;$"):
    parser.Parse(source2, "my_file.mojom")
  source3 = """\
      struct MyStruct {
        array<int32, abcdefg> not_a_number;
      };
      """
  with self.assertRaisesRegex(
      parser.ParseError, r"^my_file\.mojom:2: Error: Unexpected 'abcdefg':\n"
      r" *array<int32, abcdefg> not_a_number;"):
    parser.Parse(source3, "my_file.mojom")
def testValidAssociativeArrays(self):
  """Tests that we can parse valid associative array structures."""
  source1 = "struct MyStruct { map<string, uint8> data; };"
  expected1 = ast.Mojom(None, ast.ImportList(), [
      ast.Struct(
          'MyStruct', None,
          ast.StructBody(
              [ast.StructField('data', None, None, 'uint8{string}', None)]))
  ])
  # assertEqual: assertEquals is a deprecated alias removed in Python 3.12.
  self.assertEqual(parser.Parse(source1, "my_file.mojom"), expected1)
  source2 = "interface MyInterface { MyMethod(map<string, uint8> a); };"
  expected2 = ast.Mojom(None, ast.ImportList(), [
      ast.Interface(
          'MyInterface', None,
          ast.InterfaceBody(
              ast.Method(
                  'MyMethod', None, None,
                  ast.ParameterList(
                      ast.Parameter('a', None, None, 'uint8{string}')),
                  None)))
  ])
  self.assertEqual(parser.Parse(source2, "my_file.mojom"), expected2)
  source3 = "struct MyStruct { map<string, array<uint8>> data; };"
  expected3 = ast.Mojom(None, ast.ImportList(), [
      ast.Struct(
          'MyStruct', None,
          ast.StructBody(
              [ast.StructField('data', None, None, 'uint8[]{string}', None)]))
  ])
  self.assertEqual(parser.Parse(source3, "my_file.mojom"), expected3)

def testValidMethod(self):
  """Tests parsing method declarations."""
  source1 = "interface MyInterface { MyMethod(int32 a); };"
  expected1 = ast.Mojom(None, ast.ImportList(), [
      ast.Interface(
          'MyInterface', None,
          ast.InterfaceBody(
              ast.Method(
                  'MyMethod', None, None,
                  ast.ParameterList(ast.Parameter('a', None, None, 'int32')),
                  None)))
  ])
  self.assertEqual(parser.Parse(source1, "my_file.mojom"), expected1)
  source2 = """\
      interface MyInterface {
        MyMethod1@0(int32 a@0, int64 b@1);
        MyMethod2@1() => ();
      };
      """
  expected2 = ast.Mojom(None, ast.ImportList(), [
      ast.Interface(
          'MyInterface', None,
          ast.InterfaceBody([
              ast.Method(
                  'MyMethod1', None, ast.Ordinal(0),
                  ast.ParameterList([
                      ast.Parameter('a', None, ast.Ordinal(0), 'int32'),
                      ast.Parameter('b', None, ast.Ordinal(1), 'int64')
                  ]), None),
              ast.Method('MyMethod2', None, ast.Ordinal(1),
                         ast.ParameterList(), ast.ParameterList())
          ]))
  ])
  self.assertEqual(parser.Parse(source2, "my_file.mojom"), expected2)
  source3 = """\
      interface MyInterface {
        MyMethod(string a) => (int32 a, bool b);
      };
      """
  expected3 = ast.Mojom(None, ast.ImportList(), [
      ast.Interface(
          'MyInterface', None,
          ast.InterfaceBody(
              ast.Method(
                  'MyMethod', None, None,
                  ast.ParameterList(ast.Parameter('a', None, None, 'string')),
                  ast.ParameterList([
                      ast.Parameter('a', None, None, 'int32'),
                      ast.Parameter('b', None, None, 'bool')
                  ]))))
  ])
  self.assertEqual(parser.Parse(source3, "my_file.mojom"), expected3)
def testInvalidMethods(self):
  """Tests that invalid method declarations are correctly detected."""
  # No trailing commas.
  source1 = """\
      interface MyInterface {
        MyMethod(string a,);
      };
      """
  # assertRaisesRegex: assertRaisesRegexp was removed in Python 3.12.
  with self.assertRaisesRegex(
      parser.ParseError, r"^my_file\.mojom:2: Error: Unexpected '\)':\n"
      r" *MyMethod\(string a,\);$"):
    parser.Parse(source1, "my_file.mojom")
  # No leading commas.
  source2 = """\
      interface MyInterface {
        MyMethod(, string a);
      };
      """
  with self.assertRaisesRegex(
      parser.ParseError, r"^my_file\.mojom:2: Error: Unexpected ',':\n"
      r" *MyMethod\(, string a\);$"):
    parser.Parse(source2, "my_file.mojom")

def testValidInterfaceDefinitions(self):
  """Tests all types of definitions that can occur in an interface."""
  source = """\
      interface MyInterface {
        enum MyEnum { VALUE };
        const int32 kMyConst = 123;
        MyMethod(int32 x) => (MyEnum y);
      };
      """
  expected = ast.Mojom(None, ast.ImportList(), [
      ast.Interface(
          'MyInterface', None,
          ast.InterfaceBody([
              ast.Enum('MyEnum', None,
                       ast.EnumValueList(ast.EnumValue('VALUE', None, None))),
              ast.Const('kMyConst', None, 'int32', '123'),
              ast.Method(
                  'MyMethod', None, None,
                  ast.ParameterList(ast.Parameter('x', None, None, 'int32')),
                  ast.ParameterList(ast.Parameter('y', None, None, 'MyEnum')))
          ]))
  ])
  self.assertEqual(parser.Parse(source, "my_file.mojom"), expected)

def testInvalidInterfaceDefinitions(self):
  """Tests that definitions that aren't allowed in an interface are correctly
  detected."""
  source1 = """\
      interface MyInterface {
        struct MyStruct {
          int32 a;
        };
      };
      """
  with self.assertRaisesRegex(
      parser.ParseError, r"^my_file\.mojom:2: Error: Unexpected 'struct':\n"
      r" *struct MyStruct {$"):
    parser.Parse(source1, "my_file.mojom")
  source2 = """\
      interface MyInterface {
        interface MyInnerInterface {
          MyMethod(int32 x);
        };
      };
      """
  with self.assertRaisesRegex(
      parser.ParseError,
      r"^my_file\.mojom:2: Error: Unexpected 'interface':\n"
      r" *interface MyInnerInterface {$"):
    parser.Parse(source2, "my_file.mojom")
  source3 = """\
      interface MyInterface {
        int32 my_field;
      };
      """
  # The parser thinks that "int32" is a plausible name for a method, so it's
  # "my_field" that gives it away.
  with self.assertRaisesRegex(
      parser.ParseError, r"^my_file\.mojom:2: Error: Unexpected 'my_field':\n"
      r" *int32 my_field;$"):
    parser.Parse(source3, "my_file.mojom")
def testValidAttributes(self):
  """Tests parsing attributes (and attribute lists)."""
  # Note: We use structs because they have (optional) attribute lists.
  # Empty attribute list.
  source1 = "[] struct MyStruct {};"
  expected1 = ast.Mojom(
      None, ast.ImportList(),
      [ast.Struct('MyStruct', ast.AttributeList(), ast.StructBody())])
  # assertEqual: assertEquals is a deprecated alias removed in Python 3.12.
  self.assertEqual(parser.Parse(source1, "my_file.mojom"), expected1)
  # One-element attribute list, with name value.
  source2 = "[MyAttribute=MyName] struct MyStruct {};"
  expected2 = ast.Mojom(None, ast.ImportList(), [
      ast.Struct('MyStruct',
                 ast.AttributeList(ast.Attribute("MyAttribute", "MyName")),
                 ast.StructBody())
  ])
  self.assertEqual(parser.Parse(source2, "my_file.mojom"), expected2)
  # Two-element attribute list, with one string value and one integer value.
  source3 = "[MyAttribute1 = \"hello\", MyAttribute2 = 5] struct MyStruct {};"
  expected3 = ast.Mojom(None, ast.ImportList(), [
      ast.Struct(
          'MyStruct',
          ast.AttributeList([
              ast.Attribute("MyAttribute1", "hello"),
              ast.Attribute("MyAttribute2", 5)
          ]), ast.StructBody())
  ])
  self.assertEqual(parser.Parse(source3, "my_file.mojom"), expected3)
  # Various places that attribute list is allowed.
  # Note: The enum value must be named VALUE to match |expected4| below (the
  # source previously said "a", which could never compare equal).
  source4 = """\
      [Attr0=0] module my_module;

      [Attr1=1] import "my_import";

      [Attr2=2] struct MyStruct {
        [Attr3=3] int32 a;
      };
      [Attr4=4] union MyUnion {
        [Attr5=5] int32 a;
      };
      [Attr6=6] enum MyEnum {
        [Attr7=7] VALUE
      };
      [Attr8=8] interface MyInterface {
        [Attr9=9] MyMethod([Attr10=10] int32 a) => ([Attr11=11] bool b);
      };
      [Attr12=12] const double kMyConst = 1.23;
      """
  expected4 = ast.Mojom(
      ast.Module(('IDENTIFIER', 'my_module'),
                 ast.AttributeList([ast.Attribute("Attr0", 0)])),
      ast.ImportList(
          ast.Import(
              ast.AttributeList([ast.Attribute("Attr1", 1)]), "my_import")),
      [
          ast.Struct(
              'MyStruct', ast.AttributeList(ast.Attribute("Attr2", 2)),
              ast.StructBody(
                  ast.StructField(
                      'a', ast.AttributeList([ast.Attribute("Attr3", 3)]),
                      None, 'int32', None))),
          ast.Union(
              'MyUnion', ast.AttributeList(ast.Attribute("Attr4", 4)),
              ast.UnionBody(
                  ast.UnionField(
                      'a', ast.AttributeList([ast.Attribute("Attr5", 5)]),
                      None, 'int32'))),
          ast.Enum(
              'MyEnum', ast.AttributeList(ast.Attribute("Attr6", 6)),
              ast.EnumValueList(
                  ast.EnumValue(
                      'VALUE', ast.AttributeList([ast.Attribute("Attr7", 7)]),
                      None))),
          ast.Interface(
              'MyInterface', ast.AttributeList(ast.Attribute("Attr8", 8)),
              ast.InterfaceBody(
                  ast.Method(
                      'MyMethod', ast.AttributeList(
                          ast.Attribute("Attr9", 9)), None,
                      ast.ParameterList(
                          ast.Parameter(
                              'a',
                              ast.AttributeList([ast.Attribute("Attr10", 10)
                                                 ]), None, 'int32')),
                      ast.ParameterList(
                          ast.Parameter(
                              'b',
                              ast.AttributeList([ast.Attribute("Attr11", 11)
                                                 ]), None, 'bool'))))),
          ast.Const('kMyConst', ast.AttributeList(
              ast.Attribute("Attr12", 12)), 'double', '1.23')
      ])
  self.assertEqual(parser.Parse(source4, "my_file.mojom"), expected4)
  # TODO(vtl): Boolean attributes don't work yet. (In fact, we just |eval()|
  # literal (non-name) values, which is extremely dubious.)

def testInvalidAttributes(self):
  """Tests that invalid attributes and attribute lists are correctly
  detected."""
  # Trailing commas not allowed.
  source1 = "[MyAttribute=MyName,] struct MyStruct {};"
  # assertRaisesRegex: assertRaisesRegexp was removed in Python 3.12.
  with self.assertRaisesRegex(
      parser.ParseError, r"^my_file\.mojom:1: Error: Unexpected '\]':\n"
      r"\[MyAttribute=MyName,\] struct MyStruct {};$"):
    parser.Parse(source1, "my_file.mojom")
  # Missing value.
  source2 = "[MyAttribute=] struct MyStruct {};"
  with self.assertRaisesRegex(
      parser.ParseError, r"^my_file\.mojom:1: Error: Unexpected '\]':\n"
      r"\[MyAttribute=\] struct MyStruct {};$"):
    parser.Parse(source2, "my_file.mojom")
  # Missing key.
  source3 = "[=MyName] struct MyStruct {};"
  with self.assertRaisesRegex(
      parser.ParseError, r"^my_file\.mojom:1: Error: Unexpected '=':\n"
      r"\[=MyName\] struct MyStruct {};$"):
    parser.Parse(source3, "my_file.mojom")
def testValidImports(self):
  """Tests parsing import statements."""
  # One import (no module statement).
  source1 = "import \"somedir/my.mojom\";"
  expected1 = ast.Mojom(None,
                        ast.ImportList(ast.Import(None, "somedir/my.mojom")),
                        [])
  # assertEqual: assertEquals is a deprecated alias removed in Python 3.12.
  self.assertEqual(parser.Parse(source1, "my_file.mojom"), expected1)
  # Two imports (no module statement).
  source2 = """\
      import "somedir/my1.mojom";
      import "somedir/my2.mojom";
      """
  expected2 = ast.Mojom(
      None,
      ast.ImportList([
          ast.Import(None, "somedir/my1.mojom"),
          ast.Import(None, "somedir/my2.mojom")
      ]), [])
  self.assertEqual(parser.Parse(source2, "my_file.mojom"), expected2)
  # Imports with module statement.
  source3 = """\
      module my_module;
      import "somedir/my1.mojom";
      import "somedir/my2.mojom";
      """
  expected3 = ast.Mojom(
      ast.Module(('IDENTIFIER', 'my_module'), None),
      ast.ImportList([
          ast.Import(None, "somedir/my1.mojom"),
          ast.Import(None, "somedir/my2.mojom")
      ]), [])
  self.assertEqual(parser.Parse(source3, "my_file.mojom"), expected3)

def testInvalidImports(self):
  """Tests that invalid import statements are correctly detected."""
  source1 = """\
      // Make the error occur on line 2.
      import invalid
      """
  # assertRaisesRegex: assertRaisesRegexp was removed in Python 3.12.
  with self.assertRaisesRegex(
      parser.ParseError, r"^my_file\.mojom:2: Error: Unexpected 'invalid':\n"
      r" *import invalid$"):
    parser.Parse(source1, "my_file.mojom")
  source2 = """\
      import  // Missing string.
      struct MyStruct {
        int32 a;
      };
      """
  with self.assertRaisesRegex(
      parser.ParseError, r"^my_file\.mojom:2: Error: Unexpected 'struct':\n"
      r" *struct MyStruct {$"):
    parser.Parse(source2, "my_file.mojom")
  source3 = """\
      import "foo.mojom"  // Missing semicolon.
      struct MyStruct {
        int32 a;
      };
      """
  with self.assertRaisesRegex(
      parser.ParseError, r"^my_file\.mojom:2: Error: Unexpected 'struct':\n"
      r" *struct MyStruct {$"):
    parser.Parse(source3, "my_file.mojom")
def testValidNullableTypes(self):
  """Tests parsing nullable types."""
  source = """\
      struct MyStruct {
        int32? a;  // This is actually invalid, but handled at a different
                   // level.
        string? b;
        array<int32> ? c;
        array<string ? > ? d;
        array<array<int32>?>? e;
        array<int32, 1>? f;
        array<string?, 1>? g;
        some_struct? h;
        handle? i;
        handle<data_pipe_consumer>? j;
        handle<data_pipe_producer>? k;
        handle<message_pipe>? l;
        handle<shared_buffer>? m;
        pending_receiver<some_interface>? n;
        handle<platform>? o;
      };
      """
  expected = ast.Mojom(None, ast.ImportList(), [
      ast.Struct(
          'MyStruct', None,
          ast.StructBody([
              ast.StructField('a', None, None, 'int32?', None),
              ast.StructField('b', None, None, 'string?', None),
              ast.StructField('c', None, None, 'int32[]?', None),
              ast.StructField('d', None, None, 'string?[]?', None),
              ast.StructField('e', None, None, 'int32[]?[]?', None),
              ast.StructField('f', None, None, 'int32[1]?', None),
              ast.StructField('g', None, None, 'string?[1]?', None),
              ast.StructField('h', None, None, 'some_struct?', None),
              ast.StructField('i', None, None, 'handle?', None),
              ast.StructField('j', None, None, 'handle<data_pipe_consumer>?',
                              None),
              ast.StructField('k', None, None, 'handle<data_pipe_producer>?',
                              None),
              ast.StructField('l', None, None, 'handle<message_pipe>?', None),
              ast.StructField('m', None, None, 'handle<shared_buffer>?',
                              None),
              ast.StructField('n', None, None, 'rcv<some_interface>?', None),
              ast.StructField('o', None, None, 'handle<platform>?', None)
          ]))
  ])
  # assertEqual: assertEquals is a deprecated alias removed in Python 3.12.
  self.assertEqual(parser.Parse(source, "my_file.mojom"), expected)

def testInvalidNullableTypes(self):
  """Tests that invalid nullable types are correctly detected."""
  source1 = """\
      struct MyStruct {
        string?? a;
      };
      """
  # assertRaisesRegex: assertRaisesRegexp was removed in Python 3.12.
  with self.assertRaisesRegex(
      parser.ParseError, r"^my_file\.mojom:2: Error: Unexpected '\?':\n"
      r" *string\?\? a;$"):
    parser.Parse(source1, "my_file.mojom")
  source2 = """\
      struct MyStruct {
        handle?<data_pipe_consumer> a;
      };
      """
  with self.assertRaisesRegex(
      parser.ParseError, r"^my_file\.mojom:2: Error: Unexpected '<':\n"
      r" *handle\?<data_pipe_consumer> a;$"):
    parser.Parse(source2, "my_file.mojom")
def testSimpleUnion(self):
  """Tests a simple .mojom source that just defines a union."""
  source = """\
      module my_module;
      union MyUnion {
        int32 a;
        double b;
      };
      """
  expected = ast.Mojom(
      ast.Module(('IDENTIFIER', 'my_module'), None), ast.ImportList(), [
          ast.Union(
              'MyUnion', None,
              ast.UnionBody([
                  ast.UnionField('a', None, None, 'int32'),
                  ast.UnionField('b', None, None, 'double')
              ]))
      ])
  actual = parser.Parse(source, "my_file.mojom")
  # assertEqual: assertEquals is a deprecated alias removed in Python 3.12.
  self.assertEqual(actual, expected)

def testUnionWithOrdinals(self):
  """Test that ordinals are assigned to fields."""
  source = """\
      module my_module;
      union MyUnion {
        int32 a @10;
        double b @30;
      };
      """
  expected = ast.Mojom(
      ast.Module(('IDENTIFIER', 'my_module'), None), ast.ImportList(), [
          ast.Union(
              'MyUnion', None,
              ast.UnionBody([
                  ast.UnionField('a', None, ast.Ordinal(10), 'int32'),
                  ast.UnionField('b', None, ast.Ordinal(30), 'double')
              ]))
      ])
  actual = parser.Parse(source, "my_file.mojom")
  self.assertEqual(actual, expected)

def testUnionWithStructMembers(self):
  """Test that struct members are accepted."""
  source = """\
      module my_module;
      union MyUnion {
        SomeStruct s;
      };
      """
  expected = ast.Mojom(
      ast.Module(('IDENTIFIER', 'my_module'), None), ast.ImportList(), [
          ast.Union(
              'MyUnion', None,
              ast.UnionBody([ast.UnionField('s', None, None, 'SomeStruct')]))
      ])
  actual = parser.Parse(source, "my_file.mojom")
  self.assertEqual(actual, expected)

def testUnionWithArrayMember(self):
  """Test that array members are accepted."""
  source = """\
      module my_module;
      union MyUnion {
        array<int32> a;
      };
      """
  expected = ast.Mojom(
      ast.Module(('IDENTIFIER', 'my_module'), None), ast.ImportList(), [
          ast.Union(
              'MyUnion', None,
              ast.UnionBody([ast.UnionField('a', None, None, 'int32[]')]))
      ])
  actual = parser.Parse(source, "my_file.mojom")
  self.assertEqual(actual, expected)

def testUnionWithMapMember(self):
  """Test that map members are accepted."""
  source = """\
      module my_module;
      union MyUnion {
        map<int32, string> m;
      };
      """
  expected = ast.Mojom(
      ast.Module(('IDENTIFIER', 'my_module'), None), ast.ImportList(), [
          ast.Union(
              'MyUnion', None,
              ast.UnionBody(
                  [ast.UnionField('m', None, None, 'string{int32}')]))
      ])
  actual = parser.Parse(source, "my_file.mojom")
  self.assertEqual(actual, expected)
def testUnionDisallowNestedStruct(self):
  """Tests that structs cannot be nested in unions."""
  # Note: the blank line after the module statement is load-bearing — the
  # expected error below reports line 4.
  source = """\
      module my_module;

      union MyUnion {
        struct MyStruct {
          int32 a;
        };
      };
      """
  # assertRaisesRegex: assertRaisesRegexp was removed in Python 3.12.
  with self.assertRaisesRegex(
      parser.ParseError, r"^my_file\.mojom:4: Error: Unexpected 'struct':\n"
      r" *struct MyStruct {$"):
    parser.Parse(source, "my_file.mojom")

def testUnionDisallowNestedInterfaces(self):
  """Tests that interfaces cannot be nested in unions."""
  source = """\
      module my_module;

      union MyUnion {
        interface MyInterface {
          MyMethod(int32 a);
        };
      };
      """
  with self.assertRaisesRegex(
      parser.ParseError,
      r"^my_file\.mojom:4: Error: Unexpected 'interface':\n"
      r" *interface MyInterface {$"):
    parser.Parse(source, "my_file.mojom")

def testUnionDisallowNestedUnion(self):
  """Tests that unions cannot be nested in unions."""
  source = """\
      module my_module;

      union MyUnion {
        union MyOtherUnion {
          int32 a;
        };
      };
      """
  with self.assertRaisesRegex(
      parser.ParseError, r"^my_file\.mojom:4: Error: Unexpected 'union':\n"
      r" *union MyOtherUnion {$"):
    parser.Parse(source, "my_file.mojom")

def testUnionDisallowNestedEnum(self):
  """Tests that enums cannot be nested in unions."""
  source = """\
      module my_module;

      union MyUnion {
        enum MyEnum {
          A,
        };
      };
      """
  with self.assertRaisesRegex(
      parser.ParseError, r"^my_file\.mojom:4: Error: Unexpected 'enum':\n"
      r" *enum MyEnum {$"):
    parser.Parse(source, "my_file.mojom")
def testValidAssociatedKinds(self):
  """Tests parsing associated interfaces and requests."""
  source1 = """\
      struct MyStruct {
        associated MyInterface a;
        pending_associated_receiver<MyInterface> b;
        associated MyInterface? c;
        pending_associated_receiver<MyInterface>? d;
      };
      """
  expected1 = ast.Mojom(None, ast.ImportList(), [
      ast.Struct(
          'MyStruct', None,
          ast.StructBody([
              ast.StructField('a', None, None, 'asso<MyInterface>', None),
              ast.StructField('b', None, None, 'rca<MyInterface>', None),
              ast.StructField('c', None, None, 'asso<MyInterface>?', None),
              ast.StructField('d', None, None, 'rca<MyInterface>?', None)
          ]))
  ])
  # assertEqual: assertEquals is a deprecated alias removed in Python 3.12.
  self.assertEqual(parser.Parse(source1, "my_file.mojom"), expected1)
  source2 = """\
      interface MyInterface {
        MyMethod(associated A a) =>(pending_associated_receiver<B> b);
      };"""
  expected2 = ast.Mojom(None, ast.ImportList(), [
      ast.Interface(
          'MyInterface', None,
          ast.InterfaceBody(
              ast.Method(
                  'MyMethod', None, None,
                  ast.ParameterList(ast.Parameter('a', None, None,
                                                  'asso<A>')),
                  ast.ParameterList(ast.Parameter('b', None, None,
                                                  'rca<B>')))))
  ])
  self.assertEqual(parser.Parse(source2, "my_file.mojom"), expected2)

def testInvalidAssociatedKinds(self):
  """Tests that invalid associated interfaces and requests are correctly
  detected."""
  source1 = """\
      struct MyStruct {
        associated associated SomeInterface a;
      };
      """
  # assertRaisesRegex: assertRaisesRegexp was removed in Python 3.12.
  with self.assertRaisesRegex(
      parser.ParseError,
      r"^my_file\.mojom:2: Error: Unexpected 'associated':\n"
      r" *associated associated SomeInterface a;$"):
    parser.Parse(source1, "my_file.mojom")
  source2 = """\
      struct MyStruct {
        associated handle a;
      };
      """
  with self.assertRaisesRegex(
      parser.ParseError, r"^my_file\.mojom:2: Error: Unexpected 'handle':\n"
      r" *associated handle a;$"):
    parser.Parse(source2, "my_file.mojom")
  source3 = """\
      struct MyStruct {
        associated? MyInterface& a;
      };
      """
  with self.assertRaisesRegex(
      parser.ParseError, r"^my_file\.mojom:2: Error: Unexpected '\?':\n"
      r" *associated\? MyInterface& a;$"):
    parser.Parse(source3, "my_file.mojom")
# Allow running this suite directly, e.g. `python3 parser_unittest.py`.
if __name__ == "__main__":
  unittest.main()
|
0 | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom/parse/lexer_unittest.py | # Copyright 2014 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import importlib.util
import os.path
import sys
import unittest
def _GetDirAbove(dirname):
  """Returns the directory "above" this file containing |dirname| (which must
  also be "above" this file)."""
  path = os.path.abspath(__file__)
  while True:
    path, tail = os.path.split(path)
    # |tail| becomes empty once we hit the filesystem root; the assert fires
    # there if |dirname| was never found (the docstring's precondition).
    assert tail
    if tail == dirname:
      return path
# ply lives in the vendored third_party directory next to the "mojo" root.
sys.path.insert(1, os.path.join(_GetDirAbove("mojo"), "third_party"))
from ply import lex

# If the mojom package is not importable from the default search path, fall
# back to the checked-in pylib location.
try:
  importlib.util.find_spec("mojom")
except ImportError:
  sys.path.append(os.path.join(_GetDirAbove("pylib"), "pylib"))
import mojom.parse.lexer
# This (monkey-patching LexToken to make comparison value-based) is evil, but
# we'll do it anyway. (I'm pretty sure ply's lexer never cares about comparing
# for object identity.)
def _LexTokenEq(self, other):
return self.type == other.type and self.value == other.value and \
self.lineno == other.lineno and self.lexpos == other.lexpos
# Install the value-based comparison on ply's LexToken class itself.
setattr(lex.LexToken, '__eq__', _LexTokenEq)
def _MakeLexToken(token_type, value, lineno=1, lexpos=0):
  """Makes a LexToken with the given parameters. (Note that lineno is 1-based,
  but lexpos is 0-based.)"""
  token = lex.LexToken()
  token.type = token_type
  token.value = value
  token.lineno = lineno
  token.lexpos = lexpos
  return token
def _MakeLexTokenForKeyword(keyword, **kwargs):
  """Makes a LexToken for the given keyword."""
  # By lexer convention the token type is the upper-cased keyword and the
  # token value is the lower-cased one.
  token_type = keyword.upper()
  token_value = keyword.lower()
  return _MakeLexToken(token_type, token_value, **kwargs)
class LexerTest(unittest.TestCase):
  """Tests |mojom.parse.lexer.Lexer|."""

  def __init__(self, *args, **kwargs):
    unittest.TestCase.__init__(self, *args, **kwargs)
    # Clone all lexer instances from this one, since making a lexer is slow.
    self._zygote_lexer = lex.lex(mojom.parse.lexer.Lexer("my_file.mojom"))

  def testValidKeywords(self):
    """Tests valid keywords."""
    # assertEqual: assertEquals is a deprecated alias removed in Python 3.12.
    self.assertEqual(
        self._SingleTokenForInput("handle"), _MakeLexTokenForKeyword("handle"))
    self.assertEqual(
        self._SingleTokenForInput("import"), _MakeLexTokenForKeyword("import"))
    self.assertEqual(
        self._SingleTokenForInput("module"), _MakeLexTokenForKeyword("module"))
    self.assertEqual(
        self._SingleTokenForInput("struct"), _MakeLexTokenForKeyword("struct"))
    self.assertEqual(
        self._SingleTokenForInput("union"), _MakeLexTokenForKeyword("union"))
    self.assertEqual(
        self._SingleTokenForInput("interface"),
        _MakeLexTokenForKeyword("interface"))
    self.assertEqual(
        self._SingleTokenForInput("enum"), _MakeLexTokenForKeyword("enum"))
    self.assertEqual(
        self._SingleTokenForInput("const"), _MakeLexTokenForKeyword("const"))
    self.assertEqual(
        self._SingleTokenForInput("true"), _MakeLexTokenForKeyword("true"))
    self.assertEqual(
        self._SingleTokenForInput("false"), _MakeLexTokenForKeyword("false"))
    self.assertEqual(
        self._SingleTokenForInput("default"),
        _MakeLexTokenForKeyword("default"))
    self.assertEqual(
        self._SingleTokenForInput("array"), _MakeLexTokenForKeyword("array"))
    self.assertEqual(
        self._SingleTokenForInput("map"), _MakeLexTokenForKeyword("map"))
    self.assertEqual(
        self._SingleTokenForInput("associated"),
        _MakeLexTokenForKeyword("associated"))

  def testValidIdentifiers(self):
    """Tests identifiers."""
    self.assertEqual(
        self._SingleTokenForInput("abcd"), _MakeLexToken("NAME", "abcd"))
    self.assertEqual(
        self._SingleTokenForInput("AbC_d012_"),
        _MakeLexToken("NAME", "AbC_d012_"))
    self.assertEqual(
        self._SingleTokenForInput("_0123"), _MakeLexToken("NAME", "_0123"))

  def testInvalidIdentifiers(self):
    # assertRaisesRegex: assertRaisesRegexp was removed in Python 3.12.
    with self.assertRaisesRegex(
        mojom.parse.lexer.LexError,
        r"^my_file\.mojom:1: Error: Illegal character '\$'$"):
      self._TokensForInput("$abc")
    with self.assertRaisesRegex(
        mojom.parse.lexer.LexError,
        r"^my_file\.mojom:1: Error: Illegal character '\$'$"):
      self._TokensForInput("a$bc")

  def testDecimalIntegerConstants(self):
    self.assertEqual(
        self._SingleTokenForInput("0"), _MakeLexToken("INT_CONST_DEC", "0"))
    self.assertEqual(
        self._SingleTokenForInput("1"), _MakeLexToken("INT_CONST_DEC", "1"))
    self.assertEqual(
        self._SingleTokenForInput("123"), _MakeLexToken("INT_CONST_DEC", "123"))
    self.assertEqual(
        self._SingleTokenForInput("10"), _MakeLexToken("INT_CONST_DEC", "10"))

  def testValidTokens(self):
    """Tests valid tokens (which aren't tested elsewhere)."""
    # Keywords tested in |testValidKeywords|.
    # NAME tested in |testValidIdentifiers|.
    self.assertEqual(
        self._SingleTokenForInput("@123"), _MakeLexToken("ORDINAL", "@123"))
    self.assertEqual(
        self._SingleTokenForInput("456"), _MakeLexToken("INT_CONST_DEC", "456"))
    self.assertEqual(
        self._SingleTokenForInput("0x01aB2eF3"),
        _MakeLexToken("INT_CONST_HEX", "0x01aB2eF3"))
    self.assertEqual(
        self._SingleTokenForInput("123.456"),
        _MakeLexToken("FLOAT_CONST", "123.456"))
    self.assertEqual(
        self._SingleTokenForInput("\"hello\""),
        _MakeLexToken("STRING_LITERAL", "\"hello\""))
    self.assertEqual(
        self._SingleTokenForInput("+"), _MakeLexToken("PLUS", "+"))
    self.assertEqual(
        self._SingleTokenForInput("-"), _MakeLexToken("MINUS", "-"))
    self.assertEqual(
        self._SingleTokenForInput("?"), _MakeLexToken("QSTN", "?"))
    self.assertEqual(
        self._SingleTokenForInput("="), _MakeLexToken("EQUALS", "="))
    self.assertEqual(
        self._SingleTokenForInput("=>"), _MakeLexToken("RESPONSE", "=>"))
    self.assertEqual(
        self._SingleTokenForInput("("), _MakeLexToken("LPAREN", "("))
    self.assertEqual(
        self._SingleTokenForInput(")"), _MakeLexToken("RPAREN", ")"))
    self.assertEqual(
        self._SingleTokenForInput("["), _MakeLexToken("LBRACKET", "["))
    self.assertEqual(
        self._SingleTokenForInput("]"), _MakeLexToken("RBRACKET", "]"))
    self.assertEqual(
        self._SingleTokenForInput("{"), _MakeLexToken("LBRACE", "{"))
    self.assertEqual(
        self._SingleTokenForInput("}"), _MakeLexToken("RBRACE", "}"))
    self.assertEqual(
        self._SingleTokenForInput("<"), _MakeLexToken("LANGLE", "<"))
    self.assertEqual(
        self._SingleTokenForInput(">"), _MakeLexToken("RANGLE", ">"))
    self.assertEqual(
        self._SingleTokenForInput(";"), _MakeLexToken("SEMI", ";"))
    self.assertEqual(
        self._SingleTokenForInput(","), _MakeLexToken("COMMA", ","))
    self.assertEqual(self._SingleTokenForInput("."), _MakeLexToken("DOT", "."))

  def _TokensForInput(self, input_string):
    """Gets a list of tokens for the given input string."""
    lexer = self._zygote_lexer.clone()
    lexer.input(input_string)
    rv = []
    while True:
      tok = lexer.token()
      if not tok:
        return rv
      rv.append(tok)

  def _SingleTokenForInput(self, input_string):
    """Gets the single token for the given input string. (Raises an exception if
    the input string does not result in exactly one token.)"""
    toks = self._TokensForInput(input_string)
    assert len(toks) == 1
    return toks[0]
# Allow running this suite directly, e.g. `python3 lexer_unittest.py`.
if __name__ == "__main__":
  unittest.main()
|
0 | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom/parse/ast.py | # Copyright 2014 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Node classes for the AST for a Mojo IDL file."""
# Note: For convenience of testing, you probably want to define __eq__() methods
# for all node types; it's okay to be slightly lax (e.g., not compare filename
# and lineno). You may also define __repr__() to help with analyzing test
# failures, especially for more complex types.
import os.path
# Instance of 'NodeListBase' has no '_list_item_type' member (no-member)
# pylint: disable=no-member
class NodeBase:
  """Base class for nodes in the AST."""

  def __init__(self, filename=None, lineno=None):
    # Position information is informational only and is deliberately excluded
    # from equality comparisons (see the note at the top of this file).
    self.filename = filename
    self.lineno = lineno

  def __eq__(self, other):
    # We want strict comparison of the two object's types: a subclass must not
    # compare equal to its base, so isinstance() would be wrong here.
    # pylint: disable=unidiomatic-typecheck
    return type(self) == type(other)

  def __ne__(self, other):
    # Defined once here as the inverse of ==, so subclasses only need to
    # override __eq__.
    return not self == other
# TODO(vtl): Some of this is complicated enough that it should be tested.
class NodeListBase(NodeBase):
  """Represents a list of other nodes, all having the same type. (This is meant
  to be subclassed, with subclasses defining _list_item_type to be the class (or
  classes, in a tuple) of the members of the list.)"""

  def __init__(self, item_or_items=None, **kwargs):
    super().__init__(**kwargs)
    self.items = []
    if item_or_items is not None:
      if isinstance(item_or_items, list):
        for item in item_or_items:
          assert isinstance(item, self._list_item_type)
          self.Append(item)
      else:
        assert isinstance(item_or_items, self._list_item_type)
        self.Append(item_or_items)

  # Support iteration. For everything else, users should just access |items|
  # directly. (We intentionally do NOT supply |__len__()| or |__nonzero__()|, so
  # |bool(NodeListBase())| is true.)
  def __iter__(self):
    return iter(self.items)

  def __eq__(self, other):
    return super().__eq__(other) and self.items == other.items

  # Implement this so that on failure, we get slightly more sensible output.
  def __repr__(self):
    contents = ", ".join(repr(elem) for elem in self.items)
    return self.__class__.__name__ + "([" + contents + "])"

  def Insert(self, item):
    """Inserts item at the front of the list."""
    assert isinstance(item, self._list_item_type)
    self.items.insert(0, item)
    self._UpdateFilenameAndLineno()

  def Append(self, item):
    """Appends item to the end of the list."""
    assert isinstance(item, self._list_item_type)
    self.items.append(item)
    self._UpdateFilenameAndLineno()

  def _UpdateFilenameAndLineno(self):
    # The list adopts the position of its first element.
    if self.items:
      self.filename = self.items[0].filename
      self.lineno = self.items[0].lineno
class Definition(NodeBase):
  """Represents a definition of anything that has a global name (e.g., enums,
  enum values, consts, structs, struct fields, interfaces). (This does not
  include parameter definitions.) This class is meant to be subclassed."""

  def __init__(self, mojom_name, **kwargs):
    # |mojom_name| is the identifier as written in the .mojom source.
    assert isinstance(mojom_name, str)
    # Use super() for consistency with every other NodeBase subclass in this
    # module (this previously called NodeBase.__init__ directly, which is
    # equivalent for this single-inheritance hierarchy but inconsistent).
    super().__init__(**kwargs)
    self.mojom_name = mojom_name
################################################################################


class Attribute(NodeBase):
  """Represents an attribute."""

  def __init__(self, key, value, **kwargs):
    # |key| is the attribute name; |value| may be any attribute payload
    # (literal, name, wrapped identifier, or True for a bare attribute).
    assert isinstance(key, str)
    super().__init__(**kwargs)
    self.key = key
    self.value = value

  def __eq__(self, other):
    return super().__eq__(other) and \
           self.key == other.key and \
           self.value == other.value


class AttributeList(NodeListBase):
  """Represents a list attributes."""

  _list_item_type = Attribute
class Const(Definition):
  """Represents a const definition."""

  def __init__(self, mojom_name, attribute_list, typename, value, **kwargs):
    assert attribute_list is None or isinstance(attribute_list, AttributeList)
    # The typename is currently passed through as a string.
    assert isinstance(typename, str)
    # The value is either a literal (currently passed through as a string) or a
    # "wrapped identifier".
    assert isinstance(value, (tuple, str))
    super().__init__(mojom_name, **kwargs)
    self.attribute_list = attribute_list
    self.typename = typename
    self.value = value

  def __eq__(self, other):
    return super().__eq__(other) and \
           self.attribute_list == other.attribute_list and \
           self.typename == other.typename and \
           self.value == other.value
class Enum(Definition):
  """Represents an enum definition."""

  def __init__(self, mojom_name, attribute_list, enum_value_list, **kwargs):
    assert attribute_list is None or isinstance(attribute_list, AttributeList)
    assert enum_value_list is None or isinstance(enum_value_list, EnumValueList)
    super().__init__(mojom_name, **kwargs)
    self.attribute_list = attribute_list
    self.enum_value_list = enum_value_list

  def __eq__(self, other):
    return super().__eq__(other) and \
           self.attribute_list == other.attribute_list and \
           self.enum_value_list == other.enum_value_list


class EnumValue(Definition):
  """Represents a definition of an enum value."""

  def __init__(self, mojom_name, attribute_list, value, **kwargs):
    # The optional value is either an int (which is currently a string) or a
    # "wrapped identifier".
    assert attribute_list is None or isinstance(attribute_list, AttributeList)
    assert value is None or isinstance(value, (tuple, str))
    super().__init__(mojom_name, **kwargs)
    self.attribute_list = attribute_list
    self.value = value

  def __eq__(self, other):
    return super().__eq__(other) and \
           self.attribute_list == other.attribute_list and \
           self.value == other.value


# This needs to be declared after |EnumValue|.
class EnumValueList(NodeListBase):
  """Represents a list of enum value definitions (i.e., the "body" of an enum
  definition)."""

  _list_item_type = EnumValue
class Feature(Definition):
  """Represents a runtime feature definition."""

  def __init__(self, mojom_name, attribute_list, body, **kwargs):
    assert attribute_list is None or isinstance(attribute_list, AttributeList)
    # |body| may be None for a bodiless declaration.
    assert isinstance(body, FeatureBody) or body is None
    super().__init__(mojom_name, **kwargs)
    self.attribute_list = attribute_list
    self.body = body

  def __eq__(self, other):
    return super().__eq__(other) and \
           self.attribute_list == other.attribute_list and \
           self.body == other.body

  def __repr__(self):
    return "Feature(mojom_name = %s, attribute_list = %s, body = %s)" % (
        self.mojom_name, self.attribute_list, self.body)


# This needs to be declared after |Const|.
class FeatureBody(NodeListBase):
  """Represents the body of (i.e., list of definitions inside) a feature."""

  # Features are compile time helpers so all fields are initializers/consts
  # for the underlying platform feature type.
  # Note: a real one-element tuple. The previous spelling |(Const)| was just
  # Const with redundant parentheses; a tuple matches the other *Body classes
  # and makes adding member types safe.
  _list_item_type = (Const, )
class Import(NodeBase):
  """Represents an import statement."""

  def __init__(self, attribute_list, import_filename, **kwargs):
    assert attribute_list is None or isinstance(attribute_list, AttributeList)
    assert isinstance(import_filename, str)
    super().__init__(**kwargs)
    self.attribute_list = attribute_list
    # TODO(crbug.com/953884): Use pathlib once we're migrated fully to Python 3.
    # Normalize the path and force forward slashes so imports compare equal
    # across host platforms.
    self.import_filename = os.path.normpath(import_filename).replace('\\', '/')

  def __eq__(self, other):
    return super().__eq__(other) and \
           self.attribute_list == other.attribute_list and \
           self.import_filename == other.import_filename


class ImportList(NodeListBase):
  """Represents a list (i.e., sequence) of import statements."""

  _list_item_type = Import
class Interface(Definition):
  """Represents an interface definition."""

  def __init__(self, mojom_name, attribute_list, body, **kwargs):
    assert attribute_list is None or isinstance(attribute_list, AttributeList)
    assert isinstance(body, InterfaceBody)
    super().__init__(mojom_name, **kwargs)
    self.attribute_list = attribute_list
    self.body = body

  def __eq__(self, other):
    return super().__eq__(other) and \
           self.attribute_list == other.attribute_list and \
           self.body == other.body


class Method(Definition):
  """Represents a method definition."""

  def __init__(self, mojom_name, attribute_list, ordinal, parameter_list,
               response_parameter_list, **kwargs):
    assert attribute_list is None or isinstance(attribute_list, AttributeList)
    assert ordinal is None or isinstance(ordinal, Ordinal)
    assert isinstance(parameter_list, ParameterList)
    # None means no response was declared; an empty ParameterList means a
    # declared response that takes no arguments.
    assert response_parameter_list is None or \
           isinstance(response_parameter_list, ParameterList)
    super().__init__(mojom_name, **kwargs)
    self.attribute_list = attribute_list
    self.ordinal = ordinal
    self.parameter_list = parameter_list
    self.response_parameter_list = response_parameter_list

  def __eq__(self, other):
    return super().__eq__(other) and \
           self.attribute_list == other.attribute_list and \
           self.ordinal == other.ordinal and \
           self.parameter_list == other.parameter_list and \
           self.response_parameter_list == other.response_parameter_list


# This needs to be declared after |Method|.
class InterfaceBody(NodeListBase):
  """Represents the body of (i.e., list of definitions inside) an interface."""

  _list_item_type = (Const, Enum, Method)
class Module(NodeBase):
  """Represents a module statement."""

  def __init__(self, mojom_namespace, attribute_list, **kwargs):
    # |mojom_namespace| is either None or a "wrapped identifier".
    assert mojom_namespace is None or isinstance(mojom_namespace, tuple)
    assert attribute_list is None or isinstance(attribute_list, AttributeList)
    super().__init__(**kwargs)
    self.mojom_namespace = mojom_namespace
    self.attribute_list = attribute_list

  def __eq__(self, other):
    return super().__eq__(other) and \
           self.mojom_namespace == other.mojom_namespace and \
           self.attribute_list == other.attribute_list
class Mojom(NodeBase):
  """Represents an entire .mojom file. (This is the root node.)"""

  def __init__(self, module, import_list, definition_list, **kwargs):
    # |module| is optional: a .mojom file need not have a module statement.
    assert module is None or isinstance(module, Module)
    assert isinstance(import_list, ImportList)
    # |definition_list| is a plain Python list (not a NodeListBase) of
    # top-level definitions.
    assert isinstance(definition_list, list)
    super().__init__(**kwargs)
    self.module = module
    self.import_list = import_list
    self.definition_list = definition_list

  def __eq__(self, other):
    return super().__eq__(other) and \
           self.module == other.module and \
           self.import_list == other.import_list and \
           self.definition_list == other.definition_list

  def __repr__(self):
    return "%s(%r, %r, %r)" % (self.__class__.__name__, self.module,
                               self.import_list, self.definition_list)
class Ordinal(NodeBase):
  """Represents an ordinal value labeling, e.g., a struct field."""

  def __init__(self, value, **kwargs):
    assert isinstance(value, int)
    super().__init__(**kwargs)
    self.value = value

  def __eq__(self, other):
    return super().__eq__(other) and \
           self.value == other.value
class Parameter(NodeBase):
  """Represents a method request or response parameter."""

  def __init__(self, mojom_name, attribute_list, ordinal, typename, **kwargs):
    assert isinstance(mojom_name, str)
    assert attribute_list is None or isinstance(attribute_list, AttributeList)
    assert ordinal is None or isinstance(ordinal, Ordinal)
    # The typename is passed through as a string.
    assert isinstance(typename, str)
    super().__init__(**kwargs)
    self.mojom_name = mojom_name
    self.attribute_list = attribute_list
    self.ordinal = ordinal
    self.typename = typename

  def __eq__(self, other):
    return super().__eq__(other) and \
           self.mojom_name == other.mojom_name and \
           self.attribute_list == other.attribute_list and \
           self.ordinal == other.ordinal and \
           self.typename == other.typename


class ParameterList(NodeListBase):
  """Represents a list of (method request or response) parameters."""

  _list_item_type = Parameter
class Struct(Definition):
  """Represents a struct definition."""

  def __init__(self, mojom_name, attribute_list, body, **kwargs):
    assert attribute_list is None or isinstance(attribute_list, AttributeList)
    # |body| is None for a bodiless declaration (|struct Name;|).
    assert isinstance(body, StructBody) or body is None
    super().__init__(mojom_name, **kwargs)
    self.attribute_list = attribute_list
    self.body = body

  def __eq__(self, other):
    return super().__eq__(other) and \
           self.attribute_list == other.attribute_list and \
           self.body == other.body

  def __repr__(self):
    return "Struct(mojom_name = %s, attribute_list = %s, body = %s)" % (
        self.mojom_name, self.attribute_list, self.body)
class StructField(Definition):
  """Represents a struct field definition."""

  def __init__(self, mojom_name, attribute_list, ordinal, typename,
               default_value, **kwargs):
    assert isinstance(mojom_name, str)
    assert attribute_list is None or isinstance(attribute_list, AttributeList)
    assert ordinal is None or isinstance(ordinal, Ordinal)
    assert isinstance(typename, str)
    # The optional default value is currently either a value as a string or a
    # "wrapped identifier".
    assert default_value is None or isinstance(default_value, (str, tuple))
    super().__init__(mojom_name, **kwargs)
    self.attribute_list = attribute_list
    self.ordinal = ordinal
    self.typename = typename
    self.default_value = default_value

  def __eq__(self, other):
    return super().__eq__(other) and \
           self.attribute_list == other.attribute_list and \
           self.ordinal == other.ordinal and \
           self.typename == other.typename and \
           self.default_value == other.default_value

  def __repr__(self):
    # Bug fix: the format string previously lacked the trailing ")", so reprs
    # came out unbalanced ("StructField(...").
    return ("StructField(mojom_name = %s, attribute_list = %s, ordinal = %s, "
            "typename = %s, default_value = %s)") % (
                self.mojom_name, self.attribute_list, self.ordinal,
                self.typename, self.default_value)
# This needs to be declared after |StructField|.
class StructBody(NodeListBase):
  """Represents the body of (i.e., list of definitions inside) a struct."""

  _list_item_type = (Const, Enum, StructField)
class Union(Definition):
  """Represents a union definition."""

  def __init__(self, mojom_name, attribute_list, body, **kwargs):
    assert attribute_list is None or isinstance(attribute_list, AttributeList)
    assert isinstance(body, UnionBody)
    super().__init__(mojom_name, **kwargs)
    self.attribute_list = attribute_list
    self.body = body

  def __eq__(self, other):
    return super().__eq__(other) and \
           self.attribute_list == other.attribute_list and \
           self.body == other.body
class UnionField(Definition):
  """Represents a union field definition."""

  def __init__(self, mojom_name, attribute_list, ordinal, typename, **kwargs):
    assert isinstance(mojom_name, str)
    assert attribute_list is None or isinstance(attribute_list, AttributeList)
    assert ordinal is None or isinstance(ordinal, Ordinal)
    assert isinstance(typename, str)
    super().__init__(mojom_name, **kwargs)
    self.attribute_list = attribute_list
    self.ordinal = ordinal
    self.typename = typename

  def __eq__(self, other):
    return super().__eq__(other) and \
           self.attribute_list == other.attribute_list and \
           self.ordinal == other.ordinal and \
           self.typename == other.typename


# This needs to be declared after |UnionField|.
class UnionBody(NodeListBase):
  """Represents the body of (i.e., list of definitions inside) a union."""

  _list_item_type = UnionField
|
0 | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom/parse/lexer.py | # Copyright 2014 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os.path
import sys
from mojom import fileutil
from mojom.error import Error
fileutil.AddLocalRepoThirdPartyDirToModulePath()
from ply.lex import TOKEN
class LexError(Error):
  """Class for errors from the lexer."""

  def __init__(self, filename, message, lineno):
    # Unlike the base class, |lineno| is required here: a lexing error always
    # has a definite source line.
    Error.__init__(self, filename, message, lineno=lineno)
# We have methods which look like they could be functions:
# pylint: disable=R0201
class Lexer:
  """PLY lexer definition for the Mojo IDL language.

  NOTE: PLY builds the tokenizer from the t_* attributes and methods below.
  Rule docstrings are the token regexes and the definition order of the
  function rules determines matching priority, so neither may be rearranged
  casually.
  """

  def __init__(self, filename):
    # |filename| is only used for error reporting.
    self.filename = filename

  ######################--   PRIVATE   --######################

  ##
  ## Internal auxiliary methods
  ##
  def _error(self, msg, token):
    # All lexing problems are reported as LexError, tagged with the current
    # file and the offending token's line.
    raise LexError(self.filename, msg, token.lineno)

  ##
  ## Reserved keywords
  ##
  keywords = (
      'HANDLE',
      'IMPORT',
      'MODULE',
      'STRUCT',
      'UNION',
      'INTERFACE',
      'ENUM',
      'CONST',
      'TRUE',
      'FALSE',
      'DEFAULT',
      'ARRAY',
      'MAP',
      'ASSOCIATED',
      'PENDING_REMOTE',
      'PENDING_RECEIVER',
      'PENDING_ASSOCIATED_REMOTE',
      'PENDING_ASSOCIATED_RECEIVER',
      'FEATURE',
  )

  # Maps the lowercase source spelling (e.g. "struct") to the token name.
  keyword_map = {}
  for keyword in keywords:
    keyword_map[keyword.lower()] = keyword

  ##
  ## All the tokens recognized by the lexer
  ##
  tokens = keywords + (
      # Identifiers
      'NAME',

      # Constants
      'ORDINAL',
      'INT_CONST_DEC',
      'INT_CONST_HEX',
      'FLOAT_CONST',

      # String literals
      'STRING_LITERAL',

      # Operators
      'MINUS',
      'PLUS',
      'QSTN',

      # Assignment
      'EQUALS',

      # Request / response
      'RESPONSE',

      # Delimiters
      'LPAREN',
      'RPAREN',  # ( )
      'LBRACKET',
      'RBRACKET',  # [ ]
      'LBRACE',
      'RBRACE',  # { }
      'LANGLE',
      'RANGLE',  # < >
      'SEMI',  # ;
      'COMMA',
      'DOT'  # , .
  )

  ##
  ## Regexes for use in tokens
  ##

  # valid C identifiers (K&R2: A.2.3)
  identifier = r'[a-zA-Z_][0-9a-zA-Z_]*'

  hex_prefix = '0[xX]'
  hex_digits = '[0-9a-fA-F]+'

  # integer constants (K&R2: A.2.5.1)
  decimal_constant = '0|([1-9][0-9]*)'
  hex_constant = hex_prefix + hex_digits
  # Don't allow octal constants (even invalid octal).
  octal_constant_disallowed = '0[0-9]+'

  # character constants (K&R2: A.2.5.2)
  # Note: a-zA-Z and '.-~^_!=&;,' are allowed as escape chars to support #line
  # directives with Windows paths as filenames (..\..\dir\file)
  # For the same reason, decimal_escape allows all digit sequences. We want to
  # parse all correct code, even if it means to sometimes parse incorrect
  # code.
  #
  simple_escape = r"""([a-zA-Z._~!=&\^\-\\?'"])"""
  decimal_escape = r"""(\d+)"""
  hex_escape = r"""(x[0-9a-fA-F]+)"""
  bad_escape = r"""([\\][^a-zA-Z._~^!=&\^\-\\?'"x0-7])"""

  escape_sequence = \
      r"""(\\("""+simple_escape+'|'+decimal_escape+'|'+hex_escape+'))'

  # string literals (K&R2: A.2.6)
  string_char = r"""([^"\\\n]|""" + escape_sequence + ')'
  string_literal = '"' + string_char + '*"'
  bad_string_literal = '"' + string_char + '*' + bad_escape + string_char + '*"'

  # floating constants (K&R2: A.2.5.3)
  exponent_part = r"""([eE][-+]?[0-9]+)"""
  fractional_constant = r"""([0-9]*\.[0-9]+)|([0-9]+\.)"""
  floating_constant = \
      '(((('+fractional_constant+')'+ \
      exponent_part+'?)|([0-9]+'+exponent_part+')))'

  # Ordinals
  ordinal = r'@[0-9]+'
  missing_ordinal_value = r'@'
  # Don't allow ordinal values in octal (even invalid octal, like 09) or
  # hexadecimal.
  octal_or_hex_ordinal_disallowed = (
      r'@((0[0-9]+)|(' + hex_prefix + hex_digits + '))')

  ##
  ## Rules for the normal state
  ##
  t_ignore = ' \t\r'

  # Newlines
  def t_NEWLINE(self, t):
    r'\n+'
    t.lexer.lineno += len(t.value)

  # Operators
  t_MINUS = r'-'
  t_PLUS = r'\+'
  t_QSTN = r'\?'

  # =
  t_EQUALS = r'='

  # =>
  # (PLY sorts string rules by regex length, so '=>' is tried before '='.)
  t_RESPONSE = r'=>'

  # Delimiters
  t_LPAREN = r'\('
  t_RPAREN = r'\)'
  t_LBRACKET = r'\['
  t_RBRACKET = r'\]'
  t_LBRACE = r'\{'
  t_RBRACE = r'\}'
  t_LANGLE = r'<'
  t_RANGLE = r'>'
  t_COMMA = r','
  t_DOT = r'\.'
  t_SEMI = r';'

  t_STRING_LITERAL = string_literal

  # The following floating and integer constants are defined as
  # functions to impose a strict order (otherwise, decimal
  # is placed before the others because its regex is longer,
  # and this is bad)
  #
  @TOKEN(floating_constant)
  def t_FLOAT_CONST(self, t):
    return t

  @TOKEN(hex_constant)
  def t_INT_CONST_HEX(self, t):
    return t

  @TOKEN(octal_constant_disallowed)
  def t_OCTAL_CONSTANT_DISALLOWED(self, t):
    msg = "Octal values not allowed"
    self._error(msg, t)

  @TOKEN(decimal_constant)
  def t_INT_CONST_DEC(self, t):
    return t

  # unmatched string literals are caught by the preprocessor
  @TOKEN(bad_string_literal)
  def t_BAD_STRING_LITERAL(self, t):
    msg = "String contains invalid escape code"
    self._error(msg, t)

  # Handle ordinal-related tokens in the right order:
  @TOKEN(octal_or_hex_ordinal_disallowed)
  def t_OCTAL_OR_HEX_ORDINAL_DISALLOWED(self, t):
    msg = "Octal and hexadecimal ordinal values not allowed"
    self._error(msg, t)

  @TOKEN(ordinal)
  def t_ORDINAL(self, t):
    return t

  @TOKEN(missing_ordinal_value)
  def t_BAD_ORDINAL(self, t):
    msg = "Missing ordinal value"
    self._error(msg, t)

  @TOKEN(identifier)
  def t_NAME(self, t):
    # Reclassify identifiers that are actually reserved keywords.
    t.type = self.keyword_map.get(t.value, "NAME")
    return t

  # Ignore C and C++ style comments
  def t_COMMENT(self, t):
    r'(/\*(.|\n)*?\*/)|(//.*(\n[ \t]*//.*)*)'
    t.lexer.lineno += t.value.count("\n")

  def t_error(self, t):
    msg = "Illegal character %s" % repr(t.value[0])
    self._error(msg, t)
|
0 | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom/parse/ast_unittest.py | # Copyright 2014 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from mojom.parse import ast
class _TestNode(ast.NodeBase):
  """Node type for tests."""

  def __init__(self, value, **kwargs):
    super().__init__(**kwargs)
    self.value = value

  def __eq__(self, other):
    # Defer to the base class's strict type check first.
    return super().__eq__(other) and self.value == other.value


class _TestNodeList(ast.NodeListBase):
  """Node list type for tests."""

  _list_item_type = _TestNode
class ASTTest(unittest.TestCase):
  """Tests various AST classes."""

  # Note: assertEqual()/assertNotEqual() are used throughout instead of the
  # deprecated assertEquals()/assertNotEquals() aliases, which were removed
  # in Python 3.12.

  def testNodeBase(self):
    # Test |__eq__()|; this is only used for testing, where we want to do
    # comparison by value and ignore filenames/line numbers (for convenience).
    node1 = ast.NodeBase(filename="hello.mojom", lineno=123)
    node2 = ast.NodeBase()
    self.assertEqual(node1, node2)
    self.assertEqual(node2, node1)

    # Check that |__ne__()| just defers to |__eq__()| properly.
    self.assertFalse(node1 != node2)
    self.assertFalse(node2 != node1)

    # Check that |filename| and |lineno| are set properly (and are None by
    # default).
    self.assertEqual(node1.filename, "hello.mojom")
    self.assertEqual(node1.lineno, 123)
    self.assertIsNone(node2.filename)
    self.assertIsNone(node2.lineno)

    # |NodeBase|'s |__eq__()| should compare types (and a subclass's |__eq__()|
    # should first defer to its superclass's).
    node3 = _TestNode(123)
    self.assertNotEqual(node1, node3)
    self.assertNotEqual(node3, node1)
    # Also test |__eq__()| directly.
    self.assertFalse(node1 == node3)
    self.assertFalse(node3 == node1)

    node4 = _TestNode(123, filename="world.mojom", lineno=123)
    self.assertEqual(node4, node3)
    node5 = _TestNode(456)
    self.assertNotEqual(node5, node4)

  def testNodeListBase(self):
    node1 = _TestNode(1, filename="foo.mojom", lineno=1)
    # Equal to, but not the same as, |node1|:
    node1b = _TestNode(1, filename="foo.mojom", lineno=1)
    node2 = _TestNode(2, filename="foo.mojom", lineno=2)

    nodelist1 = _TestNodeList()  # Contains: (empty).
    self.assertEqual(nodelist1, nodelist1)
    self.assertEqual(nodelist1.items, [])
    self.assertIsNone(nodelist1.filename)
    self.assertIsNone(nodelist1.lineno)

    nodelist2 = _TestNodeList(node1)  # Contains: 1.
    self.assertEqual(nodelist2, nodelist2)
    self.assertEqual(nodelist2.items, [node1])
    self.assertNotEqual(nodelist2, nodelist1)
    self.assertEqual(nodelist2.filename, "foo.mojom")
    self.assertEqual(nodelist2.lineno, 1)

    nodelist3 = _TestNodeList([node2])  # Contains: 2.
    self.assertEqual(nodelist3.items, [node2])
    self.assertNotEqual(nodelist3, nodelist1)
    self.assertNotEqual(nodelist3, nodelist2)
    self.assertEqual(nodelist3.filename, "foo.mojom")
    self.assertEqual(nodelist3.lineno, 2)

    nodelist1.Append(node1b)  # Contains: 1.
    self.assertEqual(nodelist1.items, [node1])
    self.assertEqual(nodelist1, nodelist2)
    self.assertNotEqual(nodelist1, nodelist3)
    self.assertEqual(nodelist1.filename, "foo.mojom")
    self.assertEqual(nodelist1.lineno, 1)

    nodelist1.Append(node2)  # Contains: 1, 2.
    self.assertEqual(nodelist1.items, [node1, node2])
    self.assertNotEqual(nodelist1, nodelist2)
    self.assertNotEqual(nodelist1, nodelist3)
    self.assertEqual(nodelist1.lineno, 1)

    nodelist2.Append(node2)  # Contains: 1, 2.
    self.assertEqual(nodelist2.items, [node1, node2])
    self.assertEqual(nodelist2, nodelist1)
    self.assertNotEqual(nodelist2, nodelist3)
    self.assertEqual(nodelist2.lineno, 1)

    nodelist3.Insert(node1)  # Contains: 1, 2.
    self.assertEqual(nodelist3.items, [node1, node2])
    self.assertEqual(nodelist3, nodelist1)
    self.assertEqual(nodelist3, nodelist2)
    self.assertEqual(nodelist3.lineno, 1)

    # Test iteration:
    i = 1
    for item in nodelist1:
      self.assertEqual(item.value, i)
      i += 1
|
0 | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom/parse/conditional_features.py | # Copyright 2018 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helpers for processing conditionally enabled features in a mojom."""
from mojom.error import Error
from mojom.parse import ast
class EnableIfError(Error):
  """Class for errors from EnableIf/EnableIfNot attribute processing."""

  def __init__(self, filename, message, lineno=None):
    Error.__init__(self, filename, message, lineno=lineno, addenda=None)
def _IsEnabled(definition, enabled_features):
"""Returns true if a definition is enabled.
A definition is enabled if it has no EnableIf/EnableIfNot attribute.
It is retained if it has an EnableIf attribute and the attribute is in
enabled_features. It is retained if it has an EnableIfNot attribute and the
attribute is not in enabled features.
"""
if not hasattr(definition, "attribute_list"):
return True
if not definition.attribute_list:
return True
already_defined = False
for a in definition.attribute_list:
if a.key == 'EnableIf' or a.key == 'EnableIfNot':
if already_defined:
raise EnableIfError(
definition.filename,
"EnableIf/EnableIfNot attribute may only be set once per field.",
definition.lineno)
already_defined = True
for attribute in definition.attribute_list:
if attribute.key == 'EnableIf' and attribute.value not in enabled_features:
return False
if attribute.key == 'EnableIfNot' and attribute.value in enabled_features:
return False
return True
def _FilterDisabledFromNodeList(node_list, enabled_features):
  """Removes disabled items from |node_list| in place, then recurses into the
  surviving items to filter their own bodies. |node_list| may be None."""
  if not node_list:
    return
  assert isinstance(node_list, ast.NodeListBase)
  node_list.items = [
      item for item in node_list.items if _IsEnabled(item, enabled_features)
  ]
  for item in node_list.items:
    _FilterDefinition(item, enabled_features)
def _FilterDefinition(definition, enabled_features):
  """Filters definitions with a body."""
  # Each branch filters whichever child node list(s) the definition type has;
  # definitions without a body (e.g. Const) are left untouched.
  if isinstance(definition, ast.Enum):
    _FilterDisabledFromNodeList(definition.enum_value_list, enabled_features)
  elif isinstance(definition, ast.Method):
    _FilterDisabledFromNodeList(definition.parameter_list, enabled_features)
    _FilterDisabledFromNodeList(definition.response_parameter_list,
                                enabled_features)
  elif isinstance(definition,
                  (ast.Interface, ast.Struct, ast.Union, ast.Feature)):
    _FilterDisabledFromNodeList(definition.body, enabled_features)
def RemoveDisabledDefinitions(mojom, enabled_features):
  """Removes conditionally disabled definitions from a Mojom node."""
  # Filter the imports and top-level definitions first, then recurse into the
  # bodies of whatever survived.
  mojom.import_list = ast.ImportList([
      imported_file for imported_file in mojom.import_list
      if _IsEnabled(imported_file, enabled_features)
  ])
  mojom.definition_list = [
      definition for definition in mojom.definition_list
      if _IsEnabled(definition, enabled_features)
  ]
  for definition in mojom.definition_list:
    _FilterDefinition(definition, enabled_features)
|
0 | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom | repos/libcamera/utils/ipc/mojo/public/tools/mojom/mojom/parse/parser.py | # Copyright 2014 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates a syntax tree from a Mojo IDL file."""
# Breaking parser stanzas is unhelpful so allow longer lines.
# pylint: disable=line-too-long
import os.path
import sys
from mojom import fileutil
from mojom.error import Error
from mojom.parse import ast
from mojom.parse.lexer import Lexer
fileutil.AddLocalRepoThirdPartyDirToModulePath()
from ply import lex
from ply import yacc
_MAX_ORDINAL_VALUE = 0xffffffff
_MAX_ARRAY_SIZE = 0xffffffff
class ParseError(Error):
  """Class for errors from the parser."""

  def __init__(self, filename, message, lineno=None, snippet=None):
    # |snippet|, when given, is attached as a one-element addenda list so the
    # offending source line is shown with the error.
    Error.__init__(
        self,
        filename,
        message,
        lineno=lineno,
        addenda=([snippet] if snippet else None))
# We have methods which look like they could be functions:
# pylint: disable=R0201
class Parser:
def __init__(self, lexer, source, filename):
self.tokens = lexer.tokens
self.source = source
self.filename = filename
# Names of functions
#
# In general, we name functions after the left-hand-side of the rule(s) that
# they handle. E.g., |p_foo_bar| for a rule |foo_bar : ...|.
#
# There may be multiple functions handling rules for the same left-hand-side;
# then we name the functions |p_foo_bar_N| (for left-hand-side |foo_bar|),
# where N is a number (numbered starting from 1). Note that using multiple
# functions is actually more efficient than having single functions handle
# multiple rules (and, e.g., distinguishing them by examining |len(p)|).
#
# It's also possible to have a function handling multiple rules with different
# left-hand-sides. We do not do this.
#
# See http://www.dabeaz.com/ply/ply.html#ply_nn25 for more details.
# TODO(vtl): Get rid of the braces in the module "statement". (Consider
# renaming "module" -> "package".) Then we'll be able to have a single rule
# for root (by making module "optional").
def p_root_1(self, p):
"""root : """
p[0] = ast.Mojom(None, ast.ImportList(), [])
def p_root_2(self, p):
"""root : root module"""
if p[1].module is not None:
raise ParseError(
self.filename,
"Multiple \"module\" statements not allowed:",
p[2].lineno,
snippet=self._GetSnippet(p[2].lineno))
if p[1].import_list.items or p[1].definition_list:
raise ParseError(
self.filename,
"\"module\" statements must precede imports and definitions:",
p[2].lineno,
snippet=self._GetSnippet(p[2].lineno))
p[0] = p[1]
p[0].module = p[2]
def p_root_3(self, p):
"""root : root import"""
if p[1].definition_list:
raise ParseError(
self.filename,
"\"import\" statements must precede definitions:",
p[2].lineno,
snippet=self._GetSnippet(p[2].lineno))
p[0] = p[1]
p[0].import_list.Append(p[2])
def p_root_4(self, p):
"""root : root definition"""
p[0] = p[1]
p[0].definition_list.append(p[2])
def p_import(self, p):
"""import : attribute_section IMPORT STRING_LITERAL SEMI"""
# 'eval' the literal to strip the quotes.
# TODO(vtl): This eval is dubious. We should unquote/unescape ourselves.
p[0] = ast.Import(
p[1], eval(p[3]), filename=self.filename, lineno=p.lineno(2))
def p_module(self, p):
"""module : attribute_section MODULE identifier_wrapped SEMI"""
p[0] = ast.Module(p[3], p[1], filename=self.filename, lineno=p.lineno(2))
def p_definition(self, p):
"""definition : struct
| union
| interface
| enum
| const
| feature"""
p[0] = p[1]
def p_attribute_section_1(self, p):
"""attribute_section : """
p[0] = None
def p_attribute_section_2(self, p):
"""attribute_section : LBRACKET attribute_list RBRACKET"""
p[0] = p[2]
def p_attribute_list_1(self, p):
"""attribute_list : """
p[0] = ast.AttributeList()
def p_attribute_list_2(self, p):
"""attribute_list : nonempty_attribute_list"""
p[0] = p[1]
def p_nonempty_attribute_list_1(self, p):
"""nonempty_attribute_list : attribute"""
p[0] = ast.AttributeList(p[1])
def p_nonempty_attribute_list_2(self, p):
"""nonempty_attribute_list : nonempty_attribute_list COMMA attribute"""
p[0] = p[1]
p[0].Append(p[3])
def p_attribute_1(self, p):
"""attribute : name_wrapped EQUALS identifier_wrapped"""
p[0] = ast.Attribute(p[1],
p[3][1],
filename=self.filename,
lineno=p.lineno(1))
def p_attribute_2(self, p):
"""attribute : name_wrapped EQUALS evaled_literal
| name_wrapped EQUALS name_wrapped"""
p[0] = ast.Attribute(p[1], p[3], filename=self.filename, lineno=p.lineno(1))
def p_attribute_3(self, p):
"""attribute : name_wrapped"""
p[0] = ast.Attribute(p[1], True, filename=self.filename, lineno=p.lineno(1))
def p_evaled_literal(self, p):
"""evaled_literal : literal"""
# 'eval' the literal to strip the quotes. Handle keywords "true" and "false"
# specially since they cannot directly be evaluated to python boolean
# values.
if p[1] == "true":
p[0] = True
elif p[1] == "false":
p[0] = False
else:
p[0] = eval(p[1])
def p_struct_1(self, p):
"""struct : attribute_section STRUCT name_wrapped LBRACE struct_body RBRACE SEMI"""
p[0] = ast.Struct(p[3], p[1], p[5])
def p_struct_2(self, p):
"""struct : attribute_section STRUCT name_wrapped SEMI"""
p[0] = ast.Struct(p[3], p[1], None)
def p_struct_body_1(self, p):
"""struct_body : """
p[0] = ast.StructBody()
def p_struct_body_2(self, p):
"""struct_body : struct_body const
| struct_body enum
| struct_body struct_field"""
p[0] = p[1]
p[0].Append(p[2])
def p_struct_field(self, p):
"""struct_field : attribute_section typename name_wrapped ordinal default SEMI"""
p[0] = ast.StructField(p[3], p[1], p[4], p[2], p[5])
def p_feature(self, p):
"""feature : attribute_section FEATURE NAME LBRACE feature_body RBRACE SEMI"""
p[0] = ast.Feature(p[3], p[1], p[5])
def p_feature_body_1(self, p):
"""feature_body : """
p[0] = ast.FeatureBody()
def p_feature_body_2(self, p):
"""feature_body : feature_body const"""
p[0] = p[1]
p[0].Append(p[2])
def p_union(self, p):
"""union : attribute_section UNION name_wrapped LBRACE union_body RBRACE SEMI"""
p[0] = ast.Union(p[3], p[1], p[5])
def p_union_body_1(self, p):
"""union_body : """
p[0] = ast.UnionBody()
def p_union_body_2(self, p):
"""union_body : union_body union_field"""
p[0] = p[1]
p[1].Append(p[2])
def p_union_field(self, p):
"""union_field : attribute_section typename name_wrapped ordinal SEMI"""
p[0] = ast.UnionField(p[3], p[1], p[4], p[2])
def p_default_1(self, p):
"""default : """
p[0] = None
def p_default_2(self, p):
"""default : EQUALS constant"""
p[0] = p[2]
def p_interface(self, p):
"""interface : attribute_section INTERFACE name_wrapped LBRACE interface_body RBRACE SEMI"""
p[0] = ast.Interface(p[3], p[1], p[5])
def p_interface_body_1(self, p):
"""interface_body : """
p[0] = ast.InterfaceBody()
def p_interface_body_2(self, p):
"""interface_body : interface_body const
| interface_body enum
| interface_body method"""
p[0] = p[1]
p[0].Append(p[2])
def p_response_1(self, p):
"""response : """
p[0] = None
def p_response_2(self, p):
"""response : RESPONSE LPAREN parameter_list RPAREN"""
p[0] = p[3]
def p_method(self, p):
"""method : attribute_section name_wrapped ordinal LPAREN parameter_list RPAREN response SEMI"""
p[0] = ast.Method(p[2], p[1], p[3], p[5], p[7])
def p_parameter_list_1(self, p):
"""parameter_list : """
p[0] = ast.ParameterList()
def p_parameter_list_2(self, p):
"""parameter_list : nonempty_parameter_list"""
p[0] = p[1]
def p_nonempty_parameter_list_1(self, p):
"""nonempty_parameter_list : parameter"""
p[0] = ast.ParameterList(p[1])
def p_nonempty_parameter_list_2(self, p):
"""nonempty_parameter_list : nonempty_parameter_list COMMA parameter"""
p[0] = p[1]
p[0].Append(p[3])
def p_parameter(self, p):
"""parameter : attribute_section typename name_wrapped ordinal"""
p[0] = ast.Parameter(
p[3], p[1], p[4], p[2], filename=self.filename, lineno=p.lineno(3))
def p_typename(self, p):
"""typename : nonnullable_typename QSTN
| nonnullable_typename"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[1] + "?"
def p_nonnullable_typename(self, p):
"""nonnullable_typename : basictypename
| array
| fixed_array
| associative_array"""
p[0] = p[1]
def p_basictypename(self, p):
"""basictypename : remotetype
| receivertype
| associatedremotetype
| associatedreceivertype
| identifier
| ASSOCIATED identifier
| handletype"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = "asso<" + p[2] + ">"
def p_remotetype(self, p):
"""remotetype : PENDING_REMOTE LANGLE identifier RANGLE"""
p[0] = "rmt<%s>" % p[3]
def p_receivertype(self, p):
"""receivertype : PENDING_RECEIVER LANGLE identifier RANGLE"""
p[0] = "rcv<%s>" % p[3]
def p_associatedremotetype(self, p):
"""associatedremotetype : PENDING_ASSOCIATED_REMOTE LANGLE identifier RANGLE"""
p[0] = "rma<%s>" % p[3]
def p_associatedreceivertype(self, p):
"""associatedreceivertype : PENDING_ASSOCIATED_RECEIVER LANGLE identifier RANGLE"""
p[0] = "rca<%s>" % p[3]
  def p_handletype(self, p):
    """handletype : HANDLE
                  | HANDLE LANGLE name_wrapped RANGLE"""
    # NOTE: the docstring above is a PLY grammar production and is
    # load-bearing; do not reword it.
    if len(p) == 2:
      # Bare "handle" with no explicit subtype.
      p[0] = p[1]
    else:
      # Only a fixed set of handle subtypes is permitted.
      if p[3] not in ('data_pipe_consumer', 'data_pipe_producer',
                      'message_pipe', 'shared_buffer', 'platform'):
        # Note: We don't enable tracking of line numbers for everything, so we
        # can't use |p.lineno(3)|.
        raise ParseError(
            self.filename,
            "Invalid handle type %r:" % p[3],
            lineno=p.lineno(1),
            snippet=self._GetSnippet(p.lineno(1)))
      p[0] = "handle<" + p[3] + ">"
def p_array(self, p):
"""array : ARRAY LANGLE typename RANGLE"""
p[0] = p[3] + "[]"
def p_fixed_array(self, p):
"""fixed_array : ARRAY LANGLE typename COMMA INT_CONST_DEC RANGLE"""
value = int(p[5])
if value == 0 or value > _MAX_ARRAY_SIZE:
raise ParseError(
self.filename,
"Fixed array size %d invalid:" % value,
lineno=p.lineno(5),
snippet=self._GetSnippet(p.lineno(5)))
p[0] = p[3] + "[" + p[5] + "]"
def p_associative_array(self, p):
"""associative_array : MAP LANGLE identifier COMMA typename RANGLE"""
p[0] = p[5] + "{" + p[3] + "}"
def p_ordinal_1(self, p):
"""ordinal : """
p[0] = None
def p_ordinal_2(self, p):
"""ordinal : ORDINAL"""
value = int(p[1][1:])
if value > _MAX_ORDINAL_VALUE:
raise ParseError(
self.filename,
"Ordinal value %d too large:" % value,
lineno=p.lineno(1),
snippet=self._GetSnippet(p.lineno(1)))
p[0] = ast.Ordinal(value, filename=self.filename, lineno=p.lineno(1))
def p_enum_1(self, p):
"""enum : attribute_section ENUM name_wrapped LBRACE enum_value_list RBRACE SEMI
| attribute_section ENUM name_wrapped LBRACE \
nonempty_enum_value_list COMMA RBRACE SEMI"""
p[0] = ast.Enum(
p[3], p[1], p[5], filename=self.filename, lineno=p.lineno(2))
def p_enum_2(self, p):
"""enum : attribute_section ENUM name_wrapped SEMI"""
p[0] = ast.Enum(
p[3], p[1], None, filename=self.filename, lineno=p.lineno(2))
def p_enum_value_list_1(self, p):
"""enum_value_list : """
p[0] = ast.EnumValueList()
def p_enum_value_list_2(self, p):
"""enum_value_list : nonempty_enum_value_list"""
p[0] = p[1]
def p_nonempty_enum_value_list_1(self, p):
"""nonempty_enum_value_list : enum_value"""
p[0] = ast.EnumValueList(p[1])
def p_nonempty_enum_value_list_2(self, p):
"""nonempty_enum_value_list : nonempty_enum_value_list COMMA enum_value"""
p[0] = p[1]
p[0].Append(p[3])
def p_enum_value(self, p):
"""enum_value : attribute_section name_wrapped
| attribute_section name_wrapped EQUALS int
| attribute_section name_wrapped EQUALS identifier_wrapped"""
p[0] = ast.EnumValue(
p[2],
p[1],
p[4] if len(p) == 5 else None,
filename=self.filename,
lineno=p.lineno(2))
def p_const(self, p):
"""const : attribute_section CONST typename name_wrapped EQUALS constant SEMI"""
p[0] = ast.Const(p[4], p[1], p[3], p[6])
def p_constant(self, p):
"""constant : literal
| identifier_wrapped"""
p[0] = p[1]
def p_identifier_wrapped(self, p):
"""identifier_wrapped : identifier"""
p[0] = ('IDENTIFIER', p[1])
# TODO(vtl): Make this produce a "wrapped" identifier (probably as an
# |ast.Identifier|, to be added) and get rid of identifier_wrapped.
def p_identifier(self, p):
"""identifier : name_wrapped
| name_wrapped DOT identifier"""
p[0] = ''.join(p[1:])
# Allow 'feature' to be a name literal not just a keyword.
def p_name_wrapped(self, p):
"""name_wrapped : NAME
| FEATURE"""
p[0] = p[1]
def p_literal(self, p):
"""literal : int
| float
| TRUE
| FALSE
| DEFAULT
| STRING_LITERAL"""
p[0] = p[1]
def p_int(self, p):
"""int : int_const
| PLUS int_const
| MINUS int_const"""
p[0] = ''.join(p[1:])
def p_int_const(self, p):
"""int_const : INT_CONST_DEC
| INT_CONST_HEX"""
p[0] = p[1]
def p_float(self, p):
"""float : FLOAT_CONST
| PLUS FLOAT_CONST
| MINUS FLOAT_CONST"""
p[0] = ''.join(p[1:])
  def p_error(self, e):
    # PLY error hook: |e| is the offending token, or None on unexpected EOF.
    if e is None:
      # Unexpected EOF.
      # TODO(vtl): Can we figure out what's missing?
      raise ParseError(self.filename, "Unexpected end of file")

    if e.value == 'feature':
      # 'feature' is accepted as a plain name in some positions (see
      # name_wrapped); anywhere else it is reserved.
      raise ParseError(self.filename,
                       "`feature` is reserved for a future mojom keyword",
                       lineno=e.lineno,
                       snippet=self._GetSnippet(e.lineno))

    raise ParseError(
        self.filename,
        "Unexpected %r:" % e.value,
        lineno=e.lineno,
        snippet=self._GetSnippet(e.lineno))
def _GetSnippet(self, lineno):
return self.source.split('\n')[lineno - 1]
def Parse(source, filename):
  """Parse source file to AST.

  Args:
    source: The source text as a str (Python 2 or 3) or unicode (Python 2).
    filename: The filename that |source| originates from.

  Returns:
    The AST as a mojom.parse.ast.Mojom object.
  """
  lexer = Lexer(filename)
  parser = Parser(lexer, source, filename)

  # Build the lexer/parser tables from the t_*/p_* definitions on the
  # objects; the module-level yacc.parse() below uses the parser built here.
  lex.lex(object=lexer)
  yacc.yacc(module=parser, debug=0, write_tables=0)

  tree = yacc.parse(source)
  return tree
|
0 | repos/libcamera/utils/ipc | repos/libcamera/utils/ipc/generators/mojom_libcamera_generator.py | #!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright (C) 2020, Google Inc.
#
# Author: Paul Elder <[email protected]>
#
# Generates libcamera files from a mojom.Module.
import argparse
import datetime
import os
import re
import mojom.fileutil as fileutil
import mojom.generate.generator as generator
import mojom.generate.module as mojom
from mojom.generate.template_expander import UseJinja
GENERATOR_PREFIX = 'libcamera'
_kind_to_cpp_type = {
mojom.BOOL: 'bool',
mojom.INT8: 'int8_t',
mojom.UINT8: 'uint8_t',
mojom.INT16: 'int16_t',
mojom.UINT16: 'uint16_t',
mojom.INT32: 'int32_t',
mojom.UINT32: 'uint32_t',
mojom.FLOAT: 'float',
mojom.INT64: 'int64_t',
mojom.UINT64: 'uint64_t',
mojom.DOUBLE: 'double',
}
_bit_widths = {
mojom.BOOL: '8',
mojom.INT8: '8',
mojom.UINT8: '8',
mojom.INT16: '16',
mojom.UINT16: '16',
mojom.INT32: '32',
mojom.UINT32: '32',
mojom.FLOAT: '32',
mojom.INT64: '64',
mojom.UINT64: '64',
mojom.DOUBLE: '64',
}
def ModuleName(path):
    """Return the module name of *path*: basename, up to the first dot."""
    basename = path.split('/')[-1]
    return basename.split('.')[0]
def ModuleClassName(module):
return re.sub(r'^IPA(.*)Interface$', lambda match: match.group(1),
module.interfaces[0].mojom_name)
def Capitalize(name):
    """Upper-case only the first character of *name*; the rest is untouched."""
    head, tail = name[0], name[1:]
    return head.upper() + tail
def ConstantStyle(name):
return generator.ToUpperSnakeCase(name)
def Choose(cond, t, f):
    """Template ternary helper: return *t* when *cond* is truthy, else *f*."""
    if cond:
        return t
    return f
def CommaSep(l):
    """Join the items of *l* with ", "."""
    # The identity comprehension ([m for m in l]) was redundant;
    # str.join() accepts any iterable of strings directly.
    return ', '.join(l)
def ParamsCommaSep(l):
    """Join the mojom_name of every parameter in *l* with ", "."""
    return ', '.join(param.mojom_name for param in l)
def GetDefaultValue(element):
    # Return the C++ default-value expression for a field/parameter, or ''
    # when no explicit initializer is emitted.  The checks are
    # order-sensitive: flags must be tested before the enum check below.
    if element.default is not None:
        # Explicit default given in the mojom source.
        return element.default

    if type(element.kind) == mojom.ValueKind:
        return '0'

    # Flags default-construct; checked before the enum branch on purpose.
    if IsFlags(element):
        return ''

    if mojom.IsEnumKind(element.kind):
        return f'static_cast<{element.kind.mojom_name}>(0)'

    # SharedFD defaults to an invalid (-1) file descriptor.
    if isinstance(element.kind, mojom.Struct) and \
       element.kind.mojom_name == 'SharedFD':
        return '-1'

    return ''
def HasDefaultValue(element):
return GetDefaultValue(element) != ''
def HasDefaultFields(element):
return True in [HasDefaultValue(x) for x in element.fields]
def GetAllTypes(element):
if mojom.IsArrayKind(element):
return GetAllTypes(element.kind)
if mojom.IsMapKind(element):
return GetAllTypes(element.key_kind) + GetAllTypes(element.value_kind)
if isinstance(element, mojom.Parameter):
return GetAllTypes(element.kind)
if mojom.IsEnumKind(element):
return [element.mojom_name]
if not mojom.IsStructKind(element):
return [element.spec]
if len(element.fields) == 0:
return [element.mojom_name]
ret = [GetAllTypes(x.kind) for x in element.fields]
ret = [x for sublist in ret for x in sublist]
return list(set(ret))
def GetAllAttrs(element):
if mojom.IsArrayKind(element):
return GetAllAttrs(element.kind)
if mojom.IsMapKind(element):
return {**GetAllAttrs(element.key_kind), **GetAllAttrs(element.value_kind)}
if isinstance(element, mojom.Parameter):
return GetAllAttrs(element.kind)
if mojom.IsEnumKind(element):
return element.attributes if element.attributes is not None else {}
if mojom.IsStructKind(element) and len(element.fields) == 0:
return element.attributes if element.attributes is not None else {}
if not mojom.IsStructKind(element):
if hasattr(element, 'attributes'):
return element.attributes or {}
return {}
attrs = [(x.attributes) for x in element.fields]
ret = {}
for d in attrs:
ret.update(d or {})
if hasattr(element, 'attributes'):
ret.update(element.attributes or {})
return ret
def NeedsControlSerializer(element):
types = GetAllTypes(element)
for type in ['ControlList', 'ControlInfoMap']:
if f'x:{type}' in types:
raise Exception(f'Unknown type "{type}" in {element.mojom_name}, did you mean "libcamera.{type}"?')
return "ControlList" in types or "ControlInfoMap" in types
def HasFd(element):
attrs = GetAllAttrs(element)
if isinstance(element, mojom.Kind):
types = GetAllTypes(element)
else:
types = GetAllTypes(element.kind)
return "SharedFD" in types or (attrs is not None and "hasFd" in attrs)
def WithDefaultValues(element):
return [x for x in element if HasDefaultValue(x)]
def WithFds(element):
return [x for x in element if HasFd(x)]
def MethodParamInputs(method):
return method.parameters
def MethodParamOutputs(method):
if method.response_parameters is None:
return []
if MethodReturnValue(method) == 'void':
return method.response_parameters
if len(method.response_parameters) <= 1:
return []
return method.response_parameters[1:]
def MethodParamsHaveFd(parameters):
return len([x for x in parameters if HasFd(x)]) > 0
def MethodInputHasFd(method):
return MethodParamsHaveFd(method.parameters)
def MethodOutputHasFd(method):
return MethodParamsHaveFd(MethodParamOutputs(method))
def MethodParamNames(method):
params = []
for param in method.parameters:
params.append(param.mojom_name)
for param in MethodParamOutputs(method):
params.append(param.mojom_name)
return params
def MethodParameters(method):
params = []
for param in method.parameters:
params.append('const %s %s%s' % (GetNameForElement(param),
'' if IsPod(param) or IsEnum(param) else '&',
param.mojom_name))
for param in MethodParamOutputs(method):
params.append(f'{GetNameForElement(param)} *{param.mojom_name}')
return params
def MethodReturnValue(method):
if method.response_parameters is None or len(method.response_parameters) == 0:
return 'void'
first_output = method.response_parameters[0]
if ((len(method.response_parameters) == 1 and IsPod(first_output)) or
first_output.kind == mojom.INT32):
return GetNameForElement(first_output)
return 'void'
def IsAsync(method):
    # Returns True when |method| must be dispatched asynchronously.
    # Events are always async
    if re.match("^IPA.*EventInterface$", method.interface.mojom_name):
        return True
    elif re.match("^IPA.*Interface$", method.interface.mojom_name):
        # Main-interface methods are async only when tagged with the
        # [async] attribute.
        if method.attributes is None:
            return False
        elif 'async' in method.attributes and method.attributes['async']:
            return True
    return False
def IsArray(element):
return mojom.IsArrayKind(element.kind)
def IsControls(element):
return mojom.IsStructKind(element.kind) and (element.kind.mojom_name == "ControlList" or
element.kind.mojom_name == "ControlInfoMap")
def IsEnum(element):
return mojom.IsEnumKind(element.kind)
# Only works the enum definition, not types
def IsScoped(element):
attributes = getattr(element, 'attributes', None)
if not attributes:
return False
return 'scopedEnum' in attributes
def IsEnumScoped(element):
if not IsEnum(element):
return False
return IsScoped(element.kind)
def IsFd(element):
return mojom.IsStructKind(element.kind) and element.kind.mojom_name == "SharedFD"
def IsFlags(element):
attributes = getattr(element, 'attributes', None)
if not attributes:
return False
return 'flags' in attributes
def IsMap(element):
return mojom.IsMapKind(element.kind)
def IsPlainStruct(element):
return mojom.IsStructKind(element.kind) and not IsControls(element) and not IsFd(element)
def IsPod(element):
return element.kind in _kind_to_cpp_type
def IsStr(element):
return element.kind.spec == 's'
def BitWidth(element):
if element.kind in _bit_widths:
return _bit_widths[element.kind]
if mojom.IsEnumKind(element.kind):
return '32'
return ''
def ByteWidthFromCppType(t):
    """Return the byte width (as a string) of the C++ POD type *t*.

    Raises Exception for a type with no mojom POD mapping.
    """
    for mojo_type, cpp_type in _kind_to_cpp_type.items():
        if t == cpp_type:
            # Return on first match instead of scanning the whole dict;
            # cpp type names are unique, so behavior is unchanged.
            return str(int(_bit_widths[mojo_type]) // 8)
    raise Exception('invalid type')
# Get the type name for a given element
def GetNameForElement(element):
# Flags
if IsFlags(element):
return f'Flags<{GetFullNameForElement(element.kind)}>'
# structs
if (mojom.IsEnumKind(element) or
mojom.IsInterfaceKind(element) or
mojom.IsStructKind(element)):
return element.mojom_name
# vectors
if (mojom.IsArrayKind(element)):
elem_name = GetFullNameForElement(element.kind)
return f'std::vector<{elem_name}>'
# maps
if (mojom.IsMapKind(element)):
key_name = GetFullNameForElement(element.key_kind)
value_name = GetFullNameForElement(element.value_kind)
return f'std::map<{key_name}, {value_name}>'
# struct fields and function parameters
if isinstance(element, (mojom.Field, mojom.Method, mojom.Parameter)):
# maps and vectors
if (mojom.IsArrayKind(element.kind) or mojom.IsMapKind(element.kind)):
return GetNameForElement(element.kind)
# strings
if (mojom.IsReferenceKind(element.kind) and element.kind.spec == 's'):
return 'std::string'
# PODs
if element.kind in _kind_to_cpp_type:
return _kind_to_cpp_type[element.kind]
# structs and enums
return element.kind.mojom_name
# PODs that are members of vectors/maps
if (hasattr(element, '__hash__') and element in _kind_to_cpp_type):
return _kind_to_cpp_type[element]
if (hasattr(element, 'spec')):
# strings that are members of vectors/maps
if (element.spec == 's'):
return 'std::string'
# structs that aren't defined in mojom that are members of vectors/maps
if (element.spec[0] == 'x'):
return element.spec.replace('x:', '').replace('.', '::')
if (mojom.IsInterfaceRequestKind(element) or
mojom.IsAssociatedKind(element) or
mojom.IsPendingRemoteKind(element) or
mojom.IsPendingReceiverKind(element) or
mojom.IsUnionKind(element)):
raise Exception('Unsupported element: %s' % element)
raise Exception('Unexpected element: %s' % element)
def GetFullNameForElement(element):
name = GetNameForElement(element)
namespace_str = ''
if (mojom.IsStructKind(element) or mojom.IsEnumKind(element)):
namespace_str = element.module.mojom_namespace.replace('.', '::')
elif (hasattr(element, 'kind') and
(mojom.IsStructKind(element.kind) or mojom.IsEnumKind(element.kind))):
namespace_str = element.kind.module.mojom_namespace.replace('.', '::')
if namespace_str == '':
return name
if IsFlags(element):
return GetNameForElement(element)
return f'{namespace_str}::{name}'
def ValidateZeroLength(l, s, cap=True):
    """Raise unless *l* is None or empty; *s* names the offending list."""
    if l is None or len(l) == 0:
        return
    label = s.capitalize() if cap else s
    raise Exception(f'{label} should be empty')
def ValidateSingleLength(l, s, cap=True):
    """Raise unless *l* holds exactly one element; *s* names the item."""
    count = len(l)
    if count > 1:
        raise Exception(f'Only one {s} allowed')
    if count < 1:
        raise Exception(f'{s.capitalize() if cap else s} is required')
def GetMainInterface(interfaces):
intf = [x for x in interfaces
if re.match("^IPA.*Interface", x.mojom_name) and
not re.match("^IPA.*EventInterface", x.mojom_name)]
ValidateSingleLength(intf, 'main interface')
return None if len(intf) == 0 else intf[0]
def GetEventInterface(interfaces):
event = [x for x in interfaces if re.match("^IPA.*EventInterface", x.mojom_name)]
ValidateSingleLength(event, 'event interface')
return None if len(event) == 0 else event[0]
def ValidateNamespace(namespace):
    """Ensure the mojom namespace has the form "ipa.{pipeline_name}"."""
    if namespace == '':
        raise Exception('Must have a namespace')
    if re.match(r'^ipa\.[0-9A-Za-z_]+', namespace) is None:
        raise Exception('Namespace must be of the form "ipa.{pipeline_name}"')
def ValidateInterfaces(interfaces):
    """Validate the module's IPA interfaces.

    Checks that exactly one main interface and one event interface exist,
    that the main interface has single init()/start()/stop() methods, that
    stop() takes and returns nothing, that the event interface has at least
    one event, and that async methods have no response parameters.

    Raises Exception on the first violation found.
    """
    # Validate presence of main interface
    intf = GetMainInterface(interfaces)
    if intf is None:
        raise Exception('Must have main IPA interface')

    # Validate presence of event interface
    event = GetEventInterface(interfaces)
    if event is None:
        # Bug fix: this previously re-tested |intf|, so a missing event
        # interface was never reported here.
        raise Exception('Must have event IPA interface')

    # Validate required main interface functions
    f_init = [x for x in intf.methods if x.mojom_name == 'init']
    f_start = [x for x in intf.methods if x.mojom_name == 'start']
    f_stop = [x for x in intf.methods if x.mojom_name == 'stop']

    ValidateSingleLength(f_init, 'init()', False)
    ValidateSingleLength(f_start, 'start()', False)
    ValidateSingleLength(f_stop, 'stop()', False)

    f_stop = f_stop[0]

    # No need to validate init() and start() as they are customizable

    # Validate parameters to stop()
    ValidateZeroLength(f_stop.parameters, 'input parameter to stop()')
    # Bug fix: the output check previously re-tested the input parameters;
    # it must inspect the response parameters instead.
    ValidateZeroLength(f_stop.response_parameters,
                       'output parameter from stop()')

    # Validate that event interface has at least one event
    if len(event.methods) < 1:
        raise Exception('Event interface must have at least one event')

    # Validate that all async methods don't have return values
    intf_methods_async = [x for x in intf.methods if IsAsync(x)]
    for method in intf_methods_async:
        ValidateZeroLength(method.response_parameters,
                           f'{method.mojom_name} response parameters', False)

    event_methods_async = [x for x in event.methods if IsAsync(x)]
    for method in event_methods_async:
        ValidateZeroLength(method.response_parameters,
                           f'{method.mojom_name} response parameters', False)
class Generator(generator.Generator):
@staticmethod
def GetTemplatePrefix():
return 'libcamera_templates'
def GetFilters(self):
libcamera_filters = {
'all_types': GetAllTypes,
'bit_width': BitWidth,
'byte_width' : ByteWidthFromCppType,
'cap': Capitalize,
'choose': Choose,
'comma_sep': CommaSep,
'default_value': GetDefaultValue,
'has_default_fields': HasDefaultFields,
'has_fd': HasFd,
'is_async': IsAsync,
'is_array': IsArray,
'is_controls': IsControls,
'is_enum': IsEnum,
'is_enum_scoped': IsEnumScoped,
'is_fd': IsFd,
'is_flags': IsFlags,
'is_map': IsMap,
'is_plain_struct': IsPlainStruct,
'is_pod': IsPod,
'is_scoped': IsScoped,
'is_str': IsStr,
'method_input_has_fd': MethodInputHasFd,
'method_output_has_fd': MethodOutputHasFd,
'method_param_names': MethodParamNames,
'method_param_inputs': MethodParamInputs,
'method_param_outputs': MethodParamOutputs,
'method_parameters': MethodParameters,
'method_return_value': MethodReturnValue,
'name': GetNameForElement,
'name_full': GetFullNameForElement,
'needs_control_serializer': NeedsControlSerializer,
'params_comma_sep': ParamsCommaSep,
'with_default_values': WithDefaultValues,
'with_fds': WithFds,
}
return libcamera_filters
def _GetJinjaExports(self):
return {
'cmd_enum_name': '_%sCmd' % self.module_name,
'cmd_event_enum_name': '_%sEventCmd' % self.module_name,
'consts': self.module.constants,
'enums': self.module.enums,
'has_array': len([x for x in self.module.kinds.keys() if x[0] == 'a']) > 0,
'has_map': len([x for x in self.module.kinds.keys() if x[0] == 'm']) > 0,
'has_namespace': self.module.mojom_namespace != '',
'interface_event': GetEventInterface(self.module.interfaces),
'interface_main': GetMainInterface(self.module.interfaces),
'interface_name': 'IPA%sInterface' % self.module_name,
'module_name': ModuleName(self.module.path),
'namespace': self.module.mojom_namespace.split('.'),
'namespace_str': self.module.mojom_namespace.replace('.', '::') if
self.module.mojom_namespace is not None else '',
'proxy_name': 'IPAProxy%s' % self.module_name,
'proxy_worker_name': 'IPAProxy%sWorker' % self.module_name,
'structs_nonempty': [x for x in self.module.structs if len(x.fields) > 0],
}
def _GetJinjaExportsForCore(self):
return {
'consts': self.module.constants,
'enums_gen_header': [x for x in self.module.enums if x.attributes is None or 'skipHeader' not in x.attributes],
'has_array': len([x for x in self.module.kinds.keys() if x[0] == 'a']) > 0,
'has_map': len([x for x in self.module.kinds.keys() if x[0] == 'm']) > 0,
'structs_gen_header': [x for x in self.module.structs if x.attributes is None or 'skipHeader' not in x.attributes],
'structs_gen_serializer': [x for x in self.module.structs if x.attributes is None or 'skipSerdes' not in x.attributes],
}
@UseJinja('core_ipa_interface.h.tmpl')
def _GenerateCoreHeader(self):
return self._GetJinjaExportsForCore()
@UseJinja('core_ipa_serializer.h.tmpl')
def _GenerateCoreSerializer(self):
return self._GetJinjaExportsForCore()
@UseJinja('module_ipa_interface.h.tmpl')
def _GenerateDataHeader(self):
return self._GetJinjaExports()
@UseJinja('module_ipa_serializer.h.tmpl')
def _GenerateSerializer(self):
return self._GetJinjaExports()
@UseJinja('module_ipa_proxy.cpp.tmpl')
def _GenerateProxyCpp(self):
return self._GetJinjaExports()
@UseJinja('module_ipa_proxy.h.tmpl')
def _GenerateProxyHeader(self):
return self._GetJinjaExports()
@UseJinja('module_ipa_proxy_worker.cpp.tmpl')
def _GenerateProxyWorker(self):
return self._GetJinjaExports()
def GenerateFiles(self, unparsed_args):
parser = argparse.ArgumentParser()
parser.add_argument('--libcamera_generate_core_header', action='store_true')
parser.add_argument('--libcamera_generate_core_serializer', action='store_true')
parser.add_argument('--libcamera_generate_header', action='store_true')
parser.add_argument('--libcamera_generate_serializer', action='store_true')
parser.add_argument('--libcamera_generate_proxy_cpp', action='store_true')
parser.add_argument('--libcamera_generate_proxy_h', action='store_true')
parser.add_argument('--libcamera_generate_proxy_worker', action='store_true')
parser.add_argument('--libcamera_output_path')
args = parser.parse_args(unparsed_args)
if not args.libcamera_generate_core_header and \
not args.libcamera_generate_core_serializer:
ValidateNamespace(self.module.mojom_namespace)
ValidateInterfaces(self.module.interfaces)
self.module_name = ModuleClassName(self.module)
fileutil.EnsureDirectoryExists(os.path.dirname(args.libcamera_output_path))
gen_funcs = [
[args.libcamera_generate_core_header, self._GenerateCoreHeader],
[args.libcamera_generate_core_serializer, self._GenerateCoreSerializer],
[args.libcamera_generate_header, self._GenerateDataHeader],
[args.libcamera_generate_serializer, self._GenerateSerializer],
[args.libcamera_generate_proxy_cpp, self._GenerateProxyCpp],
[args.libcamera_generate_proxy_h, self._GenerateProxyHeader],
[args.libcamera_generate_proxy_worker, self._GenerateProxyWorker],
]
for pair in gen_funcs:
if pair[0]:
self.Write(pair[1](), args.libcamera_output_path)
|
0 | repos/libcamera/utils/ipc/tools | repos/libcamera/utils/ipc/tools/diagnosis/crbug_1001171.py | # Copyright 2019 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper context wrapper for diagnosing crbug.com/1001171.
This module and all uses thereof can and should be removed once
crbug.com/1001171 has been resolved.
"""
from __future__ import print_function
import contextlib
import os
import sys
@contextlib.contextmanager
def DumpStateOnLookupError():
  """Prints potentially useful state info in the event of a LookupError."""
  try:
    yield
  except LookupError:
    # Dump every sys.path entry and its filesystem state; a broken or
    # missing entry is the suspected cause of crbug.com/1001171.
    print('LookupError diagnosis for crbug.com/1001171:')
    for path_index, path_entry in enumerate(sys.path):
      desc = 'unknown'
      if not os.path.exists(path_entry):
        desc = 'missing'
      elif os.path.islink(path_entry):
        desc = 'link -> %s' % os.path.realpath(path_entry)
      elif os.path.isfile(path_entry):
        desc = 'file'
      elif os.path.isdir(path_entry):
        desc = 'dir'
      print(' sys.path[%d]: %s (%s)' % (path_index, path_entry, desc))

      # If this looks like a python2.7 lib directory, additionally check
      # that the 'encodings' package (needed at interpreter startup) exists.
      real_path_entry = os.path.realpath(path_entry)
      if (path_entry.endswith(os.path.join('lib', 'python2.7'))
          and os.path.isdir(real_path_entry)):
        encodings_dir = os.path.realpath(
            os.path.join(real_path_entry, 'encodings'))
        if os.path.exists(encodings_dir):
          if os.path.isdir(encodings_dir):
            print(' %s contents: %s' % (encodings_dir,
                                        str(os.listdir(encodings_dir))))
          else:
            print(' %s exists but is not a directory' % encodings_dir)
        else:
          print(' %s missing' % encodings_dir)
    # Re-raise so callers still see the original LookupError.
    raise
|
0 | repos/libcamera/utils | repos/libcamera/utils/tracepoints/gen-tp-header.py | #!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright (C) 2020, Google Inc.
#
# Author: Paul Elder <[email protected]>
#
# Generate header file to contain lttng tracepoints
import datetime
import jinja2
import pathlib
import os
import sys
def main(argv):
    """Render the lttng tracepoint header.

    argv: [prog, include_build_dir, output, template, tp_files...]
    Returns 0 on success, 1 on usage error.
    """
    if len(argv) < 4:
        print(f'Usage: {argv[0]} include_build_dir output template tp_files...')
        return 1

    output = argv[2]
    template_path = argv[3]

    year = datetime.datetime.now().year
    # Path of the generated header relative to the include build dir, used
    # by the template for the header guard / #include path.
    path = pathlib.Path(output).absolute().relative_to(argv[1])

    # Concatenate all tracepoint definition files.
    source = ''
    for fname in argv[4:]:
        # Use context managers so file handles are closed deterministically
        # (the previous open(...).read() chains leaked them).
        with open(fname, 'r', encoding='utf-8') as f:
            source += f.read() + '\n\n'

    with open(template_path, 'r', encoding='utf-8') as f:
        template = jinja2.Template(f.read())
    string = template.render(year=year, path=path, source=source)

    with open(output, 'w', encoding='utf-8') as f:
        f.write(string)

    return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
0 | repos/libcamera/utils | repos/libcamera/utils/tracepoints/analyze-ipa-trace.py | #!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright (C) 2020, Google Inc.
#
# Author: Paul Elder <[email protected]>
#
# Example of how to extract information from libcamera lttng traces
import argparse
import bt2
import statistics as stats
import sys
# pipeline -> {function -> stack(timestamps)}
timestamps = {}
# pipeline:function -> samples[]
samples = {}
def main(argv):
    # Walk an lttng trace, pair up libcamera:ipa_call_begin/_end events per
    # (pipeline, function), and print min/max/mean/stddev of call durations.
    parser = argparse.ArgumentParser(
        description='A simple analysis script to get statistics on time taken for IPA calls')
    parser.add_argument('-p', '--pipeline', type=str,
                        help='Name of pipeline to filter for')
    parser.add_argument('trace_path', type=str,
                        help='Path to lttng trace (eg. ~/lttng-traces/demo-20201029-184003)')
    args = parser.parse_args(argv[1:])

    traces = bt2.TraceCollectionMessageIterator(args.trace_path)

    for msg in traces:
        # Skip non-event messages, events without a pipeline name, and
        # events for pipelines other than the requested one.
        if type(msg) is not bt2._EventMessageConst or \
           'pipeline_name' not in msg.event.payload_field or \
           (args.pipeline is not None and \
            msg.event.payload_field['pipeline_name'] != args.pipeline):
            continue

        pipeline = msg.event.payload_field['pipeline_name']
        event = msg.event.name
        func = msg.event.payload_field['function_name']
        timestamp_ns = msg.default_clock_snapshot.ns_from_origin

        if event == 'libcamera:ipa_call_begin':
            # Push the start timestamp onto a per-function stack so nested
            # begin/end pairs match up.
            if pipeline not in timestamps:
                timestamps[pipeline] = {}
            if func not in timestamps[pipeline]:
                timestamps[pipeline][func] = []
            timestamps[pipeline][func].append(timestamp_ns)

        if event == 'libcamera:ipa_call_end':
            # Pop the matching begin and record the elapsed nanoseconds.
            ts = timestamps[pipeline][func].pop()
            key = f'{pipeline}:{func}'
            if key not in samples:
                samples[key] = []
            samples[key].append(timestamp_ns - ts)

    # Compute stats
    rows = []
    rows.append(['pipeline:function', 'min', 'max', 'mean', 'stddev'])
    for k, v in samples.items():
        mean = int(stats.mean(v))
        stddev = int(stats.stdev(v))
        minv = min(v)
        maxv = max(v)
        rows.append([k, str(minv), str(maxv), str(mean), str(stddev)])

    # Get maximum string width for every column
    widths = []
    for i in range(len(rows[0])):
        widths.append(max([len(row[i]) for row in rows]))

    # Print stats table
    for row in rows:
        fmt = [row[i].rjust(widths[i]) for i in range(1, 5)]
        print('{} {} {} {} {}'.format(row[0].ljust(widths[0]), *fmt))
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
0 | repos/libcamera/utils | repos/libcamera/utils/raspberrypi/delayedctrls_parse.py | # SPDX-License-Identifier: BSD-2-Clause
import re
import sys
import os
if len(sys.argv) != 2:
print("Usage: {} <infile>".format(sys.argv[0]))
sys.exit()
infile = sys.argv[1]
insplit = os.path.splitext(infile)
outfile = insplit[0] + '_parsed' + insplit[1]
frame_re = re.compile(r'frame (\d+) started')
delays = {
'Analogue Gain': 1,
'Exposure': 2,
'Vertical Blanking': 2
}
ctrl_action = {
'Write': {},
'Get': {},
'Queue': {},
'No-op': {}
}
ctrl_re = {
'Write': re.compile(r'Setting (.*?) to (\d+) at index (\d+)'),
'No-op': re.compile(r'Queue is empty, (.*?) (.*?) (.*?)'),
'Get': re.compile(r'Reading (.*?) to (\d+) at index (\d+)'),
'Queue': re.compile(r'Queuing (.*?) to (\d+) at index (\d+)')
}
frame_num = -1
max_delay = 0
for k, d in delays.items():
if max_delay < d:
max_delay = d
with open(infile) as f:
lines = f.readlines()
for line in lines:
r = frame_re.search(line)
if r:
frame_num = int(r.group(1))
for (key, re) in ctrl_re.items():
r = re.search(line)
if r:
ctrl_action[key][(frame_num, r.group(1))] = (r.group(2), r.group(3))
with open(outfile, 'wt') as f:
queueIndex = 1
f.write('{:<10}{:<15}{:<12}{:<18}{}\n'.format('Frame', 'Action', 'Gain', 'Exposure', 'Vblank'))
for frame in range(0, frame_num + 1):
for (k, a) in ctrl_action.items():
str = '{:<10}{:<10}'.format(frame, k)
for c in delays.keys():
# Tabulate all results
str += '{:>5} {:<10}'.format(a[(frame, c)][0] if (frame, c) in a.keys() else '---',
'[' + (a[(frame, c)][1] if (frame, c) in a.keys() else '-') + ']')
f.write(str.strip() + '\n')
# Test the write -> get matches the set delay.
for (frame, c) in ctrl_action['Write'].keys():
set_value = ctrl_action['Write'][(frame, c)][0]
delay_frame = frame + delays[c]
if (delay_frame <= frame_num):
if (delay_frame, c) in ctrl_action['Get']:
get_value = ctrl_action['Get'][(delay_frame, c)][0]
if get_value != set_value:
print('Error: {} written at frame {} to value {} != {} at frame {}'
.format(c, frame, set_value, get_value, delay_frame))
else:
print('Warning: {} written at frame {} to value {} did not get logged on frame {} - dropped frame?'
.format(c, frame, set_value, delay_frame))
# Test the queue -> write matches the set delay.
for (frame, c) in ctrl_action['Queue'].keys():
set_value = ctrl_action['Queue'][(frame, c)][0]
delay_frame = frame + max_delay - delays[c] + 1
if (delay_frame <= frame_num):
if (delay_frame, c) in ctrl_action['Write']:
write_value = ctrl_action['Write'][(delay_frame, c)][0]
if write_value != set_value:
print('Info: {} queued at frame {} to value {} != {} written at frame {}'
' - lagging behind or double queue on a single frame!'
.format(c, frame, set_value, write_value, delay_frame))
else:
print('Warning: {} queued at frame {} to value {} did not get logged on frame {} - dropped frame?'
.format(c, frame, set_value, delay_frame))
# Test the get -> write matches the set delay going backwards.
for (frame, c) in ctrl_action['Get'].keys():
get_value = ctrl_action['Get'][(frame, c)][0]
delay_frame = frame - delays[c]
if (delay_frame >= 6):
if (delay_frame, c) in ctrl_action['Write']:
write_value = ctrl_action['Write'][(delay_frame, c)][0]
if get_value != write_value:
print('Info: {} got at frame {} to value {} != {} written at frame {}'
' - lagging behind or double queue on a single frame!'
.format(c, frame, get_value, write_value, delay_frame))
else:
print('Warning: {} got at frame {} to value {} did not get written on frame {}'
.format(c, frame, get_value, delay_frame))
|
0 | repos/libcamera/utils/raspberrypi | repos/libcamera/utils/raspberrypi/ctt/ctt_tools.py | # SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
# camera tuning tool miscellaneous
import time
import re
import binascii
import os
import cv2
import numpy as np
import imutils
import sys
import matplotlib.pyplot as plt
from sklearn import cluster as cluster
from sklearn.neighbors import NearestCentroid as get_centroids
"""
This file contains some useful tools, the details of which aren't important to
understanding of the code. They ar collated here to attempt to improve code
readability in the main files.
"""
"""
obtain config values, unless it doesnt exist, in which case pick default
Furthermore, it can check if the input is the correct type
"""
def get_config(dictt, key, default, ttype):
    """Fetch dictt[key] validated/coerced as ttype, or default on failure.

    ttype is one of 'string', 'num', 'dict', 'list', 'bool'; any other value
    returns the raw entry unchecked.  If the key is missing or the value
    fails the type check, *default* is returned instead.
    """
    try:
        val = dictt[key]
        if ttype == 'string':
            val = str(val)
        elif ttype == 'num':
            # The str(type(...)) check deliberately accepts numpy integer and
            # float scalars as well as plain int/float (and rejects bool,
            # whose type name contains neither 'int' nor 'float').
            if 'int' not in str(type(val)):
                if 'float' not in str(type(val)):
                    raise ValueError
        elif ttype == 'dict':
            if not isinstance(val, dict):
                raise ValueError
        elif ttype == 'list':
            if not isinstance(val, list):
                raise ValueError
        elif ttype == 'bool':
            # Bug fix: this branch previously did 'ttype = int(bool(ttype))',
            # a dead store to the local that left the value unnormalised.
            # Coerce the configured value to 0/1 as clearly intended.
            val = int(bool(val))
        else:
            # Unknown ttype: return the raw value without validation.
            val = dictt[key]
    except (KeyError, ValueError):
        val = default
    return val
"""
argument parser
"""
def parse_input():
    """Parse sys.argv as '-flag value' pairs for the tuning tool.

    Recognised flags: -o output json (mandatory), -i input directory
    (mandatory), -c config file, -l log path, -t target (default "vc4").

    Returns (json_output, directory, config, log_path, target).
    Raises ArgError when arguments are unpaired or a mandatory one is absent.
    """
    arguments = sys.argv[1:]
    if len(arguments) % 2 != 0:
        # Fixed typo in the user-facing message ('arguent' -> 'argument').
        raise ArgError('\n\nERROR! Enter value for each argument passed.')
    params = arguments[0::2]
    vals = arguments[1::2]
    args_dict = dict(zip(params, vals))
    json_output = get_config(args_dict, '-o', None, 'string')
    directory = get_config(args_dict, '-i', None, 'string')
    config = get_config(args_dict, '-c', None, 'string')
    log_path = get_config(args_dict, '-l', None, 'string')
    target = get_config(args_dict, '-t', "vc4", 'string')
    if directory is None:
        raise ArgError('\n\nERROR! No input directory given.')
    if json_output is None:
        raise ArgError('\n\nERROR! No output json given.')
    return json_output, directory, config, log_path, target
"""
custom arg and macbeth error class
"""
class ArgError(Exception):
    """Raised by parse_input() for invalid or missing command-line arguments."""
    pass
class MacbethError(Exception):
    """Raised internally when macbeth chart location fails; carries the reason text."""
    pass
"""
correlation function to quantify match
"""
def correlate(im1, im2):
    """Return the Pearson correlation coefficient between two images.

    Both inputs are flattened, so any shapes with equal element counts work.
    """
    flat1, flat2 = im1.flatten(), im2.flatten()
    cor_matrix = np.corrcoef(flat1, flat2)
    return cor_matrix[0][1]
"""
get list of files from directory
"""
def get_photos(directory='photos'):
    """Return the filenames in *directory* that look like jpeg or dng images."""
    return [
        filename for filename in os.listdir(directory)
        if 'jp' in filename or '.dng' in filename
    ]
"""
display image for debugging... read at your own risk...
"""
def represent(img, name='image'):
    """Display *img* in a blocking matplotlib window (debug helper).

    *name* is only used by the commented-out opencv display path below.
    Shows the image greyscale and waits until the user closes the window.
    """
    # if type(img) == tuple or type(img) == list:
    #     for i in range(len(img)):
    #         name = 'image {}'.format(i)
    #         cv2.imshow(name, img[i])
    # else:
    #     cv2.imshow(name, img)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    # return 0
    """
    code above displays using opencv, but this doesn't catch users pressing 'x'
    with their mouse to close the window.... therefore matplotlib is used....
    (thanks a lot opencv)
    """
    # 22-row grid: image occupies the top 19 rows, the prompt the bottom row.
    grid = plt.GridSpec(22, 1)
    plt.subplot(grid[:19, 0])
    plt.imshow(img, cmap='gray')
    plt.axis('off')
    plt.subplot(grid[21, 0])
    plt.title('press \'q\' to continue')
    plt.axis('off')
    plt.show()
    # f = plt.figure()
    # ax = f.add_subplot(211)
    # ax2 = f.add_subplot(122)
    # ax.imshow(img, cmap='gray')
    # ax.axis('off')
    # ax2.set_figheight(2)
    # ax2.title('press \'q\' to continue')
    # ax2.axis('off')
    # plt.show()
"""
reshape image to fixed width without distorting
returns image and scale factor
"""
def reshape(img, width):
    """Scale *img* uniformly and return (resized_image, scale_factor).

    NOTE(review): the factor is computed from img.shape[0] (the row count),
    so despite the name it is the vertical dimension that ends up equal to
    *width*.  Existing callers (e.g. the macbeth locator) appear tuned to
    this, so the behaviour is preserved exactly.
    """
    scale = width / img.shape[0]
    resized = cv2.resize(img, None, fx=scale, fy=scale)
    return resized, scale
|
0 | repos/libcamera/utils/raspberrypi | repos/libcamera/utils/raspberrypi/ctt/ctt_geq.py | # SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
# camera tuning tool for GEQ (green equalisation)
from ctt_tools import *
import matplotlib.pyplot as plt
import scipy.optimize as optimize
"""
Uses green differences in macbeth patches to fit green equalisation threshold
model. Ideally, all macbeth chart centres would fall below the threshold as
these should be corrected by geq.
"""
def geq_fit(Cam, plot):
    """Fit the green equalisation (geq) threshold model.

    Gathers the difference between the two green bayer channels across all
    macbeth patches of all images, then fits an 'upper bound' line
    gdiff = slope*green + offset intended to contain (almost) every patch.
    Falls back to a standard least-squares fit if the asymmetric fit fails.

    Cam:  camera state object; Cam.imgs supplies the images and Cam.log
          accumulates a human-readable record of the fit.
    plot: if True, show diagnostic matplotlib plots of the fit(s).

    Returns (slope rounded to 5 d.p., offset as an int).
    """
    imgs = Cam.imgs
    """
    green equalisation to mitigate mazing.
    Fits geq model by looking at difference
    between greens in macbeth patches
    """
    geqs = np.array([geq(Cam, Img)*Img.againQ8_norm for Img in imgs])
    Cam.log += '\nProcessed all images'
    geqs = geqs.reshape((-1, 2))
    """
    data is sorted by green difference and top half is selected since higher
    green difference data define the decision boundary.
    """
    geqs = np.array(sorted(geqs, key=lambda r: np.abs((r[1]-r[0])/r[0])))
    length = len(geqs)
    g0 = geqs[length//2:, 0]
    g1 = geqs[length//2:, 1]
    gdiff = np.abs(g0-g1)
    """
    find linear fit by minimising asymmetric least square errors
    in order to cover most of the macbeth images.
    the philosophy here is that every macbeth patch should fall within the
    threshold, hence the upper bound approach
    """
    def f(params):
        # Asymmetric cost: a**2 + 0.95*|a|*a gives weight 1.95*a**2 to points
        # above the line (a > 0) and 0.05*a**2 to points below it.
        m, c = params
        a = gdiff - (m*g0+c)
        """
        asymmetric square error returns:
        1.95 * a**2 if a is positive
        0.05 * a**2 if a is negative
        """
        return(np.sum(a**2+0.95*np.abs(a)*a))
    initial_guess = [0.01, 500]
    """
    Nelder-Mead is usually not the most desirable optimisation method
    but has been chosen here due to its robustness to undifferentiability
    (is that a word?)
    """
    result = optimize.minimize(f, initial_guess, method='Nelder-Mead')
    """
    need to check if the fit worked correectly
    """
    if result.success:
        slope, offset = result.x
        Cam.log += '\nFit result: slope = {:.5f} '.format(slope)
        Cam.log += 'offset = {}'.format(int(offset))
        """
        optional plotting code
        """
        if plot:
            x = np.linspace(max(g0)*1.1, 100)
            y = slope*x + offset
            plt.title('GEQ Asymmetric \'Upper Bound\' Fit')
            plt.plot(x, y, color='red', ls='--', label='fit')
            plt.scatter(g0, gdiff, color='b', label='data')
            plt.ylabel('Difference in green channels')
            plt.xlabel('Green value')
        """
        This upper bound asymmetric gives correct order of magnitude values.
        The pipeline approximates a 1st derivative of a gaussian with some
        linear piecewise functions, introducing arbitrary cutoffs. For
        pessimistic geq, the model parameters have been increased by a
        scaling factor/constant.

        Feel free to tune these or edit the json files directly if you
        belive there are still mazing effects left (threshold too low) or if you
        think it is being overcorrected (threshold too high).
        We have gone for a one size fits most approach that will produce
        acceptable results in most applications.
        """
        slope *= 1.5
        offset += 201
        Cam.log += '\nFit after correction factors: slope = {:.5f}'.format(slope)
        Cam.log += ' offset = {}'.format(int(offset))
        """
        clamp offset at 0 due to pipeline considerations
        """
        if offset < 0:
            Cam.log += '\nOffset raised to 0'
            offset = 0
        """
        optional plotting code
        """
        if plot:
            y2 = slope*x + offset
            plt.plot(x, y2, color='green', ls='--', label='scaled fit')
            plt.grid()
            plt.legend()
            plt.show()
        """
        the case where for some reason the fit didn't work correctly

        Transpose data and then least squares linear fit. Transposing data
        makes it robust to many patches where green difference is the same
        since they only contribute to one error minimisation, instead of dragging
        the entire linear fit down.
        """
    else:
        print('\nError! Couldn\'t fit asymmetric lest squares')
        print(result.message)
        Cam.log += '\nWARNING: Asymmetric least squares fit failed! '
        Cam.log += 'Standard fit used could possibly lead to worse results'
        fit = np.polyfit(gdiff, g0, 1)
        offset, slope = -fit[1]/fit[0], 1/fit[0]
        Cam.log += '\nFit result: slope = {:.5f} '.format(slope)
        Cam.log += 'offset = {}'.format(int(offset))
        """
        optional plotting code
        """
        if plot:
            x = np.linspace(max(g0)*1.1, 100)
            y = slope*x + offset
            plt.title('GEQ Linear Fit')
            plt.plot(x, y, color='red', ls='--', label='fit')
            plt.scatter(g0, gdiff, color='b', label='data')
            plt.ylabel('Difference in green channels')
            plt.xlabel('Green value')
        """
        Scaling factors (see previous justification)
        The model here will not be an upper bound so scaling factors have
        been increased.
        This method of deriving geq model parameters is extremely arbitrary
        and undesirable.
        """
        slope *= 2.5
        offset += 301
        Cam.log += '\nFit after correction factors: slope = {:.5f}'.format(slope)
        Cam.log += ' offset = {}'.format(int(offset))
        if offset < 0:
            Cam.log += '\nOffset raised to 0'
            offset = 0
        """
        optional plotting code
        """
        if plot:
            y2 = slope*x + offset
            plt.plot(x, y2, color='green', ls='--', label='scaled fit')
            plt.legend()
            plt.grid()
            plt.show()
    return round(slope, 5), int(offset)
""""
Return green channels of macbeth patches
returns g0, g1 where
> g0 is green next to red
> g1 is green next to blue
"""
def geq(Cam, Img):
    """Return the mean green values of the 24 macbeth patches of *Img*.

    Output is a (24, 2) array whose columns hold the two green bayer
    channels (green-next-to-red, green-next-to-blue).
    """
    Cam.log += '\nProcessing image {}'.format(Img.name)
    # Reorder the bayer channels and keep only the two green planes.
    ordered = [Img.patches[i] for i in Img.order]
    green_a, green_b = ordered[1], ordered[2]
    means = [(np.mean(green_a[i]), np.mean(green_b[i])) for i in range(24)]
    Cam.log += '\n'
    return np.array(means)
|
0 | repos/libcamera/utils/raspberrypi | repos/libcamera/utils/raspberrypi/ctt/ctt_macbeth_locator.py | # SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
# camera tuning tool Macbeth chart locator
from ctt_ransac import *
from ctt_tools import *
import warnings
"""
NOTE: some custom functions have been used here to make the code more readable.
These are defined in tools.py if they are needed for reference.
"""
"""
Some inconsistencies between packages cause runtime warnings when running
the clustering algorithm. This catches these warnings so they don't flood the
output to the console
"""
def fxn():
    """Trigger a dummy RuntimeWarning.

    Called by find_macbeth() right after warnings.simplefilter("ignore") so
    the blanket filter is exercised before the clustering code runs.
    """
    warnings.warn("runtime", RuntimeWarning)
"""
Define the success message
"""
success_msg = 'Macbeth chart located successfully'  # exact text compared against get_macbeth_chart()'s msg return
def find_macbeth(Cam, img, mac_config=(0, 0)):
    """Locate the macbeth chart in *img*.

    Tries the full image first, then brightened copies (2x, 4x), then
    progressively smaller sub-windows, keeping whichever candidate
    correlates best against the reference chart.

    Cam: camera object providing Cam.path (reference chart location) and
        Cam.log (running text log).
    img: image (single channel) to search.
    mac_config: (small_chart, show) -- small_chart enables the extra
        small-scale searches; show displays the located chart centres.

    Returns the fitted (vertices, centres) coordinates, or None when no
    chart was located.
    """
    small_chart, show = mac_config
    print('Locating macbeth chart')
    Cam.log += '\nLocating macbeth chart'
    """
    catch the warnings
    """
    warnings.simplefilter("ignore")
    fxn()
    """
    Reference macbeth chart is created that will be correlated with the located
    macbeth chart guess to produce a confidence value for the match.
    """
    ref = cv2.imread(Cam.path + 'ctt_ref.pgm', flags=cv2.IMREAD_GRAYSCALE)
    ref_w = 120
    ref_h = 80
    rc1 = (0, 0)
    rc2 = (0, ref_h)
    rc3 = (ref_w, ref_h)
    rc4 = (ref_w, 0)
    ref_corns = np.array((rc1, rc2, rc3, rc4), np.float32)
    ref_data = (ref, ref_w, ref_h, ref_corns)
    """
    locate macbeth chart
    """
    cor, mac, coords, msg = get_macbeth_chart(img, ref_data)
    # Keep a list that will include this and any brightened up versions of
    # the image for reuse.
    all_images = [img]
    """
    following bits of code tries to fix common problems with simple
    techniques.
    If now or at any point the best correlation is of above 0.75, then
    nothing more is tried as this is a high enough confidence to ensure
    reliable macbeth square centre placement.
    """
    """
    brighten image 2x
    """
    if cor < 0.75:
        a = 2
        img_br = cv2.convertScaleAbs(img, alpha=a, beta=0)
        all_images.append(img_br)
        cor_b, mac_b, coords_b, msg_b = get_macbeth_chart(img_br, ref_data)
        if cor_b > cor:
            cor, mac, coords, msg = cor_b, mac_b, coords_b, msg_b
    """
    brighten image 4x
    """
    if cor < 0.75:
        a = 4
        img_br = cv2.convertScaleAbs(img, alpha=a, beta=0)
        all_images.append(img_br)
        cor_b, mac_b, coords_b, msg_b = get_macbeth_chart(img_br, ref_data)
        if cor_b > cor:
            cor, mac, coords, msg = cor_b, mac_b, coords_b, msg_b
    """
    In case macbeth chart is too small, take a selection of the image and
    attempt to locate macbeth chart within that. The scale increment is
    root 2
    """
    """
    These variables will be used to transform the found coordinates at smaller
    scales back into the original. If ii is still -1 after this section that
    means it was not successful
    """
    ii = -1
    w_best = 0
    h_best = 0
    d_best = 100
    """
    d_best records the scale of the best match. Macbeth charts are only looked
    for at one scale increment smaller than the current best match in order to avoid
    unecessarily searching for macbeth charts at small scales.
    If a macbeth chart ha already been found then set d_best to 0
    """
    if cor != 0:
        d_best = 0
    """
    scale 3/2 (approx root2)
    """
    if cor < 0.75:
        imgs = []
        """
        get size of image
        """
        shape = list(img.shape[:2])
        w, h = shape
        """
        set dimensions of the subselection and the step along each axis between
        selections
        """
        w_sel = int(2*w/3)
        h_sel = int(2*h/3)
        w_inc = int(w/6)
        h_inc = int(h/6)
        """
        for each subselection, look for a macbeth chart
        loop over this and any brightened up images that we made to increase the
        likelihood of success
        """
        for img_br in all_images:
            for i in range(3):
                for j in range(3):
                    w_s, h_s = i*w_inc, j*h_inc
                    img_sel = img_br[w_s:w_s+w_sel, h_s:h_s+h_sel]
                    cor_ij, mac_ij, coords_ij, msg_ij = get_macbeth_chart(img_sel, ref_data)
                    """
                    if the correlation is better than the best then record the
                    scale and current subselection at which macbeth chart was
                    found. Also record the coordinates, macbeth chart and message.
                    """
                    if cor_ij > cor:
                        cor = cor_ij
                        mac, coords, msg = mac_ij, coords_ij, msg_ij
                        ii, jj = i, j
                        w_best, h_best = w_inc, h_inc
                        d_best = 1
    """
    scale 2
    """
    if cor < 0.75:
        imgs = []
        shape = list(img.shape[:2])
        w, h = shape
        w_sel = int(w/2)
        h_sel = int(h/2)
        w_inc = int(w/8)
        h_inc = int(h/8)
        # Again, loop over any brightened up images as well
        for img_br in all_images:
            for i in range(5):
                for j in range(5):
                    w_s, h_s = i*w_inc, j*h_inc
                    img_sel = img_br[w_s:w_s+w_sel, h_s:h_s+h_sel]
                    cor_ij, mac_ij, coords_ij, msg_ij = get_macbeth_chart(img_sel, ref_data)
                    if cor_ij > cor:
                        cor = cor_ij
                        mac, coords, msg = mac_ij, coords_ij, msg_ij
                        ii, jj = i, j
                        w_best, h_best = w_inc, h_inc
                        d_best = 2
    """
    The following code checks for macbeth charts at even smaller scales. This
    slows the code down significantly and has therefore been omitted by default,
    however it is not unusably slow so might be useful if the macbeth chart
    is too small to be picked up to by the current subselections.
    Use this for macbeth charts with side lengths around 1/5 image dimensions
    (and smaller...?) it is, however, recommended that macbeth charts take up as
    large as possible a proportion of the image.
    """
    if small_chart:
        if cor < 0.75 and d_best > 1:
            imgs = []
            shape = list(img.shape[:2])
            w, h = shape
            w_sel = int(w/3)
            h_sel = int(h/3)
            w_inc = int(w/12)
            h_inc = int(h/12)
            for i in range(9):
                for j in range(9):
                    w_s, h_s = i*w_inc, j*h_inc
                    img_sel = img[w_s:w_s+w_sel, h_s:h_s+h_sel]
                    cor_ij, mac_ij, coords_ij, msg_ij = get_macbeth_chart(img_sel, ref_data)
                    if cor_ij > cor:
                        cor = cor_ij
                        mac, coords, msg = mac_ij, coords_ij, msg_ij
                        ii, jj = i, j
                        w_best, h_best = w_inc, h_inc
                        d_best = 3
        if cor < 0.75 and d_best > 2:
            imgs = []
            shape = list(img.shape[:2])
            w, h = shape
            w_sel = int(w/4)
            h_sel = int(h/4)
            w_inc = int(w/16)
            h_inc = int(h/16)
            for i in range(13):
                for j in range(13):
                    w_s, h_s = i*w_inc, j*h_inc
                    img_sel = img[w_s:w_s+w_sel, h_s:h_s+h_sel]
                    cor_ij, mac_ij, coords_ij, msg_ij = get_macbeth_chart(img_sel, ref_data)
                    if cor_ij > cor:
                        cor = cor_ij
                        mac, coords, msg = mac_ij, coords_ij, msg_ij
                        ii, jj = i, j
                        w_best, h_best = w_inc, h_inc
    """
    Transform coordinates from subselection to original image
    """
    if ii != -1:
        for a in range(len(coords)):
            for b in range(len(coords[a][0])):
                coords[a][0][b][1] += ii*w_best
                coords[a][0][b][0] += jj*h_best
    """
    initialise coords_fit variable
    """
    coords_fit = None
    # print('correlation: {}'.format(cor))
    """
    print error or success message
    """
    print(msg)
    Cam.log += '\n' + str(msg)
    if msg == success_msg:
        coords_fit = coords
        Cam.log += '\nMacbeth chart vertices:\n'
        # NOTE(review): the trailing 0 passed to format() below is unused.
        Cam.log += '{}'.format(2*np.round(coords_fit[0][0]), 0)
        """
        if correlation is lower than 0.75 there may be a risk of macbeth chart
        corners not having been located properly. It might be worth running
        with show set to true to check where the macbeth chart centres have
        been located.
        """
        print('Confidence: {:.3f}'.format(cor))
        Cam.log += '\nConfidence: {:.3f}'.format(cor)
        if cor < 0.75:
            print('Caution: Low confidence guess!')
            Cam.log += 'WARNING: Low confidence guess!'
        # cv2.imshow('MacBeth', mac)
        # represent(mac, 'MacBeth chart')
    """
    extract data from coords_fit and plot on original image
    """
    if show and coords_fit is not None:
        copy = img.copy()
        verts = coords_fit[0][0]
        cents = coords_fit[1][0]
        """
        draw circles at vertices of macbeth chart
        """
        for vert in verts:
            p = tuple(np.round(vert).astype(np.int32))
            cv2.circle(copy, p, 10, 1, -1)
        """
        draw circles at centres of squares
        """
        for i in range(len(cents)):
            cent = cents[i]
            p = tuple(np.round(cent).astype(np.int32))
            """
            draw black circle on white square, white circle on black square an
            grey circle everywhere else.
            """
            if i == 3:
                cv2.circle(copy, p, 8, 0, -1)
            elif i == 23:
                cv2.circle(copy, p, 8, 1, -1)
            else:
                cv2.circle(copy, p, 8, 0.5, -1)
        copy, _ = reshape(copy, 400)
        represent(copy)
    return(coords_fit)
def get_macbeth_chart(img, ref_data):
    """
    function returns coordinates of macbeth chart vertices and square centres,
    along with an error/success message for debugging purposes. Additionally,
    it scores the match with a confidence value.

    Brief explanation of the macbeth chart locating algorithm:
    - Find rectangles within image
    - Take rectangles within percentage offset of median perimeter. The
      assumption is that these will be the macbeth squares
    - For each potential square, find the 24 possible macbeth centre locations
      that would produce a square in that location
    - Find clusters of potential macbeth chart centres to find the potential
      macbeth centres with the most votes, i.e. the most likely ones
    - For each potential macbeth centre, use the centres of the squares that
      voted for it to find macbeth chart corners
    - For each set of corners, transform the possible match into normalised
      space and correlate with a reference chart to evaluate the match
    - Select the highest correlation as the macbeth chart match, returning the
      correlation as the confidence score
    """
    """
    get reference macbeth chart data
    """
    (ref, ref_w, ref_h, ref_corns) = ref_data
    """
    the code will raise and catch a MacbethError in case of a problem, trying
    to give some likely reasons why the problem occred, hence the try/except
    """
    try:
        """
        obtain image, convert to grayscale and normalise
        """
        src = img
        src, factor = reshape(src, 200)
        original = src.copy()
        a = 125/np.average(src)
        src_norm = cv2.convertScaleAbs(src, alpha=a, beta=0)
        """
        This code checks if there are seperate colour channels. In the past the
        macbeth locator ran on jpgs and this makes it robust to different
        filetypes. Note that running it on a jpg has 4x the pixels of the
        average bayer channel so coordinates must be doubled.

        This is best done in img_load.py in the get_patches method. The
        coordinates and image width, height must be divided by two if the
        macbeth locator has been run on a demosaicked image.
        """
        if len(src_norm.shape) == 3:
            src_bw = cv2.cvtColor(src_norm, cv2.COLOR_BGR2GRAY)
        else:
            src_bw = src_norm
        original_bw = src_bw.copy()
        """
        obtain image edges
        """
        sigma = 2
        src_bw = cv2.GaussianBlur(src_bw, (0, 0), sigma)
        t1, t2 = 50, 100
        edges = cv2.Canny(src_bw, t1, t2)
        """
        dilate edges to prevent self-intersections in contours
        """
        k_size = 2
        kernel = np.ones((k_size, k_size))
        its = 1
        edges = cv2.dilate(edges, kernel, iterations=its)
        """
        find Contours in image
        """
        conts, _ = cv2.findContours(edges, cv2.RETR_TREE,
                                    cv2.CHAIN_APPROX_NONE)
        if len(conts) == 0:
            raise MacbethError(
                '\nWARNING: No macbeth chart found!'
                '\nNo contours found in image\n'
                'Possible problems:\n'
                '- Macbeth chart is too dark or bright\n'
                '- Macbeth chart is occluded\n'
            )
        """
        find quadrilateral contours
        """
        epsilon = 0.07
        conts_per = []
        for i in range(len(conts)):
            per = cv2.arcLength(conts[i], True)
            poly = cv2.approxPolyDP(conts[i], epsilon*per, True)
            if len(poly) == 4 and cv2.isContourConvex(poly):
                conts_per.append((poly, per))
        if len(conts_per) == 0:
            raise MacbethError(
                '\nWARNING: No macbeth chart found!'
                '\nNo quadrilateral contours found'
                '\nPossible problems:\n'
                '- Macbeth chart is too dark or bright\n'
                '- Macbeth chart is occluded\n'
                '- Macbeth chart is out of camera plane\n'
            )
        """
        sort contours by perimeter and get perimeters within percent of median
        """
        conts_per = sorted(conts_per, key=lambda x: x[1])
        med_per = conts_per[int(len(conts_per)/2)][1]
        side = med_per/4
        perc = 0.1
        med_low, med_high = med_per*(1-perc), med_per*(1+perc)
        squares = []
        for i in conts_per:
            if med_low <= i[1] and med_high >= i[1]:
                squares.append(i[0])
        """
        obtain coordinates of nomralised macbeth and squares
        """
        square_verts, mac_norm = get_square_verts(0.06)
        """
        for each square guess, find 24 possible macbeth chart centres
        """
        mac_mids = []
        squares_raw = []
        for i in range(len(squares)):
            square = squares[i]
            squares_raw.append(square)
            """
            convert quads to rotated rectangles. This is required as the
            'squares' are usually quite irregular quadrilaterls, so performing
            a transform would result in exaggerated warping and inaccurate
            macbeth chart centre placement
            """
            rect = cv2.minAreaRect(square)
            square = cv2.boxPoints(rect).astype(np.float32)
            """
            reorder vertices to prevent 'hourglass shape'
            """
            square = sorted(square, key=lambda x: x[0])
            square_1 = sorted(square[:2], key=lambda x: x[1])
            square_2 = sorted(square[2:], key=lambda x: -x[1])
            square = np.array(np.concatenate((square_1, square_2)), np.float32)
            square = np.reshape(square, (4, 2)).astype(np.float32)
            squares[i] = square
            """
            find 24 possible macbeth chart centres by trasnforming normalised
            macbeth square vertices onto candidate square vertices found in image
            """
            for j in range(len(square_verts)):
                verts = square_verts[j]
                p_mat = cv2.getPerspectiveTransform(verts, square)
                mac_guess = cv2.perspectiveTransform(mac_norm, p_mat)
                mac_guess = np.round(mac_guess).astype(np.int32)
                """
                keep only if candidate macbeth is within image border
                (deprecated)
                """
                in_border = True
                # for p in mac_guess[0]:
                #     pptest = cv2.pointPolygonTest(
                #         img_con,
                #         tuple(p),
                #         False
                #     )
                #     if pptest == -1:
                #         in_border = False
                #         break
                if in_border:
                    mac_mid = np.mean(mac_guess,
                                      axis=1)
                    mac_mids.append([mac_mid, (i, j)])
        if len(mac_mids) == 0:
            raise MacbethError(
                '\nWARNING: No macbeth chart found!'
                '\nNo possible macbeth charts found within image'
                '\nPossible problems:\n'
                '- Part of the macbeth chart is outside the image\n'
                '- Quadrilaterals in image background\n'
            )
        """
        reshape data
        """
        for i in range(len(mac_mids)):
            mac_mids[i][0] = mac_mids[i][0][0]
        """
        find where midpoints cluster to identify most likely macbeth centres
        """
        clustering = cluster.AgglomerativeClustering(
            n_clusters=None,
            compute_full_tree=True,
            distance_threshold=side*2
        )
        mac_mids_list = [x[0] for x in mac_mids]
        if len(mac_mids_list) == 1:
            """
            special case of only one valid centre found (probably not needed)
            """
            clus_list = []
            clus_list.append([mac_mids, len(mac_mids)])
        else:
            clustering.fit(mac_mids_list)
            # try:
            #     clustering.fit(mac_mids_list)
            # except RuntimeWarning as error:
            #     return(0, None, None, error)
            """
            create list of all clusters
            """
            clus_list = []
            if clustering.n_clusters_ > 1:
                for i in range(clustering.labels_.max()+1):
                    indices = [j for j, x in enumerate(clustering.labels_) if x == i]
                    clus = []
                    for index in indices:
                        clus.append(mac_mids[index])
                    clus_list.append([clus, len(clus)])
                clus_list.sort(key=lambda x: -x[1])
            elif clustering.n_clusters_ == 1:
                """
                special case of only one cluster found
                """
                # print('only 1 cluster')
                clus_list.append([mac_mids, len(mac_mids)])
            else:
                raise MacbethError(
                    '\nWARNING: No macebth chart found!'
                    '\nNo clusters found'
                    '\nPossible problems:\n'
                    '- NA\n'
                )
        """
        keep only clusters with enough votes
        """
        clus_len_max = clus_list[0][1]
        clus_tol = 0.7
        for i in range(len(clus_list)):
            if clus_list[i][1] < clus_len_max * clus_tol:
                clus_list = clus_list[:i]
                break
            cent = np.mean(clus_list[i][0], axis=0)[0]
            clus_list[i].append(cent)
        """
        represent most popular cluster centroids
        """
        # copy = original_bw.copy()
        # copy = cv2.cvtColor(copy, cv2.COLOR_GRAY2RGB)
        # copy = cv2.resize(copy, None, fx=2, fy=2)
        # for clus in clus_list:
        #     centroid = tuple(2*np.round(clus[2]).astype(np.int32))
        #     cv2.circle(copy, centroid, 7, (255, 0, 0), -1)
        #     cv2.circle(copy, centroid, 2, (0, 0, 255), -1)
        # represent(copy)
        """
        get centres of each normalised square
        """
        reference = get_square_centres(0.06)
        """
        for each possible macbeth chart, transform image into
        normalised space and find correlation with reference
        """
        max_cor = 0
        best_map = None
        best_fit = None
        best_cen_fit = None
        best_ref_mat = None
        for clus in clus_list:
            clus = clus[0]
            sq_cents = []
            ref_cents = []
            i_list = [p[1][0] for p in clus]
            for point in clus:
                i, j = point[1]
                """
                remove any square that voted for two different points within
                the same cluster. This causes the same point in the image to be
                mapped to two different reference square centres, resulting in
                a very distorted perspective transform since cv2.findHomography
                simply minimises error.
                This phenomenon is not particularly likely to occur due to the
                enforced distance threshold in the clustering fit but it is
                best to keep this in just in case.
                """
                if i_list.count(i) == 1:
                    square = squares_raw[i]
                    sq_cent = np.mean(square, axis=0)
                    ref_cent = reference[j]
                    sq_cents.append(sq_cent)
                    ref_cents.append(ref_cent)
            """
            At least four squares need to have voted for a centre in
            order for a transform to be found
            """
            if len(sq_cents) < 4:
                raise MacbethError(
                    '\nWARNING: No macbeth chart found!'
                    '\nNot enough squares found'
                    '\nPossible problems:\n'
                    '- Macbeth chart is occluded\n'
                    '- Macbeth chart is too dark or bright\n'
                )
            ref_cents = np.array(ref_cents)
            sq_cents = np.array(sq_cents)
            """
            find best fit transform from normalised centres to image
            """
            h_mat, mask = cv2.findHomography(ref_cents, sq_cents)
            if 'None' in str(type(h_mat)):
                raise MacbethError(
                    '\nERROR\n'
                )
            """
            transform normalised corners and centres into image space
            """
            mac_fit = cv2.perspectiveTransform(mac_norm, h_mat)
            mac_cen_fit = cv2.perspectiveTransform(np.array([reference]), h_mat)
            """
            transform located corners into reference space
            """
            ref_mat = cv2.getPerspectiveTransform(
                mac_fit,
                np.array([ref_corns])
            )
            map_to_ref = cv2.warpPerspective(
                original_bw, ref_mat,
                (ref_w, ref_h)
            )
            """
            normalise brigthness
            """
            a = 125/np.average(map_to_ref)
            map_to_ref = cv2.convertScaleAbs(map_to_ref, alpha=a, beta=0)
            """
            find correlation with bw reference macbeth
            """
            cor = correlate(map_to_ref, ref)
            """
            keep only if best correlation
            """
            if cor > max_cor:
                max_cor = cor
                best_map = map_to_ref
                best_fit = mac_fit
                best_cen_fit = mac_cen_fit
                best_ref_mat = ref_mat
            """
            rotate macbeth by pi and recorrelate in case macbeth chart is
            upside-down
            """
            mac_fit_inv = np.array(
                ([[mac_fit[0][2], mac_fit[0][3],
                   mac_fit[0][0], mac_fit[0][1]]])
            )
            mac_cen_fit_inv = np.flip(mac_cen_fit, axis=1)
            ref_mat = cv2.getPerspectiveTransform(
                mac_fit_inv,
                np.array([ref_corns])
            )
            map_to_ref = cv2.warpPerspective(
                original_bw, ref_mat,
                (ref_w, ref_h)
            )
            a = 125/np.average(map_to_ref)
            map_to_ref = cv2.convertScaleAbs(map_to_ref, alpha=a, beta=0)
            cor = correlate(map_to_ref, ref)
            if cor > max_cor:
                max_cor = cor
                best_map = map_to_ref
                best_fit = mac_fit_inv
                best_cen_fit = mac_cen_fit_inv
                best_ref_mat = ref_mat
        """
        Check best match is above threshold
        """
        cor_thresh = 0.6
        if max_cor < cor_thresh:
            raise MacbethError(
                '\nWARNING: Correlation too low'
                '\nPossible problems:\n'
                '- Bad lighting conditions\n'
                '- Macbeth chart is occluded\n'
                '- Background is too noisy\n'
                '- Macbeth chart is out of camera plane\n'
            )
        """
        Following code is mostly representation for debugging purposes
        """
        """
        draw macbeth corners and centres on image
        """
        copy = original.copy()
        copy = cv2.resize(original, None, fx=2, fy=2)
        # print('correlation = {}'.format(round(max_cor, 2)))
        for point in best_fit[0]:
            point = np.array(point, np.float32)
            point = tuple(2*np.round(point).astype(np.int32))
            cv2.circle(copy, point, 4, (255, 0, 0), -1)
        for point in best_cen_fit[0]:
            point = np.array(point, np.float32)
            point = tuple(2*np.round(point).astype(np.int32))
            cv2.circle(copy, point, 4, (0, 0, 255), -1)
        # NOTE(review): the next two lines re-draw the last centre on a fresh
        # copy -- they appear redundant but are kept unchanged.
        copy = copy.copy()
        cv2.circle(copy, point, 4, (0, 0, 255), -1)
        """
        represent coloured macbeth in reference space
        """
        best_map_col = cv2.warpPerspective(
            original, best_ref_mat, (ref_w, ref_h)
        )
        best_map_col = cv2.resize(
            best_map_col, None, fx=4, fy=4
        )
        a = 125/np.average(best_map_col)
        best_map_col_norm = cv2.convertScaleAbs(
            best_map_col, alpha=a, beta=0
        )
        # cv2.imshow('Macbeth', best_map_col)
        # represent(copy)
        """
        rescale coordinates to original image size
        """
        fit_coords = (best_fit/factor, best_cen_fit/factor)
        return(max_cor, best_map_col_norm, fit_coords, success_msg)
        """
        catch macbeth errors and continue with code
        """
    except MacbethError as error:
        return(0, None, None, error)
|
0 | repos/libcamera/utils/raspberrypi | repos/libcamera/utils/raspberrypi/ctt/ctt_image_load.py | # SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019-2020, Raspberry Pi Ltd
#
# camera tuning tool image loading
from ctt_tools import *
from ctt_macbeth_locator import *
import json
import pyexiv2 as pyexif
import rawpy as raw
"""
Image class load image from raw data and extracts metadata.
Once image is extracted from data, it finds 24 16x16 patches for each
channel, centred at the macbeth chart squares
"""
class Image:
    """A raw capture plus its metadata, loaded from a BRCM raw buffer.

    Decodes the packed 10/12-bit bayer scanline data into four channel
    planes and extracts 16x16 patches centred on the located macbeth
    chart squares.
    """
    def __init__(self, buf):
        # buf: bytearray holding the raw BRCM dump (header + scanline data).
        self.buf = buf
        self.patches = None
        self.saturated = False

    '''
    obtain metadata from buffer
    '''
    def get_meta(self):
        # Fixed byte offsets into the BRCM header.
        # ba_to_b (imported from ctt_tools) presumably converts a byte slice
        # to an integer -- TODO confirm endianness against its definition.
        self.ver = ba_to_b(self.buf[4:5])
        self.w = ba_to_b(self.buf[0xd0:0xd2])
        self.h = ba_to_b(self.buf[0xd2:0xd4])
        self.pad = ba_to_b(self.buf[0xd4:0xd6])
        self.fmt = self.buf[0xf5]
        # fmt encodes the bit depth: 3 -> 10 bits, 4 -> 12 bits.
        self.sigbits = 2*self.fmt + 4
        self.pattern = self.buf[0xf4]
        self.exposure = ba_to_b(self.buf[0x90:0x94])
        self.againQ8 = ba_to_b(self.buf[0x94:0x96])
        # Analogue gain is stored in Q8 fixed point; normalise to a float.
        self.againQ8_norm = self.againQ8/256
        camName = self.buf[0x10:0x10+128]
        camName_end = camName.find(0x00)  # NUL-terminated camera name
        self.camName = self.buf[0x10:0x10+128][:camName_end].decode()
        """
        Channel order depending on bayer pattern
        """
        bayer_case = {
            0: (0, 1, 2, 3),  # red
            1: (2, 0, 3, 1),  # green next to red
            2: (3, 2, 1, 0),  # green next to blue
            3: (1, 0, 3, 2),  # blue
            128: (0, 1, 2, 3)  # arbitrary order for greyscale case
        }
        self.order = bayer_case[self.pattern]
        '''
        manual blacklevel - not robust
        '''
        if 'ov5647' in self.camName:
            self.blacklevel = 16
        else:
            self.blacklevel = 64
        # Shift up to the 16-bit range the unpacked pixel data uses.
        self.blacklevel_16 = self.blacklevel << (6)
        return 1

    '''
    print metadata for debug
    '''
    def print_meta(self):
        print('\nData:')
        print(' ver = {}'.format(self.ver))
        print(' w = {}'.format(self.w))
        print(' h = {}'.format(self.h))
        print(' pad = {}'.format(self.pad))
        print(' fmt = {}'.format(self.fmt))
        print(' sigbits = {}'.format(self.sigbits))
        print(' pattern = {}'.format(self.pattern))
        print(' exposure = {}'.format(self.exposure))
        print(' againQ8 = {}'.format(self.againQ8))
        print(' againQ8_norm = {}'.format(self.againQ8_norm))
        print(' camName = {}'.format(self.camName))
        print(' blacklevel = {}'.format(self.blacklevel))
        print(' blacklevel_16 = {}'.format(self.blacklevel_16))
        return 1

    """
    get image from raw scanline data
    """
    def get_image(self, raw):
        self.dptr = []
        """
        check if data is 10 or 12 bits
        """
        if self.sigbits == 10:
            """
            calc length of scanline
            """
            lin_len = ((((((self.w+self.pad+3)>>2)) * 5)+31)>>5) * 32
            """
            stack scan lines into matrix
            """
            raw = np.array(raw).reshape(-1, lin_len).astype(np.int64)[:self.h, ...]
            """
            separate 5 bits in each package, stopping when w is satisfied
            """
            ba0 = raw[..., 0:5*((self.w+3)>>2):5]
            ba1 = raw[..., 1:5*((self.w+3)>>2):5]
            ba2 = raw[..., 2:5*((self.w+3)>>2):5]
            ba3 = raw[..., 3:5*((self.w+3)>>2):5]
            ba4 = raw[..., 4:5*((self.w+3)>>2):5]
            """
            assemble 10 bit numbers
            """
            # Every 5th byte carries the low 2 bits of the preceding four
            # pixels; the <<6 scales results to the 16-bit range.
            ch0 = np.left_shift((np.left_shift(ba0, 2) + (ba4 % 4)), 6)
            ch1 = np.left_shift((np.left_shift(ba1, 2) + (np.right_shift(ba4, 2) % 4)), 6)
            ch2 = np.left_shift((np.left_shift(ba2, 2) + (np.right_shift(ba4, 4) % 4)), 6)
            ch3 = np.left_shift((np.left_shift(ba3, 2) + (np.right_shift(ba4, 6) % 4)), 6)
            """
            interleave bits
            """
            mat = np.empty((self.h, self.w), dtype=ch0.dtype)
            mat[..., 0::4] = ch0
            mat[..., 1::4] = ch1
            mat[..., 2::4] = ch2
            mat[..., 3::4] = ch3
            """
            There is som eleaking memory somewhere in the code. This code here
            seemed to make things good enough that the code would run for
            reasonable numbers of images, however this is techincally just a
            workaround. (sorry)
            """
            ba0, ba1, ba2, ba3, ba4 = None, None, None, None, None
            del ba0, ba1, ba2, ba3, ba4
            ch0, ch1, ch2, ch3 = None, None, None, None
            del ch0, ch1, ch2, ch3
            """
            same as before but 12 bit case
            """
        elif self.sigbits == 12:
            lin_len = ((((((self.w+self.pad+1)>>1)) * 3)+31)>>5) * 32
            raw = np.array(raw).reshape(-1, lin_len).astype(np.int64)[:self.h, ...]
            ba0 = raw[..., 0:3*((self.w+1)>>1):3]
            ba1 = raw[..., 1:3*((self.w+1)>>1):3]
            ba2 = raw[..., 2:3*((self.w+1)>>1):3]
            # Every 3rd byte carries the low 4 bits of the two preceding
            # pixels; the <<4 scales results to the 16-bit range.
            ch0 = np.left_shift((np.left_shift(ba0, 4) + ba2 % 16), 4)
            ch1 = np.left_shift((np.left_shift(ba1, 4) + (np.right_shift(ba2, 4)) % 16), 4)
            mat = np.empty((self.h, self.w), dtype=ch0.dtype)
            mat[..., 0::2] = ch0
            mat[..., 1::2] = ch1
        else:
            """
            data is neither 10 nor 12 or incorrect data
            """
            print('ERROR: wrong bit format, only 10 or 12 bit supported')
            return 0
        """
        separate bayer channels
        """
        c0 = mat[0::2, 0::2]
        c1 = mat[0::2, 1::2]
        c2 = mat[1::2, 0::2]
        c3 = mat[1::2, 1::2]
        self.channels = [c0, c1, c2, c3]
        return 1

    """
    obtain 16x16 patch centred at macbeth square centre for each channel
    """
    def get_patches(self, cen_coords, size=16):
        # NOTE(review): 'size' is currently unused -- the patch window below
        # is hard-coded to 16x16 via the -7:+9 slices.
        """
        obtain channel widths and heights
        """
        ch_w, ch_h = self.w, self.h
        cen_coords = list(np.array((cen_coords[0])).astype(np.int32))
        self.cen_coords = cen_coords
        """
        squares are ordered by stacking macbeth chart columns from
        left to right. Some useful patch indices:
            white = 3
            black = 23
            'reds' = 9, 10
            'blues' = 2, 5, 8, 20, 22
            'greens' = 6, 12, 17
            greyscale = 3, 7, 11, 15, 19, 23
        """
        all_patches = []
        for ch in self.channels:
            ch_patches = []
            for cen in cen_coords:
                '''
                macbeth centre is placed at top left of central 2x2 patch
                to account for rounding
                Patch pixels are sorted by pixel brightness so spatial
                information is lost.
                '''
                patch = ch[cen[1]-7:cen[1]+9, cen[0]-7:cen[0]+9].flatten()
                patch.sort()
                # Saturation check: the 5th brightest pixel at full scale
                # marks this capture as (partially) saturated.
                if patch[-5] == (2**self.sigbits-1)*2**(16-self.sigbits):
                    self.saturated = True
                ch_patches.append(patch)
                # print('\nNew Patch\n')
            all_patches.append(ch_patches)
            # print('\n\nNew Channel\n\n')
        self.patches = all_patches
        return 1
def brcm_load_image(Cam, im_str):
    """
    Load image where raw data and metadata is in the BRCM format

    Cam    -- tuning state object; Cam.log accumulates warnings
    im_str -- path of the file to load

    Returns a populated Image object, or 0 on any failure (missing file,
    unreadable data, or no Broadcom header present).
    """
    try:
        """
        create byte array
        """
        with open(im_str, 'rb') as image:
            f = image.read()
            b = bytearray(f)
        """
        return error if incorrect image address
        """
    except FileNotFoundError:
        print('\nERROR:\nInvalid image address')
        Cam.log += '\nWARNING: Invalid image address'
        return 0
    """
    return error if problem reading file
    """
    # NOTE(review): file.read() never returns None, so this branch is
    # effectively dead code; kept as a belt-and-braces guard.
    # (Also note the pre-existing "readin" typo in the log message.)
    if f is None:
        print('\nERROR:\nProblem reading file')
        Cam.log += '\nWARNING: Problem readin file'
        return 0
    # print('\nLooking for EOI and BRCM header')
    """
    find end of image followed by BRCM header by turning
    bytearray into hex string and string matching with regexp
    """
    start = -1
    # JPEG end-of-image marker (ff d9) immediately followed by '@BRCM'
    match = bytearray(b'\xff\xd9@BRCM')
    match_str = binascii.hexlify(match)
    b_str = binascii.hexlify(b)
    """
    note index is divided by two to go from string to hex
    """
    # each byte becomes two hex characters, hence the //2 back-conversion
    indices = [m.start()//2 for m in re.finditer(match_str, b_str)]
    # print(indices)
    try:
        # +3 offsets into the matched '\xff\xd9@BRCM' marker
        start = indices[0] + 3
    except IndexError:
        print('\nERROR:\nNo Broadcom header found')
        Cam.log += '\nWARNING: No Broadcom header found!'
        return 0
    """
    extract data after header
    """
    # print('\nExtracting data after header')
    # the 32768 bytes after the marker hold the metadata block; the raw
    # bayer data follows it (see the slicing of `raw` below)
    buf = b[start:start+32768]
    Img = Image(buf)
    Img.str = im_str
    # print('Data found successfully')
    """
    obtain metadata
    """
    # print('\nReading metadata')
    Img.get_meta()
    Cam.log += '\nExposure : {} us'.format(Img.exposure)
    Cam.log += '\nNormalised gain : {}'.format(Img.againQ8_norm)
    # print('Metadata read successfully')
    """
    obtain raw image data
    """
    # print('\nObtaining raw image data')
    raw = b[start+32768:]
    Img.get_image(raw)
    """
    delete raw to stop memory errors
    """
    raw = None
    del raw
    # print('Raw image data obtained successfully')
    return Img
def dng_load_image(Cam, im_str):
    """
    Load an image stored as a DNG (or TIFF/EP-style) file, populating an
    Image object with its metadata and the four bayer channels.

    Cam    -- tuning state object; Cam.log accumulates errors
    im_str -- path of the DNG file

    Returns the populated Image object; re-raises on unreadable or
    incompatible files after logging.
    """
    try:
        Img = Image(None)
        # RawPy doesn't load all the image tags that we need, so we use py3exiv2
        metadata = pyexif.ImageMetadata(im_str)
        metadata.read()
        Img.ver = 100  # random value
        """
        The DNG and TIFF/EP specifications use different IFDs to store the raw
        image data and the Exif tags. DNG stores them in a SubIFD and in an Exif
        IFD respectively (named "SubImage1" and "Photo" by pyexiv2), while
        TIFF/EP stores them both in IFD0 (name "Image"). Both are used in "DNG"
        files, with libcamera-apps following the DNG recommendation and
        applications based on picamera2 following TIFF/EP.
        This code detects which tags are being used, and therefore extracts the
        correct values.
        """
        try:
            Img.w = metadata['Exif.SubImage1.ImageWidth'].value
            subimage = "SubImage1"
            photo = "Photo"
        except KeyError:
            Img.w = metadata['Exif.Image.ImageWidth'].value
            subimage = "Image"
            photo = "Image"
        Img.pad = 0
        Img.h = metadata[f'Exif.{subimage}.ImageLength'].value
        # significant bit depth derived from the sensor white level
        white = metadata[f'Exif.{subimage}.WhiteLevel'].value
        Img.sigbits = int(white).bit_length()
        Img.fmt = (Img.sigbits - 4) // 2
        # exposure time in the file is in seconds; store microseconds
        Img.exposure = int(metadata[f'Exif.{photo}.ExposureTime'].value * 1000000)
        # gain in Q8 fixed point: ISO 100 maps to 256
        Img.againQ8 = metadata[f'Exif.{photo}.ISOSpeedRatings'].value * 256 / 100
        Img.againQ8_norm = Img.againQ8 / 256
        Img.camName = metadata['Exif.Image.Model'].value
        Img.blacklevel = int(metadata[f'Exif.{subimage}.BlackLevel'].value[0])
        # black level rescaled to the 16-bit range used internally
        Img.blacklevel_16 = Img.blacklevel << (16 - Img.sigbits)
        # map the CFA pattern string to (bayer pattern id, channel order)
        bayer_case = {
            '0 1 1 2': (0, (0, 1, 2, 3)),
            '1 2 0 1': (1, (2, 0, 3, 1)),
            '2 1 1 0': (2, (3, 2, 1, 0)),
            '1 0 2 1': (3, (1, 0, 3, 2))
        }
        cfa_pattern = metadata[f'Exif.{subimage}.CFAPattern'].value
        Img.pattern = bayer_case[cfa_pattern][0]
        Img.order = bayer_case[cfa_pattern][1]
        # Now use RawPy to get the raw Bayer pixels
        raw_im = raw.imread(im_str)
        raw_data = raw_im.raw_image
        # rescale each 2x2-subsampled bayer plane up to 16 bits
        shift = 16 - Img.sigbits
        c0 = np.left_shift(raw_data[0::2, 0::2].astype(np.int64), shift)
        c1 = np.left_shift(raw_data[0::2, 1::2].astype(np.int64), shift)
        c2 = np.left_shift(raw_data[1::2, 0::2].astype(np.int64), shift)
        c3 = np.left_shift(raw_data[1::2, 1::2].astype(np.int64), shift)
        Img.channels = [c0, c1, c2, c3]
        Img.rgb = raw_im.postprocess()
    except Exception:
        print("\nERROR: failed to load DNG file", im_str)
        print("Either file does not exist or is incompatible")
        Cam.log += '\nERROR: DNG file does not exist or is incompatible'
        raise
    return Img
'''
load image from file location and perform calibration
check correct filetype

mac boolean is true if image is expected to contain macbeth chart and false
if not (alsc images don't have macbeth charts)
'''
def load_image(Cam, im_str, mac_config=None, show=False, mac=True, show_meta=False):
    """
    Load a .jpg/.jpeg/.brcm/.dng image and, when requested, locate the
    macbeth chart and extract its patches.

    Cam        -- tuning state object (logging, macbeth search config)
    im_str     -- image file path
    mac_config -- configuration forwarded to find_macbeth
    show       -- show a debug rendering (only when run as a script)
    mac        -- whether a macbeth chart is expected in the image
    show_meta  -- print image metadata after loading

    Returns the Image object on success, or 0 on any failure.
    """
    """
    check image is correct filetype
    """
    if '.jpg' in im_str or '.jpeg' in im_str or '.brcm' in im_str or '.dng' in im_str:
        if '.dng' in im_str:
            Img = dng_load_image(Cam, im_str)
        else:
            Img = brcm_load_image(Cam, im_str)
        """
        handle errors smoothly if loading image failed
        """
        if Img == 0:
            return 0
        if show_meta:
            Img.print_meta()
        if mac:
            """
            find macbeth centres, discarding images that are too dark or light
            """
            # average of the four bayer channels, normalised to [0, 1]
            av_chan = (np.mean(np.array(Img.channels), axis=0)/(2**16))
            av_val = np.mean(av_chan)
            # print(av_val)
            # reject images whose mean level is within 1/64 of black
            if av_val < Img.blacklevel_16/(2**16)+1/64:
                macbeth = None
                print('\nError: Image too dark!')
                Cam.log += '\nWARNING: Image too dark!'
            else:
                macbeth = find_macbeth(Cam, av_chan, mac_config)
            """
            if no macbeth found return error
            """
            if macbeth is None:
                print('\nERROR: No macbeth chart found')
                return 0
            mac_cen_coords = macbeth[1]
            # print('\nMacbeth centres located successfully')
            """
            obtain image patches
            """
            # print('\nObtaining image patches')
            Img.get_patches(mac_cen_coords)
            if Img.saturated:
                print('\nERROR: Macbeth patches have saturated')
                Cam.log += '\nWARNING: Macbeth patches have saturated!'
                return 0
        """
        clear memory
        """
        Img.buf = None
        del Img.buf
        # print('Image patches obtained successfully')
        """
        optional debug
        """
        if show and __name__ == '__main__':
            copy = sum(Img.channels)/2**18
            copy = np.reshape(copy, (Img.h//2, Img.w//2)).astype(np.float64)
            copy, _ = reshape(copy, 800)
            represent(copy)
        return Img
        """
        return error if incorrect filetype
        """
    else:
        # print('\nERROR:\nInvalid file extension')
        return 0
"""
bytearray splice to number little endian
"""
def ba_to_b(b):
total = 0
for i in range(len(b)):
total += 256**i * b[i]
return total
|
0 | repos/libcamera/utils/raspberrypi | repos/libcamera/utils/raspberrypi/ctt/ctt_awb.py | # SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
# camera tuning tool for AWB
from ctt_image_load import *
import matplotlib.pyplot as plt
from bisect import bisect_left
from scipy.optimize import fmin
"""
obtain piecewise linear approximation for colour curve
"""
def awb(Cam, cal_cr_list, cal_cb_list, plot, grid_size):
imgs = Cam.imgs
"""
condense alsc calibration tables into one dictionary
"""
if cal_cr_list is None:
colour_cals = None
else:
colour_cals = {}
for cr, cb in zip(cal_cr_list, cal_cb_list):
cr_tab = cr['table']
cb_tab = cb['table']
"""
normalise tables so min value is 1
"""
cr_tab = cr_tab/np.min(cr_tab)
cb_tab = cb_tab/np.min(cb_tab)
colour_cals[cr['ct']] = [cr_tab, cb_tab]
"""
obtain data from greyscale macbeth patches
"""
rb_raw = []
rbs_hat = []
for Img in imgs:
Cam.log += '\nProcessing '+Img.name
"""
get greyscale patches with alsc applied if alsc enabled.
Note: if alsc is disabled then colour_cals will be set to None and the
function will just return the greyscale patches
"""
r_patchs, b_patchs, g_patchs = get_alsc_patches(Img, colour_cals, grid_size=grid_size)
"""
calculate ratio of r, b to g
"""
r_g = np.mean(r_patchs/g_patchs)
b_g = np.mean(b_patchs/g_patchs)
Cam.log += '\n r : {:.4f} b : {:.4f}'.format(r_g, b_g)
"""
The curve tends to be better behaved in so-called hatspace.
R, B, G represent the individual channels. The colour curve is plotted in
r, b space, where:
r = R/G
b = B/G
This will be referred to as dehatspace... (sorry)
Hatspace is defined as:
r_hat = R/(R+B+G)
b_hat = B/(R+B+G)
To convert from dehatspace to hastpace (hat operation):
r_hat = r/(1+r+b)
b_hat = b/(1+r+b)
To convert from hatspace to dehatspace (dehat operation):
r = r_hat/(1-r_hat-b_hat)
b = b_hat/(1-r_hat-b_hat)
Proof is left as an excercise to the reader...
Throughout the code, r and b are sometimes referred to as r_g and b_g
as a reminder that they are ratios
"""
r_g_hat = r_g/(1+r_g+b_g)
b_g_hat = b_g/(1+r_g+b_g)
Cam.log += '\n r_hat : {:.4f} b_hat : {:.4f}'.format(r_g_hat, b_g_hat)
rbs_hat.append((r_g_hat, b_g_hat, Img.col))
rb_raw.append((r_g, b_g))
Cam.log += '\n'
Cam.log += '\nFinished processing images'
"""
sort all lits simultaneously by r_hat
"""
rbs_zip = list(zip(rbs_hat, rb_raw))
rbs_zip.sort(key=lambda x: x[0][0])
rbs_hat, rb_raw = list(zip(*rbs_zip))
"""
unzip tuples ready for processing
"""
rbs_hat = list(zip(*rbs_hat))
rb_raw = list(zip(*rb_raw))
"""
fit quadratic fit to r_g hat and b_g_hat
"""
a, b, c = np.polyfit(rbs_hat[0], rbs_hat[1], 2)
Cam.log += '\nFit quadratic curve in hatspace'
"""
the algorithm now approximates the shortest distance from each point to the
curve in dehatspace. Since the fit is done in hatspace, it is easier to
find the actual shortest distance in hatspace and use the projection back
into dehatspace as an overestimate.
The distance will be used for two things:
1) In the case that colour temperature does not strictly decrease with
increasing r/g, the closest point to the line will be chosen out of an
increasing pair of colours.
2) To calculate transverse negative an dpositive, the maximum positive
and negative distance from the line are chosen. This benefits from the
overestimate as the transverse pos/neg are upper bound values.
"""
"""
define fit function
"""
def f(x):
return a*x**2 + b*x + c
"""
iterate over points (R, B are x and y coordinates of points) and calculate
distance to line in dehatspace
"""
dists = []
for i, (R, B) in enumerate(zip(rbs_hat[0], rbs_hat[1])):
"""
define function to minimise as square distance between datapoint and
point on curve. Squaring is monotonic so minimising radius squared is
equivalent to minimising radius
"""
def f_min(x):
y = f(x)
return((x-R)**2+(y-B)**2)
"""
perform optimisation with scipy.optmisie.fmin
"""
x_hat = fmin(f_min, R, disp=0)[0]
y_hat = f(x_hat)
"""
dehat
"""
x = x_hat/(1-x_hat-y_hat)
y = y_hat/(1-x_hat-y_hat)
rr = R/(1-R-B)
bb = B/(1-R-B)
"""
calculate euclidean distance in dehatspace
"""
dist = ((x-rr)**2+(y-bb)**2)**0.5
"""
return negative if point is below the fit curve
"""
if (x+y) > (rr+bb):
dist *= -1
dists.append(dist)
Cam.log += '\nFound closest point on fit line to each point in dehatspace'
"""
calculate wiggle factors in awb. 10% added since this is an upper bound
"""
transverse_neg = - np.min(dists) * 1.1
transverse_pos = np.max(dists) * 1.1
Cam.log += '\nTransverse pos : {:.5f}'.format(transverse_pos)
Cam.log += '\nTransverse neg : {:.5f}'.format(transverse_neg)
"""
set minimum transverse wiggles to 0.1 .
Wiggle factors dictate how far off of the curve the algorithm searches. 0.1
is a suitable minimum that gives better results for lighting conditions not
within calibration dataset. Anything less will generalise poorly.
"""
if transverse_pos < 0.01:
transverse_pos = 0.01
Cam.log += '\nForced transverse pos to 0.01'
if transverse_neg < 0.01:
transverse_neg = 0.01
Cam.log += '\nForced transverse neg to 0.01'
"""
generate new b_hat values at each r_hat according to fit
"""
r_hat_fit = np.array(rbs_hat[0])
b_hat_fit = a*r_hat_fit**2 + b*r_hat_fit + c
"""
transform from hatspace to dehatspace
"""
r_fit = r_hat_fit/(1-r_hat_fit-b_hat_fit)
b_fit = b_hat_fit/(1-r_hat_fit-b_hat_fit)
c_fit = np.round(rbs_hat[2], 0)
"""
round to 4dp
"""
r_fit = np.where((1000*r_fit) % 1 <= 0.05, r_fit+0.0001, r_fit)
r_fit = np.where((1000*r_fit) % 1 >= 0.95, r_fit-0.0001, r_fit)
b_fit = np.where((1000*b_fit) % 1 <= 0.05, b_fit+0.0001, b_fit)
b_fit = np.where((1000*b_fit) % 1 >= 0.95, b_fit-0.0001, b_fit)
r_fit = np.round(r_fit, 4)
b_fit = np.round(b_fit, 4)
"""
The following code ensures that colour temperature decreases with
increasing r/g
"""
"""
iterate backwards over list for easier indexing
"""
i = len(c_fit) - 1
while i > 0:
if c_fit[i] > c_fit[i-1]:
Cam.log += '\nColour temperature increase found\n'
Cam.log += '{} K at r = {} to '.format(c_fit[i-1], r_fit[i-1])
Cam.log += '{} K at r = {}'.format(c_fit[i], r_fit[i])
"""
if colour temperature increases then discard point furthest from
the transformed fit (dehatspace)
"""
error_1 = abs(dists[i-1])
error_2 = abs(dists[i])
Cam.log += '\nDistances from fit:\n'
Cam.log += '{} K : {:.5f} , '.format(c_fit[i], error_1)
Cam.log += '{} K : {:.5f}'.format(c_fit[i-1], error_2)
"""
find bad index
note that in python false = 0 and true = 1
"""
bad = i - (error_1 < error_2)
Cam.log += '\nPoint at {} K deleted as '.format(c_fit[bad])
Cam.log += 'it is furthest from fit'
"""
delete bad point
"""
r_fit = np.delete(r_fit, bad)
b_fit = np.delete(b_fit, bad)
c_fit = np.delete(c_fit, bad).astype(np.uint16)
"""
note that if a point has been discarded then the length has decreased
by one, meaning that decreasing the index by one will reassess the kept
point against the next point. It is therefore possible, in theory, for
two adjacent points to be discarded, although probably rare
"""
i -= 1
"""
return formatted ct curve, ordered by increasing colour temperature
"""
ct_curve = list(np.array(list(zip(b_fit, r_fit, c_fit))).flatten())[::-1]
Cam.log += '\nFinal CT curve:'
for i in range(len(ct_curve)//3):
j = 3*i
Cam.log += '\n ct: {} '.format(ct_curve[j])
Cam.log += ' r: {} '.format(ct_curve[j+1])
Cam.log += ' b: {} '.format(ct_curve[j+2])
"""
plotting code for debug
"""
if plot:
x = np.linspace(np.min(rbs_hat[0]), np.max(rbs_hat[0]), 100)
y = a*x**2 + b*x + c
plt.subplot(2, 1, 1)
plt.title('hatspace')
plt.plot(rbs_hat[0], rbs_hat[1], ls='--', color='blue')
plt.plot(x, y, color='green', ls='-')
plt.scatter(rbs_hat[0], rbs_hat[1], color='red')
for i, ct in enumerate(rbs_hat[2]):
plt.annotate(str(ct), (rbs_hat[0][i], rbs_hat[1][i]))
plt.xlabel('$\\hat{r}$')
plt.ylabel('$\\hat{b}$')
"""
optional set axes equal to shortest distance so line really does
looks perpendicular and everybody is happy
"""
# ax = plt.gca()
# ax.set_aspect('equal')
plt.grid()
plt.subplot(2, 1, 2)
plt.title('dehatspace - indoors?')
plt.plot(r_fit, b_fit, color='blue')
plt.scatter(rb_raw[0], rb_raw[1], color='green')
plt.scatter(r_fit, b_fit, color='red')
for i, ct in enumerate(c_fit):
plt.annotate(str(ct), (r_fit[i], b_fit[i]))
plt.xlabel('$r$')
plt.ylabel('$b$')
"""
optional set axes equal to shortest distance so line really does
looks perpendicular and everybody is happy
"""
# ax = plt.gca()
# ax.set_aspect('equal')
plt.subplots_adjust(hspace=0.5)
plt.grid()
plt.show()
"""
end of plotting code
"""
return(ct_curve, np.round(transverse_pos, 5), np.round(transverse_neg, 5))
"""
obtain greyscale patches and perform alsc colour correction
"""
def get_alsc_patches(Img, colour_cals, grey=True, grid_size=(16, 12)):
"""
get patch centre coordinates, image colour and the actual
patches for each channel, remembering to subtract blacklevel
If grey then only greyscale patches considered
"""
grid_w, grid_h = grid_size
if grey:
cen_coords = Img.cen_coords[3::4]
col = Img.col
patches = [np.array(Img.patches[i]) for i in Img.order]
r_patchs = patches[0][3::4] - Img.blacklevel_16
b_patchs = patches[3][3::4] - Img.blacklevel_16
"""
note two green channels are averages
"""
g_patchs = (patches[1][3::4]+patches[2][3::4])/2 - Img.blacklevel_16
else:
cen_coords = Img.cen_coords
col = Img.col
patches = [np.array(Img.patches[i]) for i in Img.order]
r_patchs = patches[0] - Img.blacklevel_16
b_patchs = patches[3] - Img.blacklevel_16
g_patchs = (patches[1]+patches[2])/2 - Img.blacklevel_16
if colour_cals is None:
return r_patchs, b_patchs, g_patchs
"""
find where image colour fits in alsc colour calibration tables
"""
cts = list(colour_cals.keys())
pos = bisect_left(cts, col)
"""
if img colour is below minimum or above maximum alsc calibration colour, simply
pick extreme closest to img colour
"""
if pos % len(cts) == 0:
"""
this works because -0 = 0 = first and -1 = last index
"""
col_tabs = np.array(colour_cals[cts[-pos//len(cts)]])
"""
else, perform linear interpolation between existing alsc colour
calibration tables
"""
else:
bef = cts[pos-1]
aft = cts[pos]
da = col-bef
db = aft-col
bef_tabs = np.array(colour_cals[bef])
aft_tabs = np.array(colour_cals[aft])
col_tabs = (bef_tabs*db + aft_tabs*da)/(da+db)
col_tabs = np.reshape(col_tabs, (2, grid_h, grid_w))
"""
calculate dx, dy used to calculate alsc table
"""
w, h = Img.w/2, Img.h/2
dx, dy = int(-(-(w-1)//grid_w)), int(-(-(h-1)//grid_h))
"""
make list of pairs of gains for each patch by selecting the correct value
in alsc colour calibration table
"""
patch_gains = []
for cen in cen_coords:
x, y = cen[0]//dx, cen[1]//dy
# We could probably do with some better spatial interpolation here?
col_gains = (col_tabs[0][y][x], col_tabs[1][y][x])
patch_gains.append(col_gains)
"""
multiply the r and b channels in each patch by the respective gain, finally
performing the alsc colour correction
"""
for i, gains in enumerate(patch_gains):
r_patchs[i] = r_patchs[i] * gains[0]
b_patchs[i] = b_patchs[i] * gains[1]
"""
return greyscale patches, g channel and correct r, b channels
"""
return r_patchs, b_patchs, g_patchs
|
0 | repos/libcamera/utils/raspberrypi | repos/libcamera/utils/raspberrypi/ctt/ctt_visualise.py | """
Some code that will save virtual macbeth charts that show the difference between optimised matrices and non optimised matrices
The function creates an image that is 1550 by 1050 pixels wide, and fills it with patches which are 200x200 pixels in size
Each patch contains the ideal color, the color from the original matrix, and the color from the final matrix
_________________
| |
| Ideal Color |
|_______________|
| Old | new |
| Color | Color |
|_______|_______|
Nice way of showing how the optimisation helps change the colors and the color matrices
"""
import numpy as np
from PIL import Image
def visualise_macbeth_chart(macbeth_rgb, original_rgb, new_rgb, output_filename):
    """Render a virtual 6x4 macbeth chart comparing ideal, pre- and
    post-optimisation colours, and save it as a PNG.

    Each 200x200 patch is laid out as:
        top half          -- the ideal reference colour
        bottom-left 1/4   -- colour produced by the original matrix
        bottom-right 1/4  -- colour produced by the optimised matrix
    Patches are separated by 50px black gaps, as on a real chart.
    """
    canvas = np.zeros((1050, 1550, 3), dtype=np.uint8)
    patch = -1
    for chart_col in range(6):
        for chart_row in range(4):  # 6 x 4 grid of macbeth squares
            patch += 1
            top = 50 + 250 * chart_row    # offset along the first image axis
            left = 50 + 250 * chart_col   # offset along the second image axis
            # upper half: ideal reference colour (100 x 200 block)
            canvas[top:top + 100, left:left + 200] = macbeth_rgb[patch]
            # lower-left quarter: colour from the unoptimised matrix
            canvas[top + 100:top + 200, left:left + 100] = original_rgb[patch]
            # lower-right quarter: colour from the optimised matrix
            canvas[top + 100:top + 200, left + 100:left + 200] = new_rgb[patch]
    Image.fromarray(canvas, 'RGB').save(str(output_filename) + 'Generated Macbeth Chart.png')
|
0 | repos/libcamera/utils/raspberrypi | repos/libcamera/utils/raspberrypi/ctt/colors.py | # Program to convert from RGB to LAB color space
def RGB_to_LAB(RGB): # where RGB is a 1x3 array. e.g RGB = [100, 255, 230]
num = 0
XYZ = [0, 0, 0]
# converted all the three R, G, B to X, Y, Z
X = RGB[0] * 0.4124 + RGB[1] * 0.3576 + RGB[2] * 0.1805
Y = RGB[0] * 0.2126 + RGB[1] * 0.7152 + RGB[2] * 0.0722
Z = RGB[0] * 0.0193 + RGB[1] * 0.1192 + RGB[2] * 0.9505
XYZ[0] = X / 255 * 100
XYZ[1] = Y / 255 * 100 # XYZ Must be in range 0 -> 100, so scale down from 255
XYZ[2] = Z / 255 * 100
XYZ[0] = XYZ[0] / 95.047 # ref_X = 95.047 Observer= 2°, Illuminant= D65
XYZ[1] = XYZ[1] / 100.0 # ref_Y = 100.000
XYZ[2] = XYZ[2] / 108.883 # ref_Z = 108.883
num = 0
for value in XYZ:
if value > 0.008856:
value = value ** (0.3333333333333333)
else:
value = (7.787 * value) + (16 / 116)
XYZ[num] = value
num = num + 1
# L, A, B, values calculated below
L = (116 * XYZ[1]) - 16
a = 500 * (XYZ[0] - XYZ[1])
b = 200 * (XYZ[1] - XYZ[2])
return [L, a, b]
|
0 | repos/libcamera/utils/raspberrypi | repos/libcamera/utils/raspberrypi/ctt/ctt_cac.py | # SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2023, Raspberry Pi Ltd
#
# ctt_cac.py - CAC (Chromatic Aberration Correction) tuning tool
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from ctt_dots_locator import find_dots_locations
# This is the wrapper file that creates a JSON entry for you to append
# to your camera tuning file.
# It calculates the chromatic aberration at different points throughout
# the image and uses that to produce a martix that can then be used
# in the camera tuning files to correct this aberration.
def pprint_array(array):
    # Function to print the array in a tidier format
    """
    Format a 2-D numpy array as comma-separated rows, rounded to 2 dp,
    for embedding in a tuning-file JSON entry.
    """
    # no-op self-assignment, kept as-is
    array = array
    output = ""
    for i in range(len(array)):
        for j in range(len(array[0])):
            output += str(round(array[i, j], 2)) + ", "
        # Add the necessary indentation to the array
        output += "\n "
        # Cut off the end of the array (nicely formats it)
        # NOTE(review): the [:-22] slice assumes the trailing ", " plus the
        # newline-and-padding string total 22 characters — confirm the
        # padding width of the string above against the original file.
    return output[:-22]
def plot_shifts(red_shifts, blue_shifts):
    # If users want, they can pass a command line option to show the shifts on a graph
    # Can be useful to check that the functions are all working, and that the sample
    # images are doing the right thing
    """
    Show 3-D scatter plots of the per-dot chromatic shifts.

    red_shifts/blue_shifts -- rows of [x, y, x_shift, y_shift] as produced
    by analyse_dot; columns 0/1 give the dot position, 2/3 the shift.
    Blocks on plt.show().
    """
    Xs = np.array(red_shifts)[:, 0]
    Ys = np.array(red_shifts)[:, 1]
    Zs = np.array(red_shifts)[:, 2]
    Zs2 = np.array(red_shifts)[:, 3]
    Zs3 = np.array(blue_shifts)[:, 2]
    Zs4 = np.array(blue_shifts)[:, 3]
    # one 3-D subplot per shift component
    fig, axs = plt.subplots(2, 2)
    ax = fig.add_subplot(2, 2, 1, projection='3d')
    ax.scatter(Xs, Ys, Zs, cmap=cm.jet, linewidth=0)
    ax.set_title('Red X Shift')
    ax = fig.add_subplot(2, 2, 2, projection='3d')
    ax.scatter(Xs, Ys, Zs2, cmap=cm.jet, linewidth=0)
    ax.set_title('Red Y Shift')
    ax = fig.add_subplot(2, 2, 3, projection='3d')
    ax.scatter(Xs, Ys, Zs3, cmap=cm.jet, linewidth=0)
    ax.set_title('Blue X Shift')
    ax = fig.add_subplot(2, 2, 4, projection='3d')
    ax.scatter(Xs, Ys, Zs4, cmap=cm.jet, linewidth=0)
    ax.set_title('Blue Y Shift')
    fig.tight_layout()
    plt.show()
def shifts_to_yaml(red_shift, blue_shift, image_dimensions, output_grid_size=9):
    """
    Bin the per-dot chromatic shifts into a coarse grid, average each cell,
    then extrapolate to the output_grid_size x output_grid_size correction
    grids required by the tuning file.

    red_shift/blue_shift -- rows of [x, y, x_shift, y_shift] from analyse_dot
    image_dimensions     -- (height, width) of the source image
    output_grid_size     -- side length of the output grids (default 9)

    Returns (xr, yr, xb, yb) grids of pixel corrections (note the sign flip
    and axis swap explained at the bottom).
    """
    # Convert the shifts to a numpy array for easier handling and initialise other variables
    red_shifts = np.array(red_shift)
    blue_shifts = np.array(blue_shift)
    # create a grid that's smaller than the output grid, which we then interpolate from to get the output values
    xrgrid = np.zeros((output_grid_size - 1, output_grid_size - 1))
    xbgrid = np.zeros((output_grid_size - 1, output_grid_size - 1))
    yrgrid = np.zeros((output_grid_size - 1, output_grid_size - 1))
    ybgrid = np.zeros((output_grid_size - 1, output_grid_size - 1))
    xrsgrid = []
    xbsgrid = []
    yrsgrid = []
    ybsgrid = []
    # NOTE(review): xg and yg are never referenced later in this function
    xg = np.zeros((output_grid_size - 1, output_grid_size - 1))
    yg = np.zeros((output_grid_size - 1, output_grid_size - 1))
    # Format the grids - numpy doesn't work for this, it wants a
    # nice uniformly spaced grid, which we don't know if we have yet, hence the rather mundane setup
    for x in range(output_grid_size - 1):
        xrsgrid.append([])
        yrsgrid.append([])
        xbsgrid.append([])
        ybsgrid.append([])
        for y in range(output_grid_size - 1):
            xrsgrid[x].append([])
            yrsgrid[x].append([])
            xbsgrid[x].append([])
            ybsgrid[x].append([])
    image_size = (image_dimensions[0], image_dimensions[1])
    gridxsize = image_size[0] / (output_grid_size - 1)
    gridysize = image_size[1] / (output_grid_size - 1)
    # Iterate through each dot, and it's shift values and put these into the correct grid location
    for red_shift in red_shifts:
        xgridloc = int(red_shift[0] / gridxsize)
        ygridloc = int(red_shift[1] / gridysize)
        xrsgrid[xgridloc][ygridloc].append(red_shift[2])
        yrsgrid[xgridloc][ygridloc].append(red_shift[3])
    for blue_shift in blue_shifts:
        xgridloc = int(blue_shift[0] / gridxsize)
        ygridloc = int(blue_shift[1] / gridysize)
        xbsgrid[xgridloc][ygridloc].append(blue_shift[2])
        ybsgrid[xgridloc][ygridloc].append(blue_shift[3])
    # Now calculate the average pixel shift for each square in the grid
    # NOTE(review): a cell with no dots averages an empty list, which makes
    # np.mean return nan — confirm every cell is populated in practice.
    for x in range(output_grid_size - 1):
        for y in range(output_grid_size - 1):
            xrgrid[x, y] = np.mean(xrsgrid[x][y])
            yrgrid[x, y] = np.mean(yrsgrid[x][y])
            xbgrid[x, y] = np.mean(xbsgrid[x][y])
            ybgrid[x, y] = np.mean(ybsgrid[x][y])
    # Next, we start to interpolate the central points of the grid that gets passed to the tuning file
    input_grids = np.array([xrgrid, yrgrid, xbgrid, ybgrid])
    output_grids = np.zeros((4, output_grid_size, output_grid_size))
    # Interpolate the centre of the grid
    output_grids[:, 1:-1, 1:-1] = (input_grids[:, 1:, :-1] + input_grids[:, 1:, 1:] + input_grids[:, :-1, 1:] + input_grids[:, :-1, :-1]) / 4
    # Edge cases: linearly extrapolate the border from the first interior column/row
    output_grids[:, 1:-1, 0] = ((input_grids[:, :-1, 0] + input_grids[:, 1:, 0]) / 2 - output_grids[:, 1:-1, 1]) * 2 + output_grids[:, 1:-1, 1]
    output_grids[:, 1:-1, -1] = ((input_grids[:, :-1, 7] + input_grids[:, 1:, 7]) / 2 - output_grids[:, 1:-1, -2]) * 2 + output_grids[:, 1:-1, -2]
    output_grids[:, 0, 1:-1] = ((input_grids[:, 0, :-1] + input_grids[:, 0, 1:]) / 2 - output_grids[:, 1, 1:-1]) * 2 + output_grids[:, 1, 1:-1]
    output_grids[:, -1, 1:-1] = ((input_grids[:, 7, :-1] + input_grids[:, 7, 1:]) / 2 - output_grids[:, -2, 1:-1]) * 2 + output_grids[:, -2, 1:-1]
    # Corner Cases: extend the two adjacent edge gradients into the corner
    output_grids[:, 0, 0] = (output_grids[:, 0, 1] - output_grids[:, 1, 1]) + (output_grids[:, 1, 0] - output_grids[:, 1, 1]) + output_grids[:, 1, 1]
    output_grids[:, 0, -1] = (output_grids[:, 0, -2] - output_grids[:, 1, -2]) + (output_grids[:, 1, -1] - output_grids[:, 1, -2]) + output_grids[:, 1, -2]
    output_grids[:, -1, 0] = (output_grids[:, -1, 1] - output_grids[:, -2, 1]) + (output_grids[:, -2, 0] - output_grids[:, -2, 1]) + output_grids[:, -2, 1]
    output_grids[:, -1, -1] = (output_grids[:, -2, -1] - output_grids[:, -2, -2]) + (output_grids[:, -1, -2] - output_grids[:, -2, -2]) + output_grids[:, -2, -2]
    # Below, we swap the x and the y coordinates, and also multiply by a factor of -1
    # This is due to the PiSP (standard) dimensions being flipped in comparison to
    # PIL image coordinate directions, hence why xr -> yr. Also, the shifts calculated are colour shifts,
    # and the PiSP block asks for the values it should shift by (hence the * -1, to convert from colour shift to a pixel shift)
    output_grid_yr, output_grid_xr, output_grid_yb, output_grid_xb = output_grids * -1
    return output_grid_xr, output_grid_yr, output_grid_xb, output_grid_yb
def analyse_dot(dot, dot_location=(0, 0)):
    """Measure the chromatic aberration at one calibration dot.

    dot          -- HxWx3 pixel block containing the dot
    dot_location -- (x, y) of the block's top-left corner in the image
                    (default changed from a mutable list [0, 0] to an
                    equivalent tuple to avoid the shared-default pitfall)

    For each colour channel the brightness-weighted centroid is computed
    (pixel value * distance from the top-left corner, normalised by the
    channel's total brightness). The red and blue centroids are compared
    against green to obtain the channel shifts.

    Returns:
    [[red_center_x, red_center_y, red_x_shift, red_y_shift],
     [blue_center_x, blue_center_y, blue_x_shift, blue_y_shift]]
    """
    arr = np.array(dot)
    x_idx = np.arange(arr.shape[0])
    y_idx = np.arange(arr.shape[1])

    def _centroid(channel):
        # brightness-weighted mean (x, y) position of the channel
        total = np.sum(channel)
        return (np.sum(np.dot(x_idx, channel)) / total,
                np.sum(np.dot(channel, y_idx)) / total)

    red_x, red_y = _centroid(arr[:, :, 0])
    green_x, green_y = _centroid(arr[:, :, 1])
    blue_x, blue_y = _centroid(arr[:, :, 2])
    # centre of the dot block in image coordinates; int() is applied to
    # both rows (the original applied it to the red row only)
    center_x = int(dot_location[0]) + int(len(dot) / 2)
    center_y = int(dot_location[1]) + int(len(dot[0]) / 2)
    return [[center_x, center_y, red_x - green_x, red_y - green_y],
            [center_x, center_y, blue_x - green_x, blue_y - green_y]]
def cac(Cam):
    """
    Run the chromatic aberration correction analysis over Cam.imgs_cac.

    Locates the calibration dots in each image, measures the per-dot red
    and blue channel shifts, interpolates them onto 9x9 grids and returns
    the dict to embed in the camera tuning file.
    """
    filelist = Cam.imgs_cac
    Cam.log += '\nCAC analysing files: {}'.format(str(filelist))
    np.set_printoptions(precision=3)
    np.set_printoptions(suppress=True)
    # Create arrays to hold all the dots data and their colour offsets
    red_shift = []  # Format is: [[Dot Center X, Dot Center Y, x shift, y shift]]
    blue_shift = []
    # Iterate through the files
    # Multiple files is reccomended to average out the lens aberration through rotations
    for file in filelist:
        Cam.log += '\nCAC processing file'
        print("\n Processing file")
        # Read the raw RGB values
        rgb = file.rgb
        image_size = [file.h, file.w]  # Image size, X, Y
        # Create a colour copy of the RGB values to use later in the calibration
        imout = Image.new(mode="RGB", size=image_size)
        rgb_image = np.array(imout)
        # The rgb values need reshaping from a 1d array to a 3d array to be worked with easily
        # NOTE(review): ndarray.reshape returns a new array and the result
        # is discarded here — likely intended rgb = rgb.reshape(...);
        # rgb_image is immediately overwritten with the unreshaped rgb.
        rgb.reshape((image_size[0], image_size[1], 3))
        rgb_image = rgb
        # Pass the RGB image through to the dots locating program
        # Returns an array of the dots (colour rectangles around the dots), and an array of their locations
        print("Finding dots")
        Cam.log += '\nFinding dots'
        dots, dots_locations = find_dots_locations(rgb_image)
        # Now, analyse each dot. Work out the centroid of each colour channel, and use that to work out
        # by how far the chromatic aberration has shifted each channel
        Cam.log += '\nDots found: {}'.format(str(len(dots)))
        print('Dots found: ' + str(len(dots)))
        for dot, dot_location in zip(dots, dots_locations):
            # skip empty dots and dots at non-positive coordinates
            if len(dot) > 0:
                if (dot_location[0] > 0) and (dot_location[1] > 0):
                    ret = analyse_dot(dot, dot_location)
                    red_shift.append(ret[0])
                    blue_shift.append(ret[1])
    # Take our arrays of red shifts and locations, push them through to be interpolated into a 9x9 matrix
    # for the CAC block to handle and then store these as a .json file to be added to the camera
    # tuning file
    print("\nCreating output grid")
    Cam.log += '\nCreating output grid'
    rx, ry, bx, by = shifts_to_yaml(red_shift, blue_shift, image_size)
    print("CAC correction complete!")
    Cam.log += '\nCAC correction complete!'
    # Give the JSON dict back to the main ctt program
    return {"strength": 1.0, "lut_rx": list(rx.round(2).reshape(81)), "lut_ry": list(ry.round(2).reshape(81)), "lut_bx": list(bx.round(2).reshape(81)), "lut_by": list(by.round(2).reshape(81))}
|
0 | repos/libcamera/utils/raspberrypi | repos/libcamera/utils/raspberrypi/ctt/ctt_config_example.json | {
"disable": [],
"plot": [],
"alsc": {
"do_alsc_colour": 1,
"luminance_strength": 0.8,
"max_gain": 8.0
},
"awb": {
"greyworld": 0
},
"blacklevel": -1,
"macbeth": {
"small": 0,
"show": 0
}
}
|
0 | repos/libcamera/utils/raspberrypi | repos/libcamera/utils/raspberrypi/ctt/convert_tuning.py | #!/usr/bin/env python3
#
# SPDX-License-Identifier: BSD-2-Clause
#
# Script to convert version 1.0 Raspberry Pi camera tuning files to version 2.0.
#
# Copyright 2022 Raspberry Pi Ltd
import argparse
import json
import numpy as np
import sys
from ctt_pretty_print_json import pretty_print
from ctt_pisp import grid_size as grid_size_pisp
from ctt_pisp import json_template as json_template_pisp
from ctt_vc4 import grid_size as grid_size_vc4
from ctt_vc4 import json_template as json_template_vc4
def interp_2d(in_ls, src_w, src_h, dst_w, dst_h):
    """
    Bilinearly resample a 2D grid from (src_h, src_w) to (dst_h, dst_w).

    Rows are interpolated to the destination width first, then columns to
    the destination height. Both coordinate spaces are mapped onto the
    destination index range so the corner samples stay anchored.
    """
    # Interpolate each source row to the destination width into an
    # intermediate array. The previous code wrote these rows straight
    # into the (dst_h, dst_w) output, which raised IndexError whenever
    # src_h > dst_h (i.e. when downscaling, e.g. pisp -> vc4 grids).
    rows = np.zeros((src_h, dst_w))
    for i in range(src_h):
        rows[i] = np.interp(np.linspace(0, dst_w - 1, dst_w),
                            np.linspace(0, dst_w - 1, src_w),
                            in_ls[i])
    # Now interpolate each column to the destination height.
    out_ls = np.zeros((dst_h, dst_w))
    for i in range(dst_w):
        out_ls[:, i] = np.interp(np.linspace(0, dst_h - 1, dst_h),
                                 np.linspace(0, dst_h - 1, src_h),
                                 rows[:, i])
    return out_ls
def convert_target(in_json: dict, target: str):
    """
    Convert a v2.0 tuning dict between the vc4 and pisp platforms
    (best effort). The dict is modified in place and also returned.

    The ALSC tables are resampled to the destination grid size, the
    denoise (and, for pisp, HDR) algorithm configs are replaced wholesale
    with the destination template's defaults, and the AGC metering
    weights are taken from the destination template.
    """
    # Source grid belongs to the *other* platform, destination to the target.
    src_w, src_h = grid_size_pisp if target == 'vc4' else grid_size_vc4
    dst_w, dst_h = grid_size_vc4 if target == 'vc4' else grid_size_pisp
    json_template = json_template_vc4 if target == 'vc4' else json_template_pisp
    # ALSC grid sizes: resample every colour calibration table, then the
    # luminance table, onto the destination grid.
    alsc = next(algo for algo in in_json['algorithms'] if 'rpi.alsc' in algo)['rpi.alsc']
    for colour in ['calibrations_Cr', 'calibrations_Cb']:
        if colour not in alsc:
            continue
        for temperature in alsc[colour]:
            in_ls = np.reshape(temperature['table'], (src_h, src_w))
            out_ls = interp_2d(in_ls, src_w, src_h, dst_w, dst_h)
            temperature['table'] = np.round(out_ls.flatten(), 3).tolist()
    if 'luminance_lut' in alsc:
        in_ls = np.reshape(alsc['luminance_lut'], (src_h, src_w))
        out_ls = interp_2d(in_ls, src_w, src_h, dst_w, dst_h)
        alsc['luminance_lut'] = np.round(out_ls.flatten(), 3).tolist()
    # Denoise blocks: replace the first rpi.sdn entry with the target
    # platform's denoise config (vc4 keeps the legacy rpi.sdn shape).
    for i, algo in enumerate(in_json['algorithms']):
        if list(algo.keys())[0] == 'rpi.sdn':
            in_json['algorithms'][i] = {'rpi.denoise': json_template['rpi.sdn'] if target == 'vc4' else json_template['rpi.denoise']}
            break
    # AGC mode weights: replace the metering weights with the target
    # template's (weights cannot be carried across platforms directly).
    agc = next(algo for algo in in_json['algorithms'] if 'rpi.agc' in algo)['rpi.agc']
    if 'channels' in agc:
        for i, channel in enumerate(agc['channels']):
            target_agc_metering = json_template['rpi.agc']['channels'][i]['metering_modes']
            for mode, v in channel['metering_modes'].items():
                v['weights'] = target_agc_metering[mode]['weights']
    else:
        # Single-channel (pre-"channels") AGC config: use channel 0's weights.
        for mode, v in agc["metering_modes"].items():
            target_agc_metering = json_template['rpi.agc']['channels'][0]['metering_modes']
            v['weights'] = target_agc_metering[mode]['weights']
    # HDR: only the pisp template carries an rpi.hdr config to substitute.
    if target == 'pisp':
        for i, algo in enumerate(in_json['algorithms']):
            if list(algo.keys())[0] == 'rpi.hdr':
                in_json['algorithms'][i] = {'rpi.hdr': json_template['rpi.hdr']}
    return in_json
def convert_v2(in_json: dict, target: str) -> str:
    """
    Convert a tuning dict to the v2.0 layout for the given target and
    return it pretty-printed as a string.

    A version 1.0 input (a flat mapping of algorithm name to config) is
    first wrapped into the v2.0 {version, target, algorithms} structure;
    anything else is assumed to already be v2.0. If the tuning was made
    for the other platform it is converted (best effort) to this one.
    """
    if in_json.get('version') == 1.0:
        algorithms = [{name: cfg} for name, cfg in in_json.items()]
        converted = {
            'version': 2.0,
            'target': target,
            'algorithms': algorithms
        }
    else:
        converted = in_json

    # Convert between vc4 <-> pisp targets. This is a best effort thing.
    if converted['target'] != target:
        converted = convert_target(converted, target)
        converted['target'] = target

    # Tables are printed with one grid row per line, so the custom line
    # length is the grid width of the destination platform.
    row_length = (grid_size_vc4 if target == 'vc4' else grid_size_pisp)[0]
    return pretty_print(converted,
                        custom_elems={'table': row_length, 'luminance_lut': row_length})
if __name__ == "__main__":
    # Command-line entry point: read the input tuning file, convert it to
    # v2.0 and/or the requested target platform, and write the result
    # (updating the input file in place when no output path is given).
    parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter, description=
                                     'Convert the format of the Raspberry Pi camera tuning file from v1.0 to v2.0 and/or the vc4 <-> pisp targets.\n')
    parser.add_argument('input', type=str, help='Input tuning file.')
    parser.add_argument('-t', '--target', type=str, help='Target platform.',
                        choices=['pisp', 'vc4'], default='vc4')
    parser.add_argument('output', type=str, nargs='?',
                        help='Output converted tuning file. If not provided, the input file will be updated in-place.',
                        default=None)
    args = parser.parse_args()

    with open(args.input, 'r') as f:
        in_json = json.load(f)

    out_json = convert_v2(in_json, args.target)

    with open(args.output if args.output is not None else args.input, 'w') as f:
        f.write(out_json)
|
0 | repos/libcamera/utils/raspberrypi | repos/libcamera/utils/raspberrypi/ctt/ctt_alsc.py | # SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
# camera tuning tool for ALSC (auto lens shading correction)
from ctt_image_load import *
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
"""
preform alsc calibration on a set of images
"""
def alsc_all(Cam, do_alsc_colour, plot, grid_size=(16, 12), max_gain=8.0):
    """
    Perform ALSC (auto lens shading correction) calibration on all of
    the camera's ALSC images.

    Returns (cal_cr_list, cal_cb_list, lum_lut, av_corn):
    cal_cr_list / cal_cb_list - per-colour-temperature g/r and g/b gain
    tables (None when do_alsc_colour is disabled), lum_lut - luminance
    gain table averaged over all images, av_corn - mean of the four
    luminance table corner gains (used later for the lsc gain).
    """
    imgs_alsc = Cam.imgs_alsc
    grid_w, grid_h = grid_size
    """
    create list of colour temperatures and associated calibration tables
    """
    list_col = []
    list_cr = []
    list_cb = []
    list_cg = []
    for Img in imgs_alsc:
        col, cr, cb, cg, size = alsc(Cam, Img, do_alsc_colour, plot, grid_size=grid_size, max_gain=max_gain)
        list_col.append(col)
        list_cr.append(cr)
        list_cb.append(cb)
        list_cg.append(cg)
        Cam.log += '\n'
    Cam.log += '\nFinished processing images'
    w, h, dx, dy = size
    Cam.log += '\nChannel dimensions: w = {} h = {}'.format(int(w), int(h))
    # Log the actual grid dimensions rather than hard-coding "16x12".
    Cam.log += '\n{}x{} grid rectangle size: w = {} h = {}'.format(grid_w, grid_h, dx, dy)
    """
    convert to numpy array for data manipulation
    """
    list_col = np.array(list_col)
    list_cr = np.array(list_cr)
    list_cb = np.array(list_cb)
    list_cg = np.array(list_cg)

    cal_cr_list = []
    cal_cb_list = []

    """
    only do colour calculations if required
    """
    if do_alsc_colour:
        Cam.log += '\nALSC colour tables'
        for ct in sorted(set(list_col)):
            Cam.log += '\nColour temperature: {} K'.format(ct)
            """
            average tables for the same colour temperature
            """
            indices = np.where(list_col == ct)
            ct = int(ct)
            t_r = np.mean(list_cr[indices], axis=0)
            t_b = np.mean(list_cb[indices], axis=0)
            """
            force numbers to be stored to 3dp.... :(
            """
            t_r = np.where((100*t_r) % 1 <= 0.05, t_r+0.001, t_r)
            t_b = np.where((100*t_b) % 1 <= 0.05, t_b+0.001, t_b)
            t_r = np.where((100*t_r) % 1 >= 0.95, t_r-0.001, t_r)
            t_b = np.where((100*t_b) % 1 >= 0.95, t_b-0.001, t_b)
            t_r = np.round(t_r, 3)
            t_b = np.round(t_b, 3)
            r_corners = (t_r[0], t_r[grid_w - 1], t_r[-1], t_r[-grid_w])
            b_corners = (t_b[0], t_b[grid_w - 1], t_b[-1], t_b[-grid_w])
            # Top-left cell of the central 2x2 patch. The previous index,
            # (grid_h // 2 - 1) * grid_w + grid_w - 1, pointed at the end
            # of the middle row (a table edge), not the table centre.
            middle_pos = (grid_h // 2 - 1) * grid_w + (grid_w // 2 - 1)
            r_cen = t_r[middle_pos]+t_r[middle_pos + 1]+t_r[middle_pos + grid_w]+t_r[middle_pos + grid_w + 1]
            r_cen = round(r_cen/4, 3)
            b_cen = t_b[middle_pos]+t_b[middle_pos + 1]+t_b[middle_pos + grid_w]+t_b[middle_pos + grid_w + 1]
            b_cen = round(b_cen/4, 3)
            Cam.log += '\nRed table corners: {}'.format(r_corners)
            Cam.log += '\nRed table centre: {}'.format(r_cen)
            Cam.log += '\nBlue table corners: {}'.format(b_corners)
            Cam.log += '\nBlue table centre: {}'.format(b_cen)
            cr_dict = {
                'ct': ct,
                'table': list(t_r)
            }
            cb_dict = {
                'ct': ct,
                'table': list(t_b)
            }
            cal_cr_list.append(cr_dict)
            cal_cb_list.append(cb_dict)
            Cam.log += '\n'
    else:
        cal_cr_list, cal_cb_list = None, None

    """
    average all values for luminance shading and return one table for all temperatures
    """
    lum_lut = np.mean(list_cg, axis=0)
    lum_lut = np.where((100*lum_lut) % 1 <= 0.05, lum_lut+0.001, lum_lut)
    lum_lut = np.where((100*lum_lut) % 1 >= 0.95, lum_lut-0.001, lum_lut)
    lum_lut = list(np.round(lum_lut, 3))

    """
    calculate average corner for lsc gain calculation further on
    """
    # Use the configured grid size here. The corner and centre indices
    # were previously hard-coded for a 16-wide grid (lum_lut[15],
    # lum_lut[-16], 5*16+7, ...), which silently read the wrong cells
    # for any other grid size (e.g. the 32x32 PiSP grid). For the
    # default 16x12 grid these expressions select the same cells as
    # before.
    corners = (lum_lut[0], lum_lut[grid_w - 1], lum_lut[-1], lum_lut[-grid_w])
    Cam.log += '\nLuminance table corners: {}'.format(corners)
    middle_pos = (grid_h // 2 - 1) * grid_w + (grid_w // 2 - 1)
    l_cen = lum_lut[middle_pos]+lum_lut[middle_pos + 1]+lum_lut[middle_pos + grid_w]+lum_lut[middle_pos + grid_w + 1]
    l_cen = round(l_cen/4, 3)
    Cam.log += '\nLuminance table centre: {}'.format(l_cen)
    av_corn = np.sum(corners)/4

    return cal_cr_list, cal_cb_list, lum_lut, av_corn
"""
calculate g/r and g/b for 32x32 points arranged in a grid for a single image
"""
def alsc(Cam, Img, do_alsc_colour, plot=False, grid_size=(16, 12), max_gain=8.0):
    """
    Calculate lens shading gain tables for a single image on a
    grid_w x grid_h grid.

    Returns (colour_temperature, cr, cb, cg, (w, h, dx, dy)) where
    cr/cb are flattened g/r and g/b gain tables (None when colour
    calibration is disabled), cg is the luminance gain table as a flat
    list clipped to max_gain, and the final tuple holds the
    half-resolution channel size and the grid cell dimensions.
    """
    Cam.log += '\nProcessing image: ' + Img.name
    grid_w, grid_h = grid_size
    """
    get channel in correct order
    """
    channels = [Img.channels[i] for i in Img.order]
    """
    calculate size of single rectangle.
    -(-(w-1)//grid_w) is a ceiling division. w-1 is to deal robustly with
    the case where w is a multiple of the grid width.
    """
    w, h = Img.w/2, Img.h/2
    dx, dy = int(-(-(w-1)//grid_w)), int(-(-(h-1)//grid_h))
    """
    average the green channels into one
    """
    av_ch_g = np.mean((channels[1:3]), axis=0)
    if do_alsc_colour:
        """
        obtain grid_w x grid_h grid of intensities for each channel and subtract black level
        """
        g = get_grid(av_ch_g, dx, dy, grid_size) - Img.blacklevel_16
        r = get_grid(channels[0], dx, dy, grid_size) - Img.blacklevel_16
        b = get_grid(channels[3], dx, dy, grid_size) - Img.blacklevel_16
        """
        calculate ratios as 32 bit in order to be supported by medianBlur function
        """
        cr = np.reshape(g/r, (grid_h, grid_w)).astype('float32')
        cb = np.reshape(g/b, (grid_h, grid_w)).astype('float32')
        cg = np.reshape(1/g, (grid_h, grid_w)).astype('float32')
        """
        median blur to remove peaks and save as float 64
        """
        cr = cv2.medianBlur(cr, 3).astype('float64')
        cr = cr/np.min(cr)  # gain tables are easier for humans to read if the minimum is 1.0
        cb = cv2.medianBlur(cb, 3).astype('float64')
        cb = cb/np.min(cb)
        cg = cv2.medianBlur(cg, 3).astype('float64')
        cg = cg/np.min(cg)
        cg = [min(v, max_gain) for v in cg.flatten()]  # never exceed the max luminance gain

        """
        debugging code showing 2D surface plot of vignetting. Quite useful
        for sanity check
        """
        if plot:
            hf = plt.figure(figsize=(8, 8))
            ha = hf.add_subplot(311, projection='3d')
            """
            note Y is plotted as -Y so plot has same axes as image
            """
            X, Y = np.meshgrid(range(grid_w), range(grid_h))
            ha.plot_surface(X, -Y, cr, cmap=cm.coolwarm, linewidth=0)
            ha.set_title('ALSC Plot\nImg: {}\n\ncr'.format(Img.str))
            hb = hf.add_subplot(312, projection='3d')
            hb.plot_surface(X, -Y, cb, cmap=cm.coolwarm, linewidth=0)
            hb.set_title('cb')
            hc = hf.add_subplot(313, projection='3d')
            # cg was flattened to a list by the clipping above; reshape it
            # back to 2D, as plot_surface requires a 2D Z argument.
            hc.plot_surface(X, -Y, np.reshape(cg, (grid_h, grid_w)), cmap=cm.coolwarm, linewidth=0)
            hc.set_title('g')
            # print(Img.str)
            plt.show()

        return Img.col, cr.flatten(), cb.flatten(), cg, (w, h, dx, dy)

    else:
        """
        only perform calculations for luminance shading
        """
        g = get_grid(av_ch_g, dx, dy, grid_size) - Img.blacklevel_16
        cg = np.reshape(1/g, (grid_h, grid_w)).astype('float32')
        cg = cv2.medianBlur(cg, 3).astype('float64')
        cg = cg/np.min(cg)
        cg = [min(v, max_gain) for v in cg.flatten()]  # never exceed the max luminance gain

        if plot:
            # Fixed typos from the original: figssize -> figsize and
            # np.meashgrid -> np.meshgrid; also .format() was previously
            # applied to set_title()'s return value (None) instead of to
            # the title string.
            hf = plt.figure(figsize=(8, 8))
            ha = hf.add_subplot(1, 1, 1, projection='3d')
            X, Y = np.meshgrid(range(grid_w), range(grid_h))
            ha.plot_surface(X, -Y, np.reshape(cg, (grid_h, grid_w)), cmap=cm.coolwarm, linewidth=0)
            ha.set_title('ALSC Plot (Luminance only!)\nImg: {}\n\ncg'.format(Img.str))
            plt.show()

        # cg is already a flat list (see the clipping above); the previous
        # code called cg.flatten() here, which raised AttributeError. This
        # now matches the colour branch, which also returns cg as a list.
        return Img.col, None, None, cg, (w, h, dx, dy)
"""
Compresses channel down to a grid of the requested size
"""
def get_grid(chan, dx, dy, grid_size):
    """
    Average a full-resolution channel down to a grid_w x grid_h grid.

    Cells are dx x dy pixels, except that the last column and last row
    absorb whatever remains of the channel, since the channel dimensions
    need not be exact multiples of the cell size.
    """
    grid_w, grid_h = grid_size
    cells = []
    for row in range(grid_h):
        y0 = dy * row
        # The final row/column runs to the channel edge instead of
        # stopping at a fixed dy/dx boundary.
        y1 = None if row == grid_h - 1 else dy * (row + 1)
        for col in range(grid_w):
            x0 = dx * col
            x1 = None if col == grid_w - 1 else dx * (col + 1)
            cells.append(np.mean(chan[y0:y1, x0:x1]))
    # return as np.array, ready for further manipulation
    return np.array(cells)
"""
obtains sigmas for red and blue, effectively a measure of the 'error'
"""
def get_sigma(Cam, cal_cr_list, cal_cb_list, grid_size):
    """
    Estimate the red and blue calibration 'error' (sigma) from the
    colour ALSC tables.

    A sigma is computed between every pair of calibrations adjacent in
    colour temperature, and the largest red and blue values are returned
    (they need not come from the same temperature interval). When only a
    single calibration exists there is nothing to compare, so a default
    of 0.005 is used.
    """
    Cam.log += '\nCalculating sigmas'
    temperatures = [cal['ct'] for cal in cal_cr_list]

    sigma_rs = []
    sigma_bs = []
    # Walk consecutive calibration pairs, adjacent in colour space.
    for i, (ct_lo, ct_hi) in enumerate(zip(temperatures, temperatures[1:])):
        sigma_rs.append(calc_sigma(cal_cr_list[i]['table'], cal_cr_list[i+1]['table'], grid_size))
        sigma_bs.append(calc_sigma(cal_cb_list[i]['table'], cal_cb_list[i+1]['table'], grid_size))
        Cam.log += '\nColour temperature interval {} - {} K'.format(ct_lo, ct_hi)
        Cam.log += '\nSigma red: {}'.format(sigma_rs[-1])
        Cam.log += '\nSigma blue: {}'.format(sigma_bs[-1])

    sigma_r = max(sigma_rs) if sigma_rs else 0.005
    sigma_b = max(sigma_bs) if sigma_bs else 0.005
    Cam.log += '\nMaximum sigmas: Red = {} Blue = {}'.format(sigma_r, sigma_b)

    return sigma_r, sigma_b
"""
calculate sigma from two adjacent gain tables
"""
def calc_sigma(g1, g2, grid_size):
    """
    Measure the disagreement (sigma) between two adjacent gain tables.

    The element-wise ratio g1/g2 is formed on the grid (and inverted if
    its mean is below 1, so the comparison direction is normalised).
    Every interior cell contributes the mean absolute difference to its
    four neighbours; the overall mean, rounded to 5 decimal places, is
    returned. Border cells are excluded.
    """
    grid_w, grid_h = grid_size
    ratio = np.reshape(g1, (grid_h, grid_w)) / np.reshape(g2, (grid_h, grid_w))
    if np.mean(ratio) < 1:
        ratio = 1 / ratio

    # Vectorised neighbour comparison: shift the grid up/down/left/right
    # and accumulate absolute differences against the interior cells.
    centre = ratio[1:-1, 1:-1]
    neighbour_diff = (np.abs(centre - ratio[:-2, 1:-1]) +
                      np.abs(centre - ratio[2:, 1:-1]) +
                      np.abs(centre - ratio[1:-1, :-2]) +
                      np.abs(centre - ratio[1:-1, 2:])) / 4

    return np.round(np.mean(neighbour_diff), 5)
|
0 | repos/libcamera/utils/raspberrypi | repos/libcamera/utils/raspberrypi/ctt/cac_only.py | #!/usr/bin/env python3
#
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2023, Raspberry Pi (Trading) Ltd.
#
# cac_only.py - cac tuning tool
# This file allows you to tune only the chromatic aberration correction
# Specify any number of files in the command line args, and it shall iterate through
# and generate an averaged cac table from all the input images, which you can then
# input into your tuning file.
# Takes .dng files produced by the camera modules of the dots grid and calculates the chromatic aberration of each dot.
# Then takes each dot, and works out where it was in the image, and uses that to output a tables of the shifts
# across the whole image.
from PIL import Image
import numpy as np
import rawpy
import sys
import getopt
from ctt_cac import *
def cac(filelist, output_filepath, plot_results=False):
    """
    Run the chromatic aberration (CAC) calibration over a set of .dng
    images of the dots chart and write the resulting correction tables
    as a JSON snippet to output_filepath.

    filelist: paths of the input .dng files (several captures are
    accumulated so the shifts average out).
    output_filepath: path of the JSON snippet to write.
    plot_results: when True, also display a plot of the measured shifts.
    """
    np.set_printoptions(precision=3)
    np.set_printoptions(suppress=True)

    # Create arrays to hold all the dots data and their colour offsets
    red_shift = []  # Format is: [[Dot Center X, Dot Center Y, x shift, y shift]]
    blue_shift = []
    # Iterate through the files
    # Multiple files is recommended to average out the lens aberration through rotations
    for file in filelist:
        print("\n Processing file " + str(file))
        # Read the raw RGB values from the .dng file
        with rawpy.imread(file) as raw:
            rgb = raw.postprocess()
            sizes = (raw.sizes)
            image_size = [sizes[2], sizes[3]]  # Image size, X, Y

        # Create a colour copy of the RGB values to use later in the calibration
        imout = Image.new(mode="RGB", size=image_size)
        rgb_image = np.array(imout)
        # The rgb values need reshaping from a 1d array to a 3d array to be worked with easily
        # NOTE(review): ndarray.reshape returns a new array; this result is
        # discarded and rgb_image is then rebound to rgb unchanged - confirm intent.
        rgb.reshape((image_size[0], image_size[1], 3))
        rgb_image = rgb

        # Pass the RGB image through to the dots locating program
        # Returns an array of the dots (colour rectangles around the dots), and an array of their locations
        print("Finding dots")
        dots, dots_locations = find_dots_locations(rgb_image)

        # Now, analyse each dot. Work out the centroid of each colour channel, and use that to work out
        # by how far the chromatic aberration has shifted each channel
        print('Dots found: ' + str(len(dots)))

        for dot, dot_location in zip(dots, dots_locations):
            if len(dot) > 0:
                if (dot_location[0] > 0) and (dot_location[1] > 0):
                    ret = analyse_dot(dot, dot_location)
                    red_shift.append(ret[0])
                    blue_shift.append(ret[1])

    # Take our arrays of red shifts and locations, push them through to be interpolated into a 9x9 matrix
    # for the CAC block to handle and then store these as a .json file to be added to the camera
    # tuning file
    print("\nCreating output grid")
    rx, ry, bx, by = shifts_to_yaml(red_shift, blue_shift, image_size)

    print("CAC correction complete!")

    # The json format that we then paste into the tuning file (manually)
    sample = '''
    {
        "rpi.cac" :
        {
            "strength": 1.0,
            "lut_rx" : [
            rx_vals
            ],
            "lut_ry" : [
            ry_vals
            ],
            "lut_bx" : [
            bx_vals
            ],
            "lut_by" : [
            by_vals
            ]
        }
    }
    '''

    # Below, may look incorrect, however, the PiSP (standard) dimensions are flipped in comparison to
    # PIL image coordinate directions, hence why xr -> yr. Also, the shifts calculated are colour shifts,
    # and the PiSP block asks for the values it should shift (hence the * -1, to convert from colour shift to a pixel shift)
    sample = sample.replace("rx_vals", pprint_array(ry * -1))
    sample = sample.replace("ry_vals", pprint_array(rx * -1))
    sample = sample.replace("bx_vals", pprint_array(by * -1))
    sample = sample.replace("by_vals", pprint_array(bx * -1))
    print("Successfully converted to JSON")
    f = open(str(output_filepath), "w+")
    f.write(sample)
    f.close()
    print("Successfully written to json file")

    '''
    If you wish to see a plot of the colour channel shifts, add the -p or --plots option
    Can be a quick way of validating if the data/dots you've got are good, or if you need to
    change some parameters/take some better images
    '''
    if plot_results:
        plot_shifts(red_shift, blue_shift)
if __name__ == "__main__":
    # Command-line entry point. Gathers any .dng arguments as the input
    # file list, picks up -o/-p flags and runs the CAC calibration.
    argv = sys.argv
    # Detect the input and output file paths
    arg_output = "output.json"
    arg_help = "{0} -i <input> -o <output> -p <plot results>".format(argv[0])
    # getopt validates the option syntax (it raises on anything
    # unrecognised); the values themselves are picked out of argv below.
    opts, args = getopt.getopt(argv[1:], "hi:o:p", ["help", "input=", "output=", "plot"])

    output_location = 0
    filelist = []
    plot_results = False
    for i in range(len(argv)):
        if ("-h") in argv[i]:
            print(arg_help)  # print the help message
            sys.exit(2)
        if "-o" in argv[i]:
            output_location = i
        if ".dng" in argv[i]:
            filelist.append(argv[i])
        if "-p" in argv[i]:
            plot_results = True

    # Only override the default output path when -o was actually given.
    # Previously argv[output_location + 1] was read unconditionally, so
    # without -o the tool used argv[1] (often an input .dng) as the
    # output path and overwrote it with the JSON.
    if output_location and output_location + 1 < len(argv):
        arg_output = argv[output_location + 1]

    cac(filelist, arg_output, plot_results)
|
0 | repos/libcamera/utils/raspberrypi | repos/libcamera/utils/raspberrypi/ctt/ctt_ransac.py | # SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
# camera tuning tool RANSAC selector for Macbeth chart locator
import numpy as np
scale = 2
"""
constructs normalised macbeth chart corners for ransac algorithm
"""
def get_square_verts(c_err=0.05, scale=scale):
    """
    Build the normalised Macbeth chart geometry used by the RANSAC
    selector.

    Returns (square_verts, mac_norm): the corner coordinates of all 24
    colour patches (ordered column by column, top to bottom) and the
    outer chart rectangle. c_err shrinks each patch towards its centre
    by that fraction of the side length, so patch borders are excluded.
    """
    # Chart dimensions in arbitrary units multiplied by the module scale:
    # big border around the chart, small border between patches, and the
    # side length of a single patch.
    b_bord_x, b_bord_y = scale*8.5, scale*13
    s_bord = 6*scale
    side = 41*scale
    x_max = side*6 + 5*s_bord + 2*b_bord_x
    y_max = side*4 + 3*s_bord + 2*b_bord_y

    # Outer chart rectangle, wrapped in an extra leading dimension.
    mac_norm = np.array([np.array(((0, 0), (0, y_max), (x_max, y_max), (x_max, 0)), np.float32)])

    # Reference patch at the chart origin: offset by the big border and
    # contracted towards its centre by c_err * side on every corner.
    contraction = side * c_err
    base_square = np.array(((0, 0), (0, side), (side, side), (side, 0)), np.float32)
    base_square += np.array((b_bord_x, b_bord_y), np.float32)
    base_square += np.array(((contraction, contraction), (contraction, -contraction),
                             (-contraction, -contraction), (-contraction, contraction)), np.float32)

    # Replicate the reference patch across the 6x4 grid, column-major.
    square_verts = []
    for col in range(6):
        x_shift = col * (side + s_bord)
        for row in range(4):
            y_shift = row * (side + s_bord)
            square_verts.append(base_square + np.array((x_shift, y_shift), np.float32))

    return np.array(square_verts, np.float32), mac_norm
def get_square_centres(c_err=0.05, scale=scale):
    """
    Return the centre point of each of the 24 Macbeth chart patches,
    in the same column-major order as get_square_verts().
    """
    square_verts, _ = get_square_verts(c_err, scale=scale)
    # A patch centre is the mean of its four (contracted) corners.
    return np.array(np.mean(square_verts, axis=1), np.float32)
|
0 | repos/libcamera/utils/raspberrypi | repos/libcamera/utils/raspberrypi/ctt/ctt_pisp.py | #!/usr/bin/env python3
#
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
# ctt_pisp.py - camera tuning tool data for PiSP platforms
json_template = {
"rpi.black_level": {
"black_level": 4096
},
"rpi.lux": {
"reference_shutter_speed": 10000,
"reference_gain": 1,
"reference_aperture": 1.0
},
"rpi.dpc": {
"strength": 1
},
"rpi.noise": {
},
"rpi.geq": {
},
"rpi.denoise":
{
"normal":
{
"sdn":
{
"deviation": 1.6,
"strength": 0.5,
"deviation2": 3.2,
"deviation_no_tdn": 3.2,
"strength_no_tdn": 0.75
},
"cdn":
{
"deviation": 200,
"strength": 0.3
},
"tdn":
{
"deviation": 0.8,
"threshold": 0.05
}
},
"hdr":
{
"sdn":
{
"deviation": 1.6,
"strength": 0.5,
"deviation2": 3.2,
"deviation_no_tdn": 3.2,
"strength_no_tdn": 0.75
},
"cdn":
{
"deviation": 200,
"strength": 0.3
},
"tdn":
{
"deviation": 1.3,
"threshold": 0.1
}
},
"night":
{
"sdn":
{
"deviation": 1.6,
"strength": 0.5,
"deviation2": 3.2,
"deviation_no_tdn": 3.2,
"strength_no_tdn": 0.75
},
"cdn":
{
"deviation": 200,
"strength": 0.3
},
"tdn":
{
"deviation": 1.3,
"threshold": 0.1
}
}
},
"rpi.awb": {
"priors": [
{"lux": 0, "prior": [2000, 1.0, 3000, 0.0, 13000, 0.0]},
{"lux": 800, "prior": [2000, 0.0, 6000, 2.0, 13000, 2.0]},
{"lux": 1500, "prior": [2000, 0.0, 4000, 1.0, 6000, 6.0, 6500, 7.0, 7000, 1.0, 13000, 1.0]}
],
"modes": {
"auto": {"lo": 2500, "hi": 7700},
"incandescent": {"lo": 2500, "hi": 3000},
"tungsten": {"lo": 3000, "hi": 3500},
"fluorescent": {"lo": 4000, "hi": 4700},
"indoor": {"lo": 3000, "hi": 5000},
"daylight": {"lo": 5500, "hi": 6500},
"cloudy": {"lo": 7000, "hi": 8000}
},
"bayes": 1
},
"rpi.agc":
{
"channels":
[
{
"comment": "Channel 0 is normal AGC",
"metering_modes":
{
"centre-weighted":
{
"weights":
[
0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
1, 1, 2, 2, 3, 3, 4, 4, 4, 3, 3, 2, 2, 1, 1,
1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0
]
},
"spot":
{
"weights":
[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 2, 3, 2, 1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
]
},
"matrix":
{
"weights":
[
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
]
}
},
"exposure_modes":
{
"normal":
{
"shutter": [ 100, 10000, 30000, 60000, 66666 ],
"gain": [ 1.0, 1.5, 2.0, 4.0, 8.0 ]
},
"short":
{
"shutter": [ 100, 5000, 10000, 20000, 60000 ],
"gain": [ 1.0, 1.5, 2.0, 4.0, 8.0 ]
},
"long":
{
"shutter": [ 100, 10000, 30000, 60000, 90000, 120000 ],
"gain": [ 1.0, 1.5, 2.0, 4.0, 8.0, 12.0 ]
}
},
"constraint_modes":
{
"normal": [
{
"bound": "LOWER",
"q_lo": 0.98,
"q_hi": 1.0,
"y_target":
[
0, 0.5,
1000, 0.5
]
}
],
"highlight": [
{
"bound": "LOWER",
"q_lo": 0.98,
"q_hi": 1.0,
"y_target":
[
0, 0.5,
1000, 0.5
]
},
{
"bound": "UPPER",
"q_lo": 0.98,
"q_hi": 1.0,
"y_target":
[
0, 0.8,
1000, 0.8
]
},
],
"shadows": [
{
"bound": "LOWER",
"q_lo": 0.0,
"q_hi": 0.5,
"y_target":
[
0, 0.17,
1000, 0.17
]
}
]
},
"y_target":
[
0, 0.16,
1000, 0.165,
10000, 0.17
]
},
{
"comment": "Channel 1 is the HDR short channel",
"desaturate": 0,
"metering_modes":
{
"centre-weighted":
{
"weights":
[
0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
1, 1, 2, 2, 3, 3, 4, 4, 4, 3, 3, 2, 2, 1, 1,
1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0
]
},
"spot":
{
"weights":
[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 2, 3, 2, 1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
]
},
"matrix":
{
"weights":
[
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
]
}
},
"exposure_modes":
{
"normal":
{
"shutter": [ 100, 20000, 60000 ],
"gain": [ 1.0, 1.0, 1.0 ]
},
"short":
{
"shutter": [ 100, 20000, 60000 ],
"gain": [ 1.0, 1.0, 1.0 ]
},
"long":
{
"shutter": [ 100, 20000, 60000 ],
"gain": [ 1.0, 1.0, 1.0 ]
}
},
"constraint_modes":
{
"normal": [
{
"bound": "LOWER",
"q_lo": 0.95,
"q_hi": 1.0,
"y_target":
[
0, 0.5,
1000, 0.5
]
},
{
"bound": "UPPER",
"q_lo": 0.95,
"q_hi": 1.0,
"y_target":
[
0, 0.7,
1000, 0.7
]
},
{
"bound": "LOWER",
"q_lo": 0.0,
"q_hi": 0.2,
"y_target":
[
0, 0.002,
1000, 0.002
]
}
],
"highlight": [
{
"bound": "LOWER",
"q_lo": 0.95,
"q_hi": 1.0,
"y_target":
[
0, 0.5,
1000, 0.5
]
},
{
"bound": "UPPER",
"q_lo": 0.95,
"q_hi": 1.0,
"y_target":
[
0, 0.7,
1000, 0.7
]
},
{
"bound": "LOWER",
"q_lo": 0.0,
"q_hi": 0.2,
"y_target":
[
0, 0.002,
1000, 0.002
]
}
],
"shadows": [
{
"bound": "LOWER",
"q_lo": 0.95,
"q_hi": 1.0,
"y_target":
[
0, 0.5,
1000, 0.5
]
},
{
"bound": "UPPER",
"q_lo": 0.95,
"q_hi": 1.0,
"y_target":
[
0, 0.7,
1000, 0.7
]
},
{
"bound": "LOWER",
"q_lo": 0.0,
"q_hi": 0.2,
"y_target":
[
0, 0.002,
1000, 0.002
]
}
]
},
"y_target":
[
0, 0.16,
1000, 0.165,
10000, 0.17
]
},
{
"comment": "Channel 2 is the HDR long channel",
"desaturate": 0,
"metering_modes":
{
"centre-weighted":
{
"weights":
[
0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
1, 1, 2, 2, 3, 3, 4, 4, 4, 3, 3, 2, 2, 1, 1,
1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0
]
},
"spot":
{
"weights":
[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 2, 3, 2, 1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
]
},
"matrix":
{
"weights":
[
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
]
}
},
"exposure_modes":
{
"normal":
{
"shutter": [ 100, 20000, 30000, 60000 ],
"gain": [ 1.0, 2.0, 4.0, 8.0 ]
},
"short":
{
"shutter": [ 100, 20000, 30000, 60000 ],
"gain": [ 1.0, 2.0, 4.0, 8.0 ]
},
"long":
{
"shutter": [ 100, 20000, 30000, 60000 ],
"gain": [ 1.0, 2.0, 4.0, 8.0 ]
}
},
"constraint_modes":
{
"normal": [
],
"highlight": [
],
"shadows": [
]
},
"channel_constraints":
[
{
"bound": "UPPER",
"channel": 4,
"factor": 8
},
{
"bound": "LOWER",
"channel": 4,
"factor": 2
}
],
"y_target":
[
0, 0.16,
1000, 0.165,
10000, 0.17
]
},
{
"comment": "Channel 3 is the night mode channel",
"base_ev": 0.33,
"metering_modes":
{
"centre-weighted":
{
"weights":
[
0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
1, 1, 2, 2, 3, 3, 4, 4, 4, 3, 3, 2, 2, 1, 1,
1, 1, 2, 2, 3, 3, 3, 4, 3, 3, 3, 2, 2, 1, 1,
1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 1, 1,
1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1,
0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0,
0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0
]
},
"spot":
{
"weights":
[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 2, 3, 2, 1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
]
},
"matrix":
{
"weights":
[
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
]
}
},
"exposure_modes":
{
"normal":
{
"shutter": [ 100, 20000, 66666 ],
"gain": [ 1.0, 2.0, 4.0 ]
},
"short":
{
"shutter": [ 100, 20000, 33333 ],
"gain": [ 1.0, 2.0, 4.0 ]
},
"long":
{
"shutter": [ 100, 20000, 66666, 120000 ],
"gain": [ 1.0, 2.0, 4.0, 4.0 ]
}
},
"constraint_modes":
{
"normal": [
{
"bound": "LOWER",
"q_lo": 0.98,
"q_hi": 1.0,
"y_target":
[
0, 0.5,
1000, 0.5
]
}
],
"highlight": [
{
"bound": "LOWER",
"q_lo": 0.98,
"q_hi": 1.0,
"y_target":
[
0, 0.5,
1000, 0.5
]
},
{
"bound": "UPPER",
"q_lo": 0.98,
"q_hi": 1.0,
"y_target":
[
0, 0.8,
1000, 0.8
]
}
],
"shadows": [
{
"bound": "LOWER",
"q_lo": 0.98,
"q_hi": 1.0,
"y_target":
[
0, 0.5,
1000, 0.5
]
}
]
},
"y_target":
[
0, 0.16,
1000, 0.16,
10000, 0.17
]
}
]
},
"rpi.alsc": {
'omega': 1.3,
'n_iter': 100,
'luminance_strength': 0.8,
},
"rpi.contrast": {
"ce_enable": 1,
"gamma_curve": [
0, 0,
1024, 5040,
2048, 9338,
3072, 12356,
4096, 15312,
5120, 18051,
6144, 20790,
7168, 23193,
8192, 25744,
9216, 27942,
10240, 30035,
11264, 32005,
12288, 33975,
13312, 35815,
14336, 37600,
15360, 39168,
16384, 40642,
18432, 43379,
20480, 45749,
22528, 47753,
24576, 49621,
26624, 51253,
28672, 52698,
30720, 53796,
32768, 54876,
36864, 57012,
40960, 58656,
45056, 59954,
49152, 61183,
53248, 62355,
57344, 63419,
61440, 64476,
65535, 65535
]
},
"rpi.ccm": {
},
"rpi.cac": {
},
"rpi.sharpen": {
"threshold": 0.25,
"limit": 1.0,
"strength": 1.0
},
"rpi.hdr":
{
"Off":
{
"cadence": [ 0 ]
},
"MultiExposureUnmerged":
{
"cadence": [ 1, 2 ],
"channel_map": { "short": 1, "long": 2 }
},
"SingleExposure":
{
"cadence": [1],
"channel_map": { "short": 1 },
"spatial_gain": 2.0,
"tonemap_enable": 1
},
"MultiExposure":
{
"cadence": [1, 2],
"channel_map": { "short": 1, "long": 2 },
"stitch_enable": 1,
"spatial_gain": 2.0,
"tonemap_enable": 1
},
"Night":
{
"cadence": [ 3 ],
"channel_map": { "night": 3 },
"tonemap_enable": 1,
"tonemap":
[
0, 0,
5000, 20000,
10000, 30000,
20000, 47000,
30000, 55000,
65535, 65535
]
}
}
}
grid_size = (32, 32)
|
0 | repos/libcamera/utils/raspberrypi | repos/libcamera/utils/raspberrypi/ctt/ctt_lux.py | # SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
# camera tuning tool for lux level
from ctt_tools import *
"""
Find lux values from metadata and calculate Y
"""
def lux(Cam, Img):
    """Compute the Y (luminance) value of Img from its exposure metadata.

    Returns a tuple (y, shutter_speed, gain).
    """
    shutter_speed = Img.exposure
    gain = Img.againQ8_norm
    # aperture is fixed on the supported camera modules
    aperture = 1
    for label, value in (('Shutter speed', shutter_speed),
                         ('Gain', gain),
                         ('Aperture', aperture)):
        Cam.log += '\n{} = {}'.format(label, value)
    # reorder the patch/channel data into canonical bayer order
    patches = [Img.patches[i] for i in Img.order]
    channels = [Img.channels[i] for i in Img.order]
    y = lux_calc(Cam, Img, patches, channels)
    return y, shutter_speed, gain
"""
perform lux calibration on bayer channels
"""
def lux_calc(Cam, Img, patches, channels):
    """Perform the lux calibration on the bayer channels.

    Computes per-channel gains from the grey patches, scales the whole-image
    channel means by those gains, and returns the integer Y value from the
    top row of the YUV matrix.

    Fix: the Green/Blue values in both log lines were swapped — the format
    calls passed (r, b, g) against a "Red Green Blue" template.
    """
    """
    find means color channels on grey patches
    """
    ap_r = np.mean(patches[0][3::4])
    ap_g = (np.mean(patches[1][3::4])+np.mean(patches[2][3::4]))/2
    ap_b = np.mean(patches[3][3::4])
    Cam.log += '\nAverage channel values on grey patches:'
    Cam.log += '\nRed = {:.0f} Green = {:.0f} Blue = {:.0f}'.format(ap_r, ap_g, ap_b)
    """
    calculate channel gains
    """
    gr = ap_g/ap_r
    gb = ap_g/ap_b
    Cam.log += '\nChannel gains: Red = {:.3f} Blue = {:.3f}'.format(gr, gb)
    """
    find means color channels on image and scale by gain
    note greens are averaged together (treated as one channel)
    """
    a_r = np.mean(channels[0])*gr
    a_g = (np.mean(channels[1])+np.mean(channels[2]))/2
    a_b = np.mean(channels[3])*gb
    Cam.log += '\nAverage channel values over entire image scaled by channel gains:'
    Cam.log += '\nRed = {:.0f} Green = {:.0f} Blue = {:.0f}'.format(a_r, a_g, a_b)
    """
    Calculate y with top row of yuv matrix
    """
    y = 0.299*a_r + 0.587*a_g + 0.114*a_b
    Cam.log += '\nY value calculated: {}'.format(int(y))
    return int(y)
|
0 | repos/libcamera/utils/raspberrypi | repos/libcamera/utils/raspberrypi/ctt/alsc_only.py | #!/usr/bin/env python3
#
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2022, Raspberry Pi Ltd
#
# alsc tuning tool
import sys
from ctt import *
from ctt_tools import parse_input
if __name__ == '__main__':
    """
    initialise calibration
    """
    if len(sys.argv) == 1:
        print("""
PiSP Lens Shading Camera Tuning Tool version 1.0
Required Arguments:
'-i' : Calibration image directory.
'-o' : Name of output json file.
Optional Arguments:
'-t' : Target platform - 'pisp' or 'vc4'. Default 'vc4'
'-c' : Config file for the CTT. If not passed, default parameters used.
'-l' : Name of output log file. If not passed, 'ctt_log.txt' used.
""")
        quit(0)
    else:
        """
        parse input arguments
        """
        json_output, directory, config, log_output, target = parse_input()
        # select the target-specific tuning template
        if target == 'pisp':
            from ctt_pisp import json_template, grid_size
        elif target == 'vc4':
            from ctt_vc4 import json_template, grid_size
        else:
            # previously an unknown target fell through to a NameError on
            # json_template; fail with a clear message instead
            raise ValueError('Unsupported target platform: {}'.format(target))
        run_ctt(json_output, directory, config, log_output, json_template, grid_size, target, alsc_only=True)
|
0 | repos/libcamera/utils/raspberrypi | repos/libcamera/utils/raspberrypi/ctt/ctt.py | #!/usr/bin/env python3
#
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
# camera tuning tool
import os
import sys
from ctt_image_load import *
from ctt_cac import *
from ctt_ccm import *
from ctt_awb import *
from ctt_alsc import *
from ctt_lux import *
from ctt_noise import *
from ctt_geq import *
from ctt_pretty_print_json import pretty_print
import random
import json
import re
"""
This file houses the camera object, which is used to perform the calibrations.
The camera object houses all the calibration images as attributes in three lists:
- imgs (macbeth charts)
- imgs_alsc (alsc correction images)
- imgs_cac (cac correction images)
Various calibrations are methods of the camera object, and the output is stored
in a dictionary called self.json.
Once all the caibration has been completed, the Camera.json is written into a
json file.
The camera object initialises its json dictionary by reading from a pre-written
blank json file. This has been done to avoid reproducing the entire json file
in the code here, thereby avoiding unecessary clutter.
"""
"""
Get the colour and lux values from the strings of each inidvidual image
"""
def get_col_lux(string):
    """Extract the colour temperature ('...k') and lux ('...l') tags
    encoded in a calibration image filename.

    Returns (col, lux). Either element is None when its tag is missing;
    note col is converted to int only when both tags are present (alsc
    images, which carry no lux tag, get the colour back as a string).
    """
    suffix = r'(\.(jpg|jpeg|brcm|dng)|_.*\.(jpg|jpeg|brcm|dng))$'
    col_match = re.search(r'([0-9]+)[kK]' + suffix, string)
    lux_match = re.search(r'([0-9]+)[lL]' + suffix, string)
    # no colour temperature: the filename is unusable
    if col_match is None:
        return None, None
    col = col_match.group(1)
    # colour found but no lux value: still return the colour
    if lux_match is None:
        return col, None
    return int(col), int(lux_match.group(1))
"""
Camera object that is the backbone of the tuning tool.
Input is the desired path of the output json.
"""
class Camera:
    """Backbone of the tuning tool.

    Holds the calibration images (macbeth, alsc, cac), the running log
    string, and the json dictionary that the calibrations populate.
    """
    def __init__(self, jfile, json):
        # directory containing this script; '' when expansion yields the root
        self.path = os.path.dirname(os.path.expanduser(__file__)) + '/'
        if self.path == '/':
            self.path = ''
        self.imgs = []        # macbeth chart images
        self.imgs_alsc = []   # lens shading calibration images
        self.imgs_cac = []    # chromatic aberration calibration images
        self.log = 'Log created : ' + time.asctime(time.localtime(time.time()))
        self.log_separator = '\n'+'-'*70+'\n'
        self.jf = jfile       # output json path
        """
        initial json dict populated by uncalibrated values
        """
        self.json = json
"""
Perform colour correction calibrations by comparing macbeth patch colours
to standard macbeth chart colours.
"""
def ccm_cal(self, do_alsc_colour, grid_size):
if 'rpi.ccm' in self.disable:
return 1
print('\nStarting CCM calibration')
self.log_new_sec('CCM')
"""
if image is greyscale then CCm makes no sense
"""
if self.grey:
print('\nERROR: Can\'t do CCM on greyscale image!')
self.log += '\nERROR: Cannot perform CCM calibration '
self.log += 'on greyscale image!\nCCM aborted!'
del self.json['rpi.ccm']
return 0
a = time.time()
"""
Check if alsc tables have been generated, if not then do ccm without
alsc
"""
if ("rpi.alsc" not in self.disable) and do_alsc_colour:
"""
case where ALSC colour has been done, so no errors should be
expected...
"""
try:
cal_cr_list = self.json['rpi.alsc']['calibrations_Cr']
cal_cb_list = self.json['rpi.alsc']['calibrations_Cb']
self.log += '\nALSC tables found successfully'
except KeyError:
cal_cr_list, cal_cb_list = None, None
print('WARNING! No ALSC tables found for CCM!')
print('Performing CCM calibrations without ALSC correction...')
self.log += '\nWARNING: No ALSC tables found.\nCCM calibration '
self.log += 'performed without ALSC correction...'
else:
"""
case where config options result in CCM done without ALSC colour tables
"""
cal_cr_list, cal_cb_list = None, None
self.log += '\nWARNING: No ALSC tables found.\nCCM calibration '
self.log += 'performed without ALSC correction...'
"""
Do CCM calibration
"""
try:
ccms = ccm(self, cal_cr_list, cal_cb_list, grid_size)
except ArithmeticError:
print('ERROR: Matrix is singular!\nTake new pictures and try again...')
self.log += '\nERROR: Singular matrix encountered during fit!'
self.log += '\nCCM aborted!'
return 1
"""
Write output to json
"""
self.json['rpi.ccm']['ccms'] = ccms
self.log += '\nCCM calibration written to json file'
print('Finished CCM calibration')
"""
Perform chromatic abberation correction using multiple dots images.
"""
def cac_cal(self, do_alsc_colour):
if 'rpi.cac' in self.disable:
return 1
print('\nStarting CAC calibration')
self.log_new_sec('CAC')
"""
check if cac images have been taken
"""
if len(self.imgs_cac) == 0:
print('\nError:\nNo cac calibration images found')
self.log += '\nERROR: No CAC calibration images found!'
self.log += '\nCAC calibration aborted!'
return 1
"""
if image is greyscale then CAC makes no sense
"""
if self.grey:
print('\nERROR: Can\'t do CAC on greyscale image!')
self.log += '\nERROR: Cannot perform CAC calibration '
self.log += 'on greyscale image!\nCAC aborted!'
del self.json['rpi.cac']
return 0
a = time.time()
"""
Check if camera is greyscale or color. If not greyscale, then perform cac
"""
if do_alsc_colour:
"""
Here we have a color sensor. Perform cac
"""
try:
cacs = cac(self)
except ArithmeticError:
print('ERROR: Matrix is singular!\nTake new pictures and try again...')
self.log += '\nERROR: Singular matrix encountered during fit!'
self.log += '\nCAC aborted!'
return 1
else:
"""
case where config options suggest greyscale camera. No point in doing CAC
"""
cal_cr_list, cal_cb_list = None, None
self.log += '\nWARNING: No ALSC tables found.\nCAC calibration '
self.log += 'performed without ALSC correction...'
"""
Write output to json
"""
self.json['rpi.cac']['cac'] = cacs
self.log += '\nCAC calibration written to json file'
print('Finished CAC calibration')
"""
Auto white balance calibration produces a colour curve for
various colour temperatures, as well as providing a maximum 'wiggle room'
distance from this curve (transverse_neg/pos).
"""
    def awb_cal(self, greyworld, do_alsc_colour, grid_size):
        """Calibrate auto white balance.

        Produces the colour temperature curve plus the transverse_pos/neg
        'wiggle room' distances and writes them into self.json['rpi.awb'].
        greyworld switches off the bayes algorithm (e.g. for noir cameras).
        Returns 1 when disabled, 0 when impossible (greyscale sensor).
        """
        if 'rpi.awb' in self.disable:
            return 1
        print('\nStarting AWB calibration')
        self.log_new_sec('AWB')
        """
        if image is greyscale then AWB makes no sense
        """
        if self.grey:
            print('\nERROR: Can\'t do AWB on greyscale image!')
            self.log += '\nERROR: Cannot perform AWB calibration '
            self.log += 'on greyscale image!\nAWB aborted!'
            del self.json['rpi.awb']
            return 0
        """
        optional set greyworld (e.g. for noir cameras)
        """
        if greyworld:
            self.json['rpi.awb']['bayes'] = 0
            self.log += '\nGreyworld set'
        """
        Check if alsc tables have been generated, if not then do awb without
        alsc correction
        """
        if ("rpi.alsc" not in self.disable) and do_alsc_colour:
            try:
                cal_cr_list = self.json['rpi.alsc']['calibrations_Cr']
                cal_cb_list = self.json['rpi.alsc']['calibrations_Cb']
                self.log += '\nALSC tables found successfully'
            except KeyError:
                cal_cr_list, cal_cb_list = None, None
                print('ERROR, no ALSC calibrations found for AWB')
                print('Performing AWB without ALSC tables')
                self.log += '\nWARNING: No ALSC tables found.\nAWB calibration '
                self.log += 'performed without ALSC correction...'
        else:
            cal_cr_list, cal_cb_list = None, None
            self.log += '\nWARNING: No ALSC tables found.\nAWB calibration '
            self.log += 'performed without ALSC correction...'
        """
        call calibration function
        """
        plot = "rpi.awb" in self.plot
        awb_out = awb(self, cal_cr_list, cal_cb_list, plot, grid_size)
        ct_curve, transverse_neg, transverse_pos = awb_out
        """
        write output to json
        """
        self.json['rpi.awb']['ct_curve'] = ct_curve
        self.json['rpi.awb']['sensitivity_r'] = 1.0
        self.json['rpi.awb']['sensitivity_b'] = 1.0
        self.json['rpi.awb']['transverse_pos'] = transverse_pos
        self.json['rpi.awb']['transverse_neg'] = transverse_neg
        self.log += '\nAWB calibration written to json file'
        print('Finished AWB calibration')
"""
Auto lens shading correction completely mitigates the effects of lens shading for ech
colour channel seperately, and then partially corrects for vignetting.
The extent of the correction depends on the 'luminance_strength' parameter.
"""
    def alsc_cal(self, luminance_strength, do_alsc_colour, grid_size, max_gain=8.0):
        """Calibrate automatic lens shading correction.

        Computes Cr/Cb colour shading tables and a luminance table from the
        alsc images; vignetting correction strength is set by
        luminance_strength and gains are capped at max_gain. Also derives
        the adaptive-algorithm sigmas when at least two colour temperatures
        are available. Returns 1 on every completed or aborted path.
        """
        if 'rpi.alsc' in self.disable:
            return 1
        print('\nStarting ALSC calibration')
        self.log_new_sec('ALSC')
        """
        check if alsc images have been taken
        """
        if len(self.imgs_alsc) == 0:
            print('\nError:\nNo alsc calibration images found')
            self.log += '\nERROR: No ALSC calibration images found!'
            self.log += '\nALSC calibration aborted!'
            return 1
        self.json['rpi.alsc']['luminance_strength'] = luminance_strength
        # colour shading tables cannot be computed for a mono sensor
        if self.grey and do_alsc_colour:
            print('Greyscale camera so only luminance_lut calculated')
            do_alsc_colour = False
            self.log += '\nWARNING: ALSC colour correction cannot be done on '
            self.log += 'greyscale image!\nALSC colour corrections forced off!'
        """
        call calibration function
        """
        plot = "rpi.alsc" in self.plot
        alsc_out = alsc_all(self, do_alsc_colour, plot, grid_size, max_gain=max_gain)
        cal_cr_list, cal_cb_list, luminance_lut, av_corn = alsc_out
        """
        write output to json and finish if not do_alsc_colour
        """
        if not do_alsc_colour:
            self.json['rpi.alsc']['luminance_lut'] = luminance_lut
            self.json['rpi.alsc']['n_iter'] = 0
            self.log += '\nALSC calibrations written to json file'
            self.log += '\nNo colour calibrations performed'
            print('Finished ALSC calibrations')
            return 1
        self.json['rpi.alsc']['calibrations_Cr'] = cal_cr_list
        self.json['rpi.alsc']['calibrations_Cb'] = cal_cb_list
        self.json['rpi.alsc']['luminance_lut'] = luminance_lut
        self.log += '\nALSC colour and luminance tables written to json file'
        """
        The sigmas determine the strength of the adaptive algorithm, that
        cleans up any lens shading that has slipped through the alsc. These are
        determined by measuring a 'worst-case' difference between two alsc tables
        that are adjacent in colour space. If, however, only one colour
        temperature has been provided, then this difference can not be computed
        as only one table is available.
        To determine the sigmas you would have to estimate the error of an alsc
        table with only the image it was taken on as a check. To avoid circularity,
        dfault exaggerated sigmas are used, which can result in too much alsc and
        is therefore not advised.
        In general, just take another alsc picture at another colour temperature!
        """
        if len(self.imgs_alsc) == 1:
            self.json['rpi.alsc']['sigma'] = 0.005
            self.json['rpi.alsc']['sigma_Cb'] = 0.005
            print('\nWarning:\nOnly one alsc calibration found'
                  '\nStandard sigmas used for adaptive algorithm.')
            print('Finished ALSC calibrations')
            self.log += '\nWARNING: Only one colour temperature found in '
            self.log += 'calibration images.\nStandard sigmas used for adaptive '
            self.log += 'algorithm!'
            return 1
        """
        obtain worst-case scenario residual sigmas
        """
        sigma_r, sigma_b = get_sigma(self, cal_cr_list, cal_cb_list, grid_size)
        """
        write output to json
        """
        self.json['rpi.alsc']['sigma'] = np.round(sigma_r, 5)
        self.json['rpi.alsc']['sigma_Cb'] = np.round(sigma_b, 5)
        self.log += '\nCalibrated sigmas written to json file'
        print('Finished ALSC calibrations')
"""
Green equalisation fixes problems caused by discrepancies in green
channels. This is done by measuring the effect on macbeth chart patches,
which ideally would have the same green values throughout.
An upper bound linear model is fit, fixing a threshold for the green
differences that are corrected.
"""
def geq_cal(self):
if 'rpi.geq' in self.disable:
return 1
print('\nStarting GEQ calibrations')
self.log_new_sec('GEQ')
"""
perform calibration
"""
plot = 'rpi.geq' in self.plot
slope, offset = geq_fit(self, plot)
"""
write output to json
"""
self.json['rpi.geq']['offset'] = offset
self.json['rpi.geq']['slope'] = slope
self.log += '\nGEQ calibrations written to json file'
print('Finished GEQ calibrations')
"""
Lux calibrations allow the lux level of a scene to be estimated by a ratio
calculation. Lux values are used in the pipeline for algorithms such as AGC
and AWB
"""
    def lux_cal(self):
        """Calibrate the lux estimation reference values.

        Picks the single macbeth image whose lux label is closest to 1000
        and stores its shutter speed, gain, lux and computed Y value in the
        json as the reference point. Returns 1 when disabled.
        """
        if 'rpi.lux' in self.disable:
            return 1
        print('\nStarting LUX calibrations')
        self.log_new_sec('LUX')
        """
        The lux calibration is done on a single image. For best effects, the
        image with lux level closest to 1000 is chosen.
        """
        luxes = [Img.lux for Img in self.imgs]
        # despite the name, argmax is the index of the lux value closest to 1000
        argmax = luxes.index(min(luxes, key=lambda l: abs(1000-l)))
        Img = self.imgs[argmax]
        self.log += '\nLux found closest to 1000: {} lx'.format(Img.lux)
        self.log += '\nImage used: ' + Img.name
        if Img.lux < 50:
            self.log += '\nWARNING: Low lux could cause inaccurate calibrations!'
        """
        do calibration
        """
        lux_out, shutter_speed, gain = lux(self, Img)
        """
        write output to json
        """
        self.json['rpi.lux']['reference_shutter_speed'] = shutter_speed
        self.json['rpi.lux']['reference_gain'] = gain
        self.json['rpi.lux']['reference_lux'] = Img.lux
        self.json['rpi.lux']['reference_Y'] = lux_out
        self.log += '\nLUX calibrations written to json file'
        print('Finished LUX calibrations')
"""
Noise alibration attempts to describe the noise profile of the sensor. The
calibration is run on macbeth images and the final output is taken as the average
"""
    def noise_cal(self):
        """Calibrate the sensor noise profile.

        Runs the noise fit (std = slope*sqrt(mean) + constant) on every
        macbeth image and stores the interquartile mean of the fits in the
        json. Returns 1 when disabled.
        """
        if 'rpi.noise' in self.disable:
            return 1
        print('\nStarting NOISE calibrations')
        self.log_new_sec('NOISE')
        """
        run calibration on all images and sort by slope.
        """
        plot = "rpi.noise" in self.plot
        noise_out = sorted([noise(self, Img, plot) for Img in self.imgs], key=lambda x: x[0])
        self.log += '\nFinished processing images'
        """
        take the average of the interquartile
        """
        length = len(noise_out)
        # average only the middle half of the sorted fits to reject outliers
        noise_out = np.mean(noise_out[length//4:1+3*length//4], axis=0)
        self.log += '\nAverage noise profile: constant = {} '.format(int(noise_out[1]))
        self.log += 'slope = {:.3f}'.format(noise_out[0])
        """
        write to json
        """
        self.json['rpi.noise']['reference_constant'] = int(noise_out[1])
        self.json['rpi.noise']['reference_slope'] = round(noise_out[0], 3)
        self.log += '\nNOISE calibrations written to json'
        print('Finished NOISE calibrations')
"""
Removes json entries that are turned off
"""
def json_remove(self, disable):
self.log_new_sec('Disabling Options', cal=False)
if len(self.disable) == 0:
self.log += '\nNothing disabled!'
return 1
for key in disable:
try:
del self.json[key]
self.log += '\nDisabled: ' + key
except KeyError:
self.log += '\nERROR: ' + key + ' not found!'
"""
writes the json dictionary to the raw json file then make pretty
"""
def write_json(self, version=2.0, target='bcm2835', grid_size=(16, 12)):
"""
Write json dictionary to file using our version 2 format
"""
out_json = {
"version": version,
'target': target if target != 'vc4' else 'bcm2835',
"algorithms": [{name: data} for name, data in self.json.items()],
}
with open(self.jf, 'w') as f:
f.write(pretty_print(out_json,
custom_elems={'table': grid_size[0], 'luminance_lut': grid_size[0]}))
"""
add a new section to the log file
"""
def log_new_sec(self, section, cal=True):
self.log += '\n'+self.log_separator
self.log += section
if cal:
self.log += ' Calibration'
self.log += self.log_separator
"""
write script arguments to log file
"""
    def log_user_input(self, json_output, directory, config, log_output):
        """Record the command-line arguments in the log.

        config may be a path string, None (no config given), False (file
        not found) or True (invalid json syntax) — see run_ctt, which
        overwrites it with those sentinels.
        """
        self.log_new_sec('User Arguments', cal=False)
        self.log += '\nJson file output: ' + json_output
        self.log += '\nCalibration images directory: ' + directory
        if config is None:
            self.log += '\nNo configuration file input... using default options'
        elif config is False:
            self.log += '\nWARNING: Invalid configuration file path...'
            self.log += ' using default options'
        elif config is True:
            self.log += '\nWARNING: Invalid syntax in configuration file...'
            self.log += ' using default options'
        else:
            self.log += '\nConfiguration file: ' + config
        if log_output is None:
            self.log += '\nNo log file path input... using default: ctt_log.txt'
        else:
            self.log += '\nLog file output: ' + log_output
"""
write log file
"""
def write_log(self, filename):
if filename is None:
filename = 'ctt_log.txt'
self.log += '\n' + self.log_separator
with open(filename, 'w') as logfile:
logfile.write(self.log)
"""
Add all images from directory, pass into relevant list of images and
extrace lux and temperature values.
"""
    def add_imgs(self, directory, mac_config, blacklevel=-1):
        """Load every calibration image found in directory.

        Filenames determine classification: names containing 'alsc' go to
        imgs_alsc (colour tag required), names containing 'cac' go to
        imgs_cac, everything else is treated as a macbeth chart image and
        needs both colour ('...k') and lux ('...l') tags. A blacklevel
        other than -1 overrides the value read from the image metadata.
        """
        self.log_new_sec('Image Loading', cal=False)
        img_suc_msg = 'Image loaded successfully!'
        print('\n\nLoading images from '+directory)
        self.log += '\nDirectory: ' + directory
        """
        get list of files
        """
        filename_list = get_photos(directory)
        print("Files found: {}".format(len(filename_list)))
        self.log += '\nFiles found: {}'.format(len(filename_list))
        """
        iterate over files
        """
        filename_list.sort()
        for filename in filename_list:
            address = directory + filename
            print('\nLoading image: '+filename)
            self.log += '\n\nImage: ' + filename
            """
            obtain colour and lux value
            """
            col, lux = get_col_lux(filename)
            """
            Check if image is an alsc calibration image
            """
            if 'alsc' in filename:
                Img = load_image(self, address, mac=False)
                self.log += '\nIdentified as an ALSC image'
                """
                check if imagae data has been successfully unpacked
                """
                if Img == 0:
                    print('\nDISCARDED')
                    self.log += '\nImage discarded!'
                    continue
                    """
                    check that image colour temperature has been successfuly obtained
                    """
                elif col is not None:
                    """
                    if successful, append to list and continue to next image
                    """
                    Img.col = col
                    Img.name = filename
                    self.log += '\nColour temperature: {} K'.format(col)
                    self.imgs_alsc.append(Img)
                    if blacklevel != -1:
                        Img.blacklevel_16 = blacklevel
                    print(img_suc_msg)
                    continue
                else:
                    print('Error! No colour temperature found!')
                    self.log += '\nWARNING: Error reading colour temperature'
                    self.log += '\nImage discarded!'
                    print('DISCARDED')
            elif 'cac' in filename:
                # NOTE(review): unlike the other branches, a failed load
                # (Img == 0) is not checked here — confirm intentional
                Img = load_image(self, address, mac=False)
                self.log += '\nIdentified as an CAC image'
                Img.name = filename
                self.log += '\nColour temperature: {} K'.format(col)
                self.imgs_cac.append(Img)
                if blacklevel != -1:
                    Img.blacklevel_16 = blacklevel
                print(img_suc_msg)
                continue
            else:
                self.log += '\nIdentified as macbeth chart image'
                """
                if image isn't an alsc correction then it must have a lux and a
                colour temperature value to be useful
                """
                if lux is None:
                    print('DISCARDED')
                    self.log += '\nWARNING: Error reading lux value'
                    self.log += '\nImage discarded!'
                    continue
                Img = load_image(self, address, mac_config)
                """
                check that image data has been successfuly unpacked
                """
                if Img == 0:
                    print('DISCARDED')
                    self.log += '\nImage discarded!'
                    continue
                else:
                    """
                    if successful, append to list and continue to next image
                    """
                    Img.col, Img.lux = col, lux
                    Img.name = filename
                    self.log += '\nColour temperature: {} K'.format(col)
                    self.log += '\nLux value: {} lx'.format(lux)
                    if blacklevel != -1:
                        Img.blacklevel_16 = blacklevel
                    print(img_suc_msg)
                    self.imgs.append(Img)
        print('\nFinished loading images')
"""
Check that usable images have been found
Possible errors include:
- no macbeth chart
- incorrect filename/extension
- images from different cameras
"""
    def check_imgs(self, macbeth=True):
        """Sanity-check the loaded images and record shared camera metadata.

        With macbeth=True at least one macbeth image is required; otherwise
        any image suffices. Sets self.grey and self.blacklevel_16 from the
        first image's metadata. Returns 1 on success, 0 on failure.
        """
        self.log += '\n\nImages found:'
        self.log += '\nMacbeth : {}'.format(len(self.imgs))
        self.log += '\nALSC : {} '.format(len(self.imgs_alsc))
        self.log += '\nCAC: {} '.format(len(self.imgs_cac))
        self.log += '\n\nCamera metadata'
        """
        check usable images found
        """
        if len(self.imgs) == 0 and macbeth:
            print('\nERROR: No usable macbeth chart images found')
            self.log += '\nERROR: No usable macbeth chart images found'
            return 0
        elif len(self.imgs) == 0 and len(self.imgs_alsc) == 0 and len(self.imgs_cac) == 0:
            print('\nERROR: No usable images found')
            self.log += '\nERROR: No usable images found'
            return 0
        """
        Double check that every image has come from the same camera...
        """
        all_imgs = self.imgs + self.imgs_alsc + self.imgs_cac
        camNames = list(set([Img.camName for Img in all_imgs]))
        patterns = list(set([Img.pattern for Img in all_imgs]))
        sigbitss = list(set([Img.sigbits for Img in all_imgs]))
        blacklevels = list(set([Img.blacklevel_16 for Img in all_imgs]))
        sizes = list(set([(Img.w, Img.h) for Img in all_imgs]))
        # NOTE(review): the same-camera consistency check has been
        # short-circuited with 'if 1:', making the else branch below
        # unreachable — confirm whether mixed-camera detection should be
        # restored (e.g. len(camNames) == 1 and len(patterns) == 1 ...)
        if 1:
            self.grey = (patterns[0] == 128)
            self.blacklevel_16 = blacklevels[0]
            self.log += '\nName: {}'.format(camNames[0])
            self.log += '\nBayer pattern case: {}'.format(patterns[0])
            if self.grey:
                self.log += '\nGreyscale camera identified'
            self.log += '\nSignificant bits: {}'.format(sigbitss[0])
            self.log += '\nBlacklevel: {}'.format(blacklevels[0])
            self.log += '\nImage size: w = {} h = {}'.format(sizes[0][0], sizes[0][1])
            return 1
        else:
            print('\nERROR: Images from different cameras')
            self.log += '\nERROR: Images are from different cameras'
            return 0
def run_ctt(json_output, directory, config, log_output, json_template, grid_size, target, alsc_only=False):
    """Top-level driver for the tuning tool.

    Reads the optional json config, loads the calibration images from
    directory, runs each calibration in dependency order (ALSC before AWB
    and CCM, which consume the ALSC tables), then writes the tuning json
    and the log file. With alsc_only=True every algorithm except rpi.alsc
    is disabled. Raises ArgError for invalid paths/extensions.
    """
    """
    check input files are jsons
    """
    if json_output[-5:] != '.json':
        raise ArgError('\n\nError: Output must be a json file!')
    if config is not None:
        """
        check if config file is actually a json
        """
        if config[-5:] != '.json':
            raise ArgError('\n\nError: Config file must be a json file!')
        """
        read configurations
        """
        try:
            with open(config, 'r') as config_json:
                configs = json.load(config_json)
        except FileNotFoundError:
            # config is reused as a sentinel: False = bad path
            configs = {}
            config = False
        except json.decoder.JSONDecodeError:
            # True = file found but invalid json
            configs = {}
            config = True
    else:
        configs = {}
    """
    load configurations from config file, if not given then set default
    """
    disable = get_config(configs, "disable", [], 'list')
    plot = get_config(configs, "plot", [], 'list')
    awb_d = get_config(configs, "awb", {}, 'dict')
    greyworld = get_config(awb_d, "greyworld", 0, 'bool')
    alsc_d = get_config(configs, "alsc", {}, 'dict')
    do_alsc_colour = get_config(alsc_d, "do_alsc_colour", 1, 'bool')
    luminance_strength = get_config(alsc_d, "luminance_strength", 0.8, 'num')
    lsc_max_gain = get_config(alsc_d, "max_gain", 8.0, 'num')
    blacklevel = get_config(configs, "blacklevel", -1, 'num')
    macbeth_d = get_config(configs, "macbeth", {}, 'dict')
    mac_small = get_config(macbeth_d, "small", 0, 'bool')
    mac_show = get_config(macbeth_d, "show", 0, 'bool')
    mac_config = (mac_small, mac_show)
    print("Read lsc_max_gain", lsc_max_gain)
    # NOTE(review): the message says 'defaulted to 64' but the fallback is
    # -1, which means 'use the image metadata black level' — confirm wording
    if blacklevel < -1 or blacklevel >= 2**16:
        print('\nInvalid blacklevel, defaulted to 64')
        blacklevel = -1
    if luminance_strength < 0 or luminance_strength > 1:
        print('\nInvalid luminance_strength strength, defaulted to 0.5')
        luminance_strength = 0.5
    """
    sanitise directory path
    """
    if directory[-1] != '/':
        directory += '/'
    """
    initialise tuning tool and load images
    """
    try:
        Cam = Camera(json_output, json=json_template)
        Cam.log_user_input(json_output, directory, config, log_output)
        if alsc_only:
            # disable every algorithm except rpi.alsc
            disable = set(Cam.json.keys()).symmetric_difference({"rpi.alsc"})
        Cam.disable = disable
        Cam.plot = plot
        Cam.add_imgs(directory, mac_config, blacklevel)
    except FileNotFoundError:
        raise ArgError('\n\nError: Input image directory not found!')
    """
    preform calibrations as long as check_imgs returns True
    If alsc is activated then it must be done before awb and ccm since the alsc
    tables are used in awb and ccm calibrations
    ccm also technically does an awb but it measures this from the macbeth
    chart in the image rather than using calibration data
    """
    if Cam.check_imgs(macbeth=not alsc_only):
        if not alsc_only:
            Cam.json['rpi.black_level']['black_level'] = Cam.blacklevel_16
        Cam.json_remove(disable)
        print('\nSTARTING CALIBRATIONS')
        Cam.alsc_cal(luminance_strength, do_alsc_colour, grid_size, max_gain=lsc_max_gain)
        Cam.geq_cal()
        Cam.lux_cal()
        Cam.noise_cal()
        if "rpi.cac" in json_template:
            Cam.cac_cal(do_alsc_colour)
        Cam.awb_cal(greyworld, do_alsc_colour, grid_size)
        Cam.ccm_cal(do_alsc_colour, grid_size)
        print('\nFINISHED CALIBRATIONS')
        Cam.write_json(target=target, grid_size=grid_size)
        Cam.write_log(log_output)
        print('\nCalibrations written to: '+json_output)
        if log_output is None:
            log_output = 'ctt_log.txt'
        print('Log file written to: '+log_output)
        pass
    else:
        # no usable images: still flush the log so the user can see why
        Cam.write_log(log_output)
if __name__ == '__main__':
    """
    initialise calibration
    """
    if len(sys.argv) == 1:
        print("""
PiSP Tuning Tool version 1.0
Required Arguments:
'-i' : Calibration image directory.
'-o' : Name of output json file.
Optional Arguments:
'-t' : Target platform - 'pisp' or 'vc4'. Default 'vc4'
'-c' : Config file for the CTT. If not passed, default parameters used.
'-l' : Name of output log file. If not passed, 'ctt_log.txt' used.
""")
        quit(0)
    else:
        """
        parse input arguments
        """
        json_output, directory, config, log_output, target = parse_input()
        # select the target-specific tuning template
        if target == 'pisp':
            from ctt_pisp import json_template, grid_size
        elif target == 'vc4':
            from ctt_vc4 import json_template, grid_size
        else:
            # previously an unknown target fell through to a NameError on
            # json_template; fail with a clear message instead
            raise ValueError('Unsupported target platform: {}'.format(target))
        run_ctt(json_output, directory, config, log_output, json_template, grid_size, target)
|
0 | repos/libcamera/utils/raspberrypi | repos/libcamera/utils/raspberrypi/ctt/ctt_dots_locator.py | # SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2023, Raspberry Pi Ltd
#
# find_dots.py - Used by CAC algorithm to convert image to set of dots
'''
This file takes the black and white version of the image, along with
the color version. It then located the black dots on the image by
thresholding dark pixels.
In a rather fun way, the algorithm bounces around the thresholded area in a random path
We then use the maximum and minimum of these paths to determine the dot shape and size
This info is then used to return colored dots and locations back to the main file
'''
import numpy as np
import random
from PIL import Image, ImageEnhance, ImageFilter
def find_dots_locations(rgb_image, color_threshold=100, dots_edge_avoid=75, image_edge_avoid=10, search_path_length=500, grid_scan_step_size=10, logfile=None):
    """Locate the black calibration dots in an image.

    A contrast-boosted, blurred greyscale copy of rgb_image is scanned on a
    grid; each dark region found is explored by a random walk whose extremes
    give the dot's bounding box. Returns (dots, dots_location): the colour
    crop of each dot (with extra margin for fringing) and its top-left
    coordinate.

    Fix: logfile previously defaulted to open("log.txt", "a+"), which
    opened (and leaked) a file as a side effect at import time even though
    the parameter is never used. It now defaults to None and is kept only
    for interface compatibility.
    """
    # Initialise some starting variables
    pixels = Image.fromarray(rgb_image)
    pixels = pixels.convert("L")
    enhancer = ImageEnhance.Contrast(pixels)
    im_output = enhancer.enhance(1.4)
    # We smooth it slightly to make it easier for the dot recognition program to locate the dots
    im_output = im_output.filter(ImageFilter.GaussianBlur(radius=2))
    bw_image = np.array(im_output)
    location = [0, 0]
    dots = []
    dots_location = []
    # the program takes away the edges - we don't want a dot that is half a circle, the
    # centroids would all be wrong
    for x in range(dots_edge_avoid, len(bw_image) - dots_edge_avoid, grid_scan_step_size):
        for y in range(dots_edge_avoid, len(bw_image[0]) - dots_edge_avoid, grid_scan_step_size):
            location = [x, y]
            scrap_dot = False  # A variable used to make sure that this is a valid dot
            if (bw_image[location[0], location[1]] < color_threshold) and not (scrap_dot):
                heading = "south"  # Define a starting direction to move in
                coords = []
                for i in range(search_path_length):  # Creates a path of length `search_path_length`. This turns out to always be enough to work out the rough shape of the dot.
                    # Now make sure that the thresholded area doesn't come within 10 pixels of the edge of the image, ensures we capture all the CA
                    if ((image_edge_avoid < location[0] < len(bw_image) - image_edge_avoid) and (image_edge_avoid < location[1] < len(bw_image[0]) - image_edge_avoid)) and not (scrap_dot):
                        if heading == "south":
                            if bw_image[location[0] + 1, location[1]] < color_threshold:
                                # Here, notice it does not go south, but actually goes southeast
                                # This is crucial in ensuring that we make our way around the majority of the dot
                                location[0] = location[0] + 1
                                location[1] = location[1] + 1
                                heading = "south"
                            else:
                                # This happens when we reach a thresholded edge. We now randomly change direction and keep searching
                                direction = random.randint(1, 2)
                                if direction == 1:
                                    heading = "west"
                                if direction == 2:
                                    heading = "east"
                        if heading == "east":
                            if bw_image[location[0], location[1] + 1] < color_threshold:
                                location[1] = location[1] + 1
                                heading = "east"
                            else:
                                direction = random.randint(1, 2)
                                if direction == 1:
                                    heading = "north"
                                if direction == 2:
                                    heading = "south"
                        if heading == "west":
                            if bw_image[location[0], location[1] - 1] < color_threshold:
                                location[1] = location[1] - 1
                                heading = "west"
                            else:
                                direction = random.randint(1, 2)
                                if direction == 1:
                                    heading = "north"
                                if direction == 2:
                                    heading = "south"
                        if heading == "north":
                            if bw_image[location[0] - 1, location[1]] < color_threshold:
                                location[0] = location[0] - 1
                                heading = "north"
                            else:
                                direction = random.randint(1, 2)
                                if direction == 1:
                                    heading = "west"
                                if direction == 2:
                                    heading = "east"
                        # Log where our particle travels across the dot
                        coords.append([location[0], location[1]])
                    else:
                        scrap_dot = True  # We just don't have enough space around the dot, discard this one, and move on
                if not scrap_dot:
                    # get the size of the dot surrounding the dot
                    x_coords = np.array(coords)[:, 0]
                    y_coords = np.array(coords)[:, 1]
                    hsquaresize = max(list(x_coords)) - min(list(x_coords))
                    vsquaresize = max(list(y_coords)) - min(list(y_coords))
                    # Create the bounding coordinates of the rectangle surrounding the dot
                    # Program uses the dotsize + half of the dotsize to ensure we get all that color fringing
                    extra_space_factor = 0.45
                    top_left_x = (min(list(x_coords)) - int(hsquaresize * extra_space_factor))
                    btm_right_x = max(list(x_coords)) + int(hsquaresize * extra_space_factor)
                    top_left_y = (min(list(y_coords)) - int(vsquaresize * extra_space_factor))
                    btm_right_y = max(list(y_coords)) + int(vsquaresize * extra_space_factor)
                    # Overwrite the area of the dot to ensure we don't use it again
                    bw_image[top_left_x:btm_right_x, top_left_y:btm_right_y] = 255
                    # Add the color version of the dot to the list to send off, along with some coordinates.
                    dots.append(rgb_image[top_left_x:btm_right_x, top_left_y:btm_right_y])
                    dots_location.append([top_left_x, top_left_y])
                else:
                    # Dot was too close to the image border to be useable
                    pass
    return dots, dots_location
|
0 | repos/libcamera/utils/raspberrypi | repos/libcamera/utils/raspberrypi/ctt/ctt_noise.py | # SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
# camera tuning tool noise calibration
from ctt_image_load import *
import matplotlib.pyplot as plt
"""
Find noise standard deviation and fit to model:
noise std = a + b*sqrt(pixel mean)
"""
def noise(Cam, Img, plot):
    """
    Measure the noise profile of a single calibration image.

    The standard deviation and mean of every macbeth patch are computed and
    the model

        noise std = offset + slope * sqrt(pixel mean)

    is fitted by least squares.  Outlier patches (typically a saturated or
    uncharacteristically noisy white patch) are discarded and the fit is
    recalculated.  A negative offset is clamped to zero by refitting the
    slope only.

    Returns the final fit as [slope, offset].  Progress and results are
    appended to Cam.log.  When plot is True the data and fit are shown with
    matplotlib for debugging.
    """
    Cam.log += '\nProcessing image: {}'.format(Img.name)
    stds = []
    means = []
    """
    iterate through macbeth square patches
    """
    for ch_patches in Img.patches:
        for patch in ch_patches:
            """
            renormalise patch
            """
            patch = np.array(patch)
            patch = (patch-Img.blacklevel_16)/Img.againQ8_norm
            stds.append(np.std(patch))
            means.append(np.mean(patch))
    """
    clean data and ensure all means are above 0
    """
    stds = np.array(stds)
    means = np.clip(np.array(means), 0, None)
    sq_means = np.sqrt(means)
    """
    least squares fit model
    """
    fit = np.polyfit(sq_means, stds, 1)
    Cam.log += '\nBlack level = {}'.format(Img.blacklevel_16)
    Cam.log += '\nNoise profile: offset = {}'.format(int(fit[1]))
    Cam.log += ' slope = {:.3f}'.format(fit[0])
    """
    remove any values further than std from the fit
    anomalies most likely caused by:
    > uncharacteristically noisy white patch
    > saturation in the white patch
    """
    fit_score = np.abs(stds - fit[0]*sq_means - fit[1])
    fit_std = np.std(stds)
    fit_score_norm = fit_score - fit_std
    anom_ind = np.where(fit_score_norm > 1)
    sq_means_clean = np.delete(sq_means, anom_ind)
    stds_clean = np.delete(stds, anom_ind)
    removed = len(stds) - len(stds_clean)
    if removed != 0:
        Cam.log += '\nIdentified and removed {} anomalies.'.format(removed)
        Cam.log += '\nRecalculating fit'
        """
        recalculate fit with outliers removed
        """
        fit = np.polyfit(sq_means_clean, stds_clean, 1)
        Cam.log += '\nNoise profile: offset = {}'.format(int(fit[1]))
        Cam.log += ' slope = {:.3f}'.format(fit[0])
    """
    if fit const is < 0 then force through 0 by
    dividing by sq_means and fitting poly order 0
    """
    corrected = 0
    if fit[1] < 0:
        corrected = 1
        ones = np.ones(len(means))
        y_data = stds/sq_means
        fit2 = np.polyfit(ones, y_data, 0)
        Cam.log += '\nOffset below zero. Fit recalculated with zero offset'
        Cam.log += '\nNoise profile: offset = 0'
        Cam.log += ' slope = {:.3f}'.format(fit2[0])
    """
    plot fit for debug
    """
    if plot:
        x = np.arange(sq_means.max()//0.88)
        fit_plot = x*fit[0] + fit[1]
        plt.scatter(sq_means, stds, label='data', color='blue')
        plt.scatter(sq_means[anom_ind], stds[anom_ind], color='orange', label='anomalies')
        plt.plot(x, fit_plot, label='fit', color='red', ls=':')
        if fit[1] < 0:
            fit_plot_2 = x*fit2[0]
            plt.plot(x, fit_plot_2, label='fit 0 intercept', color='green', ls='--')
        plt.plot(0, 0)
        plt.title('Noise Plot\nImg: {}'.format(Img.str))
        plt.legend(loc='upper left')
        plt.xlabel('Sqrt Pixel Value')
        plt.ylabel('Noise Standard Deviation')
        plt.grid()
        plt.show()
    """
    format output to include forced 0 constant
    """
    Cam.log += '\n'
    if corrected:
        return [fit2[0], 0]
    return fit
|
0 | repos/libcamera/utils/raspberrypi | repos/libcamera/utils/raspberrypi/ctt/ctt_ccm.py | # SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
# camera tuning tool for CCM (colour correction matrix)
from ctt_image_load import *
from ctt_awb import get_alsc_patches
import colors
from scipy.optimize import minimize
from ctt_visualise import visualise_macbeth_chart
import numpy as np
"""
takes 8-bit macbeth chart values, degammas and returns 16 bit
"""
'''
This program has many options from which to derive the color matrix from.
The first is average. This minimises the average delta E across all patches of
the macbeth chart. Testing across all cameras yeilded this as the most color
accurate and vivid. Other options are avalible however.
Maximum minimises the maximum Delta E of the patches. It iterates through till
a minimum maximum is found (so that there is
not one patch that deviates wildly.)
This yields generally good results but overall the colors are less accurate
Have a fiddle with maximum and see what you think.
The final option allows you to select the patches for which to average across.
This means that you can bias certain patches, for instance if you want the
reds to be more accurate.
'''
matrix_selection_types = ["average", "maximum", "patches"]
typenum = 0 # select from array above, 0 = average, 1 = maximum, 2 = patches
test_patches = [1, 2, 5, 8, 9, 12, 14]
'''
Enter patches to test for. Can also be entered twice if you
would like twice as much bias on one patch.
'''
def degamma(x):
    """Convert 8-bit sRGB values to 16-bit linear RGB values."""
    # Normalise from [0, 255] down to [0, 1].
    normalised = x / ((2 ** 8) - 1)
    # sRGB EOTF: linear segment below 0.04045, power law above it.
    linear = np.where(normalised < 0.04045,
                      normalised / 12.92,
                      ((normalised + 0.055) / 1.055) ** 2.4)
    # Scale back up to the full 16-bit range.
    return linear * ((2 ** 16) - 1)
def gamma(x):
    """Gamma-encode a sequence of linear 8-bit colour values.

    Applies a pure power-law encode (no linear toe segment) to each
    component and returns the result as a list.
    """
    def encode(colour):
        return ((colour / 255) ** (1 / 2.4) * 1.055 - 0.055) * 255

    return [encode(colour) for colour in x]
"""
FInds colour correction matrices for list of images
"""
def ccm(Cam, cal_cr_list, cal_cb_list, grid_size):
    """Compute colour correction matrices for all calibration images.

    For each image the macbeth patches are extracted (with lens shading
    correction applied when cal_cr_list/cal_cb_list are provided), white
    balanced against the grey patches, and an initial CCM is fitted by
    least squares in RGB space (do_ccm).  The matrix is then refined with
    scipy.optimize.minimize to reduce the average delta E in LAB space.
    Matrices that share a colour temperature are averaged at the end.

    Returns a list of {'ct': colour_temperature, 'ccm': [9 floats]} dicts
    sorted by colour temperature.  Progress is appended to Cam.log.
    """
    global matrix_selection_types, typenum
    imgs = Cam.imgs
    """
    standard macbeth chart colour values
    """
    m_rgb = np.array([  # these are in RGB
        [116, 81, 67],    # dark skin
        [199, 147, 129],  # light skin
        [91, 122, 156],   # blue sky
        [90, 108, 64],    # foliage
        [130, 128, 176],  # blue flower
        [92, 190, 172],   # bluish green
        [224, 124, 47],   # orange
        [68, 91, 170],    # purplish blue
        [198, 82, 97],    # moderate red
        [94, 58, 106],    # purple
        [159, 189, 63],   # yellow green
        [230, 162, 39],   # orange yellow
        [35, 63, 147],    # blue
        [67, 149, 74],    # green
        [180, 49, 57],    # red
        [238, 198, 20],   # yellow
        [193, 84, 151],   # magenta
        [0, 136, 170],    # cyan (goes out of gamut)
        [245, 245, 243],  # white 9.5
        [200, 202, 202],  # neutral 8
        [161, 163, 163],  # neutral 6.5
        [121, 121, 122],  # neutral 5
        [82, 84, 86],     # neutral 3.5
        [49, 49, 51]      # black 2
    ])
    """
    convert reference colours from srgb to rgb
    """
    m_srgb = degamma(m_rgb)  # now in 16 bit color.

    # Produce array of LAB values for ideal color chart
    m_lab = [colors.RGB_to_LAB(color / 256) for color in m_srgb]

    """
    reorder reference values to match how patches are ordered
    """
    m_srgb = np.array([m_srgb[i::6] for i in range(6)]).reshape((24, 3))
    m_lab = np.array([m_lab[i::6] for i in range(6)]).reshape((24, 3))
    m_rgb = np.array([m_rgb[i::6] for i in range(6)]).reshape((24, 3))
    """
    reformat alsc correction tables or set colour_cals to None if alsc is
    deactivated
    """
    if cal_cr_list is None:
        colour_cals = None
    else:
        colour_cals = {}
        for cr, cb in zip(cal_cr_list, cal_cb_list):
            cr_tab = cr['table']
            cb_tab = cb['table']
            """
            normalise tables so min value is 1
            """
            cr_tab = cr_tab / np.min(cr_tab)
            cb_tab = cb_tab / np.min(cb_tab)
            colour_cals[cr['ct']] = [cr_tab, cb_tab]

    """
    for each image, perform awb and alsc corrections.
    Then calculate the colour correction matrix for that image, recording the
    ccm and the colour tempertaure.
    """
    ccm_tab = {}
    for Img in imgs:
        Cam.log += '\nProcessing image: ' + Img.name
        """
        get macbeth patches with alsc applied if alsc enabled.
        Note: if alsc is disabled then colour_cals will be set to None and no
        the function will simply return the macbeth patches
        """
        r, b, g = get_alsc_patches(Img, colour_cals, grey=False, grid_size=grid_size)
        """
        do awb
        Note: awb is done by measuring the macbeth chart in the image, rather
        than from the awb calibration. This is done so the awb will be perfect
        and the ccm matrices will be more accurate.
        """
        # Every 4th patch (starting at index 3) is a grey patch in the
        # reordered layout; use them to derive the white balance gains.
        r_greys, b_greys, g_greys = r[3::4], b[3::4], g[3::4]
        r_g = np.mean(r_greys / g_greys)
        b_g = np.mean(b_greys / g_greys)
        r = r / r_g
        b = b / b_g
        """
        normalise brightness wrt reference macbeth colours and then average
        each channel for each patch
        """
        gain = np.mean(m_srgb) / np.mean((r, g, b))
        Cam.log += '\nGain with respect to standard colours: {:.3f}'.format(gain)
        r = np.mean(gain * r, axis=1)
        b = np.mean(gain * b, axis=1)
        g = np.mean(gain * g, axis=1)
        """
        calculate ccm matrix
        """
        # ==== All of below should in sRGB ===##
        sumde = 0
        ccm = do_ccm(r, g, b, m_srgb)
        # This is the initial guess that our optimisation code works with.
        original_ccm = ccm
        r1 = ccm[0]
        r2 = ccm[1]
        g1 = ccm[3]
        g2 = ccm[4]
        b1 = ccm[6]
        b2 = ccm[7]
        '''
        COLOR MATRIX LOOKS AS BELOW
        R1 R2 R3   Rval     Outr
        G1 G2 G3 * Gval  =  G
        B1 B2 B3   Bval     B
        Will be optimising 6 elements and working out the third element using 1-r1-r2 = r3
        '''
        x0 = [r1, r2, g1, g2, b1, b2]
        '''
        We use our old CCM as the initial guess for the program to find the
        optimised matrix
        '''
        result = minimize(guess, x0, args=(r, g, b, m_lab), tol=0.01)
        '''
        This produces a color matrix which has the lowest delta E possible,
        based off the input data. Note it is impossible for this to reach
        zero since the input data is imperfect
        '''

        Cam.log += ("\n \n Optimised Matrix Below: \n \n")
        [r1, r2, g1, g2, b1, b2] = result.x
        # The new, optimised color correction matrix values
        # (third element of each row derived from the unity row-sum
        # constraint, which preserves greys)
        optimised_ccm = [r1, r2, (1 - r1 - r2), g1, g2, (1 - g1 - g2), b1, b2, (1 - b1 - b2)]

        # This is the optimised Color Matrix (preserving greys by summing rows up to 1)
        Cam.log += str(optimised_ccm)
        Cam.log += "\n Old Color Correction Matrix Below \n"
        Cam.log += str(ccm)

        formatted_ccm = np.array(original_ccm).reshape((3, 3))

        '''
        below is a whole load of code that then applies the latest color
        matrix, and returns LAB values for color. This can then be used
        to calculate the final delta E
        '''
        optimised_ccm_rgb = []  # Original Color Corrected Matrix RGB / LAB
        optimised_ccm_lab = []

        formatted_optimised_ccm = np.array(optimised_ccm).reshape((3, 3))
        after_gamma_rgb = []
        after_gamma_lab = []

        for RGB in zip(r, g, b):
            ccm_applied_rgb = np.dot(formatted_ccm, (np.array(RGB) / 256))
            optimised_ccm_rgb.append(gamma(ccm_applied_rgb))
            optimised_ccm_lab.append(colors.RGB_to_LAB(ccm_applied_rgb))

            optimised_ccm_applied_rgb = np.dot(formatted_optimised_ccm, np.array(RGB) / 256)
            after_gamma_rgb.append(gamma(optimised_ccm_applied_rgb))
            after_gamma_lab.append(colors.RGB_to_LAB(optimised_ccm_applied_rgb))
        '''
        Gamma After RGB / LAB - not used in calculations, only used for visualisation
        We now want to spit out some data that shows
        how the optimisation has improved the color matrices
        '''
        Cam.log += "Here are the Improvements"

        # CALCULATE WORST CASE delta e
        old_worst_delta_e = 0
        before_average = transform_and_evaluate(formatted_ccm, r, g, b, m_lab)
        new_worst_delta_e = 0
        after_average = transform_and_evaluate(formatted_optimised_ccm, r, g, b, m_lab)
        for i in range(24):
            old_delta_e = deltae(optimised_ccm_lab[i], m_lab[i])  # Current Old Delta E
            new_delta_e = deltae(after_gamma_lab[i], m_lab[i])  # Current New Delta E
            if old_delta_e > old_worst_delta_e:
                old_worst_delta_e = old_delta_e
            if new_delta_e > new_worst_delta_e:
                new_worst_delta_e = new_delta_e

        Cam.log += "Before color correction matrix was optimised, we got an average delta E of " + str(before_average) + " and a maximum delta E of " + str(old_worst_delta_e)
        Cam.log += "After color correction matrix was optimised, we got an average delta E of " + str(after_average) + " and a maximum delta E of " + str(new_worst_delta_e)

        visualise_macbeth_chart(m_rgb, optimised_ccm_rgb, after_gamma_rgb, str(Img.col) + str(matrix_selection_types[typenum]))
        '''
        The program will also save some visualisations of improvements.
        Very pretty to look at. Top rectangle is ideal, Left square is
        before optimisation, right square is after.
        '''

        """
        if a ccm has already been calculated for that temperature then don't
        overwrite but save both. They will then be averaged later on
        """  # Now going to use optimised color matrix, optimised_ccm
        if Img.col in ccm_tab.keys():
            ccm_tab[Img.col].append(optimised_ccm)
        else:
            ccm_tab[Img.col] = [optimised_ccm]
        Cam.log += '\n'

    Cam.log += '\nFinished processing images'
    """
    average any ccms that share a colour temperature
    """
    for k, v in ccm_tab.items():
        tab = np.mean(v, axis=0)
        # Nudge values sitting almost exactly on a 1e-4 boundary so the
        # later rounding is stable.
        tab = np.where((10000 * tab) % 1 <= 0.05, tab + 0.00001, tab)
        tab = np.where((10000 * tab) % 1 >= 0.95, tab - 0.00001, tab)
        ccm_tab[k] = list(np.round(tab, 5))
        Cam.log += '\nMatrix calculated for colour temperature of {} K'.format(k)

    """
    return all ccms with respective colour temperature in the correct format,
    sorted by their colour temperature
    """
    sorted_ccms = sorted(ccm_tab.items(), key=lambda kv: kv[0])
    ccms = []
    for i in sorted_ccms:
        ccms.append({
            'ct': i[0],
            'ccm': i[1]
        })
    return ccms
def guess(x0, r, g, b, m_lab):  # provides a method of numerical feedback for the optimisation code
    """Cost function for the CCM optimiser: average delta E of a candidate.

    The six free parameters in x0 are the first two entries of each row of
    the matrix; the third entry of every row is derived so that the row
    sums to one, which preserves grey colours.
    """
    r1, r2, g1, g2, b1, b2 = x0
    candidate = np.array([[r1, r2, 1 - r1 - r2],
                          [g1, g2, 1 - g1 - g2],
                          [b1, b2, 1 - b1 - b2]])
    return transform_and_evaluate(candidate, r, g, b, m_lab)
def transform_and_evaluate(ccm, r, g, b, m_lab):  # Transforms colors to LAB and applies the correction matrix
    """Apply ccm to every (r, g, b) patch, convert the results to LAB and
    return the average delta E against the reference patches m_lab."""
    corrected_lab = []
    for rgb in zip(r, g, b):
        # Apply the matrix in linear RGB space (values scaled by 1/256).
        corrected = np.dot(ccm, np.array(rgb) / 256)
        corrected_lab.append(colors.RGB_to_LAB(corrected))
    # Average the summed delta E over the 24 macbeth patches.
    return sumde(corrected_lab, m_lab) / 24
def sumde(listA, listB):
    """Accumulate delta E statistics between two lists of LAB colours.

    Depending on the module-level `typenum` selector the return value is:
      0 - the sum of the per-patch delta E values (caller averages it)
      1 - the maximum per-patch delta E
      2 - the sum of delta E over the patches listed in `test_patches`

    listA and listB must be equal-length sequences of LAB triples.
    """
    global typenum, test_patches
    sumde = 0
    maxde = 0
    patchde = []  # Per-patch delta E, used when optimising specific patches
    for itemA, itemB in zip(listA, listB):
        # Compute the distance once per pair instead of three times.
        de = deltae(itemA, itemB)
        if maxde < de:
            maxde = de
        patchde.append(de)
        sumde += de
    '''
    The different options specified at the start allow for
    the maximum to be returned, average or specific patches
    '''
    if typenum == 0:
        return sumde
    if typenum == 1:
        return maxde
    if typenum == 2:
        # Selects only certain patches and returns the output for them
        return sum(patchde[test_patch] for test_patch in test_patches)
"""
calculates the ccm for an individual image.
ccms are calculated in rgb space, and are fit by hand. Although it is a 3x3
matrix, each row must add up to 1 in order to conserve greyness, simplifying
calculation.
The initial CCM is calculated in RGB, and then optimised in LAB color space
This simplifies the initial calculation but then gets us the accuracy of
using LAB color space.
"""
def do_ccm(r, g, b, m_srgb):
    """Least-squares fit of a grey-preserving 3x3 CCM in RGB space.

    Because every row of the matrix must sum to 1 (to conserve greyness),
    each output channel has only two free coefficients; the model is

        out - B = a * (R - B) + c * (G - B)

    which is solved per channel with the normal equations.  Raises
    ArithmeticError when the system is (near-)singular - retake the
    calibration pictures in that case.

    Returns the matrix as a flat list of 9 coefficients, row major.
    """
    rb = r - b
    gb = g - b

    # Normal-equation sums shared by all three channels.
    rb_2 = np.sum(rb * rb)
    gb_2 = np.sum(gb * gb)
    rb_gb = np.sum(rb * gb)
    det = rb_2 * gb_2 - rb_gb * rb_gb

    """
    Raise error if matrix is singular...
    This shouldn't really happen with real data but if it does just take new
    pictures and try again, not much else to be done unfortunately...
    """
    if det < 0.001:
        raise ArithmeticError

    ccm = []
    for chan in range(3):
        target = m_srgb[..., chan] - b
        t_rb = np.sum(rb * target)
        t_gb = np.sum(gb * target)
        a = (gb_2 * t_rb - rb_gb * t_gb) / det
        c = (rb_2 * t_gb - rb_gb * t_rb) / det
        # Last coefficient follows from the unity row-sum constraint.
        ccm.extend([a, c, 1 - a - c])
    return ccm
def deltae(colorA, colorB):
    """Euclidean distance between two LAB colours (CIE delta E 1976)."""
    diffs = (colorA[i] - colorB[i] for i in range(3))
    return sum(d * d for d in diffs) ** 0.5
    # To neglect luminance from the calculation, sum only the last two
    # (a, b) components instead of all three.
|
0 | repos/libcamera/utils/raspberrypi | repos/libcamera/utils/raspberrypi/ctt/ctt_vc4.py | #!/usr/bin/env python3
#
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
# ctt_vc4.py - camera tuning tool data for VC4 platforms
# Tuning file template for VC4-based platforms. The calibration stages fill
# in or replace the per-algorithm blocks below; the values given here are
# the defaults used for anything left uncalibrated. Empty blocks enable an
# algorithm with its built-in defaults.
json_template = {
    "rpi.black_level": {
        "black_level": 4096
    },
    "rpi.dpc": {
    },
    "rpi.lux": {
        "reference_shutter_speed": 10000,
        "reference_gain": 1,
        "reference_aperture": 1.0
    },
    "rpi.noise": {
    },
    "rpi.geq": {
    },
    "rpi.sdn": {
    },
    "rpi.awb": {
        # Priors are piecewise-linear (colour temperature, weight) curves
        # selected by lux level.
        "priors": [
            {"lux": 0, "prior": [2000, 1.0, 3000, 0.0, 13000, 0.0]},
            {"lux": 800, "prior": [2000, 0.0, 6000, 2.0, 13000, 2.0]},
            {"lux": 1500, "prior": [2000, 0.0, 4000, 1.0, 6000, 6.0, 6500, 7.0, 7000, 1.0, 13000, 1.0]}
        ],
        # Colour temperature search range (K) for each AWB mode.
        "modes": {
            "auto": {"lo": 2500, "hi": 8000},
            "incandescent": {"lo": 2500, "hi": 3000},
            "tungsten": {"lo": 3000, "hi": 3500},
            "fluorescent": {"lo": 4000, "hi": 4700},
            "indoor": {"lo": 3000, "hi": 5000},
            "daylight": {"lo": 5500, "hi": 6500},
            "cloudy": {"lo": 7000, "hi": 8600}
        },
        "bayes": 1
    },
    "rpi.agc": {
        "metering_modes": {
            "centre-weighted": {
                "weights": [3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0]
            },
            "spot": {
                "weights": [2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            },
            "matrix": {
                "weights": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
            }
        },
        # Shutter (us) / gain ramps traversed as the scene gets darker.
        "exposure_modes": {
            "normal": {
                "shutter": [100, 10000, 30000, 60000, 120000],
                "gain": [1.0, 2.0, 4.0, 6.0, 6.0]
            },
            "short": {
                "shutter": [100, 5000, 10000, 20000, 120000],
                "gain": [1.0, 2.0, 4.0, 6.0, 6.0]
            }
        },
        "constraint_modes": {
            "normal": [
                {"bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.5, 1000, 0.5]}
            ],
            "highlight": [
                {"bound": "LOWER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.5, 1000, 0.5]},
                {"bound": "UPPER", "q_lo": 0.98, "q_hi": 1.0, "y_target": [0, 0.8, 1000, 0.8]}
            ]
        },
        "y_target": [0, 0.16, 1000, 0.165, 10000, 0.17]
    },
    "rpi.alsc": {
        'omega': 1.3,
        'n_iter': 100,
        'luminance_strength': 0.7,
    },
    "rpi.contrast": {
        "ce_enable": 1,
        # Gamma curve as (input, output) pairs over the 16-bit range.
        "gamma_curve": [
            0, 0,
            1024, 5040,
            2048, 9338,
            3072, 12356,
            4096, 15312,
            5120, 18051,
            6144, 20790,
            7168, 23193,
            8192, 25744,
            9216, 27942,
            10240, 30035,
            11264, 32005,
            12288, 33975,
            13312, 35815,
            14336, 37600,
            15360, 39168,
            16384, 40642,
            18432, 43379,
            20480, 45749,
            22528, 47753,
            24576, 49621,
            26624, 51253,
            28672, 52698,
            30720, 53796,
            32768, 54876,
            36864, 57012,
            40960, 58656,
            45056, 59954,
            49152, 61183,
            53248, 62355,
            57344, 63419,
            61440, 64476,
            65535, 65535
        ]
    },
    "rpi.ccm": {
    },
    "rpi.sharpen": {
    }
}

# Dimensions (columns, rows) of the lens shading tables on VC4.
grid_size = (16, 12)
|
0 | repos/libcamera/utils/raspberrypi | repos/libcamera/utils/raspberrypi/ctt/ctt_pretty_print_json.py | #!/usr/bin/env python3
#
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright 2022 Raspberry Pi Ltd
#
# Script to pretty print a Raspberry Pi tuning config JSON structure in
# version 2.0 and later formats.
import argparse
import json
import textwrap
class Encoder(json.JSONEncoder):
    """JSON encoder producing the layout used by Raspberry Pi tuning files.

    Flat numeric lists are printed on a single line when short, wrapped at
    a hard break column when long, or split into fixed-width rows for
    known table-like keys (lens shading tables, CCMs, gamma curves, ...).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Current nesting depth; multiplied by self.indent (set by the
        # json.JSONEncoder constructor) to build the indent prefix.
        self.indentation_level = 0
        # Column beyond which a single-line list gets wrapped.
        self.hard_break = 120
        # Keys whose list values are printed with this many elements per row.
        self.custom_elems = {
            'weights': 15,
            'table': 16,
            'luminance_lut': 16,
            'ct_curve': 3,
            'ccm': 3,
            'lut_rx': 9,
            'lut_bx': 9,
            'lut_by': 9,
            'lut_ry': 9,
            'gamma_curve': 2,
            'y_target': 2,
            'prior': 2,
            'tonemap': 2
        }

    def encode(self, o, node_key=None):
        """Recursively encode o.

        node_key is the dict key whose value is being encoded; it selects
        a custom per-row element count from self.custom_elems when set.
        """
        if isinstance(o, (list, tuple)):
            # Check if we are a flat list of numbers.
            if not any(isinstance(el, (list, tuple, dict)) for el in o):
                s = ', '.join(json.dumps(el) for el in o)
                if node_key in self.custom_elems.keys():
                    # Special case handling to specify number of elements in a row for tables, ccm, etc.
                    self.indentation_level += 1
                    sl = s.split(', ')
                    num = self.custom_elems[node_key]
                    chunk = [self.indent_str + ', '.join(sl[x:x + num]) for x in range(0, len(sl), num)]
                    t = ',\n'.join(chunk)
                    self.indentation_level -= 1
                    output = f'\n{self.indent_str}[\n{t}\n{self.indent_str}]'
                elif len(s) > self.hard_break - len(self.indent_str):
                    # Break a long list with wraps.
                    self.indentation_level += 1
                    t = textwrap.fill(s, self.hard_break, break_long_words=False,
                                      initial_indent=self.indent_str, subsequent_indent=self.indent_str)
                    self.indentation_level -= 1
                    output = f'\n{self.indent_str}[\n{t}\n{self.indent_str}]'
                else:
                    # Smaller lists can remain on a single line.
                    output = f' [ {s} ]'
                return output
            else:
                # Sub-structures in the list case.
                self.indentation_level += 1
                output = [self.indent_str + self.encode(el) for el in o]
                self.indentation_level -= 1
                output = ',\n'.join(output)
                return f' [\n{output}\n{self.indent_str}]'
        elif isinstance(o, dict):
            self.indentation_level += 1
            output = []
            for k, v in o.items():
                if isinstance(v, dict) and len(v) == 0:
                    # Empty config block special case.
                    output.append(self.indent_str + f'{json.dumps(k)}: {{ }}')
                else:
                    # Only linebreak if the next node is a config block.
                    sep = f'\n{self.indent_str}' if isinstance(v, dict) else ''
                    output.append(self.indent_str + f'{json.dumps(k)}:{sep}{self.encode(v, k)}')
            output = ',\n'.join(output)
            self.indentation_level -= 1
            return f'{{\n{output}\n{self.indent_str}}}'
        else:
            # Scalars fall through to the standard encoder.
            return ' ' + json.dumps(o)

    @property
    def indent_str(self) -> str:
        # Leading whitespace for the current nesting depth.
        return ' ' * self.indentation_level * self.indent

    def iterencode(self, o, **kwargs):
        # json.dump() calls iterencode(); route it through our encode().
        return self.encode(o)
def pretty_print(in_json: dict, custom_elems=None) -> str:
    """Render a version 2.0+ tuning dict as pretty-printed JSON text.

    custom_elems optionally overrides the number of elements printed per
    row for specific keys (e.g. platform-dependent table widths).

    Raises RuntimeError if the dictionary is not a valid version 2.0 or
    later tuning configuration.
    """
    if 'version' not in in_json or \
       'target' not in in_json or \
       'algorithms' not in in_json or \
       in_json['version'] < 2.0:
        raise RuntimeError('Incompatible JSON dictionary has been provided')

    encoder = Encoder(indent=4, sort_keys=False)
    # Merge caller overrides on top of the encoder defaults. A None default
    # avoids the mutable-default-argument pitfall of the previous `{}`.
    encoder.custom_elems |= (custom_elems or {})
    return encoder.encode(in_json)
if __name__ == "__main__":
    # Command line front end: prettify a tuning file, optionally writing it
    # back in place when no output path is given.
    parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter, description=
                                     'Prettify a version 2.0 camera tuning config JSON file.')
    parser.add_argument('-t', '--target', type=str, help='Target platform', choices=['pisp', 'vc4'], default='vc4')
    parser.add_argument('input', type=str, help='Input tuning file.')
    parser.add_argument('output', type=str, nargs='?',
                        help='Output converted tuning file. If not provided, the input file will be updated in-place.',
                        default=None)
    args = parser.parse_args()

    with open(args.input, 'r') as f:
        in_json = json.load(f)

    # The lens shading table dimensions are platform dependent, so pull
    # grid_size from the matching per-platform tuning description.
    if args.target == 'pisp':
        from ctt_pisp import grid_size
    elif args.target == 'vc4':
        from ctt_vc4 import grid_size

    out_json = pretty_print(in_json, custom_elems={'table': grid_size[0], 'luminance_lut': grid_size[0]})

    # Overwrite the input file when no explicit output path was given.
    with open(args.output if args.output is not None else args.input, 'w') as f:
        f.write(out_json)
|
0 | repos/libcamera | repos/libcamera/test/unique-fd.cpp | /* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2021, Google Inc.
*
* UniqueFD test
*/
#include <fcntl.h>
#include <iostream>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <libcamera/base/unique_fd.h>
#include <libcamera/base/utils.h>
#include "test.h"
using namespace libcamera;
using namespace std;
/*
 * Exercise the UniqueFD move-only file descriptor wrapper: default and fd
 * construction, move construction/assignment, swap, release and reset.
 * Whether the underlying fd is still open is verified by comparing the
 * inode of the descriptor against that of the temporary file it refers to.
 */
class UniqueFDTest : public Test
{
protected:
        int init() override
        {
                return createFd();
        }

        int run() override
        {
                /* Test creating empty UniqueFD. */
                UniqueFD fd;

                if (fd.isValid() || fd.get() != -1) {
                        std::cout << "Failed fd check (default constructor)"
                                  << std::endl;
                        return TestFail;
                }

                /* Test creating UniqueFD from numerical file descriptor. */
                UniqueFD fd2(fd_);
                if (!fd2.isValid() || fd2.get() != fd_) {
                        std::cout << "Failed fd check (fd constructor)"
                                  << std::endl;
                        return TestFail;
                }

                if (!isValidFd(fd_)) {
                        std::cout << "Failed fd validity (fd constructor)"
                                  << std::endl;
                        return TestFail;
                }

                /* Test move constructor. */
                UniqueFD fd3(std::move(fd2));
                if (!fd3.isValid() || fd3.get() != fd_) {
                        std::cout << "Failed fd check (move constructor)"
                                  << std::endl;
                        return TestFail;
                }

                if (fd2.isValid() || fd2.get() != -1) {
                        std::cout << "Failed moved fd check (move constructor)"
                                  << std::endl;
                        return TestFail;
                }

                if (!isValidFd(fd_)) {
                        std::cout << "Failed fd validity (move constructor)"
                                  << std::endl;
                        return TestFail;
                }

                /* Test move assignment operator. */
                fd = std::move(fd3);
                if (!fd.isValid() || fd.get() != fd_) {
                        std::cout << "Failed fd check (move assignment)"
                                  << std::endl;
                        return TestFail;
                }

                if (fd3.isValid() || fd3.get() != -1) {
                        std::cout << "Failed moved fd check (move assignment)"
                                  << std::endl;
                        return TestFail;
                }

                if (!isValidFd(fd_)) {
                        std::cout << "Failed fd validity (move assignment)"
                                  << std::endl;
                        return TestFail;
                }

                /* Test swapping. */
                fd2.swap(fd);
                if (!fd2.isValid() || fd2.get() != fd_) {
                        std::cout << "Failed fd check (swap)"
                                  << std::endl;
                        return TestFail;
                }

                if (fd.isValid() || fd.get() != -1) {
                        std::cout << "Failed swapped fd check (swap)"
                                  << std::endl;
                        return TestFail;
                }

                if (!isValidFd(fd_)) {
                        std::cout << "Failed fd validity (swap)"
                                  << std::endl;
                        return TestFail;
                }

                /* Test release. */
                int numFd = fd2.release();
                if (fd2.isValid() || fd2.get() != -1) {
                        std::cout << "Failed fd check (release)"
                                  << std::endl;
                        return TestFail;
                }

                if (numFd != fd_) {
                        std::cout << "Failed released fd check (release)"
                                  << std::endl;
                        return TestFail;
                }

                if (!isValidFd(fd_)) {
                        std::cout << "Failed fd validity (release)"
                                  << std::endl;
                        return TestFail;
                }

                /* Test reset assignment. */
                fd.reset(numFd);
                if (!fd.isValid() || fd.get() != fd_) {
                        std::cout << "Failed fd check (reset assignment)"
                                  << std::endl;
                        return TestFail;
                }

                if (!isValidFd(fd_)) {
                        std::cout << "Failed fd validity (reset assignment)"
                                  << std::endl;
                        return TestFail;
                }

                /* Test reset destruction. */
                fd.reset();
                if (fd.isValid() || fd.get() != -1) {
                        std::cout << "Failed fd check (reset destruction)"
                                  << std::endl;
                        return TestFail;
                }

                if (isValidFd(fd_)) {
                        std::cout << "Failed fd validity (reset destruction)"
                                  << std::endl;
                        return TestFail;
                }

                /* Test destruction. */
                if (createFd() == TestFail) {
                        std::cout << "Failed to recreate test fd"
                                  << std::endl;
                        return TestFail;
                }

                {
                        UniqueFD fd4(fd_);
                }

                if (isValidFd(fd_)) {
                        std::cout << "Failed fd validity (destruction)"
                                  << std::endl;
                        return TestFail;
                }

                return TestPass;
        }

        void cleanup() override
        {
                /*
                 * NOTE(review): by this point fd_ has normally been closed by
                 * a UniqueFD destructor; this close() is best-effort cleanup
                 * for early test failures.
                 */
                if (fd_ > 0)
                        close(fd_);
        }

private:
        /* Open an unlinked temporary file and cache its inode number. */
        int createFd()
        {
                fd_ = open("/tmp", O_TMPFILE | O_RDWR, S_IRUSR | S_IWUSR);
                if (fd_ < 0)
                        return TestFail;

                /* Cache inode number of temp file. */
                struct stat s;
                if (fstat(fd_, &s))
                        return TestFail;

                inodeNr_ = s.st_ino;

                return 0;
        }

        /* Return true if fd is still open and refers to the cached temp file. */
        bool isValidFd(int fd)
        {
                struct stat s;
                if (fstat(fd, &s))
                        return false;

                /* Check that inode number matches cached temp file. */
                return s.st_ino == inodeNr_;
        }

        int fd_;
        ino_t inodeNr_;
};
TEST_REGISTER(UniqueFDTest)
|
0 | repos/libcamera | repos/libcamera/test/mapped-buffer.cpp | /* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2020, Google Inc.
*
* libcamera internal MappedBuffer tests
*/
#include <iostream>
#include <libcamera/framebuffer_allocator.h>
#include "libcamera/internal/mapped_framebuffer.h"
#include "camera_test.h"
#include "test.h"
using namespace libcamera;
using namespace std;
namespace {
/*
 * Verify MappedFrameBuffer behaviour on buffers allocated from the vimc
 * virtual camera: successful read/write/read-write mappings, move support,
 * and invalidation of the moved-from object.
 */
class MappedBufferTest : public CameraTest, public Test
{
public:
        MappedBufferTest()
                : CameraTest("platform/vimc.0 Sensor B")
        {
        }

protected:
        int init() override
        {
                if (status_ != TestPass)
                        return status_;

                config_ = camera_->generateConfiguration({ StreamRole::VideoRecording });
                if (!config_ || config_->size() != 1) {
                        cout << "Failed to generate default configuration" << endl;
                        return TestFail;
                }

                allocator_ = new FrameBufferAllocator(camera_);

                StreamConfiguration &cfg = config_->at(0);

                if (camera_->acquire()) {
                        cout << "Failed to acquire the camera" << endl;
                        return TestFail;
                }

                if (camera_->configure(config_.get())) {
                        cout << "Failed to set default configuration" << endl;
                        return TestFail;
                }

                stream_ = cfg.stream();

                int ret = allocator_->allocate(stream_);
                if (ret < 0)
                        return TestFail;

                return TestPass;
        }

        void cleanup() override
        {
                delete allocator_;
        }

        int run() override
        {
                const std::unique_ptr<FrameBuffer> &buffer = allocator_->buffers(stream_).front();
                /*
                 * NOTE(review): elements are stored as the MappedBuffer base
                 * class; moving a MappedFrameBuffer in relies on the base
                 * move keeping the mappings owned - confirm against the
                 * MappedBuffer implementation.
                 */
                std::vector<MappedBuffer> maps;

                MappedFrameBuffer map(buffer.get(), MappedFrameBuffer::MapFlag::Read);
                if (!map.isValid()) {
                        cout << "Failed to successfully map buffer" << endl;
                        return TestFail;
                }

                /* Make sure we can move it. */
                maps.emplace_back(std::move(map));

                /* But copying is prevented, it would cause double-unmap. */
                // MappedFrameBuffer map_copy = map;

                /* Local map should be invalid (after move). */
                if (map.isValid()) {
                        cout << "Post-move map should not be valid" << endl;
                        return TestFail;
                }

                /* Test for multiple successful maps on the same buffer. */
                MappedFrameBuffer write_map(buffer.get(), MappedFrameBuffer::MapFlag::Write);
                if (!write_map.isValid()) {
                        cout << "Failed to map write buffer" << endl;
                        return TestFail;
                }

                MappedFrameBuffer rw_map(buffer.get(), MappedFrameBuffer::MapFlag::ReadWrite);
                if (!rw_map.isValid()) {
                        cout << "Failed to map RW buffer" << endl;
                        return TestFail;
                }

                return TestPass;
        }

private:
        std::unique_ptr<CameraConfiguration> config_;
        FrameBufferAllocator *allocator_;
        Stream *stream_;
};
} /* namespace */
TEST_REGISTER(MappedBufferTest)
|
0 | repos/libcamera | repos/libcamera/test/shared-fd.cpp | /* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* SharedFD test
*/
#include <fcntl.h>
#include <iostream>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <libcamera/base/shared_fd.h>
#include <libcamera/base/utils.h>
#include "test.h"
using namespace libcamera;
using namespace std;
class SharedFDTest : public Test
{
protected:
int init()
{
desc1_ = nullptr;
desc2_ = nullptr;
fd_ = open("/tmp", O_TMPFILE | O_RDWR, S_IRUSR | S_IWUSR);
if (fd_ < 0)
return TestFail;
/* Cache inode number of temp file. */
struct stat s;
if (fstat(fd_, &s))
return TestFail;
inodeNr_ = s.st_ino;
return 0;
}
int run()
{
/* Test creating empty SharedFD. */
desc1_ = new SharedFD();
if (desc1_->get() != -1) {
std::cout << "Failed fd numerical check (default constructor)"
<< std::endl;
return TestFail;
}
delete desc1_;
desc1_ = nullptr;
/*
* Test creating SharedFD by copying numerical file
* descriptor.
*/
desc1_ = new SharedFD(fd_);
if (desc1_->get() == fd_) {
std::cout << "Failed fd numerical check (lvalue ref constructor)"
<< std::endl;
return TestFail;
}
if (!isValidFd(fd_) || !isValidFd(desc1_->get())) {
std::cout << "Failed fd validity after construction (lvalue ref constructor)"
<< std::endl;
return TestFail;
}
int fd = desc1_->get();
delete desc1_;
desc1_ = nullptr;
if (!isValidFd(fd_) || isValidFd(fd)) {
std::cout << "Failed fd validity after destruction (lvalue ref constructor)"
<< std::endl;
return TestFail;
}
/*
* Test creating SharedFD by taking ownership of
* numerical file descriptor.
*/
int dupFd = dup(fd_);
int dupFdCopy = dupFd;
desc1_ = new SharedFD(std::move(dupFd));
if (desc1_->get() != dupFdCopy) {
std::cout << "Failed fd numerical check (rvalue ref constructor)"
<< std::endl;
return TestFail;
}
if (dupFd != -1 || !isValidFd(fd_) || !isValidFd(desc1_->get())) {
std::cout << "Failed fd validity after construction (rvalue ref constructor)"
<< std::endl;
return TestFail;
}
fd = desc1_->get();
delete desc1_;
desc1_ = nullptr;
if (!isValidFd(fd_) || isValidFd(fd)) {
std::cout << "Failed fd validity after destruction (rvalue ref constructor)"
<< std::endl;
return TestFail;
}
/* Test creating SharedFD from other SharedFD. */
desc1_ = new SharedFD(fd_);
desc2_ = new SharedFD(*desc1_);
if (desc1_->get() == fd_ || desc2_->get() == fd_ ||
desc1_->get() != desc2_->get()) {
std::cout << "Failed fd numerical check (copy constructor)"
<< std::endl;
return TestFail;
}
if (!isValidFd(desc1_->get()) || !isValidFd(desc2_->get())) {
std::cout << "Failed fd validity after construction (copy constructor)"
<< std::endl;
return TestFail;
}
delete desc1_;
desc1_ = nullptr;
if (!isValidFd(desc2_->get())) {
std::cout << "Failed fd validity after destruction (copy constructor)"
<< std::endl;
return TestFail;
}
delete desc2_;
desc2_ = nullptr;
/* Test creating SharedFD by taking over other SharedFD. */
desc1_ = new SharedFD(fd_);
fd = desc1_->get();
desc2_ = new SharedFD(std::move(*desc1_));
if (desc1_->get() != -1 || desc2_->get() != fd) {
std::cout << "Failed fd numerical check (move constructor)"
<< std::endl;
return TestFail;
}
if (!isValidFd(desc2_->get())) {
std::cout << "Failed fd validity after construction (move constructor)"
<< std::endl;
return TestFail;
}
delete desc1_;
desc1_ = nullptr;
delete desc2_;
desc2_ = nullptr;
/* Test creating SharedFD by copy assignment. */
desc1_ = new SharedFD();
desc2_ = new SharedFD(fd_);
fd = desc2_->get();
*desc1_ = *desc2_;
if (desc1_->get() != fd || desc2_->get() != fd) {
std::cout << "Failed fd numerical check (copy assignment)"
<< std::endl;
return TestFail;
}
if (!isValidFd(desc1_->get()) || !isValidFd(desc2_->get())) {
std::cout << "Failed fd validity after construction (copy assignment)"
<< std::endl;
return TestFail;
}
delete desc1_;
desc1_ = nullptr;
delete desc2_;
desc2_ = nullptr;
/* Test creating SharedFD by move assignment. */
desc1_ = new SharedFD();
desc2_ = new SharedFD(fd_);
fd = desc2_->get();
*desc1_ = std::move(*desc2_);
if (desc1_->get() != fd || desc2_->get() != -1) {
std::cout << "Failed fd numerical check (move assignment)"
<< std::endl;
return TestFail;
}
if (!isValidFd(desc1_->get())) {
std::cout << "Failed fd validity after construction (move assignment)"
<< std::endl;
return TestFail;
}
delete desc1_;
desc1_ = nullptr;
delete desc2_;
desc2_ = nullptr;
return TestPass;
}
	/* Release test resources; safe to call even if run() bailed early. */
	void cleanup()
	{
		/* deleting nullptr is a no-op, so unconditional delete is safe. */
		delete desc2_;
		delete desc1_;

		/*
		 * NOTE(review): fd 0 (stdin) would be leaked by this check;
		 * presumably fd_ can never be 0 here — confirm against init().
		 */
		if (fd_ > 0)
			close(fd_);
	}
private:
bool isValidFd(int fd)
{
struct stat s;
if (fstat(fd, &s))
return false;
/* Check that inode number matches cached temp file. */
return s.st_ino == inodeNr_;
}
int fd_;
ino_t inodeNr_;
SharedFD *desc1_, *desc2_;
};
TEST_REGISTER(SharedFDTest)
|
0 | repos/libcamera | repos/libcamera/test/byte-stream-buffer.cpp | /* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2018, Google Inc.
*
* ByteStreamBuffer tests
*/
#include <array>
#include <iostream>
#include "libcamera/internal/byte_stream_buffer.h"
#include "test.h"
using namespace std;
using namespace libcamera;
class ByteStreamBufferTest : public Test
{
protected:
int run()
{
/*
* gcc 11.1.0 incorrectly raises a maybe-uninitialized warning
* when calling data.size() below (if the address sanitizer is
* disabled). Silence it by initializing the array.
*/
std::array<uint8_t, 100> data = {};
unsigned int i;
uint32_t value;
int ret;
/*
* Write mode.
*/
ByteStreamBuffer wbuf(data.data(), data.size());
if (wbuf.base() != data.data() || wbuf.size() != data.size() ||
wbuf.offset() != 0 || wbuf.overflow()) {
cerr << "Write buffer incorrectly constructed" << endl;
return TestFail;
}
/* Test write. */
value = 0x12345678;
ret = wbuf.write(&value);
if (ret || wbuf.offset() != 4 || wbuf.overflow() ||
*reinterpret_cast<uint32_t *>(data.data()) != 0x12345678) {
cerr << "Write failed on write buffer" << endl;
return TestFail;
}
/* Test write carve out. */
ByteStreamBuffer wco = wbuf.carveOut(10);
if (wco.base() != wbuf.base() + 4 || wco.size() != 10 ||
wco.offset() != 0 || wco.overflow() || wbuf.offset() != 14 ||
wbuf.overflow()) {
cerr << "Carving out write buffer failed" << endl;
return TestFail;
}
/* Test write on the carved out buffer. */
value = 0x87654321;
ret = wco.write(&value);
if (ret || wco.offset() != 4 || wco.overflow() ||
*reinterpret_cast<uint32_t *>(data.data() + 4) != 0x87654321) {
cerr << "Write failed on carve out buffer" << endl;
return TestFail;
}
if (wbuf.offset() != 14 || wbuf.overflow()) {
cerr << "Write on carve out buffer modified write buffer" << endl;
return TestFail;
}
/* Test read, this should fail. */
ret = wbuf.read(&value);
if (!ret || wbuf.overflow()) {
cerr << "Read should fail on write buffer" << endl;
return TestFail;
}
/* Test overflow on carved out buffer. */
for (i = 0; i < 2; ++i) {
ret = wco.write(&value);
if (ret < 0)
break;
}
if (i != 1 || !wco.overflow() || !wbuf.overflow()) {
cerr << "Write on carve out buffer failed to overflow" << endl;
return TestFail;
}
/* Test reinitialization of the buffer. */
wbuf = ByteStreamBuffer(data.data(), data.size());
if (wbuf.overflow() || wbuf.base() != data.data() ||
wbuf.offset() != 0) {
cerr << "Write buffer reinitialization failed" << endl;
return TestFail;
}
/*
* Read mode.
*/
ByteStreamBuffer rbuf(const_cast<const uint8_t *>(data.data()),
data.size());
if (rbuf.base() != data.data() || rbuf.size() != data.size() ||
rbuf.offset() != 0 || rbuf.overflow()) {
cerr << "Read buffer incorrectly constructed" << endl;
return TestFail;
}
/* Test read. */
value = 0;
ret = rbuf.read(&value);
if (ret || rbuf.offset() != 4 || rbuf.overflow() ||
value != 0x12345678) {
cerr << "Write failed on write buffer" << endl;
return TestFail;
}
/* Test read carve out. */
ByteStreamBuffer rco = rbuf.carveOut(10);
if (rco.base() != rbuf.base() + 4 || rco.size() != 10 ||
rco.offset() != 0 || rco.overflow() || rbuf.offset() != 14 ||
rbuf.overflow()) {
cerr << "Carving out read buffer failed" << endl;
return TestFail;
}
/* Test read on the carved out buffer. */
value = 0;
ret = rco.read(&value);
if (ret || rco.offset() != 4 || rco.overflow() || value != 0x87654321) {
cerr << "Read failed on carve out buffer" << endl;
return TestFail;
}
if (rbuf.offset() != 14 || rbuf.overflow()) {
cerr << "Read on carve out buffer modified read buffer" << endl;
return TestFail;
}
/* Test write, this should fail. */
ret = rbuf.write(&value);
if (!ret || rbuf.overflow()) {
cerr << "Write should fail on read buffer" << endl;
return TestFail;
}
/* Test overflow on carved out buffer. */
for (i = 0; i < 2; ++i) {
ret = rco.read(&value);
if (ret < 0)
break;
}
if (i != 1 || !rco.overflow() || !rbuf.overflow()) {
cerr << "Read on carve out buffer failed to overflow" << endl;
return TestFail;
}
/* Test reinitialization of the buffer. */
rbuf = ByteStreamBuffer(const_cast<const uint8_t *>(data.data()),
data.size());
if (rbuf.overflow() || rbuf.base() != data.data() ||
rbuf.offset() != 0) {
cerr << "Read buffer reinitialization failed" << endl;
return TestFail;
}
return TestPass;
}
};
TEST_REGISTER(ByteStreamBufferTest)
|
0 | repos/libcamera | repos/libcamera/test/hotplug-cameras.cpp | /* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2020, Umang Jain <[email protected]>
*
* Test cameraAdded/cameraRemoved signals in CameraManager
*/
#include <dirent.h>
#include <fstream>
#include <iostream>
#include <string.h>
#include <unistd.h>
#include <libcamera/camera.h>
#include <libcamera/camera_manager.h>
#include <libcamera/base/event_dispatcher.h>
#include <libcamera/base/file.h>
#include <libcamera/base/thread.h>
#include <libcamera/base/timer.h>
#include "test.h"
using namespace libcamera;
using namespace std::chrono_literals;
/*
 * Verify that unbinding and rebinding a UVC device triggers the
 * cameraRemoved and cameraAdded signals of the CameraManager.
 */
class HotplugTest : public Test
{
protected:
	void cameraAddedHandler([[maybe_unused]] std::shared_ptr<Camera> cam)
	{
		cameraAdded_ = true;
	}

	void cameraRemovedHandler([[maybe_unused]] std::shared_ptr<Camera> cam)
	{
		cameraRemoved_ = true;
	}

	int init()
	{
		if (!File::exists("/sys/module/uvcvideo")) {
			std::cout << "uvcvideo driver is not loaded, skipping" << std::endl;
			return TestSkip;
		}

		/* Writing to the driver's bind/unbind files requires root. */
		if (geteuid() != 0) {
			std::cout << "This test requires root permissions, skipping" << std::endl;
			return TestSkip;
		}

		cm_ = new CameraManager();
		if (cm_->start()) {
			std::cout << "Failed to start camera manager" << std::endl;
			return TestFail;
		}

		cameraAdded_ = false;
		cameraRemoved_ = false;

		cm_->cameraAdded.connect(this, &HotplugTest::cameraAddedHandler);
		cm_->cameraRemoved.connect(this, &HotplugTest::cameraRemovedHandler);

		return 0;
	}

	int run()
	{
		DIR *dir;
		struct dirent *dirent;
		std::string uvcDeviceDir;

		dir = opendir(uvcDriverDir_.c_str());
		/*
		 * Fix: opendir() can fail (e.g. sysfs layout change), and
		 * readdir(nullptr) would crash. Fail explicitly instead.
		 */
		if (!dir) {
			std::cout << "Failed to open " << uvcDriverDir_ << std::endl;
			return TestFail;
		}

		/* Find a UVC device directory, which we can bind/unbind. */
		while ((dirent = readdir(dir)) != nullptr) {
			if (!File::exists(uvcDriverDir_ + dirent->d_name + "/video4linux"))
				continue;

			uvcDeviceDir = dirent->d_name;
			break;
		}
		closedir(dir);

		/* If no UVC device found, skip the test. */
		if (uvcDeviceDir.empty())
			return TestSkip;

		/* Unbind a camera and process events. */
		std::ofstream(uvcDriverDir_ + "unbind", std::ios::binary)
			<< uvcDeviceDir;
		Timer timer;
		timer.start(1000ms);
		while (timer.isRunning() && !cameraRemoved_)
			Thread::current()->eventDispatcher()->processEvents();
		if (!cameraRemoved_) {
			std::cout << "Camera unplug not detected" << std::endl;
			return TestFail;
		}

		/* Bind the camera again and process events. */
		std::ofstream(uvcDriverDir_ + "bind", std::ios::binary)
			<< uvcDeviceDir;
		timer.start(1000ms);
		while (timer.isRunning() && !cameraAdded_)
			Thread::current()->eventDispatcher()->processEvents();
		if (!cameraAdded_) {
			std::cout << "Camera plug not detected" << std::endl;
			return TestFail;
		}

		return TestPass;
	}

	void cleanup()
	{
		cm_->stop();
		delete cm_;
	}

private:
	CameraManager *cm_;
	static const std::string uvcDriverDir_;
	bool cameraRemoved_;
	bool cameraAdded_;
};
const std::string HotplugTest::uvcDriverDir_ = "/sys/bus/usb/drivers/uvcvideo/";
TEST_REGISTER(HotplugTest)
|
0 | repos/libcamera | repos/libcamera/test/geometry.cpp | /* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* Geometry classes tests
*/
#include <iostream>
#include <libcamera/geometry.h>
#include "test.h"
using namespace std;
using namespace libcamera;
/* Unit tests for the Point, Size and Rectangle geometry helpers. */
class GeometryTest : public Test
{
protected:
	/*
	 * Apply comparison operator \a op to \a lhs and \a rhs, and check
	 * the result against \a expect, logging a diagnostic on mismatch.
	 */
	template<typename T>
	bool compare(const T &lhs, const T &rhs,
		     bool (*op)(const T &lhs, const T &rhs),
		     const char *opName, bool expect)
	{
		bool result = op(lhs, rhs);

		if (result != expect) {
			/*
			 * Fix: the diagnostic previously omitted the spaces
			 * around the operator name and before "test failed",
			 * producing output like "100x100== 200x200test failed".
			 */
			cout << lhs << " " << opName << " " << rhs
			     << " test failed" << std::endl;
			return false;
		}

		return true;
	}

	int run()
	{
		/*
		 * Point tests
		 */

		/* Equality */
		if (!compare(Point(50, 100), Point(50, 100), &operator==, "==", true))
			return TestFail;

		if (!compare(Point(-50, 100), Point(-50, 100), &operator==, "==", true))
			return TestFail;

		if (!compare(Point(50, -100), Point(50, -100), &operator==, "==", true))
			return TestFail;

		if (!compare(Point(-50, -100), Point(-50, -100), &operator==, "==", true))
			return TestFail;

		/* Inequality */
		if (!compare(Point(50, 100), Point(50, 100), &operator!=, "!=", false))
			return TestFail;

		if (!compare(Point(-50, 100), Point(-50, 100), &operator!=, "!=", false))
			return TestFail;

		if (!compare(Point(50, -100), Point(50, -100), &operator!=, "!=", false))
			return TestFail;

		if (!compare(Point(-50, -100), Point(-50, -100), &operator!=, "!=", false))
			return TestFail;

		if (!compare(Point(-50, 100), Point(50, 100), &operator!=, "!=", true))
			return TestFail;

		if (!compare(Point(50, -100), Point(50, 100), &operator!=, "!=", true))
			return TestFail;

		if (!compare(Point(-50, -100), Point(50, 100), &operator!=, "!=", true))
			return TestFail;

		/* Negation */
		if (Point(50, 100) != -Point(-50, -100) ||
		    Point(50, 100) == -Point(50, -100) ||
		    Point(50, 100) == -Point(-50, 100)) {
			cout << "Point negation test failed" << endl;
			return TestFail;
		}

		/* Default constructor */
		if (Point() != Point(0, 0)) {
			cout << "Default constructor test failed" << endl;
			return TestFail;
		}

		/*
		 * Size tests
		 */
		if (!Size().isNull() || !Size(0, 0).isNull()) {
			cout << "Null size incorrectly reported as not null" << endl;
			return TestFail;
		}

		if (Size(0, 100).isNull() || Size(100, 0).isNull() || Size(100, 100).isNull()) {
			cout << "Non-null size incorrectly reported as null" << endl;
			return TestFail;
		}

		/*
		 * Test alignDownTo(), alignUpTo(), boundTo(), expandTo(),
		 * growBy() and shrinkBy()
		 */
		Size s(50, 50);

		s.alignDownTo(16, 16);
		if (s != Size(48, 48)) {
			cout << "Size::alignDownTo() test failed" << endl;
			return TestFail;
		}

		s.alignUpTo(32, 32);
		if (s != Size(64, 64)) {
			cout << "Size::alignUpTo() test failed" << endl;
			return TestFail;
		}

		s.boundTo({ 40, 40 });
		if (s != Size(40, 40)) {
			cout << "Size::boundTo() test failed" << endl;
			return TestFail;
		}

		s.expandTo({ 50, 50 });
		if (s != Size(50, 50)) {
			cout << "Size::expandTo() test failed" << endl;
			return TestFail;
		}

		s.growBy({ 10, 20 });
		if (s != Size(60, 70)) {
			cout << "Size::growBy() test failed" << endl;
			return TestFail;
		}

		s.shrinkBy({ 20, 10 });
		if (s != Size(40, 60)) {
			cout << "Size::shrinkBy() test failed" << endl;
			return TestFail;
		}

		s.shrinkBy({ 100, 100 });
		if (s != Size(0, 0)) {
			cout << "Size::shrinkBy() clamp test failed" << endl;
			return TestFail;
		}

		s = Size(50,50).alignDownTo(16, 16).alignUpTo(32, 32)
		  .boundTo({ 40, 80 }).expandTo({ 16, 80 })
		  .growBy({ 4, 4 }).shrinkBy({ 10, 20 });
		if (s != Size(34, 64)) {
			cout << "Size chained in-place modifiers test failed" << endl;
			return TestFail;
		}

		/*
		 * Test alignedDownTo(), alignedUpTo(), boundedTo(),
		 * expandedTo(), grownBy() and shrunkBy()
		 */
		if (Size(0, 0).alignedDownTo(16, 8) != Size(0, 0) ||
		    Size(1, 1).alignedDownTo(16, 8) != Size(0, 0) ||
		    Size(16, 8).alignedDownTo(16, 8) != Size(16, 8)) {
			cout << "Size::alignedDownTo() test failed" << endl;
			return TestFail;
		}

		if (Size(0, 0).alignedUpTo(16, 8) != Size(0, 0) ||
		    Size(1, 1).alignedUpTo(16, 8) != Size(16, 8) ||
		    Size(16, 8).alignedUpTo(16, 8) != Size(16, 8)) {
			cout << "Size::alignedUpTo() test failed" << endl;
			return TestFail;
		}

		if (Size(0, 0).boundedTo({ 100, 100 }) != Size(0, 0) ||
		    Size(200, 50).boundedTo({ 100, 100 }) != Size(100, 50) ||
		    Size(50, 200).boundedTo({ 100, 100 }) != Size(50, 100)) {
			cout << "Size::boundedTo() test failed" << endl;
			return TestFail;
		}

		if (Size(0, 0).expandedTo({ 100, 100 }) != Size(100, 100) ||
		    Size(200, 50).expandedTo({ 100, 100 }) != Size(200, 100) ||
		    Size(50, 200).expandedTo({ 100, 100 }) != Size(100, 200)) {
			cout << "Size::expandedTo() test failed" << endl;
			return TestFail;
		}

		if (Size(0, 0).grownBy({ 10, 20 }) != Size(10, 20) ||
		    Size(200, 50).grownBy({ 10, 20 }) != Size(210, 70)) {
			cout << "Size::grownBy() test failed" << endl;
			return TestFail;
		}

		if (Size(200, 50).shrunkBy({ 10, 20 }) != Size(190, 30) ||
		    Size(200, 50).shrunkBy({ 10, 100 }) != Size(190, 0) ||
		    Size(200, 50).shrunkBy({ 300, 20 }) != Size(0, 30)) {
			cout << "Size::shrunkBy() test failed" << endl;
			return TestFail;
		}

		/* Aspect ratio tests */
		if (Size(0, 0).boundedToAspectRatio(Size(4, 3)) != Size(0, 0) ||
		    Size(1920, 1440).boundedToAspectRatio(Size(16, 9)) != Size(1920, 1080) ||
		    Size(1920, 1440).boundedToAspectRatio(Size(65536, 36864)) != Size(1920, 1080) ||
		    Size(1440, 1920).boundedToAspectRatio(Size(9, 16)) != Size(1080, 1920) ||
		    Size(1920, 1080).boundedToAspectRatio(Size(4, 3)) != Size(1440, 1080) ||
		    Size(1920, 1080).boundedToAspectRatio(Size(65536, 49152)) != Size(1440, 1080) ||
		    Size(1024, 1024).boundedToAspectRatio(Size(1, 1)) != Size(1024, 1024) ||
		    Size(1920, 1080).boundedToAspectRatio(Size(16, 9)) != Size(1920, 1080) ||
		    Size(200, 100).boundedToAspectRatio(Size(16, 9)) != Size(177, 100) ||
		    Size(300, 200).boundedToAspectRatio(Size(16, 9)) != Size(300, 168)) {
			cout << "Size::boundedToAspectRatio() test failed" << endl;
			return TestFail;
		}

		if (Size(0, 0).expandedToAspectRatio(Size(4, 3)) != Size(0, 0) ||
		    Size(1920, 1440).expandedToAspectRatio(Size(16, 9)) != Size(2560, 1440) ||
		    Size(1920, 1440).expandedToAspectRatio(Size(65536, 36864)) != Size(2560, 1440) ||
		    Size(1440, 1920).expandedToAspectRatio(Size(9, 16)) != Size(1440, 2560) ||
		    Size(1920, 1080).expandedToAspectRatio(Size(4, 3)) != Size(1920, 1440) ||
		    Size(1920, 1080).expandedToAspectRatio(Size(65536, 49152)) != Size(1920, 1440) ||
		    Size(1024, 1024).expandedToAspectRatio(Size(1, 1)) != Size(1024, 1024) ||
		    Size(1920, 1080).expandedToAspectRatio(Size(16, 9)) != Size(1920, 1080) ||
		    Size(200, 100).expandedToAspectRatio(Size(16, 9)) != Size(200, 112) ||
		    Size(300, 200).expandedToAspectRatio(Size(16, 9)) != Size(355, 200)) {
			cout << "Size::expandedToAspectRatio() test failed" << endl;
			return TestFail;
		}

		/* Size::centeredTo() tests */
		if (Size(0, 0).centeredTo(Point(50, 100)) != Rectangle(50, 100, 0, 0) ||
		    Size(0, 0).centeredTo(Point(-50, -100)) != Rectangle(-50, -100, 0, 0) ||
		    Size(100, 200).centeredTo(Point(50, 100)) != Rectangle(0, 0, 100, 200) ||
		    Size(100, 200).centeredTo(Point(-50, -100)) != Rectangle(-100, -200, 100, 200) ||
		    Size(101, 201).centeredTo(Point(-50, -100)) != Rectangle(-100, -200, 101, 201) ||
		    Size(101, 201).centeredTo(Point(-51, -101)) != Rectangle(-101, -201, 101, 201)) {
			cout << "Size::centeredTo() test failed" << endl;
			return TestFail;
		}

		/* Scale a size by a float */
		if (Size(1000, 2000) * 2.0 != Size(2000, 4000) ||
		    Size(300, 100) * 0.5 != Size(150, 50) ||
		    Size(1, 2) * 1.6 != Size(1, 3)) {
			cout << "Size::operator*() failed" << endl;
			return TestFail;
		}

		if (Size(1000, 2000) / 2.0 != Size(500, 1000) ||
		    Size(300, 100) / 0.5 != Size(600, 200) ||
		    Size(1000, 2000) / 3.0 != Size(333, 666)) {
			/* Fix: this checks division, not multiplication. */
			cout << "Size::operator/() failed" << endl;
			return TestFail;
		}

		s = Size(300, 100);
		s *= 0.3333;
		if (s != Size(99, 33)) {
			/* Fix: name the compound-assignment operator. */
			cout << "Size::operator*=() test failed" << endl;
			return TestFail;
		}

		s = Size(300, 100);
		s /= 3;
		if (s != Size(100, 33)) {
			/* Fix: this checks /=, the message said operator*(). */
			cout << "Size::operator/=() test failed" << endl;
			return TestFail;
		}

		/* Test Size equality and inequality. */
		if (!compare(Size(100, 100), Size(100, 100), &operator==, "==", true))
			return TestFail;
		if (!compare(Size(100, 100), Size(100, 100), &operator!=, "!=", false))
			return TestFail;

		if (!compare(Size(100, 100), Size(200, 100), &operator==, "==", false))
			return TestFail;
		if (!compare(Size(100, 100), Size(200, 100), &operator!=, "!=", true))
			return TestFail;

		if (!compare(Size(100, 100), Size(100, 200), &operator==, "==", false))
			return TestFail;
		if (!compare(Size(100, 100), Size(100, 200), &operator!=, "!=", true))
			return TestFail;

		/* Test Size ordering based on combined with and height. */
		if (!compare(Size(100, 100), Size(200, 200), &operator<, "<", true))
			return TestFail;
		if (!compare(Size(100, 100), Size(200, 200), &operator<=, "<=", true))
			return TestFail;
		if (!compare(Size(100, 100), Size(200, 200), &operator>, ">", false))
			return TestFail;
		if (!compare(Size(100, 100), Size(200, 200), &operator>=, ">=", false))
			return TestFail;

		if (!compare(Size(200, 200), Size(100, 100), &operator<, "<", false))
			return TestFail;
		if (!compare(Size(200, 200), Size(100, 100), &operator<=, "<=", false))
			return TestFail;
		if (!compare(Size(200, 200), Size(100, 100), &operator>, ">", true))
			return TestFail;
		if (!compare(Size(200, 200), Size(100, 100), &operator>=, ">=", true))
			return TestFail;

		/* Test Size ordering based on area (with overlapping sizes). */
		if (!compare(Size(200, 100), Size(100, 400), &operator<, "<", true))
			return TestFail;
		if (!compare(Size(200, 100), Size(100, 400), &operator<=, "<=", true))
			return TestFail;
		if (!compare(Size(200, 100), Size(100, 400), &operator>, ">", false))
			return TestFail;
		if (!compare(Size(200, 100), Size(100, 400), &operator>=, ">=", false))
			return TestFail;

		if (!compare(Size(100, 400), Size(200, 100), &operator<, "<", false))
			return TestFail;
		if (!compare(Size(100, 400), Size(200, 100), &operator<=, "<=", false))
			return TestFail;
		if (!compare(Size(100, 400), Size(200, 100), &operator>, ">", true))
			return TestFail;
		if (!compare(Size(100, 400), Size(200, 100), &operator>=, ">=", true))
			return TestFail;

		/* Test Size ordering based on width (with identical areas). */
		if (!compare(Size(100, 200), Size(200, 100), &operator<, "<", true))
			return TestFail;
		if (!compare(Size(100, 200), Size(200, 100), &operator<=, "<=", true))
			return TestFail;
		if (!compare(Size(100, 200), Size(200, 100), &operator>, ">", false))
			return TestFail;
		if (!compare(Size(100, 200), Size(200, 100), &operator>=, ">=", false))
			return TestFail;

		if (!compare(Size(200, 100), Size(100, 200), &operator<, "<", false))
			return TestFail;
		if (!compare(Size(200, 100), Size(100, 200), &operator<=, "<=", false))
			return TestFail;
		if (!compare(Size(200, 100), Size(100, 200), &operator>, ">", true))
			return TestFail;
		if (!compare(Size(200, 100), Size(100, 200), &operator>=, ">=", true))
			return TestFail;

		/*
		 * Rectangle tests
		 */

		/* Test Rectangle::isNull(). */
		if (!Rectangle(0, 0, 0, 0).isNull() ||
		    !Rectangle(1, 1, 0, 0).isNull()) {
			cout << "Null rectangle incorrectly reported as not null" << endl;
			return TestFail;
		}

		if (Rectangle(0, 0, 0, 1).isNull() ||
		    Rectangle(0, 0, 1, 0).isNull() ||
		    Rectangle(0, 0, 1, 1).isNull()) {
			cout << "Non-null rectangle incorrectly reported as null" << endl;
			return TestFail;
		}

		/* Rectangle::size(), Rectangle::topLeft() and Rectangle::center() tests */
		if (Rectangle(-1, -2, 3, 4).size() != Size(3, 4) ||
		    Rectangle(0, 0, 100000, 200000).size() != Size(100000, 200000)) {
			cout << "Rectangle::size() test failed" << endl;
			return TestFail;
		}

		if (Rectangle(1, 2, 3, 4).topLeft() != Point(1, 2) ||
		    Rectangle(-1, -2, 3, 4).topLeft() != Point(-1, -2)) {
			cout << "Rectangle::topLeft() test failed" << endl;
			return TestFail;
		}

		if (Rectangle(0, 0, 300, 400).center() != Point(150, 200) ||
		    Rectangle(-1000, -2000, 300, 400).center() != Point(-850, -1800) ||
		    Rectangle(10, 20, 301, 401).center() != Point(160, 220) ||
		    Rectangle(11, 21, 301, 401).center() != Point(161, 221) ||
		    Rectangle(-1011, -2021, 301, 401).center() != Point(-861, -1821)) {
			cout << "Rectangle::center() test failed" << endl;
			return TestFail;
		}

		/* Rectangle::boundedTo() (intersection function) */
		if (Rectangle(0, 0, 1000, 2000).boundedTo(Rectangle(0, 0, 1000, 2000)) !=
			    Rectangle(0, 0, 1000, 2000) ||
		    Rectangle(-500, -1000, 1000, 2000).boundedTo(Rectangle(0, 0, 1000, 2000)) !=
			    Rectangle(0, 0, 500, 1000) ||
		    Rectangle(500, 1000, 1000, 2000).boundedTo(Rectangle(0, 0, 1000, 2000)) !=
			    Rectangle(500, 1000, 500, 1000) ||
		    Rectangle(300, 400, 50, 100).boundedTo(Rectangle(0, 0, 1000, 2000)) !=
			    Rectangle(300, 400, 50, 100) ||
		    Rectangle(0, 0, 1000, 2000).boundedTo(Rectangle(300, 400, 50, 100)) !=
			    Rectangle(300, 400, 50, 100) ||
		    Rectangle(0, 0, 100, 100).boundedTo(Rectangle(50, 100, 100, 100)) !=
			    Rectangle(50, 100, 50, 0) ||
		    Rectangle(0, 0, 100, 100).boundedTo(Rectangle(100, 50, 100, 100)) !=
			    Rectangle(100, 50, 0, 50) ||
		    Rectangle(-10, -20, 10, 20).boundedTo(Rectangle(10, 20, 100, 100)) !=
			    Rectangle(10, 20, 0, 0)) {
			cout << "Rectangle::boundedTo() test failed" << endl;
			return TestFail;
		}

		/* Rectangle::enclosedIn() tests */
		if (Rectangle(10, 20, 300, 400).enclosedIn(Rectangle(-10, -20, 1300, 1400)) !=
			    Rectangle(10, 20, 300, 400) ||
		    Rectangle(-100, -200, 3000, 4000).enclosedIn(Rectangle(-10, -20, 1300, 1400)) !=
			    Rectangle(-10, -20, 1300, 1400) ||
		    Rectangle(-100, -200, 300, 400).enclosedIn(Rectangle(-10, -20, 1300, 1400)) !=
			    Rectangle(-10, -20, 300, 400) ||
		    Rectangle(5100, 6200, 300, 400).enclosedIn(Rectangle(-10, -20, 1300, 1400)) !=
			    Rectangle(990, 980, 300, 400) ||
		    Rectangle(100, -300, 150, 200).enclosedIn(Rectangle(50, 0, 200, 300)) !=
			    Rectangle(100, 0, 150, 200) ||
		    Rectangle(100, -300, 150, 1200).enclosedIn(Rectangle(50, 0, 200, 300)) !=
			    Rectangle(100, 0, 150, 300) ||
		    Rectangle(-300, 100, 200, 150).enclosedIn(Rectangle(0, 50, 300, 200)) !=
			    Rectangle(0, 100, 200, 150) ||
		    Rectangle(-300, 100, 1200, 150).enclosedIn(Rectangle(0, 50, 300, 200)) !=
			    Rectangle(0, 100, 300, 150)) {
			cout << "Rectangle::enclosedIn() test failed" << endl;
			return TestFail;
		}

		/* Rectange::scaledBy() tests */
		if (Rectangle(10, 20, 300, 400).scaledBy(Size(0, 0), Size(1, 1)) !=
			    Rectangle(0, 0, 0, 0) ||
		    Rectangle(10, -20, 300, 400).scaledBy(Size(32768, 65536), Size(32768, 32768)) !=
			    Rectangle(10, -40, 300, 800) ||
		    Rectangle(-30000, 10000, 20000, 20000).scaledBy(Size(7, 7), Size(7, 7)) !=
			    Rectangle(-30000, 10000, 20000, 20000) ||
		    Rectangle(-20, -30, 320, 240).scaledBy(Size(1280, 960), Size(640, 480)) !=
			    Rectangle(-40, -60, 640, 480) ||
		    Rectangle(1, 1, 2026, 1510).scaledBy(Size(4056, 3024), Size(2028, 1512)) !=
			    Rectangle(2, 2, 4052, 3020)) {
			cout << "Rectangle::scaledBy() test failed" << endl;
			return TestFail;
		}

		/* Rectangle::translatedBy() tests */
		if (Rectangle(10, -20, 300, 400).translatedBy(Point(-30, 40)) !=
			    Rectangle(-20, 20, 300, 400) ||
		    Rectangle(-10, 20, 400, 300).translatedBy(Point(50, -60)) !=
			    Rectangle(40, -40, 400, 300)) {
			cout << "Rectangle::translatedBy() test failed" << endl;
			return TestFail;
		}

		/* Rectangle::scaleBy() tests */
		Rectangle r(-20, -30, 320, 240);
		r.scaleBy(Size(1280, 960), Size(640, 480));
		if (r != Rectangle(-40, -60, 640, 480)) {
			cout << "Rectangle::scaleBy() test failed" << endl;
			return TestFail;
		}

		r = Rectangle(1, 1, 2026, 1510);
		r.scaleBy(Size(4056, 3024), Size(2028, 1512));
		if (r != Rectangle(2, 2, 4052, 3020)) {
			cout << "Rectangle::scaleBy() test failed" << endl;
			return TestFail;
		}

		/* Rectangle::translateBy() tests */
		r = Rectangle(10, -20, 300, 400);
		r.translateBy(Point(-30, 40));
		if (r != Rectangle(-20, 20, 300, 400)) {
			cout << "Rectangle::translateBy() test failed" << endl;
			return TestFail;
		}

		r = Rectangle(-10, 20, 400, 300);
		r.translateBy(Point(50, -60));
		if (r != Rectangle(40, -40, 400, 300)) {
			cout << "Rectangle::translateBy() test failed" << endl;
			return TestFail;
		}

		return TestPass;
	}
};
TEST_REGISTER(GeometryTest)
|
0 | repos/libcamera | repos/libcamera/test/timer-fail.cpp | /* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2024, Ideas on Board Oy
*
* Threaded timer failure test
*/
#include <chrono>
#include <iostream>
#include <libcamera/base/event_dispatcher.h>
#include <libcamera/base/object.h>
#include <libcamera/base/thread.h>
#include <libcamera/base/timer.h>
#include "test.h"
using namespace libcamera;
using namespace std;
using namespace std::chrono_literals;
/*
 * Helper object owning a one-shot 100ms timer. It records whether the
 * timer fired, so the test can observe timer delivery across threads.
 */
class TimeoutHandler : public Object
{
public:
	TimeoutHandler()
		: timer_(this), expired_(false)
	{
		timer_.timeout.connect(this, &TimeoutHandler::onTimeout);
	}

	/* Arm the timer with a 100ms interval. */
	void start()
	{
		timer_.start(100ms);
	}

	/* True once the timer has fired. */
	bool timeout() const
	{
		return expired_;
	}

private:
	void onTimeout()
	{
		expired_ = true;
	}

	Timer timer_;
	bool expired_;
};
/*
 * Negative test: starting a Timer from a thread other than the one its
 * owning Object lives in is forbidden. The meson suite marks this test
 * as expected-to-fail, so the pass/fail return values below are inverted.
 */
class TimerFailTest : public Test
{
protected:
	int init()
	{
		/* The handler is moved to thread_, so all its timer
		 * operations must happen in that thread. */
		thread_.start();
		timeout_ = new TimeoutHandler();
		timeout_->moveToThread(&thread_);
		return TestPass;
	}
	int run()
	{
		/*
		 * Test that the forbidden operation of starting the timer from
		 * another thread results in a failure. We need to interrupt the
		 * event dispatcher to make sure we don't succeed simply because
		 * the event dispatcher hasn't noticed the timer restart.
		 */
		timeout_->start();
		thread_.eventDispatcher()->interrupt();
		/* Sleep past the 100ms timer interval so a (wrongly)
		 * accepted start would have fired by now. */
		this_thread::sleep_for(chrono::milliseconds(200));
		/*
		 * The wrong start() call should result in an assertion in debug
		 * builds, and a timeout in release builds. The test is
		 * therefore marked in meson.build as expected to fail. We need
		 * to return TestPass in the unexpected (usually known as
		 * "fail") case, and TestFail otherwise.
		 */
		if (timeout_->timeout()) {
			cout << "Timer start from wrong thread succeeded unexpectedly"
			     << endl;
			return TestPass;
		}
		return TestFail;
	}
	void cleanup()
	{
		/*
		 * Object class instances must be destroyed from the thread
		 * they live in.
		 */
		timeout_->deleteLater();
		thread_.exit(0);
		thread_.wait();
	}
private:
	TimeoutHandler *timeout_;
	Thread thread_;
};
TEST_REGISTER(TimerFailTest)
|
0 | repos/libcamera | repos/libcamera/test/signal-threads.cpp | /* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* Cross-thread signal delivery test
*/
#include <chrono>
#include <iostream>
#include <thread>
#include <libcamera/base/message.h>
#include <libcamera/base/object.h>
#include <libcamera/base/thread.h>
#include <libcamera/base/utils.h>
#include "test.h"
using namespace std;
using namespace libcamera;
/*
 * Receiver object that records in which thread a connected signal was
 * delivered, along with the signal's integer payload.
 */
class SignalReceiver : public Object
{
public:
	enum Status {
		NoSignal,
		InvalidThread,
		SignalReceived,
	};

	SignalReceiver()
		/*
		 * Fix: value_ was previously left uninitialized, so value()
		 * could return indeterminate data before the first reset()
		 * or slot() call.
		 */
		: status_(NoSignal), value_(0)
	{
	}

	Status status() const { return status_; }
	int value() const { return value_; }

	void reset()
	{
		status_ = NoSignal;
		value_ = 0;
	}

	/* Record the delivery thread validity and the signal payload. */
	void slot(int value)
	{
		if (Thread::current() != thread())
			status_ = InvalidThread;
		else
			status_ = SignalReceived;

		value_ = value;
	}

private:
	Status status_;
	int value_;
};
class SignalThreadsTest : public Test
{
protected:
int init()
{
receiver_ = new SignalReceiver();
signal_.connect(receiver_, &SignalReceiver::slot);
return TestPass;
}
int run()
{
/* Test that a signal is received in the main thread. */
signal_.emit(0);
switch (receiver_->status()) {
case SignalReceiver::NoSignal:
cout << "No signal received for direct connection" << endl;
return TestFail;
case SignalReceiver::InvalidThread:
cout << "Signal received in incorrect thread "
"for direct connection" << endl;
return TestFail;
default:
break;
}
/*
* Move the object to a thread and verify that the signal is
* correctly delivered, with the correct data.
*/
receiver_->reset();
receiver_->moveToThread(&thread_);
thread_.start();
signal_.emit(42);
this_thread::sleep_for(chrono::milliseconds(100));
switch (receiver_->status()) {
case SignalReceiver::NoSignal:
cout << "No signal received for message connection" << endl;
return TestFail;
case SignalReceiver::InvalidThread:
cout << "Signal received in incorrect thread "
"for message connection" << endl;
return TestFail;
default:
break;
}
if (receiver_->value() != 42) {
cout << "Signal received with incorrect value" << endl;
return TestFail;
}
return TestPass;
}
void cleanup()
{
receiver_->deleteLater();
thread_.exit(0);
thread_.wait();
}
private:
SignalReceiver *receiver_;
Thread thread_;
Signal<int> signal_;
};
TEST_REGISTER(SignalThreadsTest)
|
0 | repos/libcamera | repos/libcamera/test/fence.cpp | /* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2021, Google Inc.
*
* Fence test
*/
#include <iostream>
#include <memory>
#include <sys/eventfd.h>
#include <unistd.h>
#include <libcamera/base/event_dispatcher.h>
#include <libcamera/base/thread.h>
#include <libcamera/base/timer.h>
#include <libcamera/base/unique_fd.h>
#include <libcamera/base/utils.h>
#include <libcamera/fence.h>
#include <libcamera/framebuffer_allocator.h>
#include "camera_test.h"
#include "test.h"
using namespace libcamera;
using namespace std;
using namespace std::chrono_literals;
/*
 * Test Fence support in the request queueing path: most requests carry no
 * fence, one carries a fence that is signalled in time, and one carries a
 * fence that is left to expire.
 */
class FenceTest : public CameraTest, public Test
{
public:
	FenceTest();

protected:
	int init() override;
	int run() override;

private:
	int validateExpiredRequest(Request *request);
	int validateRequest(Request *request);
	void requestComplete(Request *request);
	void requestRequeue(Request *request);
	void signalFence();

	EventDispatcher *dispatcher_;
	/* eventfds modelling the fences; moved into Fence objects later. */
	UniqueFD eventFd_;
	UniqueFD eventFd2_;
	Timer fenceTimer_;
	std::vector<std::unique_ptr<Request>> requests_;
	std::unique_ptr<CameraConfiguration> config_;
	std::unique_ptr<FrameBufferAllocator> allocator_;
	Stream *stream_;
	bool expectedCompletionResult_ = true;
	bool setFence_ = true;
	/*
	 * Request IDs track the number of requests that have completed. They
	 * are one-based, and don't wrap.
	 */
	unsigned int completedRequestId_;
	unsigned int signalledRequestId_;
	unsigned int expiredRequestId_;
	unsigned int nbuffers_;
	/* Raw fd values cached before the UniqueFDs are moved away. */
	int efd2_;
	int efd_;
};
/* Run against the vimc virtual test driver's "Sensor B" camera. */
FenceTest::FenceTest()
	: CameraTest("platform/vimc.0 Sensor B")
{
}
/*
 * Prepare the camera, allocate buffers and create the two eventfds used as
 * fences. Returns TestPass on success, or the failing status otherwise.
 */
int FenceTest::init()
{
	/* Make sure the CameraTest constructor succeeded. */
	if (status_ != TestPass)
		return status_;
	dispatcher_ = Thread::current()->eventDispatcher();
	/*
	 * Create two eventfds to model the fences. This is enough to support the
	 * needs of libcamera which only needs to wait for read events through
	 * poll(). Once native support for fences will be available in the
	 * backend kernel APIs this will need to be replaced by a sw_sync fence,
	 * but that requires debugfs.
	 */
	eventFd_ = UniqueFD(eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK));
	eventFd2_ = UniqueFD(eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK));
	if (!eventFd_.isValid() || !eventFd2_.isValid()) {
		cerr << "Unable to create eventfd" << endl;
		return TestFail;
	}
	/* Cache the raw fds: the UniqueFDs are later moved into Fences. */
	efd_ = eventFd_.get();
	efd2_ = eventFd2_.get();
	config_ = camera_->generateConfiguration({ StreamRole::Viewfinder });
	if (!config_ || config_->size() != 1) {
		cerr << "Failed to generate default configuration" << endl;
		return TestFail;
	}
	if (camera_->acquire()) {
		cerr << "Failed to acquire the camera" << endl;
		return TestFail;
	}
	if (camera_->configure(config_.get())) {
		cerr << "Failed to set default configuration" << endl;
		return TestFail;
	}
	StreamConfiguration &cfg = config_->at(0);
	stream_ = cfg.stream();
	allocator_ = std::make_unique<FrameBufferAllocator>(camera_);
	if (allocator_->allocate(stream_) < 0)
		return TestFail;
	/* At least two buffers are needed to space the fenced requests. */
	nbuffers_ = allocator_->buffers(stream_).size();
	if (nbuffers_ < 2) {
		cerr << "Not enough buffers available" << endl;
		return TestFail;
	}
	completedRequestId_ = 0;
	/*
	 * All but two requests are queued without a fence. Request
	 * expiredRequestId_ will be queued with a fence that we won't signal
	 * (which is then expected to expire), and request signalledRequestId_
	 * will be queued with a fence that gets signalled. Select nbuffers_
	 * and nbuffers_ * 2 for those two requests, to space them by a few
	 * frames while still not requiring a long time for the test to
	 * complete.
	 */
	expiredRequestId_ = nbuffers_;
	signalledRequestId_ = nbuffers_ * 2;
	return TestPass;
}
/*
 * Check that a request whose fence expired was cancelled, and that the
 * fence is handed back intact with its original file descriptor.
 */
int FenceTest::validateExpiredRequest(Request *request)
{
	/* The last request is expected to fail. */
	if (request->status() != Request::RequestCancelled) {
		cerr << "The last request should have failed: " << endl;
		return TestFail;
	}

	FrameBuffer *fb = request->buffers().begin()->second;
	std::unique_ptr<Fence> expiredFence = fb->releaseFence();
	if (!expiredFence) {
		cerr << "The expired fence should be present" << endl;
		return TestFail;
	}

	if (!expiredFence->isValid()) {
		cerr << "The expired fence should be valid" << endl;
		return TestFail;
	}

	UniqueFD releasedFd = expiredFence->release();
	if (releasedFd.get() != efd_) {
		cerr << "The expired fence file descriptor should not change" << endl;
		return TestFail;
	}

	return TestPass;
}
/*
 * Check that a normally completed request succeeded and that its fence,
 * if any, has been closed by the core.
 */
int FenceTest::validateRequest(Request *request)
{
	uint64_t cookie = request->cookie();

	/* All requests but the last are expected to succeed. */
	if (request->status() != Request::RequestComplete) {
		cerr << "Unexpected request failure: " << cookie << endl;
		return TestFail;
	}

	/* A successfully completed request should have the Fence closed. */
	FrameBuffer *fb = request->buffers().begin()->second;

	std::unique_ptr<Fence> remainingFence = fb->releaseFence();
	if (remainingFence) {
		cerr << "Unexpected valid fence in completed request" << endl;
		return TestFail;
	}

	return TestPass;
}
/*
 * Reuse and re-queue a completed request.
 *
 * The request that will complete as signalledRequestId_ gets a Fence built
 * around eventFd2_ attached; all other requests are re-queued without a
 * fence.
 */
void FenceTest::requestRequeue(Request *request)
{
const Request::BufferMap &buffers = request->buffers();
const Stream *stream = buffers.begin()->first;
FrameBuffer *buffer = buffers.begin()->second;
request->reuse();
if (completedRequestId_ == signalledRequestId_ - nbuffers_ && setFence_) {
/*
 * This is the request that will be used to test fence
 * signalling when it completes next time. Add a fence to it,
 * using efd2_. The main loop will signal the fence by using a
 * timer to write to the efd2_ file descriptor before the fence
 * expires.
 */
std::unique_ptr<Fence> fence =
std::make_unique<Fence>(std::move(eventFd2_));
request->addBuffer(stream, buffer, std::move(fence));
} else {
/* All the other requests continue to operate without fences. */
request->addBuffer(stream, buffer);
}
camera_->queueRequest(request);
}
/*
 * Handler for the Camera::requestCompleted signal.
 *
 * Validates the completed request and, unless it is the expired one,
 * re-queues it. The dispatcher is interrupted in every path so control
 * returns to the main loop in run().
 */
void FenceTest::requestComplete(Request *request)
{
completedRequestId_++;
/*
 * Request expiredRequestId_ is expected to fail as its fence has not
 * been signalled.
 *
 * Validate the fence status but do not re-queue it.
 */
if (completedRequestId_ == expiredRequestId_) {
if (validateExpiredRequest(request) != TestPass)
expectedCompletionResult_ = false;
dispatcher_->interrupt();
return;
}
/* Validate all other requests. */
if (validateRequest(request) != TestPass) {
expectedCompletionResult_ = false;
dispatcher_->interrupt();
return;
}
requestRequeue(request);
/*
 * Interrupt the dispatcher to return control to the main loop and
 * activate the fenceTimer.
 */
dispatcher_->interrupt();
}
/* Callback to signal a fence waiting on the eventfd file descriptor. */
void FenceTest::signalFence()
{
uint64_t value = 1;
int ret;
ret = write(efd2_, &value, sizeof(value));
if (ret != sizeof(value))
cerr << "Failed to signal fence" << endl;
setFence_ = false;
dispatcher_->processEvents();
}
/*
 * Test entry point.
 *
 * Queue one request per allocated buffer. The request that will complete
 * as expiredRequestId_ carries a fence built around eventFd_, which is
 * never signalled, while all others are queued without fences. Run the
 * event loop until the expired request completes, a validation failure
 * occurs, or the global timeout fires.
 */
int FenceTest::run()
{
for (const auto &[i, buffer] : utils::enumerate(allocator_->buffers(stream_))) {
std::unique_ptr<Request> request = camera_->createRequest(i);
if (!request) {
cerr << "Failed to create request" << endl;
return TestFail;
}
int ret;
if (i == expiredRequestId_ - 1) {
/* This request will have a fence, and it will expire. */
std::unique_ptr<Fence> fence =
std::make_unique<Fence>(std::move(eventFd_));
if (!fence->isValid()) {
cerr << "Fence should be valid" << endl;
return TestFail;
}
ret = request->addBuffer(stream_, buffer.get(), std::move(fence));
} else {
/* All other requests will have no Fence. */
ret = request->addBuffer(stream_, buffer.get());
}
if (ret) {
cerr << "Failed to associate buffer with request" << endl;
return TestFail;
}
requests_.push_back(std::move(request));
}
camera_->requestCompleted.connect(this, &FenceTest::requestComplete);
if (camera_->start()) {
cerr << "Failed to start camera" << endl;
return TestFail;
}
for (std::unique_ptr<Request> &request : requests_) {
if (camera_->queueRequest(request.get())) {
cerr << "Failed to queue request" << endl;
return TestFail;
}
}
expectedCompletionResult_ = true;
/* This timer serves to signal fences associated with "signalledRequestId_" */
Timer fenceTimer;
fenceTimer.timeout.connect(this, &FenceTest::signalFence);
/*
 * Loop long enough for all requests to complete, allowing 500ms per
 * request.
 */
Timer timer;
timer.start(500ms * (signalledRequestId_ + 1));
while (timer.isRunning() && expectedCompletionResult_ &&
completedRequestId_ <= signalledRequestId_ + 1) {
if (completedRequestId_ == signalledRequestId_ - 1 && setFence_)
/*
 * The request just before signalledRequestId_ has just
 * completed. Request signalledRequestId_ has been
 * queued with a fence, and libcamera is likely already
 * waiting on the fence, or will soon. Start the timer
 * to signal the fence in 10 msec.
 */
fenceTimer.start(10ms);
dispatcher_->processEvents();
}
camera_->requestCompleted.disconnect();
if (camera_->stop()) {
cerr << "Failed to stop camera" << endl;
return TestFail;
}
return expectedCompletionResult_ ? TestPass : TestFail;
}
TEST_REGISTER(FenceTest)
|
0 | repos/libcamera | repos/libcamera/test/utils.cpp | /* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2018, Google Inc.
*
* Miscellaneous utility tests
*/
#include <iostream>
#include <map>
#include <optional>
#include <sstream>
#include <string>
#include <vector>
#include <libcamera/base/span.h>
#include <libcamera/base/utils.h>
#include <libcamera/geometry.h>
#include "test.h"
using namespace std;
using namespace libcamera;
using namespace std::literals::chrono_literals;
/* Exercises the libcamera::utils helpers. */
class UtilsTest : public Test
{
protected:
/* Validate utils::dirname() against a table of known paths. */
int testDirname()
{
static const std::vector<std::string> paths = {
"",
"///",
"/bin",
"/usr/bin",
"//etc////",
"//tmp//d//",
"current_file",
"./current_file",
"./current_dir/",
"current_dir/",
};
/* Expected results, index-matched with the paths above. */
static const std::vector<std::string> expected = {
".",
"/",
"/",
"/usr",
"/",
"//tmp",
".",
".",
".",
".",
};
std::vector<std::string> results;
for (const auto &path : paths)
results.push_back(utils::dirname(path));
if (results != expected) {
cerr << "utils::dirname() tests failed" << endl;
cerr << "expected: " << endl;
for (const auto &path : expected)
cerr << "\t" << path << endl;
cerr << "results: " << endl;
for (const auto &path : results)
cerr << "\t" << path << endl;
return TestFail;
}
return TestPass;
}
/* Validate utils::enumerate() over a vector, a Span and a C array. */
int testEnumerate()
{
std::vector<unsigned int> integers{ 1, 2, 3, 4, 5 };
unsigned int i = 0;
for (auto [index, value] : utils::enumerate(integers)) {
if (index != i || value != i + 1) {
cerr << "utils::enumerate(<vector>) test failed: i=" << i
<< ", index=" << index << ", value=" << value
<< std::endl;
return TestFail;
}
/* Verify that we can modify the value. */
--value;
++i;
}
if (integers != std::vector<unsigned int>{ 0, 1, 2, 3, 4 }) {
cerr << "Failed to modify container in enumerated range loop" << endl;
return TestFail;
}
Span<const unsigned int> span{ integers };
i = 0;
for (auto [index, value] : utils::enumerate(span)) {
if (index != i || value != i) {
cerr << "utils::enumerate(<span>) test failed: i=" << i
<< ", index=" << index << ", value=" << value
<< std::endl;
return TestFail;
}
++i;
}
const unsigned int array[] = { 0, 2, 4, 6, 8 };
i = 0;
for (auto [index, value] : utils::enumerate(array)) {
if (index != i || value != i * 2) {
cerr << "utils::enumerate(<array>) test failed: i=" << i
<< ", index=" << index << ", value=" << value
<< std::endl;
return TestFail;
}
++i;
}
return TestPass;
}
/* Validate utils::Duration arithmetic, conversion and stream output. */
int testDuration()
{
std::ostringstream os;
utils::Duration exposure;
double ratio;
exposure = 25ms + 25ms;
if (exposure.get<std::micro>() != 50000.0) {
cerr << "utils::Duration failed to return microsecond count";
return TestFail;
}
exposure = 1.0s / 4;
if (exposure != 250ms) {
cerr << "utils::Duration failed scalar divide test";
return TestFail;
}
exposure = 5000.5us;
if (!exposure) {
cerr << "utils::Duration failed boolean test";
return TestFail;
}
os << exposure;
if (os.str() != "5000.50us") {
cerr << "utils::Duration operator << failed";
return TestFail;
}
exposure = 100ms;
ratio = exposure / 25ms;
if (ratio != 4.0) {
cerr << "utils::Duration failed ratio test";
return TestFail;
}
return TestPass;
}
/*
 * Entry point: exercises hex(), join(), split(), map_keys(),
 * alignUp()/alignDown() and the helper tests above.
 */
int run()
{
/* utils::hex() test. */
std::ostringstream os;
std::string ref;
os << utils::hex(static_cast<int32_t>(0x42)) << " ";
ref += "0x00000042 ";
os << utils::hex(static_cast<uint32_t>(0x42)) << " ";
ref += "0x00000042 ";
os << utils::hex(static_cast<int64_t>(0x42)) << " ";
ref += "0x0000000000000042 ";
os << utils::hex(static_cast<uint64_t>(0x42)) << " ";
ref += "0x0000000000000042 ";
os << utils::hex(static_cast<int32_t>(0x42), 4) << " ";
ref += "0x0042 ";
os << utils::hex(static_cast<uint32_t>(0x42), 1) << " ";
ref += "0x42 ";
os << utils::hex(static_cast<int64_t>(0x42), 4) << " ";
ref += "0x0042 ";
os << utils::hex(static_cast<uint64_t>(0x42), 1) << " ";
ref += "0x42 ";
std::string s = os.str();
if (s != ref) {
cerr << "utils::hex() test failed, expected '" << ref
<< "', got '" << s << "'";
return TestFail;
}
/* utils::join() and utils::split() test. */
std::vector<std::string> elements = {
"/bin",
"/usr/bin",
"",
"",
};
std::string path;
for (const auto &element : elements)
path += (path.empty() ? "" : ":") + element;
if (path != utils::join(elements, ":")) {
cerr << "utils::join() test failed" << endl;
return TestFail;
}
std::vector<std::string> dirs;
for (const auto &dir : utils::split(path, ":"))
dirs.push_back(dir);
if (dirs != elements) {
cerr << "utils::split() test failed" << endl;
return TestFail;
}
const auto &split = utils::split(path, ":");
dirs = std::vector<std::string>{ split.begin(), split.end() };
if (dirs != elements) {
cerr << "utils::split() LegacyInputIterator test failed" << endl;
return TestFail;
}
/* utils::join() with conversion function test. */
std::vector<Size> sizes = { { 0, 0 }, { 100, 100 } };
s = utils::join(sizes, "/", [](const Size &size) {
return size.toString();
});
if (s != "0x0/100x100") {
cerr << "utils::join() with conversion test failed" << endl;
return TestFail;
}
/* utils::dirname() tests. */
if (TestPass != testDirname())
return TestFail;
/* utils::map_keys() test. */
const std::map<std::string, unsigned int> map{
{ "zero", 0 },
{ "one", 1 },
{ "two", 2 },
};
std::vector<std::string> expectedKeys{
"zero",
"one",
"two",
};
std::sort(expectedKeys.begin(), expectedKeys.end());
const std::vector<std::string> keys = utils::map_keys(map);
if (keys != expectedKeys) {
cerr << "utils::map_keys() test failed" << endl;
return TestFail;
}
/* utils::alignUp() and utils::alignDown() tests. */
if (utils::alignDown(6, 3) != 6 || utils::alignDown(7, 3) != 6) {
cerr << "utils::alignDown test failed" << endl;
return TestFail;
}
if (utils::alignUp(6, 3) != 6 || utils::alignUp(7, 3) != 9) {
cerr << "utils::alignUp test failed" << endl;
return TestFail;
}
/* utils::enumerate() test. */
if (testEnumerate() != TestPass)
return TestFail;
/* utils::Duration test. */
if (testDuration() != TestPass)
return TestFail;
return TestPass;
}
};
TEST_REGISTER(UtilsTest)
|
0 | repos/libcamera | repos/libcamera/test/event-thread.cpp | /* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* Threaded event test
*/
#include <chrono>
#include <iostream>
#include <string.h>
#include <unistd.h>
#include <libcamera/base/event_notifier.h>
#include <libcamera/base/object.h>
#include <libcamera/base/thread.h>
#include <libcamera/base/timer.h>
#include "test.h"
using namespace std;
using namespace libcamera;
/*
 * Helper Object owning a pipe and an EventNotifier on its read end.
 *
 * notify() makes the pipe readable; the notifier's activated signal then
 * invokes readReady() in the thread the object lives in, which records
 * that the event was delivered.
 */
class EventHandler : public Object
{
public:
EventHandler()
: notified_(false)
{
int ret = pipe(pipefd_);
if (ret < 0) {
ret = errno;
cout << "pipe() failed: " << strerror(ret) << endl;
}
/* Watch the pipe read end for data. */
notifier_ = new EventNotifier(pipefd_[0], EventNotifier::Read, this);
notifier_->activated.connect(this, &EventHandler::readReady);
}
~EventHandler()
{
delete notifier_;
close(pipefd_[0]);
close(pipefd_[1]);
}
/* Write to the pipe so the notifier has a pending event. */
int notify()
{
std::string data("H2G2");
ssize_t ret;
memset(data_, 0, sizeof(data_));
size_ = 0;
ret = write(pipefd_[1], data.data(), data.size());
if (ret < 0) {
cout << "Pipe write failed" << endl;
return TestFail;
}
return TestPass;
}
/* Whether readReady() has been invoked. */
bool notified() const
{
return notified_;
}
private:
/* Slot for the notifier: drain the pipe and record delivery. */
void readReady()
{
size_ = read(notifier_->fd(), data_, sizeof(data_));
notified_ = true;
}
EventNotifier *notifier_;
int pipefd_[2];
bool notified_;
char data_[16];
ssize_t size_;
};
/*
 * Verify that an EventNotifier with a pending event, when its owning
 * Object is moved to another thread, delivers that event in the new
 * thread.
 */
class EventThreadTest : public Test
{
protected:
int init()
{
thread_.start();
handler_ = new EventHandler();
return TestPass;
}
int run()
{
/*
 * Fire the event notifier and then move the notifier to a
 * different thread. The notifier will not notice the event
 * immediately as there is no event dispatcher loop running in
 * the main thread. This tests that a notifier being moved to a
 * different thread will correctly process already pending
 * events in the new thread.
 */
handler_->notify();
handler_->moveToThread(&thread_);
this_thread::sleep_for(chrono::milliseconds(100));
if (!handler_->notified()) {
cout << "Thread event handling test failed" << endl;
return TestFail;
}
return TestPass;
}
void cleanup()
{
/*
 * Object class instances must be destroyed from the thread
 * they live in.
 */
handler_->deleteLater();
thread_.exit(0);
thread_.wait();
}
private:
EventHandler *handler_;
Thread thread_;
};
TEST_REGISTER(EventThreadTest)
|
0 | repos/libcamera | repos/libcamera/test/object.cpp | /* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* Object tests
*/
#include <iostream>
#include <libcamera/base/message.h>
#include <libcamera/base/object.h>
#include <libcamera/base/thread.h>
#include "test.h"
using namespace std;
using namespace libcamera;
/*
 * Object subclass that records whether it has received a
 * ThreadMoveMessage, to verify message delivery on moveToThread().
 */
class InstrumentedObject : public Object
{
public:
enum Status {
NoMessage,
MessageReceived,
};
InstrumentedObject(Object *parent = nullptr)
: Object(parent), status_(NoMessage)
{
}
Status status() const { return status_; }
void reset() { status_ = NoMessage; }
protected:
/* Record ThreadMoveMessage delivery, then defer to the base class. */
void message(Message *msg) override
{
if (msg->type() == Message::ThreadMoveMessage)
status_ = MessageReceived;
Object::message(msg);
}
private:
Status status_;
};
/*
 * Validate Object parent-child relationships and the thread-affinity
 * rules of moveToThread().
 */
class ObjectTest : public Test
{
protected:
int init()
{
/*
 * Create a hierarchy of objects:
 * A -> B -> C
 * \->D
 * E
 */
a_ = new InstrumentedObject();
b_ = new InstrumentedObject(a_);
c_ = new InstrumentedObject(b_);
d_ = new InstrumentedObject(a_);
e_ = new InstrumentedObject();
f_ = nullptr;
return TestPass;
}
int run()
{
/* Verify the parent-child relationships. */
if (a_->parent() != nullptr || b_->parent() != a_ ||
c_->parent() != b_ || d_->parent() != a_ ||
e_->parent() != nullptr) {
cout << "Incorrect parent-child relationships" << endl;
return TestFail;
}
/*
 * Verify that moving an object with no parent to a different
 * thread succeeds.
 */
e_->moveToThread(&thread_);
if (e_->thread() != &thread_ || e_->thread() == Thread::current()) {
cout << "Failed to move object to thread" << endl;
return TestFail;
}
/*
 * Verify that moving an object with a parent to a different
 * thread fails. This results in an undefined behaviour, the
 * test thus depends on the internal implementation returning
 * without performing any change.
 */
b_->moveToThread(&thread_);
if (b_->thread() != Thread::current()) {
cout << "Moving object with parent to thread shouldn't succeed" << endl;
return TestFail;
}
/*
 * Verify that moving an object with children to a different
 * thread moves all the children.
 */
a_->moveToThread(&thread_);
if (a_->thread() != &thread_ || b_->thread() != &thread_ ||
c_->thread() != &thread_ || d_->thread() != &thread_) {
cout << "Failed to move children to thread" << endl;
return TestFail;
}
/* Verify that objects are bound to the thread of their parent. */
f_ = new InstrumentedObject(d_);
if (f_->thread() != &thread_) {
cout << "Failed to bind child to parent thread" << endl;
return TestFail;
}
/* Verify that objects receive a ThreadMoveMessage when moved. */
if (a_->status() != InstrumentedObject::MessageReceived ||
b_->status() != InstrumentedObject::MessageReceived ||
c_->status() != InstrumentedObject::MessageReceived ||
d_->status() != InstrumentedObject::MessageReceived ||
e_->status() != InstrumentedObject::MessageReceived) {
cout << "Moving object didn't deliver ThreadMoveMessage" << endl;
return TestFail;
}
return TestPass;
}
void cleanup()
{
/* NOTE(review): children are deleted explicitly; assumes Object
 * does not delete its children on destruction. */
delete a_;
delete b_;
delete c_;
delete d_;
delete e_;
delete f_;
}
private:
InstrumentedObject *a_;
InstrumentedObject *b_;
InstrumentedObject *c_;
InstrumentedObject *d_;
InstrumentedObject *e_;
Thread thread_;
};
TEST_REGISTER(ObjectTest)
|
0 | repos/libcamera | repos/libcamera/test/camera-sensor.cpp | /* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* Camera sensor tests
*/
#include <algorithm>
#include <iostream>
#include <linux/media-bus-format.h>
#include <libcamera/base/utils.h>
#include "libcamera/internal/camera_lens.h"
#include "libcamera/internal/camera_sensor.h"
#include "libcamera/internal/device_enumerator.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/v4l2_subdevice.h"
#include "test.h"
using namespace std;
using namespace libcamera;
/*
 * Exercise the CameraSensor class against the vimc virtual media device:
 * model name, media bus codes, sizes, resolution, format selection and
 * (when present) the focus lens controller.
 */
class CameraSensorTest : public Test
{
protected:
int init()
{
enumerator_ = DeviceEnumerator::create();
if (!enumerator_) {
cerr << "Failed to create device enumerator" << endl;
return TestFail;
}
if (enumerator_->enumerate()) {
cerr << "Failed to enumerate media devices" << endl;
return TestFail;
}
/* The test requires the vimc virtual media device; skip otherwise. */
DeviceMatch dm("vimc");
media_ = enumerator_->search(dm);
if (!media_) {
cerr << "Unable to find \'vimc\' media device node" << endl;
return TestSkip;
}
MediaEntity *entity = media_->getEntityByName("Sensor A");
if (!entity) {
cerr << "Unable to find media entity 'Sensor A'" << endl;
return TestFail;
}
sensor_ = new CameraSensor(entity);
if (sensor_->init() < 0) {
cerr << "Unable to initialise camera sensor" << endl;
return TestFail;
}
/* A lens controller is optional for this sensor. */
lens_ = sensor_->focusLens();
if (lens_)
cout << "Found lens controller" << endl;
return TestPass;
}
int run()
{
if (sensor_->model() != "Sensor A") {
cerr << "Incorrect sensor model '" << sensor_->model()
<< "'" << endl;
return TestFail;
}
const std::vector<unsigned int> &codes = sensor_->mbusCodes();
auto iter = std::find(codes.begin(), codes.end(),
MEDIA_BUS_FMT_ARGB8888_1X32);
if (iter == codes.end()) {
cerr << "Sensor doesn't support ARGB8888_1X32" << endl;
return TestFail;
}
const std::vector<Size> &sizes = sensor_->sizes(*iter);
auto iter2 = std::find(sizes.begin(), sizes.end(),
Size(4096, 2160));
if (iter2 == sizes.end()) {
cerr << "Sensor doesn't support 4096x2160" << endl;
return TestFail;
}
const Size &resolution = sensor_->resolution();
if (resolution != Size(4096, 2160)) {
cerr << "Incorrect sensor resolution " << resolution << endl;
return TestFail;
}
/* Use an invalid format and make sure it's not selected. */
V4L2SubdeviceFormat format = sensor_->getFormat({ 0xdeadbeef,
MEDIA_BUS_FMT_SBGGR10_1X10,
MEDIA_BUS_FMT_BGR888_1X24 },
Size(1024, 768));
if (format.code != MEDIA_BUS_FMT_SBGGR10_1X10 ||
format.size != Size(4096, 2160)) {
cerr << "Failed to get a suitable format, expected 4096x2160-0x"
<< utils::hex(MEDIA_BUS_FMT_SBGGR10_1X10)
<< ", got " << format << endl;
return TestFail;
}
if (lens_ && lens_->setFocusPosition(10)) {
cerr << "Failed to set lens focus position" << endl;
return TestFail;
}
return TestPass;
}
void cleanup()
{
delete sensor_;
}
private:
std::unique_ptr<DeviceEnumerator> enumerator_;
std::shared_ptr<MediaDevice> media_;
CameraSensor *sensor_;
CameraLens *lens_;
};
TEST_REGISTER(CameraSensorTest)
|
0 | repos/libcamera | repos/libcamera/test/transform.cpp | /* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2023, Ideas On Board Oy
*
* Transform and Orientation tests
*/
#include <iostream>
#include <libcamera/orientation.h>
#include <libcamera/transform.h>
#include "test.h"
using namespace std;
using namespace libcamera;
/* Validate Orientation division and Transform composition. */
class TransformTest : public Test
{
protected:
int run();
};
/*
 * Walk a truth table of (o1, o2, t) triplets and verify, for each, both
 * directions of the Orientation/Transform algebra: o1 / o2 == t and
 * o2 * t == o1.
 */
int TransformTest::run()
{
/*
 * RotationTestEntry collects two Orientation and one Transform that
 * gets combined to validate that (o1 / o2 = T) and (o1 = o2 * T)
 *
 * o1 / o2 = t computes the Transform to apply to o2 to obtain o1
 * o2 * t = o1 combines o2 with t by applying o2 first then t
 *
 * The comments on the (most complex) transform show how applying to
 * an image with orientation o2 the Transform t allows to obtain o1.
 *
 * The image with basic rotation0 is assumed to be:
 *
 * AB
 * CD
 *
 * And the Transform operators are:
 *
 * V = vertical flip
 * H = horizontal flip
 * T = transpose
 *
 * the operator '* (T|V)' applies V first then T.
 */
static const struct RotationTestEntry {
Orientation o1;
Orientation o2;
Transform t;
} testEntries[] = {
/* Test identities transforms first. */
{
Orientation::Rotate0, Orientation::Rotate0,
Transform::Identity,
},
{
Orientation::Rotate0Mirror, Orientation::Rotate0Mirror,
Transform::Identity,
},
{
Orientation::Rotate180, Orientation::Rotate180,
Transform::Identity,
},
{
Orientation::Rotate180Mirror, Orientation::Rotate180Mirror,
Transform::Identity,
},
{
Orientation::Rotate90, Orientation::Rotate90,
Transform::Identity,
},
{
Orientation::Rotate90Mirror, Orientation::Rotate90Mirror,
Transform::Identity,
},
{
Orientation::Rotate270, Orientation::Rotate270,
Transform::Identity,
},
{
Orientation::Rotate270Mirror, Orientation::Rotate270Mirror,
Transform::Identity,
},
/*
 * Combine 0 and 180 degrees rotation as they're the most common
 * ones.
 */
{
/*
 * o2 t o1
 * --------------------------
 * CD * (H|V) = BA AB
 * BA CD CD
 */
Orientation::Rotate0, Orientation::Rotate180,
Transform::Rot180,
},
{
/*
 * o2 t o1
 * --------------------------
 * AB * (H|V) = CD DC
 * CD AB BA
 */
Orientation::Rotate180, Orientation::Rotate0,
Transform::Rot180
},
/* Test that transpositions are handled correctly. */
{
/*
 * o2 t o1
 * --------------------------
 * AB * (T|V) = CD CA
 * CD AB DB
 */
Orientation::Rotate90, Orientation::Rotate0,
Transform::Rot90,
},
{
/*
 * o2 t o1
 * --------------------------
 * CA * (T|H) = AC AB
 * DB BD CD
 */
Orientation::Rotate0, Orientation::Rotate90,
Transform::Rot270,
},
{
/*
 * o2 t o1
 * --------------------------
 * AB * (T|H) = BA BD
 * CD DC AC
 */
Orientation::Rotate270, Orientation::Rotate0,
Transform::Rot270,
},
{
/*
 * o2 t o1
 * --------------------------
 * BD * (T|V) = AC AB
 * AC BD CD
 */
Orientation::Rotate0, Orientation::Rotate270,
Transform::Rot90,
},
{
/*
 * o2 t o1
 * --------------------------
 * CD * (T|H) = DC DA
 * BA AB CB
 */
Orientation::Rotate90, Orientation::Rotate180,
Transform::Rot270,
},
{
/*
 * o2 t o1
 * --------------------------
 * DA * (T|V) = CB CD
 * CB DA BA
 */
Orientation::Rotate180, Orientation::Rotate90,
Transform::Rot90,
},
{
/*
 * o2 t o1
 * --------------------------
 * CD * (T|V) = BA BC
 * BA CD AD
 */
Orientation::Rotate270, Orientation::Rotate180,
Transform::Rot90,
},
{
/*
 * o2 t o1
 * --------------------------
 * BC * (T|H) = CB CD
 * AD DA BA
 */
Orientation::Rotate180, Orientation::Rotate270,
Transform::Rot270,
},
{
/*
 * o2 t o1
 * --------------------------
 * DA * (V|H) = AD BC
 * CB BC AD
 */
Orientation::Rotate270, Orientation::Rotate90,
Transform::Rot180,
},
/* Test that mirroring is handled correctly. */
{
Orientation::Rotate0, Orientation::Rotate0Mirror,
Transform::HFlip
},
{
Orientation::Rotate0Mirror, Orientation::Rotate0,
Transform::HFlip
},
{
Orientation::Rotate180, Orientation::Rotate180Mirror,
Transform::HFlip
},
{
Orientation::Rotate180Mirror, Orientation::Rotate180,
Transform::HFlip
},
{
Orientation::Rotate90, Orientation::Rotate90Mirror,
Transform::HFlip
},
{
Orientation::Rotate90Mirror, Orientation::Rotate90,
Transform::HFlip
},
{
Orientation::Rotate270, Orientation::Rotate270Mirror,
Transform::HFlip
},
{
Orientation::Rotate270Mirror, Orientation::Rotate270,
Transform::HFlip
},
{
Orientation::Rotate0, Orientation::Rotate0Mirror,
Transform::HFlip
},
/*
 * More exotic transforms which include Transpositions and
 * mirroring.
 */
{
/*
 * o2 t o1
 * ------------------
 * BC * (V) = AD
 * AD BC
 */
Orientation::Rotate90Mirror, Orientation::Rotate270,
Transform::VFlip,
},
{
/*
 * o2 t o1
 * ------------------
 * CB * (T) = CD
 * DA BA
 */
Orientation::Rotate180, Orientation::Rotate270Mirror,
Transform::Transpose,
},
{
/*
 * o2 t o1
 * ------------------
 * AD * (T) = AB
 * BC DC
 */
Orientation::Rotate0, Orientation::Rotate90Mirror,
Transform::Transpose,
},
{
/*
 * o2 t o1
 * ------------------
 * AD * (V) = BC
 * BC AD
 */
Orientation::Rotate270, Orientation::Rotate90Mirror,
Transform::VFlip,
},
{
/*
 * o2 t o1
 * ------------------
 * DA * (V) = CB
 * CB DA
 */
Orientation::Rotate270Mirror, Orientation::Rotate90,
Transform::VFlip,
},
{
/*
 * o2 t o1
 * --------------------------
 * CB * (V|H) = BC AD
 * DA AD BC
 */
Orientation::Rotate90Mirror, Orientation::Rotate270Mirror,
Transform::Rot180,
},
};
/* Verify both the division and the composition for every entry. */
for (const auto &entry : testEntries) {
Transform transform = entry.o1 / entry.o2;
if (transform != entry.t) {
cerr << "Failed to validate: " << entry.o1
<< " / " << entry.o2
<< " = " << transformToString(entry.t) << endl;
cerr << "Got back: "
<< transformToString(transform) << endl;
return TestFail;
}
Orientation adjusted = entry.o2 * entry.t;
if (adjusted != entry.o1) {
cerr << "Failed to validate: " << entry.o2
<< " * " << transformToString(entry.t)
<< " = " << entry.o1 << endl;
cerr << "Got back: " << adjusted << endl;
return TestFail;
}
}
return TestPass;
}
TEST_REGISTER(TransformTest)
|
0 | repos/libcamera | repos/libcamera/test/signal.cpp | /* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* Signal test
*/
#include <iostream>
#include <string.h>
#include <libcamera/base/object.h>
#include <libcamera/base/signal.h>
#include "test.h"
using namespace std;
using namespace libcamera;
static int valueStatic_ = 0;
/* Free-function slot: records the emitted value in valueStatic_. */
static void slotStatic(int value)
{
valueStatic_ = value;
}
/* Free-function slot with a return value; exercises compilation only. */
static int slotStaticReturn()
{
return 0;
}
/* Minimal Object-derived receiver used to test automatic disconnection. */
class SlotObject : public Object
{
public:
void slot()
{
valueStatic_ = 1;
}
};
/*
 * Non-Object base used to force a non-trivial object layout for the
 * multiple-inheritance receiver below.
 */
class BaseClass
{
public:
/*
 * A virtual function is required in the base class, otherwise the
 * compiler will always store Object before BaseClass in memory.
 */
virtual ~BaseClass()
{
}
unsigned int data_[32];
};
/* Receiver inheriting Object through multiple inheritance. */
class SlotMulti : public BaseClass, public Object
{
public:
void slot()
{
valueStatic_ = 1;
}
};
/*
 * Exercise Signal emission, multi-slot delivery, disconnection (by slot,
 * by object, wholesale, and from within a slot), lambda slots, and
 * automatic disconnection on Object deletion, including receivers using
 * multiple inheritance.
 */
class SignalTest : public Test
{
protected:
/* Member slots used as connection targets by run(). */
void slotVoid()
{
called_ = true;
}
/* Slot that disconnects itself from the signal when invoked. */
void slotDisconnect()
{
called_ = true;
signalVoid_.disconnect(this, &SignalTest::slotDisconnect);
}
void slotInteger1(int value)
{
values_[0] = value;
}
void slotInteger2(int value)
{
values_[1] = value;
}
void slotMultiArgs(int value, const std::string &name)
{
values_[2] = value;
name_ = name;
}
int slotReturn()
{
return 0;
}
int init()
{
return 0;
}
int run()
{
/* ----------------- Signal -> !Object tests ---------------- */
/* Test signal emission and reception. */
called_ = false;
signalVoid_.connect(this, &SignalTest::slotVoid);
signalVoid_.emit();
if (!called_) {
cout << "Signal emission test failed" << endl;
return TestFail;
}
/* Test signal with parameters. */
values_[2] = 0;
name_.clear();
signalMultiArgs_.connect(this, &SignalTest::slotMultiArgs);
signalMultiArgs_.emit(42, "H2G2");
if (values_[2] != 42 || name_ != "H2G2") {
cout << "Signal parameters test failed" << endl;
return TestFail;
}
/* Test signal connected to multiple slots. */
memset(values_, 0, sizeof(values_));
valueStatic_ = 0;
signalInt_.connect(this, &SignalTest::slotInteger1);
signalInt_.connect(this, &SignalTest::slotInteger2);
signalInt_.connect(&slotStatic)
signalInt_.emit(42);
if (values_[0] != 42 || values_[1] != 42 || values_[2] != 0 ||
valueStatic_ != 42) {
cout << "Signal multi slot test failed" << endl;
return TestFail;
}
/* Test disconnection of a single slot. */
memset(values_, 0, sizeof(values_));
signalInt_.disconnect(this, &SignalTest::slotInteger2);
signalInt_.emit(42);
if (values_[0] != 42 || values_[1] != 0 || values_[2] != 0) {
cout << "Signal slot disconnection test failed" << endl;
return TestFail;
}
/* Test disconnection of a whole object. */
memset(values_, 0, sizeof(values_));
signalInt_.disconnect(this);
signalInt_.emit(42);
if (values_[0] != 0 || values_[1] != 0 || values_[2] != 0) {
cout << "Signal object disconnection test failed" << endl;
return TestFail;
}
/* Test disconnection of a whole signal. */
memset(values_, 0, sizeof(values_));
signalInt_.connect(this, &SignalTest::slotInteger1);
signalInt_.connect(this, &SignalTest::slotInteger2);
signalInt_.disconnect();
signalInt_.emit(42);
if (values_[0] != 0 || values_[1] != 0 || values_[2] != 0) {
cout << "Signal object disconnection test failed" << endl;
return TestFail;
}
/* Test disconnection from slot. */
signalVoid_.disconnect();
signalVoid_.connect(this, &SignalTest::slotDisconnect);
signalVoid_.emit();
called_ = false;
signalVoid_.emit();
if (called_) {
cout << "Signal disconnection from slot test failed" << endl;
return TestFail;
}
/*
 * Test connecting to slots that return a value. This targets
 * compilation, there's no need to check runtime results.
 */
signalVoid_.connect(slotStaticReturn);
signalVoid_.connect(this, &SignalTest::slotReturn);
/* Test signal connection to a lambda. */
int value = 0;
signalInt_.connect(this, [&](int v) { value = v; });
signalInt_.emit(42);
if (value != 42) {
cout << "Signal connection to lambda failed" << endl;
return TestFail;
}
signalInt_.disconnect(this);
signalInt_.emit(0);
if (value != 42) {
cout << "Signal disconnection from lambda failed" << endl;
return TestFail;
}
/* ----------------- Signal -> Object tests ----------------- */
/*
 * Test automatic disconnection on object deletion. Connect two
 * signals to ensure all instances are disconnected.
 */
signalVoid_.disconnect();
signalVoid2_.disconnect();
SlotObject *slotObject = new SlotObject();
signalVoid_.connect(slotObject, &SlotObject::slot);
signalVoid2_.connect(slotObject, &SlotObject::slot);
delete slotObject;
valueStatic_ = 0;
signalVoid_.emit();
signalVoid2_.emit();
if (valueStatic_ != 0) {
cout << "Signal disconnection on object deletion test failed" << endl;
return TestFail;
}
/*
 * Test that signal deletion disconnects objects. This shall
 * not generate any valgrind warning.
 */
Signal<> *dynamicSignal = new Signal<>();
slotObject = new SlotObject();
dynamicSignal->connect(slotObject, &SlotObject::slot);
delete dynamicSignal;
delete slotObject;
/*
 * Test that signal manual disconnection from Object removes
 * the signal for the object. This shall not generate any
 * valgrind warning.
 */
dynamicSignal = new Signal<>();
slotObject = new SlotObject();
dynamicSignal->connect(slotObject, &SlotObject::slot);
dynamicSignal->disconnect(slotObject);
delete dynamicSignal;
delete slotObject;
/*
 * Test that signal manual disconnection from all slots removes
 * the signal for the object. This shall not generate any
 * valgrind warning.
 */
dynamicSignal = new Signal<>();
slotObject = new SlotObject();
dynamicSignal->connect(slotObject, &SlotObject::slot);
dynamicSignal->disconnect();
delete dynamicSignal;
delete slotObject;
/* Exercise the Object slot code paths. */
slotObject = new SlotObject();
signalVoid_.connect(slotObject, &SlotObject::slot);
valueStatic_ = 0;
signalVoid_.emit();
if (valueStatic_ == 0) {
cout << "Signal delivery for Object test failed" << endl;
return TestFail;
}
delete slotObject;
/* Test signal connection to a lambda. */
slotObject = new SlotObject();
value = 0;
signalInt_.connect(slotObject, [&](int v) { value = v; });
signalInt_.emit(42);
if (value != 42) {
cout << "Signal connection to Object lambda failed" << endl;
return TestFail;
}
signalInt_.disconnect(slotObject);
signalInt_.emit(0);
if (value != 42) {
cout << "Signal disconnection from Object lambda failed" << endl;
return TestFail;
}
delete slotObject;
/* --------- Signal -> Object (multiple inheritance) -------- */
/*
 * Test automatic disconnection on object deletion. Connect two
 * signals to ensure all instances are disconnected.
 */
signalVoid_.disconnect();
signalVoid2_.disconnect();
SlotMulti *slotMulti = new SlotMulti();
signalVoid_.connect(slotMulti, &SlotMulti::slot);
signalVoid2_.connect(slotMulti, &SlotMulti::slot);
delete slotMulti;
valueStatic_ = 0;
signalVoid_.emit();
signalVoid2_.emit();
if (valueStatic_ != 0) {
cout << "Signal disconnection on object deletion test failed" << endl;
return TestFail;
}
/*
 * Test that signal deletion disconnects objects. This shall
 * not generate any valgrind warning.
 */
dynamicSignal = new Signal<>();
slotMulti = new SlotMulti();
dynamicSignal->connect(slotMulti, &SlotMulti::slot);
delete dynamicSignal;
delete slotMulti;
/* Exercise the Object slot code paths. */
slotMulti = new SlotMulti();
signalVoid_.connect(slotMulti, &SlotMulti::slot);
valueStatic_ = 0;
signalVoid_.emit();
if (valueStatic_ == 0) {
cout << "Signal delivery for Object test failed" << endl;
return TestFail;
}
delete slotMulti;
return TestPass;
}
void cleanup()
{
}
private:
Signal<> signalVoid_;
Signal<> signalVoid2_;
Signal<int> signalInt_;
Signal<int, const std::string &> signalMultiArgs_;
bool called_;
int values_[3];
std::string name_;
};
TEST_REGISTER(SignalTest)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.