repos/libcamera/src/libcamera/ipc_pipe.cpp:

/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2020, Google Inc.
*
* Image Processing Algorithm IPC module for IPA proxies
*/
#include "libcamera/internal/ipc_pipe.h"
#include <libcamera/base/log.h>
/**
* \file ipc_pipe.h
* \brief IPC mechanism for IPA isolation
*/
namespace libcamera {
LOG_DEFINE_CATEGORY(IPCPipe)
/**
* \struct IPCMessage::Header
* \brief Container for an IPCMessage header
*
* Holds a cmd code for the IPC message, and a cookie.
*/
/**
* \var IPCMessage::Header::cmd
* \brief Type of IPCMessage
*
* Typically used to carry a command code for an RPC.
*/
/**
* \var IPCMessage::Header::cookie
* \brief Cookie to identify the message and a corresponding reply.
*
* Populated and used by IPCPipe implementations for matching calls with
* replies.
*/
/**
* \class IPCMessage
* \brief IPC message to be passed through IPC message pipe
*/
/**
* \brief Construct an empty IPCMessage instance
*/
IPCMessage::IPCMessage()
: header_(Header{ 0, 0 })
{
}
/**
* \brief Construct an IPCMessage instance with a given command code
* \param[in] cmd The command code
*/
IPCMessage::IPCMessage(uint32_t cmd)
: header_(Header{ cmd, 0 })
{
}
/**
* \brief Construct an IPCMessage instance with a given header
* \param[in] header The header that the constructed IPCMessage will contain
*/
IPCMessage::IPCMessage(const Header &header)
: header_(header)
{
}
/**
* \brief Construct an IPCMessage instance from an IPC payload
* \param[in] payload The IPCUnixSocket payload to construct from
*
* This essentially converts an IPCUnixSocket payload into an IPCMessage.
* The header is extracted from the payload into the IPCMessage's header field.
*
* If the IPCUnixSocket payload had any valid file descriptors, then they will
* all be invalidated.
*/
IPCMessage::IPCMessage(IPCUnixSocket::Payload &payload)
{
memcpy(&header_, payload.data.data(), sizeof(header_));
data_ = std::vector<uint8_t>(payload.data.begin() + sizeof(header_),
payload.data.end());
for (int32_t &fd : payload.fds)
fds_.push_back(SharedFD(std::move(fd)));
}
/**
* \brief Create an IPCUnixSocket payload from the IPCMessage
*
* This essentially converts the IPCMessage into an IPCUnixSocket payload.
*
* \todo Resolve the layering violation (add other converters later?)
*/
IPCUnixSocket::Payload IPCMessage::payload() const
{
IPCUnixSocket::Payload payload;
payload.data.resize(sizeof(Header) + data_.size());
payload.fds.reserve(fds_.size());
memcpy(payload.data.data(), &header_, sizeof(Header));
if (data_.size() > 0) {
/* \todo Make this work without copy */
memcpy(payload.data.data() + sizeof(Header),
data_.data(), data_.size());
}
for (const SharedFD &fd : fds_)
payload.fds.push_back(fd.get());
return payload;
}
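/*
 * A minimal usage sketch for the conversions above, assuming `bytes` holds
 * serialized call arguments and `fd` is a valid SharedFD (both illustrative):
 *
 *   IPCMessage msg(42);                          // cmd = 42, cookie = 0
 *   msg.data() = bytes;                          // serialized arguments
 *   msg.fds().push_back(fd);                     // file descriptor to pass
 *
 *   IPCUnixSocket::Payload payload = msg.payload();
 *   IPCMessage decoded(payload);                 // header, data and fds restored
 */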
/**
* \fn IPCMessage::header()
* \brief Returns a reference to the header
*/
/**
* \fn IPCMessage::data()
* \brief Returns a reference to the byte vector containing data
*/
/**
* \fn IPCMessage::fds()
* \brief Returns a reference to the vector containing file descriptors
*/
/**
* \fn IPCMessage::header() const
* \brief Returns a const reference to the header
*/
/**
* \fn IPCMessage::data() const
* \brief Returns a const reference to the byte vector containing data
*/
/**
* \fn IPCMessage::fds() const
* \brief Returns a const reference to the vector containing file descriptors
*/
/**
* \class IPCPipe
* \brief IPC message pipe for IPA isolation
*
* Virtual class to model an IPC message pipe for use by IPA proxies for IPA
* isolation. sendSync() and sendAsync() must be implemented, and the recv
* signal must be emitted whenever new data is available.
*/
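/*
 * A minimal sketch of a hypothetical implementation honouring this contract;
 * MyPipe, setupTransport() and onData() are illustrative names only:
 *
 *   class MyPipe : public IPCPipe
 *   {
 *   public:
 *       MyPipe() { connected_ = setupTransport(); }
 *
 *       int sendSync(const IPCMessage &in, IPCMessage *out) override;
 *       int sendAsync(const IPCMessage &data) override;
 *
 *   private:
 *       void onData(const IPCMessage &msg) { recv.emit(msg); }
 *       bool setupTransport();
 *   };
 */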
/**
* \brief Construct an IPCPipe instance
*/
IPCPipe::IPCPipe()
: connected_(false)
{
}
IPCPipe::~IPCPipe()
{
}
/**
* \fn IPCPipe::isConnected()
* \brief Check if the IPCPipe instance is connected
*
* An IPCPipe instance is connected if IPC is successfully set up.
*
* \return True if the IPCPipe is connected, false otherwise
*/
/**
* \fn IPCPipe::sendSync()
* \brief Send a message over IPC synchronously
* \param[in] in Data to send
* \param[out] out IPCMessage instance in which to receive data, if applicable
*
* This function will not return until a response is received. The event loop
* will still continue to execute, however.
*
* \return Zero on success, negative error code otherwise
*
* \todo Determine if the event loop should limit the types of messages it
* processes, to avoid reentrancy in the caller, and carefully document what
* the caller needs to implement to make this safe.
*/
/**
* \fn IPCPipe::sendAsync()
* \brief Send a message over IPC asynchronously
* \param[in] data Data to send
*
* This function will return immediately after sending the message.
*
* \return Zero on success, negative error code otherwise
*/
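/*
 * An illustrative call sequence from the proxy side, where `pipe` is an
 * IPCPipe-derived instance and `cmdInit` and parseReply() are hypothetical:
 *
 *   IPCMessage request(cmdInit);
 *   IPCMessage reply;
 *
 *   if (pipe->isConnected() && pipe->sendSync(request, &reply) == 0)
 *       parseReply(reply.data());
 */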
/**
* \var IPCPipe::recv
* \brief Signal to be emitted when a message is received over IPC
*
* When a message is received over IPC, this signal shall be emitted. Users must
* connect to this to receive messages.
*/
/**
* \var IPCPipe::connected_
* \brief Flag to indicate if the IPCPipe instance is connected
*
* An IPCPipe instance is connected if IPC is successfully set up.
*
* This flag can be read via IPCPipe::isConnected().
*
* Implementations of the IPCPipe class should set this flag upon successful
* connection.
*/
} /* namespace libcamera */

repos/libcamera/src/libcamera/orientation.cpp:

/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2023, Ideas On Board Oy
*
* Image orientation
*/
#include <libcamera/orientation.h>
#include <array>
#include <string>
/**
* \file libcamera/orientation.h
* \brief Image orientation definition
*/
namespace libcamera {
/**
* \enum Orientation
* \brief The image orientation in a memory buffer
*
* The Orientation enumeration describes the orientation of the images
* produced by the camera pipeline as they get received by the application
* inside memory buffers.
*
* The image orientation expressed using the Orientation enumeration can then be
* inferred by applying to a naturally oriented image a multiple of 90 degrees
* rotation in the clockwise direction from the origin and then by applying an
* optional horizontal mirroring.
*
* The enumeration numerical values follow the ones defined by the EXIF
* Specification version 2.32, Tag 274 "Orientation", while the names of the
* enumerated values report the rotation and mirroring operations performed.
*
* For example, Orientation::Rotate90Mirror describes the orientation obtained
* by rotating the image 90 degrees clockwise first and then applying a
* horizontal mirroring.
*
* \var Orientation::Rotate0
* \image html rotation/rotate0.svg
* \var Orientation::Rotate0Mirror
* \image html rotation/rotate0Mirror.svg
* \var Orientation::Rotate180
* \image html rotation/rotate180.svg
* \var Orientation::Rotate180Mirror
* \image html rotation/rotate180Mirror.svg
* \var Orientation::Rotate90Mirror
* \image html rotation/rotate90Mirror.svg
* \var Orientation::Rotate270
* \image html rotation/rotate270.svg
* \var Orientation::Rotate270Mirror
* \image html rotation/rotate270Mirror.svg
* \var Orientation::Rotate90
* \image html rotation/rotate90.svg
*/
/**
* \brief Return the orientation representing a rotation of the given angle
* clockwise
* \param[in] angle The angle of rotation in a clockwise sense. Negative values
* can be used to represent anticlockwise rotations
* \param[out] success Set to `true` if the angle is a multiple of 90 degrees,
* otherwise `false`
* \return The orientation corresponding to the rotation if \a success was set
* to `true`, otherwise the `Rotate0` orientation
*/
Orientation orientationFromRotation(int angle, bool *success)
{
angle = angle % 360;
if (angle < 0)
angle += 360;
if (success != nullptr)
*success = true;
switch (angle) {
case 0:
return Orientation::Rotate0;
case 90:
return Orientation::Rotate90;
case 180:
return Orientation::Rotate180;
case 270:
return Orientation::Rotate270;
}
if (success != nullptr)
*success = false;
return Orientation::Rotate0;
}
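/*
 * Example usage, assuming the caller wants to detect unsupported angles:
 *
 *   bool ok;
 *   Orientation o = orientationFromRotation(-90, &ok);
 *   // ok == true, o == Orientation::Rotate270 (-90 normalised to 270)
 *
 *   o = orientationFromRotation(45, &ok);
 *   // ok == false, o == Orientation::Rotate0
 */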
/**
* \brief Prints human-friendly names for Orientation items
* \param[in] out The output stream
* \param[in] orientation The Orientation item
* \return The output stream \a out
*/
std::ostream &operator<<(std::ostream &out, const Orientation &orientation)
{
constexpr std::array<const char *, 9> orientationNames = {
"", /* Orientation starts counting from 1. */
"Rotate0", "Rotate0Mirror",
"Rotate180", "Rotate180Mirror",
"Rotate90Mirror", "Rotate270",
"Rotate270Mirror", "Rotate90",
};
out << orientationNames[static_cast<unsigned int>(orientation)];
return out;
}
} /* namespace libcamera */

repos/libcamera/src/libcamera/control_ids_core.yaml:

# SPDX-License-Identifier: LGPL-2.1-or-later
#
# Copyright (C) 2019, Google Inc.
#
%YAML 1.1
---
# Unless otherwise stated, all controls are bi-directional, i.e. they can be
# set through Request::controls() and returned out through Request::metadata().
vendor: libcamera
controls:
- AeEnable:
type: bool
description: |
Enable or disable the AE.
\sa ExposureTime AnalogueGain
- AeLocked:
type: bool
description: |
Report the lock status of a running AE algorithm.
If the AE algorithm is locked the value shall be set to true, if it's
converging it shall be set to false. If the AE algorithm is not
running the control shall not be present in the metadata control list.
\sa AeEnable
# AeMeteringMode needs further attention:
# - Auto-generate max enum value.
# - Better handling of custom types.
- AeMeteringMode:
type: int32_t
description: |
Specify a metering mode for the AE algorithm to use. The metering
modes determine which parts of the image are used to determine the
scene brightness. Metering modes may be platform specific and not
all metering modes may be supported.
enum:
- name: MeteringCentreWeighted
value: 0
description: Centre-weighted metering mode.
- name: MeteringSpot
value: 1
description: Spot metering mode.
- name: MeteringMatrix
value: 2
description: Matrix metering mode.
- name: MeteringCustom
value: 3
description: Custom metering mode.
# AeConstraintMode needs further attention:
# - Auto-generate max enum value.
# - Better handling of custom types.
- AeConstraintMode:
type: int32_t
description: |
Specify a constraint mode for the AE algorithm to use. These determine
how the measured scene brightness is adjusted to reach the desired
target exposure. Constraint modes may be platform specific, and not
all constraint modes may be supported.
enum:
- name: ConstraintNormal
value: 0
description: Default constraint mode.
This mode aims to balance the exposure of different parts of the
image so as to reach a reasonable average level. However, highlights
in the image may appear over-exposed and lowlights may appear
under-exposed.
- name: ConstraintHighlight
value: 1
description: Highlight constraint mode.
This mode adjusts the exposure levels in order to try and avoid
over-exposing the brightest parts (highlights) of an image.
Other non-highlight parts of the image may appear under-exposed.
- name: ConstraintShadows
value: 2
description: Shadows constraint mode.
This mode adjusts the exposure levels in order to try and avoid
under-exposing the dark parts (shadows) of an image. Other normally
exposed parts of the image may appear over-exposed.
- name: ConstraintCustom
value: 3
description: Custom constraint mode.
# AeExposureMode needs further attention:
# - Auto-generate max enum value.
# - Better handling of custom types.
- AeExposureMode:
type: int32_t
description: |
Specify an exposure mode for the AE algorithm to use. These specify
how the desired total exposure is divided between the shutter time
and the sensor's analogue gain. The exposure modes are platform
specific, and not all exposure modes may be supported.
enum:
- name: ExposureNormal
value: 0
description: Default exposure mode.
- name: ExposureShort
value: 1
description: Exposure mode allowing only short exposure times.
- name: ExposureLong
value: 2
description: Exposure mode allowing long exposure times.
- name: ExposureCustom
value: 3
description: Custom exposure mode.
- ExposureValue:
type: float
description: |
Specify an Exposure Value (EV) parameter. The EV parameter will only be
applied if the AE algorithm is currently enabled.
By convention EV adjusts the exposure as log2. For example
EV = [-2, -1, -0.5, 0, 0.5, 1, 2] results in an exposure adjustment
of [1/4x, 1/2x, 1/sqrt(2)x, 1x, sqrt(2)x, 2x, 4x].
\sa AeEnable
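# Worked example (illustrative): ExposureValue = +1 doubles the target
# exposure (2^1 = 2x), while ExposureValue = -2 reduces it to a quarter
# (2^-2 = 0.25x).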
- ExposureTime:
type: int32_t
description: |
Exposure time (shutter speed) for the frame applied in the sensor
device. This value is specified in micro-seconds.
Setting this value means that it is now fixed and the AE algorithm may
not change it. Setting it back to zero returns it to the control of the
AE algorithm.
\sa AnalogueGain AeEnable
\todo Document the interactions between AeEnable and setting a fixed
value for this control. Consider interactions with other AE features,
such as aperture and aperture/shutter priority mode, and decide if
control of which features should be automatically adjusted shouldn't
better be handled through a separate AE mode control.
- AnalogueGain:
type: float
description: |
Analogue gain value applied in the sensor device.
The value of the control specifies the gain multiplier applied to all
colour channels. This value cannot be lower than 1.0.
Setting this value means that it is now fixed and the AE algorithm may
not change it. Setting it back to zero returns it to the control of the
AE algorithm.
\sa ExposureTime AeEnable
\todo Document the interactions between AeEnable and setting a fixed
value for this control. Consider interactions with other AE features,
such as aperture and aperture/shutter priority mode, and decide if
control of which features should be automatically adjusted shouldn't
better be handled through a separate AE mode control.
- AeFlickerMode:
type: int32_t
description: |
Set the flicker mode, which determines whether, and how, the AGC/AEC
algorithm attempts to hide flicker effects caused by the duty cycle of
artificial lighting.
Although implementation dependent, many algorithms for "flicker
avoidance" work by restricting this exposure time to integer multiples
of the cycle period, wherever possible.
Implementations may not support all of the flicker modes listed below.
By default the system will start in FlickerAuto mode if this is
supported, otherwise the flicker mode will be set to FlickerOff.
enum:
- name: FlickerOff
value: 0
description: No flicker avoidance is performed.
- name: FlickerManual
value: 1
description: Manual flicker avoidance.
Suppress flicker effects caused by lighting running with a period
specified by the AeFlickerPeriod control.
\sa AeFlickerPeriod
- name: FlickerAuto
value: 2
description: Automatic flicker period detection and avoidance.
The system will automatically determine the most likely value of
flicker period, and avoid flicker of this frequency. Once flicker
is being corrected, it is implementation dependent whether the
system is still able to detect a change in the flicker period.
\sa AeFlickerDetected
- AeFlickerPeriod:
type: int32_t
description: Manual flicker period in microseconds.
This value sets the current flicker period to avoid. It is used when
AeFlickerMode is set to FlickerManual.
To cancel 50Hz mains flicker, this should be set to 10000 (corresponding
to 100Hz), or 8333 (120Hz) for 60Hz mains.
Setting the mode to FlickerManual when no AeFlickerPeriod has ever been
set means that no flicker cancellation occurs (until the value of this
control is updated).
Switching to modes other than FlickerManual has no effect on the
value of the AeFlickerPeriod control.
\sa AeFlickerMode
- AeFlickerDetected:
type: int32_t
description: Flicker period detected in microseconds.
The value reported here indicates the currently detected flicker
period, or zero if no flicker at all is detected.
When AeFlickerMode is set to FlickerAuto, there may be a period during
which the value reported here remains zero. Once a non-zero value is
reported, then this is the flicker period that has been detected and is
now being cancelled.
In the case of 50Hz mains flicker, the value would be 10000
(corresponding to 100Hz), or 8333 (120Hz) for 60Hz mains flicker.
It is implementation dependent whether the system can continue to detect
flicker of different periods when another frequency is already being
cancelled.
\sa AeFlickerMode
- Brightness:
type: float
description: |
Specify a fixed brightness parameter. Positive values (up to 1.0)
produce brighter images; negative values (up to -1.0) produce darker
images and 0.0 leaves pixels unchanged.
- Contrast:
type: float
description: |
Specify a fixed contrast parameter. Normal contrast is given by the
value 1.0; larger values produce images with more contrast.
- Lux:
type: float
description: |
Report an estimate of the current illuminance level in lux. The Lux
control can only be returned in metadata.
- AwbEnable:
type: bool
description: |
Enable or disable the AWB.
\sa ColourGains
# AwbMode needs further attention:
# - Auto-generate max enum value.
# - Better handling of custom types.
- AwbMode:
type: int32_t
description: |
Specify the range of illuminants to use for the AWB algorithm. The modes
supported are platform specific, and not all modes may be supported.
enum:
- name: AwbAuto
value: 0
description: Search over the whole colour temperature range.
- name: AwbIncandescent
value: 1
description: Incandescent AWB lamp mode.
- name: AwbTungsten
value: 2
description: Tungsten AWB lamp mode.
- name: AwbFluorescent
value: 3
description: Fluorescent AWB lamp mode.
- name: AwbIndoor
value: 4
description: Indoor AWB lighting mode.
- name: AwbDaylight
value: 5
description: Daylight AWB lighting mode.
- name: AwbCloudy
value: 6
description: Cloudy AWB lighting mode.
- name: AwbCustom
value: 7
description: Custom AWB mode.
- AwbLocked:
type: bool
description: |
Report the lock status of a running AWB algorithm.
If the AWB algorithm is locked the value shall be set to true, if it's
converging it shall be set to false. If the AWB algorithm is not
running the control shall not be present in the metadata control list.
\sa AwbEnable
- ColourGains:
type: float
description: |
Pair of gain values for the Red and Blue colour channels, in that
order. ColourGains can only be applied in a Request when the AWB is
disabled.
\sa AwbEnable
size: [2]
- ColourTemperature:
type: int32_t
description: Report the current estimate of the colour temperature, in
kelvin, for this frame. The ColourTemperature control can only be
returned in metadata.
- Saturation:
type: float
description: |
Specify a fixed saturation parameter. Normal saturation is given by
the value 1.0; larger values produce more saturated colours; 0.0
produces a greyscale image.
- SensorBlackLevels:
type: int32_t
description: |
Reports the sensor black levels used for processing a frame, in the
order R, Gr, Gb, B. These values are returned as numbers out of a 16-bit
pixel range (as if pixels ranged from 0 to 65535). The SensorBlackLevels
control can only be returned in metadata.
size: [4]
- Sharpness:
type: float
description: |
A value of 0.0 means no sharpening. The minimum value means
minimal sharpening, and shall be 0.0 unless the camera can't
disable sharpening completely. The default value shall give a
"reasonable" level of sharpening, suitable for most use cases.
The maximum value may apply extremely high levels of sharpening,
higher than anyone could reasonably want. Negative values are
not allowed. Note also that sharpening is not applied to raw
streams.
- FocusFoM:
type: int32_t
description: |
Reports a Figure of Merit (FoM) to indicate how in-focus the frame is.
A larger FocusFoM value indicates a more in-focus frame. This singular
value may be based on a combination of statistics gathered from
multiple focus regions within an image. The number of focus regions and
method of combination is platform dependent. In this respect, it is not
necessarily aimed at providing a way to implement a focus algorithm by
the application, rather an indication of how in-focus a frame is.
- ColourCorrectionMatrix:
type: float
description: |
The 3x3 matrix that converts camera RGB to sRGB within the
imaging pipeline. This should describe the matrix that is used
after pixels have been white-balanced, but before any gamma
transformation. The 3x3 matrix is stored in conventional reading
order in an array of 9 floating point values.
size: [3,3]
- ScalerCrop:
type: Rectangle
description: |
Sets the image portion that will be scaled to form the whole of
the final output image. The (x,y) location of this rectangle is
relative to the PixelArrayActiveAreas that is being used. The units
remain native sensor pixels, even if the sensor is being used in
a binning or skipping mode.
This control is only present when the pipeline supports scaling. Its
maximum valid value is given by the properties::ScalerCropMaximum
property, and the two can be used to implement digital zoom.
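# Worked example (illustrative): with a hypothetical ScalerCropMaximum of
# 4056x3040, a 2x digital zoom selects the centred rectangle
# (1014, 760)/2028x1520, i.e. half the width and height of the maximum crop.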
- DigitalGain:
type: float
description: |
Digital gain value applied during the processing steps applied
to the image as captured from the sensor.
The global digital gain factor is applied to all the colour channels
of the RAW image. Different pipeline models are free to
specify how the global gain factor applies to each separate
channel.
If an imaging pipeline applies digital gain in distinct
processing steps, this value indicates their total sum.
Pipelines are free to decide how to adjust each processing
step to respect the received gain factor and shall report
their total value in the request metadata.
- FrameDuration:
type: int64_t
description: |
The instantaneous frame duration from start of frame exposure to start
of next exposure, expressed in microseconds. This control is meant to
be returned in metadata.
- FrameDurationLimits:
type: int64_t
description: |
The minimum and maximum (in that order) frame duration, expressed in
microseconds.
When provided by applications, the control specifies the sensor frame
duration interval the pipeline has to use. This limits the largest
exposure time the sensor can use. For example, if a maximum frame
duration of 33ms is requested (corresponding to 30 frames per second),
the sensor will not be able to raise the exposure time above 33ms.
A fixed frame duration is achieved by setting the minimum and maximum
values to be the same. Setting both values to 0 reverts to using the
camera defaults.
The maximum frame duration provides the absolute limit to the shutter
speed computed by the AE algorithm and it overrides any exposure mode
setting specified with controls::AeExposureMode. Similarly, when a
manual exposure time is set through controls::ExposureTime, it also
gets clipped to the limits set by this control. When reported in
metadata, the control expresses the minimum and maximum frame
durations used after being clipped to the sensor provided frame
duration limits.
\sa AeExposureMode
\sa ExposureTime
\todo Define how to calculate the capture frame rate by
defining controls to report additional delays introduced by
the capture pipeline or post-processing stages (ie JPEG
conversion, frame scaling).
\todo Provide an explicit definition of default control values, for
this and all other controls.
size: [2]
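# Worked example (illustrative): to lock capture at 30 frames per second,
# set both limits to the same value, FrameDurationLimits = [ 33333, 33333 ]
# (1 s / 30 = 33333 us); setting [ 0, 0 ] reverts to the camera defaults.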
- SensorTemperature:
type: float
description: |
Temperature measure from the camera sensor in Celsius. This is typically
obtained by a thermal sensor present on-die or in the camera module. The
range of reported temperatures is device dependent.
The SensorTemperature control will only be returned in metadata if a
thermal sensor is present.
- SensorTimestamp:
type: int64_t
description: |
The time when the first row of the image sensor active array is exposed.
The timestamp, expressed in nanoseconds, represents a monotonically
increasing counter since the system boot time, as defined by the
Linux-specific CLOCK_BOOTTIME clock id.
The SensorTimestamp control can only be returned in metadata.
\todo Define how the sensor timestamp has to be used in the reprocessing
use case.
- AfMode:
type: int32_t
description: |
Control to set the mode of the AF (autofocus) algorithm.
An implementation may choose not to implement all the modes.
enum:
- name: AfModeManual
value: 0
description: |
The AF algorithm is in manual mode. In this mode it will never
perform any action nor move the lens of its own accord, but an
application can specify the desired lens position using the
LensPosition control.
In this mode the AfState will always report AfStateIdle.
If the camera is started in AfModeManual, it will move the focus
lens to the position specified by the LensPosition control.
This mode is the recommended default value for the AfMode control.
External cameras (as reported by the Location property set to
CameraLocationExternal) may use a different default value.
- name: AfModeAuto
value: 1
description: |
The AF algorithm is in auto mode. This means that the algorithm
will never move the lens or change state unless the AfTrigger
control is used. The AfTrigger control can be used to initiate a
focus scan, the results of which will be reported by AfState.
If the autofocus algorithm is moved from AfModeAuto to another
mode while a scan is in progress, the scan is cancelled
immediately, without waiting for the scan to finish.
When first entering this mode the AfState will report
AfStateIdle. When a trigger control is sent, AfState will
report AfStateScanning for a period before spontaneously
changing to AfStateFocused or AfStateFailed, depending on
the outcome of the scan. It will remain in this state until
another scan is initiated by the AfTrigger control. If a scan is
cancelled (without changing to another mode), AfState will return
to AfStateIdle.
- name: AfModeContinuous
value: 2
description: |
The AF algorithm is in continuous mode. This means that the lens can
re-start a scan spontaneously at any moment, without any user
intervention. The AfState still reports whether the algorithm is
currently scanning or not, though the application has no ability to
initiate or cancel scans, nor to move the lens for itself.
However, applications can pause the AF algorithm from continuously
scanning by using the AfPause control. This allows video or still
images to be captured whilst guaranteeing that the focus is fixed.
When set to AfModeContinuous, the system will immediately initiate a
scan so AfState will report AfStateScanning, and will settle on one
of AfStateFocused or AfStateFailed, depending on the scan result.
- AfRange:
type: int32_t
description: |
Control to set the range of focus distances that is scanned. An
implementation may choose not to implement all the options here.
enum:
- name: AfRangeNormal
value: 0
description: |
A wide range of focus distances is scanned, all the way from
infinity down to close distances, though depending on the
implementation, possibly not including the very closest macro
positions.
- name: AfRangeMacro
value: 1
description: Only close distances are scanned.
- name: AfRangeFull
value: 2
description: |
The full range of focus distances is scanned just as with
AfRangeNormal but this time including the very closest macro
positions.
- AfSpeed:
type: int32_t
description: |
Control that determines whether the AF algorithm is to move the lens
as quickly as possible or more steadily. For example, during video
recording it may be desirable not to move the lens too abruptly, but
when in a preview mode (waiting for a still capture) it may be
helpful to move the lens as quickly as is reasonably possible.
enum:
- name: AfSpeedNormal
value: 0
description: Move the lens at its usual speed.
- name: AfSpeedFast
value: 1
description: Move the lens more quickly.
- AfMetering:
type: int32_t
description: |
Instruct the AF algorithm how it should decide which parts of the image
should be used to measure focus.
enum:
- name: AfMeteringAuto
value: 0
description: The AF algorithm should decide for itself where it will
measure focus.
- name: AfMeteringWindows
value: 1
description: The AF algorithm should use the rectangles defined by
the AfWindows control to measure focus. If no windows are specified
the behaviour is platform dependent.
- AfWindows:
type: Rectangle
description: |
Sets the focus windows used by the AF algorithm when AfMetering is set
to AfMeteringWindows. The units used are pixels within the rectangle
returned by the ScalerCropMaximum property.
In order to be activated, a rectangle must be programmed with non-zero
width and height. Internally, these rectangles are intersected with the
ScalerCropMaximum rectangle. If the window becomes empty after this
operation, then the window is ignored. If all the windows end up being
ignored, then the behaviour is platform dependent.
On platforms that support the ScalerCrop control (for implementing
digital zoom, for example), no automatic recalculation or adjustment of
AF windows is performed internally if the ScalerCrop is changed. If any
window lies outside the output image after the scaler crop has been
applied, it is up to the application to recalculate them.
The details of how the windows are used are platform dependent. We note
that when there is more than one AF window, a typical implementation
might find the optimal focus position for each one and finally select
the window where the focal distance for the objects shown in that part
of the image is closest to the camera.
size: [n]
- AfTrigger:
type: int32_t
description: |
This control starts an autofocus scan when AfMode is set to AfModeAuto,
and can also be used to terminate a scan early.
It is ignored if AfMode is set to AfModeManual or AfModeContinuous.
enum:
- name: AfTriggerStart
value: 0
description: Start an AF scan. Ignored if a scan is in progress.
- name: AfTriggerCancel
value: 1
description: Cancel an AF scan. This does not cause the lens to move
anywhere else. Ignored if no scan is in progress.
- AfPause:
type: int32_t
description: |
This control has no effect except when in continuous autofocus mode
(AfModeContinuous). It can be used to pause any lens movements while
(for example) images are captured. The algorithm remains inactive
until it is instructed to resume.
enum:
- name: AfPauseImmediate
value: 0
description: |
Pause the continuous autofocus algorithm immediately, whether or not
any kind of scan is underway. AfPauseState will subsequently report
AfPauseStatePaused. AfState may report any of AfStateScanning,
AfStateFocused or AfStateFailed, depending on the algorithm's state
when it received this control.
- name: AfPauseDeferred
value: 1
description: |
This is similar to AfPauseImmediate, and if the AfState is currently
reporting AfStateFocused or AfStateFailed it will remain in that
state and AfPauseState will report AfPauseStatePaused.
However, if the algorithm is scanning (AfStateScanning),
AfPauseState will report AfPauseStatePausing until the scan is
finished, at which point AfState will report one of AfStateFocused
or AfStateFailed, and AfPauseState will change to
AfPauseStatePaused.
- name: AfPauseResume
value: 2
description: |
Resume continuous autofocus operation. The algorithm starts again
from exactly where it left off, and AfPauseState will report
AfPauseStateRunning.
- LensPosition:
type: float
description: |
Acts as a control to instruct the lens to move to a particular position
and also reports back the position of the lens for each frame.
The LensPosition control is ignored unless the AfMode is set to
AfModeManual, though the value is reported back unconditionally in all
modes.
This value, which is generally a non-integer, is the reciprocal of the
focal distance in metres, also known as dioptres. That is, to set a
focal distance D, the lens position LP is given by
\f$LP = \frac{1\mathrm{m}}{D}\f$
For example:
0 moves the lens to infinity.
0.5 moves the lens to focus on objects 2m away.
2 moves the lens to focus on objects 50cm away.
And larger values will focus the lens closer.
The default value of the control should indicate a good general position
for the lens, often corresponding to the hyperfocal distance (the
closest position for which objects at infinity are still acceptably
sharp). The minimum will often be zero (meaning infinity), and the
maximum value defines the closest focus position.
\todo Define a property to report the Hyperfocal distance of calibrated
lenses.
- AfState:
type: int32_t
description: |
Reports the current state of the AF algorithm in conjunction with the
reported AfMode value and (in continuous AF mode) the AfPauseState
value. The possible state changes are described below, though we note
the following state transitions that occur when the AfMode is changed.
If the AfMode is set to AfModeManual, then the AfState will always
report AfStateIdle (even if the lens is subsequently moved). Changing to
the AfModeManual state does not initiate any lens movement.
If the AfMode is set to AfModeAuto then the AfState will report
AfStateIdle. However, if AfModeAuto and AfTriggerStart are sent together
then AfState will omit AfStateIdle and move straight to AfStateScanning
(and start a scan).
If the AfMode is set to AfModeContinuous then the AfState will initially
report AfStateScanning.
enum:
- name: AfStateIdle
value: 0
description: |
The AF algorithm is in manual mode (AfModeManual) or in auto mode
(AfModeAuto) and a scan has not yet been triggered, or an
in-progress scan was cancelled.
- name: AfStateScanning
value: 1
description: |
The AF algorithm is in auto mode (AfModeAuto), and a scan has been
started using the AfTrigger control. The scan can be cancelled by
sending AfTriggerCancel at which point the algorithm will either
move back to AfStateIdle or, if the scan actually completes before
the cancel request is processed, to one of AfStateFocused or
AfStateFailed.
Alternatively the AF algorithm could be in continuous mode
(AfModeContinuous) at which point it may enter this state
spontaneously whenever it determines that a rescan is needed.
- name: AfStateFocused
value: 2
description: |
The AF algorithm is in auto (AfModeAuto) or continuous
(AfModeContinuous) mode and a scan has completed with the result
that the algorithm believes the image is now in focus.
- name: AfStateFailed
value: 3
description: |
The AF algorithm is in auto (AfModeAuto) or continuous
(AfModeContinuous) mode and a scan has completed with the result
that the algorithm did not find a good focus position.
- AfPauseState:
type: int32_t
description: |
Only applicable in continuous (AfModeContinuous) mode, this reports
whether the algorithm is currently running, paused or pausing (that is,
will pause as soon as any in-progress scan completes).
Any change to AfMode will cause AfPauseStateRunning to be reported.
enum:
- name: AfPauseStateRunning
value: 0
description: |
Continuous AF is running and the algorithm may restart a scan
spontaneously.
- name: AfPauseStatePausing
value: 1
description: |
Continuous AF has been sent an AfPauseDeferred control, and will
pause as soon as any in-progress scan completes (and then report
AfPauseStatePaused). No new scans will start spontaneously until
the AfPauseResume control is sent.
- name: AfPauseStatePaused
value: 2
description: |
Continuous AF is paused. No further state changes or lens movements
will occur until the AfPauseResume control is sent.
- HdrMode:
type: int32_t
description: |
Control to set the mode to be used for High Dynamic Range (HDR)
imaging. HDR techniques typically include multiple exposure, image
fusion and tone mapping techniques to improve the dynamic range of the
resulting images.
When using an HDR mode, images are captured with different sets of AGC
settings called HDR channels. Channels indicate in particular the type
of exposure (short, medium or long) used to capture the raw image,
before fusion. Each HDR image is tagged with the corresponding channel
using the HdrChannel control.
\sa HdrChannel
enum:
- name: HdrModeOff
value: 0
description: |
HDR is disabled. Metadata for this frame will not include the
HdrChannel control.
- name: HdrModeMultiExposureUnmerged
value: 1
description: |
Multiple exposures will be generated in an alternating fashion.
However, they will not be merged together and will be returned to
the application as they are. Each image will be tagged with the
correct HDR channel, indicating what kind of exposure it is. The
tag should be the same as in the HdrModeMultiExposure case.
The expectation is that an application using this mode would merge
the frames to create HDR images for itself if it requires them.
- name: HdrModeMultiExposure
value: 2
description: |
Multiple exposures will be generated and merged to create HDR
images. Each image will be tagged with the HDR channel (long, medium
or short) that arrived and which caused this image to be output.
Systems that use two channels for HDR will return images tagged
alternately as the short and long channel. Systems that use three
channels for HDR will cycle through the short, medium and long
channel before repeating.
- name: HdrModeSingleExposure
value: 3
description: |
Multiple frames all at a single exposure will be used to create HDR
images. These images should be reported as all corresponding to the
HDR short channel.
- name: HdrModeNight
value: 4
description: |
Multiple frames will be combined to produce "night mode" images. It
is up to the implementation exactly which HDR channels it uses, and
the images will all be tagged accordingly with the correct HDR
channel information.
- HdrChannel:
type: int32_t
description: |
This value is reported back to the application so that it can discover
whether this capture corresponds to the short or long exposure image (or
any other image used by the HDR procedure). An application can monitor
the HDR channel to discover when the differently exposed images have
arrived.
This metadata is only available when an HDR mode has been enabled.
\sa HdrMode
enum:
- name: HdrChannelNone
value: 0
description: |
This image does not correspond to any of the captures used to create
an HDR image.
- name: HdrChannelShort
value: 1
description: |
This is a short exposure image.
- name: HdrChannelMedium
value: 2
description: |
This is a medium exposure image.
- name: HdrChannelLong
value: 3
description: |
This is a long exposure image.
- Gamma:
type: float
description: |
Specify a fixed gamma value. Default must be 2.2 which closely mimics
sRGB gamma. Note that this is camera gamma, so it is applied as
1.0/gamma.
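# Worked example (illustrative): with the default Gamma of 2.2 the encoding
# exponent is 1.0 / 2.2 (approximately 0.45), so a normalised linear value
# of 0.5 is encoded as roughly 0.5^0.45 = 0.73.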
...

repos/libcamera/src/libcamera/device_enumerator_udev.cpp:

/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2018-2019, Google Inc.
*
* udev-based device enumerator
*/
#include "libcamera/internal/device_enumerator_udev.h"
#include <algorithm>
#include <fcntl.h>
#include <libudev.h>
#include <list>
#include <map>
#include <string.h>
#include <string_view>
#include <sys/ioctl.h>
#include <sys/sysmacros.h>
#include <unistd.h>
#include <libcamera/base/event_notifier.h>
#include <libcamera/base/log.h>
#include "libcamera/internal/media_device.h"
namespace libcamera {
LOG_DECLARE_CATEGORY(DeviceEnumerator)
DeviceEnumeratorUdev::DeviceEnumeratorUdev()
: udev_(nullptr), monitor_(nullptr), notifier_(nullptr)
{
}
DeviceEnumeratorUdev::~DeviceEnumeratorUdev()
{
delete notifier_;
if (monitor_)
udev_monitor_unref(monitor_);
if (udev_)
udev_unref(udev_);
}
int DeviceEnumeratorUdev::init()
{
int ret;
if (udev_)
return -EBUSY;
udev_ = udev_new();
if (!udev_)
return -ENODEV;
monitor_ = udev_monitor_new_from_netlink(udev_, "udev");
if (!monitor_)
return -ENODEV;
ret = udev_monitor_filter_add_match_subsystem_devtype(monitor_, "media",
nullptr);
if (ret < 0)
return ret;
ret = udev_monitor_filter_add_match_subsystem_devtype(monitor_, "video4linux",
nullptr);
if (ret < 0)
return ret;
return 0;
}
int DeviceEnumeratorUdev::addUdevDevice(struct udev_device *dev)
{
const char *subsystem = udev_device_get_subsystem(dev);
if (!subsystem)
return -ENODEV;
if (!strcmp(subsystem, "media")) {
std::unique_ptr<MediaDevice> media =
createDevice(udev_device_get_devnode(dev));
if (!media)
return -ENODEV;
DependencyMap deps;
int ret = populateMediaDevice(media.get(), &deps);
if (ret < 0) {
LOG(DeviceEnumerator, Warning)
<< "Failed to populate media device "
<< media->deviceNode()
<< " (" << media->driver() << "), skipping";
return ret;
}
if (!deps.empty()) {
LOG(DeviceEnumerator, Debug)
<< "Defer media device " << media->deviceNode()
<< " due to " << deps.size()
<< " missing dependencies";
pending_.emplace_back(std::move(media), std::move(deps));
MediaDeviceDeps *mediaDeps = &pending_.back();
for (const auto &dep : mediaDeps->deps_)
devMap_[dep.first] = mediaDeps;
return 0;
}
addDevice(std::move(media));
return 0;
}
if (!strcmp(subsystem, "video4linux")) {
addV4L2Device(udev_device_get_devnum(dev));
return 0;
}
return -ENODEV;
}
int DeviceEnumeratorUdev::enumerate()
{
struct udev_enumerate *udev_enum = nullptr;
struct udev_list_entry *ents, *ent;
int ret;
udev_enum = udev_enumerate_new(udev_);
if (!udev_enum)
return -ENOMEM;
ret = udev_enumerate_add_match_subsystem(udev_enum, "media");
if (ret < 0)
goto done;
ret = udev_enumerate_add_match_subsystem(udev_enum, "video4linux");
if (ret < 0)
goto done;
ret = udev_enumerate_add_match_is_initialized(udev_enum);
if (ret < 0)
goto done;
ret = udev_enumerate_scan_devices(udev_enum);
if (ret < 0)
goto done;
ents = udev_enumerate_get_list_entry(udev_enum);
if (!ents)
goto done;
udev_list_entry_foreach(ent, ents) {
struct udev_device *dev;
const char *devnode;
const char *syspath = udev_list_entry_get_name(ent);
dev = udev_device_new_from_syspath(udev_, syspath);
if (!dev) {
LOG(DeviceEnumerator, Warning)
<< "Failed to get device for '"
<< syspath << "', skipping";
continue;
}
devnode = udev_device_get_devnode(dev);
if (!devnode) {
udev_device_unref(dev);
LOG(DeviceEnumerator, Warning)
<< "Failed to get device node for '"
<< syspath << "', skipping";
continue;
}
if (addUdevDevice(dev) < 0)
LOG(DeviceEnumerator, Warning)
<< "Failed to add device for '"
<< syspath << "', skipping";
udev_device_unref(dev);
}
done:
udev_enumerate_unref(udev_enum);
if (ret < 0)
return ret;
ret = udev_monitor_enable_receiving(monitor_);
if (ret < 0)
return ret;
int fd = udev_monitor_get_fd(monitor_);
notifier_ = new EventNotifier(fd, EventNotifier::Read);
notifier_->activated.connect(this, &DeviceEnumeratorUdev::udevNotify);
return 0;
}
int DeviceEnumeratorUdev::populateMediaDevice(MediaDevice *media, DependencyMap *deps)
{
std::set<dev_t> children;
/* Associate entities to device node paths. */
for (MediaEntity *entity : media->entities()) {
if (entity->deviceMajor() == 0 && entity->deviceMinor() == 0)
continue;
dev_t devnum = makedev(entity->deviceMajor(),
entity->deviceMinor());
/*
* If the devnum isn't in the orphans list, add it to the unmet
* dependencies.
*/
if (orphans_.find(devnum) == orphans_.end()) {
(*deps)[devnum].push_back(entity);
continue;
}
/*
* Otherwise take it from the orphans list. Don't remove the
* entry from the list yet as other entities in this media
* device may need the same device.
*/
std::string deviceNode = lookupDeviceNode(devnum);
if (deviceNode.empty())
return -EINVAL;
int ret = entity->setDeviceNode(deviceNode);
if (ret)
return ret;
children.insert(devnum);
}
/* Remove all found children from the orphans list. */
for (auto it = orphans_.begin(), last = orphans_.end(); it != last;) {
if (children.find(*it) != children.end())
it = orphans_.erase(it);
else
++it;
}
return 0;
}
/**
* \brief Lookup device node path from device number
* \param[in] devnum The device number
*
* Translate a device number given as \a devnum to a device node path.
*
* \return The device node path on success, or an empty string if the lookup
* fails
*/
std::string DeviceEnumeratorUdev::lookupDeviceNode(dev_t devnum)
{
struct udev_device *device;
const char *name;
std::string deviceNode = std::string();
device = udev_device_new_from_devnum(udev_, 'c', devnum);
if (!device)
return std::string();
name = udev_device_get_devnode(device);
if (name)
deviceNode = name;
udev_device_unref(device);
return deviceNode;
}
/**
* \brief Add a V4L2 device to the media device that it belongs to
* \param[in] devnum major:minor number of V4L2 device to add, as a dev_t
*
* Add V4L2 device identified by \a devnum to the MediaDevice that it belongs
* to, if such a MediaDevice has been created. Otherwise add the V4L2 device
* to the orphan list. If the V4L2 device is added to a MediaDevice, and it is
* the last V4L2 device that the MediaDevice needs, then the MediaDevice is
* added to the DeviceEnumerator, where it is available for pipeline handlers.
*
* \return 0 on success or a negative error code otherwise
*/
int DeviceEnumeratorUdev::addV4L2Device(dev_t devnum)
{
/*
* If the devnum doesn't belong to any media device, add it to the
* orphans list.
*/
auto it = devMap_.find(devnum);
if (it == devMap_.end()) {
orphans_.insert(devnum);
return 0;
}
/*
* Set the device node for all entities matching the devnum. Multiple
* entities can share the same device node, for instance for V4L2 M2M
* devices.
*/
std::string deviceNode = lookupDeviceNode(devnum);
if (deviceNode.empty())
return -EINVAL;
MediaDeviceDeps *deps = it->second;
for (MediaEntity *entity : deps->deps_[devnum]) {
int ret = entity->setDeviceNode(deviceNode);
if (ret)
return ret;
}
/*
* Remove the devnum from the unmet dependencies for this media device.
* If no more dependency is unmet, add the media device to the
* enumerator.
*/
deps->deps_.erase(devnum);
devMap_.erase(it);
if (deps->deps_.empty()) {
LOG(DeviceEnumerator, Debug)
<< "All dependencies for media device "
<< deps->media_->deviceNode() << " found";
addDevice(std::move(deps->media_));
pending_.remove(*deps);
}
return 0;
}
void DeviceEnumeratorUdev::udevNotify()
{
struct udev_device *dev = udev_monitor_receive_device(monitor_);
std::string_view action(udev_device_get_action(dev));
std::string_view deviceNode(udev_device_get_devnode(dev));
LOG(DeviceEnumerator, Debug)
<< action << " device " << deviceNode;
if (action == "add") {
addUdevDevice(dev);
} else if (action == "remove") {
const char *subsystem = udev_device_get_subsystem(dev);
if (subsystem && !strcmp(subsystem, "media"))
removeDevice(std::string(deviceNode));
}
udev_device_unref(dev);
}
} /* namespace libcamera */

repos/libcamera/src/libcamera/ipc_pipe_unixsocket.cpp:

/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2020, Google Inc.
*
* Image Processing Algorithm IPC module using unix socket
*/
#include "libcamera/internal/ipc_pipe_unixsocket.h"
#include <vector>
#include <libcamera/base/event_dispatcher.h>
#include <libcamera/base/log.h>
#include <libcamera/base/thread.h>
#include <libcamera/base/timer.h>
#include "libcamera/internal/ipc_pipe.h"
#include "libcamera/internal/ipc_unixsocket.h"
#include "libcamera/internal/process.h"
using namespace std::chrono_literals;
namespace libcamera {
LOG_DECLARE_CATEGORY(IPCPipe)
IPCPipeUnixSocket::IPCPipeUnixSocket(const char *ipaModulePath,
const char *ipaProxyWorkerPath)
: IPCPipe()
{
std::vector<int> fds;
std::vector<std::string> args;
args.push_back(ipaModulePath);
socket_ = std::make_unique<IPCUnixSocket>();
UniqueFD fd = socket_->create();
if (!fd.isValid()) {
LOG(IPCPipe, Error) << "Failed to create socket";
return;
}
socket_->readyRead.connect(this, &IPCPipeUnixSocket::readyRead);
args.push_back(std::to_string(fd.get()));
fds.push_back(fd.get());
proc_ = std::make_unique<Process>();
int ret = proc_->start(ipaProxyWorkerPath, args, fds);
if (ret) {
LOG(IPCPipe, Error)
<< "Failed to start proxy worker process";
return;
}
connected_ = true;
}
IPCPipeUnixSocket::~IPCPipeUnixSocket()
{
}
int IPCPipeUnixSocket::sendSync(const IPCMessage &in, IPCMessage *out)
{
IPCUnixSocket::Payload response;
int ret = call(in.payload(), &response, in.header().cookie);
if (ret) {
LOG(IPCPipe, Error) << "Failed to call sync";
return ret;
}
if (out)
*out = IPCMessage(response);
return 0;
}
int IPCPipeUnixSocket::sendAsync(const IPCMessage &data)
{
int ret = socket_->send(data.payload());
if (ret) {
LOG(IPCPipe, Error) << "Failed to call async";
return ret;
}
return 0;
}
void IPCPipeUnixSocket::readyRead()
{
IPCUnixSocket::Payload payload;
int ret = socket_->receive(&payload);
if (ret) {
LOG(IPCPipe, Error) << "Receive message failed" << ret;
return;
}
/* \todo Use span to avoid the double copy when callData is found. */
if (payload.data.size() < sizeof(IPCMessage::Header)) {
LOG(IPCPipe, Error) << "Not enough data received";
return;
}
IPCMessage ipcMessage(payload);
auto callData = callData_.find(ipcMessage.header().cookie);
if (callData != callData_.end()) {
*callData->second.response = std::move(payload);
callData->second.done = true;
return;
}
/* Received unexpected data, this means it's a call from the IPA. */
recv.emit(ipcMessage);
}
int IPCPipeUnixSocket::call(const IPCUnixSocket::Payload &message,
IPCUnixSocket::Payload *response, uint32_t cookie)
{
Timer timeout;
int ret;
const auto result = callData_.insert({ cookie, { response, false } });
const auto &iter = result.first;
ret = socket_->send(message);
if (ret) {
callData_.erase(iter);
return ret;
}
/* \todo Make this less dangerous, see IPCPipe::sendSync() */
timeout.start(2000ms);
while (!iter->second.done) {
if (!timeout.isRunning()) {
LOG(IPCPipe, Error) << "Call timeout!";
callData_.erase(iter);
return -ETIMEDOUT;
}
Thread::current()->eventDispatcher()->processEvents();
}
callData_.erase(iter);
return 0;
}
} /* namespace libcamera */

repos/libcamera/src/libcamera/property_ids_core.yaml:

# SPDX-License-Identifier: LGPL-2.1-or-later
#
# Copyright (C) 2019, Google Inc.
#
%YAML 1.1
---
vendor: libcamera
controls:
- Location:
type: int32_t
description: |
Camera mounting location
enum:
- name: CameraLocationFront
value: 0
description: |
The camera is mounted on the front side of the device, facing the
user
- name: CameraLocationBack
value: 1
description: |
The camera is mounted on the back side of the device, facing away
from the user
- name: CameraLocationExternal
value: 2
description: |
The camera is attached to the device in a way that allows it to
be moved freely
- Rotation:
type: int32_t
description: |
The camera physical mounting rotation. It is expressed as the angular
difference in degrees between two reference systems, one relative to the
camera module, and one defined on the external world scene to be
captured when projected on the image sensor pixel array.
A camera sensor has a 2-dimensional reference system 'Rc' defined by
its pixel array read-out order. The origin is set to the first pixel
being read out, the X-axis points along the column read-out direction
towards the last columns, and the Y-axis along the row read-out
direction towards the last row.
A typical example for a sensor with a 2592x1944 pixel array matrix
observed from the front is
2591 X-axis 0
<------------------------+ 0
.......... ... ..........!
.......... ... ..........! Y-axis
... !
.......... ... ..........!
.......... ... ..........! 1943
V
The external world scene reference system 'Rs' is a 2-dimensional
reference system on the focal plane of the camera module. The origin is
placed on the top-left corner of the visible scene, the X-axis points
towards the right, and the Y-axis points towards the bottom of the
scene. The top, bottom, left and right directions are intentionally not
defined and depend on the environment in which the camera is used.
A typical example of a (very common) picture of a shark swimming from
left to right, as seen from the camera, is
0 X-axis
0 +------------------------------------->
!
!
!
! |\____)\___
! ) _____ __`<
! |/ )/
!
!
!
V
Y-axis
With the reference system 'Rs' placed on the camera focal plane.
¸.·˙!
¸.·˙ !
_ ¸.·˙ !
+-/ \-+¸.·˙ !
| (o) | ! Camera focal plane
+-----+˙·.¸ !
˙·.¸ !
˙·.¸ !
˙·.¸!
When projected on the sensor's pixel array, the image and the associated
reference system 'Rs' are typically (but not always) inverted, due to
the camera module's lens optical inversion effect.
Assuming the above represented scene of the swimming shark, the lens
inversion projects the scene and its reference system onto the sensor
pixel array, seen from the front of the camera sensor, as follows
Y-axis
^
!
!
!
! |\_____)\__
! ) ____ ___.<
! |/ )/
!
!
!
0 +------------------------------------->
0 X-axis
Note the shark being upside-down.
The resulting projected reference system is named 'Rp'.
The camera rotation property is then defined as the angular difference
in the counter-clockwise direction between the camera reference system
'Rc' and the projected scene reference system 'Rp'. It is expressed in
degrees as a number in the range [0, 360[.
Examples
0 degrees camera rotation
Y-Rp
^
Y-Rc !
^ !
! !
! !
! !
! !
! !
! !
! !
! 0 +------------------------------------->
! 0 X-Rp
0 +------------------------------------->
0 X-Rc
X-Rc 0
<------------------------------------+ 0
X-Rp 0 !
<------------------------------------+ 0 !
! !
! !
! !
! !
! !
! !
! !
! V
! Y-Rc
V
Y-Rp
90 degrees camera rotation
0 Y-Rc
0 +-------------------->
! Y-Rp
! ^
! !
! !
! !
! !
! !
! !
! !
! !
! !
! 0 +------------------------------------->
! 0 X-Rp
!
!
!
!
V
X-Rc
180 degrees camera rotation
0
<------------------------------------+ 0
X-Rc !
Y-Rp !
^ !
! !
! !
! !
! !
! !
! !
! V
! Y-Rc
0 +------------------------------------->
0 X-Rp
270 degrees camera rotation
0 Y-Rc
0 +-------------------->
! 0
! <-----------------------------------+ 0
! X-Rp !
! !
! !
! !
! !
! !
! !
! !
! !
! V
! Y-Rp
!
!
!
!
V
X-Rc
Example one - Webcam
A camera module installed on the user facing part of a laptop screen
casing used for video calls. The captured images are meant to be
displayed in landscape mode (width > height) on the laptop screen.
The camera is typically mounted upside-down to compensate the lens
optical inversion effect.
Y-Rp
Y-Rc ^
^ !
! !
! ! |\_____)\__
! ! ) ____ ___.<
! ! |/ )/
! !
! !
! !
! 0 +------------------------------------->
! 0 X-Rp
0 +------------------------------------->
0 X-Rc
The two reference systems are aligned, the resulting camera rotation is
0 degrees, no rotation correction needs to be applied to the resulting
image once captured to memory buffers to correctly display it to users.
+--------------------------------------+
! !
! !
! !
! |\____)\___ !
! ) _____ __`< !
! |/ )/ !
! !
! !
! !
+--------------------------------------+
If the camera sensor is not mounted upside-down to compensate for the
lens optical inversion, the two reference systems will not be aligned,
with 'Rp' being rotated 180 degrees relatively to 'Rc'.
X-Rc 0
<------------------------------------+ 0
!
Y-Rp !
^ !
! !
! |\_____)\__ !
! ) ____ ___.< !
! |/ )/ !
! !
! !
! V
! Y-Rc
0 +------------------------------------->
0 X-Rp
The image once captured to memory will then be rotated by 180 degrees
+--------------------------------------+
! !
! !
! !
! __/(_____/| !
! >.___ ____ ( !
! \( \| !
! !
! !
! !
+--------------------------------------+
A software rotation correction of 180 degrees should be applied to
correctly display the image.
+--------------------------------------+
! !
! !
! !
! |\____)\___ !
! ) _____ __`< !
! |/ )/ !
! !
! !
! !
+--------------------------------------+
Example two - Phone camera
A camera installed on the back side of a mobile device facing away from
the user. The captured images are meant to be displayed in portrait mode
(height > width) to match the device screen orientation and the device
usage orientation used when taking the picture.
The camera sensor is typically mounted with its pixel array longer side
aligned to the device longer side, upside-down mounted to compensate for
the lens optical inversion effect.
0 Y-Rc
0 +-------------------->
! Y-Rp
! ^
! !
! !
! !
! ! |\_____)\__
! ! ) ____ ___.<
! ! |/ )/
! !
! !
! !
! 0 +------------------------------------->
! 0 X-Rp
!
!
!
!
V
X-Rc
The two reference systems are not aligned and the 'Rp' reference
system is rotated by 90 degrees in the counter-clockwise direction
relatively to the 'Rc' reference system.
The image once captured to memory will be rotated.
+-------------------------------------+
| _ _ |
| \ / |
| | | |
| | | |
| | > |
| < | |
| | | |
| . |
| V |
+-------------------------------------+
A correction of 90 degrees in counter-clockwise direction has to be
applied to correctly display the image in portrait mode on the device
screen.
+--------------------+
| |
| |
| |
| |
| |
| |
| |\____)\___ |
| ) _____ __`< |
| |/ )/ |
| |
| |
| |
| |
| |
+--------------------+
- Model:
type: string
description: |
The model name shall, to the extent possible, describe the sensor. For
most devices this is the model name of the sensor. For some devices,
however, the sensor model is unavailable as the sensor or the entire
camera is part of a larger unit and exposed as a black-box to the
system. In such cases the model name of the smallest device that
contains the camera sensor shall be used.
The model name is not meant to be a camera name displayed to the
end-user, but may be combined with other camera information to create a
camera name.
The model name is not guaranteed to be unique in the system nor is
it guaranteed to be stable or have any other properties required to make
it a good candidate to be used as a permanent identifier of a camera.
The model name shall describe the camera in a human readable format and
shall be encoded in ASCII.
Example model names are 'ov5670', 'imx219' or 'Logitech Webcam C930e'.
- UnitCellSize:
type: Size
description: |
The pixel unit cell physical size, in nanometers.
The UnitCellSize property defines the horizontal and vertical sizes of
a single pixel unit, including its active and non-active parts. In
other words, it expresses the horizontal and vertical distance between
the top-left corners of adjacent pixels.
The property can be used to calculate the physical size of the sensor's
pixel array area and for calibration purposes.
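As a purely illustrative example (figures not taken from any real sensor), a
1120 nm x 1120 nm unit cell combined with a 3280 x 2464 pixel array gives a
physical array size of approximately
3280 * 1120 nm = 3.67 mm (width)
2464 * 1120 nm = 2.76 mm (height)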
- PixelArraySize:
type: Size
description: |
The camera sensor pixel array readable area vertical and horizontal
sizes, in pixels.
The PixelArraySize property defines the size in pixel units of the
readable part of full pixel array matrix, including optical black
pixels used for calibration, pixels which are not considered valid for
capture and active pixels containing valid image data.
The property describes the maximum size of the raw data captured by the
camera, which might not correspond to the physical size of the sensor
pixel array matrix, as some portions of the physical pixel array matrix
are not accessible and cannot be transmitted out.
For example, let's consider a pixel array matrix assembled as follows
+--------------------------------------------------+
|xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx|
|xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx|
|xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx|
|xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx|
|xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx|
|xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx|
|xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx|
|xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx|
... ... ... ... ...
... ... ... ... ...
|xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx|
|xxDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDxx|
|xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx|
|xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx|
+--------------------------------------------------+
starting with two lines of non-readable pixels (x), followed by N lines
of readable data (D) surrounded by two columns of non-readable pixels on
each side, and ending with two more lines of non-readable pixels. Only
the readable portion is transmitted to the receiving side, defining the
sizes of the largest possible buffer of raw data that can be presented
to applications.
PixelArraySize.width
/----------------------------------------------/
+----------------------------------------------+ /
|DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| |
|DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| |
|DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| |
|DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| |
|DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| |
|DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| | PixelArraySize.height
... ... ... ... ...
... ... ... ... ...
|DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| |
|DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD| |
+----------------------------------------------+ /
This defines a rectangle whose top-left corner is placed in position (0,
0) and whose vertical and horizontal sizes are defined by this property.
All other rectangles that describe portions of the pixel array, such as
the optical black pixels rectangles and active pixel areas, are defined
relatively to this rectangle.
All the coordinates are expressed relative to the default sensor readout
direction, without any transformation (such as horizontal and vertical
flipping) applied. When mapping them to the raw pixel buffer,
applications shall take any configured transformation into account.
\todo Rename this property to Size once we have property
categories (i.e. Properties::PixelArray::Size)
- PixelArrayOpticalBlackRectangles:
type: Rectangle
size: [n]
description: |
The pixel array region(s) which contain optical black pixels
considered valid for calibration purposes.
This property describes the position and size of optical black pixel
regions in the raw data buffer as stored in memory, which might differ
from their actual physical location in the pixel array matrix.
It is important to note that camera sensors might
automatically reorder or skip portions of their pixel array matrix when
transmitting data to the receiver. For instance, a sensor may merge the
top and bottom optical black rectangles into a single rectangle,
transmitted at the beginning of the frame.
The pixel array contains several areas with different purposes,
interleaved with lines and columns that are not valid for capturing
purposes. Lines and columns are defined as invalid because they could be
positioned too close to the chip margins or to the optical black
shielding placed on top of optical black pixels.
PixelArraySize.width
/----------------------------------------------/
x1 x2
+--o---------------------------------------o---+ /
|IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII| |
|IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII| |
y1 oIIOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOII| |
|IIOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOII| |
|IIOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOII| |
y2 oIIOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOII| |
|IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII| |
|IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII| |
y3 |IIOOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOOII| |
|IIOOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOOII| | PixelArraySize.height
|IIOOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOOII| |
... ... ... ... ...
... ... ... ... ...
y4 |IIOOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOOII| |
|IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII| |
|IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII| |
+----------------------------------------------+ /
The readable pixel array matrix is composed of
2 invalid lines (I)
4 lines of valid optical black pixels (O)
2 invalid lines (I)
n lines of valid pixel data (P)
2 invalid lines (I)
And the position of the optical black pixel rectangles is defined by
PixelArrayOpticalBlackRectangles = {
{ x1, y1, x2 - x1 + 1, y2 - y1 + 1 },
{ x1, y3, 2, y4 - y3 + 1 },
{ x2, y3, 2, y4 - y3 + 1 },
};
If the camera, when capturing the full pixel array matrix, automatically
skips the invalid lines and columns, producing the following data
buffer, when captured to memory
PixelArraySize.width
/----------------------------------------------/
x1
+--------------------------------------------o-+ /
|OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO| |
|OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO| |
|OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO| |
|OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO| |
y1 oOOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOO| |
|OOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOO| |
|OOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOO| | PixelArraySize.height
... ... ... ... ... |
... ... ... ... ... |
|OOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOO| |
|OOPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPOO| |
+----------------------------------------------+ /
then the invalid lines and columns should not be reported as part of the
PixelArraySize property in the first place.
In this case, the position of the black pixel rectangles will be
PixelArrayOpticalBlackRectangles = {
{ 0, 0, y1 + 1, PixelArraySize[0] },
{ 0, y1, 2, PixelArraySize[1] - y1 + 1 },
{ x1, y1, 2, PixelArraySize[1] - y1 + 1 },
};
\todo Rename this property to OpticalBlackRectangles once we have property
categories (i.e. Properties::PixelArray::OpticalBlackRectangles)
- PixelArrayActiveAreas:
type: Rectangle
size: [n]
description: |
The PixelArrayActiveAreas property defines the (possibly multiple and
overlapping) portions of the camera sensor readable pixel matrix
which are considered valid for image acquisition purposes.
This property describes an arbitrary number of overlapping rectangles,
with each rectangle representing the maximum image size that the camera
sensor can produce for a particular aspect ratio. They are defined
relatively to the PixelArraySize rectangle.
When multiple rectangles are reported, they shall be ordered from the
tallest to the shortest.
Example 1
A camera sensor which only produces images in the 4:3 image resolution
will report a single PixelArrayActiveAreas rectangle, from which all
other image formats are obtained by either cropping the field-of-view
and/or applying pixel sub-sampling techniques such as pixel skipping or
binning.
PixelArraySize.width
/----------------/
x1 x2
(0,0)-> +-o------------o-+ /
y1 o +------------+ | |
| |////////////| | |
| |////////////| | | PixelArraySize.height
| |////////////| | |
y2 o +------------+ | |
+----------------+ /
The property reports a single rectangle
PixelArrayActiveAreas = (x1, y1, x2 - x1 + 1, y2 - y1 + 1)
Example 2
A camera sensor which can produce images in different native
resolutions will report several overlapping rectangles, one for each
natively supported resolution.
PixelArraySize.width
/------------------/
x1 x2 x3 x4
(0,0)-> +o---o------o---o+ /
y1 o +------+ | |
| |//////| | |
y2 o+---+------+---+| |
||///|//////|///|| | PixelArraySize.height
y3 o+---+------+---+| |
| |//////| | |
y4 o +------+ | |
+----+------+----+ /
The property reports two rectangles
PixelArrayActiveAreas = ((x2, y1, x3 - x2 + 1, y4 - y1 + 1),
(x1, y2, x4 - x1 + 1, y3 - y2 + 1))
The first rectangle describes the maximum field-of-view of all image
formats in the 4:3 resolutions, while the second one describes the
maximum field of view for all image formats in the 16:9 resolutions.
Multiple rectangles shall only be reported when the sensor can't capture
the pixels in the corner regions. If all the pixels in the (x1,y1) -
(x4,y4) area can be captured, the PixelArrayActiveAreas property shall
contain the single rectangle (x1,y1) - (x4,y4).
\todo Rename this property to ActiveAreas once we have property
categories (i.e. Properties::PixelArray::ActiveAreas)
- ScalerCropMaximum:
type: Rectangle
description: |
The maximum valid rectangle for the controls::ScalerCrop control. This
reflects the minimum mandatory cropping applied in the camera sensor and
the rest of the pipeline. Like the ScalerCrop control, it defines a
rectangle taken from the sensor's active pixel array.
This property is valid only after the camera has been successfully
configured and its value may change whenever a new configuration is
applied.
\todo Turn this property into a "maximum control value" for the
ScalerCrop control once "dynamic" controls have been implemented.
- SensorSensitivity:
type: float
description: |
The relative sensitivity of the chosen sensor mode.
Some sensors have readout modes with different sensitivities. For example,
a binned camera mode might, with the same exposure and gains, produce
twice the signal level of the full resolution readout. This would be
signalled by the binned mode, when it is chosen, indicating a value here
that is twice that of the full resolution mode. This value will be valid
after the configure method has returned successfully.
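As an illustrative example with hypothetical values, if the full resolution
mode reports a sensitivity of 1.0 and a binned mode reports 2.0, an
application that wants the same image brightness in both modes can scale its
exposure and gain product by the inverse sensitivity ratio:
exposure_binned * gain_binned = (exposure_full * gain_full) * (1.0 / 2.0)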
- SystemDevices:
type: int64_t
size: [n]
description: |
A list of integer values of type dev_t denoting the major and minor
device numbers of the underlying devices used in the operation of this
camera.
Different cameras may report identical devices.
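The values are plain dev_t numbers. A hypothetical application-side sketch
(illustrative only, using the standard sys/sysmacros.h major()/minor()
helpers) to print them could be:
#include <cstdio>
#include <sys/sysmacros.h>
const auto devices = camera->properties().get(libcamera::properties::SystemDevices);
if (devices) {
for (const int64_t dev : *devices)
printf("%u:%u\n", major(dev), minor(dev));
}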
...
|
0 | repos/libcamera/src/libcamera | repos/libcamera/src/libcamera/software_isp/swstats_cpu.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2023, Linaro Ltd
* Copyright (C) 2023, Red Hat Inc.
*
* Authors:
* Hans de Goede <[email protected]>
*
* CPU based software statistics implementation
*/
#pragma once
#include <stdint.h>
#include <libcamera/base/signal.h>
#include <libcamera/geometry.h>
#include "libcamera/internal/bayer_format.h"
#include "libcamera/internal/shared_mem_object.h"
#include "libcamera/internal/software_isp/swisp_stats.h"
namespace libcamera {
class PixelFormat;
struct StreamConfiguration;
class SwStatsCpu
{
public:
SwStatsCpu();
~SwStatsCpu() = default;
bool isValid() const { return sharedStats_.fd().isValid(); }
const SharedFD &getStatsFD() { return sharedStats_.fd(); }
const Size &patternSize() { return patternSize_; }
int configure(const StreamConfiguration &inputCfg);
void setWindow(const Rectangle &window);
void startFrame();
void finishFrame();
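/*
* Per-line processing entry points. Lines outside the configured
* statistics window are ignored, and ySkipMask_ subsamples the
* remaining lines to keep the CPU cost of statistics gathering low.
*/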
void processLine0(unsigned int y, const uint8_t *src[])
{
if ((y & ySkipMask_) || y < static_cast<unsigned int>(window_.y) ||
y >= (window_.y + window_.height))
return;
(this->*stats0_)(src);
}
void processLine2(unsigned int y, const uint8_t *src[])
{
if ((y & ySkipMask_) || y < static_cast<unsigned int>(window_.y) ||
y >= (window_.y + window_.height))
return;
(this->*stats2_)(src);
}
Signal<> statsReady;
private:
using statsProcessFn = void (SwStatsCpu::*)(const uint8_t *src[]);
int setupStandardBayerOrder(BayerFormat::Order order);
/* Bayer 8 bpp unpacked */
void statsBGGR8Line0(const uint8_t *src[]);
/* Bayer 10 bpp unpacked */
void statsBGGR10Line0(const uint8_t *src[]);
/* Bayer 12 bpp unpacked */
void statsBGGR12Line0(const uint8_t *src[]);
/* Bayer 10 bpp packed */
void statsBGGR10PLine0(const uint8_t *src[]);
void statsGBRG10PLine0(const uint8_t *src[]);
/* Variables set by configure(), used every line */
statsProcessFn stats0_;
statsProcessFn stats2_;
bool swapLines_;
unsigned int ySkipMask_;
Rectangle window_;
Size patternSize_;
unsigned int xShift_;
SharedMemObject<SwIspStats> sharedStats_;
SwIspStats stats_;
};
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera | repos/libcamera/src/libcamera/software_isp/debayer.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2023, Linaro Ltd
* Copyright (C) 2023, 2024 Red Hat Inc.
*
* Authors:
* Hans de Goede <[email protected]>
*
* debayer base class
*/
#include "debayer.h"
namespace libcamera {
/**
* \struct DebayerParams
* \brief Struct to hold the debayer parameters.
*/
/**
* \var DebayerParams::kRGBLookupSize
* \brief Size of a color lookup table
*/
/**
* \typedef DebayerParams::ColorLookupTable
* \brief Type of the lookup tables for red, green, blue values
*/
/**
* \var DebayerParams::red
* \brief Lookup table for red color, mapping input values to output values
*/
/**
* \var DebayerParams::green
* \brief Lookup table for green color, mapping input values to output values
*/
/**
* \var DebayerParams::blue
* \brief Lookup table for blue color, mapping input values to output values
*/
/**
* \class Debayer
* \brief Base debayering class
*
* Base class that provides functions for setting up the debayering process.
*/
LOG_DEFINE_CATEGORY(Debayer)
Debayer::~Debayer()
{
}
/**
* \fn int Debayer::configure(const StreamConfiguration &inputCfg, const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs)
* \brief Configure the debayer object according to the passed in parameters.
* \param[in] inputCfg The input configuration.
* \param[in] outputCfgs The output configurations.
*
* \return 0 on success, a negative errno on failure.
*/
/**
* \fn Size Debayer::patternSize(PixelFormat inputFormat)
* \brief Get the width and height at which the bayer pattern repeats.
* \param[in] inputFormat The input format.
*
* Valid sizes are: 2x2, 4x2 or 4x4.
*
* \return Pattern size or an empty size for unsupported inputFormats.
*/
/**
* \fn std::vector<PixelFormat> Debayer::formats(PixelFormat inputFormat)
* \brief Get the supported output formats.
* \param[in] inputFormat The input format.
*
* \return All supported output formats or an empty vector if there are none.
*/
/**
* \fn std::tuple<unsigned int, unsigned int> Debayer::strideAndFrameSize(const PixelFormat &outputFormat, const Size &size)
* \brief Get the stride and the frame size.
* \param[in] outputFormat The output format.
* \param[in] size The output size.
*
* \return A tuple of the stride and the frame size, or a tuple with 0,0 if
* there is no valid output config.
*/
/**
* \fn void Debayer::process(FrameBuffer *input, FrameBuffer *output, DebayerParams params)
* \brief Process the bayer data into the requested format.
* \param[in] input The input buffer.
* \param[in] output The output buffer.
* \param[in] params The parameters to be used in debayering.
*
* \note DebayerParams is passed by value deliberately so that a copy is passed
* when this is run in another thread by invokeMethod().
*/
/**
* \fn virtual SizeRange Debayer::sizes(PixelFormat inputFormat, const Size &inputSize)
* \brief Get the supported output sizes for the given input format and size.
* \param[in] inputFormat The input format.
* \param[in] inputSize The input size.
*
* \return The valid size ranges or an empty range if there are none.
*/
/**
* \var Signal<FrameBuffer *> Debayer::inputBufferReady
* \brief Signals when the input buffer is ready.
*/
/**
* \var Signal<FrameBuffer *> Debayer::outputBufferReady
* \brief Signals when the output buffer is ready.
*/
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera | repos/libcamera/src/libcamera/software_isp/software_isp.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2023, Linaro Ltd
*
* Simple software ISP implementation
*/
#include "libcamera/internal/software_isp/software_isp.h"
#include <cmath>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>
#include <libcamera/formats.h>
#include <libcamera/stream.h>
#include "libcamera/internal/bayer_format.h"
#include "libcamera/internal/framebuffer.h"
#include "libcamera/internal/ipa_manager.h"
#include "libcamera/internal/mapped_framebuffer.h"
#include "libcamera/internal/software_isp/debayer_params.h"
#include "debayer_cpu.h"
/**
* \file software_isp.cpp
* \brief Simple software ISP implementation
*/
namespace libcamera {
LOG_DEFINE_CATEGORY(SoftwareIsp)
/**
* \class SoftwareIsp
* \brief Class for the Software ISP
*/
/**
* \var SoftwareIsp::inputBufferReady
* \brief A signal emitted when the input frame buffer completes
*/
/**
* \var SoftwareIsp::outputBufferReady
* \brief A signal emitted when the output frame buffer completes
*/
/**
* \var SoftwareIsp::ispStatsReady
* \brief A signal emitted when the statistics for IPA are ready
*/
/**
* \var SoftwareIsp::setSensorControls
* \brief A signal emitted when the values to write to the sensor controls are
* ready
*/
/**
* \brief Constructs SoftwareIsp object
* \param[in] pipe The pipeline handler in use
* \param[in] sensor Pointer to the CameraSensor instance owned by the pipeline
* handler
*/
SoftwareIsp::SoftwareIsp(PipelineHandler *pipe, const CameraSensor *sensor)
: dmaHeap_(DmaBufAllocator::DmaBufAllocatorFlag::CmaHeap |
DmaBufAllocator::DmaBufAllocatorFlag::SystemHeap |
DmaBufAllocator::DmaBufAllocatorFlag::UDmaBuf)
{
/*
* debayerParams_ must be initialized because the initial value is used for
* the first two frames, i.e. until stats processing starts providing its
* own parameters.
*
* \todo This should be handled in the same place as the related
* operations, in the IPA module.
*/
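/* Initial lookup tables: out = 255 * (in / 256) ^ 0.5, i.e. a gamma-0.5 (square-root) curve. */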
std::array<uint8_t, 256> gammaTable;
for (unsigned int i = 0; i < 256; i++)
gammaTable[i] = UINT8_MAX * std::pow(i / 256.0, 0.5);
for (unsigned int i = 0; i < DebayerParams::kRGBLookupSize; i++) {
debayerParams_.red[i] = gammaTable[i];
debayerParams_.green[i] = gammaTable[i];
debayerParams_.blue[i] = gammaTable[i];
}
if (!dmaHeap_.isValid()) {
LOG(SoftwareIsp, Error) << "Failed to create DmaBufAllocator object";
return;
}
sharedParams_ = SharedMemObject<DebayerParams>("softIsp_params");
if (!sharedParams_) {
LOG(SoftwareIsp, Error) << "Failed to create shared memory for parameters";
return;
}
auto stats = std::make_unique<SwStatsCpu>();
if (!stats->isValid()) {
LOG(SoftwareIsp, Error) << "Failed to create SwStatsCpu object";
return;
}
stats->statsReady.connect(this, &SoftwareIsp::statsReady);
debayer_ = std::make_unique<DebayerCpu>(std::move(stats));
debayer_->inputBufferReady.connect(this, &SoftwareIsp::inputReady);
debayer_->outputBufferReady.connect(this, &SoftwareIsp::outputReady);
ipa_ = IPAManager::createIPA<ipa::soft::IPAProxySoft>(pipe, 0, 0);
if (!ipa_) {
LOG(SoftwareIsp, Error)
<< "Creating IPA for software ISP failed";
debayer_.reset();
return;
}
/*
* The IPA tuning file is made from the sensor model name. If the tuning file
* isn't found, fall back to the 'uncalibrated' file.
*/
std::string ipaTuningFile = ipa_->configurationFile(sensor->model() + ".yaml");
if (ipaTuningFile.empty())
ipaTuningFile = ipa_->configurationFile("uncalibrated.yaml");
int ret = ipa_->init(IPASettings{ ipaTuningFile, sensor->model() },
debayer_->getStatsFD(),
sharedParams_.fd(),
sensor->controls());
if (ret) {
LOG(SoftwareIsp, Error) << "IPA init failed";
debayer_.reset();
return;
}
ipa_->setIspParams.connect(this, &SoftwareIsp::saveIspParams);
ipa_->setSensorControls.connect(this, &SoftwareIsp::setSensorCtrls);
debayer_->moveToThread(&ispWorkerThread_);
}
SoftwareIsp::~SoftwareIsp()
{
/* make sure to destroy the DebayerCpu before the ispWorkerThread_ is gone */
debayer_.reset();
}
/**
* \fn int SoftwareIsp::loadConfiguration([[maybe_unused]] const std::string &filename)
* \brief Load a configuration from a file
* \param[in] filename The file to load the configuration data from
*
* Currently a stub that does nothing and always returns "success".
*
* \return 0 on success
*/
/**
* \brief Process the statistics gathered
* \param[in] sensorControls The sensor controls
*
* Requests the IPA to calculate new parameters for ISP and new control
* values for the sensor.
*/
void SoftwareIsp::processStats(const ControlList &sensorControls)
{
ASSERT(ipa_);
ipa_->processStats(sensorControls);
}
/**
* \brief Check the validity of the SoftwareIsp object
* \return True if Software Isp is valid, false otherwise
*/
bool SoftwareIsp::isValid() const
{
return !!debayer_;
}
/**
* \brief Get the output formats supported for the given input format
* \param[in] inputFormat The input format
* \return All the supported output formats or an empty vector if there are none
*/
std::vector<PixelFormat> SoftwareIsp::formats(PixelFormat inputFormat)
{
ASSERT(debayer_);
return debayer_->formats(inputFormat);
}
/**
* \brief Get the supported output sizes for the given input format and size
* \param[in] inputFormat The input format
* \param[in] inputSize The input frame size
* \return The valid size range or an empty range if there are none
*/
SizeRange SoftwareIsp::sizes(PixelFormat inputFormat, const Size &inputSize)
{
ASSERT(debayer_);
return debayer_->sizes(inputFormat, inputSize);
}
/**
* Get the output stride and the frame size in bytes for the given output format and size
* \param[in] outputFormat The output format
* \param[in] size The output size (width and height in pixels)
* \return A tuple of the stride and the frame size in bytes, or a tuple of 0,0
* if there is no valid output config
*/
std::tuple<unsigned int, unsigned int>
SoftwareIsp::strideAndFrameSize(const PixelFormat &outputFormat, const Size &size)
{
ASSERT(debayer_);
return debayer_->strideAndFrameSize(outputFormat, size);
}
/**
* \brief Configure the SoftwareIsp object according to the passed in parameters
* \param[in] inputCfg The input configuration
* \param[in] outputCfgs The output configurations
* \param[in] sensorControls ControlInfoMap of the controls supported by the sensor
* \return 0 on success, a negative errno on failure
*/
int SoftwareIsp::configure(const StreamConfiguration &inputCfg,
const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs,
const ControlInfoMap &sensorControls)
{
ASSERT(ipa_ && debayer_);
int ret = ipa_->configure(sensorControls);
if (ret < 0)
return ret;
return debayer_->configure(inputCfg, outputCfgs);
}
/**
* \brief Export the buffers from the Software ISP
* \param[in] output Output stream index exporting the buffers
* \param[in] count Number of buffers to allocate
* \param[out] buffers Vector to store the allocated buffers
* \return The number of allocated buffers on success or a negative error code
* otherwise
*/
int SoftwareIsp::exportBuffers(unsigned int output, unsigned int count,
std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
ASSERT(debayer_ != nullptr);
/* single output for now */
if (output >= 1)
return -EINVAL;
for (unsigned int i = 0; i < count; i++) {
const std::string name = "frame-" + std::to_string(i);
const size_t frameSize = debayer_->frameSize();
FrameBuffer::Plane outPlane;
outPlane.fd = SharedFD(dmaHeap_.alloc(name.c_str(), frameSize));
if (!outPlane.fd.isValid()) {
LOG(SoftwareIsp, Error)
<< "failed to allocate a dma_buf";
return -ENOMEM;
}
outPlane.offset = 0;
outPlane.length = frameSize;
std::vector<FrameBuffer::Plane> planes{ outPlane };
buffers->emplace_back(std::make_unique<FrameBuffer>(std::move(planes)));
}
return count;
}
/**
* \brief Queue buffers to Software ISP
* \param[in] input The input framebuffer
* \param[in] outputs The container holding the output stream indexes and
* their respective frame buffer outputs
* \return 0 on success, a negative errno on failure
*/
int SoftwareIsp::queueBuffers(FrameBuffer *input,
const std::map<unsigned int, FrameBuffer *> &outputs)
{
unsigned int mask = 0;
/*
* Validate the outputs as a sanity check: at least one output is
* required, all outputs must reference a valid stream and no two
* outputs can reference the same stream.
*/
if (outputs.empty())
return -EINVAL;
for (auto [index, buffer] : outputs) {
if (!buffer)
return -EINVAL;
if (index >= 1) /* only single stream atm */
return -EINVAL;
if (mask & (1 << index))
return -EINVAL;
mask |= 1 << index;
}
process(input, outputs.at(0));
return 0;
}
/**
* \brief Starts the Software ISP streaming operation
* \return 0 on success, any other value indicates an error
*/
int SoftwareIsp::start()
{
int ret = ipa_->start();
if (ret)
return ret;
ispWorkerThread_.start();
return 0;
}
/**
* \brief Stops the Software ISP streaming operation
*/
void SoftwareIsp::stop()
{
ispWorkerThread_.exit();
ispWorkerThread_.wait();
ipa_->stop();
}
/**
* \brief Passes the input framebuffer to the ISP worker to process
* \param[in] input The input framebuffer
* \param[out] output The framebuffer to write the processed frame to
*/
void SoftwareIsp::process(FrameBuffer *input, FrameBuffer *output)
{
debayer_->invokeMethod(&DebayerCpu::process,
ConnectionTypeQueued, input, output, debayerParams_);
}
void SoftwareIsp::saveIspParams()
{
debayerParams_ = *sharedParams_;
}
void SoftwareIsp::setSensorCtrls(const ControlList &sensorControls)
{
setSensorControls.emit(sensorControls);
}
void SoftwareIsp::statsReady()
{
ispStatsReady.emit();
}
void SoftwareIsp::inputReady(FrameBuffer *input)
{
inputBufferReady.emit(input);
}
void SoftwareIsp::outputReady(FrameBuffer *output)
{
outputBufferReady.emit(output);
}
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera | repos/libcamera/src/libcamera/software_isp/debayer_cpu.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2023, Linaro Ltd
* Copyright (C) 2023, Red Hat Inc.
*
* Authors:
* Hans de Goede <[email protected]>
*
* CPU based debayering header
*/
#pragma once
#include <memory>
#include <stdint.h>
#include <vector>
#include <libcamera/base/object.h>
#include "libcamera/internal/bayer_format.h"
#include "debayer.h"
#include "swstats_cpu.h"
namespace libcamera {
class DebayerCpu : public Debayer, public Object
{
public:
DebayerCpu(std::unique_ptr<SwStatsCpu> stats);
~DebayerCpu();
int configure(const StreamConfiguration &inputCfg,
const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs);
Size patternSize(PixelFormat inputFormat);
std::vector<PixelFormat> formats(PixelFormat input);
std::tuple<unsigned int, unsigned int>
strideAndFrameSize(const PixelFormat &outputFormat, const Size &size);
void process(FrameBuffer *input, FrameBuffer *output, DebayerParams params);
SizeRange sizes(PixelFormat inputFormat, const Size &inputSize);
/**
* \brief Get the file descriptor for the statistics
*
* \return the file descriptor pointing to the statistics
*/
const SharedFD &getStatsFD() { return stats_->getStatsFD(); }
/**
* \brief Get the output frame size
*
* \return The output frame size
*/
unsigned int frameSize() { return outputConfig_.frameSize; }
private:
/**
* \brief Called to debayer 1 line of Bayer input data to output format
* \param[out] dst Pointer to the start of the output line to write
* \param[in] src The input data
*
* Input data is an array of (patternSize_.height + 1) src
* pointers each pointing to a line in the Bayer source. The middle
* element of the array will point to the actual line being processed.
* Earlier element(s) will point to the previous line(s) and later
* element(s) to the next line(s).
*
* These functions take an array of src pointers, rather than
* a single src pointer + a stride for the source, so that when the src
* is slow uncached memory it can be copied to faster memory before
* debayering. Debayering a standard 2x2 Bayer pattern requires access
* to the previous and next src lines for interpolating the missing
* colors. To allow copying the src lines only once 3 temporary buffers
* each holding a single line are used, re-using the oldest buffer for
* the next line and the pointers are swizzled so that:
* src[0] = previous-line, src[1] = current-line, src[2] = next-line.
* This way the 3 pointers passed to the debayer functions form
* a sliding window over the src avoiding the need to copy each
* line more than once.
*
* Similarly for bayer patterns which repeat every 4 lines, 5 src
* pointers are passed holding: src[0] = 2-lines-up, src[1] = 1-line-up
* src[2] = current-line, src[3] = 1-line-down, src[4] = 2-lines-down.
*/
using debayerFn = void (DebayerCpu::*)(uint8_t *dst, const uint8_t *src[]);
/* 8-bit raw bayer format */
template<bool addAlphaByte>
void debayer8_BGBG_BGR888(uint8_t *dst, const uint8_t *src[]);
template<bool addAlphaByte>
void debayer8_GRGR_BGR888(uint8_t *dst, const uint8_t *src[]);
/* unpacked 10-bit raw bayer format */
template<bool addAlphaByte>
void debayer10_BGBG_BGR888(uint8_t *dst, const uint8_t *src[]);
template<bool addAlphaByte>
void debayer10_GRGR_BGR888(uint8_t *dst, const uint8_t *src[]);
/* unpacked 12-bit raw bayer format */
template<bool addAlphaByte>
void debayer12_BGBG_BGR888(uint8_t *dst, const uint8_t *src[]);
template<bool addAlphaByte>
void debayer12_GRGR_BGR888(uint8_t *dst, const uint8_t *src[]);
/* CSI-2 packed 10-bit raw bayer format (all the 4 orders) */
template<bool addAlphaByte>
void debayer10P_BGBG_BGR888(uint8_t *dst, const uint8_t *src[]);
template<bool addAlphaByte>
void debayer10P_GRGR_BGR888(uint8_t *dst, const uint8_t *src[]);
template<bool addAlphaByte>
void debayer10P_GBGB_BGR888(uint8_t *dst, const uint8_t *src[]);
template<bool addAlphaByte>
void debayer10P_RGRG_BGR888(uint8_t *dst, const uint8_t *src[]);
struct DebayerInputConfig {
Size patternSize;
unsigned int bpp; /* Memory used per pixel, not precision */
unsigned int stride;
std::vector<PixelFormat> outputFormats;
};
struct DebayerOutputConfig {
unsigned int bpp; /* Memory used per pixel, not precision */
unsigned int stride;
unsigned int frameSize;
};
int getInputConfig(PixelFormat inputFormat, DebayerInputConfig &config);
int getOutputConfig(PixelFormat outputFormat, DebayerOutputConfig &config);
int setupStandardBayerOrder(BayerFormat::Order order);
int setDebayerFunctions(PixelFormat inputFormat, PixelFormat outputFormat);
void setupInputMemcpy(const uint8_t *linePointers[]);
void shiftLinePointers(const uint8_t *linePointers[], const uint8_t *src);
void memcpyNextLine(const uint8_t *linePointers[]);
void process2(const uint8_t *src, uint8_t *dst);
void process4(const uint8_t *src, uint8_t *dst);
/* Max. supported Bayer pattern height is 4, debayering this requires 5 lines */
static constexpr unsigned int kMaxLineBuffers = 5;
DebayerParams::ColorLookupTable red_;
DebayerParams::ColorLookupTable green_;
DebayerParams::ColorLookupTable blue_;
debayerFn debayer0_;
debayerFn debayer1_;
debayerFn debayer2_;
debayerFn debayer3_;
Rectangle window_;
DebayerInputConfig inputConfig_;
DebayerOutputConfig outputConfig_;
std::unique_ptr<SwStatsCpu> stats_;
uint8_t *lineBuffers_[kMaxLineBuffers];
unsigned int lineBufferLength_;
unsigned int lineBufferPadding_;
unsigned int lineBufferIndex_;
unsigned int xShift_; /* Offset of 0/1 applied to window_.x */
bool enableInputMemcpy_;
bool swapRedBlueGains_;
unsigned int measuredFrames_;
int64_t frameProcessTime_;
/* Skip 30 frames for things to stabilize then measure 30 frames */
static constexpr unsigned int kFramesToSkip = 30;
static constexpr unsigned int kLastFrameToMeasure = 60;
};
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera | repos/libcamera/src/libcamera/software_isp/debayer.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2023, Linaro Ltd
* Copyright (C) 2023, Red Hat Inc.
*
* Authors:
* Hans de Goede <[email protected]>
*
* debayering base class
*/
#pragma once
#include <stdint.h>
#include <libcamera/base/log.h>
#include <libcamera/base/signal.h>
#include <libcamera/geometry.h>
#include <libcamera/stream.h>
#include "libcamera/internal/software_isp/debayer_params.h"
namespace libcamera {
class FrameBuffer;
LOG_DECLARE_CATEGORY(Debayer)
class Debayer
{
public:
virtual ~Debayer() = 0;
virtual int configure(const StreamConfiguration &inputCfg,
const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs) = 0;
virtual std::vector<PixelFormat> formats(PixelFormat inputFormat) = 0;
virtual std::tuple<unsigned int, unsigned int>
strideAndFrameSize(const PixelFormat &outputFormat, const Size &size) = 0;
virtual void process(FrameBuffer *input, FrameBuffer *output, DebayerParams params) = 0;
virtual SizeRange sizes(PixelFormat inputFormat, const Size &inputSize) = 0;
Signal<FrameBuffer *> inputBufferReady;
Signal<FrameBuffer *> outputBufferReady;
private:
virtual Size patternSize(PixelFormat inputFormat) = 0;
};
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera | repos/libcamera/src/libcamera/software_isp/debayer_cpu.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2023, Linaro Ltd
* Copyright (C) 2023, Red Hat Inc.
*
* Authors:
* Hans de Goede <[email protected]>
*
* CPU based debayering class
*/
#include "debayer_cpu.h"
#include <stdlib.h>
#include <time.h>
#include <libcamera/formats.h>
#include "libcamera/internal/bayer_format.h"
#include "libcamera/internal/framebuffer.h"
#include "libcamera/internal/mapped_framebuffer.h"
namespace libcamera {
/**
* \class DebayerCpu
* \brief Class for debayering on the CPU
*
* Implementation for CPU based debayering
*/
/**
* \brief Constructs a DebayerCpu object
* \param[in] stats Pointer to the stats object to use
*/
DebayerCpu::DebayerCpu(std::unique_ptr<SwStatsCpu> stats)
: stats_(std::move(stats))
{
/*
* Reading from uncached buffers may be very slow.
* In such a case, it's better to copy input buffer data to normal memory.
* But in case of cached buffers, copying the data is unnecessary overhead.
* enableInputMemcpy_ makes this behavior configurable. At the moment, we
* always set it to true as the safer choice but this should be changed in
* future.
*/
enableInputMemcpy_ = true;
/* Initialize color lookup tables */
for (unsigned int i = 0; i < DebayerParams::kRGBLookupSize; i++)
red_[i] = green_[i] = blue_[i] = i;
for (unsigned int i = 0; i < kMaxLineBuffers; i++)
lineBuffers_[i] = nullptr;
}
DebayerCpu::~DebayerCpu()
{
for (unsigned int i = 0; i < kMaxLineBuffers; i++)
free(lineBuffers_[i]);
}
#define DECLARE_SRC_POINTERS(pixel_t) \
const pixel_t *prev = (const pixel_t *)src[0] + xShift_; \
const pixel_t *curr = (const pixel_t *)src[1] + xShift_; \
const pixel_t *next = (const pixel_t *)src[2] + xShift_;
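/*
* The debayer macros below take three parameters:
* p - distance to the previous horizontal sample (1, or 2 for CSI-2
* packed data when the extra low-order-bits byte must be skipped)
* n - distance to the next horizontal sample (same convention as p)
* div - divisor scaling 10/12-bit source values down to 8 bits
*/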
/*
* RGR
* GBG
* RGR
*/
#define BGGR_BGR888(p, n, div) \
*dst++ = blue_[curr[x] / (div)]; \
*dst++ = green_[(prev[x] + curr[x - p] + curr[x + n] + next[x]) / (4 * (div))]; \
*dst++ = red_[(prev[x - p] + prev[x + n] + next[x - p] + next[x + n]) / (4 * (div))]; \
if constexpr (addAlphaByte) \
*dst++ = 255; \
x++;
/*
* GBG
* RGR
* GBG
*/
#define GRBG_BGR888(p, n, div) \
*dst++ = blue_[(prev[x] + next[x]) / (2 * (div))]; \
*dst++ = green_[curr[x] / (div)]; \
*dst++ = red_[(curr[x - p] + curr[x + n]) / (2 * (div))]; \
if constexpr (addAlphaByte) \
*dst++ = 255; \
x++;
/*
* GRG
* BGB
* GRG
*/
#define GBRG_BGR888(p, n, div) \
*dst++ = blue_[(curr[x - p] + curr[x + n]) / (2 * (div))]; \
*dst++ = green_[curr[x] / (div)]; \
*dst++ = red_[(prev[x] + next[x]) / (2 * (div))]; \
if constexpr (addAlphaByte) \
*dst++ = 255; \
x++;
/*
* BGB
* GRG
* BGB
*/
#define RGGB_BGR888(p, n, div) \
*dst++ = blue_[(prev[x - p] + prev[x + n] + next[x - p] + next[x + n]) / (4 * (div))]; \
*dst++ = green_[(prev[x] + curr[x - p] + curr[x + n] + next[x]) / (4 * (div))]; \
*dst++ = red_[curr[x] / (div)]; \
if constexpr (addAlphaByte) \
*dst++ = 255; \
x++;
template<bool addAlphaByte>
void DebayerCpu::debayer8_BGBG_BGR888(uint8_t *dst, const uint8_t *src[])
{
DECLARE_SRC_POINTERS(uint8_t)
for (int x = 0; x < (int)window_.width;) {
BGGR_BGR888(1, 1, 1)
GBRG_BGR888(1, 1, 1)
}
}
template<bool addAlphaByte>
void DebayerCpu::debayer8_GRGR_BGR888(uint8_t *dst, const uint8_t *src[])
{
DECLARE_SRC_POINTERS(uint8_t)
for (int x = 0; x < (int)window_.width;) {
GRBG_BGR888(1, 1, 1)
RGGB_BGR888(1, 1, 1)
}
}
template<bool addAlphaByte>
void DebayerCpu::debayer10_BGBG_BGR888(uint8_t *dst, const uint8_t *src[])
{
DECLARE_SRC_POINTERS(uint16_t)
for (int x = 0; x < (int)window_.width;) {
/* divide values by 4 for 10 -> 8 bpp value */
BGGR_BGR888(1, 1, 4)
GBRG_BGR888(1, 1, 4)
}
}
template<bool addAlphaByte>
void DebayerCpu::debayer10_GRGR_BGR888(uint8_t *dst, const uint8_t *src[])
{
DECLARE_SRC_POINTERS(uint16_t)
for (int x = 0; x < (int)window_.width;) {
/* divide values by 4 for 10 -> 8 bpp value */
GRBG_BGR888(1, 1, 4)
RGGB_BGR888(1, 1, 4)
}
}
template<bool addAlphaByte>
void DebayerCpu::debayer12_BGBG_BGR888(uint8_t *dst, const uint8_t *src[])
{
DECLARE_SRC_POINTERS(uint16_t)
for (int x = 0; x < (int)window_.width;) {
/* divide values by 16 for 12 -> 8 bpp value */
BGGR_BGR888(1, 1, 16)
GBRG_BGR888(1, 1, 16)
}
}
template<bool addAlphaByte>
void DebayerCpu::debayer12_GRGR_BGR888(uint8_t *dst, const uint8_t *src[])
{
DECLARE_SRC_POINTERS(uint16_t)
for (int x = 0; x < (int)window_.width;) {
/* divide values by 16 for 12 -> 8 bpp value */
GRBG_BGR888(1, 1, 16)
RGGB_BGR888(1, 1, 16)
}
}
template<bool addAlphaByte>
void DebayerCpu::debayer10P_BGBG_BGR888(uint8_t *dst, const uint8_t *src[])
{
const int widthInBytes = window_.width * 5 / 4;
const uint8_t *prev = src[0];
const uint8_t *curr = src[1];
const uint8_t *next = src[2];
/*
* For the first pixel getting a pixel from the previous column uses
* x - 2 to skip the 5th byte with least-significant bits for 4 pixels.
* Same for last pixel (uses x + 2) and looking at the next column.
*/
for (int x = 0; x < widthInBytes;) {
/* First pixel */
BGGR_BGR888(2, 1, 1)
/* Second pixel BGGR -> GBRG */
GBRG_BGR888(1, 1, 1)
/* Same thing for third and fourth pixels */
BGGR_BGR888(1, 1, 1)
GBRG_BGR888(1, 2, 1)
/* Skip 5th src byte with 4 x 2 least-significant-bits */
x++;
}
}
template<bool addAlphaByte>
void DebayerCpu::debayer10P_GRGR_BGR888(uint8_t *dst, const uint8_t *src[])
{
const int widthInBytes = window_.width * 5 / 4;
const uint8_t *prev = src[0];
const uint8_t *curr = src[1];
const uint8_t *next = src[2];
for (int x = 0; x < widthInBytes;) {
/* First pixel */
GRBG_BGR888(2, 1, 1)
/* Second pixel GRBG -> RGGB */
RGGB_BGR888(1, 1, 1)
/* Same thing for third and fourth pixels */
GRBG_BGR888(1, 1, 1)
RGGB_BGR888(1, 2, 1)
/* Skip 5th src byte with 4 x 2 least-significant-bits */
x++;
}
}
template<bool addAlphaByte>
void DebayerCpu::debayer10P_GBGB_BGR888(uint8_t *dst, const uint8_t *src[])
{
const int widthInBytes = window_.width * 5 / 4;
const uint8_t *prev = src[0];
const uint8_t *curr = src[1];
const uint8_t *next = src[2];
for (int x = 0; x < widthInBytes;) {
/* Even pixel */
GBRG_BGR888(2, 1, 1)
/* Odd pixel GBRG -> BGGR */
BGGR_BGR888(1, 1, 1)
/* Same thing for next 2 pixels */
GBRG_BGR888(1, 1, 1)
BGGR_BGR888(1, 2, 1)
/* Skip 5th src byte with 4 x 2 least-significant-bits */
x++;
}
}
template<bool addAlphaByte>
void DebayerCpu::debayer10P_RGRG_BGR888(uint8_t *dst, const uint8_t *src[])
{
const int widthInBytes = window_.width * 5 / 4;
const uint8_t *prev = src[0];
const uint8_t *curr = src[1];
const uint8_t *next = src[2];
for (int x = 0; x < widthInBytes;) {
/* Even pixel */
RGGB_BGR888(2, 1, 1)
/* Odd pixel RGGB -> GRBG */
GRBG_BGR888(1, 1, 1)
/* Same thing for next 2 pixels */
RGGB_BGR888(1, 1, 1)
GRBG_BGR888(1, 2, 1)
/* Skip 5th src byte with 4 x 2 least-significant-bits */
x++;
}
}
static bool isStandardBayerOrder(BayerFormat::Order order)
{
return order == BayerFormat::BGGR || order == BayerFormat::GBRG ||
order == BayerFormat::GRBG || order == BayerFormat::RGGB;
}
/*
* Setup the Debayer object according to the passed in parameters.
* Return 0 on success, a negative errno value on failure
* (unsupported parameters).
*/
int DebayerCpu::getInputConfig(PixelFormat inputFormat, DebayerInputConfig &config)
{
BayerFormat bayerFormat =
BayerFormat::fromPixelFormat(inputFormat);
if ((bayerFormat.bitDepth == 8 || bayerFormat.bitDepth == 10 || bayerFormat.bitDepth == 12) &&
bayerFormat.packing == BayerFormat::Packing::None &&
isStandardBayerOrder(bayerFormat.order)) {
config.bpp = (bayerFormat.bitDepth + 7) & ~7;
config.patternSize.width = 2;
config.patternSize.height = 2;
config.outputFormats = std::vector<PixelFormat>({ formats::RGB888,
formats::XRGB8888,
formats::ARGB8888,
formats::BGR888,
formats::XBGR8888,
formats::ABGR8888 });
return 0;
}
if (bayerFormat.bitDepth == 10 &&
bayerFormat.packing == BayerFormat::Packing::CSI2 &&
isStandardBayerOrder(bayerFormat.order)) {
config.bpp = 10;
config.patternSize.width = 4; /* 5 bytes per *4* pixels */
config.patternSize.height = 2;
config.outputFormats = std::vector<PixelFormat>({ formats::RGB888,
formats::XRGB8888,
formats::ARGB8888,
formats::BGR888,
formats::XBGR8888,
formats::ABGR8888 });
return 0;
}
LOG(Debayer, Info)
<< "Unsupported input format " << inputFormat.toString();
return -EINVAL;
}
int DebayerCpu::getOutputConfig(PixelFormat outputFormat, DebayerOutputConfig &config)
{
if (outputFormat == formats::RGB888 || outputFormat == formats::BGR888) {
config.bpp = 24;
return 0;
}
if (outputFormat == formats::XRGB8888 || outputFormat == formats::ARGB8888 ||
outputFormat == formats::XBGR8888 || outputFormat == formats::ABGR8888) {
config.bpp = 32;
return 0;
}
LOG(Debayer, Info)
<< "Unsupported output format " << outputFormat.toString();
return -EINVAL;
}
/*
* Check for standard Bayer orders and set xShift_ and swap debayer0/1, so that
* a single pair of BGGR debayer functions can be used for all 4 standard orders.
*/
int DebayerCpu::setupStandardBayerOrder(BayerFormat::Order order)
{
switch (order) {
case BayerFormat::BGGR:
break;
case BayerFormat::GBRG:
xShift_ = 1; /* BGGR -> GBRG */
break;
case BayerFormat::GRBG:
std::swap(debayer0_, debayer1_); /* BGGR -> GRBG */
break;
case BayerFormat::RGGB:
xShift_ = 1; /* BGGR -> GBRG */
std::swap(debayer0_, debayer1_); /* GBRG -> RGGB */
break;
default:
return -EINVAL;
}
return 0;
}
int DebayerCpu::setDebayerFunctions(PixelFormat inputFormat, PixelFormat outputFormat)
{
BayerFormat bayerFormat =
BayerFormat::fromPixelFormat(inputFormat);
bool addAlphaByte = false;
xShift_ = 0;
swapRedBlueGains_ = false;
auto invalidFmt = []() -> int {
LOG(Debayer, Error) << "Unsupported input output format combination";
return -EINVAL;
};
switch (outputFormat) {
case formats::XRGB8888:
case formats::ARGB8888:
addAlphaByte = true;
[[fallthrough]];
case formats::RGB888:
break;
case formats::XBGR8888:
case formats::ABGR8888:
addAlphaByte = true;
[[fallthrough]];
case formats::BGR888:
/* Swap R and B in bayer order to generate BGR888 instead of RGB888 */
swapRedBlueGains_ = true;
switch (bayerFormat.order) {
case BayerFormat::BGGR:
bayerFormat.order = BayerFormat::RGGB;
break;
case BayerFormat::GBRG:
bayerFormat.order = BayerFormat::GRBG;
break;
case BayerFormat::GRBG:
bayerFormat.order = BayerFormat::GBRG;
break;
case BayerFormat::RGGB:
bayerFormat.order = BayerFormat::BGGR;
break;
default:
return invalidFmt();
}
break;
default:
return invalidFmt();
}
if ((bayerFormat.bitDepth == 8 || bayerFormat.bitDepth == 10 || bayerFormat.bitDepth == 12) &&
bayerFormat.packing == BayerFormat::Packing::None &&
isStandardBayerOrder(bayerFormat.order)) {
switch (bayerFormat.bitDepth) {
case 8:
debayer0_ = addAlphaByte ? &DebayerCpu::debayer8_BGBG_BGR888<true> : &DebayerCpu::debayer8_BGBG_BGR888<false>;
debayer1_ = addAlphaByte ? &DebayerCpu::debayer8_GRGR_BGR888<true> : &DebayerCpu::debayer8_GRGR_BGR888<false>;
break;
case 10:
debayer0_ = addAlphaByte ? &DebayerCpu::debayer10_BGBG_BGR888<true> : &DebayerCpu::debayer10_BGBG_BGR888<false>;
debayer1_ = addAlphaByte ? &DebayerCpu::debayer10_GRGR_BGR888<true> : &DebayerCpu::debayer10_GRGR_BGR888<false>;
break;
case 12:
debayer0_ = addAlphaByte ? &DebayerCpu::debayer12_BGBG_BGR888<true> : &DebayerCpu::debayer12_BGBG_BGR888<false>;
debayer1_ = addAlphaByte ? &DebayerCpu::debayer12_GRGR_BGR888<true> : &DebayerCpu::debayer12_GRGR_BGR888<false>;
break;
}
setupStandardBayerOrder(bayerFormat.order);
return 0;
}
if (bayerFormat.bitDepth == 10 &&
bayerFormat.packing == BayerFormat::Packing::CSI2) {
switch (bayerFormat.order) {
case BayerFormat::BGGR:
debayer0_ = addAlphaByte ? &DebayerCpu::debayer10P_BGBG_BGR888<true> : &DebayerCpu::debayer10P_BGBG_BGR888<false>;
debayer1_ = addAlphaByte ? &DebayerCpu::debayer10P_GRGR_BGR888<true> : &DebayerCpu::debayer10P_GRGR_BGR888<false>;
return 0;
case BayerFormat::GBRG:
debayer0_ = addAlphaByte ? &DebayerCpu::debayer10P_GBGB_BGR888<true> : &DebayerCpu::debayer10P_GBGB_BGR888<false>;
debayer1_ = addAlphaByte ? &DebayerCpu::debayer10P_RGRG_BGR888<true> : &DebayerCpu::debayer10P_RGRG_BGR888<false>;
return 0;
case BayerFormat::GRBG:
debayer0_ = addAlphaByte ? &DebayerCpu::debayer10P_GRGR_BGR888<true> : &DebayerCpu::debayer10P_GRGR_BGR888<false>;
debayer1_ = addAlphaByte ? &DebayerCpu::debayer10P_BGBG_BGR888<true> : &DebayerCpu::debayer10P_BGBG_BGR888<false>;
return 0;
case BayerFormat::RGGB:
debayer0_ = addAlphaByte ? &DebayerCpu::debayer10P_RGRG_BGR888<true> : &DebayerCpu::debayer10P_RGRG_BGR888<false>;
debayer1_ = addAlphaByte ? &DebayerCpu::debayer10P_GBGB_BGR888<true> : &DebayerCpu::debayer10P_GBGB_BGR888<false>;
return 0;
default:
break;
}
}
return invalidFmt();
}
int DebayerCpu::configure(const StreamConfiguration &inputCfg,
const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs)
{
if (getInputConfig(inputCfg.pixelFormat, inputConfig_) != 0)
return -EINVAL;
if (stats_->configure(inputCfg) != 0)
return -EINVAL;
const Size &statsPatternSize = stats_->patternSize();
if (inputConfig_.patternSize.width != statsPatternSize.width ||
inputConfig_.patternSize.height != statsPatternSize.height) {
LOG(Debayer, Error)
<< "mismatching stats and debayer pattern sizes for "
<< inputCfg.pixelFormat.toString();
return -EINVAL;
}
inputConfig_.stride = inputCfg.stride;
if (outputCfgs.size() != 1) {
LOG(Debayer, Error)
<< "Unsupported number of output streams: "
<< outputCfgs.size();
return -EINVAL;
}
const StreamConfiguration &outputCfg = outputCfgs[0];
SizeRange outSizeRange = sizes(inputCfg.pixelFormat, inputCfg.size);
std::tie(outputConfig_.stride, outputConfig_.frameSize) =
strideAndFrameSize(outputCfg.pixelFormat, outputCfg.size);
if (!outSizeRange.contains(outputCfg.size) || outputConfig_.stride != outputCfg.stride) {
LOG(Debayer, Error)
<< "Invalid output size/stride: "
<< "\n " << outputCfg.size << " (" << outSizeRange << ")"
<< "\n " << outputCfg.stride << " (" << outputConfig_.stride << ")";
return -EINVAL;
}
if (setDebayerFunctions(inputCfg.pixelFormat, outputCfg.pixelFormat) != 0)
return -EINVAL;
window_.x = ((inputCfg.size.width - outputCfg.size.width) / 2) &
~(inputConfig_.patternSize.width - 1);
window_.y = ((inputCfg.size.height - outputCfg.size.height) / 2) &
~(inputConfig_.patternSize.height - 1);
window_.width = outputCfg.size.width;
window_.height = outputCfg.size.height;
/* Don't pass x,y since process() already adjusts src before passing it */
stats_->setWindow(Rectangle(window_.size()));
/* pad with patternSize.width on both the left and right sides */
lineBufferPadding_ = inputConfig_.patternSize.width * inputConfig_.bpp / 8;
lineBufferLength_ = window_.width * inputConfig_.bpp / 8 +
2 * lineBufferPadding_;
for (unsigned int i = 0;
i < (inputConfig_.patternSize.height + 1) && enableInputMemcpy_;
i++) {
free(lineBuffers_[i]);
lineBuffers_[i] = (uint8_t *)malloc(lineBufferLength_);
if (!lineBuffers_[i])
return -ENOMEM;
}
measuredFrames_ = 0;
frameProcessTime_ = 0;
return 0;
}
/*
* Get width and height at which the bayer-pattern repeats.
* Return pattern-size or an empty Size for an unsupported inputFormat.
*/
Size DebayerCpu::patternSize(PixelFormat inputFormat)
{
DebayerCpu::DebayerInputConfig config;
if (getInputConfig(inputFormat, config) != 0)
return {};
return config.patternSize;
}
std::vector<PixelFormat> DebayerCpu::formats(PixelFormat inputFormat)
{
DebayerCpu::DebayerInputConfig config;
if (getInputConfig(inputFormat, config) != 0)
return std::vector<PixelFormat>();
return config.outputFormats;
}
std::tuple<unsigned int, unsigned int>
DebayerCpu::strideAndFrameSize(const PixelFormat &outputFormat, const Size &size)
{
DebayerCpu::DebayerOutputConfig config;
if (getOutputConfig(outputFormat, config) != 0)
return std::make_tuple(0, 0);
/* round up to multiple of 8 for 64 bits alignment */
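/* e.g. 24 bpp (RGB888) at width 1278: 1278 * 3 = 3834 bytes, rounded up to 3840 */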
unsigned int stride = (size.width * config.bpp / 8 + 7) & ~7;
return std::make_tuple(stride, stride * size.height);
}
void DebayerCpu::setupInputMemcpy(const uint8_t *linePointers[])
{
const unsigned int patternHeight = inputConfig_.patternSize.height;
if (!enableInputMemcpy_)
return;
for (unsigned int i = 0; i < patternHeight; i++) {
memcpy(lineBuffers_[i], linePointers[i + 1] - lineBufferPadding_,
lineBufferLength_);
linePointers[i + 1] = lineBuffers_[i] + lineBufferPadding_;
}
/* Point lineBufferIndex_ to first unused lineBuffer */
lineBufferIndex_ = patternHeight;
}
void DebayerCpu::shiftLinePointers(const uint8_t *linePointers[], const uint8_t *src)
{
const unsigned int patternHeight = inputConfig_.patternSize.height;
for (unsigned int i = 0; i < patternHeight; i++)
linePointers[i] = linePointers[i + 1];
linePointers[patternHeight] = src +
(patternHeight / 2) * (int)inputConfig_.stride;
}
void DebayerCpu::memcpyNextLine(const uint8_t *linePointers[])
{
const unsigned int patternHeight = inputConfig_.patternSize.height;
if (!enableInputMemcpy_)
return;
memcpy(lineBuffers_[lineBufferIndex_], linePointers[patternHeight] - lineBufferPadding_,
lineBufferLength_);
linePointers[patternHeight] = lineBuffers_[lineBufferIndex_] + lineBufferPadding_;
lineBufferIndex_ = (lineBufferIndex_ + 1) % (patternHeight + 1);
}
void DebayerCpu::process2(const uint8_t *src, uint8_t *dst)
{
unsigned int yEnd = window_.y + window_.height;
/* Holds [0] previous- [1] current- [2] next-line */
const uint8_t *linePointers[3];
/* Adjust src to top left corner of the window */
src += window_.y * inputConfig_.stride + window_.x * inputConfig_.bpp / 8;
/* [x] becomes [x - 1] after initial shiftLinePointers() call */
if (window_.y) {
linePointers[1] = src - inputConfig_.stride; /* previous-line */
linePointers[2] = src;
} else {
/* window_.y == 0, use the next line as prev line */
linePointers[1] = src + inputConfig_.stride;
linePointers[2] = src;
/* Last 2 lines also need special handling */
yEnd -= 2;
}
setupInputMemcpy(linePointers);
for (unsigned int y = window_.y; y < yEnd; y += 2) {
shiftLinePointers(linePointers, src);
memcpyNextLine(linePointers);
stats_->processLine0(y, linePointers);
(this->*debayer0_)(dst, linePointers);
src += inputConfig_.stride;
dst += outputConfig_.stride;
shiftLinePointers(linePointers, src);
memcpyNextLine(linePointers);
(this->*debayer1_)(dst, linePointers);
src += inputConfig_.stride;
dst += outputConfig_.stride;
}
if (window_.y == 0) {
shiftLinePointers(linePointers, src);
memcpyNextLine(linePointers);
stats_->processLine0(yEnd, linePointers);
(this->*debayer0_)(dst, linePointers);
src += inputConfig_.stride;
dst += outputConfig_.stride;
shiftLinePointers(linePointers, src);
/* next line may point outside of src, use prev. */
linePointers[2] = linePointers[0];
(this->*debayer1_)(dst, linePointers);
src += inputConfig_.stride;
dst += outputConfig_.stride;
}
}
void DebayerCpu::process4(const uint8_t *src, uint8_t *dst)
{
const unsigned int yEnd = window_.y + window_.height;
/*
* This holds pointers to [0] 2-lines-up [1] 1-line-up [2] current-line
* [3] 1-line-down [4] 2-lines-down.
*/
const uint8_t *linePointers[5];
/* Adjust src to top left corner of the window */
src += window_.y * inputConfig_.stride + window_.x * inputConfig_.bpp / 8;
/* [x] becomes [x - 1] after initial shiftLinePointers() call */
linePointers[1] = src - 2 * inputConfig_.stride;
linePointers[2] = src - inputConfig_.stride;
linePointers[3] = src;
linePointers[4] = src + inputConfig_.stride;
setupInputMemcpy(linePointers);
for (unsigned int y = window_.y; y < yEnd; y += 4) {
shiftLinePointers(linePointers, src);
memcpyNextLine(linePointers);
stats_->processLine0(y, linePointers);
(this->*debayer0_)(dst, linePointers);
src += inputConfig_.stride;
dst += outputConfig_.stride;
shiftLinePointers(linePointers, src);
memcpyNextLine(linePointers);
(this->*debayer1_)(dst, linePointers);
src += inputConfig_.stride;
dst += outputConfig_.stride;
shiftLinePointers(linePointers, src);
memcpyNextLine(linePointers);
stats_->processLine2(y, linePointers);
(this->*debayer2_)(dst, linePointers);
src += inputConfig_.stride;
dst += outputConfig_.stride;
shiftLinePointers(linePointers, src);
memcpyNextLine(linePointers);
(this->*debayer3_)(dst, linePointers);
src += inputConfig_.stride;
dst += outputConfig_.stride;
}
}
static inline int64_t timeDiff(timespec &after, timespec &before)
{
return (after.tv_sec - before.tv_sec) * 1000000000LL +
(int64_t)after.tv_nsec - (int64_t)before.tv_nsec;
}
void DebayerCpu::process(FrameBuffer *input, FrameBuffer *output, DebayerParams params)
{
timespec frameStartTime;
if (measuredFrames_ < DebayerCpu::kLastFrameToMeasure) {
frameStartTime = {};
clock_gettime(CLOCK_MONOTONIC_RAW, &frameStartTime);
}
green_ = params.green;
red_ = swapRedBlueGains_ ? params.blue : params.red;
blue_ = swapRedBlueGains_ ? params.red : params.blue;
/* Copy metadata from the input buffer */
FrameMetadata &metadata = output->_d()->metadata();
metadata.status = input->metadata().status;
metadata.sequence = input->metadata().sequence;
metadata.timestamp = input->metadata().timestamp;
MappedFrameBuffer in(input, MappedFrameBuffer::MapFlag::Read);
MappedFrameBuffer out(output, MappedFrameBuffer::MapFlag::Write);
if (!in.isValid() || !out.isValid()) {
LOG(Debayer, Error) << "mmap-ing buffer(s) failed";
metadata.status = FrameMetadata::FrameError;
return;
}
stats_->startFrame();
if (inputConfig_.patternSize.height == 2)
process2(in.planes()[0].data(), out.planes()[0].data());
else
process4(in.planes()[0].data(), out.planes()[0].data());
metadata.planes()[0].bytesused = out.planes()[0].size();
/* Measure before emitting signals */
if (measuredFrames_ < DebayerCpu::kLastFrameToMeasure &&
++measuredFrames_ > DebayerCpu::kFramesToSkip) {
timespec frameEndTime = {};
clock_gettime(CLOCK_MONOTONIC_RAW, &frameEndTime);
frameProcessTime_ += timeDiff(frameEndTime, frameStartTime);
if (measuredFrames_ == DebayerCpu::kLastFrameToMeasure) {
const unsigned int measuredFrames = DebayerCpu::kLastFrameToMeasure -
DebayerCpu::kFramesToSkip;
LOG(Debayer, Info)
<< "Processed " << measuredFrames
<< " frames in " << frameProcessTime_ / 1000 << "us, "
<< frameProcessTime_ / (1000 * measuredFrames)
<< " us/frame";
}
}
stats_->finishFrame();
outputBufferReady.emit(output);
inputBufferReady.emit(input);
}
SizeRange DebayerCpu::sizes(PixelFormat inputFormat, const Size &inputSize)
{
Size patternSize = this->patternSize(inputFormat);
unsigned int borderHeight = patternSize.height;
if (patternSize.isNull())
return {};
/* No need for top/bottom border with a pattern height of 2 */
if (patternSize.height == 2)
borderHeight = 0;
/*
* For debayer interpolation a border is kept around the entire image
 * and the minimum output size is pattern-width x pattern-height.
*/
if (inputSize.width < (3 * patternSize.width) ||
inputSize.height < (2 * borderHeight + patternSize.height)) {
LOG(Debayer, Warning)
<< "Input format size too small: " << inputSize.toString();
return {};
}
return SizeRange(Size(patternSize.width, patternSize.height),
Size((inputSize.width - 2 * patternSize.width) & ~(patternSize.width - 1),
(inputSize.height - 2 * borderHeight) & ~(patternSize.height - 1)),
patternSize.width, patternSize.height);
}
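/*
 * Worked example, for illustration only (the input size is hypothetical):
 * with a 4x4 pattern and a 1928x1088 input, borderHeight is 4, so the
 * maximum output size is (1928 - 2 * 4) & ~3 = 1920 by
 * (1088 - 2 * 4) & ~3 = 1080, while the minimum output size is 4x4.
 */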
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera | repos/libcamera/src/libcamera/software_isp/swstats_cpu.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2023, Linaro Ltd
* Copyright (C) 2023, Red Hat Inc.
*
* Authors:
* Hans de Goede <[email protected]>
*
* CPU based software statistics implementation
*/
#include "swstats_cpu.h"
#include <libcamera/base/log.h>
#include <libcamera/stream.h>
#include "libcamera/internal/bayer_format.h"
namespace libcamera {
/**
* \class SwStatsCpu
* \brief Class for gathering statistics on the CPU
*
* CPU based software ISP statistics implementation.
*
* This class offers a configure function + functions to gather statistics on a
* line by line basis. This allows CPU based software debayering to interleave
* debayering and statistics gathering on a line by line basis while the input
* data is still hot in the cache.
*
* It is also possible to specify a window over which to gather statistics
* instead of processing the whole frame.
*/
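/*
 * Minimal usage sketch, for illustration only. The configuration, line
 * pointers and loop bounds are hypothetical, and line skipping through
 * ySkipMask_ is omitted for brevity; DebayerCpu drives the class in this
 * order:
 *
 *   SwStatsCpu stats;
 *   stats.configure(inputCfg);
 *   stats.setWindow(Rectangle(inputCfg.size));
 *
 *   stats.startFrame();
 *   for (unsigned int y = 0; y < height; y += stats.patternSize().height) {
 *           stats.processLine0(y, linePointers);
 *           if (stats.patternSize().height == 4)
 *                   stats.processLine2(y, linePointers);
 *   }
 *   stats.finishFrame();
 */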
/**
* \fn bool SwStatsCpu::isValid() const
* \brief Gets whether the statistics object is valid
*
* \return True if it's valid, false otherwise
*/
/**
* \fn const SharedFD &SwStatsCpu::getStatsFD()
* \brief Get the file descriptor for the statistics
*
* \return The file descriptor
*/
/**
* \fn const Size &SwStatsCpu::patternSize()
* \brief Get the pattern size
*
 * For some input formats, e.g. Bayer data, processing is done multiple lines
 * and/or columns at a time. This function returns the width and height at
 * which the (Bayer) pattern repeats. Window values are rounded down to a
 * multiple of this size, and the height also indicates whether processLine2()
 * needs to be called.
* This may only be called after a successful configure() call.
*
* \return The pattern size
*/
/**
* \fn void SwStatsCpu::processLine0(unsigned int y, const uint8_t *src[])
* \brief Process line 0
* \param[in] y The y coordinate.
* \param[in] src The input data.
*
* This function processes line 0 for input formats with
* patternSize height == 1.
* It'll process line 0 and 1 for input formats with patternSize height >= 2.
* This function may only be called after a successful setWindow() call.
*/
/**
* \fn void SwStatsCpu::processLine2(unsigned int y, const uint8_t *src[])
* \brief Process line 2 and 3
* \param[in] y The y coordinate.
* \param[in] src The input data.
*
* This function processes line 2 and 3 for input formats with
* patternSize height == 4.
* This function may only be called after a successful setWindow() call.
*/
/**
* \var Signal<> SwStatsCpu::statsReady
* \brief Signals that the statistics are ready
*/
/**
* \typedef SwStatsCpu::statsProcessFn
* \brief Called when there is data to get statistics from
* \param[in] src The input data
*
* These functions take an array of (patternSize_.height + 1) src
 * pointers, each pointing to a line in the source image. The middle
* element of the array will point to the actual line being processed.
* Earlier element(s) will point to the previous line(s) and later
* element(s) to the next line(s).
*
* See the documentation of DebayerCpu::debayerFn for more details.
*/
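/*
 * For the 2-line-high Bayer patterns handled below this means an array of
 * three pointers: src[0] is the previous line, src[1] the line being
 * processed and src[2] the following line. The BGGR/GBRG helpers read
 * src[1] and src[2] as the two rows of each sampled 2x2 block.
 */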
/**
* \var unsigned int SwStatsCpu::ySkipMask_
* \brief Skip lines where this bitmask is set in y
*/
/**
* \var Rectangle SwStatsCpu::window_
* \brief Statistics window, set by setWindow(), used every line
*/
/**
* \var Size SwStatsCpu::patternSize_
* \brief The size of the bayer pattern
*
* Valid sizes are: 2x2, 4x2 or 4x4.
*/
/**
* \var unsigned int SwStatsCpu::xShift_
* \brief The offset of x, applied to window_.x for bayer variants
*
* This can either be 0 or 1.
*/
LOG_DEFINE_CATEGORY(SwStatsCpu)
SwStatsCpu::SwStatsCpu()
: sharedStats_("softIsp_stats")
{
if (!sharedStats_)
LOG(SwStatsCpu, Error)
<< "Failed to create shared memory for statistics";
}
static constexpr unsigned int kRedYMul = 77; /* 0.299 * 256 */
static constexpr unsigned int kGreenYMul = 150; /* 0.587 * 256 */
static constexpr unsigned int kBlueYMul = 29; /* 0.114 * 256 */
#define SWSTATS_START_LINE_STATS(pixel_t) \
pixel_t r, g, g2, b; \
uint64_t yVal; \
\
uint64_t sumR = 0; \
uint64_t sumG = 0; \
uint64_t sumB = 0;
#define SWSTATS_ACCUMULATE_LINE_STATS(div) \
sumR += r; \
sumG += g; \
sumB += b; \
\
yVal = r * kRedYMul; \
yVal += g * kGreenYMul; \
yVal += b * kBlueYMul; \
stats_.yHistogram[yVal * SwIspStats::kYHistogramSize / (256 * 256 * (div))]++;
#define SWSTATS_FINISH_LINE_STATS() \
stats_.sumR_ += sumR; \
stats_.sumG_ += sumG; \
stats_.sumB_ += sumB;
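/*
 * The multipliers above are the classic 0.299/0.587/0.114 luma weights in
 * 8-bit fixed point. Worked example, for illustration only, for an 8-bit
 * BGGR sample (div = 1) with r = 200, g = 100, b = 50:
 *
 *   yVal = 200 * 77 + 100 * 150 + 50 * 29 = 31850
 *
 * which is roughly 256 * Y with Y ~= 124. The final division by
 * 256 * 256 * div (div compensating for 10 and 12 bit samples) maps yVal
 * onto the kYHistogramSize histogram bins.
 */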
void SwStatsCpu::statsBGGR8Line0(const uint8_t *src[])
{
const uint8_t *src0 = src[1] + window_.x;
const uint8_t *src1 = src[2] + window_.x;
SWSTATS_START_LINE_STATS(uint8_t)
if (swapLines_)
std::swap(src0, src1);
/* x += 4 sample every other 2x2 block */
for (int x = 0; x < (int)window_.width; x += 4) {
b = src0[x];
g = src0[x + 1];
g2 = src1[x];
r = src1[x + 1];
g = (g + g2) / 2;
SWSTATS_ACCUMULATE_LINE_STATS(1)
}
SWSTATS_FINISH_LINE_STATS()
}
void SwStatsCpu::statsBGGR10Line0(const uint8_t *src[])
{
const uint16_t *src0 = (const uint16_t *)src[1] + window_.x;
const uint16_t *src1 = (const uint16_t *)src[2] + window_.x;
SWSTATS_START_LINE_STATS(uint16_t)
if (swapLines_)
std::swap(src0, src1);
/* x += 4 sample every other 2x2 block */
for (int x = 0; x < (int)window_.width; x += 4) {
b = src0[x];
g = src0[x + 1];
g2 = src1[x];
r = src1[x + 1];
g = (g + g2) / 2;
/* divide Y by 4 for 10 -> 8 bpp value */
SWSTATS_ACCUMULATE_LINE_STATS(4)
}
SWSTATS_FINISH_LINE_STATS()
}
void SwStatsCpu::statsBGGR12Line0(const uint8_t *src[])
{
const uint16_t *src0 = (const uint16_t *)src[1] + window_.x;
const uint16_t *src1 = (const uint16_t *)src[2] + window_.x;
SWSTATS_START_LINE_STATS(uint16_t)
if (swapLines_)
std::swap(src0, src1);
/* x += 4 sample every other 2x2 block */
for (int x = 0; x < (int)window_.width; x += 4) {
b = src0[x];
g = src0[x + 1];
g2 = src1[x];
r = src1[x + 1];
g = (g + g2) / 2;
/* divide Y by 16 for 12 -> 8 bpp value */
SWSTATS_ACCUMULATE_LINE_STATS(16)
}
SWSTATS_FINISH_LINE_STATS()
}
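/*
 * The two helpers below handle CSI2-packed 10-bit data, where each group of
 * four pixels is stored in five bytes: the first four bytes carry the eight
 * most significant bits of each pixel and the fifth byte packs the two
 * remaining low bits of all four pixels. Only the MSB bytes are sampled
 * here, which is why the values are treated as 8-bit and no extra division
 * of the Y value is needed.
 */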
void SwStatsCpu::statsBGGR10PLine0(const uint8_t *src[])
{
const uint8_t *src0 = src[1] + window_.x * 5 / 4;
const uint8_t *src1 = src[2] + window_.x * 5 / 4;
const int widthInBytes = window_.width * 5 / 4;
if (swapLines_)
std::swap(src0, src1);
SWSTATS_START_LINE_STATS(uint8_t)
/* x += 5 sample every other 2x2 block */
for (int x = 0; x < widthInBytes; x += 5) {
/* BGGR */
b = src0[x];
g = src0[x + 1];
g2 = src1[x];
r = src1[x + 1];
g = (g + g2) / 2;
/* Data is already 8 bits, divide by 1 */
SWSTATS_ACCUMULATE_LINE_STATS(1)
}
SWSTATS_FINISH_LINE_STATS()
}
void SwStatsCpu::statsGBRG10PLine0(const uint8_t *src[])
{
const uint8_t *src0 = src[1] + window_.x * 5 / 4;
const uint8_t *src1 = src[2] + window_.x * 5 / 4;
const int widthInBytes = window_.width * 5 / 4;
if (swapLines_)
std::swap(src0, src1);
SWSTATS_START_LINE_STATS(uint8_t)
/* x += 5 sample every other 2x2 block */
for (int x = 0; x < widthInBytes; x += 5) {
/* GBRG */
g = src0[x];
b = src0[x + 1];
r = src1[x];
g2 = src1[x + 1];
g = (g + g2) / 2;
/* Data is already 8 bits, divide by 1 */
SWSTATS_ACCUMULATE_LINE_STATS(1)
}
SWSTATS_FINISH_LINE_STATS()
}
/**
* \brief Reset state to start statistics gathering for a new frame
*
* This may only be called after a successful setWindow() call.
*/
void SwStatsCpu::startFrame(void)
{
if (window_.width == 0)
LOG(SwStatsCpu, Error) << "Calling startFrame() without setWindow()";
stats_.sumR_ = 0;
stats_.sumB_ = 0;
stats_.sumG_ = 0;
stats_.yHistogram.fill(0);
}
/**
* \brief Finish statistics calculation for the current frame
*
* This may only be called after a successful setWindow() call.
*/
void SwStatsCpu::finishFrame(void)
{
*sharedStats_ = stats_;
statsReady.emit();
}
/**
* \brief Setup SwStatsCpu object for standard Bayer orders
* \param[in] order The Bayer order
*
 * Check if \a order is a standard Bayer order and set up xShift_ and
 * swapLines_ so that a single BGGR stats function can be used for all 4
 * standard orders.
 *
 * \return 0 on success, a negative errno value for unsupported Bayer orders
 */
int SwStatsCpu::setupStandardBayerOrder(BayerFormat::Order order)
{
switch (order) {
case BayerFormat::BGGR:
xShift_ = 0;
swapLines_ = false;
break;
case BayerFormat::GBRG:
xShift_ = 1; /* BGGR -> GBRG */
swapLines_ = false;
break;
case BayerFormat::GRBG:
xShift_ = 0;
swapLines_ = true; /* BGGR -> GRBG */
break;
case BayerFormat::RGGB:
xShift_ = 1; /* BGGR -> GBRG */
swapLines_ = true; /* GBRG -> RGGB */
break;
default:
return -EINVAL;
}
patternSize_.height = 2;
patternSize_.width = 2;
	ySkipMask_ = 0x02; /* Skip every 3rd and 4th line */
return 0;
}
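/*
 * Illustration of the remapping above: the BGGR stats helpers read
 * src0[x] = B, src0[x + 1] = G, src1[x] = G and src1[x + 1] = R.
 *
 *   BGGR: read as-is
 *   GBRG: xShift_ = 1 skips the leading G column, so reads start on B
 *   GRBG: swapLines_ swaps src0 and src1, turning G R / B G into B G / G R
 *   RGGB: both adjustments combined
 */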
/**
 * \brief Configure the statistics object for the given input configuration
 * \param[in] inputCfg The input stream configuration
*
* \return 0 on success, a negative errno value on failure
*/
int SwStatsCpu::configure(const StreamConfiguration &inputCfg)
{
BayerFormat bayerFormat =
BayerFormat::fromPixelFormat(inputCfg.pixelFormat);
if (bayerFormat.packing == BayerFormat::Packing::None &&
setupStandardBayerOrder(bayerFormat.order) == 0) {
switch (bayerFormat.bitDepth) {
case 8:
stats0_ = &SwStatsCpu::statsBGGR8Line0;
return 0;
case 10:
stats0_ = &SwStatsCpu::statsBGGR10Line0;
return 0;
case 12:
stats0_ = &SwStatsCpu::statsBGGR12Line0;
return 0;
}
}
if (bayerFormat.bitDepth == 10 &&
bayerFormat.packing == BayerFormat::Packing::CSI2) {
patternSize_.height = 2;
patternSize_.width = 4; /* 5 bytes per *4* pixels */
		/* Skip every 3rd and 4th line, sample every other 2x2 block */
ySkipMask_ = 0x02;
xShift_ = 0;
switch (bayerFormat.order) {
case BayerFormat::BGGR:
case BayerFormat::GRBG:
stats0_ = &SwStatsCpu::statsBGGR10PLine0;
swapLines_ = bayerFormat.order == BayerFormat::GRBG;
return 0;
case BayerFormat::GBRG:
case BayerFormat::RGGB:
stats0_ = &SwStatsCpu::statsGBRG10PLine0;
swapLines_ = bayerFormat.order == BayerFormat::RGGB;
return 0;
default:
break;
}
}
LOG(SwStatsCpu, Info)
<< "Unsupported input format " << inputCfg.pixelFormat.toString();
return -EINVAL;
}
/**
* \brief Specify window coordinates over which to gather statistics
 * \param[in] window The window over which to gather statistics
*/
void SwStatsCpu::setWindow(const Rectangle &window)
{
window_ = window;
window_.x &= ~(patternSize_.width - 1);
window_.x += xShift_;
window_.y &= ~(patternSize_.height - 1);
	/* Reduce the width by xShift_ to make sure the window fits */
window_.width -= xShift_;
window_.width &= ~(patternSize_.width - 1);
window_.height &= ~(patternSize_.height - 1);
}
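/*
 * Worked example with hypothetical values: for a 2x2 pattern with
 * xShift_ = 1 and a requested window of x = 5, width = 640, the code above
 * first aligns x down to 4 and then adds the shift, giving x = 5; the width
 * is reduced to 639 and aligned down to 638 so that the sampled columns
 * stay within the requested window.
 */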
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera | repos/libcamera/src/libcamera/converter/converter_v4l2_m2m.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2020, Laurent Pinchart
* Copyright 2022 NXP
*
* V4L2 M2M Format converter
*/
#include "libcamera/internal/converter/converter_v4l2_m2m.h"
#include <algorithm>
#include <limits.h>
#include <libcamera/base/log.h>
#include <libcamera/base/signal.h>
#include <libcamera/base/utils.h>
#include <libcamera/framebuffer.h>
#include <libcamera/geometry.h>
#include <libcamera/stream.h>
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/v4l2_videodevice.h"
/**
* \file internal/converter/converter_v4l2_m2m.h
* \brief V4L2 M2M based converter
*/
namespace libcamera {
LOG_DECLARE_CATEGORY(Converter)
/* -----------------------------------------------------------------------------
* V4L2M2MConverter::Stream
*/
V4L2M2MConverter::Stream::Stream(V4L2M2MConverter *converter, unsigned int index)
: converter_(converter), index_(index)
{
m2m_ = std::make_unique<V4L2M2MDevice>(converter->deviceNode());
m2m_->output()->bufferReady.connect(this, &Stream::outputBufferReady);
m2m_->capture()->bufferReady.connect(this, &Stream::captureBufferReady);
int ret = m2m_->open();
if (ret < 0)
m2m_.reset();
}
int V4L2M2MConverter::Stream::configure(const StreamConfiguration &inputCfg,
const StreamConfiguration &outputCfg)
{
V4L2PixelFormat videoFormat =
m2m_->output()->toV4L2PixelFormat(inputCfg.pixelFormat);
V4L2DeviceFormat format;
format.fourcc = videoFormat;
format.size = inputCfg.size;
format.planesCount = 1;
format.planes[0].bpl = inputCfg.stride;
int ret = m2m_->output()->setFormat(&format);
if (ret < 0) {
LOG(Converter, Error)
<< "Failed to set input format: " << strerror(-ret);
return ret;
}
if (format.fourcc != videoFormat || format.size != inputCfg.size ||
format.planes[0].bpl != inputCfg.stride) {
LOG(Converter, Error)
<< "Input format not supported (requested "
<< inputCfg.size << "-" << videoFormat
<< ", got " << format << ")";
return -EINVAL;
}
/* Set the pixel format and size on the output. */
videoFormat = m2m_->capture()->toV4L2PixelFormat(outputCfg.pixelFormat);
format = {};
format.fourcc = videoFormat;
format.size = outputCfg.size;
ret = m2m_->capture()->setFormat(&format);
if (ret < 0) {
LOG(Converter, Error)
<< "Failed to set output format: " << strerror(-ret);
return ret;
}
if (format.fourcc != videoFormat || format.size != outputCfg.size) {
LOG(Converter, Error)
<< "Output format not supported";
return -EINVAL;
}
inputBufferCount_ = inputCfg.bufferCount;
outputBufferCount_ = outputCfg.bufferCount;
return 0;
}
int V4L2M2MConverter::Stream::exportBuffers(unsigned int count,
std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
return m2m_->capture()->exportBuffers(count, buffers);
}
int V4L2M2MConverter::Stream::start()
{
int ret = m2m_->output()->importBuffers(inputBufferCount_);
if (ret < 0)
return ret;
ret = m2m_->capture()->importBuffers(outputBufferCount_);
if (ret < 0) {
stop();
return ret;
}
ret = m2m_->output()->streamOn();
if (ret < 0) {
stop();
return ret;
}
ret = m2m_->capture()->streamOn();
if (ret < 0) {
stop();
return ret;
}
return 0;
}
void V4L2M2MConverter::Stream::stop()
{
m2m_->capture()->streamOff();
m2m_->output()->streamOff();
m2m_->capture()->releaseBuffers();
m2m_->output()->releaseBuffers();
}
int V4L2M2MConverter::Stream::queueBuffers(FrameBuffer *input, FrameBuffer *output)
{
int ret = m2m_->output()->queueBuffer(input);
if (ret < 0)
return ret;
ret = m2m_->capture()->queueBuffer(output);
if (ret < 0)
return ret;
return 0;
}
std::string V4L2M2MConverter::Stream::logPrefix() const
{
return "stream" + std::to_string(index_);
}
void V4L2M2MConverter::Stream::outputBufferReady(FrameBuffer *buffer)
{
auto it = converter_->queue_.find(buffer);
if (it == converter_->queue_.end())
return;
if (!--it->second) {
converter_->inputBufferReady.emit(buffer);
converter_->queue_.erase(it);
}
}
void V4L2M2MConverter::Stream::captureBufferReady(FrameBuffer *buffer)
{
converter_->outputBufferReady.emit(buffer);
}
/* -----------------------------------------------------------------------------
* V4L2M2MConverter
*/
/**
* \class libcamera::V4L2M2MConverter
 * \brief The V4L2 M2M converter implements the converter interface based on
 * a V4L2 memory-to-memory device.
*/
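/*
 * Minimal usage sketch from a pipeline handler's point of view, for
 * illustration only. Error handling is omitted and the media device,
 * configurations and buffers are hypothetical:
 *
 *   V4L2M2MConverter converter(media);
 *   if (!converter.isValid())
 *           return;
 *
 *   converter.configure(inputCfg, { std::ref(outputCfg) });
 *   converter.exportBuffers(0, count, &buffers);
 *   converter.start();
 *   converter.queueBuffers(input, { { 0, output } });
 */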
/**
* \fn V4L2M2MConverter::V4L2M2MConverter
* \brief Construct a V4L2M2MConverter instance
* \param[in] media The media device implementing the converter
*/
V4L2M2MConverter::V4L2M2MConverter(MediaDevice *media)
: Converter(media)
{
if (deviceNode().empty())
return;
m2m_ = std::make_unique<V4L2M2MDevice>(deviceNode());
int ret = m2m_->open();
if (ret < 0) {
m2m_.reset();
return;
}
}
/**
* \fn libcamera::V4L2M2MConverter::loadConfiguration
* \details \copydetails libcamera::Converter::loadConfiguration
*/
/**
* \fn libcamera::V4L2M2MConverter::isValid
* \details \copydetails libcamera::Converter::isValid
*/
/**
* \fn libcamera::V4L2M2MConverter::formats
* \details \copydetails libcamera::Converter::formats
*/
std::vector<PixelFormat> V4L2M2MConverter::formats(PixelFormat input)
{
if (!m2m_)
return {};
/*
* Set the format on the input side (V4L2 output) of the converter to
* enumerate the conversion capabilities on its output (V4L2 capture).
*/
V4L2DeviceFormat v4l2Format;
v4l2Format.fourcc = m2m_->output()->toV4L2PixelFormat(input);
v4l2Format.size = { 1, 1 };
int ret = m2m_->output()->setFormat(&v4l2Format);
if (ret < 0) {
LOG(Converter, Error)
<< "Failed to set format: " << strerror(-ret);
return {};
}
if (v4l2Format.fourcc != m2m_->output()->toV4L2PixelFormat(input)) {
LOG(Converter, Debug)
<< "Input format " << input << " not supported.";
return {};
}
std::vector<PixelFormat> pixelFormats;
for (const auto &format : m2m_->capture()->formats()) {
PixelFormat pixelFormat = format.first.toPixelFormat();
if (pixelFormat)
pixelFormats.push_back(pixelFormat);
}
return pixelFormats;
}
/**
* \copydoc libcamera::Converter::sizes
*/
SizeRange V4L2M2MConverter::sizes(const Size &input)
{
if (!m2m_)
return {};
/*
* Set the size on the input side (V4L2 output) of the converter to
* enumerate the scaling capabilities on its output (V4L2 capture).
*/
V4L2DeviceFormat format;
format.fourcc = V4L2PixelFormat();
format.size = input;
int ret = m2m_->output()->setFormat(&format);
if (ret < 0) {
LOG(Converter, Error)
<< "Failed to set format: " << strerror(-ret);
return {};
}
SizeRange sizes;
format.size = { 1, 1 };
ret = m2m_->capture()->setFormat(&format);
if (ret < 0) {
LOG(Converter, Error)
<< "Failed to set format: " << strerror(-ret);
return {};
}
sizes.min = format.size;
format.size = { UINT_MAX, UINT_MAX };
ret = m2m_->capture()->setFormat(&format);
if (ret < 0) {
LOG(Converter, Error)
<< "Failed to set format: " << strerror(-ret);
return {};
}
sizes.max = format.size;
return sizes;
}
/**
* \copydoc libcamera::Converter::strideAndFrameSize
*/
std::tuple<unsigned int, unsigned int>
V4L2M2MConverter::strideAndFrameSize(const PixelFormat &pixelFormat,
const Size &size)
{
V4L2DeviceFormat format;
format.fourcc = m2m_->capture()->toV4L2PixelFormat(pixelFormat);
format.size = size;
int ret = m2m_->capture()->tryFormat(&format);
if (ret < 0)
return std::make_tuple(0, 0);
return std::make_tuple(format.planes[0].bpl, format.planes[0].size);
}
/**
* \copydoc libcamera::Converter::configure
*/
int V4L2M2MConverter::configure(const StreamConfiguration &inputCfg,
const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs)
{
int ret = 0;
streams_.clear();
streams_.reserve(outputCfgs.size());
for (unsigned int i = 0; i < outputCfgs.size(); ++i) {
Stream &stream = streams_.emplace_back(this, i);
if (!stream.isValid()) {
LOG(Converter, Error)
<< "Failed to create stream " << i;
ret = -EINVAL;
break;
}
ret = stream.configure(inputCfg, outputCfgs[i]);
if (ret < 0)
break;
}
if (ret < 0) {
streams_.clear();
return ret;
}
return 0;
}
/**
* \copydoc libcamera::Converter::exportBuffers
*/
int V4L2M2MConverter::exportBuffers(unsigned int output, unsigned int count,
std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
if (output >= streams_.size())
return -EINVAL;
return streams_[output].exportBuffers(count, buffers);
}
/**
* \copydoc libcamera::Converter::start
*/
int V4L2M2MConverter::start()
{
int ret;
for (Stream &stream : streams_) {
ret = stream.start();
if (ret < 0) {
stop();
return ret;
}
}
return 0;
}
/**
* \copydoc libcamera::Converter::stop
*/
void V4L2M2MConverter::stop()
{
for (Stream &stream : utils::reverse(streams_))
stream.stop();
}
/**
* \copydoc libcamera::Converter::queueBuffers
*/
int V4L2M2MConverter::queueBuffers(FrameBuffer *input,
const std::map<unsigned int, FrameBuffer *> &outputs)
{
unsigned int mask = 0;
int ret;
/*
* Validate the outputs as a sanity check: at least one output is
* required, all outputs must reference a valid stream and no two
* outputs can reference the same stream.
*/
if (outputs.empty())
return -EINVAL;
for (auto [index, buffer] : outputs) {
if (!buffer)
return -EINVAL;
if (index >= streams_.size())
return -EINVAL;
if (mask & (1 << index))
return -EINVAL;
mask |= 1 << index;
}
/* Queue the input and output buffers to all the streams. */
for (auto [index, buffer] : outputs) {
ret = streams_[index].queueBuffers(input, buffer);
if (ret < 0)
return ret;
}
/*
* Add the input buffer to the queue, with the number of streams as a
* reference count. Completion of the input buffer will be signalled by
* the stream that releases the last reference.
*/
queue_.emplace(std::piecewise_construct,
std::forward_as_tuple(input),
std::forward_as_tuple(outputs.size()));
return 0;
}
static std::initializer_list<std::string> compatibles = {
"mtk-mdp",
"pxp",
};
REGISTER_CONVERTER("v4l2_m2m", V4L2M2MConverter, compatibles)
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera | repos/libcamera/src/libcamera/sensor/camera_sensor_properties.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2021, Google Inc.
*
* Database of camera sensor properties
*/
#include "libcamera/internal/camera_sensor_properties.h"
#include <map>
#include <libcamera/base/log.h>
#include <libcamera/control_ids.h>
/**
* \file camera_sensor_properties.h
* \brief Database of camera sensor properties
*
* The database of camera sensor properties collects static information about
* camera sensors that is not possible or desirable to retrieve from the device
* at run time.
*
* The database is indexed using the camera sensor model, as reported by the
* properties::Model property, and for each supported sensor it contains a
* list of properties.
*/
namespace libcamera {
LOG_DEFINE_CATEGORY(CameraSensorProperties)
/**
* \struct CameraSensorProperties
* \brief Database of camera sensor properties
*
* \var CameraSensorProperties::unitCellSize
* \brief The physical size of a pixel, including pixel edges, in nanometers.
*
* \var CameraSensorProperties::testPatternModes
* \brief Map that associates the TestPattern control value with the indexes of
* the corresponding sensor test pattern modes as returned by
* V4L2_CID_TEST_PATTERN.
*/
/**
* \brief Retrieve the properties associated with a sensor
* \param sensor The sensor model name as reported by properties::Model
* \return A pointer to the CameraSensorProperties instance associated with a sensor
* or nullptr if the sensor is not supported
*/
const CameraSensorProperties *CameraSensorProperties::get(const std::string &sensor)
{
static const std::map<std::string, const CameraSensorProperties> sensorProps = {
{ "ar0521", {
.unitCellSize = { 2200, 2200 },
.testPatternModes = {
{ controls::draft::TestPatternModeOff, 0 },
{ controls::draft::TestPatternModeSolidColor, 1 },
{ controls::draft::TestPatternModeColorBars, 2 },
{ controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
},
} },
{ "hi846", {
.unitCellSize = { 1120, 1120 },
.testPatternModes = {
{ controls::draft::TestPatternModeOff, 0 },
{ controls::draft::TestPatternModeSolidColor, 1 },
{ controls::draft::TestPatternModeColorBars, 2 },
{ controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
{ controls::draft::TestPatternModePn9, 4 },
/*
* No corresponding test pattern mode for:
* 5: "Gradient Horizontal"
* 6: "Gradient Vertical"
* 7: "Check Board"
* 8: "Slant Pattern"
* 9: "Resolution Pattern"
*/
},
} },
{ "imx219", {
.unitCellSize = { 1120, 1120 },
.testPatternModes = {
{ controls::draft::TestPatternModeOff, 0 },
{ controls::draft::TestPatternModeColorBars, 1 },
{ controls::draft::TestPatternModeSolidColor, 2 },
{ controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
{ controls::draft::TestPatternModePn9, 4 },
},
} },
{ "imx258", {
.unitCellSize = { 1120, 1120 },
.testPatternModes = {
{ controls::draft::TestPatternModeOff, 0 },
{ controls::draft::TestPatternModeSolidColor, 1 },
{ controls::draft::TestPatternModeColorBars, 2 },
{ controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
{ controls::draft::TestPatternModePn9, 4 },
},
} },
{ "imx283", {
.unitCellSize = { 2400, 2400 },
.testPatternModes = {},
} },
{ "imx290", {
.unitCellSize = { 2900, 2900 },
.testPatternModes = {},
} },
{ "imx296", {
.unitCellSize = { 3450, 3450 },
.testPatternModes = {},
} },
{ "imx327", {
.unitCellSize = { 2900, 2900 },
.testPatternModes = {},
} },
{ "imx335", {
.unitCellSize = { 2000, 2000 },
.testPatternModes = {},
} },
{ "imx415", {
.unitCellSize = { 1450, 1450 },
.testPatternModes = {},
} },
{ "imx477", {
.unitCellSize = { 1550, 1550 },
.testPatternModes = {},
} },
{ "imx519", {
.unitCellSize = { 1220, 1220 },
.testPatternModes = {
{ controls::draft::TestPatternModeOff, 0 },
{ controls::draft::TestPatternModeSolidColor, 2 },
{ controls::draft::TestPatternModePn9, 4 },
/*
* The driver reports ColorBars and ColorBarsFadeToGray as well but
* these two patterns do not comply with MIPI CCS v1.1 (Section 10.1).
*/
},
} },
{ "imx708", {
.unitCellSize = { 1400, 1400 },
.testPatternModes = {
{ controls::draft::TestPatternModeOff, 0 },
{ controls::draft::TestPatternModeColorBars, 1 },
{ controls::draft::TestPatternModeSolidColor, 2 },
{ controls::draft::TestPatternModeColorBarsFadeToGray, 3 },
{ controls::draft::TestPatternModePn9, 4 },
},
} },
{ "ov2685", {
.unitCellSize = { 1750, 1750 },
.testPatternModes = {
{ controls::draft::TestPatternModeOff, 0 },
{ controls::draft::TestPatternModeColorBars, 1},
{ controls::draft::TestPatternModeColorBarsFadeToGray, 2 },
/*
* No corresponding test pattern mode for:
* 3: "Random Data"
* 4: "Black White Square"
* 5: "Color Square"
*/
},
} },
{ "ov2740", {
.unitCellSize = { 1400, 1400 },
.testPatternModes = {
{ controls::draft::TestPatternModeOff, 0 },
{ controls::draft::TestPatternModeColorBars, 1},
},
} },
{ "ov4689", {
.unitCellSize = { 2000, 2000 },
.testPatternModes = {
{ controls::draft::TestPatternModeOff, 0 },
{ controls::draft::TestPatternModeColorBars, 1},
{ controls::draft::TestPatternModeColorBarsFadeToGray, 2},
/*
* No corresponding test patterns in
* MIPI CCS specification for sensor's
* colorBarType2 and colorBarType3.
*/
},
} },
{ "ov5640", {
.unitCellSize = { 1400, 1400 },
.testPatternModes = {
{ controls::draft::TestPatternModeOff, 0 },
{ controls::draft::TestPatternModeColorBars, 1 },
},
} },
{ "ov5647", {
.unitCellSize = { 1400, 1400 },
.testPatternModes = {},
} },
{ "ov5670", {
.unitCellSize = { 1120, 1120 },
.testPatternModes = {
{ controls::draft::TestPatternModeOff, 0 },
{ controls::draft::TestPatternModeColorBars, 1 },
},
} },
{ "ov5675", {
.unitCellSize = { 1120, 1120 },
.testPatternModes = {
{ controls::draft::TestPatternModeOff, 0 },
{ controls::draft::TestPatternModeColorBars, 1 },
},
} },
{ "ov5693", {
.unitCellSize = { 1400, 1400 },
.testPatternModes = {
{ controls::draft::TestPatternModeOff, 0 },
{ controls::draft::TestPatternModeColorBars, 2 },
/*
* No corresponding test pattern mode for
* 1: "Random data" and 3: "Colour Bars with
* Rolling Bar".
*/
},
} },
{ "ov64a40", {
.unitCellSize = { 1008, 1008 },
.testPatternModes = {
{ controls::draft::TestPatternModeOff, 0 },
{ controls::draft::TestPatternModeColorBars, 1 },
{ controls::draft::TestPatternModeColorBarsFadeToGray, 2 },
/*
				 * No corresponding test pattern mode for:
* 3: "Vertical Color Bar Type 3",
* 4: "Vertical Color Bar Type 4"
*/
},
} },
{ "ov8858", {
.unitCellSize = { 1120, 1120 },
.testPatternModes = {
{ controls::draft::TestPatternModeOff, 0 },
{ controls::draft::TestPatternModeColorBars, 1 },
{ controls::draft::TestPatternModeColorBarsFadeToGray, 2 },
/*
				 * No corresponding test pattern mode for:
* 3: "Vertical Color Bar Type 3",
* 4: "Vertical Color Bar Type 4"
*/
},
} },
{ "ov8865", {
.unitCellSize = { 1400, 1400 },
.testPatternModes = {
{ controls::draft::TestPatternModeOff, 0 },
{ controls::draft::TestPatternModeColorBars, 2 },
/*
* No corresponding test pattern mode for:
* 1: "Random data"
* 3: "Color bars with rolling bar"
* 4: "Color squares"
* 5: "Color squares with rolling bar"
*/
},
} },
{ "ov13858", {
.unitCellSize = { 1120, 1120 },
.testPatternModes = {
{ controls::draft::TestPatternModeOff, 0 },
{ controls::draft::TestPatternModeColorBars, 1 },
},
} },
};
const auto it = sensorProps.find(sensor);
if (it == sensorProps.end()) {
LOG(CameraSensorProperties, Warning)
<< "No static properties available for '" << sensor << "'";
LOG(CameraSensorProperties, Warning)
<< "Please consider updating the camera sensor properties database";
return nullptr;
}
return &it->second;
}
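/*
 * Illustrative lookup, for example from a CameraSensor initialisation path
 * (the sensor model and property list are hypothetical):
 *
 *   const CameraSensorProperties *props = CameraSensorProperties::get("imx219");
 *   if (props)
 *           properties.set(properties::UnitCellSize, props->unitCellSize);
 */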
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera | repos/libcamera/src/libcamera/sensor/camera_sensor.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* A camera sensor
*/
#include "libcamera/internal/camera_sensor.h"
#include "libcamera/internal/media_device.h"
#include <algorithm>
#include <float.h>
#include <iomanip>
#include <limits.h>
#include <math.h>
#include <string.h>
#include <libcamera/camera.h>
#include <libcamera/orientation.h>
#include <libcamera/property_ids.h>
#include <libcamera/base/utils.h>
#include "libcamera/internal/bayer_format.h"
#include "libcamera/internal/camera_lens.h"
#include "libcamera/internal/camera_sensor_properties.h"
#include "libcamera/internal/formats.h"
#include "libcamera/internal/sysfs.h"
/**
* \file camera_sensor.h
* \brief A camera sensor
*/
namespace libcamera {
LOG_DEFINE_CATEGORY(CameraSensor)
/**
* \class CameraSensor
* \brief A camera sensor based on V4L2 subdevices
*
* The CameraSensor class eases handling of sensors for pipeline handlers by
* hiding the details of the V4L2 subdevice kernel API and caching sensor
* information.
*
* The implementation is currently limited to sensors that expose a single V4L2
* subdevice with a single pad. It will be extended to support more complex
* devices as the needs arise.
*/
/**
* \brief Construct a CameraSensor
* \param[in] entity The media entity backing the camera sensor
*
* Once constructed the instance must be initialized with init().
*/
CameraSensor::CameraSensor(const MediaEntity *entity)
: entity_(entity), pad_(UINT_MAX), staticProps_(nullptr),
bayerFormat_(nullptr), supportFlips_(false),
flipsAlterBayerOrder_(false), properties_(properties::properties)
{
}
/**
* \brief Destroy a CameraSensor
*/
CameraSensor::~CameraSensor()
{
}
/**
* \brief Initialize the camera sensor instance
*
* This function performs the initialisation steps of the CameraSensor that may
* fail. It shall be called once and only once after constructing the instance.
*
* \return 0 on success or a negative error code otherwise
*/
int CameraSensor::init()
{
for (const MediaPad *pad : entity_->pads()) {
if (pad->flags() & MEDIA_PAD_FL_SOURCE) {
pad_ = pad->index();
break;
}
}
if (pad_ == UINT_MAX) {
LOG(CameraSensor, Error)
			<< "No source pad found on the sensor";
return -EINVAL;
}
switch (entity_->function()) {
case MEDIA_ENT_F_CAM_SENSOR:
case MEDIA_ENT_F_PROC_VIDEO_ISP:
break;
default:
LOG(CameraSensor, Error)
<< "Invalid sensor function "
<< utils::hex(entity_->function());
return -EINVAL;
}
/* Create and open the subdev. */
subdev_ = std::make_unique<V4L2Subdevice>(entity_);
int ret = subdev_->open();
if (ret < 0)
return ret;
/*
* Clear any flips to be sure we get the "native" Bayer order. This is
* harmless for sensors where the flips don't affect the Bayer order.
*/
ControlList ctrls(subdev_->controls());
if (subdev_->controls().find(V4L2_CID_HFLIP) != subdev_->controls().end())
ctrls.set(V4L2_CID_HFLIP, 0);
if (subdev_->controls().find(V4L2_CID_VFLIP) != subdev_->controls().end())
ctrls.set(V4L2_CID_VFLIP, 0);
subdev_->setControls(&ctrls);
/* Enumerate, sort and cache media bus codes and sizes. */
formats_ = subdev_->formats(pad_);
if (formats_.empty()) {
LOG(CameraSensor, Error) << "No image format found";
return -EINVAL;
}
mbusCodes_ = utils::map_keys(formats_);
std::sort(mbusCodes_.begin(), mbusCodes_.end());
for (const auto &format : formats_) {
const std::vector<SizeRange> &ranges = format.second;
std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes_),
[](const SizeRange &range) { return range.max; });
}
std::sort(sizes_.begin(), sizes_.end());
/* Remove duplicates. */
auto last = std::unique(sizes_.begin(), sizes_.end());
sizes_.erase(last, sizes_.end());
/*
* VIMC is a bit special, as it does not yet support all the mandatory
* requirements regular sensors have to respect.
*
* Do not validate the driver if it's VIMC and initialize the sensor
* properties with static information.
*
* \todo Remove the special case once the VIMC driver has been
* updated in all test platforms.
*/
if (entity_->device()->driver() == "vimc") {
initVimcDefaultProperties();
ret = initProperties();
if (ret)
return ret;
return discoverAncillaryDevices();
}
/* Get the color filter array pattern (only for RAW sensors). */
for (unsigned int mbusCode : mbusCodes_) {
const BayerFormat &bayerFormat = BayerFormat::fromMbusCode(mbusCode);
if (bayerFormat.isValid()) {
bayerFormat_ = &bayerFormat;
break;
}
}
ret = validateSensorDriver();
if (ret)
return ret;
ret = initProperties();
if (ret)
return ret;
ret = discoverAncillaryDevices();
if (ret)
return ret;
/*
* Set HBLANK to the minimum to start with a well-defined line length,
* allowing IPA modules that do not modify HBLANK to use the sensor
* minimum line length in their calculations.
*/
const struct v4l2_query_ext_ctrl *hblankInfo = subdev_->controlInfo(V4L2_CID_HBLANK);
if (hblankInfo && !(hblankInfo->flags & V4L2_CTRL_FLAG_READ_ONLY)) {
ControlList ctrl(subdev_->controls());
ctrl.set(V4L2_CID_HBLANK, static_cast<int32_t>(hblankInfo->minimum));
ret = subdev_->setControls(&ctrl);
if (ret)
return ret;
}
return applyTestPatternMode(controls::draft::TestPatternModeEnum::TestPatternModeOff);
}
int CameraSensor::generateId()
{
const std::string devPath = subdev_->devicePath();
/* Try to get ID from firmware description. */
id_ = sysfs::firmwareNodePath(devPath);
if (!id_.empty())
return 0;
/*
* Virtual sensors not described in firmware
*
* Verify it's a platform device and construct ID from the device path
* and model of sensor.
*/
if (devPath.find("/sys/devices/platform/", 0) == 0) {
id_ = devPath.substr(strlen("/sys/devices/")) + " " + model();
return 0;
}
LOG(CameraSensor, Error) << "Can't generate sensor ID";
return -EINVAL;
}
int CameraSensor::validateSensorDriver()
{
int err = 0;
/*
* Optional controls are used to register optional sensor properties. If
* not present, some values will be defaulted.
*/
static constexpr uint32_t optionalControls[] = {
V4L2_CID_CAMERA_SENSOR_ROTATION,
};
const ControlIdMap &controls = subdev_->controls().idmap();
for (uint32_t ctrl : optionalControls) {
if (!controls.count(ctrl))
LOG(CameraSensor, Debug)
<< "Optional V4L2 control " << utils::hex(ctrl)
<< " not supported";
}
/*
* Recommended controls are similar to optional controls, but will
* become mandatory in the near future. Be loud if they're missing.
*/
static constexpr uint32_t recommendedControls[] = {
V4L2_CID_CAMERA_ORIENTATION,
};
for (uint32_t ctrl : recommendedControls) {
if (!controls.count(ctrl)) {
LOG(CameraSensor, Warning)
<< "Recommended V4L2 control " << utils::hex(ctrl)
<< " not supported";
err = -EINVAL;
}
}
/*
* Verify if sensor supports horizontal/vertical flips
*
* \todo Handle horizontal and vertical flips independently.
*/
const struct v4l2_query_ext_ctrl *hflipInfo = subdev_->controlInfo(V4L2_CID_HFLIP);
const struct v4l2_query_ext_ctrl *vflipInfo = subdev_->controlInfo(V4L2_CID_VFLIP);
if (hflipInfo && !(hflipInfo->flags & V4L2_CTRL_FLAG_READ_ONLY) &&
vflipInfo && !(vflipInfo->flags & V4L2_CTRL_FLAG_READ_ONLY)) {
supportFlips_ = true;
if (hflipInfo->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT ||
vflipInfo->flags & V4L2_CTRL_FLAG_MODIFY_LAYOUT)
flipsAlterBayerOrder_ = true;
}
if (!supportFlips_)
LOG(CameraSensor, Debug)
<< "Camera sensor does not support horizontal/vertical flip";
/*
* Make sure the required selection targets are supported.
*
* Failures in reading any of the targets are not deemed to be fatal,
* but some properties and features, like constructing a
* IPACameraSensorInfo for the IPA module, won't be supported.
*
* \todo Make support for selection targets mandatory as soon as all
* test platforms have been updated.
*/
Rectangle rect;
int ret = subdev_->getSelection(pad_, V4L2_SEL_TGT_CROP_BOUNDS, &rect);
if (ret) {
/*
* Default the pixel array size to the largest size supported
* by the sensor. The sizes_ vector is sorted in ascending
* order, the largest size is thus the last element.
*/
pixelArraySize_ = sizes_.back();
LOG(CameraSensor, Warning)
<< "The PixelArraySize property has been defaulted to "
<< pixelArraySize_;
err = -EINVAL;
} else {
pixelArraySize_ = rect.size();
}
ret = subdev_->getSelection(pad_, V4L2_SEL_TGT_CROP_DEFAULT, &activeArea_);
if (ret) {
activeArea_ = Rectangle(pixelArraySize_);
LOG(CameraSensor, Warning)
<< "The PixelArrayActiveAreas property has been defaulted to "
<< activeArea_;
err = -EINVAL;
}
ret = subdev_->getSelection(pad_, V4L2_SEL_TGT_CROP, &rect);
if (ret) {
LOG(CameraSensor, Warning)
<< "Failed to retrieve the sensor crop rectangle";
err = -EINVAL;
}
if (err) {
LOG(CameraSensor, Warning)
<< "The sensor kernel driver needs to be fixed";
LOG(CameraSensor, Warning)
<< "See Documentation/sensor_driver_requirements.rst in the libcamera sources for more information";
}
if (!bayerFormat_)
return 0;
/*
* For raw sensors, make sure the sensor driver supports the controls
* required by the CameraSensor class.
*/
static constexpr uint32_t mandatoryControls[] = {
V4L2_CID_ANALOGUE_GAIN,
V4L2_CID_EXPOSURE,
V4L2_CID_HBLANK,
V4L2_CID_PIXEL_RATE,
V4L2_CID_VBLANK,
};
err = 0;
for (uint32_t ctrl : mandatoryControls) {
if (!controls.count(ctrl)) {
LOG(CameraSensor, Error)
<< "Mandatory V4L2 control " << utils::hex(ctrl)
<< " not available";
err = -EINVAL;
}
}
if (err) {
LOG(CameraSensor, Error)
<< "The sensor kernel driver needs to be fixed";
LOG(CameraSensor, Error)
<< "See Documentation/sensor_driver_requirements.rst in the libcamera sources for more information";
return err;
}
return 0;
}
/*
 * \brief Initialize properties that cannot be initialized by the
* regular initProperties() function for VIMC
*/
void CameraSensor::initVimcDefaultProperties()
{
/* Use the largest supported size. */
pixelArraySize_ = sizes_.back();
activeArea_ = Rectangle(pixelArraySize_);
}
void CameraSensor::initStaticProperties()
{
staticProps_ = CameraSensorProperties::get(model_);
if (!staticProps_)
return;
/* Register the properties retrieved from the sensor database. */
properties_.set(properties::UnitCellSize, staticProps_->unitCellSize);
initTestPatternModes();
}
void CameraSensor::initTestPatternModes()
{
const auto &v4l2TestPattern = controls().find(V4L2_CID_TEST_PATTERN);
if (v4l2TestPattern == controls().end()) {
LOG(CameraSensor, Debug) << "V4L2_CID_TEST_PATTERN is not supported";
return;
}
const auto &testPatternModes = staticProps_->testPatternModes;
if (testPatternModes.empty()) {
/*
* The camera sensor supports test patterns but we don't know
* how to map them so this should be fixed.
*/
LOG(CameraSensor, Debug) << "No static test pattern map for \'"
<< model() << "\'";
return;
}
/*
* Create a map that associates the V4L2 control index to the test
* pattern mode by reversing the testPatternModes map provided by the
* camera sensor properties. This makes it easier to verify if the
 * control index is supported in the for loop below that creates the
* list of supported test patterns.
*/
std::map<int32_t, controls::draft::TestPatternModeEnum> indexToTestPatternMode;
for (const auto &it : testPatternModes)
indexToTestPatternMode[it.second] = it.first;
for (const ControlValue &value : v4l2TestPattern->second.values()) {
const int32_t index = value.get<int32_t>();
const auto it = indexToTestPatternMode.find(index);
if (it == indexToTestPatternMode.end()) {
LOG(CameraSensor, Debug)
<< "Test pattern mode " << index << " ignored";
continue;
}
testPatternModes_.push_back(it->second);
}
}
int CameraSensor::initProperties()
{
model_ = subdev_->model();
properties_.set(properties::Model, utils::toAscii(model_));
/* Generate a unique ID for the sensor. */
int ret = generateId();
if (ret)
return ret;
/* Initialize the static properties from the sensor database. */
initStaticProperties();
/* Retrieve and register properties from the kernel interface. */
const ControlInfoMap &controls = subdev_->controls();
const auto &orientation = controls.find(V4L2_CID_CAMERA_ORIENTATION);
if (orientation != controls.end()) {
int32_t v4l2Orientation = orientation->second.def().get<int32_t>();
int32_t propertyValue;
switch (v4l2Orientation) {
default:
LOG(CameraSensor, Warning)
<< "Unsupported camera location "
<< v4l2Orientation << ", setting to External";
[[fallthrough]];
case V4L2_CAMERA_ORIENTATION_EXTERNAL:
propertyValue = properties::CameraLocationExternal;
break;
case V4L2_CAMERA_ORIENTATION_FRONT:
propertyValue = properties::CameraLocationFront;
break;
case V4L2_CAMERA_ORIENTATION_BACK:
propertyValue = properties::CameraLocationBack;
break;
}
properties_.set(properties::Location, propertyValue);
} else {
LOG(CameraSensor, Warning) << "Failed to retrieve the camera location";
}
const auto &rotationControl = controls.find(V4L2_CID_CAMERA_SENSOR_ROTATION);
if (rotationControl != controls.end()) {
int32_t propertyValue = rotationControl->second.def().get<int32_t>();
/*
* Cache the Transform associated with the camera mounting
* rotation for later use in computeTransform().
*/
bool success;
mountingOrientation_ = orientationFromRotation(propertyValue, &success);
if (!success) {
LOG(CameraSensor, Warning)
<< "Invalid rotation of " << propertyValue
<< " degrees - ignoring";
mountingOrientation_ = Orientation::Rotate0;
}
properties_.set(properties::Rotation, propertyValue);
} else {
LOG(CameraSensor, Warning)
<< "Rotation control not available, default to 0 degrees";
properties_.set(properties::Rotation, 0);
mountingOrientation_ = Orientation::Rotate0;
}
properties_.set(properties::PixelArraySize, pixelArraySize_);
properties_.set(properties::PixelArrayActiveAreas, { activeArea_ });
/* Color filter array pattern, register only for RAW sensors. */
if (bayerFormat_) {
int32_t cfa;
switch (bayerFormat_->order) {
case BayerFormat::BGGR:
cfa = properties::draft::BGGR;
break;
case BayerFormat::GBRG:
cfa = properties::draft::GBRG;
break;
case BayerFormat::GRBG:
cfa = properties::draft::GRBG;
break;
case BayerFormat::RGGB:
cfa = properties::draft::RGGB;
break;
case BayerFormat::MONO:
cfa = properties::draft::MONO;
break;
}
properties_.set(properties::draft::ColorFilterArrangement, cfa);
}
return 0;
}
/**
* \brief Check for and initialise any ancillary devices
*
* Sensors sometimes have ancillary devices such as a Lens or Flash that could
* be linked to their MediaEntity by the kernel. Search for and handle any
* such device.
*
* \todo Handle MEDIA_ENT_F_FLASH too.
*/
int CameraSensor::discoverAncillaryDevices()
{
int ret;
for (MediaEntity *ancillary : entity_->ancillaryEntities()) {
switch (ancillary->function()) {
case MEDIA_ENT_F_LENS:
focusLens_ = std::make_unique<CameraLens>(ancillary);
ret = focusLens_->init();
if (ret) {
LOG(CameraSensor, Error)
<< "Lens initialisation failed, lens disabled";
focusLens_.reset();
}
break;
default:
LOG(CameraSensor, Warning)
<< "Unsupported ancillary entity function "
<< ancillary->function();
break;
}
}
return 0;
}
/**
* \fn CameraSensor::model()
* \brief Retrieve the sensor model name
*
 * The sensor model name is a free-form string that uniquely identifies the
* sensor model.
*
* \return The sensor model name
*/
/**
* \fn CameraSensor::id()
* \brief Retrieve the sensor ID
*
* The sensor ID is a free-form string that uniquely identifies the sensor in
* the system. The ID satisfies the requirements to be used as a camera ID.
*
* \return The sensor ID
*/
/**
* \fn CameraSensor::entity()
* \brief Retrieve the sensor media entity
* \return The sensor media entity
*/
/**
* \fn CameraSensor::device()
* \brief Retrieve the camera sensor device
* \todo Remove this function by integrating DelayedControl with CameraSensor
* \return The camera sensor device
*/
/**
* \fn CameraSensor::focusLens()
* \brief Retrieve the focus lens controller
*
* \return The focus lens controller. nullptr if no focus lens controller is
* connected to the sensor
*/
/**
* \fn CameraSensor::mbusCodes()
* \brief Retrieve the media bus codes supported by the camera sensor
*
* Any Bayer formats are listed using the sensor's native Bayer order,
* that is, with the effect of V4L2_CID_HFLIP and V4L2_CID_VFLIP undone
* (where these controls exist).
*
* \return The supported media bus codes sorted in increasing order
*/
/**
* \brief Retrieve the supported frame sizes for a media bus code
* \param[in] mbusCode The media bus code for which sizes are requested
*
* \return The supported frame sizes for \a mbusCode sorted in increasing order
*/
std::vector<Size> CameraSensor::sizes(unsigned int mbusCode) const
{
std::vector<Size> sizes;
const auto &format = formats_.find(mbusCode);
if (format == formats_.end())
return sizes;
const std::vector<SizeRange> &ranges = format->second;
std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes),
[](const SizeRange &range) { return range.max; });
std::sort(sizes.begin(), sizes.end());
return sizes;
}
/**
* \brief Retrieve the camera sensor resolution
*
* The camera sensor resolution is the active pixel area size, clamped to the
* maximum frame size the sensor can produce if it is smaller than the active
* pixel area.
*
 * \todo Consider whether it is desirable to distinguish between the maximum resolution
* the sensor can produce (also including upscaled ones) and the actual pixel
* array size by splitting this function in two.
*
* \return The camera sensor resolution in pixels
*/
Size CameraSensor::resolution() const
{
return std::min(sizes_.back(), activeArea_.size());
}
/**
* \brief Retrieve the best sensor format for a desired output
* \param[in] mbusCodes The list of acceptable media bus codes
* \param[in] size The desired size
*
* Media bus codes are selected from \a mbusCodes, which lists all acceptable
* codes in decreasing order of preference. Media bus codes supported by the
* sensor but not listed in \a mbusCodes are ignored. If none of the desired
 * codes is supported, an empty format is returned.
*
* \a size indicates the desired size at the output of the sensor. This function
* selects the best media bus code and size supported by the sensor according
* to the following criteria.
*
* - The desired \a size shall fit in the sensor output size to avoid the need
* to up-scale.
* - The sensor output size shall match the desired aspect ratio to avoid the
* need to crop the field of view.
* - The sensor output size shall be as small as possible to lower the required
* bandwidth.
* - The desired \a size shall be supported by one of the media bus code listed
* in \a mbusCodes.
*
* When multiple media bus codes can produce the same size, the code at the
* lowest position in \a mbusCodes is selected.
*
* The use of this function is optional, as the above criteria may not match the
* needs of all pipeline handlers. Pipeline handlers may implement custom
* sensor format selection when needed.
*
* The returned sensor output format is guaranteed to be acceptable by the
* setFormat() function without any modification.
*
* \return The best sensor output format matching the desired media bus codes
* and size on success, or an empty format otherwise.
*/
V4L2SubdeviceFormat CameraSensor::getFormat(const std::vector<unsigned int> &mbusCodes,
const Size &size) const
{
unsigned int desiredArea = size.width * size.height;
unsigned int bestArea = UINT_MAX;
float desiredRatio = static_cast<float>(size.width) / size.height;
float bestRatio = FLT_MAX;
const Size *bestSize = nullptr;
uint32_t bestCode = 0;
for (unsigned int code : mbusCodes) {
const auto formats = formats_.find(code);
if (formats == formats_.end())
continue;
for (const SizeRange &range : formats->second) {
const Size &sz = range.max;
if (sz.width < size.width || sz.height < size.height)
continue;
float ratio = static_cast<float>(sz.width) / sz.height;
float ratioDiff = fabsf(ratio - desiredRatio);
unsigned int area = sz.width * sz.height;
unsigned int areaDiff = area - desiredArea;
if (ratioDiff > bestRatio)
continue;
if (ratioDiff < bestRatio || areaDiff < bestArea) {
bestRatio = ratioDiff;
bestArea = areaDiff;
bestSize = &sz;
bestCode = code;
}
}
}
if (!bestSize) {
LOG(CameraSensor, Debug) << "No supported format or size found";
return {};
}
V4L2SubdeviceFormat format{
.code = bestCode,
.size = *bestSize,
.colorSpace = ColorSpace::Raw,
};
return format;
}
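/*
 * Illustrative call, for example from a pipeline handler (the media bus
 * codes and size are hypothetical):
 *
 *   V4L2SubdeviceFormat format =
 *           sensor->getFormat({ MEDIA_BUS_FMT_SRGGB10_1X10,
 *                               MEDIA_BUS_FMT_SRGGB8_1X8 },
 *                             Size(1920, 1080));
 *   if (format.code)
 *           ret = sensor->setFormat(&format);
 */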
/**
* \brief Set the sensor output format
* \param[in] format The desired sensor output format
* \param[in] transform The transform to be applied on the sensor.
* Defaults to Identity.
*
* If flips are writable they are configured according to the desired Transform.
* Transform::Identity always corresponds to H/V flip being disabled if the
* controls are writable. Flips are set before the new format is applied as
* they can effectively change the Bayer pattern ordering.
*
* The ranges of any controls associated with the sensor are also updated.
*
* \return 0 on success or a negative error code otherwise
*/
int CameraSensor::setFormat(V4L2SubdeviceFormat *format, Transform transform)
{
/* Configure flips if the sensor supports that. */
if (supportFlips_) {
ControlList flipCtrls(subdev_->controls());
flipCtrls.set(V4L2_CID_HFLIP,
static_cast<int32_t>(!!(transform & Transform::HFlip)));
flipCtrls.set(V4L2_CID_VFLIP,
static_cast<int32_t>(!!(transform & Transform::VFlip)));
int ret = subdev_->setControls(&flipCtrls);
if (ret)
return ret;
}
/* Apply format on the subdev. */
int ret = subdev_->setFormat(pad_, format);
if (ret)
return ret;
subdev_->updateControlInfo();
return 0;
}
/**
* \brief Try the sensor output format
* \param[in] format The desired sensor output format
*
* The ranges of any controls associated with the sensor are not updated.
*
* \todo Add support for Transform by changing the format's Bayer ordering
* before calling subdev_->setFormat().
*
* \return 0 on success or a negative error code otherwise
*/
int CameraSensor::tryFormat(V4L2SubdeviceFormat *format) const
{
return subdev_->setFormat(pad_, format,
V4L2Subdevice::Whence::TryFormat);
}
/**
* \brief Apply a sensor configuration to the camera sensor
* \param[in] config The sensor configuration
* \param[in] transform The transform to be applied on the sensor.
* Defaults to Identity
* \param[out] sensorFormat Format applied to the sensor (optional)
*
* Apply to the camera sensor the configuration \a config.
*
* \todo The configuration shall be fully populated and if any of the fields
* specified cannot be applied exactly, an error code is returned.
*
* \return 0 if \a config is applied correctly to the camera sensor, a negative
* error code otherwise
*/
int CameraSensor::applyConfiguration(const SensorConfiguration &config,
Transform transform,
V4L2SubdeviceFormat *sensorFormat)
{
if (!config.isValid()) {
LOG(CameraSensor, Error) << "Invalid sensor configuration";
return -EINVAL;
}
std::vector<unsigned int> filteredCodes;
std::copy_if(mbusCodes_.begin(), mbusCodes_.end(),
std::back_inserter(filteredCodes),
[&config](unsigned int mbusCode) {
BayerFormat bayer = BayerFormat::fromMbusCode(mbusCode);
if (bayer.bitDepth == config.bitDepth)
return true;
return false;
});
if (filteredCodes.empty()) {
LOG(CameraSensor, Error)
<< "Cannot find any format with bit depth "
<< config.bitDepth;
return -EINVAL;
}
/*
* Compute the sensor's data frame size by applying the cropping
* rectangle, subsampling and output crop to the sensor's pixel array
* size.
*
* \todo The actual size computation is for now ignored and only the
* output size is considered. This implies that resolutions obtained
* with two different cropping/subsampling will look identical and
* only the first found one will be considered.
*/
V4L2SubdeviceFormat subdevFormat = {};
for (unsigned int code : filteredCodes) {
for (const Size &size : sizes(code)) {
if (size.width != config.outputSize.width ||
size.height != config.outputSize.height)
continue;
subdevFormat.code = code;
subdevFormat.size = size;
break;
}
}
if (!subdevFormat.code) {
LOG(CameraSensor, Error) << "Invalid output size in sensor configuration";
return -EINVAL;
}
int ret = setFormat(&subdevFormat, transform);
if (ret)
return ret;
/*
* Return to the caller the format actually applied to the sensor.
* This is relevant if transform has changed the bayer pattern order.
*/
if (sensorFormat)
*sensorFormat = subdevFormat;
/* \todo Handle AnalogCrop. Most sensors do not support set_selection */
/* \todo Handle scaling in the digital domain. */
return 0;
}
/**
* \fn CameraSensor::properties()
* \brief Retrieve the camera sensor properties
* \return The list of camera sensor properties
*/
/**
* \brief Assemble and return the camera sensor info
* \param[out] info The camera sensor info
*
* This function fills \a info with information that describes the camera sensor
* and its current configuration. The information combines static data (such as
 * the sensor model or active pixel array size) and data specific to the
* current sensor configuration (such as the line length and pixel rate).
*
* Sensor information is only available for raw sensors. When called for a YUV
* sensor, this function returns -EINVAL.
*
* \return 0 on success, a negative error code otherwise
*/
int CameraSensor::sensorInfo(IPACameraSensorInfo *info) const
{
if (!bayerFormat_)
return -EINVAL;
info->model = model();
/*
* The active area size is a static property, while the crop
* rectangle needs to be re-read as it depends on the sensor
* configuration.
*/
info->activeAreaSize = { activeArea_.width, activeArea_.height };
/*
 * \todo Support for retrieving the crop rectangle is scheduled to
* become mandatory. For the time being use the default value if it has
* been initialized at sensor driver validation time.
*/
int ret = subdev_->getSelection(pad_, V4L2_SEL_TGT_CROP, &info->analogCrop);
if (ret) {
info->analogCrop = activeArea_;
LOG(CameraSensor, Warning)
<< "The analogue crop rectangle has been defaulted to the active area size";
}
/*
* IPACameraSensorInfo::analogCrop::x and IPACameraSensorInfo::analogCrop::y
 * are defined relative to the active pixel area, while V4L2's
 * TGT_CROP target is defined with respect to the full pixel array.
*
* Compensate it by subtracting the active area offset.
*/
info->analogCrop.x -= activeArea_.x;
info->analogCrop.y -= activeArea_.y;
/* The bit depth and image size depend on the currently applied format. */
V4L2SubdeviceFormat format{};
ret = subdev_->getFormat(pad_, &format);
if (ret)
return ret;
info->bitsPerPixel = MediaBusFormatInfo::info(format.code).bitsPerPixel;
info->outputSize = format.size;
std::optional<int32_t> cfa = properties_.get(properties::draft::ColorFilterArrangement);
info->cfaPattern = cfa ? *cfa : properties::draft::RGB;
/*
* Retrieve the pixel rate, line length and minimum/maximum frame
* duration through V4L2 controls. Support for the V4L2_CID_PIXEL_RATE,
* V4L2_CID_HBLANK and V4L2_CID_VBLANK controls is mandatory.
*/
ControlList ctrls = subdev_->getControls({ V4L2_CID_PIXEL_RATE,
V4L2_CID_HBLANK,
V4L2_CID_VBLANK });
if (ctrls.empty()) {
LOG(CameraSensor, Error)
<< "Failed to retrieve camera info controls";
return -EINVAL;
}
info->pixelRate = ctrls.get(V4L2_CID_PIXEL_RATE).get<int64_t>();
const ControlInfo hblank = ctrls.infoMap()->at(V4L2_CID_HBLANK);
info->minLineLength = info->outputSize.width + hblank.min().get<int32_t>();
info->maxLineLength = info->outputSize.width + hblank.max().get<int32_t>();
const ControlInfo vblank = ctrls.infoMap()->at(V4L2_CID_VBLANK);
info->minFrameLength = info->outputSize.height + vblank.min().get<int32_t>();
info->maxFrameLength = info->outputSize.height + vblank.max().get<int32_t>();
return 0;
}
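/*
 * Worked example with hypothetical numbers: for an output width of 1920
 * pixels and an HBLANK range of [200, 4000], minLineLength is 2120 and
 * maxLineLength is 5920 pixels. Dividing a line length by pixelRate gives
 * the corresponding line duration, which IPA modules can use to convert
 * between exposure times and exposure lines.
 */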
/**
* \brief Compute the Transform that gives the requested \a orientation
* \param[inout] orientation The desired image orientation
*
* This function computes the Transform that the pipeline handler should apply
* to the CameraSensor to obtain the requested \a orientation.
*
* The intended caller of this function is the validate() implementation of
* pipeline handlers, that pass in the application requested
* CameraConfiguration::orientation and obtain a Transform to apply to the
* camera sensor, likely at configure() time.
*
* If the requested \a orientation cannot be obtained, the \a orientation
* parameter is adjusted to report the current image orientation and
* Transform::Identity is returned.
*
* If the requested \a orientation can be obtained, the function computes a
* Transform and does not adjust \a orientation.
*
* Pipeline handlers are expected to verify if \a orientation has been
* adjusted by this function and set the CameraConfiguration::status to
* Adjusted accordingly.
*
* \return A Transform instance that applied to the CameraSensor produces images
* with \a orientation
*/
Transform CameraSensor::computeTransform(Orientation *orientation) const
{
/*
* If we cannot do any flips we cannot change the native camera mounting
* orientation.
*/
if (!supportFlips_) {
*orientation = mountingOrientation_;
return Transform::Identity;
}
/*
* Now compute the required transform to obtain 'orientation' starting
* from the mounting rotation.
*
* As a note:
* orientation / mountingOrientation_ = transform
* mountingOrientation_ * transform = orientation
*/
Transform transform = *orientation / mountingOrientation_;
/*
	 * If the transform contains a Transpose, we cannot apply it, so adjust
	 * 'orientation' to report the native image orientation and return
	 * Identity.
*/
if (!!(transform & Transform::Transpose)) {
*orientation = mountingOrientation_;
return Transform::Identity;
}
return transform;
}
/**
* \brief Compute the Bayer order that results from the given Transform
* \param[in] t The Transform to apply to the sensor
*
* Some sensors change their Bayer order when they are h-flipped or v-flipped.
* This function computes and returns the Bayer order that would result from the
* given transform applied to the sensor.
*
* This function is valid only when the sensor produces raw Bayer formats.
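 *
 * A short usage sketch, assuming the Transform was obtained from
 * computeTransform() (the sensor and orientation variables are illustrative):
 * \code{.cpp}
 * Transform t = sensor->computeTransform(&orientation);
 * BayerFormat::Order order = sensor->bayerOrder(t);
 * \endcode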
*
* \return The Bayer order produced by the sensor when the Transform is applied
*/
BayerFormat::Order CameraSensor::bayerOrder(Transform t) const
{
	/* Return a defined but meaningless value for non-Bayer sensors. */
if (!bayerFormat_)
return BayerFormat::Order::BGGR;
if (!flipsAlterBayerOrder_)
return bayerFormat_->order;
/*
	 * Apply the transform to the sensor's native (i.e. untransformed)
	 * Bayer order.
*/
return bayerFormat_->transform(t).order;
}
/**
* \brief Retrieve the supported V4L2 controls and their information
*
* Control information is updated automatically to reflect the current sensor
* configuration when the setFormat() function is called, without invalidating
* any iterator on the ControlInfoMap.
*
* \return A map of the V4L2 controls supported by the sensor
*/
const ControlInfoMap &CameraSensor::controls() const
{
return subdev_->controls();
}
/**
* \brief Read V4L2 controls from the sensor
* \param[in] ids The list of controls to read, specified by their ID
*
* This function reads the value of all controls contained in \a ids, and
* returns their values as a ControlList. The control identifiers are defined by
* the V4L2 specification (V4L2_CID_*).
*
* If any control in \a ids is not supported by the device, is disabled (i.e.
* has the V4L2_CTRL_FLAG_DISABLED flag set), or if any other error occurs
* during validation of the requested controls, no control is read and this
* function returns an empty control list.
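 *
 * Example usage (a sketch; control availability depends on the sensor
 * driver):
 * \code{.cpp}
 * ControlList ctrls = sensor->getControls({ V4L2_CID_EXPOSURE,
 * 					     V4L2_CID_ANALOGUE_GAIN });
 * if (ctrls.empty())
 * 	return -EINVAL;
 *
 * int32_t exposure = ctrls.get(V4L2_CID_EXPOSURE).get<int32_t>();
 * \endcode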
*
* \sa V4L2Device::getControls()
*
* \return The control values in a ControlList on success, or an empty list on
* error
*/
ControlList CameraSensor::getControls(const std::vector<uint32_t> &ids)
{
return subdev_->getControls(ids);
}
/**
* \brief Write V4L2 controls to the sensor
* \param[in] ctrls The list of controls to write
*
* This function writes the value of all controls contained in \a ctrls, and
* stores the values actually applied to the device in the corresponding \a
* ctrls entry. The control identifiers are defined by the V4L2 specification
* (V4L2_CID_*).
*
* If any control in \a ctrls is not supported by the device, is disabled (i.e.
* has the V4L2_CTRL_FLAG_DISABLED flag set), is read-only, or if any other
* error occurs during validation of the requested controls, no control is
* written and this function returns -EINVAL.
*
* If an error occurs while writing the controls, the index of the first
* control that couldn't be written is returned. All controls below that index
* are written and their values are updated in \a ctrls, while all other
* controls are not written and their values are not changed.
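 *
 * Example usage (a sketch; the exposure value is illustrative and must lie
 * within the limits reported by controls()):
 * \code{.cpp}
 * ControlList ctrls{ sensor->controls() };
 * ctrls.set(V4L2_CID_EXPOSURE, 1000);
 *
 * int ret = sensor->setControls(&ctrls);
 * \endcode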
*
* \sa V4L2Device::setControls()
*
* \return 0 on success or an error code otherwise
 * \retval -EINVAL One of the controls is not supported or not accessible
* \retval i The index of the control that failed
*/
int CameraSensor::setControls(ControlList *ctrls)
{
return subdev_->setControls(ctrls);
}
/**
* \fn CameraSensor::testPatternModes()
 * \brief Retrieve all the supported test pattern modes of the camera sensor
 *
 * The test pattern mode values correspond to the controls::TestPattern control.
*
* \return The list of test pattern modes
*/
/**
* \brief Set the test pattern mode for the camera sensor
* \param[in] mode The test pattern mode
*
* The new \a mode is applied to the sensor if it differs from the active test
* pattern mode. Otherwise, this function is a no-op. Setting the same test
 * pattern mode for every frame thus incurs no performance penalty.
 *
 * \return 0 on success or a negative error code otherwise
 */
int CameraSensor::setTestPatternMode(controls::draft::TestPatternModeEnum mode)
{
if (testPatternMode_ == mode)
return 0;
if (testPatternModes_.empty()) {
LOG(CameraSensor, Error)
<< "Camera sensor does not support test pattern modes.";
return -EINVAL;
}
return applyTestPatternMode(mode);
}
int CameraSensor::applyTestPatternMode(controls::draft::TestPatternModeEnum mode)
{
if (testPatternModes_.empty())
return 0;
auto it = std::find(testPatternModes_.begin(), testPatternModes_.end(),
mode);
if (it == testPatternModes_.end()) {
LOG(CameraSensor, Error) << "Unsupported test pattern mode "
<< mode;
return -EINVAL;
}
LOG(CameraSensor, Debug) << "Apply test pattern mode " << mode;
int32_t index = staticProps_->testPatternModes.at(mode);
ControlList ctrls{ controls() };
ctrls.set(V4L2_CID_TEST_PATTERN, index);
int ret = setControls(&ctrls);
if (ret)
return ret;
testPatternMode_ = mode;
return 0;
}
std::string CameraSensor::logPrefix() const
{
return "'" + entity_->name() + "'";
}
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera | repos/libcamera/src/libcamera/base/class.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2020, Google Inc.
*
* Utilities and helpers for classes
*/
#include <libcamera/base/class.h>
/**
* \file class.h
* \brief Utilities to help constructing class interfaces
*
* The extensible class can be inherited to create public classes with stable
* ABIs.
*/
namespace libcamera {
/**
* \def LIBCAMERA_DISABLE_COPY
* \brief Disable copy construction and assignment of the \a klass
* \param klass The name of the class
*
* Example usage:
* \code{.cpp}
* class NonCopyable
* {
* public:
* NonCopyable();
* ...
*
* private:
* LIBCAMERA_DISABLE_COPY(NonCopyable)
* };
* \endcode
*/
/**
* \def LIBCAMERA_DISABLE_MOVE
* \brief Disable move construction and assignment of the \a klass
* \param klass The name of the class
*
* Example usage:
* \code{.cpp}
* class NonMoveable
* {
* public:
* NonMoveable();
* ...
*
* private:
* LIBCAMERA_DISABLE_MOVE(NonMoveable)
* };
* \endcode
*/
/**
* \def LIBCAMERA_DISABLE_COPY_AND_MOVE
* \brief Disable copy and move construction and assignment of the \a klass
* \param klass The name of the class
*
* Example usage:
* \code{.cpp}
* class NonCopyableNonMoveable
* {
* public:
* NonCopyableNonMoveable();
* ...
*
* private:
* LIBCAMERA_DISABLE_COPY_AND_MOVE(NonCopyableNonMoveable)
* };
* \endcode
*/
/**
* \def LIBCAMERA_DECLARE_PRIVATE
* \brief Declare private data for a public class
*
* The LIBCAMERA_DECLARE_PRIVATE() macro plumbs the infrastructure necessary to
* make a class manage its private data through a d-pointer. It shall be used at
* the very top of the class definition.
*/
/**
* \def LIBCAMERA_DECLARE_PUBLIC
* \brief Declare public data for a private class
* \param klass The public class name
*
* The LIBCAMERA_DECLARE_PUBLIC() macro is the counterpart of
* LIBCAMERA_DECLARE_PRIVATE() to be used in the private data class. It shall be
* used at the very top of the private class definition, with the public class
* name passed as the \a klass parameter.
*/
/**
* \def LIBCAMERA_O_PTR()
* \brief Retrieve the public instance corresponding to the private data
*
* This macro is part of the libcamera::Extensible class infrastructure. It may
* be used in any member function of a libcamera::Extensible::Private subclass
* to access the public class instance corresponding to the private data.
*/
/**
* \class Extensible
* \brief Base class to manage private data through a d-pointer
*
* The Extensible class provides a base class to implement the
* <a href="https://wiki.qt.io/D-Pointer">d-pointer</a> design pattern (also
* known as <a href="https://en.wikipedia.org/wiki/Opaque_pointer">opaque pointer</a>
* or <a href="https://en.cppreference.com/w/cpp/language/pimpl">pImpl idiom</a>).
 * It helps create public classes that can be extended without breaking their
* ABI. Such classes store their private data in a separate private data object,
* referenced by a pointer in the public class (hence the name d-pointer).
*
 * Classes that follow this design pattern are referred to herein as extensible
* classes. To be extensible, a class PublicClass shall:
*
* - inherit from the Extensible class or from another extensible class
* - invoke the LIBCAMERA_DECLARE_PRIVATE() macro at the very top of the class
* definition
* - define a private data class named PublicClass::Private that inherits from
* the Private data class of the base class
* - invoke the LIBCAMERA_DECLARE_PUBLIC() macro at the very top of the Private
* data class definition
* - pass a pointer to a newly allocated Private data object to the constructor
* of the base class
*
* Additionally, if the PublicClass is not final, it shall expose one or more
 * constructors that take a pointer to a Private data instance, to be used by
* derived classes.
*
* The Private class is fully opaque to users of the libcamera public API.
* Internally, it can be kept private to the implementation of PublicClass, or
* be exposed to other classes. In the latter case, the members of the Private
* class need to be qualified with appropriate access specifiers. The
* PublicClass and Private classes always have full access to each other's
* protected and private members.
*
* The PublicClass exposes its Private data pointer through the _d() function.
* In the other direction, the pointer to the PublicClass can be retrieved in
* functions of the Private class using the LIBCAMERA_O_PTR() macro.
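 *
 * A minimal sketch of an extensible class following these steps (the class
 * name and data member are illustrative):
 * \code{.cpp}
 * class MyClass : public Extensible
 * {
 * 	LIBCAMERA_DECLARE_PRIVATE()
 *
 * public:
 * 	MyClass();
 * };
 *
 * class MyClass::Private : public Extensible::Private
 * {
 * 	LIBCAMERA_DECLARE_PUBLIC(MyClass)
 *
 * public:
 * 	int value_ = 0;
 * };
 *
 * MyClass::MyClass()
 * 	: Extensible(std::make_unique<Private>())
 * {
 * }
 * \endcode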
*/
/**
* \brief Construct an instance of an Extensible class
* \param[in] d Pointer to the private data instance
*
* The private data lifetime is managed by the Extensible class, which destroys
* it when the Extensible instance is destroyed.
*/
Extensible::Extensible(std::unique_ptr<Extensible::Private> d)
: d_(std::move(d))
{
*const_cast<Extensible **>(&d_->o_) = this;
}
/**
* \fn Extensible::_d() const
* \brief Retrieve the private data instance
*
* This template function isn't meant to be called directly. Instead, classes
* derived from Extensible get, through the LIBCAMERA_DECLARE_PRIVATE() macro,
 * overridden _d() functions that return the correct pointer type to the
* corresponding derived Private class.
*
* The lifetime of the private data is tied to the Extensible class. The caller
* shall not retain any reference to the returned pointer for longer than it
* holds a reference to the Extensible instance.
*
* \return A pointer to the private data instance
*/
/**
* \fn Extensible::_d()
* \copydoc Extensible::_d() const
*/
/**
* \var Extensible::d_
* \brief Pointer to the private data instance
*/
/**
* \class Extensible::Private
* \brief Base class for private data managed through a d-pointer
*/
/**
* \brief Construct an instance of an Extensible class private data
*/
Extensible::Private::Private()
: o_(nullptr)
{
}
Extensible::Private::~Private()
{
}
/**
* \var Extensible::Private::o_
* \brief Pointer to the public class object
*/
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera | repos/libcamera/src/libcamera/base/shared_fd.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* File descriptor wrapper with shared ownership
*/
#include <libcamera/base/shared_fd.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>
#include <utility>
#include <libcamera/base/log.h>
#include <libcamera/base/unique_fd.h>
/**
* \file base/shared_fd.h
* \brief File descriptor wrapper
*/
namespace libcamera {
LOG_DEFINE_CATEGORY(SharedFD)
/**
* \class SharedFD
* \brief RAII-style wrapper for file descriptors
*
* The SharedFD class provides RAII-style lifetime management of file
* descriptors with an efficient mechanism for ownership sharing. At its core,
* an internal Descriptor object wraps a file descriptor (expressed as a signed
* integer) with an RAII-style interface. The Descriptor is then implicitly
* shared with all SharedFD instances constructed as copies.
*
* When constructed from a numerical file descriptor, the SharedFD instance
* either duplicates or takes over the file descriptor:
*
* - The SharedFD(const int &) constructor duplicates the numerical file
* descriptor and wraps the duplicate in a Descriptor. The caller is
* responsible for closing the original file descriptor, and the value
 *   returned by get() will be different from the value passed to the
* constructor.
*
* - The SharedFD(int &&) constructor takes over the numerical file descriptor
* and wraps it in a Descriptor. The caller shall not touch the original file
 *   descriptor once the function returns, and the value returned by get() will
* be identical to the value passed to the constructor.
*
* The copy constructor and assignment operator create copies that share the
* Descriptor, while the move versions of those functions additionally make the
* other SharedFD invalid. When the last SharedFD that references a Descriptor
* is destroyed, the file descriptor is closed.
*
 * The numerical file descriptor is available through the get() function. All
 * SharedFD instances created as copies of a SharedFD will report the same
 * get() value. Callers can perform operations on the file descriptor, but
 * shall never close it
* manually.
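 *
 * A short usage sketch (the device path is illustrative):
 * \code{.cpp}
 * int fd = open("/dev/video0", O_RDWR);
 *
 * SharedFD copy(fd);             // Duplicates fd, the caller still owns fd
 * SharedFD owner(std::move(fd)); // Takes over fd, fd is set to -1
 * \endcode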
*/
/**
* \brief Create a SharedFD copying a given \a fd
* \param[in] fd File descriptor
*
* Construct a SharedFD from a numerical file descriptor by duplicating the
* \a fd, and take ownership of the copy. The original \a fd is left untouched,
* and the caller is responsible for closing it when appropriate. The duplicated
* file descriptor will be closed automatically when all SharedFD instances that
* reference it are destroyed.
*
 * If the \a fd is negative, the SharedFD is constructed as invalid and the
 * get() function will return -1.
*/
SharedFD::SharedFD(const int &fd)
{
if (fd < 0)
return;
fd_ = std::make_shared<Descriptor>(fd, true);
if (fd_->fd() < 0)
fd_.reset();
}
/**
* \brief Create a SharedFD taking ownership of a given \a fd
* \param[in] fd File descriptor
*
* Construct a SharedFD from a numerical file descriptor by taking ownership of
* the \a fd. The original \a fd is set to -1 and shall not be touched by the
* caller anymore. In particular, the caller shall not close the original \a fd
 * manually. The wrapped file descriptor will be closed automatically when
* all SharedFD instances that reference it are destroyed.
*
 * If the \a fd is negative, the SharedFD is constructed as invalid and the
 * get() function will return -1.
*/
SharedFD::SharedFD(int &&fd)
{
if (fd < 0)
return;
fd_ = std::make_shared<Descriptor>(fd, false);
/*
* The Descriptor constructor can't have failed here, as it took over
* the fd without duplicating it. Just set the original fd to -1 to
* implement move semantics.
*/
fd = -1;
}
/**
* \brief Create a SharedFD taking ownership of a given UniqueFD \a fd
* \param[in] fd UniqueFD
*
* Construct a SharedFD from UniqueFD by taking ownership of the \a fd. The
* original \a fd becomes invalid.
*/
SharedFD::SharedFD(UniqueFD fd)
: SharedFD(fd.release())
{
}
/**
* \brief Copy constructor, create a SharedFD from a copy of \a other
* \param[in] other The other SharedFD
*
* Copying a SharedFD implicitly shares ownership of the wrapped file
* descriptor. The original SharedFD is left untouched, and the caller is
* responsible for destroying it when appropriate. The wrapped file descriptor
* will be closed automatically when all SharedFD instances that reference it
* are destroyed.
*/
SharedFD::SharedFD(const SharedFD &other)
: fd_(other.fd_)
{
}
/**
* \brief Move constructor, create a SharedFD by taking over \a other
* \param[in] other The other SharedFD
*
* Moving a SharedFD moves the reference to the wrapped descriptor owned by
* \a other to the new SharedFD. The \a other SharedFD is invalidated and its
 * get() function will return -1. The wrapped file descriptor will be closed
* automatically when all SharedFD instances that reference it are destroyed.
*/
SharedFD::SharedFD(SharedFD &&other)
: fd_(std::move(other.fd_))
{
}
/**
* \brief Destroy the SharedFD instance
*
* Destroying a SharedFD instance releases its reference to the wrapped
* descriptor, if any. When the last instance that references a wrapped
* descriptor is destroyed, the file descriptor is automatically closed.
*/
SharedFD::~SharedFD()
{
}
/**
* \brief Copy assignment operator, replace the wrapped file descriptor with a
* copy of \a other
* \param[in] other The other SharedFD
*
* Copying a SharedFD creates a new reference to the wrapped file descriptor
 * owned by \a other. If \a other is invalid, *this will also be invalid. The
* original SharedFD is left untouched, and the caller is responsible for
* destroying it when appropriate. The wrapped file descriptor will be closed
* automatically when all SharedFD instances that reference it are destroyed.
*
* \return A reference to this SharedFD
*/
SharedFD &SharedFD::operator=(const SharedFD &other)
{
fd_ = other.fd_;
return *this;
}
/**
* \brief Move assignment operator, replace the wrapped file descriptor by
* taking over \a other
* \param[in] other The other SharedFD
*
* Moving a SharedFD moves the reference to the wrapped descriptor owned by
* \a other to the new SharedFD. If \a other is invalid, *this will also be
 * invalid. The \a other SharedFD is invalidated and its get() function will
* return -1. The wrapped file descriptor will be closed automatically when
* all SharedFD instances that reference it are destroyed.
*
* \return A reference to this SharedFD
*/
SharedFD &SharedFD::operator=(SharedFD &&other)
{
fd_ = std::move(other.fd_);
return *this;
}
/**
* \fn SharedFD::isValid()
* \brief Check if the SharedFD instance is valid
* \return True if the SharedFD is valid, false otherwise
*/
/**
* \fn SharedFD::get()
* \brief Retrieve the numerical file descriptor
* \return The numerical file descriptor, which may be -1 if the SharedFD
* instance is invalid
*/
/**
* \fn bool operator==(const SharedFD &lhs, const SharedFD &rhs)
* \brief Compare the owned file descriptors of two SharedFD for equality
* \param[in] lhs The first SharedFD
* \param[in] rhs The second SharedFD
*
* Two file descriptors are considered equal if they have the same numerical
* value. File descriptors with different values that both reference the same
* file (for instance obtained using dup()) are considered not equal.
*
* \return True if the two file descriptors are equal, false otherwise
*/
/**
* \fn bool operator!=(const SharedFD &lhs, const SharedFD &rhs)
* \brief Compare the owned file descriptors of two SharedFD for equality
* \param[in] lhs The first SharedFD
* \param[in] rhs The second SharedFD
*
* Two file descriptors are considered equal if they have the same numerical
* value. File descriptors with different values that both reference the same
* file (for instance obtained using dup()) are considered not equal.
*
* \return True if the two file descriptors are not equal, false otherwise
*/
/**
* \brief Duplicate a SharedFD
*
* Duplicating a SharedFD creates a duplicate of the wrapped file descriptor and
 * returns a UniqueFD that owns the duplicate. The get() function of the
 * original and the get() function of the duplicate will return different
 * values. The
* duplicate instance will not be affected by destruction of the original
* instance or its copies.
*
* \return A UniqueFD owning a duplicate of the original file descriptor
*/
UniqueFD SharedFD::dup() const
{
if (!isValid())
return {};
UniqueFD dupFd(::dup(get()));
if (!dupFd.isValid()) {
int ret = -errno;
LOG(SharedFD, Error)
<< "Failed to dup() fd: " << strerror(-ret);
}
return dupFd;
}
SharedFD::Descriptor::Descriptor(int fd, bool duplicate)
{
if (!duplicate) {
fd_ = fd;
return;
}
/* Failing to dup() a fd should not happen and is fatal. */
fd_ = ::dup(fd);
if (fd_ == -1) {
int ret = -errno;
LOG(SharedFD, Fatal)
<< "Failed to dup() fd: " << strerror(-ret);
}
}
SharedFD::Descriptor::~Descriptor()
{
if (fd_ != -1)
close(fd_);
}
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera | repos/libcamera/src/libcamera/base/semaphore.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* General-purpose counting semaphore
*/
#include <libcamera/base/semaphore.h>
/**
* \file base/semaphore.h
* \brief General-purpose counting semaphore
*/
namespace libcamera {
/**
* \class Semaphore
* \brief General-purpose counting semaphore
*
* A semaphore is a locking primitive that protects resources. It is created
* with an initial number of resources (which may be 0), and offers two
* primitives to acquire and release resources. The acquire() function tries to
* acquire a number of resources, and blocks if not enough resources are
* available until they get released. The release() function releases a number
* of resources, waking up any consumer blocked on an acquire() call.
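 *
 * A minimal usage sketch, assuming the producer and consumer run in different
 * threads:
 * \code{.cpp}
 * Semaphore items(0);
 *
 * // Consumer thread: block until the producer has released one resource
 * items.acquire(1);
 *
 * // Producer thread: make one resource available, waking up the consumer
 * items.release(1);
 * \endcode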
*/
/**
* \brief Construct a semaphore with \a n resources
* \param[in] n The resource count
*/
Semaphore::Semaphore(unsigned int n)
: available_(n)
{
}
/**
* \brief Retrieve the number of available resources
* \return The number of available resources
*/
unsigned int Semaphore::available()
{
MutexLocker locker(mutex_);
return available_;
}
/**
* \brief Acquire \a n resources
* \param[in] n The resource count
*
* This function attempts to acquire \a n resources. If \a n is higher than the
* number of available resources, the call will block until enough resources
* become available.
*/
void Semaphore::acquire(unsigned int n)
{
MutexLocker locker(mutex_);
cv_.wait(locker, [&]() LIBCAMERA_TSA_REQUIRES(mutex_) {
return available_ >= n;
});
available_ -= n;
}
/**
* \brief Try to acquire \a n resources without blocking
* \param[in] n The resource count
*
* This function attempts to acquire \a n resources. If \a n is higher than the
* number of available resources, it returns false immediately without
* acquiring any resource. Otherwise it acquires the resources and returns
* true.
*
* \return True if the resources have been acquired, false otherwise
*/
bool Semaphore::tryAcquire(unsigned int n)
{
MutexLocker locker(mutex_);
if (available_ < n)
return false;
available_ -= n;
return true;
}
/**
* \brief Release \a n resources
* \param[in] n The resource count
*
* This function releases \a n resources, increasing the available resource
* count by \a n. If the number of available resources becomes large enough for
* any consumer blocked on an acquire() call, those consumers get woken up.
*/
void Semaphore::release(unsigned int n)
{
{
MutexLocker locker(mutex_);
available_ += n;
}
cv_.notify_all();
}
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera | repos/libcamera/src/libcamera/base/mutex.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2021, Google Inc.
*
* Mutex classes with clang thread safety annotation
*/
#include <libcamera/base/mutex.h>
/**
* \file base/mutex.h
* \brief Mutex classes with clang thread safety annotation
*/
namespace libcamera {
/**
* \class Mutex
* \brief std::mutex wrapper with clang thread safety annotation
*
* The Mutex class wraps a std::mutex instance to add clang thread safety
* annotation support. The class exposes the same interface as std::mutex and
* can be used as a transparent replacement. It integrates with the
* MutexLocker and ConditionVariable classes.
*
* See https://en.cppreference.com/w/cpp/thread/mutex for the complete API
* documentation.
*/
/**
* \class MutexLocker
* \brief std::unique_lock wrapper with clang thread safety annotation
*
* The MutexLocker class wraps a std::unique_lock instance to add clang thread
* safety annotation support. The class exposes the same interface as
* std::unique_lock and can be used as a transparent replacement. It integrates
* with the Mutex and ConditionVariable classes.
*
* See https://en.cppreference.com/w/cpp/thread/unique_lock for the complete API
* documentation.
*/
/**
* \class ConditionVariable
* \brief std::condition_variable wrapper integrating with MutexLocker
*
* The ConditionVariable class wraps a std::condition_variable instance to
* integrate with the MutexLocker class. The class exposes the same interface as
* std::condition_variable and can be used as a transparent replacement.
*
* See https://en.cppreference.com/w/cpp/thread/condition_variable for the
* complete API documentation.
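 *
 * A combined usage sketch of Mutex, MutexLocker and ConditionVariable,
 * mirroring the pattern used by Semaphore::acquire() (the ready flag is
 * illustrative):
 * \code{.cpp}
 * Mutex mutex;
 * ConditionVariable cv;
 * bool ready = false;
 *
 * // Wait until another thread sets ready to true and calls cv.notify_all()
 * MutexLocker locker(mutex);
 * cv.wait(locker, [&]() { return ready; });
 * \endcode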
*/
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera | repos/libcamera/src/libcamera/base/bound_method.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* Method bind and invocation
*/
#include <libcamera/base/bound_method.h>
#include <libcamera/base/message.h>
#include <libcamera/base/object.h>
#include <libcamera/base/semaphore.h>
#include <libcamera/base/thread.h>
/**
* \file base/bound_method.h
* \brief Method bind and invocation
*/
namespace libcamera {
/**
* \enum ConnectionType
* \brief Connection type for asynchronous communication
*
* This enumeration describes the possible types of asynchronous communication
* between a sender and a receiver. It applies to Signal::emit() and
* Object::invokeMethod().
*
* \var ConnectionTypeAuto
* \brief If the sender and the receiver live in the same thread,
* ConnectionTypeDirect is used. Otherwise ConnectionTypeQueued is used.
*
* \var ConnectionTypeDirect
* \brief The receiver is invoked immediately and synchronously in the sender's
* thread.
*
* \var ConnectionTypeQueued
* \brief The receiver is invoked asynchronously
*
* Invoke the receiver asynchronously in its thread when control returns to the
* thread's event loop. The sender proceeds without waiting for the invocation
* to complete.
*
* \var ConnectionTypeBlocking
* \brief The receiver is invoked synchronously
*
* If the sender and the receiver live in the same thread, this is equivalent to
* ConnectionTypeDirect. Otherwise, the receiver is invoked asynchronously in
* its thread when control returns to the thread's event loop. The sender
* blocks until the receiver signals the completion of the invocation.
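 *
 * A usage sketch with Object::invokeMethod() (the Worker class, its process()
 * function and the request argument are hypothetical):
 * \code{.cpp}
 * worker->invokeMethod(&Worker::process, ConnectionTypeQueued, request);
 * \endcode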
*/
/**
* \brief Invoke the bound method with packed arguments
* \param[in] pack Packed arguments
* \param[in] deleteMethod True to delete \a this bound method instance when
* method invocation completes
*
* The bound method stores its return value, if any, in the arguments \a pack.
* For direct and blocking invocations, this is performed synchronously, and
* the return value contained in the pack may be used. For queued invocations,
 * the return value is stored at an undefined point in time and shall thus not
* be used by the caller.
*
* \return True if the return value contained in the \a pack may be used by the
* caller, false otherwise
*/
bool BoundMethodBase::activatePack(std::shared_ptr<BoundMethodPackBase> pack,
bool deleteMethod)
{
ConnectionType type = connectionType_;
if (type == ConnectionTypeAuto) {
if (Thread::current() == object_->thread())
type = ConnectionTypeDirect;
else
type = ConnectionTypeQueued;
} else if (type == ConnectionTypeBlocking) {
if (Thread::current() == object_->thread())
type = ConnectionTypeDirect;
}
switch (type) {
case ConnectionTypeDirect:
default:
invokePack(pack.get());
if (deleteMethod)
delete this;
return true;
case ConnectionTypeQueued: {
std::unique_ptr<Message> msg =
std::make_unique<InvokeMessage>(this, pack, nullptr, deleteMethod);
object_->postMessage(std::move(msg));
return false;
}
case ConnectionTypeBlocking: {
Semaphore semaphore;
std::unique_ptr<Message> msg =
std::make_unique<InvokeMessage>(this, pack, &semaphore, deleteMethod);
object_->postMessage(std::move(msg));
semaphore.acquire();
return true;
}
}
}
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera | repos/libcamera/src/libcamera/base/log.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2018, Google Inc.
*
* Logging infrastructure
*/
#include <libcamera/base/log.h>
#include <array>
#include <fstream>
#include <iostream>
#include <list>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
#include <time.h>
#include <unordered_set>
#include <libcamera/logging.h>
#include <libcamera/base/backtrace.h>
#include <libcamera/base/mutex.h>
#include <libcamera/base/thread.h>
#include <libcamera/base/utils.h>
/**
* \file base/log.h
* \brief Logging infrastructure
*
 * libcamera includes a logging infrastructure used throughout the library that
* allows inspection of internal operation in a user-configurable way. The log
* messages are grouped in categories that represent areas of libcamera, and
* output of messages for each category can be controlled by independent log
* levels.
*
* The levels are configurable through the LIBCAMERA_LOG_LEVELS environment
* variable that contains a comma-separated list of 'category:level' pairs.
*
* The category names are strings and can include a wildcard ('*') character at
* the end to match multiple categories.
*
 * The levels are either numeric values, or strings containing the log level
 * name. The available log levels are DEBUG, INFO, WARN, ERROR and FATAL. Log
 * messages with a level higher than or equal to the configured log level for
* their category are output to the log, while other messages are silently
* discarded.
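 *
 * For example, setting LIBCAMERA_LOG_LEVELS to "Camera:DEBUG,*:ERROR" enables
 * debug output for the Camera category and limits all other categories to
 * error and fatal messages (an illustrative configuration).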
*
* By default log messages are output to std::cerr. They can be redirected to a
* log file by setting the LIBCAMERA_LOG_FILE environment variable to the name
* of the file. The file must be writable and is truncated if it exists. If any
* error occurs when opening the file, the file is ignored and the log is output
* to std::cerr.
*/
/**
* \file logging.h
* \brief Logging management
*
 * API to change the logging output destination and log levels programmatically.
*/
namespace libcamera {
static int log_severity_to_syslog(LogSeverity severity)
{
switch (severity) {
case LogDebug:
return LOG_DEBUG;
case LogInfo:
return LOG_INFO;
case LogWarning:
return LOG_WARNING;
case LogError:
return LOG_ERR;
case LogFatal:
return LOG_ALERT;
default:
return LOG_NOTICE;
}
}
static const char *log_severity_name(LogSeverity severity)
{
static const char *const names[] = {
"DEBUG",
" INFO",
" WARN",
"ERROR",
"FATAL",
};
if (static_cast<unsigned int>(severity) < std::size(names))
return names[severity];
else
return "UNKWN";
}
/**
* \brief Log output
*
* The LogOutput class models a log output destination
*/
class LogOutput
{
public:
LogOutput(const char *path, bool color);
LogOutput(std::ostream *stream, bool color);
LogOutput();
~LogOutput();
bool isValid() const;
void write(const LogMessage &msg);
void write(const std::string &msg);
private:
void writeSyslog(LogSeverity severity, const std::string &msg);
void writeStream(const std::string &msg);
std::ostream *stream_;
LoggingTarget target_;
bool color_;
};
/**
* \brief Construct a log output based on a file
* \param[in] path Full path to log file
* \param[in] color True to output colored messages
*/
LogOutput::LogOutput(const char *path, bool color)
: target_(LoggingTargetFile), color_(color)
{
stream_ = new std::ofstream(path);
}
/**
* \brief Construct a log output based on a stream
* \param[in] stream Stream to send log output to
* \param[in] color True to output colored messages
*/
LogOutput::LogOutput(std::ostream *stream, bool color)
: stream_(stream), target_(LoggingTargetStream), color_(color)
{
}
/**
* \brief Construct a log output to syslog
*/
LogOutput::LogOutput()
: stream_(nullptr), target_(LoggingTargetSyslog), color_(false)
{
openlog("libcamera", LOG_PID, 0);
}
LogOutput::~LogOutput()
{
switch (target_) {
case LoggingTargetFile:
delete stream_;
break;
case LoggingTargetSyslog:
closelog();
break;
default:
break;
}
}
/**
* \brief Check if the log output is valid
* \return True if the log output is valid
*/
bool LogOutput::isValid() const
{
switch (target_) {
case LoggingTargetFile:
return stream_->good();
case LoggingTargetStream:
return stream_ != nullptr;
default:
return true;
}
}
namespace {
/*
* For more information about ANSI escape codes, see
* https://en.wikipedia.org/wiki/ANSI_escape_code#Colors.
*/
constexpr const char *kColorReset = "\033[0m";
constexpr const char *kColorGreen = "\033[0;32m";
constexpr const char *kColorBrightRed = "\033[1;31m";
constexpr const char *kColorBrightGreen = "\033[1;32m";
constexpr const char *kColorBrightYellow = "\033[1;33m";
constexpr const char *kColorBrightBlue = "\033[1;34m";
constexpr const char *kColorBrightMagenta = "\033[1;35m";
constexpr const char *kColorBrightCyan = "\033[1;36m";
constexpr const char *kColorBrightWhite = "\033[1;37m";
} /* namespace */
/**
* \brief Write message to log output
* \param[in] msg Message to write
*/
void LogOutput::write(const LogMessage &msg)
{
static const char *const severityColors[] = {
kColorBrightCyan,
kColorBrightGreen,
kColorBrightYellow,
kColorBrightRed,
kColorBrightMagenta,
};
const char *categoryColor = color_ ? kColorBrightWhite : "";
const char *fileColor = color_ ? kColorBrightBlue : "";
const char *prefixColor = color_ ? kColorGreen : "";
const char *resetColor = color_ ? kColorReset : "";
const char *severityColor = "";
LogSeverity severity = msg.severity();
std::string str;
if (color_) {
if (static_cast<unsigned int>(severity) < std::size(severityColors))
severityColor = severityColors[severity];
else
severityColor = kColorBrightWhite;
}
switch (target_) {
case LoggingTargetSyslog:
str = std::string(log_severity_name(severity)) + " "
+ msg.category().name() + " " + msg.fileInfo() + " ";
if (!msg.prefix().empty())
str += msg.prefix() + ": ";
str += msg.msg();
writeSyslog(severity, str);
break;
case LoggingTargetStream:
case LoggingTargetFile:
str = "[" + utils::time_point_to_string(msg.timestamp()) + "] ["
+ std::to_string(Thread::currentId()) + "] "
+ severityColor + log_severity_name(severity) + " "
+ categoryColor + msg.category().name() + " "
+ fileColor + msg.fileInfo() + " ";
if (!msg.prefix().empty())
str += prefixColor + msg.prefix() + ": ";
str += resetColor + msg.msg();
writeStream(str);
break;
default:
break;
}
}
/**
* \brief Write string to log output
* \param[in] str String to write
*/
void LogOutput::write(const std::string &str)
{
switch (target_) {
case LoggingTargetSyslog:
writeSyslog(LogDebug, str);
break;
case LoggingTargetStream:
case LoggingTargetFile:
writeStream(str);
break;
default:
break;
}
}
void LogOutput::writeSyslog(LogSeverity severity, const std::string &str)
{
syslog(log_severity_to_syslog(severity), "%s", str.c_str());
}
void LogOutput::writeStream(const std::string &str)
{
stream_->write(str.c_str(), str.size());
stream_->flush();
}
/**
* \brief Message logger
*
* The Logger class handles log configuration.
*/
class Logger
{
public:
~Logger();
static Logger *instance();
void write(const LogMessage &msg);
void backtrace();
int logSetFile(const char *path, bool color);
int logSetStream(std::ostream *stream, bool color);
int logSetTarget(LoggingTarget target);
void logSetLevel(const char *category, const char *level);
private:
Logger();
void parseLogFile();
void parseLogLevels();
static LogSeverity parseLogLevel(const std::string &level);
friend LogCategory;
void registerCategory(LogCategory *category);
LogCategory *findCategory(const char *name) const;
static bool destroyed_;
std::vector<LogCategory *> categories_;
std::list<std::pair<std::string, LogSeverity>> levels_;
std::shared_ptr<LogOutput> output_;
};
bool Logger::destroyed_ = false;
/**
* \enum LoggingTarget
* \brief Log destination type
* \var LoggingTargetNone
* \brief No logging destination
* \sa Logger::logSetTarget
* \var LoggingTargetSyslog
* \brief Log to syslog
* \sa Logger::logSetTarget
* \var LoggingTargetFile
* \brief Log to file
* \sa Logger::logSetFile
* \var LoggingTargetStream
* \brief Log to stream
* \sa Logger::logSetStream
*/
/**
* \brief Direct logging to a file
* \param[in] path Full path to the log file
* \param[in] color True to output colored messages
*
* This function directs the log output to the file identified by \a path. The
* previous log target, if any, is closed, and all new log messages will be
* written to the new log file.
*
* \a color controls whether or not the messages will be colored with standard
* ANSI escape codes. This is done regardless of whether \a path refers to a
 * standard file or a TTY; the caller is responsible for disabling coloring when
* not suitable for the log target.
*
* If the function returns an error, the log target is not changed.
*
* \return Zero on success, or a negative error code otherwise
*/
int logSetFile(const char *path, bool color)
{
return Logger::instance()->logSetFile(path, color);
}
/**
* \brief Direct logging to a stream
* \param[in] stream Stream to send log output to
* \param[in] color True to output colored messages
*
* This function directs the log output to \a stream. The previous log target,
* if any, is closed, and all new log messages will be written to the new log
* stream.
*
* \a color controls whether or not the messages will be colored with standard
* ANSI escape codes. This is done regardless of whether \a stream refers to a
 * standard file or a TTY; the caller is responsible for disabling coloring when
* not suitable for the log target.
*
 * If the function returns an error, the log stream is not changed.
*
* \return Zero on success, or a negative error code otherwise.
*/
int logSetStream(std::ostream *stream, bool color)
{
return Logger::instance()->logSetStream(stream, color);
}
/**
* \brief Set the logging target
* \param[in] target Logging destination
*
* This function sets the logging output to the target specified by \a target.
* The allowed values of \a target are LoggingTargetNone and
* LoggingTargetSyslog. LoggingTargetNone will send the log output to nowhere,
* and LoggingTargetSyslog will send the log output to syslog. The previous
* log target, if any, is closed, and all new log messages will be written to
* the new log destination.
*
* LoggingTargetFile and LoggingTargetStream are not valid values for \a target.
* Use logSetFile() and logSetStream() instead, respectively.
*
 * If the function returns an error, the log target is not changed.
*
* \return Zero on success, or a negative error code otherwise.
*/
int logSetTarget(LoggingTarget target)
{
return Logger::instance()->logSetTarget(target);
}
/**
* \brief Set the log level
* \param[in] category Logging category
* \param[in] level Log level
*
* This function sets the log level of \a category to \a level.
* \a level shall be one of the following strings:
* - "DEBUG"
* - "INFO"
* - "WARN"
* - "ERROR"
* - "FATAL"
*
* "*" is not a valid \a category for this function.
*/
void logSetLevel(const char *category, const char *level)
{
Logger::instance()->logSetLevel(category, level);
}
Logger::~Logger()
{
destroyed_ = true;
for (LogCategory *category : categories_)
delete category;
}
/**
* \brief Retrieve the logger instance
*
* The Logger is a singleton and can't be constructed manually. This function
* shall instead be used to retrieve the single global instance of the logger.
*
* \return The logger instance
*/
Logger *Logger::instance()
{
static Logger instance;
if (destroyed_)
return nullptr;
return &instance;
}
/**
* \brief Write a message to the configured logger output
* \param[in] msg The message object
*/
void Logger::write(const LogMessage &msg)
{
std::shared_ptr<LogOutput> output = std::atomic_load(&output_);
if (!output)
return;
output->write(msg);
}
/**
* \brief Write a backtrace to the log
*/
void Logger::backtrace()
{
std::shared_ptr<LogOutput> output = std::atomic_load(&output_);
if (!output)
return;
/*
* Skip the first two entries that correspond to this function and
* ~LogMessage().
*/
std::string backtrace = Backtrace().toString(2);
if (backtrace.empty()) {
output->write("Backtrace not available\n");
return;
}
output->write("Backtrace:\n");
output->write(backtrace);
}
/**
* \brief Set the log file
* \param[in] path Full path to the log file
* \param[in] color True to output colored messages
*
* \sa libcamera::logSetFile()
*
* \return Zero on success, or a negative error code otherwise.
*/
int Logger::logSetFile(const char *path, bool color)
{
std::shared_ptr<LogOutput> output =
std::make_shared<LogOutput>(path, color);
if (!output->isValid())
return -EINVAL;
std::atomic_store(&output_, output);
return 0;
}
/**
* \brief Set the log stream
* \param[in] stream Stream to send log output to
* \param[in] color True to output colored messages
*
* \sa libcamera::logSetStream()
*
* \return Zero on success, or a negative error code otherwise.
*/
int Logger::logSetStream(std::ostream *stream, bool color)
{
std::shared_ptr<LogOutput> output =
std::make_shared<LogOutput>(stream, color);
std::atomic_store(&output_, output);
return 0;
}
/**
* \brief Set the log target
* \param[in] target Log destination
*
* \sa libcamera::logSetTarget()
*
* \return Zero on success, or a negative error code otherwise.
*/
int Logger::logSetTarget(enum LoggingTarget target)
{
switch (target) {
case LoggingTargetSyslog:
std::atomic_store(&output_, std::make_shared<LogOutput>());
break;
case LoggingTargetNone:
std::atomic_store(&output_, std::shared_ptr<LogOutput>());
break;
default:
return -EINVAL;
}
return 0;
}
/**
* \brief Set the log level
* \param[in] category Logging category
* \param[in] level Log level
*
* \sa libcamera::logSetLevel()
*/
void Logger::logSetLevel(const char *category, const char *level)
{
LogSeverity severity = parseLogLevel(level);
if (severity == LogInvalid)
return;
for (LogCategory *c : categories_) {
if (c->name() == category) {
c->setSeverity(severity);
break;
}
}
}
/**
* \brief Construct a logger
*
 * If the LIBCAMERA_LOG_FILE environment variable is not set, log to std::cerr.
 * The log messages are then colored by default. This can be overridden by
 * setting the LIBCAMERA_LOG_NO_COLOR environment variable to disable coloring.
*/
Logger::Logger()
{
bool color = !utils::secure_getenv("LIBCAMERA_LOG_NO_COLOR");
logSetStream(&std::cerr, color);
parseLogFile();
parseLogLevels();
}
/**
* \brief Parse the log output file from the environment
*
* If the LIBCAMERA_LOG_FILE environment variable is set, open the file it
* points to and redirect the logger output to it. If the environment variable
* is set to "syslog", then the logger output will be directed to syslog. Errors
* are silently ignored and don't affect the logger output (set to std::cerr by
* default).
*/
void Logger::parseLogFile()
{
const char *file = utils::secure_getenv("LIBCAMERA_LOG_FILE");
if (!file)
return;
if (!strcmp(file, "syslog")) {
logSetTarget(LoggingTargetSyslog);
return;
}
logSetFile(file, false);
}
/**
* \brief Parse the log levels from the environment
*
* The log levels are stored in the LIBCAMERA_LOG_LEVELS environment variable
* as a list of "category:level" pairs, separated by commas (','). Parse the
* variable and store the levels to configure all log categories.
*/
void Logger::parseLogLevels()
{
const char *debug = utils::secure_getenv("LIBCAMERA_LOG_LEVELS");
if (!debug)
return;
for (const char *pair = debug; *debug != '\0'; pair = debug) {
const char *comma = strchrnul(debug, ',');
size_t len = comma - pair;
/* Skip over the comma. */
debug = *comma == ',' ? comma + 1 : comma;
/* Skip to the next pair if the pair is empty. */
if (!len)
continue;
std::string category;
std::string level;
const char *colon = static_cast<const char *>(memchr(pair, ':', len));
if (!colon) {
/* 'x' is a shortcut for '*:x'. */
category = "*";
level = std::string(pair, len);
} else {
category = std::string(pair, colon - pair);
level = std::string(colon + 1, comma - colon - 1);
}
/* Both the category and the level must be specified. */
if (category.empty() || level.empty())
continue;
LogSeverity severity = parseLogLevel(level);
if (severity == LogInvalid)
continue;
levels_.push_back({ category, severity });
}
}
/**
* \brief Parse a log level string into a LogSeverity
* \param[in] level The log level string
*
* Log levels can be specified as an integer value in the range from LogDebug to
* LogFatal, or as a string corresponding to the severity name in uppercase. Any
* other value is invalid.
*
* \return The log severity, or LogInvalid if the string is invalid
*/
LogSeverity Logger::parseLogLevel(const std::string &level)
{
static const char *const names[] = {
"DEBUG",
"INFO",
"WARN",
"ERROR",
"FATAL",
};
int severity;
if (std::isdigit(level[0])) {
char *endptr;
severity = strtoul(level.c_str(), &endptr, 10);
if (*endptr != '\0' || severity > LogFatal)
severity = LogInvalid;
} else {
severity = LogInvalid;
for (unsigned int i = 0; i < std::size(names); ++i) {
if (names[i] == level) {
severity = i;
break;
}
}
}
return static_cast<LogSeverity>(severity);
}
/**
* \brief Register a log category with the logger
* \param[in] category The log category
*
* Log categories must have unique names. It is invalid to call this function
* if a log category with the same name already exists.
*/
void Logger::registerCategory(LogCategory *category)
{
categories_.push_back(category);
const std::string &name = category->name();
for (const std::pair<std::string, LogSeverity> &level : levels_) {
bool match = true;
for (unsigned int i = 0; i < level.first.size(); ++i) {
if (level.first[i] == '*')
break;
if (i >= name.size() ||
name[i] != level.first[i]) {
match = false;
break;
}
}
if (match) {
category->setSeverity(level.second);
break;
}
}
}
/**
* \brief Find an existing log category with the given name
* \param[in] name Name of the log category
* \return The pointer to the found log category or nullptr if not found
*/
LogCategory *Logger::findCategory(const char *name) const
{
if (auto it = std::find_if(categories_.begin(), categories_.end(),
[name](auto c) { return c->name() == name; });
it != categories_.end()) {
return *it;
}
return nullptr;
}
/**
* \enum LogSeverity
* Log message severity
* \var LogDebug
* Debug message
* \var LogInfo
* Informational message
* \var LogWarning
* Warning message, signals a potential issue
* \var LogError
* Error message, signals an unrecoverable issue
* \var LogFatal
* Fatal message, signals an unrecoverable issue and aborts execution
*/
/**
* \class LogCategory
* \brief A category of log message
*
* The LogCategory class represents a category of log messages, related to an
* area of the library. It groups all messages belonging to the same category,
* and is used to control the log level per group.
*/
/**
* \brief Create a new LogCategory or return an existing one
* \param[in] name Name of the log category
*
* Create and return a new LogCategory with the given name if such a category
* does not yet exist, or return the existing one.
*
* \return The pointer to the LogCategory
*/
LogCategory *LogCategory::create(const char *name)
{
static Mutex mutex_;
MutexLocker locker(mutex_);
LogCategory *category = Logger::instance()->findCategory(name);
if (!category) {
category = new LogCategory(name);
Logger::instance()->registerCategory(category);
}
return category;
}
/**
* \brief Construct a log category
* \param[in] name The category name
*/
LogCategory::LogCategory(const char *name)
: name_(name), severity_(LogSeverity::LogInfo)
{
}
/**
* \fn LogCategory::name()
* \brief Retrieve the log category name
* \return The log category name
*/
/**
* \fn LogCategory::severity()
* \brief Retrieve the severity of the log category
* \sa setSeverity()
* \return Return the severity of the log category
*/
/**
 * \brief Set the severity of the log category
 * \param[in] severity The severity level
*
* Messages of severity higher than or equal to the severity of the log category
* are printed, other messages are discarded.
*/
void LogCategory::setSeverity(LogSeverity severity)
{
severity_ = severity;
}
/**
* \brief Retrieve the default log category
*
* The default log category is named "default" and is used by the LOG() macro
* when no log category is specified.
*
* \return A reference to the default log category
*/
const LogCategory &LogCategory::defaultCategory()
{
static const LogCategory *category = LogCategory::create("default");
return *category;
}
/**
* \class LogMessage
* \brief Internal log message representation.
*
* The LogMessage class models a single message in the log. It serves as a
* helper to provide the std::ostream API for logging, and must never be used
 * directly. Use the LOG() macro instead to access the log infrastructure.
*/
/**
* \brief Construct a log message for a given category
* \param[in] fileName The file name where the message is logged from
* \param[in] line The line number where the message is logged from
* \param[in] category The log message category, controlling how the message
* will be displayed
* \param[in] severity The log message severity, controlling how the message
* will be displayed
* \param[in] prefix The log message prefix
*
* Create a log message pertaining to line \a line of file \a fileName. The
* \a severity argument sets the message severity to control whether it will be
* output or dropped. The \a prefix optionally identifies the object instance
* logging the message.
*/
LogMessage::LogMessage(const char *fileName, unsigned int line,
const LogCategory &category, LogSeverity severity,
const std::string &prefix)
: category_(category), severity_(severity), prefix_(prefix)
{
init(fileName, line);
}
/**
* \brief Move-construct a log message
* \param[in] other The other message
*
* The move constructor is meant to support the _log() functions. Thanks to copy
 * elision it will likely never be called, but C++11 only permits copy elision
 * without mandating it, unlike C++17. To avoid potential link errors depending
* on the compiler type and version, and optimization level, the move
* constructor is defined even if it will likely never be called, and ensures
* that the destructor of the \a other message will not output anything to the
* log by setting the severity to LogInvalid.
*/
LogMessage::LogMessage(LogMessage &&other)
: msgStream_(std::move(other.msgStream_)), category_(other.category_),
severity_(other.severity_)
{
other.severity_ = LogInvalid;
}
void LogMessage::init(const char *fileName, unsigned int line)
{
/* Log the timestamp, severity and file information. */
timestamp_ = utils::clock::now();
std::ostringstream ossFileInfo;
ossFileInfo << utils::basename(fileName) << ":" << line;
fileInfo_ = ossFileInfo.str();
}
LogMessage::~LogMessage()
{
/* Don't print anything if we have been moved to another LogMessage. */
if (severity_ == LogInvalid)
return;
Logger *logger = Logger::instance();
if (!logger)
return;
msgStream_ << std::endl;
if (severity_ >= category_.severity())
logger->write(*this);
if (severity_ == LogSeverity::LogFatal) {
logger->backtrace();
std::abort();
}
}
/**
* \fn std::ostream& LogMessage::stream()
*
* Data is added to a LogMessage through the stream returned by this function.
 * The stream implements the std::ostream API and can be used for logging in
 * the same way as std::cout.
*
* \return A reference to the log message stream
*/
/**
* \fn LogMessage::timestamp()
* \brief Retrieve the timestamp of the log message
* \return The timestamp of the message
*/
/**
* \fn LogMessage::severity()
* \brief Retrieve the severity of the log message
* \return The severity of the message
*/
/**
* \fn LogMessage::category()
* \brief Retrieve the category of the log message
* \return The category of the message
*/
/**
* \fn LogMessage::fileInfo()
* \brief Retrieve the file info of the log message
* \return The file info of the message
*/
/**
* \fn LogMessage::prefix()
* \brief Retrieve the prefix of the log message
* \return The prefix of the message
*/
/**
* \fn LogMessage::msg()
* \brief Retrieve the message text of the log message
* \return The message text of the message, as a string
*/
/**
* \class Loggable
* \brief Base class to support log message extensions
*
* The Loggable class allows classes to extend log messages without any change
* to the way the LOG() macro is invoked. By inheriting from Loggable and
* implementing the logPrefix() virtual function, a class can specify extra
* information to be automatically added to messages logged from class member
 * functions.
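 *
 * A minimal sketch of a class providing a log prefix (the class and its name_
 * member are illustrative):
 * \code{.cpp}
 * class MyDevice : public Loggable
 * {
 * protected:
 * 	std::string logPrefix() const override
 * 	{
 * 		return "'" + name_ + "'";
 * 	}
 *
 * private:
 * 	std::string name_;
 * };
 * \endcode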
*/
Loggable::~Loggable()
{
}
/**
* \fn Loggable::logPrefix()
* \brief Retrieve a string to be prefixed to the log message
*
* This function allows classes inheriting from the Loggable class to extend the
* logger with an object-specific prefix output right before the log message
* contents.
*
* \return A string to be prefixed to the log message
*/
/**
* \brief Create a temporary LogMessage object to log a message
* \param[in] category The log message category
* \param[in] severity The log message severity
* \param[in] fileName The file name where the message is logged from
* \param[in] line The line number where the message is logged from
*
* This function is used as a backend by the LOG() macro to create a log message
* for locations inheriting from the Loggable class.
*
* \return A log message
*/
LogMessage Loggable::_log(const LogCategory *category, LogSeverity severity,
const char *fileName, unsigned int line) const
{
return LogMessage(fileName, line,
category ? *category : LogCategory::defaultCategory(),
severity, logPrefix());
}
/**
* \brief Create a temporary LogMessage object to log a message
* \param[in] category The log message category
* \param[in] severity The log message severity
* \param[in] fileName The file name where the message is logged from
* \param[in] line The line number where the message is logged from
*
* This function is used as a backend by the LOG() macro to create a log
* message for locations not inheriting from the Loggable class.
*
* \return A log message
*/
LogMessage _log(const LogCategory *category, LogSeverity severity,
const char *fileName, unsigned int line)
{
return LogMessage(fileName, line,
category ? *category : LogCategory::defaultCategory(),
severity);
}
/**
* \def LOG_DECLARE_CATEGORY(name)
* \hideinitializer
* \brief Declare a category of log messages
*
* This macro is used to declare a log category defined in another compilation
* unit by the LOG_DEFINE_CATEGORY() macro.
*
* The LOG_DECLARE_CATEGORY() macro must be used in the libcamera namespace.
*
* \sa LogCategory
*/
/**
* \def LOG_DEFINE_CATEGORY(name)
* \hideinitializer
* \brief Define a category of log messages
*
* This macro is used to define a log category that can then be used with the
* LOGC() macro. Category names shall be unique, if a category is shared between
* compilation units, it shall be defined in one compilation unit only and
* declared with LOG_DECLARE_CATEGORY() in the other compilation units.
*
* The LOG_DEFINE_CATEGORY() macro must be used in the libcamera namespace.
*
* \sa LogCategory
*/
/**
* \def LOG(category, severity)
* \hideinitializer
* \brief Log a message
* \param[in] category Category (optional)
* \param[in] severity Severity
*
* Return an std::ostream reference to which a message can be logged using the
* iostream API. The \a category, if specified, sets the message category. When
* absent the default category is used. The \a severity controls whether the
* message is printed or discarded, depending on the log level for the category.
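 *
 * Example usage, assuming a Camera category has been defined with
 * LOG_DEFINE_CATEGORY() or declared with LOG_DECLARE_CATEGORY():
 * \code{.cpp}
 * LOG(Camera, Debug) << "Streams configured";
 * \endcode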
*
* If the severity is set to Fatal, execution is aborted and the program
* terminates immediately after printing the message.
*
* \warning Logging from the destructor of a global object, either directly or
* indirectly, results in undefined behaviour.
*
* \todo Allow logging from destructors of global objects to the largest
* possible extent
*/
/**
* \def ASSERT(condition)
* \hideinitializer
* \brief Abort program execution if assertion fails
*
* If \a condition is false, ASSERT() logs an error message with the Fatal log
* level and aborts program execution.
*
* If the macro NDEBUG is defined before including log.h, ASSERT() generates no
* code.
*
* Using conditions that have side effects with ASSERT() is not recommended, as
* these effects would depend on whether NDEBUG is defined or not. Similarly,
* ASSERT() should not be used to check for errors that can occur under normal
* conditions as those checks would then be removed when compiling with NDEBUG.
*/
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera | repos/libcamera/src/libcamera/base/utils.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* Miscellaneous utility functions
*/
#include <libcamera/base/utils.h>
#include <iomanip>
#include <locale.h>
#include <sstream>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
/**
* \file base/utils.h
* \brief Miscellaneous utility functions
*/
namespace libcamera {
namespace utils {
/**
* \brief Strip the directory prefix from the path
* \param[in] path The path to process
*
* basename is implemented differently across different C libraries. This
* implementation matches the one provided by the GNU libc, and does not
* modify its input parameter.
*
* \return A pointer within the given path without any leading directory
* components.
*/
const char *basename(const char *path)
{
const char *base = strrchr(path, '/');
return base ? base + 1 : path;
}
/**
* \brief Get an environment variable
* \param[in] name The name of the variable to return
*
* The environment list is searched to find the variable 'name', and the
* corresponding string is returned.
*
* If 'secure execution' is required then this function always returns NULL to
* avoid vulnerabilities that could occur if set-user-ID or set-group-ID
* programs accidentally trust the environment.
*
* \note Not all platforms may support the features required to implement the
* secure execution check, in which case this function behaves as getenv(). A
* notable example of this is Android.
*
* \return A pointer to the value in the environment or NULL if the requested
* environment variable doesn't exist or if secure execution is required.
*/
char *secure_getenv(const char *name)
{
#if HAVE_SECURE_GETENV
return ::secure_getenv(name);
#else
#if HAVE_ISSETUGID
if (issetugid())
return NULL;
#endif
return getenv(name);
#endif
}
/**
* \brief Identify the dirname portion of a path
* \param[in] path The full path to parse
*
* This function conforms with the behaviour of the %dirname() function as
* defined by POSIX.
*
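 * For example (illustrative values):
 *
 * \code{.cpp}
 * utils::dirname("/usr/lib/libcamera.so"); // returns "/usr/lib"
 * utils::dirname("libcamera.so");          // returns "."
 * utils::dirname("//");                    // returns "/"
 * \endcode
 *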
* \return A string of the directory component of the path
*/
std::string dirname(const std::string &path)
{
if (path.empty())
return ".";
/*
* Skip all trailing slashes. If the path is only made of slashes,
* return "/".
*/
size_t pos = path.size() - 1;
while (path[pos] == '/') {
if (!pos)
return "/";
pos--;
}
/*
* Find the previous slash. If the path contains no non-trailing slash,
* return ".".
*/
while (path[pos] != '/') {
if (!pos)
return ".";
pos--;
}
/*
* Return the directory name up to (but not including) any trailing
* slash. If this would result in an empty string, return "/".
*/
while (path[pos] == '/') {
if (!pos)
return "/";
pos--;
}
return path.substr(0, pos + 1);
}
/**
* \fn std::vector<typename T::key_type> map_keys(const T &map)
* \brief Retrieve the keys of a std::map<>
* \param[in] map The map whose keys to retrieve
* \return A std::vector<> containing the keys of \a map
*/
/**
* \fn libcamera::utils::set_overlap(InputIt1 first1, InputIt1 last1,
* InputIt2 first2, InputIt2 last2)
* \brief Count the number of elements in the intersection of two ranges
*
* Count the number of elements in the intersection of the sorted ranges [\a
 * first1, \a last1) and [\a first2, \a last2). Elements are compared using
* operator< and the ranges must be sorted with respect to the same.
*
* \return The number of elements in the intersection of the two ranges
*/
/**
* \typedef clock
* \brief The libcamera clock (monotonic)
*/
/**
* \typedef duration
* \brief The libcamera duration related to libcamera::utils::clock
*/
/**
* \typedef time_point
* \brief The libcamera time point related to libcamera::utils::clock
*/
/**
* \brief Convert a duration to a timespec
* \param[in] value The duration
* \return A timespec expressing the duration
*/
struct timespec duration_to_timespec(const duration &value)
{
uint64_t nsecs = std::chrono::duration_cast<std::chrono::nanoseconds>(value).count();
struct timespec ts;
ts.tv_sec = nsecs / 1000000000ULL;
ts.tv_nsec = nsecs % 1000000000ULL;
return ts;
}
/**
* \brief Convert a time point to a string representation
* \param[in] time The time point
* \return A string representing the time point in hh:mm:ss.nanoseconds format
*/
std::string time_point_to_string(const time_point &time)
{
uint64_t nsecs = std::chrono::duration_cast<std::chrono::nanoseconds>(time.time_since_epoch()).count();
unsigned int secs = nsecs / 1000000000ULL;
std::ostringstream ossTimestamp;
ossTimestamp.fill('0');
ossTimestamp << secs / (60 * 60) << ":"
<< std::setw(2) << (secs / 60) % 60 << ":"
<< std::setw(2) << secs % 60 << "."
<< std::setw(9) << nsecs % 1000000000ULL;
return ossTimestamp.str();
}
std::basic_ostream<char, std::char_traits<char>> &
operator<<(std::basic_ostream<char, std::char_traits<char>> &stream, const _hex &h)
{
stream << "0x";
std::ostream::fmtflags flags = stream.setf(std::ios_base::hex,
std::ios_base::basefield);
std::streamsize width = stream.width(h.w);
char fill = stream.fill('0');
stream << h.v;
stream.flags(flags);
stream.width(width);
stream.fill(fill);
return stream;
}
/**
* \fn hex(T value, unsigned int width)
 * \brief Write a hexadecimal value to an output stream
* \param value The value
* \param width The width
*
* Return an object of unspecified type such that, if \a os is the name of an
* output stream of type std::ostream, and T is an integer type, then the
* expression
*
* \code{.cpp}
* os << utils::hex(value)
* \endcode
*
* will output the \a value to the stream in hexadecimal form with the base
* prefix and the filling character set to '0'. The field width is set to \a
* width if specified to a non-zero value, or to the native width of type T
* otherwise. The \a os stream configuration is not modified.
*/
/**
* \brief Copy a string with a size limit
* \param[in] dst The destination string
* \param[in] src The source string
* \param[in] size The size of the destination string
*
* This function copies the null-terminated string \a src to \a dst with a limit
* of \a size - 1 characters, and null-terminates the result if \a size is
* larger than 0. If \a src is larger than \a size - 1, \a dst is truncated.
*
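 * Truncation can be detected by comparing the return value against \a size, as
 * in this illustrative sketch (where \c input is a null-terminated string):
 *
 * \code{.cpp}
 * char buf[8];
 * if (utils::strlcpy(buf, input, sizeof(buf)) >= sizeof(buf)) {
 *         // The input didn't fit in buf and was truncated.
 * }
 * \endcode
 *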
* \return The size of \a src
*/
size_t strlcpy(char *dst, const char *src, size_t size)
{
if (size) {
strncpy(dst, src, size);
dst[size - 1] = '\0';
}
return strlen(src);
}
details::StringSplitter::StringSplitter(const std::string &str, const std::string &delim)
: str_(str), delim_(delim)
{
}
details::StringSplitter::iterator::iterator(const details::StringSplitter *ss, std::string::size_type pos)
: ss_(ss), pos_(pos)
{
next_ = ss_->str_.find(ss_->delim_, pos_);
}
details::StringSplitter::iterator &details::StringSplitter::iterator::operator++()
{
pos_ = next_;
if (pos_ != std::string::npos) {
pos_ += ss_->delim_.length();
next_ = ss_->str_.find(ss_->delim_, pos_);
}
return *this;
}
std::string details::StringSplitter::iterator::operator*() const
{
std::string::size_type count;
count = next_ != std::string::npos ? next_ - pos_ : next_;
return ss_->str_.substr(pos_, count);
}
bool details::StringSplitter::iterator::operator!=(const details::StringSplitter::iterator &other) const
{
return pos_ != other.pos_;
}
details::StringSplitter::iterator details::StringSplitter::begin() const
{
return iterator(this, 0);
}
details::StringSplitter::iterator details::StringSplitter::end() const
{
return iterator(this, std::string::npos);
}
/**
* \fn template<typename Container, typename UnaryOp> \
* std::string utils::join(const Container &items, const std::string &sep, UnaryOp op)
* \brief Join elements of a container in a string with a separator
* \param[in] items The container
* \param[in] sep The separator to add between elements
* \param[in] op A function that converts individual elements to strings
*
* This function joins all elements in the \a items container into a string and
* returns it. The \a sep separator is added between elements. If the container
* elements are not implicitly convertible to std::string, the \a op function
* shall be provided to perform conversion of elements to std::string.
*
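 * For example (illustrative sketch):
 *
 * \code{.cpp}
 * std::vector<int> values{ 1, 2, 3 };
 * std::string s = utils::join(values, ", ",
 *                             [](int v) { return std::to_string(v); });
 * // s == "1, 2, 3"
 * \endcode
 *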
* \return A string that concatenates all elements in the container
*/
/**
* \fn split(const std::string &str, const std::string &delim)
* \brief Split a string based on a delimiter
* \param[in] str The string to split
* \param[in] delim The delimiter string
*
* This function splits the string \a str into substrings based on the
* delimiter \a delim. It returns an object of unspecified type that can be
* used in a range-based for loop and yields the substrings in sequence.
*
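 * For example (illustrative sketch, where process() is a hypothetical
 * consumer of the substrings):
 *
 * \code{.cpp}
 * for (const std::string &part : utils::split("one,two,three", ","))
 *         process(part); // "one", then "two", then "three"
 * \endcode
 *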
* \return An object that can be used in a range-based for loop to iterate over
* the substrings
*/
details::StringSplitter split(const std::string &str, const std::string &delim)
{
/** \todo Try to avoid copies of str and delim */
return details::StringSplitter(str, delim);
}
/**
* \brief Remove any non-ASCII characters from a string
* \param[in] str The string to strip
*
* Remove all non-ASCII characters from a string.
*
* \return A string equal to \a str stripped out of all non-ASCII characters
*/
std::string toAscii(const std::string &str)
{
std::string ret;
for (const char &c : str)
if (!(c & 0x80))
ret += c;
return ret;
}
/**
* \fn alignDown(unsigned int value, unsigned int alignment)
* \brief Align \a value down to \a alignment
* \param[in] value The value to align
* \param[in] alignment The alignment
* \return The value rounded down to the nearest multiple of \a alignment
*/
/**
* \fn alignUp(unsigned int value, unsigned int alignment)
* \brief Align \a value up to \a alignment
* \param[in] value The value to align
* \param[in] alignment The alignment
* \return The value rounded up to the nearest multiple of \a alignment
*/
/**
* \fn reverse(T &&iterable)
* \brief Wrap an iterable to reverse iteration in a range-based loop
* \param[in] iterable The iterable
* \return A value of unspecified type that, when used in a range-based for
* loop, will cause the loop to iterate over the \a iterable in reverse order
*/
/**
* \fn enumerate(T &iterable)
* \brief Wrap an iterable to enumerate index and value in a range-based loop
* \param[in] iterable The iterable
*
* Range-based for loops are handy and widely preferred in C++, but are limited
* in their ability to replace for loops that require access to a loop counter.
* The enumerate() function solves this problem by wrapping the \a iterable in
* an adapter that, when used as a range-expression, will provide iterators
* whose value_type is a pair of index and value reference.
*
* The iterable must support std::begin() and std::end(). This includes all
* containers provided by the standard C++ library, as well as C-style arrays.
*
* A typical usage pattern would use structured binding to store the index and
* value in two separate variables:
*
* \code{.cpp}
* std::vector<int> values = ...;
*
* for (auto [index, value] : utils::enumerate(values)) {
* ...
* }
* \endcode
*
* Note that the argument to enumerate() has to be an lvalue, as the lifetime
* of any rvalue would not be extended to the whole for loop. The compiler will
* complain if an rvalue is passed to the function, in which case it should be
* stored in a local variable before the loop.
*
* \return A value of unspecified type that, when used in a range-based for
* loop, iterates over an indexed view of the \a iterable
*/
/**
* \class Duration
 * \brief Helper class derived from std::chrono::duration that represents a time
* duration in nanoseconds with double precision
*/
/**
* \fn Duration::Duration(const Rep &r)
* \brief Construct a Duration with \a r ticks
* \param[in] r The number of ticks
*
* The constructed \a Duration object is internally represented in double
* precision with \a r nanoseconds ticks.
*/
/**
* \fn Duration::Duration(const std::chrono::duration<Rep, Period> &d)
* \brief Construct a Duration by converting an arbitrary std::chrono::duration
* \param[in] d The std::chrono::duration object to convert from
*
* The constructed \a Duration object is internally represented in double
* precision with nanoseconds ticks.
*/
/**
* \fn Duration::get<Period>()
* \brief Retrieve the tick count, converted to the timebase provided by the
* template argument Period of type \a std::ratio
*
* A typical usage example is given below:
*
* \code{.cpp}
* utils::Duration d = 5s;
* double d_in_ms = d.get<std::milli>();
* \endcode
*
* \return The tick count of the Duration expressed in \a Period
*/
/**
* \fn Duration::operator bool()
* \brief Boolean operator to test if a \a Duration holds a non-zero time value
*
* \return True if \a Duration is a non-zero time value, False otherwise
*/
/**
* \fn abs_diff(const T& a, const T& b)
* \brief Calculates the absolute value of the difference between two elements
* \param[in] a The first element
* \param[in] b The second element
*
* This function calculates the absolute value of the difference between two
* elements of the same type, in such a way that a negative value will never
* occur during the calculation.
*
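 * This is particularly useful for unsigned types, where computing a - b
 * directly could wrap around. For example (illustrative values):
 *
 * \code{.cpp}
 * unsigned int a = 3, b = 7;
 * unsigned int d = utils::abs_diff(a, b); // d == 4
 * \endcode
 *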
* This is inspired by the std::abs_diff() candidate proposed in N4318
* (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2014/n4318.pdf).
*
* \return The absolute value of the difference of the two parameters \a a and
* \a b
*/
#if HAVE_LOCALE_T
namespace {
/*
* RAII wrapper around locale_t instances, to support global locale instances
* without leaking memory.
*/
class Locale
{
public:
Locale(const char *locale)
{
locale_ = newlocale(LC_ALL_MASK, locale, static_cast<locale_t>(0));
}
~Locale()
{
freelocale(locale_);
}
locale_t locale() { return locale_; }
private:
locale_t locale_;
};
Locale cLocale("C");
} /* namespace */
#endif /* HAVE_LOCALE_T */
/**
* \brief Convert a string to a double independently of the current locale
* \param[in] nptr The string to convert
* \param[out] endptr Pointer to trailing portion of the string after conversion
*
* This function is a locale-independent version of the std::strtod() function.
* It behaves as the standard function, but uses the "C" locale instead of the
* current locale.
*
* \return The converted value, if any, or 0.0 if the conversion failed.
*/
double strtod(const char *__restrict nptr, char **__restrict endptr)
{
#if HAVE_LOCALE_T
return strtod_l(nptr, endptr, cLocale.locale());
#else
/*
* If the libc implementation doesn't provide locale object support,
* assume that strtod() is locale-independent.
*/
return ::strtod(nptr, endptr);
#endif
}
/**
* \fn to_underlying(Enum e)
 * \brief Convert an enumeration to its underlying type
* \param[in] e Enumeration value to convert
*
* This function is equivalent to the C++23 std::to_underlying().
*
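 * For example (illustrative sketch):
 *
 * \code{.cpp}
 * enum class Mode : uint8_t { Fast = 1 };
 * uint8_t raw = utils::to_underlying(Mode::Fast); // raw == 1
 * \endcode
 *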
* \return The value of e converted to its underlying type
*/
} /* namespace utils */
#ifndef __DOXYGEN__
template<class CharT, class Traits>
std::basic_ostream<CharT, Traits> &operator<<(std::basic_ostream<CharT, Traits> &os,
const utils::Duration &d)
{
std::basic_ostringstream<CharT, Traits> s;
s.flags(os.flags());
s.imbue(os.getloc());
s.setf(std::ios_base::fixed, std::ios_base::floatfield);
s.precision(2);
s << d.get<std::micro>() << "us";
return os << s.str();
}
template
std::basic_ostream<char, std::char_traits<char>> &
operator<< <char, std::char_traits<char>>(std::basic_ostream<char, std::char_traits<char>> &os,
const utils::Duration &d);
#endif
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera | repos/libcamera/src/libcamera/base/object.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* Base object
*/
#include <libcamera/base/object.h>
#include <algorithm>
#include <libcamera/base/log.h>
#include <libcamera/base/message.h>
#include <libcamera/base/semaphore.h>
#include <libcamera/base/signal.h>
#include <libcamera/base/thread.h>
#include <libcamera/base/utils.h>
/**
* \file base/object.h
* \brief Base object to support automatic signal disconnection
*/
namespace libcamera {
LOG_DEFINE_CATEGORY(Object)
/**
* \class Object
* \brief Base object to support automatic signal disconnection
*
* The Object class simplifies signal/slot handling for classes implementing
* slots. By inheriting from Object, an object is automatically disconnected
* from all connected signals when it gets destroyed.
*
* Object instances are bound to the thread of their parent, or the thread in
* which they're created when they have no parent. When a message is posted to
* an object, its handler will run in the object's thread. This allows
* implementing easy message passing between threads by inheriting from the
* Object class.
*
* Deleting an object from a thread other than the one the object is bound to is
* unsafe, unless the caller ensures that the object's thread is stopped and no
* parent or child of the object gets deleted concurrently. See
* Object::~Object() for more information.
*
* Object slots connected to signals will also run in the context of the
* object's thread, regardless of whether the signal is emitted in the same or
* in another thread.
*
* Objects can be connected to multiple signals, but they can only be connected
* to each signal once. Attempting to create multiple concurrent connections
 * between the same signal and the same Object (to either the same or different
* slots of the object) will cause an assertion failure. While it would be
* possible to allow the implementation to let objects connect to the same
* signal multiple times, there are no expected use cases for this in libcamera
* and this behaviour is restricted to favour defensive programming.
*
* \sa Message, Signal, Thread
*/
/**
* \brief Construct an Object instance
* \param[in] parent The object parent
*
* The new Object instance is bound to the thread of its \a parent, or to the
* current thread if the \a parent is nullptr.
*/
Object::Object(Object *parent)
: parent_(parent), pendingMessages_(0)
{
thread_ = parent ? parent->thread() : Thread::current();
if (parent)
parent->children_.push_back(this);
}
/**
* \brief Destroy an Object instance
*
* Deleting an Object automatically disconnects all signals from the Object's
* slots. All the Object's children are made orphan, but stay bound to their
* current thread.
*
* Object instances shall be destroyed from the thread they are bound to,
* otherwise undefined behaviour may occur. If deletion of an Object needs to
* be scheduled from a different thread, deleteLater() shall be used.
*
* As an exception to this rule, Object instances may be deleted from a
* different thread if the thread the instance is bound to is stopped through
* the whole duration of the object's destruction, *and* the parent and children
* of the object do not get deleted concurrently. The caller is responsible for
* fulfilling those requirements.
*
* In all cases Object instances shall be deleted before the Thread they are
* bound to.
*/
Object::~Object()
{
ASSERT(Thread::current() == thread_ || !thread_->isRunning());
/*
* Move signals to a private list to avoid concurrent iteration and
* deletion of items from Signal::disconnect().
*/
std::list<SignalBase *> signals(std::move(signals_));
for (SignalBase *signal : signals)
signal->disconnect(this);
if (pendingMessages_)
thread()->removeMessages(this);
if (parent_) {
auto it = std::find(parent_->children_.begin(),
parent_->children_.end(), this);
ASSERT(it != parent_->children_.end());
parent_->children_.erase(it);
}
for (auto child : children_)
child->parent_ = nullptr;
}
/**
* \brief Schedule deletion of the instance in the thread it belongs to
*
* This function schedules deletion of the Object when control returns to the
* event loop that the object belongs to. This ensures the object is destroyed
* from the right context, as required by the libcamera threading model.
*
* If this function is called before the thread's event loop is started or after
* it has stopped, the object will be deleted when the event loop (re)starts. If
* this never occurs, the object will be leaked.
*
* Deferred deletion can be used to control the destruction context with shared
* pointers. An object managed with shared pointers is deleted when the last
 * reference is destroyed, which makes it difficult to ensure through software
* design which context the deletion will take place in. With a custom deleter
* for the shared pointer using deleteLater(), the deletion can be guaranteed to
* happen in the thread the object is bound to.
*
* \code{.cpp}
* std::shared_ptr<MyObject> createObject()
* {
* struct Deleter : std::default_delete<MyObject> {
* void operator()(MyObject *obj)
* {
* obj->deleteLater();
* }
* };
*
* MyObject *obj = new MyObject();
*
* return std::shared_ptr<MyObject>(obj, Deleter());
* }
* \endcode
*
* \context This function is \threadsafe.
*/
void Object::deleteLater()
{
postMessage(std::make_unique<Message>(Message::DeferredDelete));
}
/**
* \brief Post a message to the object's thread
* \param[in] msg The message
*
* This function posts the message \a msg to the message queue of the object's
* thread, to be delivered to the object through the message() function in the
* context of its thread. Message ownership is passed to the thread, and the
* message will be deleted after being delivered.
*
* Messages are delivered through the thread's event loop. If the thread is not
* running its event loop the message will not be delivered until the event
* loop gets started.
*
* Due to their asynchronous nature, threads do not provide any guarantee that
* all posted messages are delivered before the thread is stopped. See
* \ref thread-stop for additional information.
*
* \context This function is \threadsafe.
*/
void Object::postMessage(std::unique_ptr<Message> msg)
{
thread()->postMessage(std::move(msg), this);
}
/**
* \brief Message handler for the object
* \param[in] msg The message
*
* This virtual function receives messages for the object. It is called in the
* context of the object's thread, and can be overridden to process custom
* messages. The parent Object::message() function shall be called for any
* message not handled by the override function.
*
* The message \a msg is valid only for the duration of the call, no reference
* to it shall be kept after this function returns.
*/
void Object::message(Message *msg)
{
switch (msg->type()) {
case Message::InvokeMessage: {
/*
* A static_cast should be enough, but gcc 10 and 11 choke on
* it in release mode (with -O2 or -O3).
*/
InvokeMessage *iMsg = dynamic_cast<InvokeMessage *>(msg);
Semaphore *semaphore = iMsg->semaphore();
iMsg->invoke();
if (semaphore)
semaphore->release();
break;
}
case Message::DeferredDelete:
delete this;
break;
default:
break;
}
}
/**
* \fn Object::assertThreadBound()
* \brief Check if the caller complies with thread-bound constraints
* \param[in] message The message to be printed on error
*
* This function verifies the calling constraints required by the \threadbound
* definition. It shall be called at the beginning of member functions of an
* Object subclass that are explicitly marked as thread-bound in their
* documentation.
*
* If the thread-bound constraints are not met, the function prints \a message
* as an error message. For debug builds, it additionally causes an assertion
* error.
*
* \todo Verify the thread-bound requirements for functions marked as
* thread-bound at the class level.
*
* \return True if the call is thread-bound compliant, false otherwise
*/
bool Object::assertThreadBound(const char *message)
{
if (Thread::current() == thread_)
return true;
LOG(Object, Error) << message;
ASSERT(false);
return false;
}
/**
* \fn R Object::invokeMethod()
* \brief Invoke a method asynchronously on an Object instance
* \param[in] func The object method to invoke
* \param[in] type Connection type for method invocation
* \param[in] args The method arguments
*
* This function invokes the member method \a func with arguments \a args, based
* on the connection \a type. Depending on the type, the method will be called
* synchronously in the same thread or asynchronously in the object's thread.
*
* Arguments \a args passed by value or reference are copied, while pointers
* are passed untouched. The caller shall ensure that any pointer argument
* remains valid until the method is invoked.
*
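 * A minimal usage sketch (MyObject and its process() member function are
 * hypothetical):
 *
 * \code{.cpp}
 * class MyObject : public Object
 * {
 * public:
 *         void process(int value);
 * };
 *
 * MyObject *myObject = ...;
 * myObject->invokeMethod(&MyObject::process, ConnectionTypeQueued, 42);
 * \endcode
 *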
* Due to the asynchronous nature of threads, functions invoked asynchronously
* with the ConnectionTypeQueued type are not guaranteed to be called before
* the thread is stopped. See \ref thread-stop for additional information.
*
* \context This function is \threadsafe.
*
* \return For connection types ConnectionTypeDirect and
* ConnectionTypeBlocking, return the return value of the invoked method. For
* connection type ConnectionTypeQueued, return a default-constructed R value.
*/
/**
* \fn Object::thread()
* \brief Retrieve the thread the object is bound to
* \context This function is \threadsafe.
* \return The thread the object is bound to
*/
/**
* \brief Move the object and all its children to a different thread
* \param[in] thread The target thread
*
* This function moves the object and all its children from the current thread
* to the new \a thread.
*
* Before the object is moved, a Message::ThreadMoveMessage message is sent to
 * it. The message() function can be reimplemented in derived classes to be
* notified of the upcoming thread move and perform any required processing.
*
* Moving an object that has a parent is not allowed, and causes undefined
* behaviour.
*
* \context This function is \threadbound.
*/
void Object::moveToThread(Thread *thread)
{
if (!assertThreadBound("Object can't be moved from another thread"))
return;
if (thread_ == thread)
return;
if (parent_) {
LOG(Object, Error)
<< "Moving object to thread with a parent is not permitted";
return;
}
notifyThreadMove();
thread->moveObject(this);
}
void Object::notifyThreadMove()
{
Message msg(Message::ThreadMoveMessage);
message(&msg);
for (auto child : children_)
child->notifyThreadMove();
}
/**
* \fn Object::parent()
* \brief Retrieve the object's parent
* \return The object's parent
*/
void Object::connect(SignalBase *signal)
{
/*
* Connecting the same signal to an object multiple times is not
* supported.
*/
ASSERT(std::find(signals_.begin(), signals_.end(), signal) == signals_.end());
signals_.push_back(signal);
}
void Object::disconnect(SignalBase *signal)
{
for (auto iter = signals_.begin(); iter != signals_.end(); ) {
if (*iter == signal)
iter = signals_.erase(iter);
else
iter++;
}
}
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera | repos/libcamera/src/libcamera/base/signal.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* Signal & slot implementation
*/
#include <libcamera/base/signal.h>
#include <libcamera/base/mutex.h>
#include <libcamera/base/object.h>
/**
* \file base/signal.h
* \brief Signal & slot implementation
*/
namespace libcamera {
namespace {
/*
* Mutex to protect the SignalBase::slots_ and Object::signals_ lists. If lock
* contention needs to be decreased, this could be replaced with locks in
* Object and SignalBase, or with a mutex pool.
*/
Mutex signalsLock;
} /* namespace */
void SignalBase::connect(BoundMethodBase *slot)
{
MutexLocker locker(signalsLock);
Object *object = slot->object();
if (object)
object->connect(this);
slots_.push_back(slot);
}
void SignalBase::disconnect(Object *object)
{
disconnect([object](SlotList::iterator &iter) {
return (*iter)->match(object);
});
}
void SignalBase::disconnect(std::function<bool(SlotList::iterator &)> match)
{
MutexLocker locker(signalsLock);
for (auto iter = slots_.begin(); iter != slots_.end(); ) {
if (match(iter)) {
Object *object = (*iter)->object();
if (object)
object->disconnect(this);
delete *iter;
iter = slots_.erase(iter);
} else {
++iter;
}
}
}
SignalBase::SlotList SignalBase::slots()
{
MutexLocker locker(signalsLock);
return slots_;
}
/**
* \class Signal
* \brief Generic signal and slot communication mechanism
*
* Signals and slots are a language construct aimed at communication between
* objects through the observer pattern without the need for boilerplate code.
* See http://doc.qt.io/qt-6/signalsandslots.html for more information.
*
* Signals model events that can be observed from objects unrelated to the event
* source. Slots are functions that are called in response to a signal. Signals
* can be connected to and disconnected from slots dynamically at runtime. When
* a signal is emitted, all connected slots are called sequentially in the order
* they have been connected.
*
* Signals are defined with zero, one or more typed parameters. They are emitted
* with a value for each of the parameters, and those values are passed to the
* connected slots.
*
* Slots are normal static or class member functions. In order to be connected
* to a signal, their signature must match the signal type (taking the same
* arguments as the signal and returning void).
*
* Connecting a signal to a slot results in the slot being called with the
* arguments passed to the emit() function when the signal is emitted. Multiple
 * slots can be connected to the same signal, and multiple signals can be connected
* to the same slot.
*
* When a slot belongs to an instance of the Object class, the slot is called
* in the context of the thread that the object is bound to. If the signal is
* emitted from the same thread, the slot will be called synchronously, before
* Signal::emit() returns. If the signal is emitted from a different thread,
* the slot will be called asynchronously from the object's thread's event
* loop, after the Signal::emit() function returns, with a copy of the signal's
* arguments. The emitter shall thus ensure that any pointer or reference
* passed through the signal will remain valid after the signal is emitted.
*
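 * A minimal usage sketch (the Producer and Consumer classes are hypothetical):
 *
 * \code{.cpp}
 * class Producer
 * {
 * public:
 *         Signal<int> valueChanged;
 * };
 *
 * class Consumer : public Object
 * {
 * public:
 *         void handleValue(int value);
 * };
 *
 * Producer producer;
 * Consumer consumer;
 * producer.valueChanged.connect(&consumer, &Consumer::handleValue);
 * producer.valueChanged.emit(42);
 * \endcode
 *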
* Duplicate connections between a signal and a slot are not expected and use of
* the Object class to manage signals will enforce this restriction.
*/
/**
* \fn Signal::connect(T *object, R (T::*func)(Args...))
* \brief Connect the signal to a member function slot
* \param[in] object The slot object pointer
* \param[in] func The slot member function
*
* If the typename T inherits from Object, the signal will be automatically
* disconnected from the \a func slot of \a object when \a object is destroyed.
* Otherwise the caller shall disconnect signals manually before destroying \a
* object.
*
* \context This function is \threadsafe.
*/
/**
* \fn Signal::connect(T *object, Func func)
* \brief Connect the signal to a function object slot
* \param[in] object The slot object pointer
* \param[in] func The function object
*
* If the typename T inherits from Object, the signal will be automatically
* disconnected from the \a func slot of \a object when \a object is destroyed.
* Otherwise the caller shall disconnect signals manually before destroying \a
* object.
*
* The function object is typically a lambda function, but may be any object
* that satisfies the FunctionObject named requirements. The types of the
* function object arguments shall match the types of the signal arguments.
*
 * No matching disconnect() function exists, as it wouldn't be possible to pass
* to a disconnect() function the same lambda that was passed to connect(). The
* connection created by this function can not be removed selectively if the
 * signal is connected to multiple slots of the same receiver, but may
* otherwise be removed using the disconnect(T *object) function.
*
* \context This function is \threadsafe.
*/
/**
* \fn Signal::connect(R (*func)(Args...))
* \brief Connect the signal to a static function slot
* \param[in] func The slot static function
*
* \context This function is \threadsafe.
*/
/**
* \fn Signal::disconnect()
* \brief Disconnect the signal from all slots
*
* \context This function is \threadsafe.
*/
/**
* \fn Signal::disconnect(T *object)
* \brief Disconnect the signal from all slots of the \a object
* \param[in] object The object pointer whose slots to disconnect
*
* \context This function is \threadsafe.
*/
/**
* \fn Signal::disconnect(T *object, R (T::*func)(Args...))
* \brief Disconnect the signal from the \a object slot member function \a func
* \param[in] object The object pointer whose slots to disconnect
* \param[in] func The slot member function to disconnect
*
* \context This function is \threadsafe.
*/
/**
* \fn Signal::disconnect(R (*func)(Args...))
* \brief Disconnect the signal from the slot static function \a func
* \param[in] func The slot static function to disconnect
*
* \context This function is \threadsafe.
*/
/**
* \fn Signal::emit(Args... args)
* \brief Emit the signal and call all connected slots
* \param args The arguments passed to the connected slots
*
* Emitting a signal calls all connected slots synchronously and sequentially in
* the order the slots have been connected. The arguments passed to the emit()
* function are passed to the slot functions unchanged. If a slot modifies one
* of the arguments (when passed by pointer or reference), the modification is
* thus visible to all subsequently called slots.
*
* This function is not \threadsafe, but thread-safety is guaranteed against
* concurrent connect() and disconnect() calls.
*/
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera | repos/libcamera/src/libcamera/base/file.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2020, Google Inc.
*
* File I/O operations
*/
#include <libcamera/base/file.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <libcamera/base/log.h>
#include <libcamera/base/shared_fd.h>
/**
* \file base/file.h
* \brief File I/O operations
*/
namespace libcamera {
LOG_DEFINE_CATEGORY(File)
/**
* \class File
* \brief Interface for I/O operations on files
*
* The File class provides an interface to perform I/O operations on files. It
* wraps opening, closing and mapping files in memory, and handles the cleaning
* of allocated resources.
*
* File instances are usually constructed with a file name, but the name can be
* set later through the setFileName() function. Instances are not automatically
 * opened when constructed, and shall be opened explicitly with open().
*
* Files can be mapped to the process memory with map(). Mapped regions can be
 * unmapped manually with unmap(), and are automatically unmapped when the File
* is destroyed or when it is used to reference another file with setFileName().
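 *
 * A minimal usage sketch (the file path is illustrative):
 *
 * \code{.cpp}
 * File file("/tmp/data.bin");
 * if (file.open(File::OpenModeFlag::ReadOnly)) {
 *         Span<uint8_t> data = file.map();
 *         // ... use the mapped data ...
 * }
 * \endcode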
*/
/**
* \enum File::MapFlag
* \brief Flags for the File::map() function
* \var File::MapFlag::NoOption
* \brief No option (used as default value)
* \var File::MapFlag::Private
* \brief The memory region is mapped as private, changes are not reflected in
 * the file contents
*/
/**
* \typedef File::MapFlags
* \brief A bitwise combination of File::MapFlag values
*/
/**
* \enum File::OpenModeFlag
* \brief Mode in which a file is opened
* \var File::OpenModeFlag::NotOpen
* \brief The file is not open
* \var File::OpenModeFlag::ReadOnly
* \brief The file is open for reading
* \var File::OpenModeFlag::WriteOnly
* \brief The file is open for writing
* \var File::OpenModeFlag::ReadWrite
* \brief The file is open for reading and writing
*/
/**
* \typedef File::OpenMode
* \brief A bitwise combination of File::OpenModeFlag values
*/
/**
* \brief Construct a File to represent the file \a name
* \param[in] name The file name
*
* Upon construction the File object is closed and shall be opened with open()
* before performing I/O operations.
*/
File::File(const std::string &name)
: name_(name), mode_(OpenModeFlag::NotOpen), error_(0)
{
}
/**
* \brief Construct a File without an associated name
*
* Before being used for any purpose, the file name shall be set with
* setFileName().
*/
File::File()
: mode_(OpenModeFlag::NotOpen), error_(0)
{
}
/**
* \brief Destroy a File instance
*
* Any memory mapping associated with the File is unmapped, and the File is
* closed if it is open.
*/
File::~File()
{
unmapAll();
close();
}
/**
* \fn const std::string &File::fileName() const
* \brief Retrieve the file name
* \return The file name
*/
/**
* \brief Set the name of the file
* \param[in] name The name of the file
*
* The \a name can contain an absolute path, a relative path or no path at all.
* Calling this function on an open file results in undefined behaviour.
*
* Any memory mapping associated with the File is unmapped.
*/
void File::setFileName(const std::string &name)
{
if (isOpen()) {
LOG(File, Error)
<< "Can't set file name on already open file " << name_;
return;
}
unmapAll();
name_ = name;
}
/**
* \brief Check if the file specified by fileName() exists
*
* This function checks if the file specified by fileName() exists. The File
* instance doesn't need to be open to check for file existence, and this
* function may return false even if the file is open, if it was deleted from
* the file system.
*
 * \return True if the file exists, false otherwise
*/
bool File::exists() const
{
return exists(name_);
}
/**
* \brief Open the file in the given mode
* \param[in] mode The open mode
*
* This function opens the file specified by fileName() in \a mode. If the file
* doesn't exist and the mode is WriteOnly or ReadWrite, this function will
* attempt to create the file with initial permissions set to 0666 (modified by
* the process' umask).
*
* The file is opened with the O_CLOEXEC flag, and will be closed automatically
* when a new binary is executed with one of the exec(3) functions.
*
* The error() status is updated.
*
* \return True on success, false otherwise
*/
bool File::open(File::OpenMode mode)
{
if (isOpen()) {
LOG(File, Error) << "File " << name_ << " is already open";
return false;
}
int flags = static_cast<OpenMode::Type>(mode & OpenModeFlag::ReadWrite) - 1;
if (mode & OpenModeFlag::WriteOnly)
flags |= O_CREAT;
fd_ = UniqueFD(::open(name_.c_str(), flags | O_CLOEXEC, 0666));
if (!fd_.isValid()) {
error_ = -errno;
return false;
}
mode_ = mode;
error_ = 0;
return true;
}
/**
* \fn bool File::isOpen() const
* \brief Check if the file is open
* \return True if the file is open, false otherwise
*/
/**
* \fn OpenMode File::openMode() const
* \brief Retrieve the file open mode
* \return The file open mode
*/
/**
* \brief Close the file
*
* This function closes the File. If the File is not open, it performs no
* operation. Memory mappings created with map() are not destroyed when the
* file is closed.
*/
void File::close()
{
if (!fd_.isValid())
return;
fd_.reset();
mode_ = OpenModeFlag::NotOpen;
}
/**
* \fn int File::error() const
* \brief Retrieve the file error status
*
* This function retrieves the error status from the last file open or I/O
* operation. The error status is a negative number as defined by errno.h. If
* no error occurred, this function returns 0.
*
* \return The file error status
*/
/**
* \brief Retrieve the file size
*
* This function retrieves the size of the file on the filesystem. The File
* instance shall be open to retrieve its size. The error() status is not
* modified, error codes are returned directly on failure.
*
* \return The file size in bytes on success, or a negative error code otherwise
*/
ssize_t File::size() const
{
if (!isOpen())
return -EINVAL;
struct stat st;
int ret = fstat(fd_.get(), &st);
if (ret < 0)
return -errno;
return st.st_size;
}
/**
* \brief Return current read or write position
*
* If the file is closed, this function returns 0.
*
* \return The current read or write position
*/
off_t File::pos() const
{
if (!isOpen())
return 0;
return lseek(fd_.get(), 0, SEEK_CUR);
}
/**
* \brief Set the read or write position
* \param[in] pos The desired position
* \return The resulting offset from the beginning of the file on success, or a
* negative error code otherwise
*/
off_t File::seek(off_t pos)
{
if (!isOpen())
return -EINVAL;
off_t ret = lseek(fd_.get(), pos, SEEK_SET);
if (ret < 0)
return -errno;
return ret;
}
/**
* \brief Read data from the file
* \param[in] data Memory to read data into
*
* Read at most \a data.size() bytes from the file into \a data.data(), and
* return the number of bytes read. If less data than requested is available,
* the returned byte count may be smaller than the requested size. If no more
* data is available, 0 is returned.
*
* The position of the file as returned by pos() is advanced by the number of
* bytes read. If an error occurs, the position isn't modified.
*
* \return The number of bytes read on success, or a negative error code
* otherwise
*/
ssize_t File::read(const Span<uint8_t> &data)
{
if (!isOpen())
return -EINVAL;
size_t readBytes = 0;
ssize_t ret = 0;
/* Retry in case of interrupted system calls. */
while (readBytes < data.size()) {
ret = ::read(fd_.get(), data.data() + readBytes,
data.size() - readBytes);
if (ret <= 0)
break;
readBytes += ret;
}
if (ret < 0 && !readBytes)
return -errno;
return readBytes;
}
/**
* \brief Write data to the file
* \param[in] data Memory containing data to be written
*
* Write at most \a data.size() bytes from \a data.data() to the file, and
* return the number of bytes written. If the file system doesn't have enough
* space for the data, the returned byte count may be less than requested.
*
* The position of the file as returned by pos() is advanced by the number of
* bytes written. If an error occurs, the position isn't modified.
*
* \return The number of bytes written on success, or a negative error code
* otherwise
*/
ssize_t File::write(const Span<const uint8_t> &data)
{
if (!isOpen())
return -EINVAL;
size_t writtenBytes = 0;
/* Retry in case of interrupted system calls. */
while (writtenBytes < data.size()) {
ssize_t ret = ::write(fd_.get(), data.data() + writtenBytes,
data.size() - writtenBytes);
if (ret <= 0)
break;
writtenBytes += ret;
}
if (data.size() && !writtenBytes)
return -errno;
return writtenBytes;
}
/**
* \brief Map a region of the file in the process memory
* \param[in] offset The region offset within the file
 * \param[in] size The region size
* \param[in] flags The mapping flags
*
* This function maps a region of \a size bytes of the file starting at \a
* offset into the process memory. The File instance shall be open, but may be
* closed after mapping the region. Mappings stay valid when the File is
* closed, and are destroyed automatically when the File is deleted.
*
* If \a size is a negative value, this function maps the region starting at \a
* offset until the end of the file.
*
* The mapping memory protection is controlled by the file open mode, unless \a
* flags contains MapFlag::Private in which case the region is mapped in
* read/write mode.
*
* The error() status is updated.
*
* \return The mapped memory on success, or an empty span otherwise
*/
Span<uint8_t> File::map(off_t offset, ssize_t size, File::MapFlags flags)
{
if (!isOpen()) {
error_ = -EBADF;
return {};
}
if (size < 0) {
size = File::size();
if (size < 0) {
error_ = size;
return {};
}
size -= offset;
}
int mmapFlags = flags & MapFlag::Private ? MAP_PRIVATE : MAP_SHARED;
int prot = 0;
if (mode_ & OpenModeFlag::ReadOnly)
prot |= PROT_READ;
if (mode_ & OpenModeFlag::WriteOnly)
prot |= PROT_WRITE;
if (flags & MapFlag::Private)
prot |= PROT_WRITE;
void *map = mmap(NULL, size, prot, mmapFlags, fd_.get(), offset);
if (map == MAP_FAILED) {
error_ = -errno;
return {};
}
maps_.emplace(map, size);
error_ = 0;
return { static_cast<uint8_t *>(map), static_cast<size_t>(size) };
}
/**
* \brief Unmap a region mapped with map()
* \param[in] addr The region address
*
* The error() status is updated.
*
* \return True on success, or false if an error occurs
*/
bool File::unmap(uint8_t *addr)
{
auto iter = maps_.find(static_cast<void *>(addr));
if (iter == maps_.end()) {
error_ = -ENOENT;
return false;
}
int ret = munmap(addr, iter->second);
if (ret < 0) {
error_ = -errno;
return false;
}
maps_.erase(iter);
error_ = 0;
return true;
}
void File::unmapAll()
{
for (const auto &map : maps_)
munmap(map.first, map.second);
maps_.clear();
}
/**
* \brief Check if the file specified by \a name exists
* \param[in] name The file name
* \return True if the file exists, false otherwise
*/
bool File::exists(const std::string &name)
{
struct stat st;
int ret = stat(name.c_str(), &st);
if (ret < 0)
return false;
/* Directories can not be handled here, even if they exist. */
return !S_ISDIR(st.st_mode);
}
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera | repos/libcamera/src/libcamera/base/unique_fd.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2021, Google Inc.
*
* File descriptor wrapper that owns a file descriptor
*/
#include <libcamera/base/unique_fd.h>
#include <unistd.h>
#include <utility>
#include <libcamera/base/log.h>
/**
* \file base/unique_fd.h
* \brief File descriptor wrapper that owns a file descriptor
*/
namespace libcamera {
LOG_DEFINE_CATEGORY(UniqueFD)
/**
* \class UniqueFD
* \brief unique_ptr-like wrapper for a file descriptor
*
* The UniqueFD is a wrapper that owns and manages the lifetime of a file
* descriptor. It is constructed from a numerical file descriptor, and takes
* over its ownership. The file descriptor is closed when the UniqueFD is
* destroyed, or when it is assigned another file descriptor with operator=()
* or reset().
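 *
 * A minimal usage sketch (the file path is illustrative):
 *
 * \code{.cpp}
 * UniqueFD fd(::open("/tmp/data.bin", O_RDONLY | O_CLOEXEC));
 * if (fd.isValid()) {
 *         // Use fd.get() with raw system calls. The descriptor is closed
 *         // automatically when fd goes out of scope.
 * }
 * \endcode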
*/
/**
* \fn UniqueFD::UniqueFD()
* \brief Construct a UniqueFD that owns no file descriptor
*/
/**
* \fn UniqueFD::UniqueFD(int fd)
* \brief Construct a UniqueFD that owns \a fd
* \param[in] fd A file descriptor to manage
*/
/**
* \fn UniqueFD::UniqueFD(UniqueFD &&other)
* \brief Move constructor, create a UniqueFD by taking over \a other
* \param[in] other The other UniqueFD
*
* Create a UniqueFD by transferring ownership of the file descriptor owned by
* \a other. Upon return, the \a other UniqueFD is invalid.
*/
/**
* \fn UniqueFD::~UniqueFD()
* \brief Destroy the UniqueFD instance
*
* If a file descriptor is owned, it is closed.
*/
/**
* \fn UniqueFD::operator=(UniqueFD &&other)
* \brief Move assignment operator, replace a UniqueFD by taking over \a other
* \param[in] other The other UniqueFD
*
* If this UniqueFD owns a file descriptor, the file descriptor is closed
* first. The file descriptor is then replaced by the one of \a other. Upon
* return, \a other is invalid.
*
* \return A reference to this UniqueFD
*/
/**
* \fn UniqueFD::release()
* \brief Release ownership of the file descriptor without closing it
*
* This function releases and returns the owned file descriptor without closing
* it. The caller owns the returned value and must take care of handling its
* life time to avoid file descriptor leakages. Upon return this UniqueFD is
* invalid.
*
* \return The managed file descriptor, or -1 if no file descriptor was owned
*/
/**
* \brief Replace the managed file descriptor
* \param[in] fd The new file descriptor to manage
*
* Close the managed file descriptor, if any, and replace it with the new \a fd.
*
* Self-resetting (passing an \a fd already managed by this instance) is invalid
* and results in undefined behaviour.
*/
void UniqueFD::reset(int fd)
{
ASSERT(!isValid() || fd != fd_);
std::swap(fd, fd_);
if (fd >= 0)
close(fd);
}
/**
* \fn UniqueFD::swap(UniqueFD &other)
* \brief Swap the managed file descriptors with another UniqueFD
* \param[in] other Another UniqueFD to swap the file descriptor with
*/
/**
* \fn UniqueFD::get()
* \brief Retrieve the managed file descriptor
* \return The managed file descriptor, or -1 if no file descriptor is owned
*/
/**
* \fn UniqueFD::isValid()
* \brief Check if the UniqueFD owns a valid file descriptor
* \return True if the UniqueFD owns a valid file descriptor, false otherwise
*/
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera | repos/libcamera/src/libcamera/base/backtrace.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2021, Ideas on Board Oy
*
* Call stack backtraces
*/
#include <libcamera/base/backtrace.h>
#if HAVE_BACKTRACE
#include <execinfo.h>
#include <stdlib.h>
#endif
#ifdef HAVE_DW
#include <elfutils/libdwfl.h>
#include <unistd.h>
#endif
#if HAVE_UNWIND
/*
* Disable support for remote unwinding to enable a more optimized
* implementation.
*/
#define UNW_LOCAL_ONLY
#include <libunwind.h>
#endif
#include <cxxabi.h>
#include <sstream>
#include <libcamera/base/span.h>
#include <libcamera/base/utils.h>
/**
* \file backtrace.h
* \brief Generate call stack backtraces
*/
namespace libcamera {
namespace {
#if HAVE_DW
class DwflParser
{
public:
DwflParser();
~DwflParser();
bool isValid() const { return valid_; }
std::string stackEntry(const void *ip);
private:
Dwfl_Callbacks callbacks_;
Dwfl *dwfl_;
bool valid_;
};
DwflParser::DwflParser()
: callbacks_({}), dwfl_(nullptr), valid_(false)
{
callbacks_.find_elf = dwfl_linux_proc_find_elf;
callbacks_.find_debuginfo = dwfl_standard_find_debuginfo;
dwfl_ = dwfl_begin(&callbacks_);
if (!dwfl_)
return;
int ret = dwfl_linux_proc_report(dwfl_, getpid());
if (ret)
return;
ret = dwfl_report_end(dwfl_, nullptr, nullptr);
if (ret)
return;
valid_ = true;
}
DwflParser::~DwflParser()
{
if (dwfl_)
dwfl_end(dwfl_);
}
std::string DwflParser::stackEntry(const void *ip)
{
Dwarf_Addr addr = reinterpret_cast<Dwarf_Addr>(ip);
Dwfl_Module *module = dwfl_addrmodule(dwfl_, addr);
if (!module)
return std::string();
std::ostringstream entry;
GElf_Off offset;
GElf_Sym sym;
const char *symbol = dwfl_module_addrinfo(module, addr, &offset, &sym,
nullptr, nullptr, nullptr);
if (symbol) {
char *name = abi::__cxa_demangle(symbol, nullptr, nullptr, nullptr);
entry << (name ? name : symbol) << "+0x" << std::hex << offset
<< std::dec;
free(name);
} else {
entry << "??? [" << utils::hex(addr) << "]";
}
entry << " (";
Dwfl_Line *line = dwfl_module_getsrc(module, addr);
if (line) {
const char *filename;
int lineNumber = 0;
filename = dwfl_lineinfo(line, &addr, &lineNumber, nullptr,
nullptr, nullptr);
entry << (filename ? filename : "???") << ":" << lineNumber;
} else {
const char *filename = nullptr;
dwfl_module_info(module, nullptr, nullptr, nullptr, nullptr,
nullptr, &filename, nullptr);
entry << (filename ? filename : "???") << " [" << utils::hex(addr) << "]";
}
entry << ")";
return entry.str();
}
#endif /* HAVE_DW */
} /* namespace */
/**
* \class Backtrace
* \brief Representation of a call stack backtrace
*
* The Backtrace class represents a function call stack. Constructing an
* instance captures the call stack at the point the instance is constructed.
* The instance can later be used to access the call stack and to generate a
* human-readable representation with the toString() function.
*
* Depending on the platform, different backends can be used to generate the
* backtrace. The Backtrace class provides a best effort to capture accurate
* backtraces, but doesn't offer any guarantee of a particular backtrace format.
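 *
 * A minimal usage sketch:
 *
 * \code{.cpp}
 * Backtrace backtrace;
 * std::cerr << backtrace.toString() << std::endl;
 * \endcode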
*/
/**
* \brief Construct a backtrace
*
* The backtrace captures the call stack at the point where it is constructed.
* It can later be converted to a string with toString().
*/
Backtrace::Backtrace()
{
/* Try libunwind first and fall back to backtrace() if it fails. */
if (unwindTrace())
return;
backtraceTrace();
}
/*
* Avoid inlining to make sure that the Backtrace constructor adds exactly two
* calls to the stack, which are later skipped in toString().
*/
__attribute__((__noinline__))
bool Backtrace::backtraceTrace()
{
#if HAVE_BACKTRACE
backtrace_.resize(32);
int num_entries = backtrace(backtrace_.data(), backtrace_.size());
if (num_entries < 0) {
backtrace_.clear();
return false;
}
backtrace_.resize(num_entries);
return true;
#else
return false;
#endif
}
__attribute__((__noinline__))
bool Backtrace::unwindTrace()
{
#if HAVE_UNWIND
/*
* unw_getcontext() for ARM32 is an inline assembly function using the stmia
* instruction to store SP and PC. This is considered by clang-11 as deprecated,
* and generates a warning.
*/
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Winline-asm"
#endif
unw_context_t uc;
int ret = unw_getcontext(&uc);
if (ret)
return false;
#ifdef __clang__
#pragma clang diagnostic pop
#endif
unw_cursor_t cursor;
ret = unw_init_local(&cursor, &uc);
if (ret)
return false;
do {
#if HAVE_BACKTRACE || HAVE_DW
/*
* If backtrace() or libdw is available, they will be used in
* toString() to provide symbol information for the stack
* frames using the IP register value.
*/
unw_word_t ip;
ret = unw_get_reg(&cursor, UNW_REG_IP, &ip);
if (ret) {
backtrace_.push_back(nullptr);
continue;
}
backtrace_.push_back(reinterpret_cast<void *>(ip));
#else
/*
* Otherwise, use libunwind to get the symbol information. As
* the libunwind API uses cursors, we can't store the IP values
* and delay symbol lookup to toString().
*/
char symbol[256];
unw_word_t offset = 0;
ret = unw_get_proc_name(&cursor, symbol, sizeof(symbol), &offset);
if (ret) {
backtraceText_.emplace_back("???\n");
continue;
}
std::ostringstream entry;
char *name = abi::__cxa_demangle(symbol, nullptr, nullptr, nullptr);
entry << (name ? name : symbol);
free(name);
entry << "+0x" << std::hex << offset << "\n";
backtraceText_.emplace_back(entry.str());
#endif
} while (unw_step(&cursor) > 0);
return true;
#else
return false;
#endif
}
/**
* \brief Convert a backtrace to a string representation
* \param[in] skipLevels Number of initial levels to skip in the backtrace
*
* The string representation of the backtrace is a multi-line string, with one
* line per call stack entry. The format of the entries isn't specified and is
* platform-dependent.
*
* The \a skipLevels parameter indicates how many initial entries to skip from
* the backtrace. This can be used to hide functions that wrap the construction
* of the Backtrace instance from the call stack. The Backtrace constructor
* itself is automatically skipped and never shown in the backtrace.
*
* If backtrace generation fails for any reason (usually because the platform
* doesn't support this feature), an empty string is returned.
*
* \return A string representation of the backtrace, or an empty string if
* backtrace generation isn't possible
*/
std::string Backtrace::toString(unsigned int skipLevels) const
{
/*
* Skip the first two entries, corresponding to the Backtrace
* construction.
*/
skipLevels += 2;
if (backtrace_.size() <= skipLevels &&
backtraceText_.size() <= skipLevels)
return std::string();
if (!backtraceText_.empty()) {
Span<const std::string> trace{ backtraceText_ };
return utils::join(trace.subspan(skipLevels), "");
}
#if HAVE_DW
DwflParser dwfl;
if (dwfl.isValid()) {
std::ostringstream msg;
Span<void *const> trace{ backtrace_ };
for (const void *ip : trace.subspan(skipLevels)) {
if (ip)
msg << dwfl.stackEntry(ip) << std::endl;
else
msg << "???" << std::endl;
}
return msg.str();
}
#endif
#if HAVE_BACKTRACE
Span<void *const> trace{ backtrace_ };
trace = trace.subspan(skipLevels);
char **strings = backtrace_symbols(trace.data(), trace.size());
if (strings) {
std::ostringstream msg;
for (unsigned int i = 0; i < trace.size(); ++i)
msg << strings[i] << std::endl;
free(strings);
return msg.str();
}
#endif
return std::string();
}
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera | repos/libcamera/src/libcamera/base/thread.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* Thread support
*/
#include <libcamera/base/thread.h>
#include <atomic>
#include <list>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <libcamera/base/event_dispatcher.h>
#include <libcamera/base/event_dispatcher_poll.h>
#include <libcamera/base/log.h>
#include <libcamera/base/message.h>
#include <libcamera/base/mutex.h>
#include <libcamera/base/object.h>
/**
* \page thread Thread Support
*
* libcamera supports multi-threaded applications through a threading model that
* sets precise rules to guarantee thread-safe usage of the API. Additionally,
* libcamera makes internal use of threads, and offers APIs that simplify
* interactions with application threads. Careful compliance with the threading
* model will ensure avoidance of race conditions.
*
* Every thread created by libcamera is associated with an instance of the
* Thread class. Those threads run an internal event loop by default to
* dispatch events to objects. Additionally, the main thread of the application
* (defined as the thread that calls CameraManager::start()) is also associated
* with a Thread instance, but has no event loop accessible to libcamera. Other
* application threads are not visible to libcamera.
*
* \section thread-objects Threads and Objects
*
* Instances of the Object class and all its derived classes are thread-aware
* and are bound to the thread they are created in. They are said to *live* in
* a thread, and they interact with the event loop of their thread for the
* purpose of message passing and signal delivery. Messages posted to the
* object with Object::postMessage() will be delivered from the event loop of
* the thread that the object lives in. Signals delivered to the object, unless
* explicitly connected with ConnectionTypeDirect, will also be delivered from
* the object thread's event loop.
*
* All Object instances created internally by libcamera are bound to internal
* threads. As objects interact with thread event loops for proper operation,
* creating an Object instance in a thread that has no internal event loop (such
* as the main application thread, or libcamera threads that have a custom main
 * loop) prevents some features of the Object class from being used. See
* Thread::exec() for more details.
*
* \section thread-signals Threads and Signals
*
* When sent to a receiver that does not inherit from the Object class, signals
* are delivered synchronously in the thread of the sender. When the receiver
* inherits from the Object class, delivery is by default asynchronous if the
* sender and receiver live in different threads. In that case, the signal is
* posted to the receiver's message queue and will be delivered from the
* receiver's event loop, running in the receiver's thread. This mechanism can
* be overridden by selecting a different connection type when calling
* Signal::connect().
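 *
 * As a minimal illustrative sketch, consider a hypothetical Producer class
 * exposing a Signal and a hypothetical Consumer class deriving from Object
 * (neither is part of the libcamera API, they only serve as an example):
 *
 * \code{.cpp}
 * class Producer
 * {
 * public:
 * 	Signal<int> dataReady;
 * };
 *
 * class Consumer : public Object
 * {
 * public:
 * 	void onData(int value)
 * 	{
 * 		// Runs in the event loop of the thread the Consumer lives in.
 * 	}
 * };
 *
 * Thread thread;
 * thread.start();
 *
 * Consumer consumer;
 * consumer.moveToThread(&thread);
 *
 * Producer producer;
 *
 * // Producer and consumer live in different threads: queued delivery.
 * producer.dataReady.connect(&consumer, &Consumer::onData);
 *
 * // Alternatively, force synchronous delivery in the emitting thread.
 * producer.dataReady.connect(&consumer, &Consumer::onData, ConnectionTypeDirect);
 * \endcode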
*
* \section thread-reentrancy Reentrancy and Thread-Safety
*
* Through the documentation, several terms are used to define how classes and
* their member functions can be used from multiple threads.
*
* - A **reentrant** function may be called simultaneously from multiple
* threads if and only if each invocation uses a different instance of the
 * class. This is the default for all member functions not explicitly marked
* otherwise.
*
* - \anchor thread-safe A **thread-safe** function may be called
* simultaneously from multiple threads on the same instance of a class. A
* thread-safe function is thus reentrant. Thread-safe functions may also be
* called simultaneously with any other reentrant function of the same class
* on the same instance.
*
* - \anchor thread-bound A **thread-bound** function may be called only from
 * the thread that the class instance lives in (see section \ref
* thread-objects). For instances of classes that do not derive from the
* Object class, this is the thread in which the instance was created. A
* thread-bound function is not thread-safe, and may or may not be reentrant.
*
* Neither reentrancy nor thread-safety, in this context, mean that a function
* may be called simultaneously from the same thread, for instance from a
* callback invoked by the function. This may deadlock and isn't allowed unless
* separately documented.
*
* A class is defined as reentrant, thread-safe or thread-bound if all its
* member functions are reentrant, thread-safe or thread-bound respectively.
 * Some member functions may additionally be documented with more specific
 * thread-related attributes.
*
* Most classes are reentrant but not thread-safe, as making them fully
* thread-safe would incur locking costs considered prohibitive for the
* expected use cases.
*/
/**
* \file base/thread.h
* \brief Thread support
*/
namespace libcamera {
LOG_DEFINE_CATEGORY(Thread)
class ThreadMain;
/**
* \brief A queue of posted messages
*/
class MessageQueue
{
public:
/**
* \brief List of queued Message instances
*/
std::list<std::unique_ptr<Message>> list_;
/**
* \brief Protects the \ref list_
*/
Mutex mutex_;
/**
* \brief The recursion level for recursive Thread::dispatchMessages()
* calls
*/
unsigned int recursion_ = 0;
};
/**
* \brief Thread-local internal data
*/
class ThreadData
{
public:
ThreadData()
: thread_(nullptr), running_(false), dispatcher_(nullptr)
{
}
static ThreadData *current();
private:
friend class Thread;
friend class ThreadMain;
Thread *thread_;
bool running_ LIBCAMERA_TSA_GUARDED_BY(mutex_);
pid_t tid_;
Mutex mutex_;
std::atomic<EventDispatcher *> dispatcher_;
ConditionVariable cv_;
std::atomic<bool> exit_;
int exitCode_;
MessageQueue messages_;
};
/**
* \brief Thread wrapper for the main thread
*/
class ThreadMain : public Thread
{
public:
ThreadMain()
{
data_->running_ = true;
}
protected:
void run() override
{
LOG(Thread, Fatal) << "The main thread can't be restarted";
}
};
static thread_local ThreadData *currentThreadData = nullptr;
static ThreadMain mainThread;
/**
* \brief Retrieve thread-local internal data for the current thread
* \return The thread-local internal data for the current thread
*/
ThreadData *ThreadData::current()
{
if (currentThreadData)
return currentThreadData;
/*
* The main thread doesn't receive thread-local data when it is
* started, set it here.
*/
ThreadData *data = mainThread.data_;
data->tid_ = syscall(SYS_gettid);
currentThreadData = data;
return data;
}
/**
* \class Thread
* \brief A thread of execution
*
* The Thread class is a wrapper around std::thread that handles integration
* with the Object, Signal and EventDispatcher classes.
*
* Thread instances by default run an event loop until the exit() function is
* called. The event loop dispatches events (messages, notifiers and timers)
* sent to the objects living in the thread. This behaviour can be modified by
* overriding the run() function.
*
* \section thread-stop Stopping Threads
*
* Threads can't be forcibly stopped. Instead, a thread user first requests the
* thread to exit and then waits for the thread's main function to react to the
 * request and return, at which point the thread will stop.
*
* For threads running exec(), the exit() function is used to request the thread
* to exit. For threads subclassing the Thread class and implementing a custom
* run() function, a subclass-specific mechanism shall be provided. In either
* case, the wait() function shall be called to wait for the thread to stop.
*
* Due to their asynchronous nature, threads are subject to race conditions when
* they stop. This is of particular importance for messages posted to the thread
* with postMessage() (and the other mechanisms that rely on it, such as
* Object::invokeMethod() or asynchronous signal delivery). To understand the
* issues, three contexts need to be considered:
*
* - The worker is the Thread performing work and being instructed to stop.
* - The controller is the context which instructs the worker thread to stop.
* - The other contexts are any threads other than the worker and controller
* that interact with the worker thread.
*
* Messages posted to the worker thread from the controller context before
* calling exit() are queued to the thread's message queue, and the Thread class
* offers no guarantee that those messages will be processed before the thread
 * stops. This allows threads to stop quickly.
*
* A thread that requires delivery of messages posted from the controller
* context before exit() should reimplement the run() function and call
* dispatchMessages() after exec().
*
* Messages posted to the worker thread from the other contexts are asynchronous
* with respect to the exit() call from the controller context. There is no
* guarantee as to whether those messages will be processed or not before the
* thread stops.
*
* Messages that are not processed will stay in the queue, in the exact same way
* as messages posted after the thread has stopped. They will be processed when
* the thread is restarted. If the thread is never restarted, they will be
* deleted without being processed when the Thread instance is destroyed.
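 *
 * A minimal sketch of a worker thread that guarantees delivery of messages
 * posted from the controller context before exit(), as described above (the
 * WorkerThread name is hypothetical):
 *
 * \code{.cpp}
 * class WorkerThread : public Thread
 * {
 * protected:
 * 	void run() override
 * 	{
 * 		exec();
 * 		// Deliver messages still queued when exit() was called.
 * 		dispatchMessages(Message::Type::None);
 * 	}
 * };
 *
 * WorkerThread worker;
 * worker.start();
 *
 * // ... post messages to objects living in the worker thread ...
 *
 * worker.exit();
 * worker.wait();
 * \endcode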
*/
/**
* \brief Create a thread
*/
Thread::Thread()
{
data_ = new ThreadData;
data_->thread_ = this;
}
Thread::~Thread()
{
delete data_->dispatcher_.load(std::memory_order_relaxed);
delete data_;
}
/**
* \brief Start the thread
*/
void Thread::start()
{
MutexLocker locker(data_->mutex_);
if (data_->running_)
return;
data_->running_ = true;
data_->exitCode_ = -1;
data_->exit_.store(false, std::memory_order_relaxed);
thread_ = std::thread(&Thread::startThread, this);
}
void Thread::startThread()
{
struct ThreadCleaner {
ThreadCleaner(Thread *thread, void (Thread::*cleaner)())
: thread_(thread), cleaner_(cleaner)
{
}
~ThreadCleaner()
{
(thread_->*cleaner_)();
}
Thread *thread_;
void (Thread::*cleaner_)();
};
/*
* Make sure the thread is cleaned up even if the run() function exits
* abnormally (for instance via a direct call to pthread_cancel()).
*/
thread_local ThreadCleaner cleaner(this, &Thread::finishThread);
data_->tid_ = syscall(SYS_gettid);
currentThreadData = data_;
run();
}
/**
* \brief Enter the event loop
*
* This function enters an event loop based on the event dispatcher instance for
* the thread, and blocks until the exit() function is called. It is meant to be
* called within the thread from the run() function and shall not be called
* outside of the thread.
*
* \return The exit code passed to the exit() function
*/
int Thread::exec()
{
MutexLocker locker(data_->mutex_);
EventDispatcher *dispatcher = eventDispatcher();
locker.unlock();
while (!data_->exit_.load(std::memory_order_acquire))
dispatcher->processEvents();
locker.lock();
return data_->exitCode_;
}
/**
* \brief Main function of the thread
*
* When the thread is started with start(), it calls this function in the
* context of the new thread. The run() function can be overridden to perform
* custom work, either custom initialization and cleanup before and after
* calling the Thread::exec() function, or a custom thread loop altogether. When
 * this function returns, the thread execution is stopped and the \ref finished
* signal is emitted.
*
* Note that if this function is overridden and doesn't call Thread::exec(), no
* events will be dispatched to the objects living in the thread. These objects
* will not be able to use the EventNotifier, Timer or Message facilities. This
* includes functions that rely on message dispatching, such as
* Object::deleteLater().
*
* The base implementation just calls exec().
*/
void Thread::run()
{
exec();
}
void Thread::finishThread()
{
/*
* Objects may have been scheduled for deletion right before the thread
* exited. Ensure they get deleted now, before the thread stops.
*/
dispatchMessages(Message::Type::DeferredDelete);
data_->mutex_.lock();
data_->running_ = false;
data_->mutex_.unlock();
finished.emit();
data_->cv_.notify_all();
}
/**
* \brief Stop the thread's event loop
* \param[in] code The exit code
*
* This function interrupts the event loop started by the exec() function,
* causing exec() to return \a code.
*
* Calling exit() on a thread that reimplements the run() function and doesn't
* call exec() will likely have no effect.
*
* \context This function is \threadsafe.
*/
void Thread::exit(int code)
{
data_->exitCode_ = code;
data_->exit_.store(true, std::memory_order_release);
EventDispatcher *dispatcher = data_->dispatcher_.load(std::memory_order_relaxed);
if (!dispatcher)
return;
dispatcher->interrupt();
}
/**
* \brief Wait for the thread to finish
* \param[in] duration Maximum wait duration
*
* This function waits until the thread finishes or the \a duration has
* elapsed, whichever happens first. If \a duration is equal to
* utils::duration::max(), the wait never times out. If the thread is not
* running the function returns immediately.
*
* \context This function is \threadsafe.
*
* \return True if the thread has finished, or false if the wait timed out
*/
bool Thread::wait(utils::duration duration)
{
bool hasFinished = true;
{
MutexLocker locker(data_->mutex_);
auto isRunning = ([&]() LIBCAMERA_TSA_REQUIRES(data_->mutex_) {
return !data_->running_;
});
if (duration == utils::duration::max())
data_->cv_.wait(locker, isRunning);
else
hasFinished = data_->cv_.wait_for(locker, duration,
isRunning);
}
if (thread_.joinable())
thread_.join();
return hasFinished;
}
/**
* \brief Check if the thread is running
*
* A Thread instance is considered as running once the underlying thread has
* started. This function guarantees that it returns true after the start()
* function returns, and false after the wait() function returns.
*
* \context This function is \threadsafe.
*
* \return True if the thread is running, false otherwise
*/
bool Thread::isRunning()
{
MutexLocker locker(data_->mutex_);
return data_->running_;
}
/**
* \var Thread::finished
* \brief Signal the end of thread execution
*/
/**
* \brief Retrieve the Thread instance for the current thread
* \context This function is \threadsafe.
* \return The Thread instance for the current thread
*/
Thread *Thread::current()
{
ThreadData *data = ThreadData::current();
return data->thread_;
}
/**
* \brief Retrieve the ID of the current thread
*
* The thread ID corresponds to the Linux thread ID (TID) as returned by the
* gettid system call.
*
* \context This function is \threadsafe.
*
* \return The ID of the current thread
*/
pid_t Thread::currentId()
{
ThreadData *data = ThreadData::current();
return data->tid_;
}
/**
* \brief Retrieve the event dispatcher
*
* This function retrieves the internal event dispatcher for the thread. The
* returned event dispatcher is valid until the thread is destroyed.
*
* \context This function is \threadsafe.
*
* \return Pointer to the event dispatcher
*/
EventDispatcher *Thread::eventDispatcher()
{
if (!data_->dispatcher_.load(std::memory_order_relaxed))
data_->dispatcher_.store(new EventDispatcherPoll(),
std::memory_order_release);
return data_->dispatcher_.load(std::memory_order_relaxed);
}
/**
* \brief Post a message to the thread for the \a receiver
* \param[in] msg The message
* \param[in] receiver The receiver
*
* This function stores the message \a msg in the message queue of the thread
 * for the \a receiver and wakes up the thread's event loop. Message ownership is
* passed to the thread, and the message will be deleted after being delivered.
*
* Messages are delivered through the thread's event loop. If the thread is not
* running its event loop the message will not be delivered until the event
* loop gets started.
*
* When the thread is stopped, posted messages may not have all been processed.
* See \ref thread-stop for additional information.
*
* If the \a receiver is not bound to this thread the behaviour is undefined.
*
* \context This function is \threadsafe.
*
* \sa exec()
*/
void Thread::postMessage(std::unique_ptr<Message> msg, Object *receiver)
{
msg->receiver_ = receiver;
ASSERT(data_ == receiver->thread()->data_);
MutexLocker locker(data_->messages_.mutex_);
data_->messages_.list_.push_back(std::move(msg));
receiver->pendingMessages_++;
locker.unlock();
EventDispatcher *dispatcher =
data_->dispatcher_.load(std::memory_order_acquire);
if (dispatcher)
dispatcher->interrupt();
}
/**
* \brief Remove all posted messages for the \a receiver
* \param[in] receiver The receiver
*
* If the \a receiver is not bound to this thread the behaviour is undefined.
*/
void Thread::removeMessages(Object *receiver)
{
ASSERT(data_ == receiver->thread()->data_);
MutexLocker locker(data_->messages_.mutex_);
if (!receiver->pendingMessages_)
return;
std::vector<std::unique_ptr<Message>> toDelete;
for (std::unique_ptr<Message> &msg : data_->messages_.list_) {
if (!msg)
continue;
if (msg->receiver_ != receiver)
continue;
/*
* Move the message to the pending deletion list to delete it
* after releasing the lock. The messages list element will
* contain a null pointer, and will be removed when dispatching
* messages.
*/
toDelete.push_back(std::move(msg));
receiver->pendingMessages_--;
}
ASSERT(!receiver->pendingMessages_);
locker.unlock();
toDelete.clear();
}
/**
* \brief Dispatch posted messages for this thread
* \param[in] type The message type
*
* This function immediately dispatches all the messages previously posted for
* this thread with postMessage() that match the message \a type. If the \a type
* is Message::Type::None, all messages are dispatched.
*
* Messages shall only be dispatched from the current thread, typically within
* the thread from the run() function. Calling this function outside of the
* thread results in undefined behaviour.
*
* This function is not thread-safe, but it may be called recursively in the
* same thread from an object's message handler. It guarantees delivery of
* messages in the order they have been posted in all cases.
*/
void Thread::dispatchMessages(Message::Type type)
{
ASSERT(data_ == ThreadData::current());
++data_->messages_.recursion_;
MutexLocker locker(data_->messages_.mutex_);
std::list<std::unique_ptr<Message>> &messages = data_->messages_.list_;
for (std::unique_ptr<Message> &msg : messages) {
if (!msg)
continue;
if (type != Message::Type::None && msg->type() != type)
continue;
/*
* Move the message, setting the entry in the list to null. It
* will cause recursive calls to ignore the entry, and the erase
* loop at the end of the function to delete it from the list.
*/
std::unique_ptr<Message> message = std::move(msg);
Object *receiver = message->receiver_;
ASSERT(data_ == receiver->thread()->data_);
receiver->pendingMessages_--;
locker.unlock();
receiver->message(message.get());
message.reset();
locker.lock();
}
/*
* If the recursion level is 0, erase all null messages in the list. We
* can't do so during recursion, as it would invalidate the iterator of
* the outer calls.
*/
if (!--data_->messages_.recursion_) {
for (auto iter = messages.begin(); iter != messages.end(); ) {
if (!*iter)
iter = messages.erase(iter);
else
++iter;
}
}
}
/**
* \brief Move an \a object and all its children to the thread
* \param[in] object The object
*/
void Thread::moveObject(Object *object)
{
ThreadData *currentData = object->thread_->data_;
ThreadData *targetData = data_;
MutexLocker lockerFrom(currentData->messages_.mutex_, std::defer_lock);
MutexLocker lockerTo(targetData->messages_.mutex_, std::defer_lock);
std::lock(lockerFrom, lockerTo);
moveObject(object, currentData, targetData);
}
void Thread::moveObject(Object *object, ThreadData *currentData,
ThreadData *targetData)
{
/* Move pending messages to the message queue of the new thread. */
if (object->pendingMessages_) {
unsigned int movedMessages = 0;
for (std::unique_ptr<Message> &msg : currentData->messages_.list_) {
if (!msg)
continue;
if (msg->receiver_ != object)
continue;
targetData->messages_.list_.push_back(std::move(msg));
movedMessages++;
}
if (movedMessages) {
EventDispatcher *dispatcher =
targetData->dispatcher_.load(std::memory_order_acquire);
if (dispatcher)
dispatcher->interrupt();
}
}
object->thread_ = this;
/* Move all children. */
for (auto child : object->children_)
moveObject(child, currentData, targetData);
}
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera | repos/libcamera/src/libcamera/base/event_dispatcher.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* Event dispatcher
*/
#include <libcamera/base/event_dispatcher.h>
#include <libcamera/base/log.h>
/**
* \file base/event_dispatcher.h
*/
namespace libcamera {
LOG_DEFINE_CATEGORY(Event)
/**
* \class EventDispatcher
* \brief Interface to manage the libcamera events and timers
*
* The EventDispatcher class allows the integration of the application event
* loop with libcamera by abstracting how events and timers are managed and
* processed.
*
* To listen to events, libcamera creates EventNotifier instances and registers
* them with the dispatcher with registerEventNotifier(). The event notifier
* \ref EventNotifier::activated signal is then emitted by the dispatcher
* whenever the event is detected.
*
* To set timers, libcamera creates Timer instances and registers them with the
* dispatcher with registerTimer(). The timer \ref Timer::timeout signal is then
* emitted by the dispatcher when the timer times out.
*/
EventDispatcher::~EventDispatcher()
{
}
/**
* \fn EventDispatcher::registerEventNotifier()
* \brief Register an event notifier
* \param[in] notifier The event notifier to register
*
* Once the \a notifier is registered with the dispatcher, the dispatcher will
* emit the notifier \ref EventNotifier::activated signal whenever a
* corresponding event is detected on the notifier's file descriptor. The event
* is monitored until the notifier is unregistered with
* unregisterEventNotifier().
*
* Registering multiple notifiers for the same file descriptor and event type is
* not allowed and results in undefined behaviour.
*/
/**
* \fn EventDispatcher::unregisterEventNotifier()
* \brief Unregister an event notifier
* \param[in] notifier The event notifier to unregister
*
* After this function returns the \a notifier is guaranteed not to emit the
* \ref EventNotifier::activated signal.
*
* If the notifier isn't registered, this function performs no operation.
*/
/**
* \fn EventDispatcher::registerTimer()
* \brief Register a timer
* \param[in] timer The timer to register
*
* Once the \a timer is registered with the dispatcher, the dispatcher will emit
* the timer \ref Timer::timeout signal when the timer times out. The timer can
* be unregistered with unregisterTimer() before it times out, in which case the
* signal will not be emitted.
*
* When the \a timer times out, it is automatically unregistered by the
* dispatcher and can be registered back as early as from the \ref Timer::timeout
* signal handlers.
*
* Registering the same timer multiple times is not allowed and results in
* undefined behaviour.
*/
/**
* \fn EventDispatcher::unregisterTimer()
* \brief Unregister a timer
* \param[in] timer The timer to unregister
*
* After this function returns the \a timer is guaranteed not to emit the
* \ref Timer::timeout signal.
*
* If the timer isn't registered, this function performs no operation.
*/
/**
* \fn EventDispatcher::processEvents()
* \brief Wait for and process pending events
*
* This function processes all pending events associated with registered event
* notifiers and timers and signals the corresponding EventNotifier and Timer
* objects. If no events are pending, it waits for the first event and processes
* it before returning.
*/
/**
* \fn EventDispatcher::interrupt()
* \brief Interrupt any running processEvents() call as soon as possible
*
* Calling this function interrupts any blocking processEvents() call in
* progress. The processEvents() function will return as soon as possible,
* after processing pending timers and events. If processEvents() isn't in
* progress, it will be interrupted immediately the next time it gets called.
*/
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera | repos/libcamera/src/libcamera/base/event_dispatcher_poll.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* Poll-based event dispatcher
*/
#include <libcamera/base/event_dispatcher_poll.h>
#include <algorithm>
#include <chrono>
#include <iomanip>
#include <poll.h>
#include <stdint.h>
#include <string.h>
#include <sys/eventfd.h>
#include <unistd.h>
#include <libcamera/base/event_notifier.h>
#include <libcamera/base/log.h>
#include <libcamera/base/thread.h>
#include <libcamera/base/timer.h>
#include <libcamera/base/utils.h>
/**
* \file base/event_dispatcher_poll.h
*/
namespace libcamera {
LOG_DECLARE_CATEGORY(Event)
static const char *notifierType(EventNotifier::Type type)
{
if (type == EventNotifier::Read)
return "read";
if (type == EventNotifier::Write)
return "write";
if (type == EventNotifier::Exception)
return "exception";
return "";
}
/**
* \class EventDispatcherPoll
* \brief A poll-based event dispatcher
*/
EventDispatcherPoll::EventDispatcherPoll()
: processingEvents_(false)
{
/*
* Create the event fd. Failures are fatal as we can't implement an
* interruptible dispatcher without the fd.
*/
eventfd_ = UniqueFD(eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK));
if (!eventfd_.isValid())
LOG(Event, Fatal) << "Unable to create eventfd";
}
EventDispatcherPoll::~EventDispatcherPoll()
{
}
void EventDispatcherPoll::registerEventNotifier(EventNotifier *notifier)
{
EventNotifierSetPoll &set = notifiers_[notifier->fd()];
EventNotifier::Type type = notifier->type();
if (set.notifiers[type] && set.notifiers[type] != notifier) {
LOG(Event, Warning)
<< "Ignoring duplicate " << notifierType(type)
<< " notifier for fd " << notifier->fd();
return;
}
set.notifiers[type] = notifier;
}
void EventDispatcherPoll::unregisterEventNotifier(EventNotifier *notifier)
{
auto iter = notifiers_.find(notifier->fd());
if (iter == notifiers_.end())
return;
EventNotifierSetPoll &set = iter->second;
EventNotifier::Type type = notifier->type();
if (!set.notifiers[type])
return;
if (set.notifiers[type] != notifier) {
LOG(Event, Warning)
<< notifierType(type) << " notifier for fd "
<< notifier->fd() << " is not registered";
return;
}
set.notifiers[type] = nullptr;
/*
* Don't race with event processing if this function is called from an
* event notifier. The notifiers_ entry will be erased by
* processEvents().
*/
if (processingEvents_)
return;
if (!set.notifiers[0] && !set.notifiers[1] && !set.notifiers[2])
notifiers_.erase(iter);
}
void EventDispatcherPoll::registerTimer(Timer *timer)
{
for (auto iter = timers_.begin(); iter != timers_.end(); ++iter) {
if ((*iter)->deadline() > timer->deadline()) {
timers_.insert(iter, timer);
return;
}
}
timers_.push_back(timer);
}
void EventDispatcherPoll::unregisterTimer(Timer *timer)
{
for (auto iter = timers_.begin(); iter != timers_.end(); ++iter) {
if (*iter == timer) {
timers_.erase(iter);
return;
}
/*
* As the timers list is ordered, we can stop as soon as we go
* past the deadline.
*/
if ((*iter)->deadline() > timer->deadline())
break;
}
}
void EventDispatcherPoll::processEvents()
{
int ret;
Thread::current()->dispatchMessages();
/* Create the pollfd array. */
std::vector<struct pollfd> pollfds;
pollfds.reserve(notifiers_.size() + 1);
for (auto notifier : notifiers_)
pollfds.push_back({ notifier.first, notifier.second.events(), 0 });
pollfds.push_back({ eventfd_.get(), POLLIN, 0 });
/* Wait for events and process notifiers and timers. */
do {
ret = poll(&pollfds);
} while (ret == -1 && errno == EINTR);
if (ret < 0) {
ret = -errno;
LOG(Event, Warning) << "poll() failed with " << strerror(-ret);
} else if (ret > 0) {
processInterrupt(pollfds.back());
pollfds.pop_back();
processNotifiers(pollfds);
}
processTimers();
}
void EventDispatcherPoll::interrupt()
{
uint64_t value = 1;
ssize_t ret = write(eventfd_.get(), &value, sizeof(value));
if (ret != sizeof(value)) {
if (ret < 0)
ret = -errno;
LOG(Event, Error)
<< "Failed to interrupt event dispatcher ("
<< ret << ")";
}
}
short EventDispatcherPoll::EventNotifierSetPoll::events() const
{
short events = 0;
if (notifiers[EventNotifier::Read])
events |= POLLIN;
if (notifiers[EventNotifier::Write])
events |= POLLOUT;
if (notifiers[EventNotifier::Exception])
events |= POLLPRI;
return events;
}
int EventDispatcherPoll::poll(std::vector<struct pollfd> *pollfds)
{
/* Compute the timeout. */
Timer *nextTimer = !timers_.empty() ? timers_.front() : nullptr;
struct timespec timeout;
if (nextTimer) {
utils::time_point now = utils::clock::now();
if (nextTimer->deadline() > now)
timeout = utils::duration_to_timespec(nextTimer->deadline() - now);
else
timeout = { 0, 0 };
LOG(Event, Debug)
<< "next timer " << nextTimer << " expires in "
<< timeout.tv_sec << "."
<< std::setfill('0') << std::setw(9)
<< timeout.tv_nsec;
}
return ppoll(pollfds->data(), pollfds->size(),
nextTimer ? &timeout : nullptr, nullptr);
}
void EventDispatcherPoll::processInterrupt(const struct pollfd &pfd)
{
if (!(pfd.revents & POLLIN))
return;
uint64_t value;
ssize_t ret = read(eventfd_.get(), &value, sizeof(value));
if (ret != sizeof(value)) {
if (ret < 0)
ret = -errno;
LOG(Event, Error)
<< "Failed to process interrupt (" << ret << ")";
}
}
void EventDispatcherPoll::processNotifiers(const std::vector<struct pollfd> &pollfds)
{
static const struct {
EventNotifier::Type type;
short events;
} events[] = {
{ EventNotifier::Read, POLLIN },
{ EventNotifier::Write, POLLOUT },
{ EventNotifier::Exception, POLLPRI },
};
processingEvents_ = true;
for (const pollfd &pfd : pollfds) {
auto iter = notifiers_.find(pfd.fd);
ASSERT(iter != notifiers_.end());
EventNotifierSetPoll &set = iter->second;
for (const auto &event : events) {
EventNotifier *notifier = set.notifiers[event.type];
if (!notifier)
continue;
/*
* If the file descriptor is invalid, disable the
* notifier immediately.
*/
if (pfd.revents & POLLNVAL) {
LOG(Event, Warning)
<< "Disabling " << notifierType(event.type)
<< " due to invalid file descriptor "
<< pfd.fd;
unregisterEventNotifier(notifier);
continue;
}
if (pfd.revents & event.events)
notifier->activated.emit();
}
/* Erase the notifiers_ entry if it is now empty. */
if (!set.notifiers[0] && !set.notifiers[1] && !set.notifiers[2])
notifiers_.erase(iter);
}
processingEvents_ = false;
}
void EventDispatcherPoll::processTimers()
{
utils::time_point now = utils::clock::now();
while (!timers_.empty()) {
Timer *timer = timers_.front();
if (timer->deadline() > now)
break;
timers_.pop_front();
timer->stop();
timer->timeout.emit();
}
}
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera | repos/libcamera/src/libcamera/base/event_notifier.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* File descriptor event notifier
*/
#include <libcamera/base/event_notifier.h>
#include <libcamera/base/event_dispatcher.h>
#include <libcamera/base/log.h>
#include <libcamera/base/message.h>
#include <libcamera/base/thread.h>
#include <libcamera/camera_manager.h>
/**
 * \file base/event_notifier.h
* \brief File descriptor event notifier
*/
namespace libcamera {
LOG_DECLARE_CATEGORY(Event)
/**
* \class EventNotifier
* \brief Notify of activity on a file descriptor
*
* The EventNotifier models a file descriptor event source that can be
* monitored. It is created with the file descriptor to be monitored and the
* type of event, and is enabled by default. It will emit the \ref activated
* signal whenever an event of the monitored type occurs on the file descriptor.
*
* Supported type of events are EventNotifier::Read, EventNotifier::Write and
* EventNotifier::Exception. The type is specified when constructing the
* notifier, and can be retrieved using the type() function. To listen to
* multiple event types on the same file descriptor multiple notifiers must be
* created.
*
* The notifier can be disabled with the setEnabled() function. When the notifier
* is disabled it ignores events and does not emit the \ref activated signal.
* The notifier can then be re-enabled with the setEnabled() function.
*
* Creating multiple notifiers of the same type for the same file descriptor is
* not allowed and results in undefined behaviour.
*
* Notifier events are detected and dispatched from the
* EventDispatcher::processEvents() function.
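 *
 * A minimal usage sketch, assuming a file descriptor fd open for reading and
 * a hypothetical Handler object with an onReadable() slot:
 *
 * \code{.cpp}
 * EventNotifier notifier(fd, EventNotifier::Read);
 * notifier.activated.connect(&handler, &Handler::onReadable);
 *
 * // onReadable() is invoked from the event loop of the notifier's thread
 * // whenever data becomes available on fd.
 * \endcode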
*/
/**
* \enum EventNotifier::Type
* Type of file descriptor event to listen for.
* \var EventNotifier::Read
* Data is available to be read from the file descriptor
* \var EventNotifier::Write
* Data can be written to the file descriptor
* \var EventNotifier::Exception
* An exception has occurred on the file descriptor
*/
/**
* \brief Construct an event notifier with a file descriptor and event type
* \param[in] fd The file descriptor to monitor
* \param[in] type The event type to monitor
* \param[in] parent The parent Object
*/
EventNotifier::EventNotifier(int fd, Type type, Object *parent)
: Object(parent), fd_(fd), type_(type), enabled_(false)
{
setEnabled(true);
}
EventNotifier::~EventNotifier()
{
setEnabled(false);
}
/**
* \fn EventNotifier::type()
* \brief Retrieve the type of the event being monitored
* \return The type of the event
*/
/**
* \fn EventNotifier::fd()
* \brief Retrieve the file descriptor being monitored
* \return The file descriptor
*/
/**
* \fn EventNotifier::enabled()
* \brief Retrieve the notifier state
* \return True if the notifier is enabled, or false otherwise
* \sa setEnabled()
*/
/**
* \brief Enable or disable the notifier
* \param[in] enable True to enable the notifier, false to disable it
*
* This function enables or disables the notifier. A disabled notifier ignores
* events and does not emit the \ref activated signal.
*
* \context This function is \threadbound.
*/
void EventNotifier::setEnabled(bool enable)
{
if (!assertThreadBound("EventNotifier can't be enabled from another thread"))
return;
if (enabled_ == enable)
return;
enabled_ = enable;
EventDispatcher *dispatcher = thread()->eventDispatcher();
if (enable)
dispatcher->registerEventNotifier(this);
else
dispatcher->unregisterEventNotifier(this);
}
/**
* \var EventNotifier::activated
* \brief Signal emitted when the event occurs
*
* This signal is emitted when the event \ref type() occurs on the file
* descriptor monitored by the notifier. The notifier pointer is passed as a
* parameter.
*/
void EventNotifier::message(Message *msg)
{
if (msg->type() == Message::ThreadMoveMessage) {
if (enabled_) {
setEnabled(false);
invokeMethod(&EventNotifier::setEnabled,
ConnectionTypeQueued, true);
}
}
Object::message(msg);
}
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera | repos/libcamera/src/libcamera/base/timer.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* Generic timer
*/
#include <libcamera/base/timer.h>
#include <chrono>
#include <libcamera/base/event_dispatcher.h>
#include <libcamera/base/log.h>
#include <libcamera/base/message.h>
#include <libcamera/base/thread.h>
#include <libcamera/base/utils.h>
#include <libcamera/camera_manager.h>
/**
* \file base/timer.h
* \brief Generic timer
*/
namespace libcamera {
LOG_DEFINE_CATEGORY(Timer)
/**
* \class Timer
* \brief Single-shot timer interface
*
* The Timer class models a single-shot timer that is started with start() and
* emits the \ref timeout signal when it times out.
*
* Once started the timer will run until it times out. It can be stopped with
* stop(), and once it times out or is stopped, can be started again with
* start().
*
* The timer deadline is specified as either a duration in milliseconds or an
* absolute time point. If the deadline is set to the current time or to the
* past, the timer will time out immediately when execution returns to the
* event loop of the timer's thread.
*
 * Timers run in the thread they belong to, and thus emit the \ref timeout
* signal from that thread. To avoid race conditions they must not be started
 * or stopped from a different thread; attempts to do so will be rejected and
* logged, and may cause undefined behaviour.
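 *
 * A minimal usage sketch, assuming a hypothetical Handler object with an
 * onTimeout() slot:
 *
 * \code{.cpp}
 * Timer timer;
 * timer.timeout.connect(&handler, &Handler::onTimeout);
 *
 * // Start a 500 ms single-shot timer. onTimeout() is invoked from the event
 * // loop of the timer's thread once the deadline is reached.
 * timer.start(std::chrono::milliseconds(500));
 * \endcode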
*/
/**
* \brief Construct a timer
* \param[in] parent The parent Object
*/
Timer::Timer(Object *parent)
: Object(parent), running_(false)
{
}
Timer::~Timer()
{
stop();
}
/**
* \brief Start or restart the timer with a timeout of \a duration
* \param[in] duration The timer duration in milliseconds
*
* If the timer is already running it will be stopped and restarted.
*
* \context This function is \threadbound.
*/
void Timer::start(std::chrono::milliseconds duration)
{
start(utils::clock::now() + duration);
}
/**
* \brief Start or restart the timer with a \a deadline
* \param[in] deadline The timer deadline
*
* If the timer is already running it will be stopped and restarted.
*
* \context This function is \threadbound.
*/
void Timer::start(std::chrono::steady_clock::time_point deadline)
{
if (!assertThreadBound("Timer can't be started from another thread"))
return;
deadline_ = deadline;
LOG(Timer, Debug)
<< "Starting timer " << this << ": deadline "
<< utils::time_point_to_string(deadline_);
if (isRunning())
unregisterTimer();
registerTimer();
}
/**
* \brief Stop the timer
*
* After this function returns the timer is guaranteed not to emit the
* \ref timeout signal.
*
* If the timer is not running this function performs no operation.
*
* \context This function is \threadbound.
*/
void Timer::stop()
{
if (!assertThreadBound("Timer can't be stopped from another thread"))
return;
if (!isRunning())
return;
unregisterTimer();
}
void Timer::registerTimer()
{
thread()->eventDispatcher()->registerTimer(this);
running_ = true;
}
void Timer::unregisterTimer()
{
running_ = false;
thread()->eventDispatcher()->unregisterTimer(this);
}
/**
* \brief Check if the timer is running
* \return True if the timer is running, false otherwise
*/
bool Timer::isRunning() const
{
return running_;
}
/**
* \fn Timer::deadline()
* \brief Retrieve the timer deadline
* \return The timer deadline
*/
/**
* \var Timer::timeout
* \brief Signal emitted when the timer times out
*/
void Timer::message(Message *msg)
{
if (msg->type() == Message::ThreadMoveMessage) {
if (isRunning()) {
unregisterTimer();
invokeMethod(&Timer::registerTimer,
ConnectionTypeQueued);
}
}
Object::message(msg);
}
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera | repos/libcamera/src/libcamera/base/message.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* Message queue support
*/
#include <libcamera/base/message.h>
#include <libcamera/base/log.h>
#include <libcamera/base/signal.h>
/**
* \file base/message.h
* \brief Message queue support
*
* The messaging API enables inter-thread communication through message
* posting. Messages can be sent from any thread to any recipient deriving from
* the Object class.
*
 * To post a message, the sender allocates it dynamically as an instance of a
 * class derived from Message. It then posts the message to an Object recipient
* through Object::postMessage(). Message ownership is passed to the object,
* thus the message shall not store any temporary data.
*
* The message is delivered in the context of the object's thread, through the
* Object::message() virtual function. After delivery the message is
* automatically deleted.
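 *
 * A minimal sketch of both sides, assuming a MyCustomMessage class as shown
 * in Message::registerMessageType() and a hypothetical Receiver class
 * deriving from Object:
 *
 * \code{.cpp}
 * void Receiver::message(Message *msg)
 * {
 * 	if (msg->type() == MyCustomMessage::type()) {
 * 		// Handle the custom message in the receiver's thread.
 * 		return;
 * 	}
 *
 * 	Object::message(msg);
 * }
 *
 * // Sender side, callable from any thread.
 * receiver->postMessage(std::make_unique<MyCustomMessage>());
 * \endcode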
*/
namespace libcamera {
LOG_DEFINE_CATEGORY(Message)
std::atomic_uint Message::nextUserType_{ Message::UserMessage };
/**
* \class Message
* \brief A message that can be posted to a Thread
*/
/**
* \enum Message::Type
* \brief The message type
* \var Message::None
* \brief Invalid message type
* \var Message::InvokeMessage
* \brief Asynchronous method invocation across threads
* \var Message::ThreadMoveMessage
* \brief Object is being moved to a different thread
* \var Message::DeferredDelete
* \brief Object is scheduled for deletion
* \var Message::UserMessage
* \brief First value available for user-defined messages
*/
/**
* \brief Construct a message object of type \a type
* \param[in] type The message type
*/
Message::Message(Message::Type type)
: type_(type)
{
}
Message::~Message()
{
}
/**
* \fn Message::type()
* \brief Retrieve the message type
* \return The message type
*/
/**
* \fn Message::receiver()
* \brief Retrieve the message receiver
* \return The message receiver
*/
/**
* \brief Reserve and register a custom user-defined message type
*
* Custom message types use values starting at Message::UserMessage. Assigning
 * custom types manually may lead to accidentally duplicated types. To avoid this
* problem, this function reserves and returns the next available user-defined
* message type.
*
* The recommended way to use this function is to subclass Message and provide a
* static accessor for the custom message type.
*
* \code{.cpp}
* class MyCustomMessage : public Message
* {
* public:
* MyCustomMessage() : Message(type()) {}
*
* static Message::Type type()
* {
* static MessageType type = registerMessageType();
* return type;
* }
* };
* \endcode
*
* \return A new unique message type
*/
Message::Type Message::registerMessageType()
{
return static_cast<Message::Type>(nextUserType_++);
}
/**
* \class InvokeMessage
* \brief A message carrying a method invocation across threads
*/
/**
* \brief Construct an InvokeMessage for method invocation on an Object
* \param[in] method The bound method
* \param[in] pack The packed method arguments
* \param[in] semaphore The semaphore used to signal message delivery
* \param[in] deleteMethod True to delete the \a method when the message is
* destroyed
*/
InvokeMessage::InvokeMessage(BoundMethodBase *method,
std::shared_ptr<BoundMethodPackBase> pack,
Semaphore *semaphore, bool deleteMethod)
: Message(Message::InvokeMessage), method_(method), pack_(pack),
semaphore_(semaphore), deleteMethod_(deleteMethod)
{
}
InvokeMessage::~InvokeMessage()
{
if (deleteMethod_)
delete method_;
}
/**
* \fn InvokeMessage::semaphore()
* \brief Retrieve the message semaphore passed to the constructor
* \return The message semaphore
*/
/**
* \brief Invoke the method bound to InvokeMessage::method_ with arguments
* InvokeMessage::pack_
*/
void InvokeMessage::invoke()
{
method_->invokePack(pack_.get());
}
/**
* \var InvokeMessage::method_
* \brief The method to be invoked
*/
/**
* \var InvokeMessage::pack_
* \brief The packed method invocation arguments
*/
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera | repos/libcamera/src/libcamera/base/flags.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2020, Google Inc.
*
* Type-safe enum-based bitfields
*/
#include <libcamera/base/flags.h>
/**
* \file base/flags.h
* \brief Enum-based bit fields
*/
namespace libcamera {
/**
* \class Flags
* \brief Type-safe container for enum-based bitfields
*
* The Flags template class provides type-safe bitwise operators on enum values.
* It allows using enum types for bitfields, while preventing unsafe casts from
* integer types and mixing of flags from different enum types.
*
* To use the Flags class, declare an enum containing the desired bit flags, and
* use the Flags<enum> class to store bitfields based on the enum. If bitwise
* operators on the underlying enum are also desired, they can be enabled with
* the LIBCAMERA_FLAGS_ENABLE_OPERATORS(enum) macro.
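 *
 * A minimal sketch, using a hypothetical MyFlag enumeration:
 *
 * \code{.cpp}
 * enum class MyFlag {
 * 	FlagA = (1 << 0),
 * 	FlagB = (1 << 1),
 * };
 *
 * LIBCAMERA_FLAGS_ENABLE_OPERATORS(MyFlag)
 *
 * Flags<MyFlag> flags = MyFlag::FlagA | MyFlag::FlagB;
 *
 * if (flags & MyFlag::FlagB) {
 * 	// FlagB is set.
 * }
 * \endcode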
*/
/**
* \typedef Flags::Type
* \brief The underlying data type of the enum
*/
/**
* \fn Flags::Flags()
* \brief Construct a Flags instance with a zero value
*/
/**
* \fn Flags::Flags(E flag)
* \brief Construct a Flags instance storing the \a flag
* \param[in] flag The initial value
*/
/**
* \fn Flags &Flags::operator&=(E flag)
* \brief Store the bitwise AND of this Flags and the \a flag in this Flags
* \param[in] flag The second operand
* \return A reference to this Flags
*/
/**
* \fn Flags &Flags::operator&=(Flags other)
* \brief Store the bitwise AND of this Flags and the \a other Flags in this Flags
* \param[in] other The second operand
* \return A reference to this Flags
*/
/**
* \fn Flags &Flags::operator|=(E flag)
* \brief Store the bitwise OR of this Flags and the \a flag in this Flags
* \param[in] flag The second operand
* \return A reference to this Flags
*/
/**
* \fn Flags &Flags::operator|=(Flags other)
* \brief Store the bitwise OR of this Flags and the \a other Flags in this Flags
* \param[in] other The second operand
* \return A reference to this Flags
*/
/**
* \fn Flags &Flags::operator^=(E flag)
* \brief Store the bitwise XOR of this Flags and the \a flag in this Flags
* \param[in] flag The second operand
* \return A reference to this Flags
*/
/**
* \fn Flags &Flags::operator^=(Flags other)
* \brief Store the bitwise XOR of this Flags and the \a other Flags in this Flags
* \param[in] other The second operand
* \return A reference to this Flags
*/
/**
* \fn bool Flags::operator==(E flag)
* \brief Compare flags for equality
* \param[in] flag The second operand
* \return True if the Flags and \a flag are equal, false otherwise
*/
/**
* \fn bool Flags::operator==(Flags other)
* \brief Compare flags for equality
* \param[in] other The second operand
* \return True if the Flags and \a other are equal, false otherwise
*/
/**
* \fn bool Flags::operator!=(E flag)
* \brief Compare flags for non-equality
* \param[in] flag The second operand
* \return True if the Flags and \a flag are not equal, false otherwise
*/
/**
* \fn bool Flags::operator!=(Flags other)
* \brief Compare flags for non-equality
* \param[in] other The second operand
* \return True if the Flags and \a other are not equal, false otherwise
*/
/**
* \fn Flags::operator Type() const
* \brief Convert the Flags to the underlying integer type
* \return The Flags value as an integer
*/
/**
* \fn Flags::operator bool() const
* \brief Convert the Flags to a boolean
* \return True if at least one flag is set, false otherwise
*/
/**
* \fn Flags Flags::operator&(E flag) const
* \brief Compute the bitwise AND of this Flags and the \a flag
* \param[in] flag The second operand
* \return A Flags containing the result of the AND operation
*/
/**
* \fn Flags Flags::operator&(Flags other) const
* \brief Compute the bitwise AND of this Flags and the \a other Flags
* \param[in] other The second operand
* \return A Flags containing the result of the AND operation
*/
/**
* \fn Flags Flags::operator|(E flag) const
* \brief Compute the bitwise OR of this Flags and the \a flag
* \param[in] flag The second operand
* \return A Flags containing the result of the OR operation
*/
/**
* \fn Flags Flags::operator|(Flags other) const
* \brief Compute the bitwise OR of this Flags and the \a other Flags
* \param[in] other The second operand
* \return A Flags containing the result of the OR operation
*/
/**
* \fn Flags Flags::operator^(E flag) const
* \brief Compute the bitwise XOR of this Flags and the \a flag
* \param[in] flag The second operand
* \return A Flags containing the result of the XOR operation
*/
/**
* \fn Flags Flags::operator^(Flags other) const
* \brief Compute the bitwise XOR of this Flags and the \a other Flags
* \param[in] other The second operand
* \return A Flags containing the result of the XOR operation
*/
/**
* \fn Flags Flags::operator~() const
* \brief Compute the bitwise NOT of this Flags
* \return A Flags containing the result of the NOT operation
*/
/**
* \fn bool Flags::operator!() const
* \brief Check if flags are set
 * \return True if no flags are set, false otherwise
*/
/**
* \def LIBCAMERA_FLAGS_ENABLE_OPERATORS(enum)
* \brief Enable bitwise operations on the \a enum enumeration
*
* This macro enables the bitwise AND, OR, XOR and NOT operators on the given
* \a enum. This allows the enum values to be safely used in bitwise operations
* with the Flags<> class.
*/
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera/pipeline | repos/libcamera/src/libcamera/pipeline/ipu3/frames.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2020, Google Inc.
*
* Intel IPU3 Frames helper
*/
#include "frames.h"
#include <libcamera/framebuffer.h>
#include <libcamera/request.h>
#include "libcamera/internal/framebuffer.h"
#include "libcamera/internal/pipeline_handler.h"
#include "libcamera/internal/v4l2_videodevice.h"
namespace libcamera {
LOG_DECLARE_CATEGORY(IPU3)
IPU3Frames::IPU3Frames()
{
}
void IPU3Frames::init(const std::vector<std::unique_ptr<FrameBuffer>> ¶mBuffers,
const std::vector<std::unique_ptr<FrameBuffer>> &statBuffers)
{
for (const std::unique_ptr<FrameBuffer> &buffer : paramBuffers)
availableParamBuffers_.push(buffer.get());
for (const std::unique_ptr<FrameBuffer> &buffer : statBuffers)
availableStatBuffers_.push(buffer.get());
frameInfo_.clear();
}
void IPU3Frames::clear()
{
availableParamBuffers_ = {};
availableStatBuffers_ = {};
}
IPU3Frames::Info *IPU3Frames::create(Request *request)
{
unsigned int id = request->sequence();
if (availableParamBuffers_.empty()) {
LOG(IPU3, Debug) << "Parameters buffer underrun";
return nullptr;
}
if (availableStatBuffers_.empty()) {
LOG(IPU3, Debug) << "Statistics buffer underrun";
return nullptr;
}
FrameBuffer *paramBuffer = availableParamBuffers_.front();
FrameBuffer *statBuffer = availableStatBuffers_.front();
paramBuffer->_d()->setRequest(request);
statBuffer->_d()->setRequest(request);
availableParamBuffers_.pop();
availableStatBuffers_.pop();
/* \todo Remove the dynamic allocation of Info */
std::unique_ptr<Info> info = std::make_unique<Info>();
info->id = id;
info->request = request;
info->rawBuffer = nullptr;
info->paramBuffer = paramBuffer;
info->statBuffer = statBuffer;
info->paramDequeued = false;
info->metadataProcessed = false;
frameInfo_[id] = std::move(info);
return frameInfo_[id].get();
}
void IPU3Frames::remove(IPU3Frames::Info *info)
{
/* Return params and stat buffer for reuse. */
availableParamBuffers_.push(info->paramBuffer);
availableStatBuffers_.push(info->statBuffer);
/* Delete the extended frame information. */
frameInfo_.erase(info->id);
}
bool IPU3Frames::tryComplete(IPU3Frames::Info *info)
{
Request *request = info->request;
if (request->hasPendingBuffers())
return false;
if (!info->metadataProcessed)
return false;
if (!info->paramDequeued)
return false;
remove(info);
bufferAvailable.emit();
return true;
}
IPU3Frames::Info *IPU3Frames::find(unsigned int id)
{
const auto &itInfo = frameInfo_.find(id);
if (itInfo != frameInfo_.end())
return itInfo->second.get();
LOG(IPU3, Fatal) << "Can't find tracking information for frame " << id;
return nullptr;
}
IPU3Frames::Info *IPU3Frames::find(FrameBuffer *buffer)
{
for (auto const &itInfo : frameInfo_) {
Info *info = itInfo.second.get();
for (auto const itBuffers : info->request->buffers())
if (itBuffers.second == buffer)
return info;
if (info->rawBuffer == buffer || info->paramBuffer == buffer ||
info->statBuffer == buffer)
return info;
}
LOG(IPU3, Fatal) << "Can't find tracking information from buffer";
return nullptr;
}
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera/pipeline | repos/libcamera/src/libcamera/pipeline/ipu3/imgu.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* Intel IPU3 ImgU
*/
#pragma once
#include <memory>
#include <string>
#include "libcamera/internal/v4l2_subdevice.h"
#include "libcamera/internal/v4l2_videodevice.h"
namespace libcamera {
class FrameBuffer;
class MediaDevice;
class Size;
struct StreamConfiguration;
class ImgUDevice
{
public:
static constexpr unsigned int kFilterWidth = 4;
static constexpr unsigned int kFilterHeight = 4;
static constexpr unsigned int kIFAlignWidth = 2;
static constexpr unsigned int kIFAlignHeight = 4;
static constexpr unsigned int kIFMaxCropWidth = 40;
static constexpr unsigned int kIFMaxCropHeight = 540;
static constexpr unsigned int kBDSAlignWidth = 2;
static constexpr unsigned int kBDSAlignHeight = 4;
static constexpr float kBDSSfMax = 2.5;
static constexpr float kBDSSfMin = 1.0;
static constexpr float kBDSSfStep = 0.03125;
static constexpr Size kOutputMinSize = { 2, 2 };
static constexpr Size kOutputMaxSize = { 4480, 34004 };
static constexpr unsigned int kOutputAlignWidth = 64;
static constexpr unsigned int kOutputAlignHeight = 4;
static constexpr unsigned int kOutputMarginWidth = 64;
static constexpr unsigned int kOutputMarginHeight = 32;
struct PipeConfig {
float bds_sf;
Size iif;
Size bds;
Size gdc;
bool isNull() const
{
return iif.isNull() || bds.isNull() || gdc.isNull();
}
};
struct Pipe {
Size input;
Size main;
Size viewfinder;
};
int init(MediaDevice *media, unsigned int index);
PipeConfig calculatePipeConfig(Pipe *pipe);
int configure(const PipeConfig &pipeConfig, V4L2DeviceFormat *inputFormat);
int configureOutput(const StreamConfiguration &cfg,
V4L2DeviceFormat *outputFormat)
{
return configureVideoDevice(output_.get(), PAD_OUTPUT, cfg,
outputFormat);
}
int configureViewfinder(const StreamConfiguration &cfg,
V4L2DeviceFormat *outputFormat)
{
return configureVideoDevice(viewfinder_.get(), PAD_VF, cfg,
outputFormat);
}
int allocateBuffers(unsigned int bufferCount);
void freeBuffers();
int start();
int stop();
int enableLinks(bool enable);
std::unique_ptr<V4L2Subdevice> imgu_;
std::unique_ptr<V4L2VideoDevice> input_;
std::unique_ptr<V4L2VideoDevice> param_;
std::unique_ptr<V4L2VideoDevice> output_;
std::unique_ptr<V4L2VideoDevice> viewfinder_;
std::unique_ptr<V4L2VideoDevice> stat_;
std::vector<std::unique_ptr<FrameBuffer>> paramBuffers_;
std::vector<std::unique_ptr<FrameBuffer>> statBuffers_;
private:
static constexpr unsigned int PAD_INPUT = 0;
static constexpr unsigned int PAD_PARAM = 1;
static constexpr unsigned int PAD_OUTPUT = 2;
static constexpr unsigned int PAD_VF = 3;
static constexpr unsigned int PAD_STAT = 4;
int linkSetup(const std::string &source, unsigned int sourcePad,
const std::string &sink, unsigned int sinkPad,
bool enable);
int configureVideoDevice(V4L2VideoDevice *dev, unsigned int pad,
const StreamConfiguration &cfg,
V4L2DeviceFormat *outputFormat);
std::string name_;
MediaDevice *media_;
};
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera/pipeline | repos/libcamera/src/libcamera/pipeline/ipu3/cio2.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* Intel IPU3 CIO2
*/
#pragma once
#include <memory>
#include <queue>
#include <vector>
#include <libcamera/base/signal.h>
#include "libcamera/internal/v4l2_subdevice.h"
#include "libcamera/internal/v4l2_videodevice.h"
namespace libcamera {
class CameraSensor;
class FrameBuffer;
class MediaDevice;
class PixelFormat;
class Request;
class Size;
class SizeRange;
struct StreamConfiguration;
enum class Transform;
class CIO2Device
{
public:
static constexpr unsigned int kBufferCount = 4;
CIO2Device();
std::vector<PixelFormat> formats() const;
std::vector<SizeRange> sizes(const PixelFormat &format) const;
int init(const MediaDevice *media, unsigned int index);
int configure(const Size &size, const Transform &transform,
V4L2DeviceFormat *outputFormat);
StreamConfiguration generateConfiguration(Size size) const;
int exportBuffers(unsigned int count,
std::vector<std::unique_ptr<FrameBuffer>> *buffers);
V4L2SubdeviceFormat getSensorFormat(const std::vector<unsigned int> &mbusCodes,
const Size &size) const;
int start();
int stop();
CameraSensor *sensor() { return sensor_.get(); }
const CameraSensor *sensor() const { return sensor_.get(); }
FrameBuffer *queueBuffer(Request *request, FrameBuffer *rawBuffer);
void tryReturnBuffer(FrameBuffer *buffer);
Signal<FrameBuffer *> &bufferReady() { return output_->bufferReady; }
Signal<uint32_t> &frameStart() { return csi2_->frameStart; }
Signal<> bufferAvailable;
private:
void freeBuffers();
void cio2BufferReady(FrameBuffer *buffer);
std::unique_ptr<CameraSensor> sensor_;
std::unique_ptr<V4L2Subdevice> csi2_;
std::unique_ptr<V4L2VideoDevice> output_;
std::vector<std::unique_ptr<FrameBuffer>> buffers_;
std::queue<FrameBuffer *> availableBuffers_;
};
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera/pipeline | repos/libcamera/src/libcamera/pipeline/ipu3/cio2.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* Intel IPU3 CIO2
*/
#include "cio2.h"
#include <limits>
#include <math.h>
#include <linux/media-bus-format.h>
#include <libcamera/formats.h>
#include <libcamera/geometry.h>
#include <libcamera/stream.h>
#include <libcamera/transform.h>
#include "libcamera/internal/camera_sensor.h"
#include "libcamera/internal/framebuffer.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/v4l2_subdevice.h"
namespace libcamera {
LOG_DECLARE_CATEGORY(IPU3)
namespace {
const std::map<uint32_t, PixelFormat> mbusCodesToPixelFormat = {
{ MEDIA_BUS_FMT_SBGGR10_1X10, formats::SBGGR10_IPU3 },
{ MEDIA_BUS_FMT_SGBRG10_1X10, formats::SGBRG10_IPU3 },
{ MEDIA_BUS_FMT_SGRBG10_1X10, formats::SGRBG10_IPU3 },
{ MEDIA_BUS_FMT_SRGGB10_1X10, formats::SRGGB10_IPU3 },
};
} /* namespace */
CIO2Device::CIO2Device()
{
}
/**
* \brief Retrieve the list of supported PixelFormats
*
* Retrieve the list of supported pixel formats by matching the sensor produced
* media bus codes with the formats supported by the CIO2 unit.
*
* \return The list of supported PixelFormat
*/
std::vector<PixelFormat> CIO2Device::formats() const
{
if (!sensor_)
return {};
std::vector<PixelFormat> formats;
for (unsigned int code : sensor_->mbusCodes()) {
auto it = mbusCodesToPixelFormat.find(code);
if (it != mbusCodesToPixelFormat.end())
formats.push_back(it->second);
}
return formats;
}
/**
* \brief Retrieve the list of supported size ranges
* \param[in] format The pixel format
*
* Retrieve the list of supported sizes for a particular \a format by matching
 * the sensor produced media bus codes with the formats supported by the CIO2
 * unit.
*
* \return A list of supported sizes for the \a format or an empty list
* otherwise
*/
std::vector<SizeRange> CIO2Device::sizes(const PixelFormat &format) const
{
int mbusCode = -1;
if (!sensor_)
return {};
std::vector<SizeRange> sizes;
for (const auto &iter : mbusCodesToPixelFormat) {
if (iter.second != format)
continue;
mbusCode = iter.first;
break;
}
if (mbusCode == -1)
return {};
for (const Size &sz : sensor_->sizes(mbusCode))
sizes.emplace_back(sz);
return sizes;
}
/**
* \brief Initialize components of the CIO2 device with \a index
* \param[in] media The CIO2 media device
* \param[in] index The CIO2 device index
*
* Create and open the video device and subdevices in the CIO2 instance at \a
* index, if a supported image sensor is connected to the CSI-2 receiver of
* this CIO2 instance. Enable the media links connecting the CIO2 components
 * to prepare for capture operations and cache the sensor maximum size.
*
* \return 0 on success or a negative error code otherwise
* \retval -ENODEV No supported image sensor is connected to this CIO2 instance
*/
int CIO2Device::init(const MediaDevice *media, unsigned int index)
{
int ret;
/*
* Verify that a sensor subdevice is connected to this CIO2 instance
* and enable the media link between the two.
*/
std::string csi2Name = "ipu3-csi2 " + std::to_string(index);
MediaEntity *csi2Entity = media->getEntityByName(csi2Name);
const std::vector<MediaPad *> &pads = csi2Entity->pads();
if (pads.empty())
return -ENODEV;
/* IPU3 CSI-2 receivers have a single sink pad at index 0. */
MediaPad *sink = pads[0];
const std::vector<MediaLink *> &links = sink->links();
if (links.empty())
return -ENODEV;
MediaLink *link = links[0];
MediaEntity *sensorEntity = link->source()->entity();
sensor_ = std::make_unique<CameraSensor>(sensorEntity);
ret = sensor_->init();
if (ret)
return ret;
ret = link->setEnabled(true);
if (ret)
return ret;
/*
* Make sure the sensor produces at least one format compatible with
* the CIO2 requirements.
*
* utils::set_overlap requires the ranges to be sorted, keep the
* cio2Codes vector sorted in ascending order.
*/
std::vector<unsigned int> cio2Codes = utils::map_keys(mbusCodesToPixelFormat);
const std::vector<unsigned int> &sensorCodes = sensor_->mbusCodes();
if (!utils::set_overlap(sensorCodes.begin(), sensorCodes.end(),
cio2Codes.begin(), cio2Codes.end())) {
LOG(IPU3, Error)
<< "Sensor " << sensor_->entity()->name()
<< " has not format compatible with the IPU3";
return -EINVAL;
}
/*
* \todo Define when to open and close video device nodes, as they
* might impact on power consumption.
*/
csi2_ = std::make_unique<V4L2Subdevice>(csi2Entity);
ret = csi2_->open();
if (ret)
return ret;
std::string cio2Name = "ipu3-cio2 " + std::to_string(index);
output_ = V4L2VideoDevice::fromEntityName(media, cio2Name);
return output_->open();
}
/**
* \brief Configure the CIO2 unit
* \param[in] size The requested CIO2 output frame size
* \param[in] transform The transformation to be applied on the image sensor
* \param[out] outputFormat The CIO2 unit output image format
* \return 0 on success or a negative error code otherwise
*/
int CIO2Device::configure(const Size &size, const Transform &transform,
V4L2DeviceFormat *outputFormat)
{
V4L2SubdeviceFormat sensorFormat;
int ret;
/*
* Apply the selected format to the sensor, the CSI-2 receiver and
* the CIO2 output device.
*/
std::vector<unsigned int> mbusCodes = utils::map_keys(mbusCodesToPixelFormat);
sensorFormat = getSensorFormat(mbusCodes, size);
ret = sensor_->setFormat(&sensorFormat, transform);
if (ret)
return ret;
ret = csi2_->setFormat(0, &sensorFormat);
if (ret)
return ret;
const auto &itInfo = mbusCodesToPixelFormat.find(sensorFormat.code);
if (itInfo == mbusCodesToPixelFormat.end())
return -EINVAL;
outputFormat->fourcc = output_->toV4L2PixelFormat(itInfo->second);
outputFormat->size = sensorFormat.size;
outputFormat->planesCount = 1;
ret = output_->setFormat(outputFormat);
if (ret)
return ret;
LOG(IPU3, Debug) << "CIO2 output format " << *outputFormat;
return 0;
}
StreamConfiguration CIO2Device::generateConfiguration(Size size) const
{
StreamConfiguration cfg;
/* If no desired size use the sensor resolution. */
if (size.isNull())
size = sensor_->resolution();
/* Query the sensor static information for closest match. */
std::vector<unsigned int> mbusCodes = utils::map_keys(mbusCodesToPixelFormat);
V4L2SubdeviceFormat sensorFormat = getSensorFormat(mbusCodes, size);
if (!sensorFormat.code) {
LOG(IPU3, Error) << "Sensor does not support mbus code";
return {};
}
cfg.size = sensorFormat.size;
cfg.pixelFormat = mbusCodesToPixelFormat.at(sensorFormat.code);
cfg.bufferCount = kBufferCount;
return cfg;
}
/**
* \brief Retrieve the best sensor format for a desired output
* \param[in] mbusCodes The list of acceptable media bus codes
* \param[in] size The desired size
*
* Media bus codes are selected from \a mbusCodes, which lists all acceptable
* codes in decreasing order of preference. Media bus codes supported by the
* sensor but not listed in \a mbusCodes are ignored. If none of the desired
 * codes is supported, an empty format is returned.
*
* \a size indicates the desired size at the output of the sensor. This method
* selects the best media bus code and size supported by the sensor according
* to the following criteria.
*
* - The desired \a size shall fit in the sensor output size to avoid the need
* to up-scale.
* - The aspect ratio of sensor output size shall be as close as possible to
* the sensor's native resolution field of view.
* - The sensor output size shall be as small as possible to lower the required
* bandwidth.
* - The desired \a size shall be supported by one of the media bus code listed
* in \a mbusCodes.
*
* When multiple media bus codes can produce the same size, the code at the
* lowest position in \a mbusCodes is selected.
*
* The returned sensor output format is guaranteed to be acceptable by the
* setFormat() method without any modification.
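 *
 * For example (figures illustrative only), consider a sensor with a 4:3
 * native resolution offering 1920x1080, 2560x1920 and 4224x3168 for a given
 * code: a request for a 1280x720 output selects 2560x1920, as the 16:9 ratio
 * of 1920x1080 is further from the native field of view and 4224x3168 covers
 * a larger area than necessary.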
*
* \return The best sensor output format matching the desired media bus codes
* and size on success, or an empty format otherwise.
*/
V4L2SubdeviceFormat CIO2Device::getSensorFormat(const std::vector<unsigned int> &mbusCodes,
const Size &size) const
{
unsigned int desiredArea = size.width * size.height;
unsigned int bestArea = std::numeric_limits<unsigned int>::max();
const Size &resolution = sensor_->resolution();
float desiredRatio = static_cast<float>(resolution.width) /
resolution.height;
float bestRatio = std::numeric_limits<float>::max();
Size bestSize;
uint32_t bestCode = 0;
for (unsigned int code : mbusCodes) {
const auto sizes = sensor_->sizes(code);
if (!sizes.size())
continue;
for (const Size &sz : sizes) {
if (sz.width < size.width || sz.height < size.height)
continue;
float ratio = static_cast<float>(sz.width) / sz.height;
/*
			 * Ratios can differ by a small fractional amount,
			 * which can wildly affect the selection of the sensor
			 * output size. We are interested in selecting the
			 * closest size with respect to the desired output
			 * size, hence comparing ratios to a single decimal
			 * digit of precision is enough.
*/
ratio = static_cast<unsigned int>(ratio * 10) / 10.0;
float ratioDiff = fabsf(ratio - desiredRatio);
unsigned int area = sz.width * sz.height;
unsigned int areaDiff = area - desiredArea;
if (ratioDiff > bestRatio)
continue;
if (ratioDiff < bestRatio || areaDiff < bestArea) {
bestRatio = ratioDiff;
bestArea = areaDiff;
bestSize = sz;
bestCode = code;
}
}
}
if (bestSize.isNull()) {
LOG(IPU3, Debug) << "No supported format or size found";
return {};
}
V4L2SubdeviceFormat format{};
format.code = bestCode;
format.size = bestSize;
return format;
}
int CIO2Device::exportBuffers(unsigned int count,
std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
return output_->exportBuffers(count, buffers);
}
int CIO2Device::start()
{
int ret = output_->exportBuffers(kBufferCount, &buffers_);
if (ret < 0)
return ret;
ret = output_->importBuffers(kBufferCount);
if (ret)
LOG(IPU3, Error) << "Failed to import CIO2 buffers";
for (std::unique_ptr<FrameBuffer> &buffer : buffers_)
availableBuffers_.push(buffer.get());
ret = output_->streamOn();
if (ret) {
freeBuffers();
return ret;
}
ret = csi2_->setFrameStartEnabled(true);
if (ret) {
stop();
return ret;
}
return 0;
}
int CIO2Device::stop()
{
int ret;
csi2_->setFrameStartEnabled(false);
ret = output_->streamOff();
freeBuffers();
return ret;
}
FrameBuffer *CIO2Device::queueBuffer(Request *request, FrameBuffer *rawBuffer)
{
FrameBuffer *buffer = rawBuffer;
/* If no buffer is provided in the request, use an internal one. */
if (!buffer) {
if (availableBuffers_.empty()) {
LOG(IPU3, Debug) << "CIO2 buffer underrun";
return nullptr;
}
buffer = availableBuffers_.front();
availableBuffers_.pop();
buffer->_d()->setRequest(request);
}
int ret = output_->queueBuffer(buffer);
if (ret)
return nullptr;
return buffer;
}
void CIO2Device::tryReturnBuffer(FrameBuffer *buffer)
{
/*
* \todo Once more pipelines deal with buffers that may be allocated
* internally or externally this pattern might become a common need. At
* that point this check should be moved to something clever in
* FrameBuffer.
*/
for (const std::unique_ptr<FrameBuffer> &buf : buffers_) {
if (buf.get() == buffer) {
availableBuffers_.push(buffer);
break;
}
}
bufferAvailable.emit();
}
void CIO2Device::freeBuffers()
{
availableBuffers_ = {};
buffers_.clear();
if (output_->releaseBuffers())
LOG(IPU3, Error) << "Failed to release CIO2 buffers";
}
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera/pipeline | repos/libcamera/src/libcamera/pipeline/ipu3/imgu.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* Intel IPU3 ImgU
*/
#include "imgu.h"
#include <algorithm>
#include <cmath>
#include <limits>
#include <linux/media-bus-format.h>
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
#include <libcamera/formats.h>
#include <libcamera/stream.h>
#include "libcamera/internal/media_device.h"
namespace libcamera {
LOG_DECLARE_CATEGORY(IPU3)
namespace {
/*
* The procedure to calculate the ImgU pipe configuration has been ported
* from the pipe_config.py python script, available at:
* https://github.com/intel/intel-ipu3-pipecfg
* at revision: 243d13446e44 ("Fix some bug for some resolutions")
*/
/* BDS scaling factors: min=1, max=2.5, step=1/32 */
const std::vector<float> bdsScalingFactors = {
1, 1.03125, 1.0625, 1.09375, 1.125, 1.15625, 1.1875, 1.21875, 1.25,
1.28125, 1.3125, 1.34375, 1.375, 1.40625, 1.4375, 1.46875, 1.5, 1.53125,
1.5625, 1.59375, 1.625, 1.65625, 1.6875, 1.71875, 1.75, 1.78125, 1.8125,
1.84375, 1.875, 1.90625, 1.9375, 1.96875, 2, 2.03125, 2.0625, 2.09375,
2.125, 2.15625, 2.1875, 2.21875, 2.25, 2.28125, 2.3125, 2.34375, 2.375,
2.40625, 2.4375, 2.46875, 2.5
};
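/*
 * The table above is equivalent to 1 + i / 32 for i in [0, 48]. A sketch of
 * how it could be generated (illustrative only, the pipeline uses the
 * explicit table):
 *
 *   std::vector<float> factors;
 *   for (unsigned int i = 0; i <= 48; ++i)
 *           factors.push_back(1.0f + i / 32.0f);
 */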
/* GDC scaling factors: min=1, max=16, step=1/4 */
const std::vector<float> gdcScalingFactors = {
1, 1.25, 1.5, 1.75, 2, 2.25, 2.5, 2.75, 3, 3.25, 3.5, 3.75, 4, 4.25,
4.5, 4.75, 5, 5.25, 5.5, 5.75, 6, 6.25, 6.5, 6.75, 7, 7.25, 7.5, 7.75,
8, 8.25, 8.5, 8.75, 9, 9.25, 9.5, 9.75, 10, 10.25, 10.5, 10.75, 11,
11.25, 11.5, 11.75, 12, 12.25, 12.5, 12.75, 13, 13.25, 13.5, 13.75, 14,
14.25, 14.5, 14.75, 15, 15.25, 15.5, 15.75, 16,
};
std::vector<ImgUDevice::PipeConfig> pipeConfigs;
struct FOV {
float w;
float h;
bool isLarger(const FOV &other)
{
if (w > other.w)
return true;
if (w == other.w && h > other.h)
return true;
return false;
}
};
/* Approximate a scaling factor sf to the closest one available in a range. */
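/*
 * For instance (illustrative values), findScaleFactor(1.3, bdsScalingFactors)
 * returns 1.3125, the closest entry, while the same call with roundDown set
 * to true returns 1.28125, the largest entry not exceeding 1.3.
 */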
float findScaleFactor(float sf, const std::vector<float> &range,
bool roundDown = false)
{
if (sf <= range[0])
return range[0];
if (sf >= range[range.size() - 1])
return range[range.size() - 1];
float bestDiff = std::numeric_limits<float>::max();
unsigned int index = 0;
for (unsigned int i = 0; i < range.size(); ++i) {
float diff = utils::abs_diff(sf, range[i]);
if (diff < bestDiff) {
bestDiff = diff;
index = i;
}
}
if (roundDown && index > 0 && sf < range[index])
index--;
return range[index];
}
bool isSameRatio(const Size &in, const Size &out)
{
float inRatio = static_cast<float>(in.width) / in.height;
float outRatio = static_cast<float>(out.width) / out.height;
if (utils::abs_diff(inRatio, outRatio) > 0.1)
return false;
return true;
}
void calculateBDSHeight(ImgUDevice::Pipe *pipe, const Size &iif, const Size &gdc,
unsigned int bdsWidth, float bdsSF)
{
unsigned int minIFHeight = iif.height - ImgUDevice::kIFMaxCropHeight;
unsigned int minBDSHeight = gdc.height + ImgUDevice::kFilterHeight * 2;
unsigned int ifHeight;
float bdsHeight;
if (!isSameRatio(pipe->input, gdc)) {
unsigned int foundIfHeight = 0;
float estIFHeight = (iif.width * gdc.height) /
static_cast<float>(gdc.width);
estIFHeight = std::clamp<float>(estIFHeight, minIFHeight, iif.height);
ifHeight = utils::alignUp(estIFHeight, ImgUDevice::kIFAlignHeight);
while (ifHeight >= minIFHeight && ifHeight <= iif.height &&
ifHeight / bdsSF >= minBDSHeight) {
float height = ifHeight / bdsSF;
if (std::fmod(height, 1.0) == 0) {
unsigned int bdsIntHeight = static_cast<unsigned int>(height);
if (!(bdsIntHeight % ImgUDevice::kBDSAlignHeight)) {
foundIfHeight = ifHeight;
bdsHeight = height;
break;
}
}
ifHeight -= ImgUDevice::kIFAlignHeight;
}
ifHeight = utils::alignUp(estIFHeight, ImgUDevice::kIFAlignHeight);
while (ifHeight >= minIFHeight && ifHeight <= iif.height &&
ifHeight / bdsSF >= minBDSHeight) {
float height = ifHeight / bdsSF;
if (std::fmod(height, 1.0) == 0) {
unsigned int bdsIntHeight = static_cast<unsigned int>(height);
if (!(bdsIntHeight % ImgUDevice::kBDSAlignHeight)) {
foundIfHeight = ifHeight;
bdsHeight = height;
break;
}
}
ifHeight += ImgUDevice::kIFAlignHeight;
}
if (foundIfHeight) {
unsigned int bdsIntHeight = static_cast<unsigned int>(bdsHeight);
pipeConfigs.push_back({ bdsSF, { iif.width, foundIfHeight },
{ bdsWidth, bdsIntHeight }, gdc });
return;
}
} else {
ifHeight = utils::alignUp(iif.height, ImgUDevice::kIFAlignHeight);
while (ifHeight >= minIFHeight && ifHeight / bdsSF >= minBDSHeight) {
bdsHeight = ifHeight / bdsSF;
if (std::fmod(ifHeight, 1.0) == 0 && std::fmod(bdsHeight, 1.0) == 0) {
unsigned int bdsIntHeight = static_cast<unsigned int>(bdsHeight);
if (!(ifHeight % ImgUDevice::kIFAlignHeight) &&
!(bdsIntHeight % ImgUDevice::kBDSAlignHeight)) {
pipeConfigs.push_back({ bdsSF, { iif.width, ifHeight },
{ bdsWidth, bdsIntHeight }, gdc });
}
}
ifHeight -= ImgUDevice::kIFAlignHeight;
}
}
}
void calculateBDS(ImgUDevice::Pipe *pipe, const Size &iif, const Size &gdc, float bdsSF)
{
unsigned int minBDSWidth = gdc.width + ImgUDevice::kFilterWidth * 2;
unsigned int minBDSHeight = gdc.height + ImgUDevice::kFilterHeight * 2;
float sf = bdsSF;
while (sf <= ImgUDevice::kBDSSfMax && sf >= ImgUDevice::kBDSSfMin) {
float bdsWidth = static_cast<float>(iif.width) / sf;
float bdsHeight = static_cast<float>(iif.height) / sf;
if (std::fmod(bdsWidth, 1.0) == 0 &&
std::fmod(bdsHeight, 1.0) == 0) {
unsigned int bdsIntWidth = static_cast<unsigned int>(bdsWidth);
unsigned int bdsIntHeight = static_cast<unsigned int>(bdsHeight);
if (!(bdsIntWidth % ImgUDevice::kBDSAlignWidth) && bdsWidth >= minBDSWidth &&
!(bdsIntHeight % ImgUDevice::kBDSAlignHeight) && bdsHeight >= minBDSHeight)
calculateBDSHeight(pipe, iif, gdc, bdsIntWidth, sf);
}
sf += ImgUDevice::kBDSSfStep;
}
sf = bdsSF;
while (sf <= ImgUDevice::kBDSSfMax && sf >= ImgUDevice::kBDSSfMin) {
float bdsWidth = static_cast<float>(iif.width) / sf;
float bdsHeight = static_cast<float>(iif.height) / sf;
if (std::fmod(bdsWidth, 1.0) == 0 &&
std::fmod(bdsHeight, 1.0) == 0) {
unsigned int bdsIntWidth = static_cast<unsigned int>(bdsWidth);
unsigned int bdsIntHeight = static_cast<unsigned int>(bdsHeight);
if (!(bdsIntWidth % ImgUDevice::kBDSAlignWidth) && bdsWidth >= minBDSWidth &&
!(bdsIntHeight % ImgUDevice::kBDSAlignHeight) && bdsHeight >= minBDSHeight)
calculateBDSHeight(pipe, iif, gdc, bdsIntWidth, sf);
}
sf -= ImgUDevice::kBDSSfStep;
}
}
Size calculateGDC(ImgUDevice::Pipe *pipe)
{
const Size &in = pipe->input;
const Size &main = pipe->main;
const Size &vf = pipe->viewfinder;
Size gdc;
if (!vf.isNull()) {
gdc.width = main.width;
float ratio = (main.width * vf.height) / static_cast<float>(vf.width);
gdc.height = std::max(static_cast<float>(main.height), ratio);
return gdc;
}
if (!isSameRatio(in, main)) {
gdc = main;
return gdc;
}
float totalSF = static_cast<float>(in.width) / main.width;
float bdsSF = totalSF > 2 ? 2 : 1;
float yuvSF = totalSF / bdsSF;
float sf = findScaleFactor(yuvSF, gdcScalingFactors);
gdc.width = main.width * sf;
gdc.height = main.height * sf;
return gdc;
}
FOV calcFOV(const Size &in, const ImgUDevice::PipeConfig &pipe)
{
FOV fov{};
float inW = static_cast<float>(in.width);
float inH = static_cast<float>(in.height);
float ifCropW = static_cast<float>(in.width - pipe.iif.width);
float ifCropH = static_cast<float>(in.height - pipe.iif.height);
float gdcCropW = static_cast<float>(pipe.bds.width - pipe.gdc.width) * pipe.bds_sf;
float gdcCropH = static_cast<float>(pipe.bds.height - pipe.gdc.height) * pipe.bds_sf;
fov.w = (inW - (ifCropW + gdcCropW)) / inW;
fov.h = (inH - (ifCropH + gdcCropH)) / inH;
return fov;
}
} /* namespace */
/**
* \struct PipeConfig
* \brief The ImgU pipe configuration parameters
*
* The ImgU image pipeline is composed of several hardware blocks that crop
* and scale the input image to obtain the desired output sizes. The
 * scaling/cropping operations of those components are configured through the
* V4L2 selection API and the V4L2 subdev API applied to the ImgU media entity.
*
* The configurable components in the pipeline are:
* - IF: image feeder
* - BDS: bayer downscaler
 * - GDC: geometric distortion correction
*
* The IF crop rectangle is controlled by the V4L2_SEL_TGT_CROP selection target
* applied to the ImgU media entity sink pad number 0. The BDS scaler is
* controlled by the V4L2_SEL_TGT_COMPOSE target on the same pad, while the GDC
* output size is configured with the VIDIOC_SUBDEV_S_FMT IOCTL, again on pad
* number 0.
*
* The PipeConfig structure collects the sizes of each of those components
* plus the BDS scaling factor used to calculate the field of view
* of the final images.
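 *
 * A sketch of how a computed PipeConfig maps to the V4L2 operations described
 * above (illustrative only, mirroring ImgUDevice::configure(), where imgu is
 * the ImgU V4L2Subdevice):
 *
 * \code
 * Rectangle iif{ 0, 0, pipeConfig.iif };
 * imgu->setSelection(0, V4L2_SEL_TGT_CROP, &iif);
 *
 * Rectangle bds{ 0, 0, pipeConfig.bds };
 * imgu->setSelection(0, V4L2_SEL_TGT_COMPOSE, &bds);
 *
 * V4L2SubdeviceFormat gdcFormat{};
 * gdcFormat.code = MEDIA_BUS_FMT_FIXED;
 * gdcFormat.size = pipeConfig.gdc;
 * imgu->setFormat(0, &gdcFormat);
 * \endcode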
*/
/**
* \struct Pipe
* \brief Describe the ImgU requested configuration
*
* The ImgU unit processes images through several components, which have
 * to be properly configured by inspecting the input image size and the desired
* output sizes. This structure collects the ImgU input configuration and the
* requested main output and viewfinder configurations.
*
* \var Pipe::input
* \brief The input image size
*
* \var Pipe::main
* \brief The requested main output size
*
* \var Pipe::viewfinder
* \brief The requested viewfinder output size
*/
/**
* \brief Initialize components of the ImgU instance
* \param[in] mediaDevice The ImgU instance media device
* \param[in] index The ImgU instance index
*
* Create and open the V4L2 devices and subdevices of the ImgU instance
* with \a index.
*
* In case of errors the created V4L2VideoDevice and V4L2Subdevice instances
* are destroyed at pipeline handler delete time.
*
* \return 0 on success or a negative error code otherwise
*/
int ImgUDevice::init(MediaDevice *media, unsigned int index)
{
int ret;
name_ = "ipu3-imgu " + std::to_string(index);
media_ = media;
/*
* The media entities presence in the media device has been verified
* by the match() function: no need to check for newly created
* video devices and subdevice validity here.
*/
imgu_ = V4L2Subdevice::fromEntityName(media, name_);
ret = imgu_->open();
if (ret)
return ret;
input_ = V4L2VideoDevice::fromEntityName(media, name_ + " input");
ret = input_->open();
if (ret)
return ret;
output_ = V4L2VideoDevice::fromEntityName(media, name_ + " output");
ret = output_->open();
if (ret)
return ret;
viewfinder_ = V4L2VideoDevice::fromEntityName(media, name_ + " viewfinder");
ret = viewfinder_->open();
if (ret)
return ret;
param_ = V4L2VideoDevice::fromEntityName(media, name_ + " parameters");
ret = param_->open();
if (ret)
return ret;
stat_ = V4L2VideoDevice::fromEntityName(media, name_ + " 3a stat");
ret = stat_->open();
if (ret)
return ret;
return 0;
}
/**
* \brief Calculate the ImgU pipe configuration parameters
* \param[in] pipe The requested ImgU configuration
* \return An ImgUDevice::PipeConfig instance on success, an empty configuration
* otherwise
*/
ImgUDevice::PipeConfig ImgUDevice::calculatePipeConfig(Pipe *pipe)
{
pipeConfigs.clear();
LOG(IPU3, Debug) << "Calculating pipe configuration for: ";
LOG(IPU3, Debug) << "input: " << pipe->input;
LOG(IPU3, Debug) << "main: " << pipe->main;
LOG(IPU3, Debug) << "vf: " << pipe->viewfinder;
const Size &in = pipe->input;
/*
* \todo Filter out all resolutions < IF_CROP_MAX.
* See https://bugs.libcamera.org/show_bug.cgi?id=32
*/
if (in.width < ImgUDevice::kIFMaxCropWidth || in.height < ImgUDevice::kIFMaxCropHeight) {
LOG(IPU3, Error) << "Input resolution " << in << " not supported";
return {};
}
Size gdc = calculateGDC(pipe);
float bdsSF = static_cast<float>(in.width) / gdc.width;
float sf = findScaleFactor(bdsSF, bdsScalingFactors, true);
/* Populate the configurations vector by scaling width and height. */
unsigned int ifWidth = utils::alignUp(in.width, ImgUDevice::kIFAlignWidth);
unsigned int ifHeight = utils::alignUp(in.height, ImgUDevice::kIFAlignHeight);
unsigned int minIfWidth = in.width - ImgUDevice::kIFMaxCropWidth;
unsigned int minIfHeight = in.height - ImgUDevice::kIFMaxCropHeight;
while (ifWidth >= minIfWidth) {
while (ifHeight >= minIfHeight) {
Size iif{ ifWidth, ifHeight };
calculateBDS(pipe, iif, gdc, sf);
ifHeight -= ImgUDevice::kIFAlignHeight;
}
ifWidth -= ImgUDevice::kIFAlignWidth;
}
/* Repeat search by scaling width first. */
ifWidth = utils::alignUp(in.width, ImgUDevice::kIFAlignWidth);
ifHeight = utils::alignUp(in.height, ImgUDevice::kIFAlignHeight);
minIfWidth = in.width - ImgUDevice::kIFMaxCropWidth;
minIfHeight = in.height - ImgUDevice::kIFMaxCropHeight;
while (ifHeight >= minIfHeight) {
/*
* \todo This procedure is probably broken:
* https://github.com/intel/intel-ipu3-pipecfg/issues/2
*/
while (ifWidth >= minIfWidth) {
Size iif{ ifWidth, ifHeight };
calculateBDS(pipe, iif, gdc, sf);
ifWidth -= ImgUDevice::kIFAlignWidth;
}
ifHeight -= ImgUDevice::kIFAlignHeight;
}
if (pipeConfigs.size() == 0) {
LOG(IPU3, Error) << "Failed to calculate pipe configuration";
return {};
}
FOV bestFov = calcFOV(pipe->input, pipeConfigs[0]);
unsigned int bestIndex = 0;
unsigned int p = 0;
for (auto pipeConfig : pipeConfigs) {
FOV fov = calcFOV(pipe->input, pipeConfig);
if (fov.isLarger(bestFov)) {
bestFov = fov;
bestIndex = p;
}
++p;
}
LOG(IPU3, Debug) << "Computed pipe configuration: ";
LOG(IPU3, Debug) << "IF: " << pipeConfigs[bestIndex].iif;
LOG(IPU3, Debug) << "BDS: " << pipeConfigs[bestIndex].bds;
LOG(IPU3, Debug) << "GDC: " << pipeConfigs[bestIndex].gdc;
return pipeConfigs[bestIndex];
}
/**
* \brief Configure the ImgU pipeline
* \param[in] config The ImgU pipe configuration parameters
* \param[in] inputFormat The format to be applied to ImgU input
* \return 0 on success or a negative error code otherwise
*/
int ImgUDevice::configure(const PipeConfig &pipeConfig, V4L2DeviceFormat *inputFormat)
{
/* Configure the ImgU input video device with the requested sizes. */
int ret = input_->setFormat(inputFormat);
if (ret)
return ret;
LOG(IPU3, Debug) << "ImgU input format = " << *inputFormat;
/*
* \todo The IPU3 driver implementation shall be changed to use the
* input sizes as 'ImgU Input' subdevice sizes, and use the desired
* GDC output sizes to configure the crop/compose rectangles.
*
* The current IPU3 driver implementation uses GDC sizes as the
* 'ImgU Input' subdevice sizes, and the input video device sizes
* to configure the crop/compose rectangles, contradicting the
* V4L2 specification.
*/
Rectangle iif{ 0, 0, pipeConfig.iif };
ret = imgu_->setSelection(PAD_INPUT, V4L2_SEL_TGT_CROP, &iif);
if (ret)
return ret;
LOG(IPU3, Debug) << "ImgU IF rectangle = " << iif;
Rectangle bds{ 0, 0, pipeConfig.bds };
ret = imgu_->setSelection(PAD_INPUT, V4L2_SEL_TGT_COMPOSE, &bds);
if (ret)
return ret;
LOG(IPU3, Debug) << "ImgU BDS rectangle = " << bds;
V4L2SubdeviceFormat gdcFormat = {};
gdcFormat.code = MEDIA_BUS_FMT_FIXED;
gdcFormat.size = pipeConfig.gdc;
ret = imgu_->setFormat(PAD_INPUT, &gdcFormat);
if (ret)
return ret;
LOG(IPU3, Debug) << "ImgU GDC format = " << gdcFormat;
StreamConfiguration paramCfg = {};
paramCfg.size = inputFormat->size;
V4L2DeviceFormat paramFormat;
ret = configureVideoDevice(param_.get(), PAD_PARAM, paramCfg, ¶mFormat);
if (ret)
return ret;
StreamConfiguration statCfg = {};
statCfg.size = inputFormat->size;
V4L2DeviceFormat statFormat;
ret = configureVideoDevice(stat_.get(), PAD_STAT, statCfg, &statFormat);
if (ret)
return ret;
return 0;
}
/**
* \brief Configure a video device on the ImgU
* \param[in] dev The video device to configure
* \param[in] pad The pad of the ImgU subdevice
* \param[in] cfg The requested configuration
* \param[out] outputFormat The format set on the video device
* \return 0 on success or a negative error code otherwise
*/
int ImgUDevice::configureVideoDevice(V4L2VideoDevice *dev, unsigned int pad,
const StreamConfiguration &cfg,
V4L2DeviceFormat *outputFormat)
{
V4L2SubdeviceFormat imguFormat = {};
imguFormat.code = MEDIA_BUS_FMT_FIXED;
imguFormat.size = cfg.size;
int ret = imgu_->setFormat(pad, &imguFormat);
if (ret)
return ret;
/*
* No need to apply format to the param or stat video devices as the
* driver ignores the operation.
*/
if (dev == param_.get() || dev == stat_.get())
return 0;
*outputFormat = {};
outputFormat->fourcc = dev->toV4L2PixelFormat(formats::NV12);
outputFormat->size = cfg.size;
outputFormat->planesCount = 2;
ret = dev->setFormat(outputFormat);
if (ret)
return ret;
const char *name = dev == output_.get() ? "output" : "viewfinder";
LOG(IPU3, Debug) << "ImgU " << name << " format = "
<< *outputFormat;
return 0;
}
/**
* \brief Allocate buffers for all the ImgU video devices
*/
int ImgUDevice::allocateBuffers(unsigned int bufferCount)
{
/* Share buffers between CIO2 output and ImgU input. */
int ret = input_->importBuffers(bufferCount);
if (ret) {
LOG(IPU3, Error) << "Failed to import ImgU input buffers";
return ret;
}
ret = param_->allocateBuffers(bufferCount, ¶mBuffers_);
if (ret < 0) {
LOG(IPU3, Error) << "Failed to allocate ImgU param buffers";
goto error;
}
ret = stat_->allocateBuffers(bufferCount, &statBuffers_);
if (ret < 0) {
LOG(IPU3, Error) << "Failed to allocate ImgU stat buffers";
goto error;
}
/*
* Import buffers for all outputs, regardless of whether the
* corresponding stream is active or inactive, as the driver needs
* buffers to be requested on the V4L2 devices in order to operate.
*/
ret = output_->importBuffers(bufferCount);
if (ret < 0) {
LOG(IPU3, Error) << "Failed to import ImgU output buffers";
goto error;
}
ret = viewfinder_->importBuffers(bufferCount);
if (ret < 0) {
LOG(IPU3, Error) << "Failed to import ImgU viewfinder buffers";
goto error;
}
return 0;
error:
freeBuffers();
return ret;
}
/**
* \brief Release buffers for all the ImgU video devices
*/
void ImgUDevice::freeBuffers()
{
int ret;
paramBuffers_.clear();
statBuffers_.clear();
ret = output_->releaseBuffers();
if (ret)
LOG(IPU3, Error) << "Failed to release ImgU output buffers";
ret = param_->releaseBuffers();
if (ret)
LOG(IPU3, Error) << "Failed to release ImgU param buffers";
ret = stat_->releaseBuffers();
if (ret)
LOG(IPU3, Error) << "Failed to release ImgU stat buffers";
ret = viewfinder_->releaseBuffers();
if (ret)
LOG(IPU3, Error) << "Failed to release ImgU viewfinder buffers";
ret = input_->releaseBuffers();
if (ret)
LOG(IPU3, Error) << "Failed to release ImgU input buffers";
}
int ImgUDevice::start()
{
int ret;
/* Start the ImgU video devices. */
ret = output_->streamOn();
if (ret) {
LOG(IPU3, Error) << "Failed to start ImgU output";
return ret;
}
ret = viewfinder_->streamOn();
if (ret) {
LOG(IPU3, Error) << "Failed to start ImgU viewfinder";
return ret;
}
ret = param_->streamOn();
if (ret) {
LOG(IPU3, Error) << "Failed to start ImgU param";
return ret;
}
ret = stat_->streamOn();
if (ret) {
LOG(IPU3, Error) << "Failed to start ImgU stat";
return ret;
}
ret = input_->streamOn();
if (ret) {
LOG(IPU3, Error) << "Failed to start ImgU input";
return ret;
}
return 0;
}
int ImgUDevice::stop()
{
int ret;
ret = output_->streamOff();
ret |= viewfinder_->streamOff();
ret |= param_->streamOff();
ret |= stat_->streamOff();
ret |= input_->streamOff();
return ret;
}
/**
* \brief Enable or disable a single link on the ImgU instance
*
* This function assumes the media device associated with the ImgU instance
* is open.
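 *
 * For example (illustrative only), the link from the ImgU input video device
 * to the ImgU subdevice can be enabled with:
 *
 * \code
 * linkSetup(name_ + " input", 0, name_, PAD_INPUT, true);
 * \endcode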
*
* \return 0 on success or a negative error code otherwise
*/
int ImgUDevice::linkSetup(const std::string &source, unsigned int sourcePad,
const std::string &sink, unsigned int sinkPad,
bool enable)
{
MediaLink *link = media_->link(source, sourcePad, sink, sinkPad);
if (!link) {
LOG(IPU3, Error)
<< "Failed to get link: '" << source << "':"
<< sourcePad << " -> '" << sink << "':" << sinkPad;
return -ENODEV;
}
return link->setEnabled(enable);
}
/**
* \brief Enable or disable all media links in the ImgU instance to prepare
* for capture operations
*
 * \todo This function will probably be removed or changed once links are
* enabled or disabled selectively.
*
* \return 0 on success or a negative error code otherwise
*/
int ImgUDevice::enableLinks(bool enable)
{
std::string viewfinderName = name_ + " viewfinder";
std::string paramName = name_ + " parameters";
std::string outputName = name_ + " output";
std::string statName = name_ + " 3a stat";
std::string inputName = name_ + " input";
int ret;
ret = linkSetup(inputName, 0, name_, PAD_INPUT, enable);
if (ret)
return ret;
ret = linkSetup(name_, PAD_OUTPUT, outputName, 0, enable);
if (ret)
return ret;
ret = linkSetup(name_, PAD_VF, viewfinderName, 0, enable);
if (ret)
return ret;
ret = linkSetup(paramName, 0, name_, PAD_PARAM, enable);
if (ret)
return ret;
return linkSetup(name_, PAD_STAT, statName, 0, enable);
}
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera/pipeline | repos/libcamera/src/libcamera/pipeline/ipu3/frames.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2020, Google Inc.
*
* Intel IPU3 Frames helper
*/
#pragma once
#include <map>
#include <memory>
#include <queue>
#include <vector>
#include <libcamera/base/signal.h>
#include <libcamera/controls.h>
namespace libcamera {
class FrameBuffer;
class IPAProxy;
class PipelineHandler;
class Request;
class V4L2VideoDevice;
struct IPABuffer;
class IPU3Frames
{
public:
struct Info {
unsigned int id;
Request *request;
FrameBuffer *rawBuffer;
FrameBuffer *paramBuffer;
FrameBuffer *statBuffer;
ControlList effectiveSensorControls;
bool paramDequeued;
bool metadataProcessed;
};
IPU3Frames();
void init(const std::vector<std::unique_ptr<FrameBuffer>> ¶mBuffers,
const std::vector<std::unique_ptr<FrameBuffer>> &statBuffers);
void clear();
Info *create(Request *request);
void remove(Info *info);
bool tryComplete(Info *info);
Info *find(unsigned int id);
Info *find(FrameBuffer *buffer);
Signal<> bufferAvailable;
private:
std::queue<FrameBuffer *> availableParamBuffers_;
std::queue<FrameBuffer *> availableStatBuffers_;
std::map<unsigned int, std::unique_ptr<Info>> frameInfo_;
};
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera/pipeline | repos/libcamera/src/libcamera/pipeline/ipu3/ipu3.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* Pipeline handler for Intel IPU3
*/
#include <algorithm>
#include <iomanip>
#include <memory>
#include <queue>
#include <vector>
#include <linux/intel-ipu3.h>
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
#include <libcamera/camera.h>
#include <libcamera/control_ids.h>
#include <libcamera/formats.h>
#include <libcamera/ipa/ipu3_ipa_interface.h>
#include <libcamera/ipa/ipu3_ipa_proxy.h>
#include <libcamera/property_ids.h>
#include <libcamera/request.h>
#include <libcamera/stream.h>
#include "libcamera/internal/camera.h"
#include "libcamera/internal/camera_lens.h"
#include "libcamera/internal/camera_sensor.h"
#include "libcamera/internal/delayed_controls.h"
#include "libcamera/internal/device_enumerator.h"
#include "libcamera/internal/framebuffer.h"
#include "libcamera/internal/ipa_manager.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/pipeline_handler.h"
#include "cio2.h"
#include "frames.h"
#include "imgu.h"
namespace libcamera {
LOG_DEFINE_CATEGORY(IPU3)
static const ControlInfoMap::Map IPU3Controls = {
{ &controls::draft::PipelineDepth, ControlInfo(2, 3) },
};
class IPU3CameraData : public Camera::Private
{
public:
IPU3CameraData(PipelineHandler *pipe)
: Camera::Private(pipe)
{
}
int loadIPA();
void imguOutputBufferReady(FrameBuffer *buffer);
void cio2BufferReady(FrameBuffer *buffer);
void paramBufferReady(FrameBuffer *buffer);
void statBufferReady(FrameBuffer *buffer);
void queuePendingRequests();
void cancelPendingRequests();
void frameStart(uint32_t sequence);
CIO2Device cio2_;
ImgUDevice *imgu_;
Stream outStream_;
Stream vfStream_;
Stream rawStream_;
Rectangle cropRegion_;
std::unique_ptr<DelayedControls> delayedCtrls_;
IPU3Frames frameInfos_;
std::unique_ptr<ipa::ipu3::IPAProxyIPU3> ipa_;
/* Requests for which no buffer has been queued to the CIO2 device yet. */
std::queue<Request *> pendingRequests_;
/* Requests queued to the CIO2 device but not yet processed by the ImgU. */
std::queue<Request *> processingRequests_;
ControlInfoMap ipaControls_;
private:
void metadataReady(unsigned int id, const ControlList &metadata);
void paramsBufferReady(unsigned int id);
void setSensorControls(unsigned int id, const ControlList &sensorControls,
const ControlList &lensControls);
};
class IPU3CameraConfiguration : public CameraConfiguration
{
public:
static constexpr unsigned int kBufferCount = 4;
static constexpr unsigned int kMaxStreams = 3;
IPU3CameraConfiguration(IPU3CameraData *data);
Status validate() override;
const StreamConfiguration &cio2Format() const { return cio2Configuration_; }
const ImgUDevice::PipeConfig imguConfig() const { return pipeConfig_; }
/* Cache the combinedTransform_ that will be applied to the sensor */
Transform combinedTransform_;
private:
/*
* The IPU3CameraData instance is guaranteed to be valid as long as the
* corresponding Camera instance is valid. In order to borrow a
* reference to the camera data, store a new reference to the camera.
*/
const IPU3CameraData *data_;
StreamConfiguration cio2Configuration_;
ImgUDevice::PipeConfig pipeConfig_;
};
class PipelineHandlerIPU3 : public PipelineHandler
{
public:
static constexpr unsigned int V4L2_CID_IPU3_PIPE_MODE = 0x009819c1;
static constexpr Size kViewfinderSize{ 1280, 720 };
enum IPU3PipeModes {
IPU3PipeModeVideo = 0,
IPU3PipeModeStillCapture = 1,
};
PipelineHandlerIPU3(CameraManager *manager);
std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
int start(Camera *camera, const ControlList *controls) override;
void stopDevice(Camera *camera) override;
int queueRequestDevice(Camera *camera, Request *request) override;
bool match(DeviceEnumerator *enumerator) override;
private:
IPU3CameraData *cameraData(Camera *camera)
{
return static_cast<IPU3CameraData *>(camera->_d());
}
int initControls(IPU3CameraData *data);
int updateControls(IPU3CameraData *data);
int registerCameras();
int allocateBuffers(Camera *camera);
int freeBuffers(Camera *camera);
ImgUDevice imgu0_;
ImgUDevice imgu1_;
MediaDevice *cio2MediaDev_;
MediaDevice *imguMediaDev_;
std::vector<IPABuffer> ipaBuffers_;
};
IPU3CameraConfiguration::IPU3CameraConfiguration(IPU3CameraData *data)
: CameraConfiguration()
{
data_ = data;
}
CameraConfiguration::Status IPU3CameraConfiguration::validate()
{
Status status = Valid;
if (config_.empty())
return Invalid;
/*
* Validate the requested transform against the sensor capabilities and
* rotation and store the final combined transform that configure() will
* need to apply to the sensor to save us working it out again.
*/
Orientation requestedOrientation = orientation;
combinedTransform_ = data_->cio2_.sensor()->computeTransform(&orientation);
if (orientation != requestedOrientation)
status = Adjusted;
/* Cap the number of entries to the available streams. */
if (config_.size() > kMaxStreams) {
config_.resize(kMaxStreams);
status = Adjusted;
}
/*
* Validate the requested stream configuration and select the sensor
* format by collecting the maximum RAW stream width and height and
* picking the closest larger match.
*
	 * If no RAW stream is requested, use the size of the largest YUV stream,
* plus margin pixels for the IF and BDS rectangle to downscale.
*
* \todo Clarify the IF and BDS margins requirements.
*/
unsigned int rawCount = 0;
unsigned int yuvCount = 0;
Size rawRequirement;
Size maxYuvSize;
Size rawSize;
for (const StreamConfiguration &cfg : config_) {
const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);
if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW) {
rawCount++;
rawSize = std::max(rawSize, cfg.size);
} else {
yuvCount++;
maxYuvSize = std::max(maxYuvSize, cfg.size);
rawRequirement.expandTo(cfg.size);
}
}
if (rawCount > 1 || yuvCount > 2) {
LOG(IPU3, Debug) << "Camera configuration not supported";
return Invalid;
} else if (rawCount && !yuvCount) {
/*
* Disallow raw-only camera configuration. Currently, ImgU does
* not get configured for raw-only streams and has early return
		 * in configure(). To support raw-only streams, we need the IPA
		 * to get configured since it will set up the sensor controls for
* the capture.
*
* \todo Configure the ImgU with internal buffers which will enable
* the IPA to get configured for the raw-only camera configuration.
*/
LOG(IPU3, Debug)
<< "Camera configuration cannot support raw-only streams";
return Invalid;
}
/*
* Generate raw configuration from CIO2.
*
* The output YUV streams will be limited in size to the maximum frame
* size requested for the RAW stream, if present.
*
* If no raw stream is requested, generate a size from the largest YUV
* stream, aligned to the ImgU constraints and bound
* by the sensor's maximum resolution. See
* https://bugs.libcamera.org/show_bug.cgi?id=32
*/
if (rawSize.isNull())
rawSize = rawRequirement.expandedTo({ ImgUDevice::kIFMaxCropWidth,
ImgUDevice::kIFMaxCropHeight })
.grownBy({ ImgUDevice::kOutputMarginWidth,
ImgUDevice::kOutputMarginHeight })
.boundedTo(data_->cio2_.sensor()->resolution());
cio2Configuration_ = data_->cio2_.generateConfiguration(rawSize);
if (!cio2Configuration_.pixelFormat.isValid())
return Invalid;
LOG(IPU3, Debug) << "CIO2 configuration: " << cio2Configuration_.toString();
ImgUDevice::Pipe pipe{};
pipe.input = cio2Configuration_.size;
/*
* Adjust the configurations if needed and assign streams while
* iterating them.
*/
bool mainOutputAvailable = true;
for (unsigned int i = 0; i < config_.size(); ++i) {
const PixelFormatInfo &info = PixelFormatInfo::info(config_[i].pixelFormat);
const StreamConfiguration originalCfg = config_[i];
StreamConfiguration *cfg = &config_[i];
LOG(IPU3, Debug) << "Validating stream: " << config_[i].toString();
if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW) {
/* Initialize the RAW stream with the CIO2 configuration. */
cfg->size = cio2Configuration_.size;
cfg->pixelFormat = cio2Configuration_.pixelFormat;
cfg->bufferCount = cio2Configuration_.bufferCount;
cfg->stride = info.stride(cfg->size.width, 0, 64);
cfg->frameSize = info.frameSize(cfg->size, 64);
cfg->setStream(const_cast<Stream *>(&data_->rawStream_));
LOG(IPU3, Debug) << "Assigned " << cfg->toString()
<< " to the raw stream";
} else {
/* Assign and configure the main and viewfinder outputs. */
/*
* Clamp the size to match the ImgU size limits and the
* margins from the CIO2 output frame size.
*
			 * The ImgU outputs need to be strictly smaller than
* the CIO2 output frame and rounded down to 64 pixels
* in width and 32 pixels in height. This assumption
* comes from inspecting the pipe configuration script
* results and the available suggested configurations in
* the ChromeOS BSP .xml camera tuning files and shall
* be validated.
*
			 * \todo Clarify what hardware constraints require
			 * these alignments, if any. It might depend on the
			 * BDS scaling factor of 1/32, as the main output has
			 * no YUV scaler, unlike the viewfinder output.
*/
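			/*
			 * Worked example (figures illustrative only): with a
			 * 2592x1944 CIO2 output and the 64/32 margins noted
			 * above, the limits computed below are
			 * alignDown(2591, 64) = 2560 for the width and
			 * alignDown(1943, 32) = 1920 for the height.
			 */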
unsigned int limit;
limit = utils::alignDown(cio2Configuration_.size.width - 1,
ImgUDevice::kOutputMarginWidth);
cfg->size.width = std::clamp(cfg->size.width,
ImgUDevice::kOutputMinSize.width,
limit);
limit = utils::alignDown(cio2Configuration_.size.height - 1,
ImgUDevice::kOutputMarginHeight);
cfg->size.height = std::clamp(cfg->size.height,
ImgUDevice::kOutputMinSize.height,
limit);
cfg->size.alignDownTo(ImgUDevice::kOutputAlignWidth,
ImgUDevice::kOutputAlignHeight);
cfg->pixelFormat = formats::NV12;
cfg->bufferCount = kBufferCount;
cfg->stride = info.stride(cfg->size.width, 0, 1);
cfg->frameSize = info.frameSize(cfg->size, 1);
/*
* Use the main output stream in case only one stream is
* requested or if the current configuration is the one
* with the maximum YUV output size.
*/
if (mainOutputAvailable &&
(originalCfg.size == maxYuvSize || yuvCount == 1)) {
cfg->setStream(const_cast<Stream *>(&data_->outStream_));
mainOutputAvailable = false;
pipe.main = cfg->size;
if (yuvCount == 1)
pipe.viewfinder = pipe.main;
LOG(IPU3, Debug) << "Assigned " << cfg->toString()
<< " to the main output";
} else {
cfg->setStream(const_cast<Stream *>(&data_->vfStream_));
pipe.viewfinder = cfg->size;
LOG(IPU3, Debug) << "Assigned " << cfg->toString()
<< " to the viewfinder output";
}
}
if (cfg->pixelFormat != originalCfg.pixelFormat ||
cfg->size != originalCfg.size) {
LOG(IPU3, Debug)
<< "Stream " << i << " configuration adjusted to "
<< cfg->toString();
status = Adjusted;
}
}
/* Only compute the ImgU configuration if a YUV stream has been requested. */
if (yuvCount) {
pipeConfig_ = data_->imgu_->calculatePipeConfig(&pipe);
if (pipeConfig_.isNull()) {
LOG(IPU3, Error) << "Failed to calculate pipe configuration: "
<< "unsupported resolutions.";
return Invalid;
}
}
return status;
}
PipelineHandlerIPU3::PipelineHandlerIPU3(CameraManager *manager)
: PipelineHandler(manager), cio2MediaDev_(nullptr), imguMediaDev_(nullptr)
{
}
std::unique_ptr<CameraConfiguration>
PipelineHandlerIPU3::generateConfiguration(Camera *camera, Span<const StreamRole> roles)
{
IPU3CameraData *data = cameraData(camera);
std::unique_ptr<IPU3CameraConfiguration> config =
std::make_unique<IPU3CameraConfiguration>(data);
if (roles.empty())
return config;
Size sensorResolution = data->cio2_.sensor()->resolution();
for (const StreamRole role : roles) {
std::map<PixelFormat, std::vector<SizeRange>> streamFormats;
unsigned int bufferCount;
PixelFormat pixelFormat;
Size size;
switch (role) {
case StreamRole::StillCapture:
/*
* Use as default full-frame configuration a value
* strictly smaller than the sensor resolution (limited
* to the ImgU maximum output size) and aligned down to
* the required frame margin.
*
* \todo Clarify the alignment constraints as explained
* in validate()
*/
size = sensorResolution.boundedTo(ImgUDevice::kOutputMaxSize)
.shrunkBy({ 1, 1 })
.alignedDownTo(ImgUDevice::kOutputMarginWidth,
ImgUDevice::kOutputMarginHeight);
pixelFormat = formats::NV12;
bufferCount = IPU3CameraConfiguration::kBufferCount;
streamFormats[pixelFormat] = { { ImgUDevice::kOutputMinSize, size } };
break;
case StreamRole::Raw: {
StreamConfiguration cio2Config =
data->cio2_.generateConfiguration(sensorResolution);
pixelFormat = cio2Config.pixelFormat;
size = cio2Config.size;
bufferCount = cio2Config.bufferCount;
for (const PixelFormat &format : data->cio2_.formats())
streamFormats[format] = data->cio2_.sizes(format);
break;
}
case StreamRole::Viewfinder:
case StreamRole::VideoRecording: {
/*
* Default viewfinder and videorecording to 1280x720,
* capped to the maximum sensor resolution and aligned
* to the ImgU output constraints.
*/
size = sensorResolution.boundedTo(kViewfinderSize)
.alignedDownTo(ImgUDevice::kOutputAlignWidth,
ImgUDevice::kOutputAlignHeight);
pixelFormat = formats::NV12;
bufferCount = IPU3CameraConfiguration::kBufferCount;
streamFormats[pixelFormat] = { { ImgUDevice::kOutputMinSize, size } };
break;
}
default:
LOG(IPU3, Error)
<< "Requested stream role not supported: " << role;
return nullptr;
}
StreamFormats formats(streamFormats);
StreamConfiguration cfg(formats);
cfg.size = size;
cfg.pixelFormat = pixelFormat;
cfg.bufferCount = bufferCount;
config->addConfiguration(cfg);
}
if (config->validate() == CameraConfiguration::Invalid)
return {};
return config;
}
int PipelineHandlerIPU3::configure(Camera *camera, CameraConfiguration *c)
{
IPU3CameraConfiguration *config =
static_cast<IPU3CameraConfiguration *>(c);
IPU3CameraData *data = cameraData(camera);
Stream *outStream = &data->outStream_;
Stream *vfStream = &data->vfStream_;
CIO2Device *cio2 = &data->cio2_;
ImgUDevice *imgu = data->imgu_;
V4L2DeviceFormat outputFormat;
int ret;
/*
* FIXME: enabled links in one ImgU pipe interfere with capture
* operations on the other one. This can be easily triggered by
* capturing from one camera and then trying to capture from the other
* one right after, without disabling media links on the first used
* pipe.
*
* The tricky part here is where to disable links on the ImgU instance
* which is currently not in use:
* 1) Link enable/disable cannot be done at start()/stop() time as video
	 * devices need to be linked first before formats can be configured on
* them.
* 2) As link enable has to be done at the least in configure(),
* before configuring formats, the only place where to disable links
* would be 'stop()', but the Camera class state machine allows
* start()<->stop() sequences without any configure() in between.
*
* As of now, disable all links in the ImgU media graph before
	 * configuring the device, to allow alternating the usage of the two
* ImgU pipes.
*
* As a consequence, a Camera using an ImgU shall be configured before
* any start()/stop() sequence. An application that wants to
	 * pre-configure all the cameras and then start/stop them alternately
* without going through any re-configuration (a sequence that is
* allowed by the Camera state machine) would now fail on the IPU3.
*/
ret = imguMediaDev_->disableLinks();
if (ret)
return ret;
/*
* \todo Enable links selectively based on the requested streams.
* As of now, enable all links unconditionally.
* \todo Don't configure the ImgU at all if we only have a single
* stream which is for raw capture, in which case no buffers will
* ever be queued to the ImgU.
*/
ret = data->imgu_->enableLinks(true);
if (ret)
return ret;
/*
* Pass the requested stream size to the CIO2 unit and get back the
* adjusted format to be propagated to the ImgU output devices.
*/
const Size &sensorSize = config->cio2Format().size;
V4L2DeviceFormat cio2Format;
ret = cio2->configure(sensorSize, config->combinedTransform_, &cio2Format);
if (ret)
return ret;
IPACameraSensorInfo sensorInfo;
cio2->sensor()->sensorInfo(&sensorInfo);
data->cropRegion_ = sensorInfo.analogCrop;
/*
* If the ImgU gets configured, its driver seems to expect that
* buffers will be queued to its outputs, as otherwise the next
* capture session that uses the ImgU fails when queueing
* buffers to its input.
*
* If no ImgU configuration has been computed, it means only a RAW
* stream has been requested: return here to skip the ImgU configuration
* part.
*/
ImgUDevice::PipeConfig imguConfig = config->imguConfig();
if (imguConfig.isNull())
return 0;
ret = imgu->configure(imguConfig, &cio2Format);
if (ret)
return ret;
/* Apply the format to the configured streams output devices. */
StreamConfiguration *mainCfg = nullptr;
StreamConfiguration *vfCfg = nullptr;
for (unsigned int i = 0; i < config->size(); ++i) {
StreamConfiguration &cfg = (*config)[i];
Stream *stream = cfg.stream();
if (stream == outStream) {
mainCfg = &cfg;
ret = imgu->configureOutput(cfg, &outputFormat);
if (ret)
return ret;
} else if (stream == vfStream) {
vfCfg = &cfg;
ret = imgu->configureViewfinder(cfg, &outputFormat);
if (ret)
return ret;
}
}
/*
* As we need to set format also on the non-active streams, use
* the configuration of the active one for that purpose (there should
* be at least one active stream in the configuration request).
*/
if (!vfCfg) {
ret = imgu->configureViewfinder(*mainCfg, &outputFormat);
if (ret)
return ret;
}
/* Apply the "pipe_mode" control to the ImgU subdevice. */
ControlList ctrls(imgu->imgu_->controls());
/*
* Set the ImgU pipe mode to 'Video' unconditionally to have statistics
* generated.
*
* \todo Figure out what the 'Still Capture' mode is meant for, and use
* it accordingly.
*/
ctrls.set(V4L2_CID_IPU3_PIPE_MODE,
static_cast<int32_t>(IPU3PipeModeVideo));
ret = imgu->imgu_->setControls(&ctrls);
if (ret) {
LOG(IPU3, Error) << "Unable to set pipe_mode control";
return ret;
}
ipa::ipu3::IPAConfigInfo configInfo;
configInfo.sensorControls = data->cio2_.sensor()->controls();
CameraLens *lens = data->cio2_.sensor()->focusLens();
if (lens)
configInfo.lensControls = lens->controls();
configInfo.sensorInfo = sensorInfo;
configInfo.bdsOutputSize = config->imguConfig().bds;
configInfo.iif = config->imguConfig().iif;
ret = data->ipa_->configure(configInfo, &data->ipaControls_);
if (ret) {
LOG(IPU3, Error) << "Failed to configure IPA: "
<< strerror(-ret);
return ret;
}
return updateControls(data);
}
int PipelineHandlerIPU3::exportFrameBuffers(Camera *camera, Stream *stream,
std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
IPU3CameraData *data = cameraData(camera);
unsigned int count = stream->configuration().bufferCount;
if (stream == &data->outStream_)
return data->imgu_->output_->exportBuffers(count, buffers);
else if (stream == &data->vfStream_)
return data->imgu_->viewfinder_->exportBuffers(count, buffers);
else if (stream == &data->rawStream_)
return data->cio2_.exportBuffers(count, buffers);
return -EINVAL;
}
/**
* \todo Clarify if 'viewfinder' and 'stat' nodes have to be set up and
* started even if not in use. As of now, if not properly configured and
* enabled, the ImgU processing pipeline stalls.
*
* In order to be able to start the 'viewfinder' and 'stat' nodes, we need
* memory to be reserved.
*/
int PipelineHandlerIPU3::allocateBuffers(Camera *camera)
{
IPU3CameraData *data = cameraData(camera);
ImgUDevice *imgu = data->imgu_;
unsigned int bufferCount;
int ret;
bufferCount = std::max({
data->outStream_.configuration().bufferCount,
data->vfStream_.configuration().bufferCount,
data->rawStream_.configuration().bufferCount,
});
ret = imgu->allocateBuffers(bufferCount);
if (ret < 0)
return ret;
/* Map buffers to the IPA. */
unsigned int ipaBufferId = 1;
for (const std::unique_ptr<FrameBuffer> &buffer : imgu->paramBuffers_) {
buffer->setCookie(ipaBufferId++);
ipaBuffers_.emplace_back(buffer->cookie(), buffer->planes());
}
for (const std::unique_ptr<FrameBuffer> &buffer : imgu->statBuffers_) {
buffer->setCookie(ipaBufferId++);
ipaBuffers_.emplace_back(buffer->cookie(), buffer->planes());
}
data->ipa_->mapBuffers(ipaBuffers_);
data->frameInfos_.init(imgu->paramBuffers_, imgu->statBuffers_);
data->frameInfos_.bufferAvailable.connect(
data, &IPU3CameraData::queuePendingRequests);
return 0;
}
int PipelineHandlerIPU3::freeBuffers(Camera *camera)
{
IPU3CameraData *data = cameraData(camera);
data->frameInfos_.clear();
std::vector<unsigned int> ids;
for (IPABuffer &ipabuf : ipaBuffers_)
ids.push_back(ipabuf.id);
data->ipa_->unmapBuffers(ids);
ipaBuffers_.clear();
data->imgu_->freeBuffers();
return 0;
}
int PipelineHandlerIPU3::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
{
IPU3CameraData *data = cameraData(camera);
CIO2Device *cio2 = &data->cio2_;
ImgUDevice *imgu = data->imgu_;
int ret;
/* Disable test pattern mode on the sensor, if any. */
ret = cio2->sensor()->setTestPatternMode(
controls::draft::TestPatternModeEnum::TestPatternModeOff);
if (ret)
return ret;
/* Allocate buffers for internal pipeline usage. */
ret = allocateBuffers(camera);
if (ret)
return ret;
ret = data->ipa_->start();
if (ret)
goto error;
data->delayedCtrls_->reset();
/*
	 * Start the CIO2 and ImgU video devices; buffers will be queued to the
	 * ImgU output and viewfinder when requests are queued.
*/
ret = cio2->start();
if (ret)
goto error;
ret = imgu->start();
if (ret)
goto error;
return 0;
error:
imgu->stop();
cio2->stop();
data->ipa_->stop();
freeBuffers(camera);
LOG(IPU3, Error) << "Failed to start camera " << camera->id();
return ret;
}
void PipelineHandlerIPU3::stopDevice(Camera *camera)
{
IPU3CameraData *data = cameraData(camera);
int ret = 0;
data->cancelPendingRequests();
data->ipa_->stop();
ret |= data->imgu_->stop();
ret |= data->cio2_.stop();
if (ret)
LOG(IPU3, Warning) << "Failed to stop camera " << camera->id();
freeBuffers(camera);
}
void IPU3CameraData::cancelPendingRequests()
{
processingRequests_ = {};
while (!pendingRequests_.empty()) {
Request *request = pendingRequests_.front();
for (auto it : request->buffers()) {
FrameBuffer *buffer = it.second;
buffer->_d()->cancel();
pipe()->completeBuffer(request, buffer);
}
pipe()->completeRequest(request);
pendingRequests_.pop();
}
}
void IPU3CameraData::queuePendingRequests()
{
while (!pendingRequests_.empty()) {
Request *request = pendingRequests_.front();
IPU3Frames::Info *info = frameInfos_.create(request);
if (!info)
break;
/*
* Queue a buffer on the CIO2, using the raw stream buffer
* provided in the request, if any, or a CIO2 internal buffer
* otherwise.
*/
FrameBuffer *reqRawBuffer = request->findBuffer(&rawStream_);
FrameBuffer *rawBuffer = cio2_.queueBuffer(request, reqRawBuffer);
/*
* \todo If queueBuffer fails in queuing a buffer to the device,
* report the request as error by cancelling the request and
* calling PipelineHandler::completeRequest().
*/
if (!rawBuffer) {
frameInfos_.remove(info);
break;
}
info->rawBuffer = rawBuffer;
ipa_->queueRequest(info->id, request->controls());
pendingRequests_.pop();
processingRequests_.push(request);
}
}
int PipelineHandlerIPU3::queueRequestDevice(Camera *camera, Request *request)
{
IPU3CameraData *data = cameraData(camera);
data->pendingRequests_.push(request);
data->queuePendingRequests();
return 0;
}
bool PipelineHandlerIPU3::match(DeviceEnumerator *enumerator)
{
int ret;
DeviceMatch cio2_dm("ipu3-cio2");
cio2_dm.add("ipu3-csi2 0");
cio2_dm.add("ipu3-cio2 0");
cio2_dm.add("ipu3-csi2 1");
cio2_dm.add("ipu3-cio2 1");
cio2_dm.add("ipu3-csi2 2");
cio2_dm.add("ipu3-cio2 2");
cio2_dm.add("ipu3-csi2 3");
cio2_dm.add("ipu3-cio2 3");
DeviceMatch imgu_dm("ipu3-imgu");
imgu_dm.add("ipu3-imgu 0");
imgu_dm.add("ipu3-imgu 0 input");
imgu_dm.add("ipu3-imgu 0 parameters");
imgu_dm.add("ipu3-imgu 0 output");
imgu_dm.add("ipu3-imgu 0 viewfinder");
imgu_dm.add("ipu3-imgu 0 3a stat");
imgu_dm.add("ipu3-imgu 1");
imgu_dm.add("ipu3-imgu 1 input");
imgu_dm.add("ipu3-imgu 1 parameters");
imgu_dm.add("ipu3-imgu 1 output");
imgu_dm.add("ipu3-imgu 1 viewfinder");
imgu_dm.add("ipu3-imgu 1 3a stat");
cio2MediaDev_ = acquireMediaDevice(enumerator, cio2_dm);
if (!cio2MediaDev_)
return false;
imguMediaDev_ = acquireMediaDevice(enumerator, imgu_dm);
if (!imguMediaDev_)
return false;
/*
* Disable all links that are enabled by default on CIO2, as camera
* creation enables all valid links it finds.
*/
if (cio2MediaDev_->disableLinks())
return false;
	ret = imguMediaDev_->disableLinks();
	if (ret)
		return false;
ret = registerCameras();
return ret == 0;
}
/**
* \brief Initialize the camera controls
* \param[in] data The camera data
*
* Initialize the camera controls by calculating controls which the pipeline
 * is responsible for and merging them with the controls computed by the IPA.
*
* This function needs data->ipaControls_ to be initialized by the IPA init()
* function at camera creation time. Always call this function after IPA init().
*
* \return 0 on success or a negative error code otherwise
*/
int PipelineHandlerIPU3::initControls(IPU3CameraData *data)
{
/*
* \todo The controls initialized here depend on sensor configuration
* and their limits should be updated once the configuration gets
* changed.
*
* Initialize the sensor using its resolution and compute the control
* limits.
*/
CameraSensor *sensor = data->cio2_.sensor();
V4L2SubdeviceFormat sensorFormat = {};
sensorFormat.size = sensor->resolution();
int ret = sensor->setFormat(&sensorFormat);
if (ret)
return ret;
return updateControls(data);
}
/**
* \brief Update the camera controls
* \param[in] data The camera data
*
* Compute the camera controls by calculating controls which the pipeline
 * is responsible for and merging them with the controls computed by the IPA.
*
* This function needs data->ipaControls_ to be refreshed when a new
* configuration is applied to the camera by the IPA configure() function.
*
* Always call this function after IPA configure() to make sure to have a
* properly refreshed IPA controls list.
*
* \return 0 on success or a negative error code otherwise
*/
int PipelineHandlerIPU3::updateControls(IPU3CameraData *data)
{
CameraSensor *sensor = data->cio2_.sensor();
IPACameraSensorInfo sensorInfo{};
int ret = sensor->sensorInfo(&sensorInfo);
if (ret)
return ret;
ControlInfoMap::Map controls = IPU3Controls;
const std::vector<controls::draft::TestPatternModeEnum>
&testPatternModes = sensor->testPatternModes();
if (!testPatternModes.empty()) {
std::vector<ControlValue> values;
values.reserve(testPatternModes.size());
for (auto pattern : testPatternModes)
values.emplace_back(static_cast<int32_t>(pattern));
controls[&controls::draft::TestPatternMode] = ControlInfo(values);
}
/*
* Compute the scaler crop limits.
*
	 * Initialize the control using the 'Viewfinder' configuration (1280x720)
* as the pipeline output resolution and the full sensor size as input
* frame (see the todo note in the validate() function about the usage
* of the sensor's full frame as ImgU input).
*/
/*
* The maximum scaler crop rectangle is the analogue crop used to
* produce the maximum frame size.
*/
const Rectangle &analogueCrop = sensorInfo.analogCrop;
Rectangle maxCrop = analogueCrop;
/*
* As the ImgU cannot up-scale, the minimum selection rectangle has to
* be as large as the pipeline output size. Use the default viewfinder
* configuration as the desired output size and calculate the minimum
* rectangle required to satisfy the ImgU processing margins, unless the
* sensor resolution is smaller.
*
* \todo This implementation is based on the same assumptions about the
 * ImgU pipeline configuration described in the viewfinder and main
* output sizes calculation in the validate() function.
*/
/* The largest margin-aligned size strictly smaller than the sensor resolution. */
Size minSize = sensor->resolution().shrunkBy({ 1, 1 })
.alignedDownTo(ImgUDevice::kOutputMarginWidth,
ImgUDevice::kOutputMarginHeight);
/*
* Either the smallest margin-aligned size larger than the viewfinder
* size or the adjusted sensor resolution.
*/
minSize = kViewfinderSize.grownBy({ 1, 1 })
.alignedUpTo(ImgUDevice::kOutputMarginWidth,
ImgUDevice::kOutputMarginHeight)
.boundedTo(minSize);
/*
* Re-scale in the sensor's native coordinates. Report (0,0) as
 * top-left corner as we allow applications to freely pan the crop area.
*/
Rectangle minCrop = Rectangle(minSize).scaledBy(analogueCrop.size(),
sensorInfo.outputSize);
controls[&controls::ScalerCrop] = ControlInfo(minCrop, maxCrop, maxCrop);
/* Add the IPA registered controls to the list of camera controls. */
for (const auto &ipaControl : data->ipaControls_)
controls[ipaControl.first] = ipaControl.second;
data->controlInfo_ = ControlInfoMap(std::move(controls),
controls::controls);
return 0;
}
/**
* \brief Initialise ImgU and CIO2 devices associated with cameras
*
* Initialise the two ImgU instances and create cameras with an associated
* CIO2 device instance.
*
 * \return 0 on success or a negative error code on failure or if no camera
* has been created
* \retval -ENODEV no camera has been created
*/
int PipelineHandlerIPU3::registerCameras()
{
int ret;
ret = imgu0_.init(imguMediaDev_, 0);
if (ret)
return ret;
ret = imgu1_.init(imguMediaDev_, 1);
if (ret)
return ret;
/*
* For each CSI-2 receiver on the IPU3, create a Camera if an
* image sensor is connected to it and the sensor can produce images
* in a compatible format.
*/
unsigned int numCameras = 0;
for (unsigned int id = 0; id < 4 && numCameras < 2; ++id) {
std::unique_ptr<IPU3CameraData> data =
std::make_unique<IPU3CameraData>(this);
std::set<Stream *> streams = {
&data->outStream_,
&data->vfStream_,
&data->rawStream_,
};
CIO2Device *cio2 = &data->cio2_;
ret = cio2->init(cio2MediaDev_, id);
if (ret)
continue;
ret = data->loadIPA();
if (ret)
continue;
/* Initialize the camera properties. */
data->properties_ = cio2->sensor()->properties();
ret = initControls(data.get());
if (ret)
continue;
/*
 * \todo Read delay values from the sensor itself or from a
 * sensor database. For now use generic values taken from
* the Raspberry Pi and listed as 'generic values'.
*/
std::unordered_map<uint32_t, DelayedControls::ControlParams> params = {
{ V4L2_CID_ANALOGUE_GAIN, { 1, false } },
{ V4L2_CID_EXPOSURE, { 2, false } },
};
data->delayedCtrls_ =
std::make_unique<DelayedControls>(cio2->sensor()->device(),
params);
data->cio2_.frameStart().connect(data.get(),
&IPU3CameraData::frameStart);
/* Convert the sensor rotation to a transformation */
const auto &rotation = data->properties_.get(properties::Rotation);
if (!rotation)
LOG(IPU3, Warning) << "Rotation control not exposed by "
<< cio2->sensor()->id()
<< ". Assume rotation 0";
/**
* \todo Dynamically assign ImgU and output devices to each
* stream and camera; as of now, limit support to two cameras
* only, and assign imgu0 to the first one and imgu1 to the
* second.
*/
data->imgu_ = numCameras ? &imgu1_ : &imgu0_;
/*
* Connect video devices' 'bufferReady' signals to their
* slot to implement the image processing pipeline.
*
* Frames produced by the CIO2 unit are passed to the
* associated ImgU input where they get processed and
* returned through the ImgU main and secondary outputs.
*/
data->cio2_.bufferReady().connect(data.get(),
&IPU3CameraData::cio2BufferReady);
data->cio2_.bufferAvailable.connect(
data.get(), &IPU3CameraData::queuePendingRequests);
data->imgu_->input_->bufferReady.connect(&data->cio2_,
&CIO2Device::tryReturnBuffer);
data->imgu_->output_->bufferReady.connect(data.get(),
&IPU3CameraData::imguOutputBufferReady);
data->imgu_->viewfinder_->bufferReady.connect(data.get(),
&IPU3CameraData::imguOutputBufferReady);
data->imgu_->param_->bufferReady.connect(data.get(),
&IPU3CameraData::paramBufferReady);
data->imgu_->stat_->bufferReady.connect(data.get(),
&IPU3CameraData::statBufferReady);
/* Create and register the Camera instance. */
const std::string &cameraId = cio2->sensor()->id();
std::shared_ptr<Camera> camera =
Camera::create(std::move(data), cameraId, streams);
registerCamera(std::move(camera));
LOG(IPU3, Info)
<< "Registered Camera[" << numCameras << "] \""
<< cameraId << "\""
<< " connected to CSI-2 receiver " << id;
numCameras++;
}
return numCameras ? 0 : -ENODEV;
}
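/**
 * \brief Load and initialize the IPA module
 *
 * Create the IPU3 IPA proxy, connect its signals to the pipeline handler
 * slots, and initialize it with the sensor information, the sensor controls
 * and the tuning file.
 *
 * \return 0 on success or a negative error code otherwise
 */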
int IPU3CameraData::loadIPA()
{
ipa_ = IPAManager::createIPA<ipa::ipu3::IPAProxyIPU3>(pipe(), 1, 1);
if (!ipa_)
return -ENOENT;
ipa_->setSensorControls.connect(this, &IPU3CameraData::setSensorControls);
ipa_->paramsBufferReady.connect(this, &IPU3CameraData::paramsBufferReady);
ipa_->metadataReady.connect(this, &IPU3CameraData::metadataReady);
/*
* Pass the sensor info to the IPA to initialize controls.
*
* \todo Find a way to initialize IPA controls without basing their
* limits on a particular sensor mode. We currently pass sensor
* information corresponding to the largest sensor resolution, and the
* IPA uses this to compute limits for supported controls. There's a
* discrepancy between the need to compute IPA control limits at init
* time, and the fact that those limits may depend on the sensor mode.
 * Research is required to find out how to handle this issue.
*/
CameraSensor *sensor = cio2_.sensor();
V4L2SubdeviceFormat sensorFormat = {};
sensorFormat.size = sensor->resolution();
int ret = sensor->setFormat(&sensorFormat);
if (ret)
return ret;
IPACameraSensorInfo sensorInfo{};
ret = sensor->sensorInfo(&sensorInfo);
if (ret)
return ret;
/*
 * The IPA tuning file is made from the sensor name. If the tuning file
* isn't found, fall back to the 'uncalibrated' file.
*/
std::string ipaTuningFile = ipa_->configurationFile(sensor->model() + ".yaml");
if (ipaTuningFile.empty())
ipaTuningFile = ipa_->configurationFile("uncalibrated.yaml");
ret = ipa_->init(IPASettings{ ipaTuningFile, sensor->model() },
sensorInfo, sensor->controls(), &ipaControls_);
if (ret) {
LOG(IPU3, Error) << "Failed to initialise the IPU3 IPA";
return ret;
}
return 0;
}
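/**
 * \brief Apply sensor and lens controls requested by the IPA
 * \param[in] id The frame identifier (unused)
 * \param[in] sensorControls The sensor controls to queue through DelayedControls
 * \param[in] lensControls The lens controls to apply
 *
 * Queue the sensor controls to the delayed controls instance and, if the
 * sensor has a focus lens, apply the V4L2_CID_FOCUS_ABSOLUTE control to it.
 */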
void IPU3CameraData::setSensorControls([[maybe_unused]] unsigned int id,
const ControlList &sensorControls,
const ControlList &lensControls)
{
delayedCtrls_->push(sensorControls);
CameraLens *focusLens = cio2_.sensor()->focusLens();
if (!focusLens)
return;
if (!lensControls.contains(V4L2_CID_FOCUS_ABSOLUTE))
return;
const ControlValue &focusValue = lensControls.get(V4L2_CID_FOCUS_ABSOLUTE);
focusLens->setFocusPosition(focusValue.get<int32_t>());
}
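/**
 * \brief Queue buffers to the ImgU once the parameters buffer is filled
 * \param[in] id The frame identifier
 *
 * Once the IPA has filled the ImgU parameters buffer, queue the request's
 * output and viewfinder buffers, along with the parameters, statistics and
 * raw input buffers, to the ImgU for processing.
 */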
void IPU3CameraData::paramsBufferReady(unsigned int id)
{
IPU3Frames::Info *info = frameInfos_.find(id);
if (!info)
return;
/* Queue all buffers from the request aimed for the ImgU. */
for (auto it : info->request->buffers()) {
const Stream *stream = it.first;
FrameBuffer *outbuffer = it.second;
if (stream == &outStream_)
imgu_->output_->queueBuffer(outbuffer);
else if (stream == &vfStream_)
imgu_->viewfinder_->queueBuffer(outbuffer);
}
info->paramBuffer->_d()->metadata().planes()[0].bytesused =
sizeof(struct ipu3_uapi_params);
imgu_->param_->queueBuffer(info->paramBuffer);
imgu_->stat_->queueBuffer(info->statBuffer);
imgu_->input_->queueBuffer(info->rawBuffer);
}
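/**
 * \brief Handle metadata produced by the IPA
 * \param[in] id The frame identifier
 * \param[in] metadata The metadata computed by the IPA
 *
 * Merge the IPA metadata into the request metadata and complete the request
 * if all its other components have completed.
 */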
void IPU3CameraData::metadataReady(unsigned int id, const ControlList &metadata)
{
IPU3Frames::Info *info = frameInfos_.find(id);
if (!info)
return;
Request *request = info->request;
request->metadata().merge(metadata);
info->metadataProcessed = true;
if (frameInfos_.tryComplete(info))
pipe()->completeRequest(request);
}
/* -----------------------------------------------------------------------------
* Buffer Ready slots
*/
/**
 * \brief Handle buffer completion at the ImgU output
* \param[in] buffer The completed buffer
*
* Buffers completed from the ImgU output are directed to the application.
*/
void IPU3CameraData::imguOutputBufferReady(FrameBuffer *buffer)
{
IPU3Frames::Info *info = frameInfos_.find(buffer);
if (!info)
return;
Request *request = info->request;
pipe()->completeBuffer(request, buffer);
request->metadata().set(controls::draft::PipelineDepth, 3);
/* \todo Actually apply the scaler crop region to the ImgU. */
const auto &scalerCrop = request->controls().get(controls::ScalerCrop);
if (scalerCrop)
cropRegion_ = *scalerCrop;
request->metadata().set(controls::ScalerCrop, cropRegion_);
if (frameInfos_.tryComplete(info))
pipe()->completeRequest(request);
}
/**
 * \brief Handle buffer completion at the CIO2 output
* \param[in] buffer The completed buffer
*
* Buffers completed from the CIO2 are immediately queued to the ImgU unit
* for further processing.
*/
void IPU3CameraData::cio2BufferReady(FrameBuffer *buffer)
{
IPU3Frames::Info *info = frameInfos_.find(buffer);
if (!info)
return;
Request *request = info->request;
/* If the buffer is cancelled force completion of the whole request. */
if (buffer->metadata().status == FrameMetadata::FrameCancelled) {
for (auto it : request->buffers()) {
FrameBuffer *b = it.second;
b->_d()->cancel();
pipe()->completeBuffer(request, b);
}
frameInfos_.remove(info);
pipe()->completeRequest(request);
return;
}
/*
* Record the sensor's timestamp in the request metadata.
*
* \todo The sensor timestamp should be better estimated by connecting
* to the V4L2Device::frameStart signal.
*/
request->metadata().set(controls::SensorTimestamp,
buffer->metadata().timestamp);
info->effectiveSensorControls = delayedCtrls_->get(buffer->metadata().sequence);
if (request->findBuffer(&rawStream_))
pipe()->completeBuffer(request, buffer);
ipa_->fillParamsBuffer(info->id, info->paramBuffer->cookie());
}
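/**
 * \brief Handle the parameters buffer being returned by the ImgU
 * \param[in] buffer The completed parameters buffer
 *
 * Mark the parameters buffer as dequeued and complete the request if nothing
 * else is pending for it.
 */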
void IPU3CameraData::paramBufferReady(FrameBuffer *buffer)
{
IPU3Frames::Info *info = frameInfos_.find(buffer);
if (!info)
return;
info->paramDequeued = true;
/*
* tryComplete() will delete info if it completes the IPU3Frame.
 * In that event, we must have obtained the Request beforehand.
*
* \todo Improve the FrameInfo API to avoid this type of issue
*/
Request *request = info->request;
if (frameInfos_.tryComplete(info))
pipe()->completeRequest(request);
}
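/**
 * \brief Handle the statistics buffer being returned by the ImgU
 * \param[in] buffer The completed statistics buffer
 *
 * Forward the statistics buffer to the IPA for processing, or, if the frame
 * was cancelled, skip the IPA and attempt to complete the request directly.
 */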
void IPU3CameraData::statBufferReady(FrameBuffer *buffer)
{
IPU3Frames::Info *info = frameInfos_.find(buffer);
if (!info)
return;
Request *request = info->request;
if (buffer->metadata().status == FrameMetadata::FrameCancelled) {
info->metadataProcessed = true;
/*
* tryComplete() will delete info if it completes the IPU3Frame.
 * In that event, we must have obtained the Request beforehand.
*/
if (frameInfos_.tryComplete(info))
pipe()->completeRequest(request);
return;
}
ipa_->processStatsBuffer(info->id, request->metadata().get(controls::SensorTimestamp).value_or(0),
info->statBuffer->cookie(), info->effectiveSensorControls);
}
/**
 * \brief Handle the start of frame exposure signal
 * \param[in] sequence The sequence number of the frame
*
* Inspect the list of pending requests waiting for a RAW frame to be
* produced and apply controls for the 'next' one.
*
* Some controls need to be applied immediately, such as the
* TestPatternMode one. Other controls are handled through the delayed
* controls class.
*/
void IPU3CameraData::frameStart(uint32_t sequence)
{
delayedCtrls_->applyControls(sequence);
if (processingRequests_.empty())
return;
/*
* Handle controls to be set immediately on the next frame.
 * This currently only handles the TestPatternMode control.
*
* \todo Synchronize with the sequence number
*/
Request *request = processingRequests_.front();
processingRequests_.pop();
const auto &testPatternMode = request->controls().get(controls::draft::TestPatternMode);
if (!testPatternMode)
return;
int ret = cio2_.sensor()->setTestPatternMode(
static_cast<controls::draft::TestPatternModeEnum>(*testPatternMode));
if (ret) {
LOG(IPU3, Error) << "Failed to set test pattern mode: "
<< ret;
return;
}
request->metadata().set(controls::draft::TestPatternMode,
*testPatternMode);
}
REGISTER_PIPELINE_HANDLER(PipelineHandlerIPU3, "ipu3")
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera/pipeline | repos/libcamera/src/libcamera/pipeline/rkisp1/rkisp1_path.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2020, Google Inc.
*
* Rockchip ISP1 path helper
*/
#pragma once
#include <memory>
#include <set>
#include <vector>
#include <libcamera/base/signal.h>
#include <libcamera/base/span.h>
#include <libcamera/camera.h>
#include <libcamera/geometry.h>
#include <libcamera/pixel_format.h>
#include "libcamera/internal/media_object.h"
#include "libcamera/internal/v4l2_videodevice.h"
namespace libcamera {
class CameraSensor;
class MediaDevice;
class V4L2Subdevice;
struct StreamConfiguration;
struct V4L2SubdeviceFormat;
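/*
 * Helper class modelling one ISP output path, made of a resizer subdevice and
 * a capture video node. It is used for both the main path and the self path.
 */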
class RkISP1Path
{
public:
RkISP1Path(const char *name, const Span<const PixelFormat> &formats,
const Size &minResolution, const Size &maxResolution);
bool init(MediaDevice *media);
int setEnabled(bool enable) { return link_->setEnabled(enable); }
bool isEnabled() const { return link_->flags() & MEDIA_LNK_FL_ENABLED; }
StreamConfiguration generateConfiguration(const CameraSensor *sensor,
const Size &resolution,
StreamRole role);
CameraConfiguration::Status validate(const CameraSensor *sensor,
StreamConfiguration *cfg);
int configure(const StreamConfiguration &config,
const V4L2SubdeviceFormat &inputFormat);
int exportBuffers(unsigned int bufferCount,
std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
return video_->exportBuffers(bufferCount, buffers);
}
int start();
void stop();
int queueBuffer(FrameBuffer *buffer) { return video_->queueBuffer(buffer); }
Signal<FrameBuffer *> &bufferReady() { return video_->bufferReady; }
private:
void populateFormats();
static constexpr unsigned int RKISP1_BUFFER_COUNT = 4;
const char *name_;
bool running_;
const Span<const PixelFormat> formats_;
std::set<PixelFormat> streamFormats_;
Size minResolution_;
Size maxResolution_;
std::unique_ptr<V4L2Subdevice> resizer_;
std::unique_ptr<V4L2VideoDevice> video_;
MediaLink *link_;
};
class RkISP1MainPath : public RkISP1Path
{
public:
RkISP1MainPath();
};
class RkISP1SelfPath : public RkISP1Path
{
public:
RkISP1SelfPath();
};
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera/pipeline | repos/libcamera/src/libcamera/pipeline/rkisp1/rkisp1_path.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2020, Google Inc.
*
* Rockchip ISP1 path helper
*/
#include "rkisp1_path.h"
#include <linux/media-bus-format.h>
#include <libcamera/formats.h>
#include <libcamera/stream.h>
#include "libcamera/internal/camera_sensor.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/v4l2_subdevice.h"
#include "libcamera/internal/v4l2_videodevice.h"
namespace libcamera {
LOG_DECLARE_CATEGORY(RkISP1)
namespace {
/* Keep in sync with the supported raw formats in rkisp1.cpp. */
const std::map<PixelFormat, uint32_t> formatToMediaBus = {
{ formats::UYVY, MEDIA_BUS_FMT_YUYV8_2X8 },
{ formats::YUYV, MEDIA_BUS_FMT_YUYV8_2X8 },
{ formats::NV12, MEDIA_BUS_FMT_YUYV8_1_5X8 },
{ formats::NV21, MEDIA_BUS_FMT_YUYV8_1_5X8 },
{ formats::NV16, MEDIA_BUS_FMT_YUYV8_2X8 },
{ formats::NV61, MEDIA_BUS_FMT_YUYV8_2X8 },
{ formats::YUV420, MEDIA_BUS_FMT_YUYV8_1_5X8 },
{ formats::YVU420, MEDIA_BUS_FMT_YUYV8_1_5X8 },
{ formats::YUV422, MEDIA_BUS_FMT_YUYV8_2X8 },
{ formats::YVU422, MEDIA_BUS_FMT_YUYV8_2X8 },
{ formats::R8, MEDIA_BUS_FMT_YUYV8_2X8 },
{ formats::RGB565, MEDIA_BUS_FMT_YUYV8_2X8 },
{ formats::XRGB8888, MEDIA_BUS_FMT_YUYV8_2X8 },
{ formats::SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8 },
{ formats::SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8 },
{ formats::SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8 },
{ formats::SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8 },
{ formats::SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10 },
{ formats::SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10 },
{ formats::SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10 },
{ formats::SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10 },
{ formats::SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12 },
{ formats::SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12 },
{ formats::SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12 },
{ formats::SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12 },
};
} /* namespace */
RkISP1Path::RkISP1Path(const char *name, const Span<const PixelFormat> &formats,
const Size &minResolution, const Size &maxResolution)
: name_(name), running_(false), formats_(formats),
minResolution_(minResolution), maxResolution_(maxResolution),
link_(nullptr)
{
}
bool RkISP1Path::init(MediaDevice *media)
{
std::string resizer = std::string("rkisp1_resizer_") + name_ + "path";
std::string video = std::string("rkisp1_") + name_ + "path";
resizer_ = V4L2Subdevice::fromEntityName(media, resizer);
if (resizer_->open() < 0)
return false;
video_ = V4L2VideoDevice::fromEntityName(media, video);
if (video_->open() < 0)
return false;
populateFormats();
link_ = media->link("rkisp1_isp", 2, resizer, 0);
if (!link_)
return false;
return true;
}
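/*
 * Enumerate the formats and sizes supported by the capture video node and
 * update the stream formats and resolution limits accordingly. If the kernel
 * doesn't support enumeration, fall back to the statically defined defaults.
 */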
void RkISP1Path::populateFormats()
{
V4L2VideoDevice::Formats v4l2Formats = video_->formats();
if (v4l2Formats.empty()) {
LOG(RkISP1, Info)
<< "Failed to enumerate supported formats and sizes, using defaults";
for (const PixelFormat &format : formats_)
streamFormats_.insert(format);
return;
}
minResolution_ = { 65535, 65535 };
maxResolution_ = { 0, 0 };
std::vector<PixelFormat> formats;
for (const auto &[format, sizes] : v4l2Formats) {
const PixelFormat pixelFormat = format.toPixelFormat();
/*
* As a defensive measure, skip any pixel format exposed by the
* driver that we don't know about. This ensures that looking up
* formats in formatToMediaBus using a key from streamFormats_
* will never fail in any of the other functions.
*/
if (!formatToMediaBus.count(pixelFormat)) {
LOG(RkISP1, Warning)
<< "Unsupported pixel format " << pixelFormat;
continue;
}
streamFormats_.insert(pixelFormat);
for (const auto &size : sizes) {
if (minResolution_ > size.min)
minResolution_ = size.min;
if (maxResolution_ < size.max)
maxResolution_ = size.max;
}
}
}
StreamConfiguration
RkISP1Path::generateConfiguration(const CameraSensor *sensor, const Size &size,
StreamRole role)
{
const std::vector<unsigned int> &mbusCodes = sensor->mbusCodes();
const Size &resolution = sensor->resolution();
/* Min and max resolutions to populate the available stream formats. */
Size maxResolution = maxResolution_.boundedToAspectRatio(resolution)
.boundedTo(resolution);
Size minResolution = minResolution_.expandedToAspectRatio(resolution);
/* The desired stream size, bound to the max resolution. */
Size streamSize = size.boundedTo(maxResolution);
/* Create the list of supported stream formats. */
std::map<PixelFormat, std::vector<SizeRange>> streamFormats;
unsigned int rawBitsPerPixel = 0;
PixelFormat rawFormat;
for (const auto &format : streamFormats_) {
const PixelFormatInfo &info = PixelFormatInfo::info(format);
/* Populate stream formats for non-RAW configurations. */
if (info.colourEncoding != PixelFormatInfo::ColourEncodingRAW) {
if (role == StreamRole::Raw)
continue;
streamFormats[format] = { { minResolution, maxResolution } };
continue;
}
/* Skip RAW formats for non-raw roles. */
if (role != StreamRole::Raw)
continue;
/* Populate stream formats for RAW configurations. */
uint32_t mbusCode = formatToMediaBus.at(format);
if (std::find(mbusCodes.begin(), mbusCodes.end(), mbusCode) ==
mbusCodes.end())
/* Skip formats not supported by sensor. */
continue;
/* Add all the RAW sizes the sensor can produce for this code. */
for (const auto &rawSize : sensor->sizes(mbusCode)) {
if (rawSize.width > maxResolution_.width ||
rawSize.height > maxResolution_.height)
continue;
streamFormats[format].push_back({ rawSize, rawSize });
}
/*
* Store the raw format with the highest bits per pixel for
* later usage.
*/
if (info.bitsPerPixel > rawBitsPerPixel) {
rawBitsPerPixel = info.bitsPerPixel;
rawFormat = format;
}
}
/*
* Pick a suitable pixel format for the role. Raw streams need to use a
* raw format, processed streams use NV12 by default.
*/
PixelFormat format;
if (role == StreamRole::Raw) {
if (!rawFormat.isValid()) {
LOG(RkISP1, Error)
<< "Sensor " << sensor->model()
<< " doesn't support raw capture";
return {};
}
format = rawFormat;
} else {
format = formats::NV12;
}
StreamFormats formats(streamFormats);
StreamConfiguration cfg(formats);
cfg.pixelFormat = format;
cfg.size = streamSize;
cfg.bufferCount = RKISP1_BUFFER_COUNT;
return cfg;
}
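/*
 * Validate a stream configuration against this path: adjust the pixel format
 * and size to supported values, derive the stride and frame size by trying
 * the format on the video node, and report whether any adjustment was made.
 */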
CameraConfiguration::Status RkISP1Path::validate(const CameraSensor *sensor,
StreamConfiguration *cfg)
{
const std::vector<unsigned int> &mbusCodes = sensor->mbusCodes();
const Size &resolution = sensor->resolution();
const StreamConfiguration reqCfg = *cfg;
CameraConfiguration::Status status = CameraConfiguration::Valid;
/*
* Validate the pixel format. If the requested format isn't supported,
* default to either NV12 (all versions of the ISP are guaranteed to
* support NV12 on both the main and self paths) if the requested format
* is not a raw format, or to the supported raw format with the highest
* bits per pixel otherwise.
*/
unsigned int rawBitsPerPixel = 0;
PixelFormat rawFormat;
bool found = false;
for (const auto &format : streamFormats_) {
const PixelFormatInfo &info = PixelFormatInfo::info(format);
if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW) {
/* Skip raw formats not supported by the sensor. */
uint32_t mbusCode = formatToMediaBus.at(format);
if (std::find(mbusCodes.begin(), mbusCodes.end(), mbusCode) ==
mbusCodes.end())
continue;
/*
* Store the raw format with the highest bits per pixel
* for later usage.
*/
if (info.bitsPerPixel > rawBitsPerPixel) {
rawBitsPerPixel = info.bitsPerPixel;
rawFormat = format;
}
}
if (cfg->pixelFormat == format) {
found = true;
break;
}
}
bool isRaw = PixelFormatInfo::info(cfg->pixelFormat).colourEncoding ==
PixelFormatInfo::ColourEncodingRAW;
/*
* If no raw format supported by the sensor has been found, use a
* processed format.
*/
if (!rawFormat.isValid())
isRaw = false;
if (!found)
cfg->pixelFormat = isRaw ? rawFormat : formats::NV12;
Size minResolution;
Size maxResolution;
if (isRaw) {
/*
* Use the sensor output size closest to the requested stream
* size.
*/
uint32_t mbusCode = formatToMediaBus.at(cfg->pixelFormat);
V4L2SubdeviceFormat sensorFormat =
sensor->getFormat({ mbusCode }, cfg->size);
minResolution = sensorFormat.size;
maxResolution = sensorFormat.size;
} else {
/*
* Adjust the size based on the sensor resolution and absolute
* limits of the ISP.
*/
minResolution = minResolution_.expandedToAspectRatio(resolution);
maxResolution = maxResolution_.boundedToAspectRatio(resolution)
.boundedTo(resolution);
}
cfg->size.boundTo(maxResolution);
cfg->size.expandTo(minResolution);
cfg->bufferCount = RKISP1_BUFFER_COUNT;
V4L2DeviceFormat format;
format.fourcc = video_->toV4L2PixelFormat(cfg->pixelFormat);
format.size = cfg->size;
int ret = video_->tryFormat(&format);
if (ret)
return CameraConfiguration::Invalid;
cfg->stride = format.planes[0].bpl;
cfg->frameSize = format.planes[0].size;
if (cfg->pixelFormat != reqCfg.pixelFormat || cfg->size != reqCfg.size) {
LOG(RkISP1, Debug)
<< "Adjusting format from " << reqCfg.toString()
<< " to " << cfg->toString();
status = CameraConfiguration::Adjusted;
}
return status;
}
int RkISP1Path::configure(const StreamConfiguration &config,
const V4L2SubdeviceFormat &inputFormat)
{
int ret;
V4L2SubdeviceFormat ispFormat = inputFormat;
ret = resizer_->setFormat(0, &ispFormat);
if (ret < 0)
return ret;
/*
* Crop on the resizer input to maintain FOV before downscaling.
*
* \todo The alignment to a multiple of 2 pixels is required but may
* change the aspect ratio very slightly. A more advanced algorithm to
* compute the resizer input crop rectangle is needed, and it should
* also take into account the need to crop away the edge pixels affected
* by the ISP processing blocks.
*/
Size ispCrop = inputFormat.size.boundedToAspectRatio(config.size)
.alignedUpTo(2, 2);
Rectangle rect = ispCrop.centeredTo(Rectangle(inputFormat.size).center());
ret = resizer_->setSelection(0, V4L2_SEL_TGT_CROP, &rect);
if (ret < 0)
return ret;
LOG(RkISP1, Debug)
<< "Configured " << name_ << " resizer input pad with "
<< ispFormat << " crop " << rect;
ispFormat.size = config.size;
LOG(RkISP1, Debug)
<< "Configuring " << name_ << " resizer output pad with "
<< ispFormat;
/*
* The configuration has been validated, the pixel format is guaranteed
* to be supported and thus found in formatToMediaBus.
*/
ispFormat.code = formatToMediaBus.at(config.pixelFormat);
ret = resizer_->setFormat(1, &ispFormat);
if (ret < 0)
return ret;
LOG(RkISP1, Debug)
<< "Configured " << name_ << " resizer output pad with "
<< ispFormat;
const PixelFormatInfo &info = PixelFormatInfo::info(config.pixelFormat);
V4L2DeviceFormat outputFormat;
outputFormat.fourcc = video_->toV4L2PixelFormat(config.pixelFormat);
outputFormat.size = config.size;
outputFormat.planesCount = info.numPlanes();
ret = video_->setFormat(&outputFormat);
if (ret)
return ret;
if (outputFormat.size != config.size ||
outputFormat.fourcc != video_->toV4L2PixelFormat(config.pixelFormat)) {
LOG(RkISP1, Error)
<< "Unable to configure capture in " << config.toString();
return -EINVAL;
}
return 0;
}
int RkISP1Path::start()
{
int ret;
if (running_)
return -EBUSY;
/* \todo Make buffer count user configurable. */
ret = video_->importBuffers(RKISP1_BUFFER_COUNT);
if (ret)
return ret;
ret = video_->streamOn();
if (ret) {
LOG(RkISP1, Error)
<< "Failed to start " << name_ << " path";
video_->releaseBuffers();
return ret;
}
running_ = true;
return 0;
}
void RkISP1Path::stop()
{
if (!running_)
return;
if (video_->streamOff())
LOG(RkISP1, Warning) << "Failed to stop " << name_ << " path";
video_->releaseBuffers();
running_ = false;
}
/*
 * \todo Remove the hardcoded resolutions and formats once all users have
* migrated to a recent enough kernel.
*/
namespace {
constexpr Size RKISP1_RSZ_MP_SRC_MIN{ 32, 16 };
constexpr Size RKISP1_RSZ_MP_SRC_MAX{ 4416, 3312 };
constexpr std::array<PixelFormat, 18> RKISP1_RSZ_MP_FORMATS{
formats::YUYV,
formats::NV16,
formats::NV61,
formats::NV21,
formats::NV12,
formats::R8,
formats::SBGGR8,
formats::SGBRG8,
formats::SGRBG8,
formats::SRGGB8,
formats::SBGGR10,
formats::SGBRG10,
formats::SGRBG10,
formats::SRGGB10,
formats::SBGGR12,
formats::SGBRG12,
formats::SGRBG12,
formats::SRGGB12,
};
constexpr Size RKISP1_RSZ_SP_SRC_MIN{ 32, 16 };
constexpr Size RKISP1_RSZ_SP_SRC_MAX{ 1920, 1920 };
constexpr std::array<PixelFormat, 8> RKISP1_RSZ_SP_FORMATS{
formats::YUYV,
formats::NV16,
formats::NV61,
formats::NV21,
formats::NV12,
formats::R8,
formats::RGB565,
formats::XRGB8888,
};
} /* namespace */
RkISP1MainPath::RkISP1MainPath()
: RkISP1Path("main", RKISP1_RSZ_MP_FORMATS,
RKISP1_RSZ_MP_SRC_MIN, RKISP1_RSZ_MP_SRC_MAX)
{
}
RkISP1SelfPath::RkISP1SelfPath()
: RkISP1Path("self", RKISP1_RSZ_SP_FORMATS,
RKISP1_RSZ_SP_SRC_MIN, RKISP1_RSZ_SP_SRC_MAX)
{
}
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera/pipeline | repos/libcamera/src/libcamera/pipeline/rkisp1/rkisp1.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* Pipeline handler for Rockchip ISP1
*/
#include <algorithm>
#include <array>
#include <iomanip>
#include <memory>
#include <numeric>
#include <queue>
#include <linux/media-bus-format.h>
#include <linux/rkisp1-config.h>
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
#include <libcamera/camera.h>
#include <libcamera/color_space.h>
#include <libcamera/control_ids.h>
#include <libcamera/formats.h>
#include <libcamera/framebuffer.h>
#include <libcamera/request.h>
#include <libcamera/stream.h>
#include <libcamera/transform.h>
#include <libcamera/ipa/core_ipa_interface.h>
#include <libcamera/ipa/rkisp1_ipa_interface.h>
#include <libcamera/ipa/rkisp1_ipa_proxy.h>
#include "libcamera/internal/camera.h"
#include "libcamera/internal/camera_sensor.h"
#include "libcamera/internal/delayed_controls.h"
#include "libcamera/internal/device_enumerator.h"
#include "libcamera/internal/framebuffer.h"
#include "libcamera/internal/ipa_manager.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/pipeline_handler.h"
#include "libcamera/internal/v4l2_subdevice.h"
#include "libcamera/internal/v4l2_videodevice.h"
#include "rkisp1_path.h"
namespace libcamera {
LOG_DEFINE_CATEGORY(RkISP1)
class PipelineHandlerRkISP1;
class RkISP1CameraData;
struct RkISP1FrameInfo {
unsigned int frame;
Request *request;
FrameBuffer *paramBuffer;
FrameBuffer *statBuffer;
FrameBuffer *mainPathBuffer;
FrameBuffer *selfPathBuffer;
bool paramDequeued;
bool metadataProcessed;
};
class RkISP1Frames
{
public:
RkISP1Frames(PipelineHandler *pipe);
RkISP1FrameInfo *create(const RkISP1CameraData *data, Request *request,
bool isRaw);
int destroy(unsigned int frame);
void clear();
RkISP1FrameInfo *find(unsigned int frame);
RkISP1FrameInfo *find(FrameBuffer *buffer);
RkISP1FrameInfo *find(Request *request);
private:
PipelineHandlerRkISP1 *pipe_;
std::map<unsigned int, RkISP1FrameInfo *> frameInfo_;
};
class RkISP1CameraData : public Camera::Private
{
public:
RkISP1CameraData(PipelineHandler *pipe, RkISP1MainPath *mainPath,
RkISP1SelfPath *selfPath)
: Camera::Private(pipe), frame_(0), frameInfo_(pipe),
mainPath_(mainPath), selfPath_(selfPath)
{
}
PipelineHandlerRkISP1 *pipe();
int loadIPA(unsigned int hwRevision);
Stream mainPathStream_;
Stream selfPathStream_;
std::unique_ptr<CameraSensor> sensor_;
std::unique_ptr<DelayedControls> delayedCtrls_;
unsigned int frame_;
std::vector<IPABuffer> ipaBuffers_;
RkISP1Frames frameInfo_;
RkISP1MainPath *mainPath_;
RkISP1SelfPath *selfPath_;
std::unique_ptr<ipa::rkisp1::IPAProxyRkISP1> ipa_;
private:
void paramFilled(unsigned int frame);
void setSensorControls(unsigned int frame,
const ControlList &sensorControls);
void metadataReady(unsigned int frame, const ControlList &metadata);
};
class RkISP1CameraConfiguration : public CameraConfiguration
{
public:
RkISP1CameraConfiguration(Camera *camera, RkISP1CameraData *data);
Status validate() override;
const V4L2SubdeviceFormat &sensorFormat() { return sensorFormat_; }
const Transform &combinedTransform() { return combinedTransform_; }
private:
bool fitsAllPaths(const StreamConfiguration &cfg);
/*
* The RkISP1CameraData instance is guaranteed to be valid as long as the
* corresponding Camera instance is valid. In order to borrow a
* reference to the camera data, store a new reference to the camera.
*/
std::shared_ptr<Camera> camera_;
const RkISP1CameraData *data_;
V4L2SubdeviceFormat sensorFormat_;
Transform combinedTransform_;
};
class PipelineHandlerRkISP1 : public PipelineHandler
{
public:
PipelineHandlerRkISP1(CameraManager *manager);
std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
int start(Camera *camera, const ControlList *controls) override;
void stopDevice(Camera *camera) override;
int queueRequestDevice(Camera *camera, Request *request) override;
bool match(DeviceEnumerator *enumerator) override;
private:
static constexpr Size kRkISP1PreviewSize = { 1920, 1080 };
RkISP1CameraData *cameraData(Camera *camera)
{
return static_cast<RkISP1CameraData *>(camera->_d());
}
friend RkISP1CameraData;
friend RkISP1Frames;
int initLinks(Camera *camera, const CameraSensor *sensor,
const RkISP1CameraConfiguration &config);
int createCamera(MediaEntity *sensor);
void tryCompleteRequest(RkISP1FrameInfo *info);
void bufferReady(FrameBuffer *buffer);
void paramReady(FrameBuffer *buffer);
void statReady(FrameBuffer *buffer);
void frameStart(uint32_t sequence);
int allocateBuffers(Camera *camera);
int freeBuffers(Camera *camera);
MediaDevice *media_;
std::unique_ptr<V4L2Subdevice> isp_;
std::unique_ptr<V4L2VideoDevice> param_;
std::unique_ptr<V4L2VideoDevice> stat_;
std::unique_ptr<V4L2Subdevice> csi_;
bool hasSelfPath_;
bool isRaw_;
RkISP1MainPath mainPath_;
RkISP1SelfPath selfPath_;
std::vector<std::unique_ptr<FrameBuffer>> paramBuffers_;
std::vector<std::unique_ptr<FrameBuffer>> statBuffers_;
std::queue<FrameBuffer *> availableParamBuffers_;
std::queue<FrameBuffer *> availableStatBuffers_;
Camera *activeCamera_;
const MediaPad *ispSink_;
};
RkISP1Frames::RkISP1Frames(PipelineHandler *pipe)
: pipe_(static_cast<PipelineHandlerRkISP1 *>(pipe))
{
}
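/*
 * Create the frame information for a request, reserving parameters and
 * statistics buffers from the available pools unless the pipeline operates
 * in raw mode.
 */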
RkISP1FrameInfo *RkISP1Frames::create(const RkISP1CameraData *data, Request *request,
bool isRaw)
{
unsigned int frame = data->frame_;
FrameBuffer *paramBuffer = nullptr;
FrameBuffer *statBuffer = nullptr;
if (!isRaw) {
if (pipe_->availableParamBuffers_.empty()) {
LOG(RkISP1, Error) << "Parameters buffer underrun";
return nullptr;
}
if (pipe_->availableStatBuffers_.empty()) {
LOG(RkISP1, Error) << "Statistic buffer underrun";
return nullptr;
}
paramBuffer = pipe_->availableParamBuffers_.front();
pipe_->availableParamBuffers_.pop();
statBuffer = pipe_->availableStatBuffers_.front();
pipe_->availableStatBuffers_.pop();
}
FrameBuffer *mainPathBuffer = request->findBuffer(&data->mainPathStream_);
FrameBuffer *selfPathBuffer = request->findBuffer(&data->selfPathStream_);
RkISP1FrameInfo *info = new RkISP1FrameInfo;
info->frame = frame;
info->request = request;
info->paramBuffer = paramBuffer;
info->mainPathBuffer = mainPathBuffer;
info->selfPathBuffer = selfPathBuffer;
info->statBuffer = statBuffer;
info->paramDequeued = false;
info->metadataProcessed = false;
frameInfo_[frame] = info;
return info;
}
int RkISP1Frames::destroy(unsigned int frame)
{
RkISP1FrameInfo *info = find(frame);
if (!info)
return -ENOENT;
pipe_->availableParamBuffers_.push(info->paramBuffer);
pipe_->availableStatBuffers_.push(info->statBuffer);
frameInfo_.erase(info->frame);
delete info;
return 0;
}
void RkISP1Frames::clear()
{
for (const auto &entry : frameInfo_) {
RkISP1FrameInfo *info = entry.second;
pipe_->availableParamBuffers_.push(info->paramBuffer);
pipe_->availableStatBuffers_.push(info->statBuffer);
delete info;
}
frameInfo_.clear();
}
RkISP1FrameInfo *RkISP1Frames::find(unsigned int frame)
{
auto itInfo = frameInfo_.find(frame);
if (itInfo != frameInfo_.end())
return itInfo->second;
LOG(RkISP1, Fatal) << "Can't locate info from frame";
return nullptr;
}
RkISP1FrameInfo *RkISP1Frames::find(FrameBuffer *buffer)
{
for (auto &itInfo : frameInfo_) {
RkISP1FrameInfo *info = itInfo.second;
if (info->paramBuffer == buffer ||
info->statBuffer == buffer ||
info->mainPathBuffer == buffer ||
info->selfPathBuffer == buffer)
return info;
}
LOG(RkISP1, Fatal) << "Can't locate info from buffer";
return nullptr;
}
RkISP1FrameInfo *RkISP1Frames::find(Request *request)
{
for (auto &itInfo : frameInfo_) {
RkISP1FrameInfo *info = itInfo.second;
if (info->request == request)
return info;
}
LOG(RkISP1, Fatal) << "Can't locate info from request";
return nullptr;
}
PipelineHandlerRkISP1 *RkISP1CameraData::pipe()
{
return static_cast<PipelineHandlerRkISP1 *>(Camera::Private::pipe());
}
int RkISP1CameraData::loadIPA(unsigned int hwRevision)
{
ipa_ = IPAManager::createIPA<ipa::rkisp1::IPAProxyRkISP1>(pipe(), 1, 1);
if (!ipa_)
return -ENOENT;
ipa_->setSensorControls.connect(this, &RkISP1CameraData::setSensorControls);
ipa_->paramsBufferReady.connect(this, &RkISP1CameraData::paramFilled);
ipa_->metadataReady.connect(this, &RkISP1CameraData::metadataReady);
/*
 * The IPA tuning file is made from the sensor name unless the
* environment variable overrides it.
*/
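/*
 * For example, a custom tuning file can be selected at run time with
 * something like:
 *   LIBCAMERA_RKISP1_TUNING_FILE=/path/to/sensor-tuning.yaml <application>
 * (the path here is a placeholder).
 */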
std::string ipaTuningFile;
char const *configFromEnv = utils::secure_getenv("LIBCAMERA_RKISP1_TUNING_FILE");
if (!configFromEnv || *configFromEnv == '\0') {
ipaTuningFile = ipa_->configurationFile(sensor_->model() + ".yaml");
/*
* If the tuning file isn't found, fall back to the
* 'uncalibrated' configuration file.
*/
if (ipaTuningFile.empty())
ipaTuningFile = ipa_->configurationFile("uncalibrated.yaml");
} else {
ipaTuningFile = std::string(configFromEnv);
}
IPACameraSensorInfo sensorInfo{};
int ret = sensor_->sensorInfo(&sensorInfo);
if (ret) {
LOG(RkISP1, Error) << "Camera sensor information not available";
return ret;
}
ret = ipa_->init({ ipaTuningFile, sensor_->model() }, hwRevision,
sensorInfo, sensor_->controls(), &controlInfo_);
if (ret < 0) {
LOG(RkISP1, Error) << "IPA initialization failure";
return ret;
}
return 0;
}
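/*
 * Once the IPA has filled the parameters buffer for a frame, queue the
 * parameters, statistics and path output buffers to the ISP.
 */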
void RkISP1CameraData::paramFilled(unsigned int frame)
{
PipelineHandlerRkISP1 *pipe = RkISP1CameraData::pipe();
RkISP1FrameInfo *info = frameInfo_.find(frame);
if (!info)
return;
info->paramBuffer->_d()->metadata().planes()[0].bytesused =
sizeof(struct rkisp1_params_cfg);
pipe->param_->queueBuffer(info->paramBuffer);
pipe->stat_->queueBuffer(info->statBuffer);
if (info->mainPathBuffer)
mainPath_->queueBuffer(info->mainPathBuffer);
if (selfPath_ && info->selfPathBuffer)
selfPath_->queueBuffer(info->selfPathBuffer);
}
void RkISP1CameraData::setSensorControls([[maybe_unused]] unsigned int frame,
const ControlList &sensorControls)
{
delayedCtrls_->push(sensorControls);
}
void RkISP1CameraData::metadataReady(unsigned int frame, const ControlList &metadata)
{
RkISP1FrameInfo *info = frameInfo_.find(frame);
if (!info)
return;
info->request->metadata().merge(metadata);
info->metadataProcessed = true;
pipe()->tryCompleteRequest(info);
}
/* -----------------------------------------------------------------------------
* Camera Configuration
*/
namespace {
/* Keep in sync with the supported raw formats in rkisp1_path.cpp. */
const std::map<PixelFormat, uint32_t> rawFormats = {
{ formats::SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8 },
{ formats::SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8 },
{ formats::SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8 },
{ formats::SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8 },
{ formats::SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10 },
{ formats::SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10 },
{ formats::SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10 },
{ formats::SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10 },
{ formats::SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12 },
{ formats::SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12 },
{ formats::SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12 },
{ formats::SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12 },
};
} /* namespace */
RkISP1CameraConfiguration::RkISP1CameraConfiguration(Camera *camera,
RkISP1CameraData *data)
: CameraConfiguration()
{
camera_ = camera->shared_from_this();
data_ = data;
}
bool RkISP1CameraConfiguration::fitsAllPaths(const StreamConfiguration &cfg)
{
const CameraSensor *sensor = data_->sensor_.get();
StreamConfiguration config;
config = cfg;
if (data_->mainPath_->validate(sensor, &config) != Valid)
return false;
config = cfg;
if (data_->selfPath_ && data_->selfPath_->validate(sensor, &config) != Valid)
return false;
return true;
}
CameraConfiguration::Status RkISP1CameraConfiguration::validate()
{
const CameraSensor *sensor = data_->sensor_.get();
unsigned int pathCount = data_->selfPath_ ? 2 : 1;
Status status;
if (config_.empty())
return Invalid;
status = validateColorSpaces(ColorSpaceFlag::StreamsShareColorSpace);
/* Cap the number of entries to the available streams. */
if (config_.size() > pathCount) {
config_.resize(pathCount);
status = Adjusted;
}
Orientation requestedOrientation = orientation;
combinedTransform_ = data_->sensor_->computeTransform(&orientation);
if (orientation != requestedOrientation)
status = Adjusted;
/*
* Simultaneous capture of raw and processed streams isn't possible. If
* there is any raw stream, cap the number of streams to one.
*/
if (config_.size() > 1) {
for (const auto &cfg : config_) {
if (PixelFormatInfo::info(cfg.pixelFormat).colourEncoding ==
PixelFormatInfo::ColourEncodingRAW) {
config_.resize(1);
status = Adjusted;
break;
}
}
}
/*
 * If there is more than one stream in the configuration, figure out the
* order to evaluate the streams. The first stream has the highest
* priority but if both main path and self path can satisfy it evaluate
* the second stream first as the first stream is guaranteed to work
* with whichever path is not used by the second one.
*/
std::vector<unsigned int> order(config_.size());
std::iota(order.begin(), order.end(), 0);
if (config_.size() == 2 && fitsAllPaths(config_[0]))
std::reverse(order.begin(), order.end());
bool mainPathAvailable = true;
bool selfPathAvailable = data_->selfPath_;
for (unsigned int index : order) {
StreamConfiguration &cfg = config_[index];
/* Try to match stream without adjusting configuration. */
if (mainPathAvailable) {
StreamConfiguration tryCfg = cfg;
if (data_->mainPath_->validate(sensor, &tryCfg) == Valid) {
mainPathAvailable = false;
cfg = tryCfg;
cfg.setStream(const_cast<Stream *>(&data_->mainPathStream_));
continue;
}
}
if (selfPathAvailable) {
StreamConfiguration tryCfg = cfg;
if (data_->selfPath_->validate(sensor, &tryCfg) == Valid) {
selfPathAvailable = false;
cfg = tryCfg;
cfg.setStream(const_cast<Stream *>(&data_->selfPathStream_));
continue;
}
}
/* Try to match stream allowing adjusting configuration. */
if (mainPathAvailable) {
StreamConfiguration tryCfg = cfg;
if (data_->mainPath_->validate(sensor, &tryCfg) == Adjusted) {
mainPathAvailable = false;
cfg = tryCfg;
cfg.setStream(const_cast<Stream *>(&data_->mainPathStream_));
status = Adjusted;
continue;
}
}
if (selfPathAvailable) {
StreamConfiguration tryCfg = cfg;
if (data_->selfPath_->validate(sensor, &tryCfg) == Adjusted) {
selfPathAvailable = false;
cfg = tryCfg;
cfg.setStream(const_cast<Stream *>(&data_->selfPathStream_));
status = Adjusted;
continue;
}
}
/* All paths rejected configuration. */
LOG(RkISP1, Debug) << "Camera configuration not supported "
<< cfg.toString();
return Invalid;
}
/* Select the sensor format. */
PixelFormat rawFormat;
Size maxSize;
for (const StreamConfiguration &cfg : config_) {
const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);
if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW)
rawFormat = cfg.pixelFormat;
maxSize = std::max(maxSize, cfg.size);
}
std::vector<unsigned int> mbusCodes;
if (rawFormat.isValid()) {
mbusCodes = { rawFormats.at(rawFormat) };
} else {
std::transform(rawFormats.begin(), rawFormats.end(),
std::back_inserter(mbusCodes),
[](const auto &value) { return value.second; });
}
sensorFormat_ = sensor->getFormat(mbusCodes, maxSize);
if (sensorFormat_.size.isNull())
sensorFormat_.size = sensor->resolution();
return status;
}
/* -----------------------------------------------------------------------------
* Pipeline Operations
*/
PipelineHandlerRkISP1::PipelineHandlerRkISP1(CameraManager *manager)
: PipelineHandler(manager), hasSelfPath_(true)
{
}
std::unique_ptr<CameraConfiguration>
PipelineHandlerRkISP1::generateConfiguration(Camera *camera,
Span<const StreamRole> roles)
{
RkISP1CameraData *data = cameraData(camera);
unsigned int pathCount = data->selfPath_ ? 2 : 1;
if (roles.size() > pathCount) {
LOG(RkISP1, Error) << "Too many stream roles requested";
return nullptr;
}
std::unique_ptr<CameraConfiguration> config =
std::make_unique<RkISP1CameraConfiguration>(camera, data);
if (roles.empty())
return config;
/*
* As the ISP can't output different color spaces for the main and self
* path, pick a sensible default color space based on the role of the
* first stream and use it for all streams.
*/
std::optional<ColorSpace> colorSpace;
bool mainPathAvailable = true;
for (const StreamRole role : roles) {
Size size;
switch (role) {
case StreamRole::StillCapture:
/* JPEG encoders typically expect sYCC. */
if (!colorSpace)
colorSpace = ColorSpace::Sycc;
size = data->sensor_->resolution();
break;
case StreamRole::Viewfinder:
/*
* sYCC is the YCbCr encoding of sRGB, which is commonly
* used by displays.
*/
if (!colorSpace)
colorSpace = ColorSpace::Sycc;
size = kRkISP1PreviewSize;
break;
case StreamRole::VideoRecording:
/* Rec. 709 is a good default for HD video recording. */
if (!colorSpace)
colorSpace = ColorSpace::Rec709;
size = kRkISP1PreviewSize;
break;
case StreamRole::Raw:
if (roles.size() > 1) {
LOG(RkISP1, Error)
<< "Can't capture both raw and processed streams";
return nullptr;
}
colorSpace = ColorSpace::Raw;
size = data->sensor_->resolution();
break;
default:
LOG(RkISP1, Warning)
<< "Requested stream role not supported: " << role;
return nullptr;
}
/*
* Prefer the main path if available, as it supports higher
* resolutions.
*
* \todo Using the main path unconditionally hides support for
* RGB (only available on the self path) in the streams formats
* exposed to applications. This likely calls for a better API
* to expose streams capabilities.
*/
RkISP1Path *path;
if (mainPathAvailable) {
path = data->mainPath_;
mainPathAvailable = false;
} else {
path = data->selfPath_;
}
StreamConfiguration cfg =
path->generateConfiguration(data->sensor_.get(), size, role);
if (!cfg.pixelFormat.isValid())
return nullptr;
cfg.colorSpace = colorSpace;
config->addConfiguration(cfg);
}
config->validate();
return config;
}
int PipelineHandlerRkISP1::configure(Camera *camera, CameraConfiguration *c)
{
RkISP1CameraConfiguration *config =
static_cast<RkISP1CameraConfiguration *>(c);
RkISP1CameraData *data = cameraData(camera);
CameraSensor *sensor = data->sensor_.get();
int ret;
ret = initLinks(camera, sensor, *config);
if (ret)
return ret;
/*
* Configure the format on the sensor output and propagate it through
* the pipeline.
*/
V4L2SubdeviceFormat format = config->sensorFormat();
LOG(RkISP1, Debug) << "Configuring sensor with " << format;
ret = sensor->setFormat(&format, config->combinedTransform());
if (ret < 0)
return ret;
LOG(RkISP1, Debug) << "Sensor configured with " << format;
if (csi_) {
ret = csi_->setFormat(0, &format);
if (ret < 0)
return ret;
}
ret = isp_->setFormat(0, &format);
if (ret < 0)
return ret;
Rectangle rect(0, 0, format.size);
ret = isp_->setSelection(0, V4L2_SEL_TGT_CROP, &rect);
if (ret < 0)
return ret;
LOG(RkISP1, Debug)
<< "ISP input pad configured with " << format
<< " crop " << rect;
const PixelFormat &streamFormat = config->at(0).pixelFormat;
const PixelFormatInfo &info = PixelFormatInfo::info(streamFormat);
isRaw_ = info.colourEncoding == PixelFormatInfo::ColourEncodingRAW;
/* YUYV8_2X8 is required on the ISP source path pad for YUV output. */
if (!isRaw_)
format.code = MEDIA_BUS_FMT_YUYV8_2X8;
LOG(RkISP1, Debug)
<< "Configuring ISP output pad with " << format
<< " crop " << rect;
ret = isp_->setSelection(2, V4L2_SEL_TGT_CROP, &rect);
if (ret < 0)
return ret;
format.colorSpace = config->at(0).colorSpace;
ret = isp_->setFormat(2, &format);
if (ret < 0)
return ret;
LOG(RkISP1, Debug)
<< "ISP output pad configured with " << format
<< " crop " << rect;
std::map<unsigned int, IPAStream> streamConfig;
for (const StreamConfiguration &cfg : *config) {
if (cfg.stream() == &data->mainPathStream_) {
ret = mainPath_.configure(cfg, format);
streamConfig[0] = IPAStream(cfg.pixelFormat,
cfg.size);
} else if (hasSelfPath_) {
ret = selfPath_.configure(cfg, format);
streamConfig[1] = IPAStream(cfg.pixelFormat,
cfg.size);
} else {
return -ENODEV;
}
if (ret)
return ret;
}
V4L2DeviceFormat paramFormat;
paramFormat.fourcc = V4L2PixelFormat(V4L2_META_FMT_RK_ISP1_PARAMS);
ret = param_->setFormat(¶mFormat);
if (ret)
return ret;
V4L2DeviceFormat statFormat;
statFormat.fourcc = V4L2PixelFormat(V4L2_META_FMT_RK_ISP1_STAT_3A);
ret = stat_->setFormat(&statFormat);
if (ret)
return ret;
/* Inform IPA of stream configuration and sensor controls. */
ipa::rkisp1::IPAConfigInfo ipaConfig{};
ret = data->sensor_->sensorInfo(&ipaConfig.sensorInfo);
if (ret)
return ret;
ipaConfig.sensorControls = data->sensor_->controls();
ret = data->ipa_->configure(ipaConfig, streamConfig, &data->controlInfo_);
if (ret) {
LOG(RkISP1, Error) << "failed configuring IPA (" << ret << ")";
return ret;
}
return 0;
}
int PipelineHandlerRkISP1::exportFrameBuffers([[maybe_unused]] Camera *camera, Stream *stream,
std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
RkISP1CameraData *data = cameraData(camera);
unsigned int count = stream->configuration().bufferCount;
if (stream == &data->mainPathStream_)
return mainPath_.exportBuffers(count, buffers);
else if (hasSelfPath_ && stream == &data->selfPathStream_)
return selfPath_.exportBuffers(count, buffers);
return -EINVAL;
}
int PipelineHandlerRkISP1::allocateBuffers(Camera *camera)
{
RkISP1CameraData *data = cameraData(camera);
unsigned int ipaBufferId = 1;
int ret;
unsigned int maxCount = std::max({
data->mainPathStream_.configuration().bufferCount,
data->selfPathStream_.configuration().bufferCount,
});
if (!isRaw_) {
ret = param_->allocateBuffers(maxCount, ¶mBuffers_);
if (ret < 0)
goto error;
ret = stat_->allocateBuffers(maxCount, &statBuffers_);
if (ret < 0)
goto error;
}
for (std::unique_ptr<FrameBuffer> &buffer : paramBuffers_) {
buffer->setCookie(ipaBufferId++);
data->ipaBuffers_.emplace_back(buffer->cookie(),
buffer->planes());
availableParamBuffers_.push(buffer.get());
}
for (std::unique_ptr<FrameBuffer> &buffer : statBuffers_) {
buffer->setCookie(ipaBufferId++);
data->ipaBuffers_.emplace_back(buffer->cookie(),
buffer->planes());
availableStatBuffers_.push(buffer.get());
}
data->ipa_->mapBuffers(data->ipaBuffers_);
return 0;
error:
paramBuffers_.clear();
statBuffers_.clear();
return ret;
}
int PipelineHandlerRkISP1::freeBuffers(Camera *camera)
{
RkISP1CameraData *data = cameraData(camera);
while (!availableStatBuffers_.empty())
availableStatBuffers_.pop();
while (!availableParamBuffers_.empty())
availableParamBuffers_.pop();
paramBuffers_.clear();
statBuffers_.clear();
std::vector<unsigned int> ids;
for (IPABuffer &ipabuf : data->ipaBuffers_)
ids.push_back(ipabuf.id);
data->ipa_->unmapBuffers(ids);
data->ipaBuffers_.clear();
if (param_->releaseBuffers())
LOG(RkISP1, Error) << "Failed to release parameters buffers";
if (stat_->releaseBuffers())
LOG(RkISP1, Error) << "Failed to release stat buffers";
return 0;
}
int PipelineHandlerRkISP1::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
{
RkISP1CameraData *data = cameraData(camera);
int ret;
/* Allocate buffers for internal pipeline usage. */
ret = allocateBuffers(camera);
if (ret)
return ret;
ret = data->ipa_->start();
if (ret) {
freeBuffers(camera);
LOG(RkISP1, Error)
<< "Failed to start IPA " << camera->id();
return ret;
}
data->frame_ = 0;
if (!isRaw_) {
ret = param_->streamOn();
if (ret) {
data->ipa_->stop();
freeBuffers(camera);
LOG(RkISP1, Error)
<< "Failed to start parameters " << camera->id();
return ret;
}
ret = stat_->streamOn();
if (ret) {
param_->streamOff();
data->ipa_->stop();
freeBuffers(camera);
LOG(RkISP1, Error)
<< "Failed to start statistics " << camera->id();
return ret;
}
}
if (data->mainPath_->isEnabled()) {
ret = mainPath_.start();
if (ret) {
param_->streamOff();
stat_->streamOff();
data->ipa_->stop();
freeBuffers(camera);
return ret;
}
}
if (hasSelfPath_ && data->selfPath_->isEnabled()) {
ret = selfPath_.start();
if (ret) {
mainPath_.stop();
param_->streamOff();
stat_->streamOff();
data->ipa_->stop();
freeBuffers(camera);
return ret;
}
}
isp_->setFrameStartEnabled(true);
activeCamera_ = camera;
return ret;
}
void PipelineHandlerRkISP1::stopDevice(Camera *camera)
{
RkISP1CameraData *data = cameraData(camera);
int ret;
isp_->setFrameStartEnabled(false);
data->ipa_->stop();
if (hasSelfPath_)
selfPath_.stop();
mainPath_.stop();
if (!isRaw_) {
ret = stat_->streamOff();
if (ret)
LOG(RkISP1, Warning)
<< "Failed to stop statistics for " << camera->id();
ret = param_->streamOff();
if (ret)
LOG(RkISP1, Warning)
<< "Failed to stop parameters for " << camera->id();
}
ASSERT(data->queuedRequests_.empty());
data->frameInfo_.clear();
freeBuffers(camera);
activeCamera_ = nullptr;
}
int PipelineHandlerRkISP1::queueRequestDevice(Camera *camera, Request *request)
{
RkISP1CameraData *data = cameraData(camera);
RkISP1FrameInfo *info = data->frameInfo_.create(data, request, isRaw_);
if (!info)
return -ENOENT;
data->ipa_->queueRequest(data->frame_, request->controls());
if (isRaw_) {
if (info->mainPathBuffer)
data->mainPath_->queueBuffer(info->mainPathBuffer);
if (data->selfPath_ && info->selfPathBuffer)
data->selfPath_->queueBuffer(info->selfPathBuffer);
} else {
data->ipa_->fillParamsBuffer(data->frame_,
info->paramBuffer->cookie());
}
data->frame_++;
return 0;
}
/* -----------------------------------------------------------------------------
* Match and Setup
*/
int PipelineHandlerRkISP1::initLinks(Camera *camera,
const CameraSensor *sensor,
const RkISP1CameraConfiguration &config)
{
RkISP1CameraData *data = cameraData(camera);
int ret;
ret = media_->disableLinks();
if (ret < 0)
return ret;
/*
* Configure the sensor links: enable the link corresponding to this
* camera.
*/
for (MediaLink *link : ispSink_->links()) {
if (link->source()->entity() != sensor->entity())
continue;
LOG(RkISP1, Debug)
<< "Enabling link from sensor '"
<< link->source()->entity()->name()
<< "' to ISP";
ret = link->setEnabled(true);
if (ret < 0)
return ret;
}
if (csi_) {
MediaLink *link = isp_->entity()->getPadByIndex(0)->links().at(0);
ret = link->setEnabled(true);
if (ret < 0)
return ret;
}
for (const StreamConfiguration &cfg : config) {
if (cfg.stream() == &data->mainPathStream_)
ret = data->mainPath_->setEnabled(true);
else if (hasSelfPath_ && cfg.stream() == &data->selfPathStream_)
ret = data->selfPath_->setEnabled(true);
else
return -EINVAL;
if (ret < 0)
return ret;
}
return 0;
}
int PipelineHandlerRkISP1::createCamera(MediaEntity *sensor)
{
int ret;
std::unique_ptr<RkISP1CameraData> data =
std::make_unique<RkISP1CameraData>(this, &mainPath_,
hasSelfPath_ ? &selfPath_ : nullptr);
data->sensor_ = std::make_unique<CameraSensor>(sensor);
ret = data->sensor_->init();
if (ret)
return ret;
/* Initialize the camera properties. */
data->properties_ = data->sensor_->properties();
/*
 * \todo Read delay values from the sensor itself or from a
 * sensor database. For now use generic values taken from
* the Raspberry Pi and listed as generic values.
*/
std::unordered_map<uint32_t, DelayedControls::ControlParams> params = {
{ V4L2_CID_ANALOGUE_GAIN, { 1, false } },
{ V4L2_CID_EXPOSURE, { 2, false } },
};
data->delayedCtrls_ =
std::make_unique<DelayedControls>(data->sensor_->device(),
params);
isp_->frameStart.connect(data->delayedCtrls_.get(),
&DelayedControls::applyControls);
ret = data->loadIPA(media_->hwRevision());
if (ret)
return ret;
std::set<Stream *> streams{
&data->mainPathStream_,
&data->selfPathStream_,
};
const std::string &id = data->sensor_->id();
std::shared_ptr<Camera> camera =
Camera::create(std::move(data), id, streams);
registerCamera(std::move(camera));
return 0;
}
bool PipelineHandlerRkISP1::match(DeviceEnumerator *enumerator)
{
const MediaPad *pad;
DeviceMatch dm("rkisp1");
dm.add("rkisp1_isp");
dm.add("rkisp1_resizer_mainpath");
dm.add("rkisp1_mainpath");
dm.add("rkisp1_stats");
dm.add("rkisp1_params");
media_ = acquireMediaDevice(enumerator, dm);
if (!media_)
return false;
if (!media_->hwRevision()) {
LOG(RkISP1, Error)
<< "The rkisp1 driver is too old, v5.11 or newer is required";
return false;
}
hasSelfPath_ = !!media_->getEntityByName("rkisp1_selfpath");
/* Create the V4L2 subdevices we will need. */
isp_ = V4L2Subdevice::fromEntityName(media_, "rkisp1_isp");
if (isp_->open() < 0)
return false;
/* Locate and open the optional CSI-2 receiver. */
ispSink_ = isp_->entity()->getPadByIndex(0);
if (!ispSink_ || ispSink_->links().empty())
return false;
pad = ispSink_->links().at(0)->source();
if (pad->entity()->function() == MEDIA_ENT_F_VID_IF_BRIDGE) {
csi_ = std::make_unique<V4L2Subdevice>(pad->entity());
if (csi_->open() < 0)
return false;
ispSink_ = csi_->entity()->getPadByIndex(0);
if (!ispSink_)
return false;
}
/* Locate and open the stats and params video nodes. */
stat_ = V4L2VideoDevice::fromEntityName(media_, "rkisp1_stats");
if (stat_->open() < 0)
return false;
param_ = V4L2VideoDevice::fromEntityName(media_, "rkisp1_params");
if (param_->open() < 0)
return false;
/* Locate and open the ISP main and self paths. */
if (!mainPath_.init(media_))
return false;
if (hasSelfPath_ && !selfPath_.init(media_))
return false;
mainPath_.bufferReady().connect(this, &PipelineHandlerRkISP1::bufferReady);
if (hasSelfPath_)
selfPath_.bufferReady().connect(this, &PipelineHandlerRkISP1::bufferReady);
stat_->bufferReady.connect(this, &PipelineHandlerRkISP1::statReady);
param_->bufferReady.connect(this, &PipelineHandlerRkISP1::paramReady);
/*
* Enumerate all sensors connected to the ISP and create one
* camera instance for each of them.
*/
bool registered = false;
for (MediaLink *link : ispSink_->links()) {
if (!createCamera(link->source()->entity()))
registered = true;
}
return registered;
}
/* -----------------------------------------------------------------------------
* Buffer Handling
*/
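/*
 * Complete the request associated with the frame info once all its buffers
 * have completed, the metadata has been processed and, unless operating in
 * raw mode, the parameters buffer has been dequeued.
 */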
void PipelineHandlerRkISP1::tryCompleteRequest(RkISP1FrameInfo *info)
{
RkISP1CameraData *data = cameraData(activeCamera_);
Request *request = info->request;
if (request->hasPendingBuffers())
return;
if (!info->metadataProcessed)
return;
if (!isRaw_ && !info->paramDequeued)
return;
data->frameInfo_.destroy(info->frame);
completeRequest(request);
}
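/*
 * Handle a buffer completed by the main or self path: record the sensor
 * timestamp in the request metadata, feed the frame's sensor controls to the
 * IPA when capturing in raw mode, and try to complete the request.
 */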
void PipelineHandlerRkISP1::bufferReady(FrameBuffer *buffer)
{
ASSERT(activeCamera_);
RkISP1CameraData *data = cameraData(activeCamera_);
RkISP1FrameInfo *info = data->frameInfo_.find(buffer);
if (!info)
return;
const FrameMetadata &metadata = buffer->metadata();
Request *request = buffer->request();
if (metadata.status != FrameMetadata::FrameCancelled) {
/*
* Record the sensor's timestamp in the request metadata.
*
* \todo The sensor timestamp should be better estimated by connecting
* to the V4L2Device::frameStart signal.
*/
request->metadata().set(controls::SensorTimestamp,
metadata.timestamp);
if (isRaw_) {
const ControlList &ctrls =
data->delayedCtrls_->get(metadata.sequence);
data->ipa_->processStatsBuffer(info->frame, 0, ctrls);
}
} else {
if (isRaw_)
info->metadataProcessed = true;
}
completeBuffer(request, buffer);
tryCompleteRequest(info);
}
void PipelineHandlerRkISP1::paramReady(FrameBuffer *buffer)
{
ASSERT(activeCamera_);
RkISP1CameraData *data = cameraData(activeCamera_);
RkISP1FrameInfo *info = data->frameInfo_.find(buffer);
if (!info)
return;
info->paramDequeued = true;
tryCompleteRequest(info);
}
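/*
 * Handle a completed statistics buffer by forwarding it to the IPA together
 * with the sensor controls applied to the frame. A cancelled buffer only
 * marks the metadata as processed so the request can still complete.
 */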
void PipelineHandlerRkISP1::statReady(FrameBuffer *buffer)
{
ASSERT(activeCamera_);
RkISP1CameraData *data = cameraData(activeCamera_);
RkISP1FrameInfo *info = data->frameInfo_.find(buffer);
if (!info)
return;
if (buffer->metadata().status == FrameMetadata::FrameCancelled) {
info->metadataProcessed = true;
tryCompleteRequest(info);
return;
}
if (data->frame_ <= buffer->metadata().sequence)
data->frame_ = buffer->metadata().sequence + 1;
data->ipa_->processStatsBuffer(info->frame, info->statBuffer->cookie(),
data->delayedCtrls_->get(buffer->metadata().sequence));
}
REGISTER_PIPELINE_HANDLER(PipelineHandlerRkISP1, "rkisp1")
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera/pipeline | repos/libcamera/src/libcamera/pipeline/simple/simple.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2020, Laurent Pinchart
* Copyright (C) 2019, Martijn Braam
*
* Pipeline handler for simple pipelines
*/
#include <algorithm>
#include <iterator>
#include <list>
#include <map>
#include <memory>
#include <queue>
#include <set>
#include <string.h>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include <linux/media-bus-format.h>
#include <libcamera/base/log.h>
#include <libcamera/camera.h>
#include <libcamera/control_ids.h>
#include <libcamera/request.h>
#include <libcamera/stream.h>
#include "libcamera/internal/camera.h"
#include "libcamera/internal/camera_sensor.h"
#include "libcamera/internal/converter.h"
#include "libcamera/internal/device_enumerator.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/pipeline_handler.h"
#include "libcamera/internal/software_isp/software_isp.h"
#include "libcamera/internal/v4l2_subdevice.h"
#include "libcamera/internal/v4l2_videodevice.h"
namespace libcamera {
LOG_DEFINE_CATEGORY(SimplePipeline)
/* -----------------------------------------------------------------------------
*
* Overview
* --------
*
* The SimplePipelineHandler relies on generic kernel APIs to control a camera
* device, without any device-specific code and with limited device-specific
* static data.
*
* To qualify for support by the simple pipeline handler, a device shall
*
* - be supported by V4L2 drivers, exposing the Media Controller API, the V4L2
* subdev APIs and the media bus format-based enumeration extension for the
* VIDIOC_ENUM_FMT ioctl ;
* - not expose any device-specific API from drivers to userspace ;
* - include one or more camera sensor media entities and one or more video
* capture devices ;
* - have a capture pipeline with linear paths from the camera sensors to the
* video capture devices ; and
* - have an optional memory-to-memory device to perform format conversion
* and/or scaling, exposed as a V4L2 M2M device.
*
* As devices that require a specific pipeline handler may still match the
* above characteristics, the simple pipeline handler doesn't attempt to
* automatically determine which devices it can support. It instead relies on
* an explicit list of supported devices, provided in the supportedDevices
* array.
*
* When matching a device, the pipeline handler enumerates all camera sensors
* and attempts, for each of them, to find a path to a video capture video node.
* It does so by using a breadth-first search to find the shortest path from the
* sensor device to a valid capture device. This is guaranteed to produce a
 * valid path on devices with only one option and is a good heuristic on more
* complex devices to skip paths that aren't suitable for the simple pipeline
* handler. For instance, on the IPU-based i.MX6, the shortest path will skip
* encoders and image converters, and it will end in a CSI capture device.
* A more complex graph search algorithm could be implemented if a device that
* would otherwise be compatible with the pipeline handler isn't correctly
* handled by this heuristic.
*
* Once the camera data instances have been created, the match() function
* creates a V4L2VideoDevice or V4L2Subdevice instance for each entity used by
* any of the cameras and stores them in SimplePipelineHandler::entities_,
* accessible by the SimpleCameraData class through the
* SimplePipelineHandler::subdev() and SimplePipelineHandler::video() functions.
* This avoids duplication of subdev instances between different cameras when
* the same entity is used in multiple paths.
*
* Finally, all camera data instances are initialized to gather information
* about the possible pipeline configurations for the corresponding camera. If
* valid pipeline configurations are found, a Camera is registered for the
* SimpleCameraData instance.
*
* Pipeline Traversal
* ------------------
*
* During the breadth-first search, the pipeline is traversed from entity to
* entity, by following media graph links from source to sink, starting at the
* camera sensor.
*
* When reaching an entity (on its sink side), if the entity is a V4L2 subdev
* that supports the streams API, the subdev internal routes are followed to
* find the connected source pads. Otherwise all of the entity's source pads
* are considered to continue the graph traversal. The pipeline handler
* currently considers the default internal routes only and doesn't attempt to
* setup custom routes. This can be extended if needed.
*
* The shortest path between the camera sensor and a video node is stored in
* SimpleCameraData::entities_ as a list of SimpleCameraData::Entity structures,
* ordered along the data path from the camera sensor to the video node. The
* Entity structure stores a pointer to the MediaEntity, as well as information
* about how it is connected in that particular path for later usage when
* configuring the pipeline.
*
* Pipeline Configuration
* ----------------------
*
* The simple pipeline handler configures the pipeline by propagating V4L2
* subdev formats from the camera sensor to the video node. The format is first
* set on the camera sensor's output, picking a resolution supported by the
* sensor that best matches the needs of the requested streams. Then, on every
* link in the pipeline, the format is retrieved on the link source and set
* unmodified on the link sink.
*
* The best sensor resolution is selected using a heuristic that tries to
* minimize the required bus and memory bandwidth, as the simple pipeline
* handler is typically used on smaller, less powerful systems. To avoid the
* need to upscale, the pipeline handler picks the smallest sensor resolution
* large enough to accommodate the need of all streams. Resolutions that
* significantly restrict the field of view are ignored.
*
 * When initializing the camera data, the above format propagation procedure
* is repeated for every media bus format and size supported by the camera
* sensor. Upon reaching the video node, the pixel formats compatible with the
* media bus format are enumerated. Each combination of the input media bus
* format, output pixel format and output size are recorded in an instance of
* the SimpleCameraData::Configuration structure, stored in the
* SimpleCameraData::configs_ vector.
*
* Format Conversion and Scaling
* -----------------------------
*
* The capture pipeline isn't expected to include a scaler, and if a scaler is
* available, it is ignored when configuring the pipeline. However, the simple
* pipeline handler supports optional memory-to-memory converters to scale the
* image and convert it to a different pixel format. If such a converter is
* present, the pipeline handler enumerates, for each pipeline configuration,
* the pixel formats and sizes that the converter can produce for the output of
* the capture video node, and stores the information in the outputFormats and
* outputSizes of the SimpleCameraData::Configuration structure.
*
* Concurrent Access to Cameras
* ----------------------------
*
* The cameras created by the same pipeline handler instance may share hardware
 * resources. For instance, a platform may have multiple CSI-2 receivers but a
* single DMA engine, prohibiting usage of multiple cameras concurrently. This
* depends heavily on the hardware architecture, which the simple pipeline
* handler has no a priori knowledge of. The pipeline handler thus implements a
* heuristic to handle sharing of hardware resources in a generic fashion.
*
 * Two cameras are considered to be mutually exclusive if they share common
* pads along the pipeline from the camera sensor to the video node. An entity
* can thus be used concurrently by multiple cameras, as long as pads are
* distinct.
*
* A resource reservation mechanism is implemented by the SimplePipelineHandler
* acquirePipeline() and releasePipeline() functions to manage exclusive access
* to pads. A camera reserves all the pads present in its pipeline when it is
* started, and the start() function returns an error if any of the required
* pads is already in use. When the camera is stopped, the pads it has reserved
* are released.
*/
class SimplePipelineHandler;
struct SimplePipelineInfo {
const char *driver;
/*
* Each converter in the list contains the name
* and the number of streams it supports.
*/
std::vector<std::pair<const char *, unsigned int>> converters;
/*
	 * Use of the Software ISP is enabled on a per-driver basis.
*
* The Software ISP can't be used together with the converters.
*/
bool swIspEnabled;
};
namespace {
static const SimplePipelineInfo supportedDevices[] = {
{ "dcmipp", {}, false },
{ "imx7-csi", { { "pxp", 1 } }, false },
{ "j721e-csi2rx", {}, false },
{ "mtk-seninf", { { "mtk-mdp", 3 } }, false },
{ "mxc-isi", {}, false },
{ "qcom-camss", {}, true },
{ "sun6i-csi", {}, false },
};
} /* namespace */
class SimpleCameraData : public Camera::Private
{
public:
SimpleCameraData(SimplePipelineHandler *pipe,
unsigned int numStreams,
MediaEntity *sensor);
bool isValid() const { return sensor_ != nullptr; }
SimplePipelineHandler *pipe();
int init();
int setupLinks();
int setupFormats(V4L2SubdeviceFormat *format,
V4L2Subdevice::Whence whence,
Transform transform = Transform::Identity);
void bufferReady(FrameBuffer *buffer);
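	/* Map a stream back to its index in streams_ by pointer arithmetic. */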
unsigned int streamIndex(const Stream *stream) const
{
return stream - &streams_.front();
}
struct Entity {
/* The media entity, always valid. */
MediaEntity *entity;
/*
* Whether or not the entity is a subdev that supports the
* routing API.
*/
bool supportsRouting;
/*
* The local sink pad connected to the upstream entity, null for
* the camera sensor at the beginning of the pipeline.
*/
const MediaPad *sink;
/*
* The local source pad connected to the downstream entity, null
* for the video node at the end of the pipeline.
*/
const MediaPad *source;
/*
* The link on the source pad, to the downstream entity, null
* for the video node at the end of the pipeline.
*/
MediaLink *sourceLink;
};
struct Configuration {
uint32_t code;
Size sensorSize;
PixelFormat captureFormat;
Size captureSize;
std::vector<PixelFormat> outputFormats;
SizeRange outputSizes;
};
std::vector<Stream> streams_;
/*
* All entities in the pipeline, from the camera sensor to the video
* node.
*/
std::list<Entity> entities_;
std::unique_ptr<CameraSensor> sensor_;
V4L2VideoDevice *video_;
std::vector<Configuration> configs_;
std::map<PixelFormat, std::vector<const Configuration *>> formats_;
std::vector<std::unique_ptr<FrameBuffer>> conversionBuffers_;
std::queue<std::map<unsigned int, FrameBuffer *>> conversionQueue_;
bool useConversion_;
std::unique_ptr<Converter> converter_;
std::unique_ptr<SoftwareIsp> swIsp_;
private:
void tryPipeline(unsigned int code, const Size &size);
static std::vector<const MediaPad *> routedSourcePads(MediaPad *sink);
void conversionInputDone(FrameBuffer *buffer);
void conversionOutputDone(FrameBuffer *buffer);
void ispStatsReady();
void setSensorControls(const ControlList &sensorControls);
};
class SimpleCameraConfiguration : public CameraConfiguration
{
public:
SimpleCameraConfiguration(Camera *camera, SimpleCameraData *data);
Status validate() override;
const SimpleCameraData::Configuration *pipeConfig() const
{
return pipeConfig_;
}
bool needConversion() const { return needConversion_; }
const Transform &combinedTransform() const { return combinedTransform_; }
private:
/*
* The SimpleCameraData instance is guaranteed to be valid as long as
* the corresponding Camera instance is valid. In order to borrow a
* reference to the camera data, store a new reference to the camera.
*/
std::shared_ptr<Camera> camera_;
SimpleCameraData *data_;
const SimpleCameraData::Configuration *pipeConfig_;
bool needConversion_;
Transform combinedTransform_;
};
class SimplePipelineHandler : public PipelineHandler
{
public:
SimplePipelineHandler(CameraManager *manager);
std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
int start(Camera *camera, const ControlList *controls) override;
void stopDevice(Camera *camera) override;
bool match(DeviceEnumerator *enumerator) override;
V4L2VideoDevice *video(const MediaEntity *entity);
V4L2Subdevice *subdev(const MediaEntity *entity);
MediaDevice *converter() { return converter_; }
bool swIspEnabled() const { return swIspEnabled_; }
protected:
int queueRequestDevice(Camera *camera, Request *request) override;
private:
static constexpr unsigned int kNumInternalBuffers = 3;
struct EntityData {
std::unique_ptr<V4L2VideoDevice> video;
std::unique_ptr<V4L2Subdevice> subdev;
std::map<const MediaPad *, SimpleCameraData *> owners;
};
SimpleCameraData *cameraData(Camera *camera)
{
return static_cast<SimpleCameraData *>(camera->_d());
}
std::vector<MediaEntity *> locateSensors();
static int resetRoutingTable(V4L2Subdevice *subdev);
const MediaPad *acquirePipeline(SimpleCameraData *data);
void releasePipeline(SimpleCameraData *data);
MediaDevice *media_;
std::map<const MediaEntity *, EntityData> entities_;
MediaDevice *converter_;
bool swIspEnabled_;
};
/* -----------------------------------------------------------------------------
* Camera Data
*/
SimpleCameraData::SimpleCameraData(SimplePipelineHandler *pipe,
unsigned int numStreams,
MediaEntity *sensor)
: Camera::Private(pipe), streams_(numStreams)
{
int ret;
/*
* Find the shortest path from the camera sensor to a video capture
	 * device using the breadth-first search algorithm. This heuristic is
	 * most likely to skip paths that aren't suitable for the simple
* pipeline handler on more complex devices, and is guaranteed to
* produce a valid path on all devices that have a single option.
*
* For instance, on the IPU-based i.MX6Q, the shortest path will skip
* encoders and image converters, and will end in a CSI capture device.
*/
std::unordered_set<MediaEntity *> visited;
std::queue<std::tuple<MediaEntity *, MediaPad *>> queue;
/* Remember at each entity where we came from. */
std::unordered_map<MediaEntity *, Entity> parents;
MediaEntity *entity = nullptr;
MediaEntity *video = nullptr;
MediaPad *sinkPad;
queue.push({ sensor, nullptr });
while (!queue.empty()) {
std::tie(entity, sinkPad) = queue.front();
queue.pop();
/* Found the capture device. */
if (entity->function() == MEDIA_ENT_F_IO_V4L) {
LOG(SimplePipeline, Debug)
<< "Found capture device " << entity->name();
video = entity;
break;
}
visited.insert(entity);
/*
* Add direct downstream entities to the search queue. If the
* current entity supports the subdev internal routing API,
* restrict the search to downstream entities reachable through
* active routes.
*/
std::vector<const MediaPad *> pads;
bool supportsRouting = false;
if (sinkPad) {
pads = routedSourcePads(sinkPad);
if (!pads.empty())
supportsRouting = true;
}
if (pads.empty()) {
for (const MediaPad *pad : entity->pads()) {
if (!(pad->flags() & MEDIA_PAD_FL_SOURCE))
continue;
pads.push_back(pad);
}
}
for (const MediaPad *pad : pads) {
for (MediaLink *link : pad->links()) {
MediaEntity *next = link->sink()->entity();
if (visited.find(next) == visited.end()) {
queue.push({ next, link->sink() });
Entity e{ entity, supportsRouting, sinkPad, pad, link };
parents.insert({ next, e });
}
}
}
}
if (!video)
return;
/*
* With the parents, we can follow back our way from the capture device
* to the sensor. Store all the entities in the pipeline, from the
* camera sensor to the video node, in entities_.
*/
entities_.push_front({ entity, false, sinkPad, nullptr, nullptr });
for (auto it = parents.find(entity); it != parents.end();
it = parents.find(entity)) {
const Entity &e = it->second;
entities_.push_front(e);
entity = e.entity;
}
/* Finally also remember the sensor. */
sensor_ = std::make_unique<CameraSensor>(sensor);
ret = sensor_->init();
if (ret) {
sensor_.reset();
return;
}
LOG(SimplePipeline, Debug)
<< "Found pipeline: "
<< utils::join(entities_, " -> ",
[](const Entity &e) {
std::string s = "[";
if (e.sink)
s += std::to_string(e.sink->index()) + "|";
s += e.entity->name();
if (e.source)
s += "|" + std::to_string(e.source->index());
s += "]";
return s;
});
}
SimplePipelineHandler *SimpleCameraData::pipe()
{
return static_cast<SimplePipelineHandler *>(Camera::Private::pipe());
}
int SimpleCameraData::init()
{
SimplePipelineHandler *pipe = SimpleCameraData::pipe();
int ret;
/* Open the converter, if any. */
MediaDevice *converter = pipe->converter();
if (converter) {
converter_ = ConverterFactoryBase::create(converter);
if (!converter_) {
LOG(SimplePipeline, Warning)
<< "Failed to create converter, disabling format conversion";
converter_.reset();
} else {
converter_->inputBufferReady.connect(this, &SimpleCameraData::conversionInputDone);
converter_->outputBufferReady.connect(this, &SimpleCameraData::conversionOutputDone);
}
}
/*
	 * Instantiate the Software ISP if it is enabled for this driver and
	 * no converter is in use.
*/
if (!converter_ && pipe->swIspEnabled()) {
swIsp_ = std::make_unique<SoftwareIsp>(pipe, sensor_.get());
if (!swIsp_->isValid()) {
LOG(SimplePipeline, Warning)
<< "Failed to create software ISP, disabling software debayering";
swIsp_.reset();
} else {
/*
* The inputBufferReady signal is emitted from the soft ISP thread,
* and needs to be handled in the pipeline handler thread. Signals
* implement queued delivery, but this works transparently only if
* the receiver is bound to the target thread. As the
* SimpleCameraData class doesn't inherit from the Object class, it
* is not bound to any thread, and the signal would be delivered
* synchronously. Instead, connect the signal to a lambda function
* bound explicitly to the pipe, which is bound to the pipeline
* handler thread. The function then simply forwards the call to
* conversionInputDone().
*/
swIsp_->inputBufferReady.connect(pipe, [this](FrameBuffer *buffer) {
this->conversionInputDone(buffer);
});
swIsp_->outputBufferReady.connect(this, &SimpleCameraData::conversionOutputDone);
swIsp_->ispStatsReady.connect(this, &SimpleCameraData::ispStatsReady);
swIsp_->setSensorControls.connect(this, &SimpleCameraData::setSensorControls);
}
}
video_ = pipe->video(entities_.back().entity);
ASSERT(video_);
/*
* Setup links first as some subdev drivers take active links into
* account to propagate TRY formats. Such is life :-(
*/
ret = setupLinks();
if (ret < 0)
return ret;
/*
* Generate the list of possible pipeline configurations by trying each
* media bus format and size supported by the sensor.
*/
for (unsigned int code : sensor_->mbusCodes()) {
for (const Size &size : sensor_->sizes(code))
tryPipeline(code, size);
}
if (configs_.empty()) {
LOG(SimplePipeline, Error) << "No valid configuration found";
return -EINVAL;
}
/* Map the pixel formats to configurations. */
for (const Configuration &config : configs_) {
formats_[config.captureFormat].push_back(&config);
for (PixelFormat fmt : config.outputFormats)
formats_[fmt].push_back(&config);
}
properties_ = sensor_->properties();
return 0;
}
/*
* Generate a list of supported pipeline configurations for a sensor media bus
* code and size.
*
* First propagate the media bus code and size through the pipeline from the
* camera sensor to the video node. Then, query the video node for all supported
* pixel formats compatible with the media bus code. For each pixel format, store
* a full pipeline configuration in the configs_ vector.
*/
void SimpleCameraData::tryPipeline(unsigned int code, const Size &size)
{
/*
* Propagate the format through the pipeline, and enumerate the
* corresponding possible V4L2 pixel formats on the video node.
*/
V4L2SubdeviceFormat format{};
format.code = code;
format.size = size;
int ret = setupFormats(&format, V4L2Subdevice::TryFormat);
if (ret < 0) {
/* Pipeline configuration failed, skip this configuration. */
format.code = code;
format.size = size;
LOG(SimplePipeline, Debug)
<< "Sensor format " << format
<< " not supported for this pipeline";
return;
}
V4L2VideoDevice::Formats videoFormats = video_->formats(format.code);
LOG(SimplePipeline, Debug)
<< "Adding configuration for " << format.size
<< " in pixel formats [ "
<< utils::join(videoFormats, ", ",
[](const auto &f) {
return f.first.toString();
})
<< " ]";
for (const auto &videoFormat : videoFormats) {
PixelFormat pixelFormat = videoFormat.first.toPixelFormat();
if (!pixelFormat)
continue;
Configuration config;
config.code = code;
config.sensorSize = size;
config.captureFormat = pixelFormat;
config.captureSize = format.size;
if (converter_) {
config.outputFormats = converter_->formats(pixelFormat);
config.outputSizes = converter_->sizes(format.size);
} else if (swIsp_) {
config.outputFormats = swIsp_->formats(pixelFormat);
config.outputSizes = swIsp_->sizes(pixelFormat, format.size);
if (config.outputFormats.empty()) {
				/* Do not use the Software ISP for unsupported pixel formats. */
config.outputFormats = { pixelFormat };
config.outputSizes = config.captureSize;
}
} else {
config.outputFormats = { pixelFormat };
config.outputSizes = config.captureSize;
}
configs_.push_back(config);
}
}
int SimpleCameraData::setupLinks()
{
int ret;
/*
* Configure all links along the pipeline. Some entities may not allow
* multiple sink links to be enabled together, even on different sink
* pads. We must thus start by disabling all sink links (but the one we
* want to enable) before enabling the pipeline link.
*
* The entities_ list stores entities along with their source link. We
* need to process the link in the context of the sink entity, so
* record the source link of the current entity as the sink link of the
* next entity, and skip the first entity in the loop.
*/
MediaLink *sinkLink = nullptr;
for (SimpleCameraData::Entity &e : entities_) {
if (!sinkLink) {
sinkLink = e.sourceLink;
continue;
}
for (MediaPad *pad : e.entity->pads()) {
/*
* If the entity supports the V4L2 internal routing API,
* assume that it may carry multiple independent streams
* concurrently, and only disable links on the sink and
* source pads used by the pipeline.
*/
if (e.supportsRouting && pad != e.sink && pad != e.source)
continue;
for (MediaLink *link : pad->links()) {
if (link == sinkLink)
continue;
if ((link->flags() & MEDIA_LNK_FL_ENABLED) &&
!(link->flags() & MEDIA_LNK_FL_IMMUTABLE)) {
ret = link->setEnabled(false);
if (ret < 0)
return ret;
}
}
}
if (!(sinkLink->flags() & MEDIA_LNK_FL_ENABLED)) {
ret = sinkLink->setEnabled(true);
if (ret < 0)
return ret;
}
sinkLink = e.sourceLink;
}
return 0;
}
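/*
 * Set the format on the sensor output and propagate it along the pipeline,
 * checking at every link that the sink entity accepts the format produced by
 * the source.
 */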
int SimpleCameraData::setupFormats(V4L2SubdeviceFormat *format,
V4L2Subdevice::Whence whence,
Transform transform)
{
SimplePipelineHandler *pipe = SimpleCameraData::pipe();
int ret;
/*
* Configure the format on the sensor output and propagate it through
* the pipeline.
*/
ret = sensor_->setFormat(format, transform);
if (ret < 0)
return ret;
for (const Entity &e : entities_) {
if (!e.sourceLink)
break;
MediaLink *link = e.sourceLink;
MediaPad *source = link->source();
MediaPad *sink = link->sink();
if (source->entity() != sensor_->entity()) {
V4L2Subdevice *subdev = pipe->subdev(source->entity());
ret = subdev->getFormat(source->index(), format, whence);
if (ret < 0)
return ret;
}
if (sink->entity()->function() != MEDIA_ENT_F_IO_V4L) {
V4L2SubdeviceFormat sourceFormat = *format;
V4L2Subdevice *subdev = pipe->subdev(sink->entity());
ret = subdev->setFormat(sink->index(), format, whence);
if (ret < 0)
return ret;
if (format->code != sourceFormat.code ||
format->size != sourceFormat.size) {
LOG(SimplePipeline, Debug)
<< "Source '" << source->entity()->name()
<< "':" << source->index()
<< " produces " << sourceFormat
<< ", sink '" << sink->entity()->name()
<< "':" << sink->index()
<< " requires " << *format;
return -EINVAL;
}
}
LOG(SimplePipeline, Debug)
<< "Link '" << source->entity()->name()
<< "':" << source->index()
<< " -> '" << sink->entity()->name()
<< "':" << sink->index()
<< " configured with format " << *format;
}
return 0;
}
void SimpleCameraData::bufferReady(FrameBuffer *buffer)
{
SimplePipelineHandler *pipe = SimpleCameraData::pipe();
/*
* If an error occurred during capture, or if the buffer was cancelled,
* complete the request, even if the converter is in use as there's no
* point converting an erroneous buffer.
*/
if (buffer->metadata().status != FrameMetadata::FrameSuccess) {
if (!useConversion_) {
/* No conversion, just complete the request. */
Request *request = buffer->request();
pipe->completeBuffer(request, buffer);
pipe->completeRequest(request);
return;
}
/*
* The converter or Software ISP is in use. Requeue the internal
* buffer for capture (unless the stream is being stopped), and
* complete the request with all the user-facing buffers.
*/
if (buffer->metadata().status != FrameMetadata::FrameCancelled)
video_->queueBuffer(buffer);
if (conversionQueue_.empty())
return;
Request *request = nullptr;
for (auto &item : conversionQueue_.front()) {
FrameBuffer *outputBuffer = item.second;
request = outputBuffer->request();
pipe->completeBuffer(request, outputBuffer);
}
conversionQueue_.pop();
if (request)
pipe->completeRequest(request);
return;
}
/*
* Record the sensor's timestamp in the request metadata. The request
* needs to be obtained from the user-facing buffer, as internal
* buffers are free-wheeling and have no request associated with them.
*
* \todo The sensor timestamp should be better estimated by connecting
* to the V4L2Device::frameStart signal if the platform provides it.
*/
Request *request = buffer->request();
if (useConversion_ && !conversionQueue_.empty()) {
const std::map<unsigned int, FrameBuffer *> &outputs =
conversionQueue_.front();
if (!outputs.empty()) {
FrameBuffer *outputBuffer = outputs.begin()->second;
if (outputBuffer)
request = outputBuffer->request();
}
}
if (request)
request->metadata().set(controls::SensorTimestamp,
buffer->metadata().timestamp);
/*
* Queue the captured and the request buffer to the converter or Software
* ISP if format conversion is needed. If there's no queued request, just
* requeue the captured buffer for capture.
*/
if (useConversion_) {
if (conversionQueue_.empty()) {
video_->queueBuffer(buffer);
return;
}
if (converter_)
converter_->queueBuffers(buffer, conversionQueue_.front());
else
swIsp_->queueBuffers(buffer, conversionQueue_.front());
conversionQueue_.pop();
return;
}
/* Otherwise simply complete the request. */
pipe->completeBuffer(request, buffer);
pipe->completeRequest(request);
}
void SimpleCameraData::conversionInputDone(FrameBuffer *buffer)
{
/* Queue the input buffer back for capture. */
video_->queueBuffer(buffer);
}
void SimpleCameraData::conversionOutputDone(FrameBuffer *buffer)
{
SimplePipelineHandler *pipe = SimpleCameraData::pipe();
/* Complete the buffer and the request. */
Request *request = buffer->request();
if (pipe->completeBuffer(request, buffer))
pipe->completeRequest(request);
}
void SimpleCameraData::ispStatsReady()
{
/* \todo Use the DelayedControls class */
swIsp_->processStats(sensor_->getControls({ V4L2_CID_ANALOGUE_GAIN,
V4L2_CID_EXPOSURE }));
}
void SimpleCameraData::setSensorControls(const ControlList &sensorControls)
{
ControlList ctrls(sensorControls);
sensor_->setControls(&ctrls);
}
/* Retrieve all source pads connected to a sink pad through active routes. */
std::vector<const MediaPad *> SimpleCameraData::routedSourcePads(MediaPad *sink)
{
MediaEntity *entity = sink->entity();
std::unique_ptr<V4L2Subdevice> subdev =
std::make_unique<V4L2Subdevice>(entity);
int ret = subdev->open();
if (ret < 0)
return {};
V4L2Subdevice::Routing routing = {};
ret = subdev->getRouting(&routing, V4L2Subdevice::ActiveFormat);
if (ret < 0)
return {};
std::vector<const MediaPad *> pads;
for (const V4L2Subdevice::Route &route : routing) {
if (sink->index() != route.sink.pad ||
!(route.flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
continue;
const MediaPad *pad = entity->getPadByIndex(route.source.pad);
if (!pad) {
LOG(SimplePipeline, Warning)
<< "Entity " << entity->name()
<< " has invalid route source pad "
<< route.source.pad;
}
pads.push_back(pad);
}
return pads;
}
/* -----------------------------------------------------------------------------
* Camera Configuration
*/
SimpleCameraConfiguration::SimpleCameraConfiguration(Camera *camera,
SimpleCameraData *data)
: CameraConfiguration(), camera_(camera->shared_from_this()),
data_(data), pipeConfig_(nullptr)
{
}
namespace {
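/*
 * Clamp the requested size to the supported range and align it down to the
 * range's step grid, anchored at the minimum size. A step of zero is treated
 * as a single step spanning the whole range.
 */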
static Size adjustSize(const Size &requestedSize, const SizeRange &supportedSizes)
{
ASSERT(supportedSizes.min <= supportedSizes.max);
if (supportedSizes.min == supportedSizes.max)
return supportedSizes.max;
unsigned int hStep = supportedSizes.hStep;
unsigned int vStep = supportedSizes.vStep;
if (hStep == 0)
hStep = supportedSizes.max.width - supportedSizes.min.width;
if (vStep == 0)
vStep = supportedSizes.max.height - supportedSizes.min.height;
Size adjusted = requestedSize.boundedTo(supportedSizes.max)
.expandedTo(supportedSizes.min);
return adjusted.shrunkBy(supportedSizes.min)
.alignedDownTo(hStep, vStep)
.grownBy(supportedSizes.min);
}
} /* namespace */
CameraConfiguration::Status SimpleCameraConfiguration::validate()
{
const CameraSensor *sensor = data_->sensor_.get();
Status status = Valid;
if (config_.empty())
return Invalid;
Orientation requestedOrientation = orientation;
combinedTransform_ = sensor->computeTransform(&orientation);
if (orientation != requestedOrientation)
status = Adjusted;
/* Cap the number of entries to the available streams. */
if (config_.size() > data_->streams_.size()) {
config_.resize(data_->streams_.size());
status = Adjusted;
}
/* Find the largest stream size. */
Size maxStreamSize;
for (const StreamConfiguration &cfg : config_)
maxStreamSize.expandTo(cfg.size);
LOG(SimplePipeline, Debug)
<< "Largest stream size is " << maxStreamSize;
/*
* Find the best configuration for the pipeline using a heuristic.
* First select the pixel format based on the streams (which are
* considered ordered from highest to lowest priority). Default to the
* first pipeline configuration if no streams request a supported pixel
* format.
*/
const std::vector<const SimpleCameraData::Configuration *> *configs =
&data_->formats_.begin()->second;
for (const StreamConfiguration &cfg : config_) {
auto it = data_->formats_.find(cfg.pixelFormat);
if (it != data_->formats_.end()) {
configs = &it->second;
break;
}
}
/*
* \todo Pick the best sensor output media bus format when the
* requested pixel format can be produced from multiple sensor media
* bus formats.
*/
/*
* Then pick, among the possible configuration for the pixel format,
* the smallest sensor resolution that can accommodate all streams
* without upscaling.
*/
const SimpleCameraData::Configuration *maxPipeConfig = nullptr;
pipeConfig_ = nullptr;
for (const SimpleCameraData::Configuration *pipeConfig : *configs) {
const Size &size = pipeConfig->captureSize;
if (size.width >= maxStreamSize.width &&
size.height >= maxStreamSize.height) {
if (!pipeConfig_ || size < pipeConfig_->captureSize)
pipeConfig_ = pipeConfig;
}
if (!maxPipeConfig || maxPipeConfig->captureSize < size)
maxPipeConfig = pipeConfig;
}
/* If no configuration was large enough, select the largest one. */
if (!pipeConfig_)
pipeConfig_ = maxPipeConfig;
LOG(SimplePipeline, Debug)
<< "Picked "
<< V4L2SubdeviceFormat{ pipeConfig_->code, pipeConfig_->sensorSize, {} }
<< " -> " << pipeConfig_->captureSize
<< "-" << pipeConfig_->captureFormat
<< " for max stream size " << maxStreamSize;
/*
* Adjust the requested streams.
*
* Enable usage of the converter when producing multiple streams, as
* the video capture device can't capture to multiple buffers.
*
* It is possible to produce up to one stream without conversion
* (provided the format and size match), at the expense of more complex
* buffer handling (including allocation of internal buffers to be used
* when a request doesn't contain a buffer for the stream that doesn't
* require any conversion, similar to raw capture use cases). This is
* left as a future improvement.
*/
needConversion_ = config_.size() > 1;
for (unsigned int i = 0; i < config_.size(); ++i) {
StreamConfiguration &cfg = config_[i];
/* Adjust the pixel format and size. */
auto it = std::find(pipeConfig_->outputFormats.begin(),
pipeConfig_->outputFormats.end(),
cfg.pixelFormat);
if (it == pipeConfig_->outputFormats.end())
it = pipeConfig_->outputFormats.begin();
PixelFormat pixelFormat = *it;
if (cfg.pixelFormat != pixelFormat) {
LOG(SimplePipeline, Debug) << "Adjusting pixel format";
cfg.pixelFormat = pixelFormat;
status = Adjusted;
}
if (!pipeConfig_->outputSizes.contains(cfg.size)) {
Size adjustedSize = pipeConfig_->captureSize;
/*
* The converter (when present) may not be able to output
* a size identical to its input size. The capture size is thus
* not guaranteed to be a valid output size. In such cases, use
			 * the closest valid output size smaller than the requested one.
*/
if (!pipeConfig_->outputSizes.contains(adjustedSize))
adjustedSize = adjustSize(cfg.size, pipeConfig_->outputSizes);
LOG(SimplePipeline, Debug)
<< "Adjusting size from " << cfg.size
<< " to " << adjustedSize;
cfg.size = adjustedSize;
status = Adjusted;
}
/* \todo Create a libcamera core class to group format and size */
if (cfg.pixelFormat != pipeConfig_->captureFormat ||
cfg.size != pipeConfig_->captureSize)
needConversion_ = true;
/* Set the stride, frameSize and bufferCount. */
if (needConversion_) {
std::tie(cfg.stride, cfg.frameSize) =
data_->converter_
? data_->converter_->strideAndFrameSize(cfg.pixelFormat,
cfg.size)
: data_->swIsp_->strideAndFrameSize(cfg.pixelFormat,
cfg.size);
if (cfg.stride == 0)
return Invalid;
} else {
V4L2DeviceFormat format;
format.fourcc = data_->video_->toV4L2PixelFormat(cfg.pixelFormat);
format.size = cfg.size;
int ret = data_->video_->tryFormat(&format);
if (ret < 0)
return Invalid;
cfg.stride = format.planes[0].bpl;
cfg.frameSize = format.planes[0].size;
}
cfg.bufferCount = 3;
}
return status;
}
/* -----------------------------------------------------------------------------
* Pipeline Handler
*/
SimplePipelineHandler::SimplePipelineHandler(CameraManager *manager)
: PipelineHandler(manager), converter_(nullptr)
{
}
std::unique_ptr<CameraConfiguration>
SimplePipelineHandler::generateConfiguration(Camera *camera, Span<const StreamRole> roles)
{
SimpleCameraData *data = cameraData(camera);
std::unique_ptr<CameraConfiguration> config =
std::make_unique<SimpleCameraConfiguration>(camera, data);
if (roles.empty())
return config;
/* Create the formats map. */
std::map<PixelFormat, std::vector<SizeRange>> formats;
for (const SimpleCameraData::Configuration &cfg : data->configs_) {
for (PixelFormat format : cfg.outputFormats)
formats[format].push_back(cfg.outputSizes);
}
/* Sort the sizes and merge any consecutive overlapping ranges. */
for (auto &[format, sizes] : formats) {
std::sort(sizes.begin(), sizes.end(),
[](SizeRange &a, SizeRange &b) {
return a.min < b.min;
});
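		/*
		 * Merge in place: extend the current range while the next one
		 * overlaps it, otherwise move the next range down to follow the
		 * current one, then trim the leftover tail.
		 */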
auto cur = sizes.begin();
auto next = cur;
while (++next != sizes.end()) {
if (cur->max.width >= next->min.width &&
cur->max.height >= next->min.height)
cur->max = next->max;
else if (++cur != next)
*cur = *next;
}
sizes.erase(++cur, sizes.end());
}
/*
* Create the stream configurations. Take the first entry in the formats
* map as the default, for lack of a better option.
*
* \todo Implement a better way to pick the default format
*/
for ([[maybe_unused]] StreamRole role : roles) {
StreamConfiguration cfg{ StreamFormats{ formats } };
cfg.pixelFormat = formats.begin()->first;
cfg.size = formats.begin()->second[0].max;
config->addConfiguration(cfg);
}
config->validate();
return config;
}
int SimplePipelineHandler::configure(Camera *camera, CameraConfiguration *c)
{
SimpleCameraConfiguration *config =
static_cast<SimpleCameraConfiguration *>(c);
SimpleCameraData *data = cameraData(camera);
V4L2VideoDevice *video = data->video_;
int ret;
/*
* Configure links on the pipeline and propagate formats from the
* sensor to the video node.
*/
ret = data->setupLinks();
if (ret < 0)
return ret;
const SimpleCameraData::Configuration *pipeConfig = config->pipeConfig();
V4L2SubdeviceFormat format{};
format.code = pipeConfig->code;
format.size = pipeConfig->sensorSize;
ret = data->setupFormats(&format, V4L2Subdevice::ActiveFormat,
config->combinedTransform());
if (ret < 0)
return ret;
/* Configure the video node. */
V4L2PixelFormat videoFormat = video->toV4L2PixelFormat(pipeConfig->captureFormat);
V4L2DeviceFormat captureFormat;
captureFormat.fourcc = videoFormat;
captureFormat.size = pipeConfig->captureSize;
ret = video->setFormat(&captureFormat);
if (ret)
return ret;
if (captureFormat.planesCount != 1) {
LOG(SimplePipeline, Error)
<< "Planar formats using non-contiguous memory not supported";
return -EINVAL;
}
if (captureFormat.fourcc != videoFormat ||
captureFormat.size != pipeConfig->captureSize) {
LOG(SimplePipeline, Error)
<< "Unable to configure capture in "
<< pipeConfig->captureSize << "-" << videoFormat
<< " (got " << captureFormat << ")";
return -EINVAL;
}
/* Configure the converter if needed. */
std::vector<std::reference_wrapper<StreamConfiguration>> outputCfgs;
data->useConversion_ = config->needConversion();
for (unsigned int i = 0; i < config->size(); ++i) {
StreamConfiguration &cfg = config->at(i);
cfg.setStream(&data->streams_[i]);
if (data->useConversion_)
outputCfgs.push_back(cfg);
}
if (outputCfgs.empty())
return 0;
StreamConfiguration inputCfg;
inputCfg.pixelFormat = pipeConfig->captureFormat;
inputCfg.size = pipeConfig->captureSize;
inputCfg.stride = captureFormat.planes[0].bpl;
inputCfg.bufferCount = kNumInternalBuffers;
return data->converter_
? data->converter_->configure(inputCfg, outputCfgs)
: data->swIsp_->configure(inputCfg, outputCfgs,
data->sensor_->controls());
}
int SimplePipelineHandler::exportFrameBuffers(Camera *camera, Stream *stream,
std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
SimpleCameraData *data = cameraData(camera);
unsigned int count = stream->configuration().bufferCount;
/*
* Export buffers on the converter or capture video node, depending on
* whether the converter is used or not.
*/
if (data->useConversion_)
return data->converter_
? data->converter_->exportBuffers(data->streamIndex(stream),
count, buffers)
: data->swIsp_->exportBuffers(data->streamIndex(stream),
count, buffers);
else
return data->video_->exportBuffers(count, buffers);
}
int SimplePipelineHandler::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
{
SimpleCameraData *data = cameraData(camera);
V4L2VideoDevice *video = data->video_;
int ret;
const MediaPad *pad = acquirePipeline(data);
if (pad) {
LOG(SimplePipeline, Info)
<< "Failed to acquire pipeline, entity "
<< pad->entity()->name() << " in use";
return -EBUSY;
}
if (data->useConversion_) {
/*
* When using the converter allocate a fixed number of internal
* buffers.
*/
ret = video->allocateBuffers(kNumInternalBuffers,
&data->conversionBuffers_);
} else {
/* Otherwise, prepare for using buffers from the only stream. */
Stream *stream = &data->streams_[0];
ret = video->importBuffers(stream->configuration().bufferCount);
}
if (ret < 0) {
releasePipeline(data);
return ret;
}
video->bufferReady.connect(data, &SimpleCameraData::bufferReady);
ret = video->streamOn();
if (ret < 0) {
stop(camera);
return ret;
}
if (data->useConversion_) {
if (data->converter_)
ret = data->converter_->start();
else if (data->swIsp_)
ret = data->swIsp_->start();
else
ret = 0;
if (ret < 0) {
stop(camera);
return ret;
}
/* Queue all internal buffers for capture. */
for (std::unique_ptr<FrameBuffer> &buffer : data->conversionBuffers_)
video->queueBuffer(buffer.get());
}
return 0;
}
void SimplePipelineHandler::stopDevice(Camera *camera)
{
SimpleCameraData *data = cameraData(camera);
V4L2VideoDevice *video = data->video_;
if (data->useConversion_) {
if (data->converter_)
data->converter_->stop();
else if (data->swIsp_)
data->swIsp_->stop();
}
video->streamOff();
video->releaseBuffers();
video->bufferReady.disconnect(data, &SimpleCameraData::bufferReady);
data->conversionBuffers_.clear();
releasePipeline(data);
}
int SimplePipelineHandler::queueRequestDevice(Camera *camera, Request *request)
{
SimpleCameraData *data = cameraData(camera);
int ret;
std::map<unsigned int, FrameBuffer *> buffers;
for (auto &[stream, buffer] : request->buffers()) {
/*
* If conversion is needed, push the buffer to the converter
* queue, it will be handed to the converter in the capture
* completion handler.
*/
if (data->useConversion_) {
buffers.emplace(data->streamIndex(stream), buffer);
} else {
ret = data->video_->queueBuffer(buffer);
if (ret < 0)
return ret;
}
}
if (data->useConversion_)
data->conversionQueue_.push(std::move(buffers));
return 0;
}
/* -----------------------------------------------------------------------------
* Match and Setup
*/
std::vector<MediaEntity *> SimplePipelineHandler::locateSensors()
{
std::vector<MediaEntity *> entities;
/*
* Gather all the camera sensor entities based on the function they
* expose.
*/
for (MediaEntity *entity : media_->entities()) {
if (entity->function() == MEDIA_ENT_F_CAM_SENSOR)
entities.push_back(entity);
}
if (entities.empty())
return {};
/*
* Sensors can be made of multiple entities. For instance, a raw sensor
* can be connected to an ISP, and the combination of both should be
* treated as one sensor. To support this, as a crude heuristic, check
* the downstream entity from the camera sensor, and if it is an ISP,
* use it instead of the sensor.
*/
std::vector<MediaEntity *> sensors;
for (MediaEntity *entity : entities) {
/*
* Locate the downstream entity by following the first link
* from a source pad.
*/
const MediaLink *link = nullptr;
for (const MediaPad *pad : entity->pads()) {
if ((pad->flags() & MEDIA_PAD_FL_SOURCE) &&
!pad->links().empty()) {
link = pad->links()[0];
break;
}
}
if (!link)
continue;
MediaEntity *remote = link->sink()->entity();
if (remote->function() == MEDIA_ENT_F_PROC_VIDEO_ISP)
sensors.push_back(remote);
else
sensors.push_back(entity);
}
/*
* Remove duplicates, in case multiple sensors are connected to the
* same ISP.
*/
std::sort(sensors.begin(), sensors.end());
auto last = std::unique(sensors.begin(), sensors.end());
sensors.erase(last, sensors.end());
return sensors;
}
int SimplePipelineHandler::resetRoutingTable(V4L2Subdevice *subdev)
{
/* Reset the media entity routing table to its default state. */
V4L2Subdevice::Routing routing = {};
int ret = subdev->getRouting(&routing, V4L2Subdevice::TryFormat);
if (ret)
return ret;
ret = subdev->setRouting(&routing, V4L2Subdevice::ActiveFormat);
if (ret)
return ret;
/*
* If the routing table is empty we won't be able to meaningfully use
* the subdev.
*/
if (routing.empty()) {
LOG(SimplePipeline, Error)
<< "Default routing table of " << subdev->deviceNode()
<< " is empty";
return -EINVAL;
}
LOG(SimplePipeline, Debug)
<< "Routing table of " << subdev->deviceNode()
<< " reset to " << routing;
return 0;
}
bool SimplePipelineHandler::match(DeviceEnumerator *enumerator)
{
const SimplePipelineInfo *info = nullptr;
unsigned int numStreams = 1;
for (const SimplePipelineInfo &inf : supportedDevices) {
DeviceMatch dm(inf.driver);
media_ = acquireMediaDevice(enumerator, dm);
if (media_) {
info = &inf;
break;
}
}
if (!media_)
return false;
for (const auto &[name, streams] : info->converters) {
DeviceMatch converterMatch(name);
converter_ = acquireMediaDevice(enumerator, converterMatch);
if (converter_) {
numStreams = streams;
break;
}
}
swIspEnabled_ = info->swIspEnabled;
/* Locate the sensors. */
std::vector<MediaEntity *> sensors = locateSensors();
if (sensors.empty()) {
LOG(SimplePipeline, Error) << "No sensor found";
return false;
}
/*
* Create one camera data instance for each sensor and gather all
* entities in all pipelines.
*/
std::vector<std::unique_ptr<SimpleCameraData>> pipelines;
std::set<MediaEntity *> entities;
pipelines.reserve(sensors.size());
for (MediaEntity *sensor : sensors) {
std::unique_ptr<SimpleCameraData> data =
std::make_unique<SimpleCameraData>(this, numStreams, sensor);
if (!data->isValid()) {
LOG(SimplePipeline, Error)
<< "No valid pipeline for sensor '"
<< sensor->name() << "', skipping";
continue;
}
for (SimpleCameraData::Entity &entity : data->entities_)
entities.insert(entity.entity);
pipelines.push_back(std::move(data));
}
if (entities.empty())
return false;
/*
* Insert all entities in the global entities list. Create and open
* V4L2VideoDevice and V4L2Subdevice instances for the corresponding
* entities.
*/
for (MediaEntity *entity : entities) {
std::unique_ptr<V4L2VideoDevice> video;
std::unique_ptr<V4L2Subdevice> subdev;
int ret;
switch (entity->type()) {
case MediaEntity::Type::V4L2VideoDevice:
video = std::make_unique<V4L2VideoDevice>(entity);
ret = video->open();
if (ret < 0) {
LOG(SimplePipeline, Error)
<< "Failed to open " << video->deviceNode()
<< ": " << strerror(-ret);
return false;
}
break;
case MediaEntity::Type::V4L2Subdevice:
subdev = std::make_unique<V4L2Subdevice>(entity);
ret = subdev->open();
if (ret < 0) {
LOG(SimplePipeline, Error)
<< "Failed to open " << subdev->deviceNode()
<< ": " << strerror(-ret);
return false;
}
if (subdev->caps().hasStreams()) {
/*
* Reset the routing table to its default state
				 * to make sure entities are enumerated according
				 * to the default routing configuration.
*/
ret = resetRoutingTable(subdev.get());
if (ret) {
LOG(SimplePipeline, Error)
<< "Failed to reset routes for "
<< subdev->deviceNode() << ": "
<< strerror(-ret);
return false;
}
}
break;
default:
break;
}
entities_[entity] = { std::move(video), std::move(subdev), {} };
}
/* Initialize each pipeline and register a corresponding camera. */
bool registered = false;
for (std::unique_ptr<SimpleCameraData> &data : pipelines) {
int ret = data->init();
if (ret < 0)
continue;
std::set<Stream *> streams;
std::transform(data->streams_.begin(), data->streams_.end(),
std::inserter(streams, streams.end()),
[](Stream &stream) { return &stream; });
const std::string &id = data->sensor_->id();
std::shared_ptr<Camera> camera =
Camera::create(std::move(data), id, streams);
registerCamera(std::move(camera));
registered = true;
}
return registered;
}
V4L2VideoDevice *SimplePipelineHandler::video(const MediaEntity *entity)
{
auto iter = entities_.find(entity);
if (iter == entities_.end())
return nullptr;
return iter->second.video.get();
}
V4L2Subdevice *SimplePipelineHandler::subdev(const MediaEntity *entity)
{
auto iter = entities_.find(entity);
if (iter == entities_.end())
return nullptr;
return iter->second.subdev.get();
}
/**
* \brief Acquire all resources needed by the camera pipeline
* \return nullptr on success, a pointer to the contended pad on error
*/
const MediaPad *SimplePipelineHandler::acquirePipeline(SimpleCameraData *data)
{
for (const SimpleCameraData::Entity &entity : data->entities_) {
const EntityData &edata = entities_[entity.entity];
if (entity.sink) {
auto iter = edata.owners.find(entity.sink);
if (iter != edata.owners.end() && iter->second != data)
return entity.sink;
}
if (entity.source) {
auto iter = edata.owners.find(entity.source);
if (iter != edata.owners.end() && iter->second != data)
return entity.source;
}
}
for (const SimpleCameraData::Entity &entity : data->entities_) {
EntityData &edata = entities_[entity.entity];
if (entity.sink)
edata.owners[entity.sink] = data;
if (entity.source)
edata.owners[entity.source] = data;
}
return nullptr;
}
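/**
 * \brief Release all resources reserved by acquirePipeline()
 */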
void SimplePipelineHandler::releasePipeline(SimpleCameraData *data)
{
for (const SimpleCameraData::Entity &entity : data->entities_) {
EntityData &edata = entities_[entity.entity];
if (entity.sink) {
auto iter = edata.owners.find(entity.sink);
ASSERT(iter->second == data);
edata.owners.erase(iter);
}
if (entity.source) {
auto iter = edata.owners.find(entity.source);
ASSERT(iter->second == data);
edata.owners.erase(iter);
}
}
}
REGISTER_PIPELINE_HANDLER(SimplePipelineHandler, "simple")
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera/pipeline | repos/libcamera/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* Pipeline handler for uvcvideo devices
*/
#include <algorithm>
#include <fstream>
#include <iomanip>
#include <math.h>
#include <memory>
#include <tuple>
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
#include <libcamera/camera.h>
#include <libcamera/control_ids.h>
#include <libcamera/controls.h>
#include <libcamera/property_ids.h>
#include <libcamera/request.h>
#include <libcamera/stream.h>
#include "libcamera/internal/camera.h"
#include "libcamera/internal/device_enumerator.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/pipeline_handler.h"
#include "libcamera/internal/sysfs.h"
#include "libcamera/internal/v4l2_videodevice.h"
namespace libcamera {
LOG_DEFINE_CATEGORY(UVC)
class UVCCameraData : public Camera::Private
{
public:
UVCCameraData(PipelineHandler *pipe)
: Camera::Private(pipe)
{
}
int init(MediaDevice *media);
void addControl(uint32_t cid, const ControlInfo &v4l2info,
ControlInfoMap::Map *ctrls);
void bufferReady(FrameBuffer *buffer);
const std::string &id() const { return id_; }
std::unique_ptr<V4L2VideoDevice> video_;
Stream stream_;
std::map<PixelFormat, std::vector<SizeRange>> formats_;
private:
bool generateId();
std::string id_;
};
class UVCCameraConfiguration : public CameraConfiguration
{
public:
UVCCameraConfiguration(UVCCameraData *data);
Status validate() override;
private:
UVCCameraData *data_;
};
class PipelineHandlerUVC : public PipelineHandler
{
public:
PipelineHandlerUVC(CameraManager *manager);
std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
int start(Camera *camera, const ControlList *controls) override;
void stopDevice(Camera *camera) override;
int queueRequestDevice(Camera *camera, Request *request) override;
bool match(DeviceEnumerator *enumerator) override;
private:
int processControl(ControlList *controls, unsigned int id,
const ControlValue &value);
int processControls(UVCCameraData *data, Request *request);
UVCCameraData *cameraData(Camera *camera)
{
return static_cast<UVCCameraData *>(camera->_d());
}
};
UVCCameraConfiguration::UVCCameraConfiguration(UVCCameraData *data)
: CameraConfiguration(), data_(data)
{
}
CameraConfiguration::Status UVCCameraConfiguration::validate()
{
Status status = Valid;
if (config_.empty())
return Invalid;
if (orientation != Orientation::Rotate0) {
orientation = Orientation::Rotate0;
status = Adjusted;
}
/* Cap the number of entries to the available streams. */
if (config_.size() > 1) {
config_.resize(1);
status = Adjusted;
}
StreamConfiguration &cfg = config_[0];
const StreamFormats &formats = cfg.formats();
const PixelFormat pixelFormat = cfg.pixelFormat;
const Size size = cfg.size;
const std::vector<PixelFormat> pixelFormats = formats.pixelformats();
auto iter = std::find(pixelFormats.begin(), pixelFormats.end(), pixelFormat);
if (iter == pixelFormats.end()) {
cfg.pixelFormat = pixelFormats.front();
LOG(UVC, Debug)
<< "Adjusting pixel format from " << pixelFormat
<< " to " << cfg.pixelFormat;
status = Adjusted;
}
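	/*
	 * Adjust the size to the largest supported size that does not exceed
	 * the requested size, falling back to the first advertised size if
	 * none qualifies.
	 */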
const std::vector<Size> &formatSizes = formats.sizes(cfg.pixelFormat);
cfg.size = formatSizes.front();
for (const Size &formatsSize : formatSizes) {
if (formatsSize > size)
break;
cfg.size = formatsSize;
}
if (cfg.size != size) {
LOG(UVC, Debug)
<< "Adjusting size from " << size << " to " << cfg.size;
status = Adjusted;
}
cfg.bufferCount = 4;
V4L2DeviceFormat format;
format.fourcc = data_->video_->toV4L2PixelFormat(cfg.pixelFormat);
format.size = cfg.size;
int ret = data_->video_->tryFormat(&format);
if (ret)
return Invalid;
cfg.stride = format.planes[0].bpl;
cfg.frameSize = format.planes[0].size;
if (cfg.colorSpace != format.colorSpace) {
cfg.colorSpace = format.colorSpace;
status = Adjusted;
}
return status;
}
PipelineHandlerUVC::PipelineHandlerUVC(CameraManager *manager)
: PipelineHandler(manager)
{
}
std::unique_ptr<CameraConfiguration>
PipelineHandlerUVC::generateConfiguration(Camera *camera,
Span<const StreamRole> roles)
{
UVCCameraData *data = cameraData(camera);
std::unique_ptr<CameraConfiguration> config =
std::make_unique<UVCCameraConfiguration>(data);
if (roles.empty())
return config;
StreamFormats formats(data->formats_);
StreamConfiguration cfg(formats);
cfg.pixelFormat = formats.pixelformats().front();
cfg.size = formats.sizes(cfg.pixelFormat).back();
cfg.bufferCount = 4;
config->addConfiguration(cfg);
config->validate();
return config;
}
int PipelineHandlerUVC::configure(Camera *camera, CameraConfiguration *config)
{
UVCCameraData *data = cameraData(camera);
StreamConfiguration &cfg = config->at(0);
int ret;
V4L2DeviceFormat format;
format.fourcc = data->video_->toV4L2PixelFormat(cfg.pixelFormat);
format.size = cfg.size;
ret = data->video_->setFormat(&format);
if (ret)
return ret;
if (format.size != cfg.size ||
format.fourcc != data->video_->toV4L2PixelFormat(cfg.pixelFormat))
return -EINVAL;
cfg.setStream(&data->stream_);
return 0;
}
int PipelineHandlerUVC::exportFrameBuffers(Camera *camera, Stream *stream,
std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
UVCCameraData *data = cameraData(camera);
unsigned int count = stream->configuration().bufferCount;
return data->video_->exportBuffers(count, buffers);
}
int PipelineHandlerUVC::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
{
UVCCameraData *data = cameraData(camera);
unsigned int count = data->stream_.configuration().bufferCount;
int ret = data->video_->importBuffers(count);
if (ret < 0)
return ret;
ret = data->video_->streamOn();
if (ret < 0) {
data->video_->releaseBuffers();
return ret;
}
return 0;
}
void PipelineHandlerUVC::stopDevice(Camera *camera)
{
UVCCameraData *data = cameraData(camera);
data->video_->streamOff();
data->video_->releaseBuffers();
}
int PipelineHandlerUVC::processControl(ControlList *controls, unsigned int id,
const ControlValue &value)
{
uint32_t cid;
if (id == controls::Brightness)
cid = V4L2_CID_BRIGHTNESS;
else if (id == controls::Contrast)
cid = V4L2_CID_CONTRAST;
else if (id == controls::Saturation)
cid = V4L2_CID_SATURATION;
else if (id == controls::AeEnable)
cid = V4L2_CID_EXPOSURE_AUTO;
else if (id == controls::ExposureTime)
cid = V4L2_CID_EXPOSURE_ABSOLUTE;
else if (id == controls::AnalogueGain)
cid = V4L2_CID_GAIN;
else
return -EINVAL;
const ControlInfo &v4l2Info = controls->infoMap()->at(cid);
int32_t min = v4l2Info.min().get<int32_t>();
int32_t def = v4l2Info.def().get<int32_t>();
int32_t max = v4l2Info.max().get<int32_t>();
/*
* See UVCCameraData::addControl() for explanations of the different
* value mappings.
*/
switch (cid) {
case V4L2_CID_BRIGHTNESS: {
float scale = std::max(max - def, def - min);
float fvalue = value.get<float>() * scale + def;
controls->set(cid, static_cast<int32_t>(lroundf(fvalue)));
break;
}
case V4L2_CID_SATURATION: {
float scale = def - min;
float fvalue = value.get<float>() * scale + min;
controls->set(cid, static_cast<int32_t>(lroundf(fvalue)));
break;
}
case V4L2_CID_EXPOSURE_AUTO: {
int32_t ivalue = value.get<bool>()
? V4L2_EXPOSURE_APERTURE_PRIORITY
: V4L2_EXPOSURE_MANUAL;
controls->set(V4L2_CID_EXPOSURE_AUTO, ivalue);
break;
}
case V4L2_CID_EXPOSURE_ABSOLUTE:
controls->set(cid, value.get<int32_t>() / 100);
break;
case V4L2_CID_CONTRAST:
case V4L2_CID_GAIN: {
float m = (4.0f - 1.0f) / (max - def);
float p = 1.0f - m * def;
if (m * min + p < 0.5f) {
m = (1.0f - 0.5f) / (def - min);
p = 1.0f - m * def;
}
float fvalue = (value.get<float>() - p) / m;
controls->set(cid, static_cast<int32_t>(lroundf(fvalue)));
break;
}
default: {
int32_t ivalue = value.get<int32_t>();
controls->set(cid, ivalue);
break;
}
}
return 0;
}
int PipelineHandlerUVC::processControls(UVCCameraData *data, Request *request)
{
ControlList controls(data->video_->controls());
for (const auto &[id, value] : request->controls())
processControl(&controls, id, value);
for (const auto &ctrl : controls)
LOG(UVC, Debug)
<< "Setting control " << utils::hex(ctrl.first)
<< " to " << ctrl.second.toString();
int ret = data->video_->setControls(&controls);
if (ret) {
LOG(UVC, Error) << "Failed to set controls: " << ret;
return ret < 0 ? ret : -EINVAL;
}
return ret;
}
int PipelineHandlerUVC::queueRequestDevice(Camera *camera, Request *request)
{
UVCCameraData *data = cameraData(camera);
FrameBuffer *buffer = request->findBuffer(&data->stream_);
if (!buffer) {
LOG(UVC, Error)
<< "Attempt to queue request with invalid stream";
return -ENOENT;
}
int ret = processControls(data, request);
if (ret < 0)
return ret;
ret = data->video_->queueBuffer(buffer);
if (ret < 0)
return ret;
return 0;
}
bool PipelineHandlerUVC::match(DeviceEnumerator *enumerator)
{
MediaDevice *media;
DeviceMatch dm("uvcvideo");
media = acquireMediaDevice(enumerator, dm);
if (!media)
return false;
std::unique_ptr<UVCCameraData> data = std::make_unique<UVCCameraData>(this);
if (data->init(media))
return false;
/* Create and register the camera. */
std::string id = data->id();
std::set<Stream *> streams{ &data->stream_ };
std::shared_ptr<Camera> camera =
Camera::create(std::move(data), id, streams);
registerCamera(std::move(camera));
/* Enable hot-unplug notifications. */
hotplugMediaDevice(media);
return true;
}
int UVCCameraData::init(MediaDevice *media)
{
int ret;
/* Locate and initialise the camera data with the default video node. */
const std::vector<MediaEntity *> &entities = media->entities();
auto entity = std::find_if(entities.begin(), entities.end(),
[](MediaEntity *e) {
return e->flags() & MEDIA_ENT_FL_DEFAULT;
});
if (entity == entities.end()) {
LOG(UVC, Error) << "Could not find a default video device";
return -ENODEV;
}
/* Create and open the video device. */
video_ = std::make_unique<V4L2VideoDevice>(*entity);
ret = video_->open();
if (ret)
return ret;
video_->bufferReady.connect(this, &UVCCameraData::bufferReady);
/* Generate the camera ID. */
if (!generateId()) {
LOG(UVC, Error) << "Failed to generate camera ID";
return -EINVAL;
}
/*
* Populate the map of supported formats, and infer the camera sensor
* resolution from the largest size it advertises.
*/
Size resolution;
for (const auto &format : video_->formats()) {
PixelFormat pixelFormat = format.first.toPixelFormat();
if (!pixelFormat.isValid())
continue;
formats_[pixelFormat] = format.second;
const std::vector<SizeRange> &sizeRanges = format.second;
for (const SizeRange &sizeRange : sizeRanges) {
if (sizeRange.max > resolution)
resolution = sizeRange.max;
}
}
if (formats_.empty()) {
LOG(UVC, Error)
<< "Camera " << id_ << " (" << media->model()
<< ") doesn't expose any supported format";
return -EINVAL;
}
/* Populate the camera properties. */
properties_.set(properties::Model, utils::toAscii(media->model()));
/*
* Derive the location from the device removable attribute in sysfs.
* Non-removable devices are assumed to be front as we lack detailed
 * location information, and removable devices are considered external.
*
* The sysfs removable attribute is derived from the ACPI _UPC attribute
* if available, or from the USB hub descriptors otherwise. ACPI data
* may not be very reliable, and the USB hub descriptors may not be
* accurate on DT-based platforms. A heuristic may need to be
* implemented later if too many devices end up being miscategorized.
*
* \todo Find a way to tell front and back devices apart. This could
* come from the ACPI _PLD, but that may be even more unreliable than
* the _UPC.
*/
properties::LocationEnum location = properties::CameraLocationExternal;
std::ifstream file(video_->devicePath() + "/../removable");
if (file.is_open()) {
std::string value;
std::getline(file, value);
file.close();
if (value == "fixed")
location = properties::CameraLocationFront;
}
properties_.set(properties::Location, location);
properties_.set(properties::PixelArraySize, resolution);
properties_.set(properties::PixelArrayActiveAreas, { Rectangle(resolution) });
/* Initialise the supported controls. */
ControlInfoMap::Map ctrls;
for (const auto &ctrl : video_->controls()) {
uint32_t cid = ctrl.first->id();
const ControlInfo &info = ctrl.second;
addControl(cid, info, &ctrls);
}
controlInfo_ = ControlInfoMap(std::move(ctrls), controls::controls);
return 0;
}
bool UVCCameraData::generateId()
{
const std::string path = video_->devicePath();
/* Create a controller ID from the first device described in firmware. */
std::string controllerId;
std::string searchPath = path;
while (true) {
std::string::size_type pos = searchPath.rfind('/');
if (pos <= 1) {
LOG(UVC, Error) << "Can not find controller ID";
return false;
}
searchPath = searchPath.substr(0, pos);
controllerId = sysfs::firmwareNodePath(searchPath);
if (!controllerId.empty())
break;
}
/*
* Create a USB ID from the device path which has the known format:
*
* path = bus, "-", ports, ":", config, ".", interface ;
* bus = number ;
* ports = port, [ ".", ports ] ;
* port = number ;
* config = number ;
* interface = number ;
*
* Example: 3-2.4:1.0
*
* The bus is not guaranteed to be stable and needs to be stripped from
* the USB ID. The final USB ID is built up of the ports, config and
* interface properties.
*
* Example 2.4:1.0.
*/
std::string usbId = utils::basename(path.c_str());
usbId = usbId.substr(usbId.find('-') + 1);
/* Create a device ID from the USB device's vendor and product ID. */
std::string deviceId;
for (const char *name : { "idVendor", "idProduct" }) {
std::ifstream file(path + "/../" + name);
if (!file.is_open())
return false;
std::string value;
std::getline(file, value);
file.close();
if (!deviceId.empty())
deviceId += ":";
deviceId += value;
}
id_ = controllerId + "-" + usbId + "-" + deviceId;
return true;
}
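/*
 * Putting the pieces together, a hypothetical external webcam could thus get
 * an ID of the form "<controller firmware node path>-2.4:1.0-046d:082d",
 * i.e. controllerId "-" usbId "-" deviceId as assembled above.
 */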
void UVCCameraData::addControl(uint32_t cid, const ControlInfo &v4l2Info,
ControlInfoMap::Map *ctrls)
{
const ControlId *id;
ControlInfo info;
/* Map the control ID. */
switch (cid) {
case V4L2_CID_BRIGHTNESS:
id = &controls::Brightness;
break;
case V4L2_CID_CONTRAST:
id = &controls::Contrast;
break;
case V4L2_CID_SATURATION:
id = &controls::Saturation;
break;
case V4L2_CID_EXPOSURE_AUTO:
id = &controls::AeEnable;
break;
case V4L2_CID_EXPOSURE_ABSOLUTE:
id = &controls::ExposureTime;
break;
case V4L2_CID_GAIN:
id = &controls::AnalogueGain;
break;
default:
return;
}
/* Map the control info. */
int32_t min = v4l2Info.min().get<int32_t>();
int32_t max = v4l2Info.max().get<int32_t>();
int32_t def = v4l2Info.def().get<int32_t>();
switch (cid) {
case V4L2_CID_BRIGHTNESS: {
/*
* The Brightness control is a float, with 0.0 mapped to the
* default value. The control range is [-1.0, 1.0], but the V4L2
* default may not be in the middle of the V4L2 range.
* Accommodate this by restricting the range of the libcamera
* control, but always within the maximum limits.
*/
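/*
 * Worked example (hypothetical V4L2 range): with min = 0, max = 255 and
 * def = 128, scale = max(127, 128) = 128, so the control is reported with
 * limits [-1.0, 127/128] and a default of 0.0.
 */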
float scale = std::max(max - def, def - min);
info = ControlInfo{
{ static_cast<float>(min - def) / scale },
{ static_cast<float>(max - def) / scale },
{ 0.0f }
};
break;
}
case V4L2_CID_SATURATION:
/*
* The Saturation control is a float, with 0.0 mapped to the
* minimum value (corresponding to a fully desaturated image)
* and 1.0 mapped to the default value. Calculate the maximum
* value accordingly.
*/
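/*
 * Worked example (hypothetical V4L2 range): with min = 0, def = 100 and
 * max = 200, the control is reported with limits [0.0, 2.0] and a default
 * of 1.0.
 */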
info = ControlInfo{
{ 0.0f },
{ static_cast<float>(max - min) / (def - min) },
{ 1.0f }
};
break;
case V4L2_CID_EXPOSURE_AUTO:
info = ControlInfo{ false, true, true };
break;
case V4L2_CID_EXPOSURE_ABSOLUTE:
/*
* ExposureTime is in units of 1 µs, and UVC expects
* V4L2_CID_EXPOSURE_ABSOLUTE in units of 100 µs.
*/
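/*
 * For example, a hypothetical V4L2 range of [1, 5000] (in 100 µs units) is
 * exposed to applications as [100, 500000] µs, and a requested ExposureTime
 * of 10000 µs is written back to the device as 100 in processControl().
 */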
info = ControlInfo{
{ min * 100 },
{ max * 100 },
{ def * 100 }
};
break;
case V4L2_CID_CONTRAST:
case V4L2_CID_GAIN: {
/*
* The Contrast and AnalogueGain controls are floats, with 1.0
* mapped to the default value. UVC doesn't specify units, and
* cameras have been seen to expose very different ranges for
* the controls. Arbitrarily assume that the minimum and
* maximum values are respectively no lower than 0.5 and no
* higher than 4.0.
*/
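/*
 * Worked example (hypothetical V4L2 range): with min = 0, def = 32 and
 * max = 255, m = 3/223 and p ~= 0.57, so the control is reported with
 * limits [~0.57, 4.0] and a default of 1.0. Had this first mapping pushed
 * the minimum below 0.5, the second mapping would have been used instead.
 */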
float m = (4.0f - 1.0f) / (max - def);
float p = 1.0f - m * def;
if (m * min + p < 0.5f) {
m = (1.0f - 0.5f) / (def - min);
p = 1.0f - m * def;
}
info = ControlInfo{
{ m * min + p },
{ m * max + p },
{ 1.0f }
};
break;
}
default:
info = v4l2Info;
break;
}
ctrls->emplace(id, info);
}
void UVCCameraData::bufferReady(FrameBuffer *buffer)
{
Request *request = buffer->request();
/* \todo Use the UVC metadata to calculate a more precise timestamp */
request->metadata().set(controls::SensorTimestamp,
buffer->metadata().timestamp);
pipe()->completeBuffer(request, buffer);
pipe()->completeRequest(request);
}
REGISTER_PIPELINE_HANDLER(PipelineHandlerUVC, "uvcvideo")
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera/pipeline | repos/libcamera/src/libcamera/pipeline/vimc/vimc.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2018, Google Inc.
*
* Pipeline handler for the vimc device
*/
#include <algorithm>
#include <iomanip>
#include <map>
#include <math.h>
#include <tuple>
#include <linux/media-bus-format.h>
#include <linux/version.h>
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
#include <libcamera/camera.h>
#include <libcamera/control_ids.h>
#include <libcamera/controls.h>
#include <libcamera/formats.h>
#include <libcamera/request.h>
#include <libcamera/stream.h>
#include <libcamera/ipa/ipa_interface.h>
#include <libcamera/ipa/ipa_module_info.h>
#include <libcamera/ipa/vimc_ipa_interface.h>
#include <libcamera/ipa/vimc_ipa_proxy.h>
#include "libcamera/internal/camera.h"
#include "libcamera/internal/camera_sensor.h"
#include "libcamera/internal/device_enumerator.h"
#include "libcamera/internal/framebuffer.h"
#include "libcamera/internal/ipa_manager.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/pipeline_handler.h"
#include "libcamera/internal/v4l2_subdevice.h"
#include "libcamera/internal/v4l2_videodevice.h"
namespace libcamera {
LOG_DEFINE_CATEGORY(VIMC)
class VimcCameraData : public Camera::Private
{
public:
VimcCameraData(PipelineHandler *pipe, MediaDevice *media)
: Camera::Private(pipe), media_(media)
{
}
int init();
int allocateMockIPABuffers();
void bufferReady(FrameBuffer *buffer);
void paramsBufferReady(unsigned int id, const Flags<ipa::vimc::TestFlag> flags);
MediaDevice *media_;
std::unique_ptr<CameraSensor> sensor_;
std::unique_ptr<V4L2Subdevice> debayer_;
std::unique_ptr<V4L2Subdevice> scaler_;
std::unique_ptr<V4L2VideoDevice> video_;
std::unique_ptr<V4L2VideoDevice> raw_;
Stream stream_;
std::unique_ptr<ipa::vimc::IPAProxyVimc> ipa_;
std::vector<std::unique_ptr<FrameBuffer>> mockIPABufs_;
};
class VimcCameraConfiguration : public CameraConfiguration
{
public:
VimcCameraConfiguration(VimcCameraData *data);
Status validate() override;
private:
VimcCameraData *data_;
};
class PipelineHandlerVimc : public PipelineHandler
{
public:
PipelineHandlerVimc(CameraManager *manager);
std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
int start(Camera *camera, const ControlList *controls) override;
void stopDevice(Camera *camera) override;
int queueRequestDevice(Camera *camera, Request *request) override;
bool match(DeviceEnumerator *enumerator) override;
private:
int processControls(VimcCameraData *data, Request *request);
VimcCameraData *cameraData(Camera *camera)
{
return static_cast<VimcCameraData *>(camera->_d());
}
};
namespace {
static const std::map<PixelFormat, uint32_t> pixelformats{
{ formats::RGB888, MEDIA_BUS_FMT_BGR888_1X24 },
{ formats::BGR888, MEDIA_BUS_FMT_RGB888_1X24 },
};
static constexpr Size kMinSize{ 16, 16 };
static constexpr Size kMaxSize{ 4096, 2160 };
} /* namespace */
VimcCameraConfiguration::VimcCameraConfiguration(VimcCameraData *data)
: CameraConfiguration(), data_(data)
{
}
CameraConfiguration::Status VimcCameraConfiguration::validate()
{
Status status = Valid;
if (config_.empty())
return Invalid;
if (orientation != Orientation::Rotate0) {
orientation = Orientation::Rotate0;
status = Adjusted;
}
/* Cap the number of entries to the available streams. */
if (config_.size() > 1) {
config_.resize(1);
status = Adjusted;
}
StreamConfiguration &cfg = config_[0];
/* Adjust the pixel format. */
const std::vector<libcamera::PixelFormat> formats = cfg.formats().pixelformats();
if (std::find(formats.begin(), formats.end(), cfg.pixelFormat) == formats.end()) {
LOG(VIMC, Debug) << "Adjusting format to BGR888";
cfg.pixelFormat = formats::BGR888;
status = Adjusted;
}
/* Clamp the size based on the device limits. */
const Size size = cfg.size;
/*
* The sensor output size is aligned to two pixels in both directions.
* Additionally, prior to v5.16, the scaler hardcodes a x3 scale-up
* ratio, requiring the output width and height to be multiples of 6.
*/
Size minSize{ kMinSize };
unsigned int alignment = 2;
if (data_->media_->version() < KERNEL_VERSION(5, 16, 0)) {
minSize *= 3;
alignment *= 3;
}
cfg.size.expandTo(minSize).boundTo(kMaxSize)
.alignDownTo(alignment, alignment);
if (cfg.size != size) {
LOG(VIMC, Debug)
<< "Adjusting size to " << cfg.size;
status = Adjusted;
}
cfg.bufferCount = 4;
V4L2DeviceFormat format;
format.fourcc = data_->video_->toV4L2PixelFormat(cfg.pixelFormat);
format.size = cfg.size;
int ret = data_->video_->tryFormat(&format);
if (ret)
return Invalid;
cfg.stride = format.planes[0].bpl;
cfg.frameSize = format.planes[0].size;
return status;
}
PipelineHandlerVimc::PipelineHandlerVimc(CameraManager *manager)
: PipelineHandler(manager)
{
}
std::unique_ptr<CameraConfiguration>
PipelineHandlerVimc::generateConfiguration(Camera *camera,
Span<const StreamRole> roles)
{
VimcCameraData *data = cameraData(camera);
std::unique_ptr<CameraConfiguration> config =
std::make_unique<VimcCameraConfiguration>(data);
if (roles.empty())
return config;
std::map<PixelFormat, std::vector<SizeRange>> formats;
for (const auto &pixelformat : pixelformats) {
/*
* Kernels prior to v5.7 incorrectly report support for RGB888,
* but it isn't functional within the pipeline.
*/
if (data->media_->version() < KERNEL_VERSION(5, 7, 0)) {
if (pixelformat.first != formats::BGR888) {
LOG(VIMC, Info)
<< "Skipping unsupported pixel format "
<< pixelformat.first;
continue;
}
}
/* Prior to v5.16, the scaler hardcodes a x3 scale-up ratio. */
Size minSize{ kMinSize };
if (data->media_->version() < KERNEL_VERSION(5, 16, 0))
minSize *= 3;
std::vector<SizeRange> sizes{ { minSize, kMaxSize } };
formats[pixelformat.first] = sizes;
}
StreamConfiguration cfg(formats);
cfg.pixelFormat = formats::BGR888;
cfg.size = { 1920, 1080 };
cfg.bufferCount = 4;
config->addConfiguration(cfg);
config->validate();
return config;
}
int PipelineHandlerVimc::configure(Camera *camera, CameraConfiguration *config)
{
VimcCameraData *data = cameraData(camera);
StreamConfiguration &cfg = config->at(0);
int ret;
/*
* Prior to v5.16, the scaler hardcodes a x3 scale-up ratio. For newer
* kernels, use a sensor resolution of 1920x1080 and let the scaler
* produce the requested stream size.
*/
Size sensorSize{ 1920, 1080 };
if (data->media_->version() < KERNEL_VERSION(5, 16, 0))
sensorSize = { cfg.size.width / 3, cfg.size.height / 3 };
V4L2SubdeviceFormat subformat = {};
subformat.code = MEDIA_BUS_FMT_SGRBG8_1X8;
subformat.size = sensorSize;
ret = data->sensor_->setFormat(&subformat);
if (ret)
return ret;
ret = data->debayer_->setFormat(0, &subformat);
if (ret)
return ret;
subformat.code = pixelformats.find(cfg.pixelFormat)->second;
ret = data->debayer_->setFormat(1, &subformat);
if (ret)
return ret;
ret = data->scaler_->setFormat(0, &subformat);
if (ret)
return ret;
if (data->media_->version() >= KERNEL_VERSION(5, 6, 0)) {
Rectangle crop{ 0, 0, subformat.size };
ret = data->scaler_->setSelection(0, V4L2_SEL_TGT_CROP, &crop);
if (ret)
return ret;
}
subformat.size = cfg.size;
ret = data->scaler_->setFormat(1, &subformat);
if (ret)
return ret;
V4L2DeviceFormat format;
format.fourcc = data->video_->toV4L2PixelFormat(cfg.pixelFormat);
format.size = cfg.size;
ret = data->video_->setFormat(&format);
if (ret)
return ret;
if (format.size != cfg.size ||
format.fourcc != data->video_->toV4L2PixelFormat(cfg.pixelFormat))
return -EINVAL;
/*
* Format has to be set on the raw capture video node, otherwise the
* vimc driver will fail pipeline validation.
*/
format.fourcc = V4L2PixelFormat(V4L2_PIX_FMT_SGRBG8);
format.size = sensorSize;
ret = data->raw_->setFormat(&format);
if (ret)
return ret;
cfg.setStream(&data->stream_);
if (data->ipa_) {
/* Inform IPA of stream configuration and sensor controls. */
std::map<unsigned int, IPAStream> streamConfig;
streamConfig.emplace(std::piecewise_construct,
std::forward_as_tuple(0),
std::forward_as_tuple(cfg.pixelFormat, cfg.size));
std::map<unsigned int, ControlInfoMap> entityControls;
entityControls.emplace(0, data->sensor_->controls());
IPACameraSensorInfo sensorInfo;
data->sensor_->sensorInfo(&sensorInfo);
data->ipa_->configure(sensorInfo, streamConfig, entityControls);
}
return 0;
}
int PipelineHandlerVimc::exportFrameBuffers(Camera *camera, Stream *stream,
std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
VimcCameraData *data = cameraData(camera);
unsigned int count = stream->configuration().bufferCount;
return data->video_->exportBuffers(count, buffers);
}
int PipelineHandlerVimc::start(Camera *camera, [[maybe_unused]] const ControlList *controls)
{
VimcCameraData *data = cameraData(camera);
unsigned int count = data->stream_.configuration().bufferCount;
int ret = data->video_->importBuffers(count);
if (ret < 0)
return ret;
/* Map the mock IPA buffers to VIMC IPA to exercise IPC code paths. */
std::vector<IPABuffer> ipaBuffers;
for (auto [i, buffer] : utils::enumerate(data->mockIPABufs_)) {
buffer->setCookie(i + 1);
ipaBuffers.emplace_back(buffer->cookie(), buffer->planes());
}
data->ipa_->mapBuffers(ipaBuffers);
ret = data->ipa_->start();
if (ret) {
data->video_->releaseBuffers();
return ret;
}
ret = data->video_->streamOn();
if (ret < 0) {
data->ipa_->stop();
data->video_->releaseBuffers();
return ret;
}
return 0;
}
void PipelineHandlerVimc::stopDevice(Camera *camera)
{
VimcCameraData *data = cameraData(camera);
data->video_->streamOff();
std::vector<unsigned int> ids;
for (const std::unique_ptr<FrameBuffer> &buffer : data->mockIPABufs_)
ids.push_back(buffer->cookie());
data->ipa_->unmapBuffers(ids);
data->ipa_->stop();
data->video_->releaseBuffers();
}
int PipelineHandlerVimc::processControls(VimcCameraData *data, Request *request)
{
ControlList controls(data->sensor_->controls());
for (const auto &it : request->controls()) {
unsigned int id = it.first;
unsigned int offset;
uint32_t cid;
if (id == controls::Brightness) {
cid = V4L2_CID_BRIGHTNESS;
offset = 128;
} else if (id == controls::Contrast) {
cid = V4L2_CID_CONTRAST;
offset = 0;
} else if (id == controls::Saturation) {
cid = V4L2_CID_SATURATION;
offset = 0;
} else {
continue;
}
int32_t value = lroundf(it.second.get<float>() * 128 + offset);
controls.set(cid, std::clamp(value, 0, 255));
}
for (const auto &ctrl : controls)
LOG(VIMC, Debug)
<< "Setting control " << utils::hex(ctrl.first)
<< " to " << ctrl.second.toString();
int ret = data->sensor_->setControls(&controls);
if (ret) {
LOG(VIMC, Error) << "Failed to set controls: " << ret;
return ret < 0 ? ret : -EINVAL;
}
return ret;
}
int PipelineHandlerVimc::queueRequestDevice(Camera *camera, Request *request)
{
VimcCameraData *data = cameraData(camera);
FrameBuffer *buffer = request->findBuffer(&data->stream_);
if (!buffer) {
LOG(VIMC, Error)
<< "Attempt to queue request with invalid stream";
return -ENOENT;
}
int ret = processControls(data, request);
if (ret < 0)
return ret;
ret = data->video_->queueBuffer(buffer);
if (ret < 0)
return ret;
data->ipa_->queueRequest(request->sequence(), request->controls());
return 0;
}
bool PipelineHandlerVimc::match(DeviceEnumerator *enumerator)
{
DeviceMatch dm("vimc");
dm.add("Raw Capture 0");
dm.add("Raw Capture 1");
dm.add("RGB/YUV Capture");
dm.add("Sensor A");
dm.add("Sensor B");
dm.add("Debayer A");
dm.add("Debayer B");
dm.add("RGB/YUV Input");
dm.add("Scaler");
MediaDevice *media = acquireMediaDevice(enumerator, dm);
if (!media)
return false;
std::unique_ptr<VimcCameraData> data = std::make_unique<VimcCameraData>(this, media);
/* Locate and open the capture video node. */
if (data->init())
return false;
data->ipa_ = IPAManager::createIPA<ipa::vimc::IPAProxyVimc>(this, 0, 0);
if (!data->ipa_) {
LOG(VIMC, Error) << "no matching IPA found";
return false;
}
data->ipa_->paramsBufferReady.connect(data.get(), &VimcCameraData::paramsBufferReady);
std::string conf = data->ipa_->configurationFile("vimc.conf");
Flags<ipa::vimc::TestFlag> inFlags = ipa::vimc::TestFlag::Flag2;
Flags<ipa::vimc::TestFlag> outFlags;
data->ipa_->init(IPASettings{ conf, data->sensor_->model() },
ipa::vimc::IPAOperationInit, inFlags, &outFlags);
LOG(VIMC, Debug)
<< "Flag 1 was "
<< (outFlags & ipa::vimc::TestFlag::Flag1 ? "" : "not ")
<< "set";
/* Create and register the camera. */
std::set<Stream *> streams{ &data->stream_ };
const std::string &id = data->sensor_->id();
std::shared_ptr<Camera> camera =
Camera::create(std::move(data), id, streams);
registerCamera(std::move(camera));
return true;
}
int VimcCameraData::init()
{
int ret;
ret = media_->disableLinks();
if (ret < 0)
return ret;
MediaLink *link = media_->link("Debayer B", 1, "Scaler", 0);
if (!link)
return -ENODEV;
ret = link->setEnabled(true);
if (ret < 0)
return ret;
/* Create and open the camera sensor, debayer, scaler and video device. */
sensor_ = std::make_unique<CameraSensor>(media_->getEntityByName("Sensor B"));
ret = sensor_->init();
if (ret)
return ret;
debayer_ = V4L2Subdevice::fromEntityName(media_, "Debayer B");
if (debayer_->open())
return -ENODEV;
scaler_ = V4L2Subdevice::fromEntityName(media_, "Scaler");
if (scaler_->open())
return -ENODEV;
video_ = V4L2VideoDevice::fromEntityName(media_, "RGB/YUV Capture");
if (video_->open())
return -ENODEV;
video_->bufferReady.connect(this, &VimcCameraData::bufferReady);
raw_ = V4L2VideoDevice::fromEntityName(media_, "Raw Capture 1");
if (raw_->open())
return -ENODEV;
ret = allocateMockIPABuffers();
if (ret < 0) {
LOG(VIMC, Warning) << "Cannot allocate mock IPA buffers";
return ret;
}
/* Initialise the supported controls. */
const ControlInfoMap &controls = sensor_->controls();
ControlInfoMap::Map ctrls;
for (const auto &ctrl : controls) {
const ControlId *id;
ControlInfo info;
switch (ctrl.first->id()) {
case V4L2_CID_BRIGHTNESS:
id = &controls::Brightness;
info = ControlInfo{ { -1.0f }, { 1.0f }, { 0.0f } };
break;
case V4L2_CID_CONTRAST:
id = &controls::Contrast;
info = ControlInfo{ { 0.0f }, { 2.0f }, { 1.0f } };
break;
case V4L2_CID_SATURATION:
id = &controls::Saturation;
info = ControlInfo{ { 0.0f }, { 2.0f }, { 1.0f } };
break;
default:
continue;
}
ctrls.emplace(id, info);
}
controlInfo_ = ControlInfoMap(std::move(ctrls), controls::controls);
/* Initialize the camera properties. */
properties_ = sensor_->properties();
return 0;
}
void VimcCameraData::bufferReady(FrameBuffer *buffer)
{
PipelineHandlerVimc *pipe =
static_cast<PipelineHandlerVimc *>(this->pipe());
Request *request = buffer->request();
/* If the buffer is cancelled, force completion of the whole request. */
if (buffer->metadata().status == FrameMetadata::FrameCancelled) {
for (auto it : request->buffers()) {
FrameBuffer *b = it.second;
b->_d()->cancel();
pipe->completeBuffer(request, b);
}
pipe->completeRequest(request);
return;
}
/* Record the sensor's timestamp in the request metadata. */
request->metadata().set(controls::SensorTimestamp,
buffer->metadata().timestamp);
pipe->completeBuffer(request, buffer);
pipe->completeRequest(request);
ipa_->fillParamsBuffer(request->sequence(), mockIPABufs_[0]->cookie());
}
int VimcCameraData::allocateMockIPABuffers()
{
constexpr unsigned int kBufCount = 2;
V4L2DeviceFormat format;
format.fourcc = video_->toV4L2PixelFormat(formats::BGR888);
format.size = Size(160, 120);
int ret = video_->setFormat(&format);
if (ret < 0)
return ret;
return video_->exportBuffers(kBufCount, &mockIPABufs_);
}
void VimcCameraData::paramsBufferReady([[maybe_unused]] unsigned int id,
[[maybe_unused]] const Flags<ipa::vimc::TestFlag> flags)
{
}
REGISTER_PIPELINE_HANDLER(PipelineHandlerVimc, "vimc")
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera/pipeline/rpi | repos/libcamera/src/libcamera/pipeline/rpi/common/rpi_stream.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2020, Raspberry Pi Ltd
*
* Raspberry Pi device stream abstraction class.
*/
#pragma once
#include <optional>
#include <queue>
#include <string>
#include <unordered_map>
#include <vector>
#include <libcamera/base/flags.h>
#include <libcamera/base/utils.h>
#include <libcamera/stream.h>
#include "libcamera/internal/mapped_framebuffer.h"
#include "libcamera/internal/v4l2_videodevice.h"
namespace libcamera {
namespace RPi {
enum BufferMask {
MaskID = 0x00ffff,
MaskStats = 0x010000,
MaskEmbeddedData = 0x020000,
MaskBayerData = 0x040000,
};
struct BufferObject {
BufferObject(FrameBuffer *b, bool requiresMmap)
: buffer(b), mapped(std::nullopt)
{
if (requiresMmap)
mapped = std::make_optional<MappedFrameBuffer>
(b, MappedFrameBuffer::MapFlag::ReadWrite);
}
FrameBuffer *buffer;
std::optional<MappedFrameBuffer> mapped;
};
using BufferMap = std::unordered_map<unsigned int, BufferObject>;
/*
* Device stream abstraction for either an internal or external stream.
* Used for both Unicam and the ISP.
*/
class Stream : public libcamera::Stream
{
public:
enum class StreamFlag {
None = 0,
/*
* Indicates that this stream only imports buffers, e.g. the ISP
* input stream.
*/
ImportOnly = (1 << 0),
/*
* Indicates that this stream is active externally, i.e. the
* buffers might be provided by (and returned to) the application.
*/
External = (1 << 1),
/*
* Indicates that the stream buffers need to be mmaped and returned
* to the pipeline handler when requested.
*/
RequiresMmap = (1 << 2),
/*
* Indicates a stream that needs buffers recycled every frame internally
* in the pipeline handler, e.g. stitch, TDN, config. All buffer
* management will be handled by the pipeline handler.
*/
Recurrent = (1 << 3),
/*
* Indicates that the output stream needs a software format conversion
* to be applied after ISP processing.
*/
Needs32bitConv = (1 << 4),
};
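/*
 * The flags may be combined, e.g. a purely illustrative stream could be
 * constructed (hypothetically) as:
 *
 *   Stream stream("Example", entity,
 *                 StreamFlag::ImportOnly | StreamFlag::Recurrent);
 */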
using StreamFlags = Flags<StreamFlag>;
Stream()
: flags_(StreamFlag::None), id_(0), swDownscale_(0)
{
}
Stream(const char *name, MediaEntity *dev, StreamFlags flags = StreamFlag::None)
: flags_(flags), name_(name),
dev_(std::make_unique<V4L2VideoDevice>(dev)), id_(0),
swDownscale_(0)
{
}
void setFlags(StreamFlags flags);
void clearFlags(StreamFlags flags);
StreamFlags getFlags() const;
V4L2VideoDevice *dev() const;
const std::string &name() const;
void resetBuffers();
unsigned int swDownscale() const;
void setSwDownscale(unsigned int swDownscale);
void setExportedBuffers(std::vector<std::unique_ptr<FrameBuffer>> *buffers);
const BufferMap &getBuffers() const;
unsigned int getBufferId(FrameBuffer *buffer) const;
void setExportedBuffer(FrameBuffer *buffer);
int prepareBuffers(unsigned int count);
int queueBuffer(FrameBuffer *buffer);
void returnBuffer(FrameBuffer *buffer);
const BufferObject &getBuffer(unsigned int id);
const BufferObject &acquireBuffer();
int queueAllBuffers();
void releaseBuffers();
/* For error handling. */
static const BufferObject errorBufferObject;
private:
void bufferEmplace(unsigned int id, FrameBuffer *buffer);
void clearBuffers();
int queueToDevice(FrameBuffer *buffer);
StreamFlags flags_;
/* Stream name identifier. */
std::string name_;
/* The actual device stream. */
std::unique_ptr<V4L2VideoDevice> dev_;
/* Tracks a unique id key for the bufferMap_ */
unsigned int id_;
/* Power of 2 greater than one if software downscaling will be required. */
unsigned int swDownscale_;
/* All frame buffers associated with this device stream. */
BufferMap bufferMap_;
/*
* List of frame buffers that we can use if none have been provided by
* the application for external streams. This is populated by the
* buffers exported internally.
*/
std::queue<FrameBuffer *> availableBuffers_;
/*
* List of frame buffers that are to be queued into the device from a Request.
* A nullptr indicates any internal buffer can be used (from availableBuffers_),
* whereas a valid pointer indicates an external buffer to be queued.
*
* Ordering buffers to be queued is important here as it must match the
* requests coming from the application.
*/
std::queue<FrameBuffer *> requestBuffers_;
/*
* This is a list of buffers exported internally. Need to keep this around
* as the stream needs to maintain ownership of these buffers.
*/
std::vector<std::unique_ptr<FrameBuffer>> internalBuffers_;
};
/*
* The following class is just a convenient (and typesafe) array of device
* streams indexed with an enum class.
*/
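/*
 * Usage sketch (hypothetical enum, for illustration only):
 *
 *   enum class Isp { Input, Output0, Output1, Stats };
 *   Device<Isp, 4> isp;
 *   isp[Isp::Output0].dev()->queueBuffer(buffer);
 */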
template<typename E, std::size_t N>
class Device : public std::array<class Stream, N>
{
public:
Stream &operator[](E e)
{
return std::array<class Stream, N>::operator[](utils::to_underlying(e));
}
const Stream &operator[](E e) const
{
return std::array<class Stream, N>::operator[](utils::to_underlying(e));
}
};
} /* namespace RPi */
LIBCAMERA_FLAGS_ENABLE_OPERATORS(RPi::Stream::StreamFlag)
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera/pipeline/rpi | repos/libcamera/src/libcamera/pipeline/rpi/common/rpi_stream.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2020, Raspberry Pi Ltd
*
* Raspberry Pi device stream abstraction class.
*/
#include "rpi_stream.h"
#include <algorithm>
#include <tuple>
#include <utility>
#include <libcamera/base/log.h>
/* Maximum number of buffer slots to allocate in the V4L2 device driver. */
static constexpr unsigned int maxV4L2BufferCount = 32;
namespace libcamera {
LOG_DEFINE_CATEGORY(RPISTREAM)
namespace RPi {
const BufferObject Stream::errorBufferObject{ nullptr, false };
void Stream::setFlags(StreamFlags flags)
{
/* We don't want dynamic mmapping. */
ASSERT(!(flags & StreamFlag::RequiresMmap));
flags_ |= flags;
/* Import streams cannot be external. */
ASSERT(!(flags_ & StreamFlag::External) || !(flags_ & StreamFlag::ImportOnly));
}
void Stream::clearFlags(StreamFlags flags)
{
/* We don't want dynamic mmapping. */
ASSERT(!(flags & StreamFlag::RequiresMmap));
flags_ &= ~flags;
}
RPi::Stream::StreamFlags Stream::getFlags() const
{
return flags_;
}
V4L2VideoDevice *Stream::dev() const
{
return dev_.get();
}
const std::string &Stream::name() const
{
return name_;
}
unsigned int Stream::swDownscale() const
{
return swDownscale_;
}
void Stream::setSwDownscale(unsigned int swDownscale)
{
swDownscale_ = swDownscale;
}
void Stream::resetBuffers()
{
/* Add all internal buffers to the queue of usable buffers. */
availableBuffers_ = {};
for (auto const &buffer : internalBuffers_)
availableBuffers_.push(buffer.get());
}
void Stream::setExportedBuffers(std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
for (auto const &buffer : *buffers)
bufferEmplace(++id_, buffer.get());
}
const BufferMap &Stream::getBuffers() const
{
return bufferMap_;
}
unsigned int Stream::getBufferId(FrameBuffer *buffer) const
{
if (flags_ & StreamFlag::ImportOnly)
return 0;
/* Find the buffer in the map, and return the buffer id. */
auto it = std::find_if(bufferMap_.begin(), bufferMap_.end(),
[&buffer](auto const &p) { return p.second.buffer == buffer; });
if (it == bufferMap_.end())
return 0;
return it->first;
}
void Stream::setExportedBuffer(FrameBuffer *buffer)
{
bufferEmplace(++id_, buffer);
}
int Stream::prepareBuffers(unsigned int count)
{
int ret;
if (!(flags_ & StreamFlag::ImportOnly)) {
/* Export some frame buffers for internal use. */
ret = dev_->exportBuffers(count, &internalBuffers_);
if (ret < 0)
return ret;
/* Add these exported buffers to the internal/external buffer list. */
setExportedBuffers(&internalBuffers_);
resetBuffers();
}
return dev_->importBuffers(maxV4L2BufferCount);
}
int Stream::queueBuffer(FrameBuffer *buffer)
{
/*
* A nullptr buffer implies an external stream, but no external
* buffer has been supplied in the Request. So, pick one from the
* availableBuffers_ queue.
*/
if (!buffer) {
if (availableBuffers_.empty()) {
LOG(RPISTREAM, Debug) << "No buffers available for "
<< name_;
/*
* Note that we need to queue an internal buffer as soon
* as one becomes available.
*/
requestBuffers_.push(nullptr);
return 0;
}
buffer = availableBuffers_.front();
availableBuffers_.pop();
}
/*
* If no earlier requests are pending to be queued we can go ahead and
* queue this buffer into the device.
*/
if (requestBuffers_.empty())
return queueToDevice(buffer);
/*
* There are earlier Request buffers to be queued, so this buffer must go
* on the waiting list.
*/
requestBuffers_.push(buffer);
return 0;
}
void Stream::returnBuffer(FrameBuffer *buffer)
{
if (!(flags_ & StreamFlag::External) && !(flags_ & StreamFlag::Recurrent)) {
/* For internal buffers, simply requeue back to the device. */
queueToDevice(buffer);
return;
}
/* Push this buffer back into the queue to be used again. */
availableBuffers_.push(buffer);
/*
* Do we have any Request buffers that are waiting to be queued?
* If so, do it now as availableBuffers_ will not be empty.
*/
while (!requestBuffers_.empty()) {
FrameBuffer *requestBuffer = requestBuffers_.front();
if (!requestBuffer) {
/*
* We want to queue an internal buffer, but none
* are available. Can't do anything, quit the loop.
*/
if (availableBuffers_.empty())
break;
/*
* We want to queue an internal buffer, and at least one
* is available.
*/
requestBuffer = availableBuffers_.front();
availableBuffers_.pop();
}
requestBuffers_.pop();
queueToDevice(requestBuffer);
}
}
const BufferObject &Stream::getBuffer(unsigned int id)
{
auto const &it = bufferMap_.find(id);
if (it == bufferMap_.end())
return errorBufferObject;
return it->second;
}
const BufferObject &Stream::acquireBuffer()
{
/* No id provided, so pick up the next available buffer if possible. */
if (availableBuffers_.empty())
return errorBufferObject;
unsigned int id = getBufferId(availableBuffers_.front());
availableBuffers_.pop();
return getBuffer(id);
}
int Stream::queueAllBuffers()
{
int ret;
if ((flags_ & StreamFlag::External) || (flags_ & StreamFlag::Recurrent))
return 0;
while (!availableBuffers_.empty()) {
ret = queueBuffer(availableBuffers_.front());
if (ret < 0)
return ret;
availableBuffers_.pop();
}
return 0;
}
void Stream::releaseBuffers()
{
dev_->releaseBuffers();
clearBuffers();
}
void Stream::bufferEmplace(unsigned int id, FrameBuffer *buffer)
{
if (flags_ & StreamFlag::RequiresMmap)
bufferMap_.emplace(std::piecewise_construct, std::forward_as_tuple(id),
std::forward_as_tuple(buffer, true));
else
bufferMap_.emplace(std::piecewise_construct, std::forward_as_tuple(id),
std::forward_as_tuple(buffer, false));
}
void Stream::clearBuffers()
{
availableBuffers_ = std::queue<FrameBuffer *>{};
requestBuffers_ = std::queue<FrameBuffer *>{};
internalBuffers_.clear();
bufferMap_.clear();
id_ = 0;
}
int Stream::queueToDevice(FrameBuffer *buffer)
{
LOG(RPISTREAM, Debug) << "Queuing buffer " << getBufferId(buffer)
<< " for " << name_;
int ret = dev_->queueBuffer(buffer);
if (ret)
LOG(RPISTREAM, Error) << "Failed to queue buffer for "
<< name_;
return ret;
}
} /* namespace RPi */
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera/pipeline/rpi | repos/libcamera/src/libcamera/pipeline/rpi/common/delayed_controls.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2020, Raspberry Pi Ltd
*
* Helper to deal with controls that take effect with a delay
*
* Note: This has been forked from the libcamera core implementation.
*/
#include "delayed_controls.h"
#include <libcamera/base/log.h>
#include <libcamera/controls.h>
#include "libcamera/internal/v4l2_device.h"
/**
* \file delayed_controls.h
* \brief Helper to deal with controls that take effect with a delay
*/
namespace libcamera {
LOG_DEFINE_CATEGORY(RPiDelayedControls)
namespace RPi {
/**
* \class DelayedControls
* \brief Helper to deal with controls that take effect with a delay
*
* Some sensor controls take effect with a delay as the sensor needs time to
* adjust, for example exposure and analog gain. This is a helper class to deal
* with such controls and the intended users are pipeline handlers.
*
 * The idea is to extend the concept of the buffer depth that an application
 * needs to maintain for a pipeline so that it also covers controls. Just as
 * with buffer depth, if the application keeps the number of queued requests
 * above the control depth, the controls are guaranteed to take effect for the
 * correct request. The control depth is determined by the control with the
 * greatest delay.
*/
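/*
 * Minimal usage sketch (hypothetical delay values, for illustration only;
 * a real pipeline handler derives the delays from sensor-specific helpers):
 *
 *   std::unordered_map<uint32_t, DelayedControls::ControlParams> params = {
 *       { V4L2_CID_ANALOGUE_GAIN, { 1, false } },
 *       { V4L2_CID_EXPOSURE, { 2, false } },
 *       { V4L2_CID_VBLANK, { 2, true } },
 *   };
 *   DelayedControls delayed(sensorDevice, params);
 *
 *   delayed.push(request->controls(), cookie);        // when queuing a request
 *   delayed.applyControls(sequence);                   // at start of frame
 *   auto [ctrls, reqCookie] = delayed.get(sequence);   // when a frame completes
 */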
/**
* \struct DelayedControls::ControlParams
* \brief Parameters associated with controls handled by the \a DelayedControls
* helper class
*
* \var ControlParams::delay
* \brief Frame delay from setting the control on a sensor device to when it is
* consumed during framing.
*
* \var ControlParams::priorityWrite
* \brief Flag to indicate that this control must be applied ahead of, and
* separately from the other controls.
*
* Typically set for the \a V4L2_CID_VBLANK control so that the device driver
* does not reject \a V4L2_CID_EXPOSURE control values that may be outside of
* the existing vertical blanking specified bounds, but are within the new
* blanking bounds.
*/
/**
* \brief Construct a DelayedControls instance
* \param[in] device The V4L2 device the controls have to be applied to
* \param[in] controlParams Map of the numerical V4L2 control ids to their
* associated control parameters.
*
 * The control parameters comprise delays (in frames) and a priority write
 * flag. If this flag is set, the relevant control is written separately from,
 * and ahead of, the rest of the batched controls.
 *
 * Only controls specified in \a controlParams are handled. If it's desired to
 * mix delayed controls and controls that take effect immediately, the
 * immediate controls must be listed in the \a controlParams map with a delay
 * value of 0.
*/
DelayedControls::DelayedControls(V4L2Device *device,
const std::unordered_map<uint32_t, ControlParams> &controlParams)
: device_(device), maxDelay_(0)
{
const ControlInfoMap &controls = device_->controls();
/*
* Create a map of control ids to delays for controls exposed by the
* device.
*/
for (auto const ¶m : controlParams) {
auto it = controls.find(param.first);
if (it == controls.end()) {
LOG(RPiDelayedControls, Error)
<< "Delay request for control id "
<< utils::hex(param.first)
<< " but control is not exposed by device "
<< device_->deviceNode();
continue;
}
const ControlId *id = it->first;
controlParams_[id] = param.second;
LOG(RPiDelayedControls, Debug)
<< "Set a delay of " << controlParams_[id].delay
<< " and priority write flag " << controlParams_[id].priorityWrite
<< " for " << id->name();
maxDelay_ = std::max(maxDelay_, controlParams_[id].delay);
}
reset(0);
}
/**
* \brief Reset state machine
*
* Resets the state machine to a starting position based on control values
* retrieved from the device.
*/
void DelayedControls::reset(unsigned int cookie)
{
queueCount_ = 1;
writeCount_ = 0;
cookies_[0] = cookie;
/* Retrieve controls as reported by the device. */
std::vector<uint32_t> ids;
for (auto const ¶m : controlParams_)
ids.push_back(param.first->id());
ControlList controls = device_->getControls(ids);
/* Seed the control queue with the controls reported by the device. */
values_.clear();
for (const auto &ctrl : controls) {
const ControlId *id = device_->controls().idmap().at(ctrl.first);
/*
 * Do not mark this control value as updated, it does not need
 * to be written to the device on startup.
*/
values_[id][0] = Info(ctrl.second, false);
}
}
/**
* \brief Push a set of controls on the queue
* \param[in] controls List of controls to add to the device queue
*
* Push a set of controls to the control queue. This increases the control queue
* depth by one.
*
* \returns true if \a controls are accepted, or false otherwise
*/
bool DelayedControls::push(const ControlList &controls, const unsigned int cookie)
{
/* Copy state from previous frame. */
for (auto &ctrl : values_) {
Info &info = ctrl.second[queueCount_];
info = values_[ctrl.first][queueCount_ - 1];
info.updated = false;
}
/* Update with new controls. */
const ControlIdMap &idmap = device_->controls().idmap();
for (const auto &control : controls) {
const auto &it = idmap.find(control.first);
if (it == idmap.end()) {
LOG(RPiDelayedControls, Warning)
<< "Unknown control " << control.first;
return false;
}
const ControlId *id = it->second;
if (controlParams_.find(id) == controlParams_.end())
return false;
Info &info = values_[id][queueCount_];
info = Info(control.second);
LOG(RPiDelayedControls, Debug)
<< "Queuing " << id->name()
<< " to " << info.toString()
<< " at index " << queueCount_;
}
cookies_[queueCount_] = cookie;
queueCount_++;
return true;
}
/**
* \brief Read back controls in effect at a sequence number
* \param[in] sequence The sequence number to get controls for
*
 * Read back which controls were in effect at a specific sequence number. The
 * history is a ring buffer of 16 entries where new and old values coexist.
 * It is the caller's responsibility not to read sequence numbers so old that
 * they have been pushed out of the history.
 *
 * Historic values are evicted by pushing new values onto the queue using
 * push(). The maximum history from the current sequence number that yields
 * valid values is thus 16 minus the number of controls pushed.
*
* \return The controls at \a sequence number
*/
std::pair<ControlList, unsigned int> DelayedControls::get(uint32_t sequence)
{
unsigned int index = std::max<int>(0, sequence - maxDelay_);
ControlList out(device_->controls());
for (const auto &ctrl : values_) {
const ControlId *id = ctrl.first;
const Info &info = ctrl.second[index];
out.set(id->id(), info);
LOG(RPiDelayedControls, Debug)
<< "Reading " << id->name()
<< " to " << info.toString()
<< " at index " << index;
}
return { out, cookies_[index] };
}
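/*
 * Sketch of the indexing above: with a maximum control delay of two frames,
 * get(12) reads history index 10 and returns those values together with the
 * cookie recorded when that entry was pushed.
 */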
/**
* \brief Inform DelayedControls of the start of a new frame
* \param[in] sequence Sequence number of the frame that started
*
* Inform the state machine that a new frame has started and of its sequence
 * number. Any user of these helpers is responsible for informing the helper
 * about the start of every frame. This can easily be connected to the start
 * of exposure (SOE) V4L2 event.
*/
void DelayedControls::applyControls(uint32_t sequence)
{
LOG(RPiDelayedControls, Debug) << "frame " << sequence << " started";
/*
* Create control list peeking ahead in the value queue to ensure
* values are set in time to satisfy the sensor delay.
*/
ControlList out(device_->controls());
for (auto &ctrl : values_) {
const ControlId *id = ctrl.first;
unsigned int delayDiff = maxDelay_ - controlParams_[id].delay;
unsigned int index = std::max<int>(0, writeCount_ - delayDiff);
Info &info = ctrl.second[index];
if (info.updated) {
if (controlParams_[id].priorityWrite) {
/*
* This control must be written now, it could
* affect validity of the other controls.
*/
ControlList priority(device_->controls());
priority.set(id->id(), info);
device_->setControls(&priority);
} else {
/*
* Batch up the list of controls and write them
* at the end of the function.
*/
out.set(id->id(), info);
}
LOG(RPiDelayedControls, Debug)
<< "Setting " << id->name()
<< " to " << info.toString()
<< " at index " << index;
/* Done with this update, so mark as completed. */
info.updated = false;
}
}
writeCount_ = sequence + 1;
while (writeCount_ > queueCount_) {
LOG(RPiDelayedControls, Debug)
<< "Queue is empty, auto queue no-op.";
push({}, cookies_[queueCount_ - 1]);
}
device_->setControls(&out);
}
} /* namespace RPi */
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera/pipeline/rpi | repos/libcamera/src/libcamera/pipeline/rpi/common/pipeline_base.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019-2023, Raspberry Pi Ltd
*
* Pipeline handler base class for Raspberry Pi devices
*/
#include "pipeline_base.h"
#include <chrono>
#include <linux/media-bus-format.h>
#include <linux/videodev2.h>
#include <libcamera/base/file.h>
#include <libcamera/base/utils.h>
#include <libcamera/formats.h>
#include <libcamera/logging.h>
#include <libcamera/property_ids.h>
#include "libcamera/internal/camera_lens.h"
#include "libcamera/internal/ipa_manager.h"
#include "libcamera/internal/v4l2_subdevice.h"
using namespace std::chrono_literals;
namespace libcamera {
using namespace RPi;
LOG_DEFINE_CATEGORY(RPI)
using StreamFlag = RPi::Stream::StreamFlag;
namespace {
constexpr unsigned int defaultRawBitDepth = 12;
PixelFormat mbusCodeToPixelFormat(unsigned int code,
BayerFormat::Packing packingReq)
{
BayerFormat bayer = BayerFormat::fromMbusCode(code);
ASSERT(bayer.isValid());
bayer.packing = packingReq;
PixelFormat pix = bayer.toPixelFormat();
/*
* Not all formats (e.g. 8-bit or 16-bit Bayer formats) can have packed
* variants. So if the PixelFormat returns as invalid, use the non-packed
* conversion instead.
*/
if (!pix.isValid()) {
bayer.packing = BayerFormat::Packing::None;
pix = bayer.toPixelFormat();
}
return pix;
}
bool isMonoSensor(std::unique_ptr<CameraSensor> &sensor)
{
unsigned int mbusCode = sensor->mbusCodes()[0];
const BayerFormat &bayer = BayerFormat::fromMbusCode(mbusCode);
return bayer.order == BayerFormat::Order::MONO;
}
const std::vector<ColorSpace> validColorSpaces = {
ColorSpace::Sycc,
ColorSpace::Smpte170m,
ColorSpace::Rec709
};
std::optional<ColorSpace> findValidColorSpace(const ColorSpace &colourSpace)
{
for (auto cs : validColorSpaces) {
if (colourSpace.primaries == cs.primaries &&
colourSpace.transferFunction == cs.transferFunction)
return cs;
}
return std::nullopt;
}
} /* namespace */
/*
* Raspberry Pi drivers expect the following colour spaces:
* - V4L2_COLORSPACE_RAW for raw streams.
* - One of V4L2_COLORSPACE_JPEG, V4L2_COLORSPACE_SMPTE170M, V4L2_COLORSPACE_REC709 for
* non-raw streams. Other fields such as transfer function, YCbCr encoding and
* quantisation are not used.
*
* The libcamera colour spaces that we wish to use corresponding to these are therefore:
* - ColorSpace::Raw for V4L2_COLORSPACE_RAW
* - ColorSpace::Sycc for V4L2_COLORSPACE_JPEG
* - ColorSpace::Smpte170m for V4L2_COLORSPACE_SMPTE170M
* - ColorSpace::Rec709 for V4L2_COLORSPACE_REC709
*/
CameraConfiguration::Status RPiCameraConfiguration::validateColorSpaces([[maybe_unused]] ColorSpaceFlags flags)
{
Status status = Valid;
yuvColorSpace_.reset();
for (auto cfg : config_) {
/* First fix up raw streams to have the "raw" colour space. */
if (PipelineHandlerBase::isRaw(cfg.pixelFormat)) {
/* If there was no value here, that doesn't count as "adjusted". */
if (cfg.colorSpace && cfg.colorSpace != ColorSpace::Raw)
status = Adjusted;
cfg.colorSpace = ColorSpace::Raw;
continue;
}
/* Next we need to find our shared colour space. The first valid one will do. */
if (cfg.colorSpace && !yuvColorSpace_)
yuvColorSpace_ = findValidColorSpace(cfg.colorSpace.value());
}
/* If no colour space was given anywhere, choose sYCC. */
if (!yuvColorSpace_)
yuvColorSpace_ = ColorSpace::Sycc;
/* Note the version of this that any RGB streams will have to use. */
rgbColorSpace_ = yuvColorSpace_;
rgbColorSpace_->ycbcrEncoding = ColorSpace::YcbcrEncoding::None;
rgbColorSpace_->range = ColorSpace::Range::Full;
/* Go through the streams again and force everyone to the same colour space. */
for (auto cfg : config_) {
if (cfg.colorSpace == ColorSpace::Raw)
continue;
if (PipelineHandlerBase::isYuv(cfg.pixelFormat) && cfg.colorSpace != yuvColorSpace_) {
/* Again, no value means "not adjusted". */
if (cfg.colorSpace)
status = Adjusted;
cfg.colorSpace = yuvColorSpace_;
}
if (PipelineHandlerBase::isRgb(cfg.pixelFormat) && cfg.colorSpace != rgbColorSpace_) {
/* Be nice, and let the YUV version count as non-adjusted too. */
if (cfg.colorSpace && cfg.colorSpace != yuvColorSpace_)
status = Adjusted;
cfg.colorSpace = rgbColorSpace_;
}
}
return status;
}
CameraConfiguration::Status RPiCameraConfiguration::validate()
{
Status status = Valid;
if (config_.empty())
return Invalid;
/*
* Make sure that if a sensor configuration has been requested it
* is valid.
*/
if (sensorConfig && !sensorConfig->isValid()) {
LOG(RPI, Error) << "Invalid sensor configuration request";
return Invalid;
}
status = validateColorSpaces(ColorSpaceFlag::StreamsShareColorSpace);
/*
* Validate the requested transform against the sensor capabilities and
* rotation and store the final combined transform that configure() will
* need to apply to the sensor to save us working it out again.
*/
Orientation requestedOrientation = orientation;
combinedTransform_ = data_->sensor_->computeTransform(&orientation);
if (orientation != requestedOrientation)
status = Adjusted;
rawStreams_.clear();
outStreams_.clear();
for (const auto &[index, cfg] : utils::enumerate(config_)) {
if (PipelineHandlerBase::isRaw(cfg.pixelFormat))
rawStreams_.emplace_back(index, &cfg);
else
outStreams_.emplace_back(index, &cfg);
}
/* Sort the streams so the highest resolution is first. */
std::sort(rawStreams_.begin(), rawStreams_.end(),
[](auto &l, auto &r) { return l.cfg->size > r.cfg->size; });
std::sort(outStreams_.begin(), outStreams_.end(),
[](auto &l, auto &r) { return l.cfg->size > r.cfg->size; });
/* Compute the sensor's format then do any platform specific fixups. */
unsigned int bitDepth;
Size sensorSize;
if (sensorConfig) {
/* Use the application provided sensor configuration. */
bitDepth = sensorConfig->bitDepth;
sensorSize = sensorConfig->outputSize;
} else if (!rawStreams_.empty()) {
/* Use the RAW stream format and size. */
BayerFormat bayerFormat = BayerFormat::fromPixelFormat(rawStreams_[0].cfg->pixelFormat);
bitDepth = bayerFormat.bitDepth;
sensorSize = rawStreams_[0].cfg->size;
} else {
bitDepth = defaultRawBitDepth;
sensorSize = outStreams_[0].cfg->size;
}
sensorFormat_ = data_->findBestFormat(sensorSize, bitDepth);
/*
* If a sensor configuration has been requested, it should apply
* without modifications.
*/
if (sensorConfig) {
BayerFormat bayer = BayerFormat::fromMbusCode(sensorFormat_.code);
if (bayer.bitDepth != sensorConfig->bitDepth ||
sensorFormat_.size != sensorConfig->outputSize) {
LOG(RPI, Error) << "Invalid sensor configuration: "
<< "bitDepth/size mismatch";
return Invalid;
}
}
/* Start with some initial generic RAW stream adjustments. */
for (auto &raw : rawStreams_) {
StreamConfiguration *rawStream = raw.cfg;
/*
* Some sensors change their Bayer order when they are
* h-flipped or v-flipped, according to the transform. Adjust
* the RAW stream to match the computed sensor format by
* applying the sensor Bayer order resulting from the transform
* to the user request.
*/
BayerFormat cfgBayer = BayerFormat::fromPixelFormat(rawStream->pixelFormat);
cfgBayer.order = data_->sensor_->bayerOrder(combinedTransform_);
if (rawStream->pixelFormat != cfgBayer.toPixelFormat()) {
rawStream->pixelFormat = cfgBayer.toPixelFormat();
status = Adjusted;
}
}
/* Do any platform specific fixups. */
Status st = data_->platformValidate(this);
if (st == Invalid)
return Invalid;
else if (st == Adjusted)
status = Adjusted;
/* Further fixups on the RAW streams. */
for (auto &raw : rawStreams_) {
int ret = raw.dev->tryFormat(&raw.format);
if (ret)
return Invalid;
if (RPi::PipelineHandlerBase::updateStreamConfig(raw.cfg, raw.format))
status = Adjusted;
}
/* Further fixups on the ISP output streams. */
for (auto &out : outStreams_) {
/*
* We want to send the associated YCbCr info through to the driver.
*
* But for RGB streams, the YCbCr info gets overwritten on the way back
* so we must check against what the stream cfg says, not what we actually
* requested (which carefully included the YCbCr info)!
*/
out.format.colorSpace = yuvColorSpace_;
LOG(RPI, Debug)
<< "Try color space " << ColorSpace::toString(out.cfg->colorSpace);
int ret = out.dev->tryFormat(&out.format);
if (ret)
return Invalid;
if (RPi::PipelineHandlerBase::updateStreamConfig(out.cfg, out.format))
status = Adjusted;
}
return status;
}
bool PipelineHandlerBase::isRgb(const PixelFormat &pixFmt)
{
const PixelFormatInfo &info = PixelFormatInfo::info(pixFmt);
return info.colourEncoding == PixelFormatInfo::ColourEncodingRGB;
}
bool PipelineHandlerBase::isYuv(const PixelFormat &pixFmt)
{
/* The code below would return true for raw mono streams, so weed those out first. */
if (PipelineHandlerBase::isRaw(pixFmt))
return false;
const PixelFormatInfo &info = PixelFormatInfo::info(pixFmt);
return info.colourEncoding == PixelFormatInfo::ColourEncodingYUV;
}
bool PipelineHandlerBase::isRaw(const PixelFormat &pixFmt)
{
/* This test works for both Bayer and raw mono formats. */
return BayerFormat::fromPixelFormat(pixFmt).isValid();
}
/*
 * Adjust a StreamConfiguration's fields to match a video device format.
* Returns true if the StreamConfiguration has been adjusted.
*/
bool PipelineHandlerBase::updateStreamConfig(StreamConfiguration *stream,
const V4L2DeviceFormat &format)
{
const PixelFormat &pixFormat = format.fourcc.toPixelFormat();
bool adjusted = false;
if (stream->pixelFormat != pixFormat || stream->size != format.size) {
stream->pixelFormat = pixFormat;
stream->size = format.size;
adjusted = true;
}
if (stream->colorSpace != format.colorSpace) {
LOG(RPI, Debug)
<< "Color space changed from "
<< ColorSpace::toString(stream->colorSpace) << " to "
<< ColorSpace::toString(format.colorSpace);
stream->colorSpace = format.colorSpace;
adjusted = true;
}
stream->stride = format.planes[0].bpl;
stream->frameSize = format.planes[0].size;
return adjusted;
}
/*
 * Populate and return a video device format using a StreamConfiguration.
 */
V4L2DeviceFormat PipelineHandlerBase::toV4L2DeviceFormat(const V4L2VideoDevice *dev,
const StreamConfiguration *stream)
{
V4L2DeviceFormat deviceFormat;
const PixelFormatInfo &info = PixelFormatInfo::info(stream->pixelFormat);
deviceFormat.planesCount = info.numPlanes();
deviceFormat.fourcc = dev->toV4L2PixelFormat(stream->pixelFormat);
deviceFormat.size = stream->size;
deviceFormat.planes[0].bpl = stream->stride;
deviceFormat.colorSpace = stream->colorSpace;
return deviceFormat;
}
V4L2DeviceFormat PipelineHandlerBase::toV4L2DeviceFormat(const V4L2VideoDevice *dev,
const V4L2SubdeviceFormat &format,
BayerFormat::Packing packingReq)
{
unsigned int code = format.code;
const PixelFormat pix = mbusCodeToPixelFormat(code, packingReq);
V4L2DeviceFormat deviceFormat;
deviceFormat.fourcc = dev->toV4L2PixelFormat(pix);
deviceFormat.size = format.size;
deviceFormat.colorSpace = format.colorSpace;
return deviceFormat;
}
std::unique_ptr<CameraConfiguration>
PipelineHandlerBase::generateConfiguration(Camera *camera, Span<const StreamRole> roles)
{
CameraData *data = cameraData(camera);
std::unique_ptr<CameraConfiguration> config =
std::make_unique<RPiCameraConfiguration>(data);
V4L2SubdeviceFormat sensorFormat;
unsigned int bufferCount;
PixelFormat pixelFormat;
V4L2VideoDevice::Formats fmts;
Size size;
std::optional<ColorSpace> colorSpace;
if (roles.empty())
return config;
Size sensorSize = data->sensor_->resolution();
for (const StreamRole role : roles) {
switch (role) {
case StreamRole::Raw:
size = sensorSize;
sensorFormat = data->findBestFormat(size, defaultRawBitDepth);
pixelFormat = mbusCodeToPixelFormat(sensorFormat.code,
BayerFormat::Packing::CSI2);
ASSERT(pixelFormat.isValid());
colorSpace = ColorSpace::Raw;
bufferCount = 2;
break;
case StreamRole::StillCapture:
fmts = data->ispFormats();
pixelFormat = formats::YUV420;
/*
* Still image codecs usually expect the sYCC color space.
* Even RGB codecs will be fine as the RGB we get with the
* sYCC color space is the same as sRGB.
*/
colorSpace = ColorSpace::Sycc;
/* Return the largest sensor resolution. */
size = sensorSize;
bufferCount = 1;
break;
case StreamRole::VideoRecording:
/*
* The colour denoise algorithm requires the analysis
* image, produced by the second ISP output, to be in
* YUV420 format. Select this format as the default, to
* maximize chances that it will be picked by
* applications and enable usage of the colour denoise
* algorithm.
*/
fmts = data->ispFormats();
pixelFormat = formats::YUV420;
/*
* Choose a color space appropriate for video recording.
* Rec.709 will be a good default for HD resolutions.
*/
colorSpace = ColorSpace::Rec709;
size = { 1920, 1080 };
bufferCount = 4;
break;
case StreamRole::Viewfinder:
fmts = data->ispFormats();
pixelFormat = formats::XRGB8888;
colorSpace = ColorSpace::Sycc;
size = { 800, 600 };
bufferCount = 4;
break;
default:
LOG(RPI, Error) << "Requested stream role not supported: "
<< role;
return nullptr;
}
std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
if (role == StreamRole::Raw) {
/* Translate the MBUS codes to a PixelFormat. */
for (const auto &format : data->sensorFormats_) {
PixelFormat pf = mbusCodeToPixelFormat(format.first,
BayerFormat::Packing::CSI2);
if (pf.isValid())
deviceFormats.emplace(std::piecewise_construct, std::forward_as_tuple(pf),
std::forward_as_tuple(format.second.begin(), format.second.end()));
}
} else {
/*
* Translate the V4L2PixelFormat to PixelFormat. Note that we
* limit the recommended largest ISP output size to match the
* sensor resolution.
*/
for (const auto &format : fmts) {
PixelFormat pf = format.first.toPixelFormat();
/*
* Some V4L2 formats translate to the same pixel format (e.g. YU12, YM12
* both give YUV420). We must avoid duplicating the range in this case.
*/
if (pf.isValid() && deviceFormats.find(pf) == deviceFormats.end()) {
const SizeRange &ispSizes = format.second[0];
deviceFormats[pf].emplace_back(ispSizes.min, sensorSize,
ispSizes.hStep, ispSizes.vStep);
}
}
}
/* Add the stream format based on the device node used for the use case. */
StreamFormats formats(deviceFormats);
StreamConfiguration cfg(formats);
cfg.size = size;
cfg.pixelFormat = pixelFormat;
cfg.colorSpace = colorSpace;
cfg.bufferCount = bufferCount;
config->addConfiguration(cfg);
}
return config;
}
int PipelineHandlerBase::configure(Camera *camera, CameraConfiguration *config)
{
CameraData *data = cameraData(camera);
int ret;
/* Start by freeing all buffers and reset the stream states. */
data->freeBuffers();
for (auto const stream : data->streams_)
stream->clearFlags(StreamFlag::External);
/*
* Apply the format on the sensor with any cached transform.
*
* If the application has provided a sensor configuration apply it
* instead of just applying a format.
*/
RPiCameraConfiguration *rpiConfig = static_cast<RPiCameraConfiguration *>(config);
V4L2SubdeviceFormat *sensorFormat = &rpiConfig->sensorFormat_;
if (rpiConfig->sensorConfig) {
ret = data->sensor_->applyConfiguration(*rpiConfig->sensorConfig,
rpiConfig->combinedTransform_,
sensorFormat);
} else {
ret = data->sensor_->setFormat(sensorFormat,
rpiConfig->combinedTransform_);
}
if (ret)
return ret;
/*
* Platform specific internal stream configuration. This also assigns
* external streams which get configured below.
*/
ret = data->platformConfigure(rpiConfig);
if (ret)
return ret;
ipa::RPi::ConfigResult result;
ret = data->configureIPA(config, &result);
if (ret) {
LOG(RPI, Error) << "Failed to configure the IPA: " << ret;
return ret;
}
/*
* Set the scaler crop to the value we are using (scaled to native sensor
* coordinates).
*/
data->scalerCrop_ = data->scaleIspCrop(data->ispCrop_);
/*
* Update the ScalerCropMaximum to the correct value for this camera mode.
* For us, it's the same as the "analogue crop".
*
* \todo Make this property the ScalerCrop maximum value when dynamic
* controls are available and set it at validate() time
*/
data->properties_.set(properties::ScalerCropMaximum, data->sensorInfo_.analogCrop);
/* Store the mode sensitivity for the application. */
data->properties_.set(properties::SensorSensitivity, result.modeSensitivity);
/* Update the controls that the Raspberry Pi IPA can handle. */
ControlInfoMap::Map ctrlMap;
for (auto const &c : result.controlInfo)
ctrlMap.emplace(c.first, c.second);
/* Add the ScalerCrop control limits based on the current mode. */
Rectangle ispMinCrop = data->scaleIspCrop(Rectangle(data->ispMinCropSize_));
ctrlMap[&controls::ScalerCrop] = ControlInfo(ispMinCrop, data->sensorInfo_.analogCrop, data->scalerCrop_);
data->controlInfo_ = ControlInfoMap(std::move(ctrlMap), result.controlInfo.idmap());
/* Setup the Video Mux/Bridge entities. */
for (auto &[device, link] : data->bridgeDevices_) {
/*
* Start by disabling all the sink pad links on the devices in the
* cascade, with the exception of the link connecting the device.
*/
for (const MediaPad *p : device->entity()->pads()) {
if (!(p->flags() & MEDIA_PAD_FL_SINK))
continue;
for (MediaLink *l : p->links()) {
if (l != link)
l->setEnabled(false);
}
}
/*
* Next, enable the entity -> entity links, and setup the pad format.
*
* \todo Some bridge devices may change the media bus code, so we
* ought to read the source pad format and propagate it to the sink pad.
*/
link->setEnabled(true);
const MediaPad *sinkPad = link->sink();
ret = device->setFormat(sinkPad->index(), sensorFormat);
if (ret) {
LOG(RPI, Error) << "Failed to set format on " << device->entity()->name()
<< " pad " << sinkPad->index()
<< " with format " << *sensorFormat
<< ": " << ret;
return ret;
}
LOG(RPI, Debug) << "Configured media link on device " << device->entity()->name()
<< " on pad " << sinkPad->index();
}
return 0;
}
int PipelineHandlerBase::exportFrameBuffers([[maybe_unused]] Camera *camera, libcamera::Stream *stream,
std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
RPi::Stream *s = static_cast<RPi::Stream *>(stream);
unsigned int count = stream->configuration().bufferCount;
int ret = s->dev()->exportBuffers(count, buffers);
s->setExportedBuffers(buffers);
return ret;
}
int PipelineHandlerBase::start(Camera *camera, const ControlList *controls)
{
CameraData *data = cameraData(camera);
int ret;
/* Check if a ScalerCrop control was specified. */
if (controls)
data->applyScalerCrop(*controls);
/* Start the IPA. */
ipa::RPi::StartResult result;
data->ipa_->start(controls ? *controls : ControlList{ controls::controls },
&result);
/* Apply any gain/exposure settings that the IPA may have passed back. */
if (!result.controls.empty())
data->setSensorControls(result.controls);
/* Configure the number of dropped frames required on startup. */
data->dropFrameCount_ = data->config_.disableStartupFrameDrops
? 0 : result.dropFrameCount;
for (auto const stream : data->streams_)
stream->resetBuffers();
if (!data->buffersAllocated_) {
/* Allocate buffers for internal pipeline usage. */
ret = prepareBuffers(camera);
if (ret) {
LOG(RPI, Error) << "Failed to allocate buffers";
data->freeBuffers();
stop(camera);
return ret;
}
data->buffersAllocated_ = true;
}
/* We need to set the dropFrameCount_ before queueing buffers. */
ret = queueAllBuffers(camera);
if (ret) {
LOG(RPI, Error) << "Failed to queue buffers";
stop(camera);
return ret;
}
/*
* Reset the delayed controls with the gain and exposure values set by
* the IPA.
*/
data->delayedCtrls_->reset(0);
data->state_ = CameraData::State::Idle;
/* Enable SOF event generation. */
data->frontendDevice()->setFrameStartEnabled(true);
data->platformStart();
/* Start all streams. */
for (auto const stream : data->streams_) {
ret = stream->dev()->streamOn();
if (ret) {
stop(camera);
return ret;
}
}
return 0;
}
void PipelineHandlerBase::stopDevice(Camera *camera)
{
CameraData *data = cameraData(camera);
data->state_ = CameraData::State::Stopped;
data->platformStop();
for (auto const stream : data->streams_)
stream->dev()->streamOff();
/* Disable SOF event generation. */
data->frontendDevice()->setFrameStartEnabled(false);
data->clearIncompleteRequests();
/* Stop the IPA. */
data->ipa_->stop();
}
void PipelineHandlerBase::releaseDevice(Camera *camera)
{
CameraData *data = cameraData(camera);
data->freeBuffers();
}
int PipelineHandlerBase::queueRequestDevice(Camera *camera, Request *request)
{
CameraData *data = cameraData(camera);
if (!data->isRunning())
return -EINVAL;
LOG(RPI, Debug) << "queueRequestDevice: New request sequence: "
<< request->sequence();
/* Push all buffers supplied in the Request to the respective streams. */
for (auto stream : data->streams_) {
if (!(stream->getFlags() & StreamFlag::External))
continue;
FrameBuffer *buffer = request->findBuffer(stream);
if (buffer && !stream->getBufferId(buffer)) {
/*
* This buffer is not recognised, so it must have been allocated
* outside the v4l2 device. Store it in the stream buffer list
* so we can track it.
*/
stream->setExportedBuffer(buffer);
}
/*
* If no buffer is provided by the request for this stream, we
* queue a nullptr to the stream to signify that it must use an
* internally allocated buffer for this capture request. This
* buffer will not be given back to the application, but is used
* to support the internal pipeline flow.
*
* The below queueBuffer() call will do nothing if there are not
* enough internal buffers allocated, but this will be handled by
* queuing the request for buffers in the RPiStream object.
*/
int ret = stream->queueBuffer(buffer);
if (ret)
return ret;
}
/* Push the request to the back of the queue. */
data->requestQueue_.push(request);
data->handleState();
return 0;
}
int PipelineHandlerBase::registerCamera(std::unique_ptr<RPi::CameraData> &cameraData,
MediaDevice *frontend, const std::string &frontendName,
MediaDevice *backend, MediaEntity *sensorEntity)
{
CameraData *data = cameraData.get();
int ret;
data->sensor_ = std::make_unique<CameraSensor>(sensorEntity);
if (!data->sensor_)
return -EINVAL;
if (data->sensor_->init())
return -EINVAL;
/* Populate the map of sensor supported formats and sizes. */
for (auto const mbusCode : data->sensor_->mbusCodes())
data->sensorFormats_.emplace(mbusCode,
data->sensor_->sizes(mbusCode));
/*
* Enumerate all the Video Mux/Bridge devices across the sensor -> frontend
* chain. There may be a cascade of devices in this chain!
*/
MediaLink *link = sensorEntity->getPadByIndex(0)->links()[0];
data->enumerateVideoDevices(link, frontendName);
ipa::RPi::InitResult result;
if (data->loadIPA(&result)) {
LOG(RPI, Error) << "Failed to load a suitable IPA library";
return -EINVAL;
}
/*
* Setup our delayed control writer with the sensor default
* gain and exposure delays. Mark VBLANK for priority write.
*/
std::unordered_map<uint32_t, RPi::DelayedControls::ControlParams> params = {
{ V4L2_CID_ANALOGUE_GAIN, { result.sensorConfig.gainDelay, false } },
{ V4L2_CID_EXPOSURE, { result.sensorConfig.exposureDelay, false } },
{ V4L2_CID_HBLANK, { result.sensorConfig.hblankDelay, false } },
{ V4L2_CID_VBLANK, { result.sensorConfig.vblankDelay, true } }
};
data->delayedCtrls_ = std::make_unique<RPi::DelayedControls>(data->sensor_->device(), params);
data->sensorMetadata_ = result.sensorConfig.sensorMetadata;
/* Register initial controls that the Raspberry Pi IPA can handle. */
data->controlInfo_ = std::move(result.controlInfo);
/* Initialize the camera properties. */
data->properties_ = data->sensor_->properties();
/*
* The V4L2_CID_NOTIFY_GAINS control, if present, is used to inform the
* sensor of the colour gains. It is defined to be a linear gain where
* the default value represents a gain of exactly one.
*/
auto it = data->sensor_->controls().find(V4L2_CID_NOTIFY_GAINS);
if (it != data->sensor_->controls().end())
data->notifyGainsUnity_ = it->second.def().get<int32_t>();
/*
* Set a default value for the ScalerCropMaximum property to show
* that we support its use, however, initialise it to zero because
* it's not meaningful until a camera mode has been chosen.
*/
data->properties_.set(properties::ScalerCropMaximum, Rectangle{});
ret = platformRegister(cameraData, frontend, backend);
if (ret)
return ret;
ret = data->loadPipelineConfiguration();
if (ret) {
LOG(RPI, Error) << "Unable to load pipeline configuration";
return ret;
}
/* Setup the general IPA signal handlers. */
data->frontendDevice()->dequeueTimeout.connect(data, &RPi::CameraData::cameraTimeout);
data->frontendDevice()->frameStart.connect(data, &RPi::CameraData::frameStarted);
data->ipa_->setDelayedControls.connect(data, &CameraData::setDelayedControls);
data->ipa_->setLensControls.connect(data, &CameraData::setLensControls);
data->ipa_->metadataReady.connect(data, &CameraData::metadataReady);
return 0;
}
void PipelineHandlerBase::mapBuffers(Camera *camera, const BufferMap &buffers, unsigned int mask)
{
CameraData *data = cameraData(camera);
std::vector<IPABuffer> bufferIds;
/*
* Link the FrameBuffers with the id (key value) in the map stored in
* the RPi stream object - along with an identifier mask.
*
* This will allow us to identify buffers passed between the pipeline
* handler and the IPA.
*/
for (auto const &it : buffers) {
bufferIds.push_back(IPABuffer(mask | it.first,
it.second.buffer->planes()));
data->bufferIds_.insert(mask | it.first);
}
data->ipa_->mapBuffers(bufferIds);
}
int PipelineHandlerBase::queueAllBuffers(Camera *camera)
{
CameraData *data = cameraData(camera);
int ret;
for (auto const stream : data->streams_) {
if (!(stream->getFlags() & StreamFlag::External)) {
ret = stream->queueAllBuffers();
if (ret < 0)
return ret;
} else {
/*
* For external streams, we must queue up a set of internal
* buffers to handle the number of drop frames requested by
* the IPA. This is done by passing nullptr in queueBuffer().
*
* The below queueBuffer() call will do nothing if there
* are not enough internal buffers allocated, but this will
* be handled by queuing the request for buffers in the
* RPiStream object.
*/
unsigned int i;
for (i = 0; i < data->dropFrameCount_; i++) {
ret = stream->queueBuffer(nullptr);
if (ret)
return ret;
}
}
}
return 0;
}
double CameraData::scoreFormat(double desired, double actual) const
{
double score = desired - actual;
/* Being larger than requested is penalised much less than being smaller. */
if (score < 0.0)
score = (-score) / 8;
/* Penalise non-exact matches. */
if (actual != desired)
score *= 2;
return score;
}
V4L2SubdeviceFormat CameraData::findBestFormat(const Size &req, unsigned int bitDepth) const
{
double bestScore = std::numeric_limits<double>::max(), score;
V4L2SubdeviceFormat bestFormat;
bestFormat.colorSpace = ColorSpace::Raw;
constexpr float penaltyAr = 1500.0;
constexpr float penaltyBitDepth = 500.0;
/* Calculate the closest/best mode from the user requested size. */
for (const auto &iter : sensorFormats_) {
const unsigned int mbusCode = iter.first;
const PixelFormat format = mbusCodeToPixelFormat(mbusCode,
BayerFormat::Packing::None);
const PixelFormatInfo &info = PixelFormatInfo::info(format);
for (const Size &size : iter.second) {
double reqAr = static_cast<double>(req.width) / req.height;
double fmtAr = static_cast<double>(size.width) / size.height;
/* Score the dimensions for closeness. */
score = scoreFormat(req.width, size.width);
score += scoreFormat(req.height, size.height);
score += penaltyAr * scoreFormat(reqAr, fmtAr);
/* Add any penalties... this is not an exact science! */
score += utils::abs_diff(info.bitsPerPixel, bitDepth) * penaltyBitDepth;
if (score <= bestScore) {
bestScore = score;
bestFormat.code = mbusCode;
bestFormat.size = size;
}
LOG(RPI, Debug) << "Format: " << size
<< " fmt " << format
<< " Score: " << score
<< " (best " << bestScore << ")";
}
}
return bestFormat;
}
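/*
 * Illustrative example of the scoring above (the sensor modes are made up
 * for the example; real candidates come from sensorFormats_): for a request
 * of 1920x1080 at 12 bits,
 *
 *   1920x1080 @ 12-bit scores 0 (exact match in size, aspect ratio and depth),
 *   2028x1520 @ 12-bit scores ~27 (width) + ~110 (height) + ~1331 (aspect
 *   ratio penalty) ~= 1468,
 *   1280x720  @ 10-bit scores 1280 + 720 + 0 (same aspect ratio) + 1000
 *   (bit depth penalty) = 3000.
 *
 * The lowest score wins, so the exact 1920x1080 mode would be chosen.
 */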
void CameraData::freeBuffers()
{
if (ipa_) {
/*
* Copy the buffer ids from the unordered_set to a vector to
* pass to the IPA.
*/
std::vector<unsigned int> bufferIds(bufferIds_.begin(),
bufferIds_.end());
ipa_->unmapBuffers(bufferIds);
bufferIds_.clear();
}
for (auto const stream : streams_)
stream->releaseBuffers();
platformFreeBuffers();
buffersAllocated_ = false;
}
/*
* enumerateVideoDevices() iterates over the Media Controller topology, starting
* at the sensor and finishing at the frontend. For each sensor, CameraData stores
* a unique list of any intermediate video mux or bridge devices connected in a
* cascade, together with the entity to entity link.
*
* Entity pad configuration and link enabling happens at the end of configure().
* We first disable all pad links on each entity device in the chain, and then
* selectively enable the specific links needed to connect the sensor to the
* frontend across all intermediate muxes and bridges.
*
* In the cascaded topology below, if Sensor1 is used, the Mux2 -> Mux1 link
* will be disabled, and Sensor1 -> Mux1 -> Frontend links enabled. Alternatively,
* if Sensor3 is used, the Sensor2 -> Mux2 and Sensor1 -> Mux1 links are disabled,
* and Sensor3 -> Mux2 -> Mux1 -> Frontend links are enabled. All other links will
* remain unchanged.
*
*                 +----------+
*                 |    FE    |
*                 +-----^----+
*                       |
*                   +---+---+
*                   |  Mux1 |<------+
*                   +--^----+       |
*                      |            |
*                +-----+---+    +---+---+
*                | Sensor1 |    |  Mux2 |<--+
*                +---------+    +-^-----+   |
*                                 |         |
*                         +-------+-+   +---+-----+
*                         | Sensor2 |   | Sensor3 |
*                         +---------+   +---------+
*/
void CameraData::enumerateVideoDevices(MediaLink *link, const std::string &frontend)
{
const MediaPad *sinkPad = link->sink();
const MediaEntity *entity = sinkPad->entity();
bool frontendFound = false;
/* We only deal with Video Mux and Bridge devices in cascade. */
if (entity->function() != MEDIA_ENT_F_VID_MUX &&
entity->function() != MEDIA_ENT_F_VID_IF_BRIDGE)
return;
/* Find the source pad for this Video Mux or Bridge device. */
const MediaPad *sourcePad = nullptr;
for (const MediaPad *pad : entity->pads()) {
if (pad->flags() & MEDIA_PAD_FL_SOURCE) {
/*
* We can only deal with devices that have a single source
* pad. If this device has multiple source pads, ignore it
* and this branch in the cascade.
*/
if (sourcePad)
return;
sourcePad = pad;
}
}
LOG(RPI, Debug) << "Found video mux device " << entity->name()
<< " linked to sink pad " << sinkPad->index();
bridgeDevices_.emplace_back(std::make_unique<V4L2Subdevice>(entity), link);
bridgeDevices_.back().first->open();
/*
* Iterate through all the sink pad links down the cascade to find any
* other Video Mux and Bridge devices.
*/
for (MediaLink *l : sourcePad->links()) {
enumerateVideoDevices(l, frontend);
/* Once we reach the Frontend entity, we are done. */
if (l->sink()->entity()->name() == frontend) {
frontendFound = true;
break;
}
}
/* This identifies the end of our entity enumeration recursion. */
if (link->source()->entity()->function() == MEDIA_ENT_F_CAM_SENSOR) {
/*
* If the frontend is not at the end of this cascade, we cannot
* configure this topology automatically, so remove all entity
* references.
*/
if (!frontendFound) {
LOG(RPI, Warning) << "Cannot automatically configure this MC topology!";
bridgeDevices_.clear();
}
}
}
int CameraData::loadPipelineConfiguration()
{
config_ = {
.disableStartupFrameDrops = false,
.cameraTimeoutValue = 0,
};
/* Initial configuration of the platform, in case no config file is present */
platformPipelineConfigure({});
char const *configFromEnv = utils::secure_getenv("LIBCAMERA_RPI_CONFIG_FILE");
if (!configFromEnv || *configFromEnv == '\0')
return 0;
std::string filename = std::string(configFromEnv);
File file(filename);
if (!file.open(File::OpenModeFlag::ReadOnly)) {
LOG(RPI, Warning) << "Failed to open configuration file '" << filename << "'"
<< ", using defaults";
return 0;
}
LOG(RPI, Info) << "Using configuration file '" << filename << "'";
std::unique_ptr<YamlObject> root = YamlParser::parse(file);
if (!root) {
LOG(RPI, Warning) << "Failed to parse configuration file, using defaults";
return 0;
}
std::optional<double> ver = (*root)["version"].get<double>();
if (!ver || *ver != 1.0) {
LOG(RPI, Warning) << "Unexpected configuration file version reported: "
<< *ver;
return 0;
}
const YamlObject &phConfig = (*root)["pipeline_handler"];
config_.disableStartupFrameDrops =
phConfig["disable_startup_frame_drops"].get<bool>(config_.disableStartupFrameDrops);
config_.cameraTimeoutValue =
phConfig["camera_timeout_value_ms"].get<unsigned int>(config_.cameraTimeoutValue);
if (config_.cameraTimeoutValue) {
/* Disable the IPA signal to control timeout and set the user requested value. */
ipa_->setCameraTimeout.disconnect();
frontendDevice()->setDequeueTimeout(config_.cameraTimeoutValue * 1ms);
}
return platformPipelineConfigure(root);
}
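/*
 * For reference, a minimal configuration file understood by the code above
 * might look as follows (values are illustrative defaults, not a shipped
 * example):
 *
 *   version: 1.0
 *   pipeline_handler:
 *       disable_startup_frame_drops: false
 *       camera_timeout_value_ms: 0
 *
 * Platform specific keys are handled separately by platformPipelineConfigure().
 */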
int CameraData::loadIPA(ipa::RPi::InitResult *result)
{
int ret;
ipa_ = IPAManager::createIPA<ipa::RPi::IPAProxyRPi>(pipe(), 1, 1);
if (!ipa_)
return -ENOENT;
/*
* The configuration (tuning file) is made from the sensor name unless
* the environment variable overrides it.
*/
std::string configurationFile;
char const *configFromEnv = utils::secure_getenv("LIBCAMERA_RPI_TUNING_FILE");
if (!configFromEnv || *configFromEnv == '\0') {
std::string model = sensor_->model();
if (isMonoSensor(sensor_))
model += "_mono";
configurationFile = ipa_->configurationFile(model + ".json");
} else {
configurationFile = std::string(configFromEnv);
}
IPASettings settings(configurationFile, sensor_->model());
ipa::RPi::InitParams params;
ret = sensor_->sensorInfo(¶ms.sensorInfo);
if (ret) {
LOG(RPI, Error) << "Failed to retrieve camera sensor info";
return ret;
}
params.lensPresent = !!sensor_->focusLens();
ret = platformInitIpa(params);
if (ret)
return ret;
return ipa_->init(settings, params, result);
}
int CameraData::configureIPA(const CameraConfiguration *config, ipa::RPi::ConfigResult *result)
{
ipa::RPi::ConfigParams params;
int ret;
params.sensorControls = sensor_->controls();
if (sensor_->focusLens())
params.lensControls = sensor_->focusLens()->controls();
ret = platformConfigureIpa(params);
if (ret)
return ret;
/* We store the IPACameraSensorInfo for digital zoom calculations. */
ret = sensor_->sensorInfo(&sensorInfo_);
if (ret) {
LOG(RPI, Error) << "Failed to retrieve camera sensor info";
return ret;
}
/* Always send the user transform to the IPA. */
Transform transform = config->orientation / Orientation::Rotate0;
params.transform = static_cast<unsigned int>(transform);
/* Ready the IPA - it must know about the sensor resolution. */
ret = ipa_->configure(sensorInfo_, params, result);
if (ret < 0) {
LOG(RPI, Error) << "IPA configuration failed!";
return -EPIPE;
}
if (!result->sensorControls.empty())
setSensorControls(result->sensorControls);
if (!result->lensControls.empty())
setLensControls(result->lensControls);
return 0;
}
void CameraData::metadataReady(const ControlList &metadata)
{
if (!isRunning())
return;
/* Add to the Request metadata buffer what the IPA has provided. */
Request *request = requestQueue_.front();
request->metadata().merge(metadata);
/*
* Inform the sensor of the latest colour gains if it has the
* V4L2_CID_NOTIFY_GAINS control (which means notifyGainsUnity_ is set).
*/
const auto &colourGains = metadata.get(libcamera::controls::ColourGains);
if (notifyGainsUnity_ && colourGains) {
/* The control wants linear gains in the order B, Gb, Gr, R. */
ControlList ctrls(sensor_->controls());
std::array<int32_t, 4> gains{
static_cast<int32_t>((*colourGains)[1] * *notifyGainsUnity_),
*notifyGainsUnity_,
*notifyGainsUnity_,
static_cast<int32_t>((*colourGains)[0] * *notifyGainsUnity_)
};
ctrls.set(V4L2_CID_NOTIFY_GAINS, Span<const int32_t>{ gains });
sensor_->setControls(&ctrls);
}
}
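/*
 * Worked example of the gain notification above, using illustrative values:
 * with notifyGainsUnity_ = 128 and ColourGains = { 1.5, 2.0 } (red, blue),
 * the control receives { B, Gb, Gr, R } = { 256, 128, 128, 192 }.
 */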
void CameraData::setDelayedControls(const ControlList &controls, uint32_t delayContext)
{
if (!delayedCtrls_->push(controls, delayContext))
LOG(RPI, Error) << "V4L2 DelayedControl set failed";
}
void CameraData::setLensControls(const ControlList &controls)
{
CameraLens *lens = sensor_->focusLens();
if (lens && controls.contains(V4L2_CID_FOCUS_ABSOLUTE)) {
ControlValue const &focusValue = controls.get(V4L2_CID_FOCUS_ABSOLUTE);
lens->setFocusPosition(focusValue.get<int32_t>());
}
}
void CameraData::setSensorControls(ControlList &controls)
{
/*
* We need to ensure that if both VBLANK and EXPOSURE are present, the
* former must be written ahead of, and separately from EXPOSURE to avoid
* V4L2 rejecting the latter. This is identical to what DelayedControls
* does with the priority write flag.
*
* As a consequence of the below logic, VBLANK gets set twice, and we
* rely on the v4l2 framework to not pass the second control set to the
* driver as the actual control value has not changed.
*/
if (controls.contains(V4L2_CID_EXPOSURE) && controls.contains(V4L2_CID_VBLANK)) {
ControlList vblank_ctrl;
vblank_ctrl.set(V4L2_CID_VBLANK, controls.get(V4L2_CID_VBLANK));
sensor_->setControls(&vblank_ctrl);
}
sensor_->setControls(&controls);
}
Rectangle CameraData::scaleIspCrop(const Rectangle &ispCrop) const
{
/*
* Scale a crop rectangle defined in the ISP's coordinates into native sensor
* coordinates.
*/
Rectangle nativeCrop = ispCrop.scaledBy(sensorInfo_.analogCrop.size(),
sensorInfo_.outputSize);
nativeCrop.translateBy(sensorInfo_.analogCrop.topLeft());
return nativeCrop;
}
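/*
 * Example with illustrative numbers: for an analogue crop of
 * (0, 0)/4056x3040 and an ISP output size of 2028x1520 (a 2x2 binned mode),
 * an ISP crop of (507, 380)/1014x760 scales to (1014, 760)/2028x1520 in
 * native sensor coordinates.
 */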
void CameraData::applyScalerCrop(const ControlList &controls)
{
const auto &scalerCrop = controls.get<Rectangle>(controls::ScalerCrop);
if (scalerCrop) {
Rectangle nativeCrop = *scalerCrop;
if (!nativeCrop.width || !nativeCrop.height)
nativeCrop = { 0, 0, 1, 1 };
/* Create a version of the crop scaled to ISP (camera mode) pixels. */
Rectangle ispCrop = nativeCrop.translatedBy(-sensorInfo_.analogCrop.topLeft());
ispCrop.scaleBy(sensorInfo_.outputSize, sensorInfo_.analogCrop.size());
/*
* The crop that we set must be:
* 1. At least as big as ispMinCropSize_, once that's been
* enlarged to the same aspect ratio.
* 2. With the same mid-point, if possible.
* 3. But it can't go outside the sensor area.
*/
Size minSize = ispMinCropSize_.expandedToAspectRatio(nativeCrop.size());
Size size = ispCrop.size().expandedTo(minSize);
ispCrop = size.centeredTo(ispCrop.center()).enclosedIn(Rectangle(sensorInfo_.outputSize));
if (ispCrop != ispCrop_) {
ispCrop_ = ispCrop;
platformSetIspCrop();
/*
* Also update the ScalerCrop in the metadata with what we actually
* used. But we must first rescale that from ISP (camera mode) pixels
* back into sensor native pixels.
*/
scalerCrop_ = scaleIspCrop(ispCrop_);
}
}
}
void CameraData::cameraTimeout()
{
LOG(RPI, Error) << "Camera frontend has timed out!";
LOG(RPI, Error) << "Please check that your camera sensor connector is attached securely.";
LOG(RPI, Error) << "Alternatively, try another cable and/or sensor.";
state_ = CameraData::State::Error;
platformStop();
/*
* To allow the application to attempt a recovery from this timeout,
* stop all devices streaming, and return any outstanding requests as
* incomplete and cancelled.
*/
for (auto const stream : streams_)
stream->dev()->streamOff();
clearIncompleteRequests();
}
void CameraData::frameStarted(uint32_t sequence)
{
LOG(RPI, Debug) << "Frame start " << sequence;
/* Write any controls for the next frame as soon as we can. */
delayedCtrls_->applyControls(sequence);
}
void CameraData::clearIncompleteRequests()
{
/*
* All outstanding requests (and associated buffers) must be returned
* back to the application.
*/
while (!requestQueue_.empty()) {
Request *request = requestQueue_.front();
for (auto &b : request->buffers()) {
FrameBuffer *buffer = b.second;
/*
* Has the buffer already been handed back to the
* request? If not, do so now.
*/
if (buffer->request()) {
buffer->_d()->cancel();
pipe()->completeBuffer(request, buffer);
}
}
pipe()->completeRequest(request);
requestQueue_.pop();
}
}
void CameraData::handleStreamBuffer(FrameBuffer *buffer, RPi::Stream *stream)
{
/*
* It is possible to be here without a pending request, so check
* that we actually have one to action, otherwise we just return
* buffer back to the stream.
*/
Request *request = requestQueue_.empty() ? nullptr : requestQueue_.front();
if (!dropFrameCount_ && request && request->findBuffer(stream) == buffer) {
/*
* Tag the buffer as completed, returning it to the
* application.
*/
LOG(RPI, Debug) << "Completing request buffer for stream "
<< stream->name();
pipe()->completeBuffer(request, buffer);
} else {
/*
* This buffer was not part of the Request (which happens if an
* internal buffer was used for an external stream, or
* unconditionally for internal streams), or there is no pending
* request, so we can recycle it.
*/
LOG(RPI, Debug) << "Returning buffer to stream "
<< stream->name();
stream->returnBuffer(buffer);
}
}
void CameraData::handleState()
{
switch (state_) {
case State::Stopped:
case State::Busy:
case State::Error:
break;
case State::IpaComplete:
/* If the request is completed, we will switch to Idle state. */
checkRequestCompleted();
/*
* No break here, we want to try running the pipeline again.
* The fallthrough clause below suppresses compiler warnings.
*/
[[fallthrough]];
case State::Idle:
tryRunPipeline();
break;
}
}
void CameraData::checkRequestCompleted()
{
bool requestCompleted = false;
/*
* If we are dropping this frame, do not touch the request, simply
* change the state to IDLE when ready.
*/
if (!dropFrameCount_) {
Request *request = requestQueue_.front();
if (request->hasPendingBuffers())
return;
/* Must wait for metadata to be filled in before completing. */
if (state_ != State::IpaComplete)
return;
LOG(RPI, Debug) << "Completing request sequence: "
<< request->sequence();
pipe()->completeRequest(request);
requestQueue_.pop();
requestCompleted = true;
}
/*
* Make sure we have three outputs completed in the case of a dropped
* frame.
*/
if (state_ == State::IpaComplete &&
((ispOutputCount_ == ispOutputTotal_ && dropFrameCount_) ||
requestCompleted)) {
LOG(RPI, Debug) << "Going into Idle state";
state_ = State::Idle;
if (dropFrameCount_) {
dropFrameCount_--;
LOG(RPI, Debug) << "Dropping frame at the request of the IPA ("
<< dropFrameCount_ << " left)";
}
}
}
void CameraData::fillRequestMetadata(const ControlList &bufferControls, Request *request)
{
request->metadata().set(controls::SensorTimestamp,
bufferControls.get(controls::SensorTimestamp).value_or(0));
request->metadata().set(controls::ScalerCrop, scalerCrop_);
}
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera/pipeline/rpi | repos/libcamera/src/libcamera/pipeline/rpi/common/pipeline_base.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019-2023, Raspberry Pi Ltd
*
* Pipeline handler base class for Raspberry Pi devices
*/
#include <map>
#include <memory>
#include <optional>
#include <queue>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>
#include <libcamera/controls.h>
#include <libcamera/request.h>
#include "libcamera/internal/bayer_format.h"
#include "libcamera/internal/camera.h"
#include "libcamera/internal/camera_sensor.h"
#include "libcamera/internal/framebuffer.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/media_object.h"
#include "libcamera/internal/pipeline_handler.h"
#include "libcamera/internal/v4l2_videodevice.h"
#include "libcamera/internal/yaml_parser.h"
#include <libcamera/ipa/raspberrypi_ipa_interface.h>
#include <libcamera/ipa/raspberrypi_ipa_proxy.h>
#include "delayed_controls.h"
#include "rpi_stream.h"
using namespace std::chrono_literals;
namespace libcamera {
namespace RPi {
/* Map of mbus codes to supported sizes reported by the sensor. */
using SensorFormats = std::map<unsigned int, std::vector<Size>>;
class RPiCameraConfiguration;
class CameraData : public Camera::Private
{
public:
CameraData(PipelineHandler *pipe)
: Camera::Private(pipe), state_(State::Stopped),
dropFrameCount_(0), buffersAllocated_(false),
ispOutputCount_(0), ispOutputTotal_(0)
{
}
virtual ~CameraData()
{
}
virtual CameraConfiguration::Status platformValidate(RPiCameraConfiguration *rpiConfig) const = 0;
virtual int platformConfigure(const RPiCameraConfiguration *rpiConfig) = 0;
virtual void platformStart() = 0;
virtual void platformStop() = 0;
double scoreFormat(double desired, double actual) const;
V4L2SubdeviceFormat findBestFormat(const Size &req, unsigned int bitDepth) const;
void freeBuffers();
virtual void platformFreeBuffers() = 0;
void enumerateVideoDevices(MediaLink *link, const std::string &frontend);
int loadPipelineConfiguration();
int loadIPA(ipa::RPi::InitResult *result);
int configureIPA(const CameraConfiguration *config, ipa::RPi::ConfigResult *result);
virtual int platformInitIpa(ipa::RPi::InitParams ¶ms) = 0;
virtual int platformConfigureIpa(ipa::RPi::ConfigParams ¶ms) = 0;
void metadataReady(const ControlList &metadata);
void setDelayedControls(const ControlList &controls, uint32_t delayContext);
void setLensControls(const ControlList &controls);
void setSensorControls(ControlList &controls);
Rectangle scaleIspCrop(const Rectangle &ispCrop) const;
void applyScalerCrop(const ControlList &controls);
virtual void platformSetIspCrop() = 0;
void cameraTimeout();
void frameStarted(uint32_t sequence);
void clearIncompleteRequests();
void handleStreamBuffer(FrameBuffer *buffer, Stream *stream);
void handleState();
virtual V4L2VideoDevice::Formats ispFormats() const = 0;
virtual V4L2VideoDevice::Formats rawFormats() const = 0;
virtual V4L2VideoDevice *frontendDevice() = 0;
virtual int platformPipelineConfigure(const std::unique_ptr<YamlObject> &root) = 0;
std::unique_ptr<ipa::RPi::IPAProxyRPi> ipa_;
std::unique_ptr<CameraSensor> sensor_;
SensorFormats sensorFormats_;
/* The vector below is just for convenience when iterating over all streams. */
std::vector<Stream *> streams_;
/* Stores the ids of the buffers mapped in the IPA. */
std::unordered_set<unsigned int> bufferIds_;
/*
* Stores a cascade of Video Mux or Bridge devices between the sensor and
* Unicam, together with the media links across the entities.
*/
std::vector<std::pair<std::unique_ptr<V4L2Subdevice>, MediaLink *>> bridgeDevices_;
std::unique_ptr<DelayedControls> delayedCtrls_;
bool sensorMetadata_;
/*
* All the functions in this class are called from a single calling
* thread. So, we do not need to have any mutex to protect access to any
* of the variables below.
*/
enum class State { Stopped, Idle, Busy, IpaComplete, Error };
State state_;
bool isRunning()
{
return state_ != State::Stopped && state_ != State::Error;
}
std::queue<Request *> requestQueue_;
/* For handling digital zoom. */
IPACameraSensorInfo sensorInfo_;
Rectangle ispCrop_; /* crop in ISP (camera mode) pixels */
Rectangle scalerCrop_; /* crop in sensor native pixels */
Size ispMinCropSize_;
unsigned int dropFrameCount_;
/*
* If set, this stores the value that represents a gain of one for
* the V4L2_CID_NOTIFY_GAINS control.
*/
std::optional<int32_t> notifyGainsUnity_;
/* Have internal buffers been allocated? */
bool buffersAllocated_;
struct Config {
/*
* Override any request from the IPA to drop a number of startup
* frames.
*/
bool disableStartupFrameDrops;
/*
* Override the camera timeout value calculated by the IPA based
* on frame durations.
*/
unsigned int cameraTimeoutValue;
};
Config config_;
protected:
void fillRequestMetadata(const ControlList &bufferControls,
Request *request);
virtual void tryRunPipeline() = 0;
unsigned int ispOutputCount_;
unsigned int ispOutputTotal_;
private:
void checkRequestCompleted();
};
class PipelineHandlerBase : public PipelineHandler
{
public:
PipelineHandlerBase(CameraManager *manager)
: PipelineHandler(manager)
{
}
virtual ~PipelineHandlerBase()
{
}
static bool isRgb(const PixelFormat &pixFmt);
static bool isYuv(const PixelFormat &pixFmt);
static bool isRaw(const PixelFormat &pixFmt);
static bool updateStreamConfig(StreamConfiguration *stream,
const V4L2DeviceFormat &format);
static V4L2DeviceFormat toV4L2DeviceFormat(const V4L2VideoDevice *dev,
const StreamConfiguration *stream);
static V4L2DeviceFormat toV4L2DeviceFormat(const V4L2VideoDevice *dev,
const V4L2SubdeviceFormat &format,
BayerFormat::Packing packingReq);
std::unique_ptr<CameraConfiguration>
generateConfiguration(Camera *camera, Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, libcamera::Stream *stream,
std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
int start(Camera *camera, const ControlList *controls) override;
void stopDevice(Camera *camera) override;
void releaseDevice(Camera *camera) override;
int queueRequestDevice(Camera *camera, Request *request) override;
protected:
int registerCamera(std::unique_ptr<RPi::CameraData> &cameraData,
MediaDevice *frontend, const std::string &frontendName,
MediaDevice *backend, MediaEntity *sensorEntity);
void mapBuffers(Camera *camera, const BufferMap &buffers, unsigned int mask);
virtual int platformRegister(std::unique_ptr<CameraData> &cameraData,
MediaDevice *unicam, MediaDevice *isp) = 0;
private:
CameraData *cameraData(Camera *camera)
{
return static_cast<CameraData *>(camera->_d());
}
int queueAllBuffers(Camera *camera);
virtual int prepareBuffers(Camera *camera) = 0;
};
class RPiCameraConfiguration final : public CameraConfiguration
{
public:
RPiCameraConfiguration(const CameraData *data)
: CameraConfiguration(), data_(data)
{
}
CameraConfiguration::Status validateColorSpaces(ColorSpaceFlags flags);
Status validate() override;
/* Cache the combinedTransform_ that will be applied to the sensor */
Transform combinedTransform_;
/* The sensor format computed in validate() */
V4L2SubdeviceFormat sensorFormat_;
struct StreamParams {
StreamParams()
: index(0), cfg(nullptr), dev(nullptr)
{
}
StreamParams(unsigned int index_, StreamConfiguration *cfg_)
: index(index_), cfg(cfg_), dev(nullptr)
{
}
unsigned int index;
StreamConfiguration *cfg;
V4L2VideoDevice *dev;
V4L2DeviceFormat format;
};
std::vector<StreamParams> rawStreams_;
std::vector<StreamParams> outStreams_;
/*
* Store the colour spaces that all our streams will have. RGB format streams
* will have the same colorspace as YUV streams, with YCbCr field cleared and
* range set to full.
*/
std::optional<ColorSpace> yuvColorSpace_;
std::optional<ColorSpace> rgbColorSpace_;
private:
const CameraData *data_;
};
} /* namespace RPi */
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera/pipeline/rpi | repos/libcamera/src/libcamera/pipeline/rpi/common/delayed_controls.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2020, Raspberry Pi Ltd
*
* Helper to deal with controls that take effect with a delay
*
* Note: This has been forked from the libcamera core implementation.
*/
#pragma once
#include <stdint.h>
#include <unordered_map>
#include <utility>
#include <libcamera/controls.h>
namespace libcamera {
class V4L2Device;
namespace RPi {
class DelayedControls
{
public:
struct ControlParams {
unsigned int delay;
bool priorityWrite;
};
DelayedControls(V4L2Device *device,
const std::unordered_map<uint32_t, ControlParams> &controlParams);
void reset(unsigned int cookie);
bool push(const ControlList &controls, unsigned int cookie);
std::pair<ControlList, unsigned int> get(uint32_t sequence);
void applyControls(uint32_t sequence);
private:
class Info : public ControlValue
{
public:
Info()
: updated(false)
{
}
Info(const ControlValue &v, bool updated_ = true)
: ControlValue(v), updated(updated_)
{
}
bool updated;
};
static constexpr int listSize = 16;
template<typename T>
class RingBuffer : public std::array<T, listSize>
{
public:
T &operator[](unsigned int index)
{
return std::array<T, listSize>::operator[](index % listSize);
}
const T &operator[](unsigned int index) const
{
return std::array<T, listSize>::operator[](index % listSize);
}
};
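/*
 * Indices into the ring buffer wrap modulo listSize, so with listSize = 16
 * an access to element 17 aliases element 1. This allows callers to index
 * directly by frame sequence number without bounding it first.
 */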
V4L2Device *device_;
std::unordered_map<const ControlId *, ControlParams> controlParams_;
unsigned int maxDelay_;
uint32_t queueCount_;
uint32_t writeCount_;
std::unordered_map<const ControlId *, RingBuffer<Info>> values_;
RingBuffer<unsigned int> cookies_;
};
} /* namespace RPi */
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera/pipeline/rpi | repos/libcamera/src/libcamera/pipeline/rpi/vc4/vc4.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019-2023, Raspberry Pi Ltd
*
* Pipeline handler for VC4-based Raspberry Pi devices
*/
#include <linux/bcm2835-isp.h>
#include <linux/v4l2-controls.h>
#include <linux/videodev2.h>
#include <libcamera/formats.h>
#include "libcamera/internal/device_enumerator.h"
#include "libcamera/internal/dma_buf_allocator.h"
#include "../common/pipeline_base.h"
#include "../common/rpi_stream.h"
using namespace std::chrono_literals;
namespace libcamera {
LOG_DECLARE_CATEGORY(RPI)
using StreamFlag = RPi::Stream::StreamFlag;
using StreamParams = RPi::RPiCameraConfiguration::StreamParams;
namespace {
enum class Unicam : unsigned int { Image, Embedded };
enum class Isp : unsigned int { Input, Output0, Output1, Stats };
} /* namespace */
class Vc4CameraData final : public RPi::CameraData
{
public:
Vc4CameraData(PipelineHandler *pipe)
: RPi::CameraData(pipe)
{
}
~Vc4CameraData()
{
freeBuffers();
}
V4L2VideoDevice::Formats ispFormats() const override
{
return isp_[Isp::Output0].dev()->formats();
}
V4L2VideoDevice::Formats rawFormats() const override
{
return unicam_[Unicam::Image].dev()->formats();
}
V4L2VideoDevice *frontendDevice() override
{
return unicam_[Unicam::Image].dev();
}
void platformFreeBuffers() override
{
}
CameraConfiguration::Status platformValidate(RPi::RPiCameraConfiguration *rpiConfig) const override;
int platformPipelineConfigure(const std::unique_ptr<YamlObject> &root) override;
void platformStart() override;
void platformStop() override;
void unicamBufferDequeue(FrameBuffer *buffer);
void ispInputDequeue(FrameBuffer *buffer);
void ispOutputDequeue(FrameBuffer *buffer);
void processStatsComplete(const ipa::RPi::BufferIds &buffers);
void prepareIspComplete(const ipa::RPi::BufferIds &buffers, bool stitchSwapBuffers);
void setIspControls(const ControlList &controls);
void setCameraTimeout(uint32_t maxFrameLengthMs);
/* Array of Unicam and ISP device streams and associated buffers/streams. */
RPi::Device<Unicam, 2> unicam_;
RPi::Device<Isp, 4> isp_;
/* DMAHEAP allocation helper. */
DmaBufAllocator dmaHeap_;
SharedFD lsTable_;
struct Config {
/*
* The minimum number of internal buffers to be allocated for
* the Unicam Image stream.
*/
unsigned int minUnicamBuffers;
/*
* The minimum total (internal + external) buffer count used for
* the Unicam Image stream.
*
* Note that:
* minTotalUnicamBuffers must be >= 1, and
* minTotalUnicamBuffers >= minUnicamBuffers
*/
unsigned int minTotalUnicamBuffers;
};
Config config_;
private:
void platformSetIspCrop() override
{
isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &ispCrop_);
}
int platformConfigure(const RPi::RPiCameraConfiguration *rpiConfig) override;
int platformConfigureIpa(ipa::RPi::ConfigParams ¶ms) override;
int platformInitIpa([[maybe_unused]] ipa::RPi::InitParams ¶ms) override
{
return 0;
}
struct BayerFrame {
FrameBuffer *buffer;
ControlList controls;
unsigned int delayContext;
};
void tryRunPipeline() override;
bool findMatchingBuffers(BayerFrame &bayerFrame, FrameBuffer *&embeddedBuffer);
std::queue<BayerFrame> bayerQueue_;
std::queue<FrameBuffer *> embeddedQueue_;
};
class PipelineHandlerVc4 : public RPi::PipelineHandlerBase
{
public:
PipelineHandlerVc4(CameraManager *manager)
: RPi::PipelineHandlerBase(manager)
{
}
~PipelineHandlerVc4()
{
}
bool match(DeviceEnumerator *enumerator) override;
private:
Vc4CameraData *cameraData(Camera *camera)
{
return static_cast<Vc4CameraData *>(camera->_d());
}
int prepareBuffers(Camera *camera) override;
int platformRegister(std::unique_ptr<RPi::CameraData> &cameraData,
MediaDevice *unicam, MediaDevice *isp) override;
};
bool PipelineHandlerVc4::match(DeviceEnumerator *enumerator)
{
constexpr unsigned int numUnicamDevices = 2;
/*
* Loop over all Unicam instances, but return once a match is found.
* This is to ensure we correctly enumerate the camera when an instance
* of Unicam has registered with media controller, but has not registered
* device nodes due to a sensor subdevice failure.
*/
for (unsigned int i = 0; i < numUnicamDevices; i++) {
DeviceMatch unicam("unicam");
MediaDevice *unicamDevice = acquireMediaDevice(enumerator, unicam);
if (!unicamDevice) {
LOG(RPI, Debug) << "Unable to acquire a Unicam instance";
continue;
}
DeviceMatch isp("bcm2835-isp");
MediaDevice *ispDevice = acquireMediaDevice(enumerator, isp);
if (!ispDevice) {
LOG(RPI, Debug) << "Unable to acquire ISP instance";
continue;
}
/*
* The loop below is used to register multiple cameras behind one or more
* video mux devices that are attached to a particular Unicam instance.
* Obviously these cameras cannot be used simultaneously.
*/
unsigned int numCameras = 0;
for (MediaEntity *entity : unicamDevice->entities()) {
if (entity->function() != MEDIA_ENT_F_CAM_SENSOR)
continue;
std::unique_ptr<RPi::CameraData> cameraData = std::make_unique<Vc4CameraData>(this);
int ret = RPi::PipelineHandlerBase::registerCamera(cameraData,
unicamDevice, "unicam-image",
ispDevice, entity);
if (ret)
LOG(RPI, Error) << "Failed to register camera "
<< entity->name() << ": " << ret;
else
numCameras++;
}
if (numCameras)
return true;
}
return false;
}
int PipelineHandlerVc4::prepareBuffers(Camera *camera)
{
Vc4CameraData *data = cameraData(camera);
unsigned int numRawBuffers = 0;
int ret;
for (Stream *s : camera->streams()) {
if (BayerFormat::fromPixelFormat(s->configuration().pixelFormat).isValid()) {
numRawBuffers = s->configuration().bufferCount;
break;
}
}
/* Decide how many internal buffers to allocate. */
for (auto const stream : data->streams_) {
unsigned int numBuffers;
/*
* For Unicam, allocate a minimum number of buffers for internal
* use as we want to avoid any frame drops.
*/
const unsigned int minBuffers = data->config_.minTotalUnicamBuffers;
if (stream == &data->unicam_[Unicam::Image]) {
/*
* If an application has configured a RAW stream, allocate
* additional buffers to make up the minimum, but ensure
* we have at least minUnicamBuffers of internal buffers
* to use to minimise frame drops.
*/
numBuffers = std::max<int>(data->config_.minUnicamBuffers,
minBuffers - numRawBuffers);
} else if (stream == &data->isp_[Isp::Input]) {
/*
* ISP input buffers are imported from Unicam, so follow
* similar logic as above to count all the RAW buffers
* available.
*/
numBuffers = numRawBuffers +
std::max<int>(data->config_.minUnicamBuffers,
minBuffers - numRawBuffers);
} else if (stream == &data->unicam_[Unicam::Embedded]) {
/*
* Embedded data buffers are (currently) for internal use, and
* are small enough (typically 1-2KB) that we can
* allocate them generously to avoid causing problems in the
* IPA when we cannot supply the metadata.
*
* 12 are allocated as a typical application will have 8-10
* input buffers, so allocating more embedded buffers than that
* is a sensible choice.
*
* The lifetimes of these buffers are smaller than those of the
* raw buffers, so allocating a fixed number will still suffice
* if the application requests a greater number of raw
* buffers, as these will be recycled quicker.
*/
numBuffers = 12;
} else {
/*
* Since the ISP runs synchronous with the IPA and requests,
* we only ever need one set of internal buffers. Any buffers
* the application wants to hold onto will already be exported
* through PipelineHandlerRPi::exportFrameBuffers().
*/
numBuffers = 1;
}
LOG(RPI, Debug) << "Preparing " << numBuffers
<< " buffers for stream " << stream->name();
ret = stream->prepareBuffers(numBuffers);
if (ret < 0)
return ret;
}
/*
* Pass the stats and embedded data buffers to the IPA. No other
* buffers need to be passed.
*/
mapBuffers(camera, data->isp_[Isp::Stats].getBuffers(), RPi::MaskStats);
if (data->sensorMetadata_)
mapBuffers(camera, data->unicam_[Unicam::Embedded].getBuffers(),
RPi::MaskEmbeddedData);
return 0;
}
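/*
 * Worked example of the buffer accounting above, assuming the default
 * configuration (minUnicamBuffers = 2, minTotalUnicamBuffers = 4):
 *
 *  - No RAW stream configured (numRawBuffers = 0): Unicam Image gets
 *    max(2, 4 - 0) = 4 internal buffers and ISP Input gets 0 + 4 = 4.
 *  - RAW stream configured with bufferCount = 6: Unicam Image gets
 *    max(2, 4 - 6) = 2 internal buffers and ISP Input gets 6 + 2 = 8.
 *
 * The embedded data stream, when present, always gets 12 buffers, and the
 * remaining ISP streams get one set each.
 */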
int PipelineHandlerVc4::platformRegister(std::unique_ptr<RPi::CameraData> &cameraData, MediaDevice *unicam, MediaDevice *isp)
{
Vc4CameraData *data = static_cast<Vc4CameraData *>(cameraData.get());
if (!data->dmaHeap_.isValid())
return -ENOMEM;
MediaEntity *unicamImage = unicam->getEntityByName("unicam-image");
MediaEntity *ispOutput0 = isp->getEntityByName("bcm2835-isp0-output0");
MediaEntity *ispCapture1 = isp->getEntityByName("bcm2835-isp0-capture1");
MediaEntity *ispCapture2 = isp->getEntityByName("bcm2835-isp0-capture2");
MediaEntity *ispCapture3 = isp->getEntityByName("bcm2835-isp0-capture3");
if (!unicamImage || !ispOutput0 || !ispCapture1 || !ispCapture2 || !ispCapture3)
return -ENOENT;
/* Locate and open the unicam video streams. */
data->unicam_[Unicam::Image] = RPi::Stream("Unicam Image", unicamImage);
/* An embedded data node will not be present if the sensor does not support it. */
MediaEntity *unicamEmbedded = unicam->getEntityByName("unicam-embedded");
if (unicamEmbedded) {
data->unicam_[Unicam::Embedded] = RPi::Stream("Unicam Embedded", unicamEmbedded);
data->unicam_[Unicam::Embedded].dev()->bufferReady.connect(data,
&Vc4CameraData::unicamBufferDequeue);
}
/* Tag the ISP input stream as an import stream. */
data->isp_[Isp::Input] = RPi::Stream("ISP Input", ispOutput0, StreamFlag::ImportOnly);
data->isp_[Isp::Output0] = RPi::Stream("ISP Output0", ispCapture1);
data->isp_[Isp::Output1] = RPi::Stream("ISP Output1", ispCapture2);
data->isp_[Isp::Stats] = RPi::Stream("ISP Stats", ispCapture3);
/* Wire up all the buffer connections. */
data->unicam_[Unicam::Image].dev()->bufferReady.connect(data, &Vc4CameraData::unicamBufferDequeue);
data->isp_[Isp::Input].dev()->bufferReady.connect(data, &Vc4CameraData::ispInputDequeue);
data->isp_[Isp::Output0].dev()->bufferReady.connect(data, &Vc4CameraData::ispOutputDequeue);
data->isp_[Isp::Output1].dev()->bufferReady.connect(data, &Vc4CameraData::ispOutputDequeue);
data->isp_[Isp::Stats].dev()->bufferReady.connect(data, &Vc4CameraData::ispOutputDequeue);
if (data->sensorMetadata_ ^ !!data->unicam_[Unicam::Embedded].dev()) {
LOG(RPI, Warning) << "Mismatch between Unicam and CamHelper for embedded data usage!";
data->sensorMetadata_ = false;
if (data->unicam_[Unicam::Embedded].dev())
data->unicam_[Unicam::Embedded].dev()->bufferReady.disconnect();
}
/*
* Open all Unicam and ISP streams. The exception is the embedded data
* stream, which only gets opened below if the IPA reports that the sensor
* supports embedded data.
*
* The below grouping is just for convenience so that we can easily
* iterate over all streams in one go.
*/
data->streams_.push_back(&data->unicam_[Unicam::Image]);
if (data->sensorMetadata_)
data->streams_.push_back(&data->unicam_[Unicam::Embedded]);
for (auto &stream : data->isp_)
data->streams_.push_back(&stream);
for (auto stream : data->streams_) {
int ret = stream->dev()->open();
if (ret)
return ret;
}
if (!data->unicam_[Unicam::Image].dev()->caps().hasMediaController()) {
LOG(RPI, Error) << "Unicam driver does not use the MediaController, please update your kernel!";
return -EINVAL;
}
/* Wire up all the IPA connections. */
data->ipa_->processStatsComplete.connect(data, &Vc4CameraData::processStatsComplete);
data->ipa_->prepareIspComplete.connect(data, &Vc4CameraData::prepareIspComplete);
data->ipa_->setIspControls.connect(data, &Vc4CameraData::setIspControls);
data->ipa_->setCameraTimeout.connect(data, &Vc4CameraData::setCameraTimeout);
/*
* List the available streams an application may request. At present, we
* do not advertise Unicam Embedded and ISP Statistics streams, as there
* is no mechanism for the application to request non-image buffer formats.
*/
std::set<Stream *> streams;
streams.insert(&data->unicam_[Unicam::Image]);
streams.insert(&data->isp_[Isp::Output0]);
streams.insert(&data->isp_[Isp::Output1]);
/* Create and register the camera. */
const std::string &id = data->sensor_->id();
std::shared_ptr<Camera> camera =
Camera::create(std::move(cameraData), id, streams);
PipelineHandler::registerCamera(std::move(camera));
LOG(RPI, Info) << "Registered camera " << id
<< " to Unicam device " << unicam->deviceNode()
<< " and ISP device " << isp->deviceNode();
return 0;
}
CameraConfiguration::Status Vc4CameraData::platformValidate(RPi::RPiCameraConfiguration *rpiConfig) const
{
std::vector<StreamParams> &rawStreams = rpiConfig->rawStreams_;
std::vector<StreamParams> &outStreams = rpiConfig->outStreams_;
CameraConfiguration::Status status = CameraConfiguration::Status::Valid;
/* Can only output 1 RAW stream, or 2 YUV/RGB streams. */
if (rawStreams.size() > 1 || outStreams.size() > 2) {
LOG(RPI, Error) << "Invalid number of streams requested";
return CameraConfiguration::Status::Invalid;
}
if (!rawStreams.empty()) {
rawStreams[0].dev = unicam_[Unicam::Image].dev();
/* Adjust the RAW stream to match the computed sensor format. */
StreamConfiguration *rawStream = rawStreams[0].cfg;
BayerFormat rawBayer = BayerFormat::fromPixelFormat(rawStream->pixelFormat);
/* Apply the sensor bitdepth. */
rawBayer.bitDepth = BayerFormat::fromMbusCode(rpiConfig->sensorFormat_.code).bitDepth;
/* Default to CSI2 packing if the user request is unsupported. */
if (rawBayer.packing != BayerFormat::Packing::CSI2 &&
rawBayer.packing != BayerFormat::Packing::None)
rawBayer.packing = BayerFormat::Packing::CSI2;
PixelFormat rawFormat = rawBayer.toPixelFormat();
/*
* Try for an unpacked format if a packed one wasn't available.
* This catches 8 (and 16) bit formats which would otherwise
* fail.
*/
if (!rawFormat.isValid() && rawBayer.packing != BayerFormat::Packing::None) {
rawBayer.packing = BayerFormat::Packing::None;
rawFormat = rawBayer.toPixelFormat();
}
if (rawStream->pixelFormat != rawFormat ||
rawStream->size != rpiConfig->sensorFormat_.size) {
rawStream->pixelFormat = rawFormat;
rawStream->size = rpiConfig->sensorFormat_.size;
status = CameraConfiguration::Adjusted;
}
rawStreams[0].format =
RPi::PipelineHandlerBase::toV4L2DeviceFormat(unicam_[Unicam::Image].dev(), rawStream);
}
/*
* For the two ISP outputs, one stream must be equal to or smaller than the
* other in all dimensions.
*
* Index 0 contains the largest requested resolution.
*/
for (unsigned int i = 0; i < outStreams.size(); i++) {
Size size;
/*
* \todo Should we warn if upscaling, as it reduces the image
* quality and is usually undesired ?
*/
size.width = std::min(outStreams[i].cfg->size.width,
outStreams[0].cfg->size.width);
size.height = std::min(outStreams[i].cfg->size.height,
outStreams[0].cfg->size.height);
if (outStreams[i].cfg->size != size) {
outStreams[i].cfg->size = size;
status = CameraConfiguration::Status::Adjusted;
}
/*
* Output 0 must be for the largest resolution. We will
* have that fixed up in the code above.
*/
outStreams[i].dev = isp_[i == 0 ? Isp::Output0 : Isp::Output1].dev();
outStreams[i].format = RPi::PipelineHandlerBase::toV4L2DeviceFormat(outStreams[i].dev, outStreams[i].cfg);
}
return status;
}
int Vc4CameraData::platformPipelineConfigure(const std::unique_ptr<YamlObject> &root)
{
config_ = {
.minUnicamBuffers = 2,
.minTotalUnicamBuffers = 4,
};
if (!root)
return 0;
std::optional<double> ver = (*root)["version"].get<double>();
if (!ver || *ver != 1.0) {
LOG(RPI, Error) << "Unexpected configuration file version reported";
return -EINVAL;
}
std::optional<std::string> target = (*root)["target"].get<std::string>();
if (!target || *target != "bcm2835") {
LOG(RPI, Error) << "Unexpected target reported: expected \"bcm2835\", got "
<< *target;
return -EINVAL;
}
const YamlObject &phConfig = (*root)["pipeline_handler"];
config_.minUnicamBuffers =
phConfig["min_unicam_buffers"].get<unsigned int>(config_.minUnicamBuffers);
config_.minTotalUnicamBuffers =
phConfig["min_total_unicam_buffers"].get<unsigned int>(config_.minTotalUnicamBuffers);
if (config_.minTotalUnicamBuffers < config_.minUnicamBuffers) {
LOG(RPI, Error) << "Invalid configuration: min_total_unicam_buffers must be >= min_unicam_buffers";
return -EINVAL;
}
if (config_.minTotalUnicamBuffers < 1) {
LOG(RPI, Error) << "Invalid configuration: min_total_unicam_buffers must be >= 1";
return -EINVAL;
}
return 0;
}
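/*
 * An illustrative configuration fragment accepted by the code above (keys
 * as parsed here; values are examples only):
 *
 *   version: 1.0
 *   target: bcm2835
 *   pipeline_handler:
 *       min_unicam_buffers: 2
 *       min_total_unicam_buffers: 4
 */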
int Vc4CameraData::platformConfigure(const RPi::RPiCameraConfiguration *rpiConfig)
{
const std::vector<StreamParams> &rawStreams = rpiConfig->rawStreams_;
const std::vector<StreamParams> &outStreams = rpiConfig->outStreams_;
int ret;
V4L2VideoDevice *unicam = unicam_[Unicam::Image].dev();
V4L2DeviceFormat unicamFormat;
/*
* See which streams are requested, and route the user
* StreamConfiguration appropriately.
*/
if (!rawStreams.empty()) {
rawStreams[0].cfg->setStream(&unicam_[Unicam::Image]);
unicam_[Unicam::Image].setFlags(StreamFlag::External);
unicamFormat = rawStreams[0].format;
} else {
unicamFormat =
RPi::PipelineHandlerBase::toV4L2DeviceFormat(unicam,
rpiConfig->sensorFormat_,
BayerFormat::Packing::CSI2);
}
ret = unicam->setFormat(&unicamFormat);
if (ret)
return ret;
ret = isp_[Isp::Input].dev()->setFormat(&unicamFormat);
if (ret)
return ret;
LOG(RPI, Info) << "Sensor: " << sensor_->id()
<< " - Selected sensor format: " << rpiConfig->sensorFormat_
<< " - Selected unicam format: " << unicamFormat;
/* Use a sensible small default size if no output streams are configured. */
Size maxSize = outStreams.empty() ? Size(320, 240) : outStreams[0].cfg->size;
V4L2DeviceFormat format;
for (unsigned int i = 0; i < outStreams.size(); i++) {
StreamConfiguration *cfg = outStreams[i].cfg;
/* The largest resolution gets routed to the ISP Output 0 node. */
RPi::Stream *stream = i == 0 ? &isp_[Isp::Output0] : &isp_[Isp::Output1];
format = outStreams[i].format;
LOG(RPI, Debug) << "Setting " << stream->name() << " to "
<< format;
ret = stream->dev()->setFormat(&format);
if (ret)
return -EINVAL;
LOG(RPI, Debug)
<< "Stream " << stream->name() << " has color space "
<< ColorSpace::toString(cfg->colorSpace);
cfg->setStream(stream);
stream->setFlags(StreamFlag::External);
}
ispOutputTotal_ = outStreams.size();
/*
* If ISP::Output0 stream has not been configured by the application,
* we must allow the hardware to generate an output so that the data
* flow in the pipeline handler remains consistent, and we still generate
* statistics for the IPA to use. So enable the output at a very low
* resolution for internal use.
*
* \todo Allow the pipeline to work correctly without Output0 and only
* statistics coming from the hardware.
*/
if (outStreams.empty()) {
V4L2VideoDevice *dev = isp_[Isp::Output0].dev();
format = {};
format.size = maxSize;
format.fourcc = dev->toV4L2PixelFormat(formats::YUV420);
/* No one asked for output, so the color space doesn't matter. */
format.colorSpace = ColorSpace::Sycc;
ret = dev->setFormat(&format);
if (ret) {
LOG(RPI, Error)
<< "Failed to set default format on ISP Output0: "
<< ret;
return -EINVAL;
}
ispOutputTotal_++;
LOG(RPI, Debug) << "Defaulting ISP Output0 format to "
<< format;
}
/*
* If ISP::Output1 stream has not been requested by the application, we
* set it up for internal use now. This second stream will be used for
* fast colour denoise, and must be a quarter resolution of the ISP::Output0
* stream. However, also limit the maximum size to 1200 pixels in the
* larger dimension, just to avoid being wasteful with buffer allocations
* and memory bandwidth.
*
* \todo If Output 1 format is not YUV420, Output 1 ought to be disabled as
* colour denoise will not run.
*/
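	/*
	 * Illustrative example (not from any particular configuration): a
	 * 1920x1080 Output0 stream gives a 960x540 Output1, i.e. half the
	 * size in each dimension, while larger Output0 sizes are further
	 * capped by the 1200 pixel limit described above.
	 */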
if (outStreams.size() <= 1) {
V4L2VideoDevice *dev = isp_[Isp::Output1].dev();
V4L2DeviceFormat output1Format;
constexpr Size maxDimensions(1200, 1200);
const Size limit = maxDimensions.boundedToAspectRatio(format.size);
output1Format.size = (format.size / 2).boundedTo(limit).alignedDownTo(2, 2);
output1Format.colorSpace = format.colorSpace;
output1Format.fourcc = dev->toV4L2PixelFormat(formats::YUV420);
LOG(RPI, Debug) << "Setting ISP Output1 (internal) to "
<< output1Format;
ret = dev->setFormat(&output1Format);
if (ret) {
LOG(RPI, Error) << "Failed to set format on ISP Output1: "
<< ret;
return -EINVAL;
}
ispOutputTotal_++;
}
/* ISP statistics output format. */
format = {};
format.fourcc = V4L2PixelFormat(V4L2_META_FMT_BCM2835_ISP_STATS);
ret = isp_[Isp::Stats].dev()->setFormat(&format);
if (ret) {
LOG(RPI, Error) << "Failed to set format on ISP stats stream: "
<< format;
return ret;
}
ispOutputTotal_++;
/*
* Configure the Unicam embedded data output format only if the sensor
* supports it.
*/
if (sensorMetadata_) {
V4L2SubdeviceFormat embeddedFormat;
sensor_->device()->getFormat(1, &embeddedFormat);
format = {};
format.fourcc = V4L2PixelFormat(V4L2_META_FMT_SENSOR_DATA);
format.planes[0].size = embeddedFormat.size.width * embeddedFormat.size.height;
LOG(RPI, Debug) << "Setting embedded data format " << format.toString();
ret = unicam_[Unicam::Embedded].dev()->setFormat(&format);
if (ret) {
LOG(RPI, Error) << "Failed to set format on Unicam embedded: "
<< format;
return ret;
}
}
/* Figure out the smallest selection the ISP will allow. */
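	/*
	 * Requesting a 1x1 crop lets the driver adjust the rectangle up to
	 * the smallest size it supports, so reading back the adjusted
	 * selection yields the ISP's minimum crop size.
	 */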
Rectangle testCrop(0, 0, 1, 1);
isp_[Isp::Input].dev()->setSelection(V4L2_SEL_TGT_CROP, &testCrop);
ispMinCropSize_ = testCrop.size();
/* Adjust aspect ratio by providing crops on the input image. */
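	/*
	 * For example (illustrative numbers): a 1920x1080 Unicam frame with a
	 * 1440x1080 output stream yields a 1440x1080 crop centred in the
	 * 1920x1080 frame.
	 */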
Size size = unicamFormat.size.boundedToAspectRatio(maxSize);
ispCrop_ = size.centeredTo(Rectangle(unicamFormat.size).center());
platformSetIspCrop();
return 0;
}
int Vc4CameraData::platformConfigureIpa(ipa::RPi::ConfigParams ¶ms)
{
params.ispControls = isp_[Isp::Input].dev()->controls();
/* Allocate the lens shading table via dmaHeap and pass to the IPA. */
if (!lsTable_.isValid()) {
lsTable_ = SharedFD(dmaHeap_.alloc("ls_grid", ipa::RPi::MaxLsGridSize));
if (!lsTable_.isValid())
return -ENOMEM;
/* Allow the IPA to mmap the LS table via the file descriptor. */
/*
* \todo Investigate if mapping the lens shading table buffer
* could be handled with mapBuffers().
*/
params.lsTableHandle = lsTable_;
}
return 0;
}
void Vc4CameraData::platformStart()
{
}
void Vc4CameraData::platformStop()
{
bayerQueue_ = {};
embeddedQueue_ = {};
}
void Vc4CameraData::unicamBufferDequeue(FrameBuffer *buffer)
{
RPi::Stream *stream = nullptr;
unsigned int index;
if (!isRunning())
return;
for (RPi::Stream &s : unicam_) {
index = s.getBufferId(buffer);
if (index) {
stream = &s;
break;
}
}
/* The buffer must belong to one of our streams. */
ASSERT(stream);
LOG(RPI, Debug) << "Stream " << stream->name() << " buffer dequeue"
<< ", buffer id " << index
<< ", timestamp: " << buffer->metadata().timestamp;
if (stream == &unicam_[Unicam::Image]) {
/*
* Lookup the sensor controls used for this frame sequence from
* DelayedControl and queue them along with the frame buffer.
*/
auto [ctrl, delayContext] = delayedCtrls_->get(buffer->metadata().sequence);
/*
* Add the frame timestamp to the ControlList for the IPA to use
* as it does not receive the FrameBuffer object.
*/
ctrl.set(controls::SensorTimestamp, buffer->metadata().timestamp);
bayerQueue_.push({ buffer, std::move(ctrl), delayContext });
} else {
embeddedQueue_.push(buffer);
}
handleState();
}
void Vc4CameraData::ispInputDequeue(FrameBuffer *buffer)
{
if (!isRunning())
return;
LOG(RPI, Debug) << "Stream ISP Input buffer complete"
<< ", buffer id " << unicam_[Unicam::Image].getBufferId(buffer)
<< ", timestamp: " << buffer->metadata().timestamp;
/* The ISP input buffer gets re-queued into Unicam. */
handleStreamBuffer(buffer, &unicam_[Unicam::Image]);
handleState();
}
void Vc4CameraData::ispOutputDequeue(FrameBuffer *buffer)
{
RPi::Stream *stream = nullptr;
unsigned int index;
if (!isRunning())
return;
for (RPi::Stream &s : isp_) {
index = s.getBufferId(buffer);
if (index) {
stream = &s;
break;
}
}
/* The buffer must belong to one of our ISP output streams. */
ASSERT(stream);
LOG(RPI, Debug) << "Stream " << stream->name() << " buffer complete"
<< ", buffer id " << index
<< ", timestamp: " << buffer->metadata().timestamp;
/*
* ISP statistics buffer must not be re-queued or sent back to the
* application until after the IPA signals so.
*/
if (stream == &isp_[Isp::Stats]) {
ipa::RPi::ProcessParams params;
params.buffers.stats = index | RPi::MaskStats;
params.ipaContext = requestQueue_.front()->sequence();
ipa_->processStats(params);
} else {
/* Any other ISP output can be handed back to the application now. */
handleStreamBuffer(buffer, stream);
}
/*
* Increment the number of ISP outputs generated.
* This is needed to track dropped frames.
*/
ispOutputCount_++;
handleState();
}
void Vc4CameraData::processStatsComplete(const ipa::RPi::BufferIds &buffers)
{
if (!isRunning())
return;
FrameBuffer *buffer = isp_[Isp::Stats].getBuffers().at(buffers.stats & RPi::MaskID).buffer;
handleStreamBuffer(buffer, &isp_[Isp::Stats]);
state_ = State::IpaComplete;
handleState();
}
void Vc4CameraData::prepareIspComplete(const ipa::RPi::BufferIds &buffers,
[[maybe_unused]] bool stitchSwapBuffers)
{
unsigned int embeddedId = buffers.embedded & RPi::MaskID;
unsigned int bayer = buffers.bayer & RPi::MaskID;
FrameBuffer *buffer;
if (!isRunning())
return;
buffer = unicam_[Unicam::Image].getBuffers().at(bayer & RPi::MaskID).buffer;
LOG(RPI, Debug) << "Input re-queue to ISP, buffer id " << (bayer & RPi::MaskID)
<< ", timestamp: " << buffer->metadata().timestamp;
isp_[Isp::Input].queueBuffer(buffer);
ispOutputCount_ = 0;
if (sensorMetadata_ && embeddedId) {
buffer = unicam_[Unicam::Embedded].getBuffers().at(embeddedId & RPi::MaskID).buffer;
handleStreamBuffer(buffer, &unicam_[Unicam::Embedded]);
}
handleState();
}
void Vc4CameraData::setIspControls(const ControlList &controls)
{
ControlList ctrls = controls;
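	/*
	 * The lens shading control carries a dmabuf handle. Patch in the file
	 * descriptor of the LS table allocated in platformConfigureIpa() so
	 * the ISP driver can access the lens shading grid shared with the IPA.
	 */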
if (ctrls.contains(V4L2_CID_USER_BCM2835_ISP_LENS_SHADING)) {
ControlValue &value =
const_cast<ControlValue &>(ctrls.get(V4L2_CID_USER_BCM2835_ISP_LENS_SHADING));
Span<uint8_t> s = value.data();
bcm2835_isp_lens_shading *ls =
reinterpret_cast<bcm2835_isp_lens_shading *>(s.data());
ls->dmabuf = lsTable_.get();
}
isp_[Isp::Input].dev()->setControls(&ctrls);
handleState();
}
void Vc4CameraData::setCameraTimeout(uint32_t maxFrameLengthMs)
{
/*
	 * Set the dequeue timeout to 5x the maximum frame length advertised
	 * by the IPA over a number of frames, with a minimum timeout value
	 * of 1s.
*/
utils::Duration timeout =
std::max<utils::Duration>(1s, 5 * maxFrameLengthMs * 1ms);
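	/*
	 * For example, a 33ms maximum frame length gives 165ms, which is
	 * raised to the 1s floor, while a 250ms maximum frame length gives a
	 * 1.25s timeout.
	 */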
LOG(RPI, Debug) << "Setting Unicam timeout to " << timeout;
unicam_[Unicam::Image].dev()->setDequeueTimeout(timeout);
}
void Vc4CameraData::tryRunPipeline()
{
FrameBuffer *embeddedBuffer;
BayerFrame bayerFrame;
/* If any of our request or buffer queues are empty, we cannot proceed. */
if (state_ != State::Idle || requestQueue_.empty() ||
bayerQueue_.empty() || (embeddedQueue_.empty() && sensorMetadata_))
return;
if (!findMatchingBuffers(bayerFrame, embeddedBuffer))
return;
/* Take the first request from the queue and action the IPA. */
Request *request = requestQueue_.front();
/* See if a new ScalerCrop value needs to be applied. */
applyScalerCrop(request->controls());
/*
* Clear the request metadata and fill it with some initial non-IPA
* related controls. We clear it first because the request metadata
* may have been populated if we have dropped the previous frame.
*/
request->metadata().clear();
fillRequestMetadata(bayerFrame.controls, request);
/* Set our state to say the pipeline is active. */
state_ = State::Busy;
unsigned int bayer = unicam_[Unicam::Image].getBufferId(bayerFrame.buffer);
LOG(RPI, Debug) << "Signalling prepareIsp:"
<< " Bayer buffer id: " << bayer;
ipa::RPi::PrepareParams params;
params.buffers.bayer = RPi::MaskBayerData | bayer;
params.sensorControls = std::move(bayerFrame.controls);
params.requestControls = request->controls();
params.ipaContext = request->sequence();
params.delayContext = bayerFrame.delayContext;
params.buffers.embedded = 0;
if (embeddedBuffer) {
unsigned int embeddedId = unicam_[Unicam::Embedded].getBufferId(embeddedBuffer);
params.buffers.embedded = RPi::MaskEmbeddedData | embeddedId;
LOG(RPI, Debug) << "Signalling prepareIsp:"
<< " Embedded buffer id: " << embeddedId;
}
ipa_->prepareIsp(params);
}
bool Vc4CameraData::findMatchingBuffers(BayerFrame &bayerFrame, FrameBuffer *&embeddedBuffer)
{
if (bayerQueue_.empty())
return false;
/*
* Find the embedded data buffer with a matching timestamp to pass to
* the IPA. Any embedded buffers with a timestamp lower than the
* current bayer buffer will be removed and re-queued to the driver.
*/
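	/*
	 * For example, with embedded timestamps { 90, 100 } and a bayer
	 * timestamp of 100, the 90 buffer is returned to the driver and the
	 * 100 buffer is matched.
	 */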
uint64_t ts = bayerQueue_.front().buffer->metadata().timestamp;
embeddedBuffer = nullptr;
while (!embeddedQueue_.empty()) {
FrameBuffer *b = embeddedQueue_.front();
if (b->metadata().timestamp < ts) {
embeddedQueue_.pop();
unicam_[Unicam::Embedded].returnBuffer(b);
LOG(RPI, Debug) << "Dropping unmatched input frame in stream "
<< unicam_[Unicam::Embedded].name();
} else if (b->metadata().timestamp == ts) {
/* Found a match! */
embeddedBuffer = b;
embeddedQueue_.pop();
break;
} else {
break; /* Only higher timestamps from here. */
}
}
if (!embeddedBuffer && sensorMetadata_) {
if (embeddedQueue_.empty()) {
/*
* If the embedded buffer queue is empty, wait for the next
* buffer to arrive - dequeue ordering may send the image
* buffer first.
*/
LOG(RPI, Debug) << "Waiting for next embedded buffer.";
return false;
}
/* Log if there is no matching embedded data buffer found. */
LOG(RPI, Debug) << "Returning bayer frame without a matching embedded buffer.";
}
bayerFrame = std::move(bayerQueue_.front());
bayerQueue_.pop();
return true;
}
REGISTER_PIPELINE_HANDLER(PipelineHandlerVc4, "rpi/vc4")
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera/pipeline/rpi/vc4 | repos/libcamera/src/libcamera/pipeline/rpi/vc4/data/example.yaml | {
"version": 1.0,
"target": "bcm2835",
"pipeline_handler":
{
# The minimum number of internal buffers to be allocated for
# Unicam. This value must be greater than 0, but less than or
# equal to min_total_unicam_buffers.
#
# A larger number of internal buffers can reduce the occurrence
# of frame drops during high CPU loads, but might also cause
# additional latency in the system.
#
# Note that the pipeline handler might override this value and
# not allocate any internal buffers if it knows they will never
# be used. For example if the RAW stream is marked as mandatory
# and there are no dropped frames signalled for algorithm
# convergence.
#
# "min_unicam_buffers": 2,
# The minimum total (internal + external) buffer count used for
# Unicam. The number of internal buffers allocated for Unicam is
# given by:
#
# internal buffer count = max(min_unicam_buffers,
# min_total_unicam_buffers - external buffer count)
#
# "min_total_unicam_buffers": 4,
# Override any request from the IPA to drop a number of startup
# frames.
#
# "disable_startup_frame_drops": false,
		# Custom timeout value (in ms) for the camera to use. This overrides
# the value computed by the pipeline handler based on frame
# durations.
#
# Set this value to 0 to use the pipeline handler computed
# timeout value.
#
# "camera_timeout_value_ms": 0,
}
}
|
0 | repos/libcamera/src/libcamera/pipeline | repos/libcamera/src/libcamera/pipeline/mali-c55/mali-c55.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2024, Ideas on Board Oy
*
* Pipeline Handler for ARM's Mali-C55 ISP
*/
#include <algorithm>
#include <array>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <linux/media-bus-format.h>
#include <linux/media.h>
#include <libcamera/base/log.h>
#include <libcamera/camera.h>
#include <libcamera/formats.h>
#include <libcamera/geometry.h>
#include <libcamera/stream.h>
#include "libcamera/internal/bayer_format.h"
#include "libcamera/internal/camera.h"
#include "libcamera/internal/camera_sensor.h"
#include "libcamera/internal/device_enumerator.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/pipeline_handler.h"
#include "libcamera/internal/v4l2_subdevice.h"
#include "libcamera/internal/v4l2_videodevice.h"
namespace {
bool isFormatRaw(const libcamera::PixelFormat &pixFmt)
{
return libcamera::PixelFormatInfo::info(pixFmt).colourEncoding ==
libcamera::PixelFormatInfo::ColourEncodingRAW;
}
} /* namespace */
namespace libcamera {
LOG_DEFINE_CATEGORY(MaliC55)
const std::map<libcamera::PixelFormat, unsigned int> maliC55FmtToCode = {
/* \todo Support all formats supported by the driver in libcamera. */
{ formats::RGB565, MEDIA_BUS_FMT_RGB121212_1X36 },
{ formats::RGB888, MEDIA_BUS_FMT_RGB121212_1X36 },
{ formats::YUYV, MEDIA_BUS_FMT_YUV10_1X30 },
{ formats::UYVY, MEDIA_BUS_FMT_YUV10_1X30 },
{ formats::R8, MEDIA_BUS_FMT_YUV10_1X30 },
{ formats::NV12, MEDIA_BUS_FMT_YUV10_1X30 },
{ formats::NV21, MEDIA_BUS_FMT_YUV10_1X30 },
/* RAW formats, FR pipe only. */
{ formats::SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8 },
{ formats::SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8 },
{ formats::SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8 },
{ formats::SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8 },
{ formats::SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10 },
{ formats::SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10 },
{ formats::SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10 },
{ formats::SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10 },
{ formats::SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12 },
{ formats::SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12 },
{ formats::SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12 },
{ formats::SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12 },
{ formats::SGBRG14, MEDIA_BUS_FMT_SGBRG14_1X14 },
{ formats::SRGGB14, MEDIA_BUS_FMT_SRGGB14_1X14 },
{ formats::SBGGR14, MEDIA_BUS_FMT_SBGGR14_1X14 },
{ formats::SGRBG14, MEDIA_BUS_FMT_SGRBG14_1X14 },
{ formats::SGBRG16, MEDIA_BUS_FMT_SGBRG16_1X16 },
{ formats::SRGGB16, MEDIA_BUS_FMT_SRGGB16_1X16 },
{ formats::SBGGR16, MEDIA_BUS_FMT_SBGGR16_1X16 },
{ formats::SGRBG16, MEDIA_BUS_FMT_SGRBG16_1X16 },
};
constexpr Size kMaliC55MinSize = { 128, 128 };
constexpr Size kMaliC55MaxSize = { 8192, 8192 };
constexpr unsigned int kMaliC55ISPInternalFormat = MEDIA_BUS_FMT_RGB121212_1X36;
class MaliC55CameraData : public Camera::Private
{
public:
MaliC55CameraData(PipelineHandler *pipe, MediaEntity *entity)
: Camera::Private(pipe), entity_(entity)
{
}
int init();
	/* Delegate these functionalities to either the TPG or the CameraSensor. */
const std::vector<unsigned int> mbusCodes() const;
const std::vector<Size> sizes(unsigned int mbusCode) const;
const Size resolution() const;
PixelFormat bestRawFormat() const;
PixelFormat adjustRawFormat(const PixelFormat &pixFmt) const;
Size adjustRawSizes(const PixelFormat &pixFmt, const Size &rawSize) const;
std::unique_ptr<CameraSensor> sensor_;
MediaEntity *entity_;
std::unique_ptr<V4L2Subdevice> csi_;
std::unique_ptr<V4L2Subdevice> sd_;
Stream frStream_;
Stream dsStream_;
private:
void initTPGData();
std::string id_;
std::vector<unsigned int> tpgCodes_;
std::vector<Size> tpgSizes_;
Size tpgResolution_;
};
int MaliC55CameraData::init()
{
int ret;
sd_ = std::make_unique<V4L2Subdevice>(entity_);
ret = sd_->open();
if (ret) {
LOG(MaliC55, Error) << "Failed to open sensor subdevice";
return ret;
}
/* If this camera is created from TPG, we return here. */
if (entity_->name() == "mali-c55 tpg") {
initTPGData();
return 0;
}
/*
	 * Register a CameraSensor if we connect to a sensor and create a
	 * V4L2 subdevice for the connected CSI-2 receiver.
*/
sensor_ = std::make_unique<CameraSensor>(entity_);
ret = sensor_->init();
if (ret)
return ret;
const MediaPad *sourcePad = entity_->getPadByIndex(0);
MediaEntity *csiEntity = sourcePad->links()[0]->sink()->entity();
csi_ = std::make_unique<V4L2Subdevice>(csiEntity);
	ret = csi_->open();
	if (ret) {
		LOG(MaliC55, Error) << "Failed to open CSI-2 subdevice";
		return ret;
	}
return 0;
}
void MaliC55CameraData::initTPGData()
{
/* Replicate the CameraSensor implementation for TPG. */
V4L2Subdevice::Formats formats = sd_->formats(0);
if (formats.empty())
return;
tpgCodes_ = utils::map_keys(formats);
std::sort(tpgCodes_.begin(), tpgCodes_.end());
for (const auto &format : formats) {
const std::vector<SizeRange> &ranges = format.second;
std::transform(ranges.begin(), ranges.end(), std::back_inserter(tpgSizes_),
[](const SizeRange &range) { return range.max; });
}
tpgResolution_ = tpgSizes_.back();
}
const std::vector<unsigned int> MaliC55CameraData::mbusCodes() const
{
if (sensor_)
return sensor_->mbusCodes();
return tpgCodes_;
}
const std::vector<Size> MaliC55CameraData::sizes(unsigned int mbusCode) const
{
if (sensor_)
return sensor_->sizes(mbusCode);
V4L2Subdevice::Formats formats = sd_->formats(0);
if (formats.empty())
return {};
std::vector<Size> sizes;
const auto &format = formats.find(mbusCode);
if (format == formats.end())
return {};
const std::vector<SizeRange> &ranges = format->second;
std::transform(ranges.begin(), ranges.end(), std::back_inserter(sizes),
[](const SizeRange &range) { return range.max; });
std::sort(sizes.begin(), sizes.end());
return sizes;
}
const Size MaliC55CameraData::resolution() const
{
if (sensor_)
return sensor_->resolution();
return tpgResolution_;
}
PixelFormat MaliC55CameraData::bestRawFormat() const
{
unsigned int bitDepth = 0;
PixelFormat rawFormat;
/*
* Iterate over all the supported PixelFormat and find the one
* supported by the camera with the largest bitdepth.
*/
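	/*
	 * If the sensor provides no RAW format at all, rawFormat stays
	 * default-constructed and callers detect that with isValid().
	 */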
for (const auto &maliFormat : maliC55FmtToCode) {
PixelFormat pixFmt = maliFormat.first;
if (!isFormatRaw(pixFmt))
continue;
unsigned int rawCode = maliFormat.second;
const auto rawSizes = sizes(rawCode);
if (rawSizes.empty())
continue;
BayerFormat bayer = BayerFormat::fromMbusCode(rawCode);
if (bayer.bitDepth > bitDepth) {
bitDepth = bayer.bitDepth;
rawFormat = pixFmt;
}
}
return rawFormat;
}
/*
* Make sure the provided raw pixel format is supported and adjust it to
* one of the supported ones if it's not.
*/
PixelFormat MaliC55CameraData::adjustRawFormat(const PixelFormat &rawFmt) const
{
/* Make sure the provided raw format is supported by the pipeline. */
auto it = maliC55FmtToCode.find(rawFmt);
if (it == maliC55FmtToCode.end())
return bestRawFormat();
/* Now make sure the RAW mbus code is supported by the image source. */
unsigned int rawCode = it->second;
const auto rawSizes = sizes(rawCode);
if (rawSizes.empty())
return bestRawFormat();
return rawFmt;
}
Size MaliC55CameraData::adjustRawSizes(const PixelFormat &rawFmt, const Size &rawSize) const
{
/* Just make sure the format is supported. */
auto it = maliC55FmtToCode.find(rawFmt);
if (it == maliC55FmtToCode.end())
return {};
/* Check if the size is natively supported. */
unsigned int rawCode = it->second;
const auto rawSizes = sizes(rawCode);
auto sizeIt = std::find(rawSizes.begin(), rawSizes.end(), rawSize);
if (sizeIt != rawSizes.end())
return rawSize;
/* Or adjust it to the closest supported size. */
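	/* "Closest" means the smallest summed width and height difference. */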
uint16_t distance = std::numeric_limits<uint16_t>::max();
Size bestSize;
for (const Size &size : rawSizes) {
uint16_t dist = std::abs(static_cast<int>(rawSize.width) -
static_cast<int>(size.width)) +
std::abs(static_cast<int>(rawSize.height) -
static_cast<int>(size.height));
if (dist < distance) {
			distance = dist;
bestSize = size;
}
}
return bestSize;
}
class MaliC55CameraConfiguration : public CameraConfiguration
{
public:
MaliC55CameraConfiguration(MaliC55CameraData *data)
: CameraConfiguration(), data_(data)
{
}
Status validate() override;
V4L2SubdeviceFormat sensorFormat_;
private:
static constexpr unsigned int kMaxStreams = 2;
const MaliC55CameraData *data_;
};
CameraConfiguration::Status MaliC55CameraConfiguration::validate()
{
Status status = Valid;
if (config_.empty())
return Invalid;
/* Only 2 streams available. */
if (config_.size() > kMaxStreams) {
config_.resize(kMaxStreams);
status = Adjusted;
}
bool frPipeAvailable = true;
StreamConfiguration *rawConfig = nullptr;
for (StreamConfiguration &config : config_) {
if (!isFormatRaw(config.pixelFormat))
continue;
if (rawConfig) {
LOG(MaliC55, Error)
<< "Only a single RAW stream is supported";
return Invalid;
}
rawConfig = &config;
}
Size maxSize = kMaliC55MaxSize;
if (rawConfig) {
/*
* \todo Take into account the Bayer components ordering once
* we support rotations.
*/
PixelFormat rawFormat =
data_->adjustRawFormat(rawConfig->pixelFormat);
if (rawFormat != rawConfig->pixelFormat) {
LOG(MaliC55, Debug)
<< "RAW format adjusted to " << rawFormat;
rawConfig->pixelFormat = rawFormat;
status = Adjusted;
}
Size rawSize =
data_->adjustRawSizes(rawFormat, rawConfig->size);
if (rawSize != rawConfig->size) {
LOG(MaliC55, Debug)
<< "RAW sizes adjusted to " << rawSize;
rawConfig->size = rawSize;
status = Adjusted;
}
maxSize = rawSize;
rawConfig->setStream(const_cast<Stream *>(&data_->frStream_));
frPipeAvailable = false;
}
/* Adjust processed streams. */
Size maxYuvSize;
for (StreamConfiguration &config : config_) {
if (isFormatRaw(config.pixelFormat))
continue;
/* Adjust format and size for processed streams. */
const auto it = maliC55FmtToCode.find(config.pixelFormat);
if (it == maliC55FmtToCode.end()) {
LOG(MaliC55, Debug)
<< "Format adjusted to " << formats::RGB565;
config.pixelFormat = formats::RGB565;
status = Adjusted;
}
Size size = std::clamp(config.size, kMaliC55MinSize, maxSize);
if (size != config.size) {
LOG(MaliC55, Debug)
<< "Size adjusted to " << size;
config.size = size;
status = Adjusted;
}
if (maxYuvSize < size)
maxYuvSize = size;
if (frPipeAvailable) {
config.setStream(const_cast<Stream *>(&data_->frStream_));
frPipeAvailable = false;
} else {
config.setStream(const_cast<Stream *>(&data_->dsStream_));
}
}
/* Compute the sensor format. */
/* If there's a RAW config, sensor configuration follows it. */
if (rawConfig) {
const auto it = maliC55FmtToCode.find(rawConfig->pixelFormat);
sensorFormat_.code = it->second;
sensorFormat_.size = rawConfig->size;
return status;
}
/* If there's no RAW config, compute the sensor configuration here. */
PixelFormat rawFormat = data_->bestRawFormat();
const auto it = maliC55FmtToCode.find(rawFormat);
sensorFormat_.code = it->second;
uint16_t distance = std::numeric_limits<uint16_t>::max();
const auto sizes = data_->sizes(it->second);
Size bestSize;
for (const auto &size : sizes) {
/* Skip sensor sizes that are smaller than the max YUV size. */
if (maxYuvSize.width > size.width ||
maxYuvSize.height > size.height)
continue;
uint16_t dist = std::abs(static_cast<int>(maxYuvSize.width) -
static_cast<int>(size.width)) +
std::abs(static_cast<int>(maxYuvSize.height) -
static_cast<int>(size.height));
if (dist < distance) {
			distance = dist;
bestSize = size;
}
}
sensorFormat_.size = bestSize;
LOG(MaliC55, Debug) << "Computed sensor configuration " << sensorFormat_;
return status;
}
class PipelineHandlerMaliC55 : public PipelineHandler
{
public:
PipelineHandlerMaliC55(CameraManager *manager);
std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
int start(Camera *camera, const ControlList *controls) override;
void stopDevice(Camera *camera) override;
int queueRequestDevice(Camera *camera, Request *request) override;
void bufferReady(FrameBuffer *buffer);
bool match(DeviceEnumerator *enumerator) override;
private:
struct MaliC55Pipe {
std::unique_ptr<V4L2Subdevice> resizer;
std::unique_ptr<V4L2VideoDevice> cap;
Stream *stream;
};
enum {
MaliC55FR,
MaliC55DS,
MaliC55NumPipes,
};
MaliC55CameraData *cameraData(Camera *camera)
{
return static_cast<MaliC55CameraData *>(camera->_d());
}
MaliC55Pipe *pipeFromStream(MaliC55CameraData *data, Stream *stream)
{
if (stream == &data->frStream_)
return &pipes_[MaliC55FR];
else if (stream == &data->dsStream_)
return &pipes_[MaliC55DS];
else
LOG(MaliC55, Fatal) << "Stream " << stream << " not valid";
return nullptr;
}
MaliC55Pipe *pipeFromStream(MaliC55CameraData *data, const Stream *stream)
{
return pipeFromStream(data, const_cast<Stream *>(stream));
}
void resetPipes()
{
for (MaliC55Pipe &pipe : pipes_)
pipe.stream = nullptr;
}
int configureRawStream(MaliC55CameraData *data,
const StreamConfiguration &config,
V4L2SubdeviceFormat &subdevFormat);
int configureProcessedStream(MaliC55CameraData *data,
const StreamConfiguration &config,
V4L2SubdeviceFormat &subdevFormat);
void registerMaliCamera(std::unique_ptr<MaliC55CameraData> data,
const std::string &name);
bool registerTPGCamera(MediaLink *link);
bool registerSensorCamera(MediaLink *link);
MediaDevice *media_;
std::unique_ptr<V4L2Subdevice> isp_;
std::array<MaliC55Pipe, MaliC55NumPipes> pipes_;
bool dsFitted_;
};
PipelineHandlerMaliC55::PipelineHandlerMaliC55(CameraManager *manager)
: PipelineHandler(manager), dsFitted_(true)
{
}
std::unique_ptr<CameraConfiguration>
PipelineHandlerMaliC55::generateConfiguration(Camera *camera,
Span<const StreamRole> roles)
{
MaliC55CameraData *data = cameraData(camera);
std::unique_ptr<CameraConfiguration> config =
std::make_unique<MaliC55CameraConfiguration>(data);
bool frPipeAvailable = true;
if (roles.empty())
return config;
/* Check if one stream is RAW to reserve the FR pipe for it. */
if (std::find(roles.begin(), roles.end(), StreamRole::Raw) != roles.end())
frPipeAvailable = false;
for (const StreamRole &role : roles) {
struct MaliC55Pipe *pipe;
/* Assign pipe for this role. */
if (role == StreamRole::Raw) {
pipe = &pipes_[MaliC55FR];
} else {
if (frPipeAvailable) {
pipe = &pipes_[MaliC55FR];
frPipeAvailable = false;
} else {
pipe = &pipes_[MaliC55DS];
}
}
Size size = std::min(Size{ 1920, 1080 }, data->resolution());
PixelFormat pixelFormat;
switch (role) {
case StreamRole::StillCapture:
size = data->resolution();
[[fallthrough]];
case StreamRole::VideoRecording:
pixelFormat = formats::NV12;
break;
case StreamRole::Viewfinder:
pixelFormat = formats::RGB565;
break;
case StreamRole::Raw:
pixelFormat = data->bestRawFormat();
if (!pixelFormat.isValid()) {
LOG(MaliC55, Error)
<< "Camera does not support RAW formats";
return nullptr;
}
size = data->resolution();
break;
default:
LOG(MaliC55, Error)
<< "Requested stream role not supported: " << role;
return nullptr;
}
std::map<PixelFormat, std::vector<SizeRange>> formats;
for (const auto &maliFormat : maliC55FmtToCode) {
PixelFormat pixFmt = maliFormat.first;
bool isRaw = isFormatRaw(pixFmt);
/* RAW formats are only supported on the FR pipe. */
if (pipe != &pipes_[MaliC55FR] && isRaw)
continue;
if (isRaw) {
/* Make sure the mbus code is supported. */
unsigned int rawCode = maliFormat.second;
const auto sizes = data->sizes(rawCode);
if (sizes.empty())
continue;
/* And list all sizes the sensor can produce. */
std::vector<SizeRange> sizeRanges;
std::transform(sizes.begin(), sizes.end(),
std::back_inserter(sizeRanges),
[](const Size &s) {
return SizeRange(s);
});
formats[pixFmt] = sizeRanges;
} else {
/* Processed formats are always available. */
Size maxSize = std::min(kMaliC55MaxSize,
data->resolution());
formats[pixFmt] = { kMaliC55MinSize, maxSize };
}
}
StreamFormats streamFormats(formats);
StreamConfiguration cfg(streamFormats);
cfg.pixelFormat = pixelFormat;
cfg.bufferCount = 4;
cfg.size = size;
config->addConfiguration(cfg);
}
if (config->validate() == CameraConfiguration::Invalid)
return nullptr;
return config;
}
int PipelineHandlerMaliC55::configureRawStream(MaliC55CameraData *data,
const StreamConfiguration &config,
V4L2SubdeviceFormat &subdevFormat)
{
Stream *stream = config.stream();
MaliC55Pipe *pipe = pipeFromStream(data, stream);
if (pipe != &pipes_[MaliC55FR]) {
LOG(MaliC55, Fatal) << "Only the FR pipe supports RAW capture.";
return -EINVAL;
}
/* Enable the debayer route to set fixed internal format on pad #0. */
V4L2Subdevice::Routing routing = {};
routing.emplace_back(V4L2Subdevice::Stream{ 0, 0 },
V4L2Subdevice::Stream{ 1, 0 },
V4L2_SUBDEV_ROUTE_FL_ACTIVE);
int ret = pipe->resizer->setRouting(&routing, V4L2Subdevice::ActiveFormat);
if (ret)
return ret;
unsigned int rawCode = subdevFormat.code;
subdevFormat.code = kMaliC55ISPInternalFormat;
ret = pipe->resizer->setFormat(0, &subdevFormat);
if (ret)
return ret;
/* Enable the bypass route and apply RAW formats there. */
routing.clear();
routing.emplace_back(V4L2Subdevice::Stream{ 2, 0 },
V4L2Subdevice::Stream{ 1, 0 },
V4L2_SUBDEV_ROUTE_FL_ACTIVE);
ret = pipe->resizer->setRouting(&routing, V4L2Subdevice::ActiveFormat);
if (ret)
return ret;
subdevFormat.code = rawCode;
ret = pipe->resizer->setFormat(2, &subdevFormat);
if (ret)
return ret;
ret = pipe->resizer->setFormat(1, &subdevFormat);
if (ret)
return ret;
return 0;
}
int PipelineHandlerMaliC55::configureProcessedStream(MaliC55CameraData *data,
const StreamConfiguration &config,
V4L2SubdeviceFormat &subdevFormat)
{
Stream *stream = config.stream();
MaliC55Pipe *pipe = pipeFromStream(data, stream);
/* Enable the debayer route on the resizer pipe. */
V4L2Subdevice::Routing routing = {};
routing.emplace_back(V4L2Subdevice::Stream{ 0, 0 },
V4L2Subdevice::Stream{ 1, 0 },
V4L2_SUBDEV_ROUTE_FL_ACTIVE);
int ret = pipe->resizer->setRouting(&routing, V4L2Subdevice::ActiveFormat);
if (ret)
return ret;
subdevFormat.code = kMaliC55ISPInternalFormat;
ret = pipe->resizer->setFormat(0, &subdevFormat);
if (ret)
return ret;
/* \todo Configure the resizer crop/compose rectangles. */
Rectangle ispCrop = { 0, 0, config.size };
ret = pipe->resizer->setSelection(0, V4L2_SEL_TGT_CROP, &ispCrop);
if (ret)
return ret;
ret = pipe->resizer->setSelection(0, V4L2_SEL_TGT_COMPOSE, &ispCrop);
if (ret)
return ret;
subdevFormat.code = maliC55FmtToCode.find(config.pixelFormat)->second;
return pipe->resizer->setFormat(1, &subdevFormat);
}
int PipelineHandlerMaliC55::configure(Camera *camera,
CameraConfiguration *config)
{
resetPipes();
int ret = media_->disableLinks();
if (ret)
return ret;
	/* Link the graph depending on whether we are operating the TPG or a sensor. */
MaliC55CameraData *data = cameraData(camera);
if (data->csi_) {
const MediaEntity *csiEntity = data->csi_->entity();
ret = csiEntity->getPadByIndex(1)->links()[0]->setEnabled(true);
} else {
ret = data->entity_->getPadByIndex(0)->links()[0]->setEnabled(true);
}
if (ret)
return ret;
MaliC55CameraConfiguration *maliConfig =
static_cast<MaliC55CameraConfiguration *>(config);
V4L2SubdeviceFormat subdevFormat = maliConfig->sensorFormat_;
ret = data->sd_->getFormat(0, &subdevFormat);
if (ret)
return ret;
if (data->csi_) {
ret = data->csi_->setFormat(0, &subdevFormat);
if (ret)
return ret;
ret = data->csi_->setFormat(1, &subdevFormat);
if (ret)
return ret;
}
/*
* Propagate the format to the ISP sink pad and configure the input
* crop rectangle (no crop at the moment).
*
* \todo Configure the CSI-2 receiver.
*/
ret = isp_->setFormat(0, &subdevFormat);
if (ret)
return ret;
Rectangle ispCrop(0, 0, subdevFormat.size);
ret = isp_->setSelection(0, V4L2_SEL_TGT_CROP, &ispCrop);
if (ret)
return ret;
/*
	 * Configure the resizer: fixed format on the sink pad; use the media
* bus code associated with the desired capture format on the source
* pad.
*
* Configure the crop and compose rectangles to match the desired
* stream output size
*
* \todo Make the crop/scaler configurable
*/
for (const StreamConfiguration &streamConfig : *config) {
Stream *stream = streamConfig.stream();
MaliC55Pipe *pipe = pipeFromStream(data, stream);
if (isFormatRaw(streamConfig.pixelFormat))
ret = configureRawStream(data, streamConfig, subdevFormat);
else
ret = configureProcessedStream(data, streamConfig, subdevFormat);
if (ret) {
LOG(MaliC55, Error) << "Failed to configure pipeline";
return ret;
}
/* Now apply the pixel format and size to the capture device. */
V4L2DeviceFormat captureFormat;
captureFormat.fourcc = pipe->cap->toV4L2PixelFormat(streamConfig.pixelFormat);
captureFormat.size = streamConfig.size;
ret = pipe->cap->setFormat(&captureFormat);
if (ret)
return ret;
pipe->stream = stream;
}
return 0;
}
int PipelineHandlerMaliC55::exportFrameBuffers(Camera *camera, Stream *stream,
std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
MaliC55Pipe *pipe = pipeFromStream(cameraData(camera), stream);
unsigned int count = stream->configuration().bufferCount;
return pipe->cap->exportBuffers(count, buffers);
}
int PipelineHandlerMaliC55::start([[maybe_unused]] Camera *camera, [[maybe_unused]] const ControlList *controls)
{
for (MaliC55Pipe &pipe : pipes_) {
if (!pipe.stream)
continue;
Stream *stream = pipe.stream;
int ret = pipe.cap->importBuffers(stream->configuration().bufferCount);
if (ret) {
LOG(MaliC55, Error) << "Failed to import buffers";
return ret;
}
ret = pipe.cap->streamOn();
if (ret) {
LOG(MaliC55, Error) << "Failed to start stream";
return ret;
}
}
return 0;
}
void PipelineHandlerMaliC55::stopDevice([[maybe_unused]] Camera *camera)
{
for (MaliC55Pipe &pipe : pipes_) {
if (!pipe.stream)
continue;
pipe.cap->streamOff();
pipe.cap->releaseBuffers();
}
}
int PipelineHandlerMaliC55::queueRequestDevice(Camera *camera, Request *request)
{
int ret;
for (auto &[stream, buffer] : request->buffers()) {
MaliC55Pipe *pipe = pipeFromStream(cameraData(camera), stream);
ret = pipe->cap->queueBuffer(buffer);
if (ret)
return ret;
}
return 0;
}
void PipelineHandlerMaliC55::bufferReady(FrameBuffer *buffer)
{
Request *request = buffer->request();
completeBuffer(request, buffer);
if (request->hasPendingBuffers())
return;
completeRequest(request);
}
void PipelineHandlerMaliC55::registerMaliCamera(std::unique_ptr<MaliC55CameraData> data,
const std::string &name)
{
std::set<Stream *> streams{ &data->frStream_ };
if (dsFitted_)
streams.insert(&data->dsStream_);
std::shared_ptr<Camera> camera = Camera::create(std::move(data),
name, streams);
registerCamera(std::move(camera));
}
/*
* The only camera we support through direct connection to the ISP is the
* Mali-C55 TPG. Check we have that and warn if not.
*/
bool PipelineHandlerMaliC55::registerTPGCamera(MediaLink *link)
{
const std::string &name = link->source()->entity()->name();
if (name != "mali-c55 tpg") {
LOG(MaliC55, Warning) << "Unsupported direct connection to "
<< link->source()->entity()->name();
/*
* Return true and just skip registering a camera for this
* entity.
*/
return true;
}
std::unique_ptr<MaliC55CameraData> data =
std::make_unique<MaliC55CameraData>(this, link->source()->entity());
if (data->init())
return false;
registerMaliCamera(std::move(data), name);
return true;
}
/*
* Register a Camera for each sensor connected to the ISP through a CSI-2
* receiver.
*
* \todo Support more complex topologies, such as video muxes.
*/
bool PipelineHandlerMaliC55::registerSensorCamera(MediaLink *ispLink)
{
MediaEntity *csi2 = ispLink->source()->entity();
const MediaPad *csi2Sink = csi2->getPadByIndex(0);
for (MediaLink *link : csi2Sink->links()) {
MediaEntity *sensor = link->source()->entity();
unsigned int function = sensor->function();
if (function != MEDIA_ENT_F_CAM_SENSOR)
continue;
std::unique_ptr<MaliC55CameraData> data =
std::make_unique<MaliC55CameraData>(this, sensor);
if (data->init())
return false;
/* \todo: Init properties and controls. */
registerMaliCamera(std::move(data), sensor->name());
}
return true;
}
bool PipelineHandlerMaliC55::match(DeviceEnumerator *enumerator)
{
const MediaPad *ispSink;
/*
* We search for just the ISP subdevice and the full resolution pipe.
* The TPG and the downscale pipe are both optional blocks and may not
* be fitted.
*/
DeviceMatch dm("mali-c55");
dm.add("mali-c55 isp");
dm.add("mali-c55 resizer fr");
dm.add("mali-c55 fr");
media_ = acquireMediaDevice(enumerator, dm);
if (!media_)
return false;
isp_ = V4L2Subdevice::fromEntityName(media_, "mali-c55 isp");
if (isp_->open() < 0)
return false;
MaliC55Pipe *frPipe = &pipes_[MaliC55FR];
frPipe->resizer = V4L2Subdevice::fromEntityName(media_, "mali-c55 resizer fr");
if (frPipe->resizer->open() < 0)
return false;
frPipe->cap = V4L2VideoDevice::fromEntityName(media_, "mali-c55 fr");
if (frPipe->cap->open() < 0)
return false;
frPipe->cap->bufferReady.connect(this, &PipelineHandlerMaliC55::bufferReady);
dsFitted_ = !!media_->getEntityByName("mali-c55 ds");
if (dsFitted_) {
LOG(MaliC55, Debug) << "Downscaler pipe is fitted";
MaliC55Pipe *dsPipe = &pipes_[MaliC55DS];
dsPipe->resizer = V4L2Subdevice::fromEntityName(media_, "mali-c55 resizer ds");
if (dsPipe->resizer->open() < 0)
return false;
dsPipe->cap = V4L2VideoDevice::fromEntityName(media_, "mali-c55 ds");
if (dsPipe->cap->open() < 0)
return false;
dsPipe->cap->bufferReady.connect(this, &PipelineHandlerMaliC55::bufferReady);
}
ispSink = isp_->entity()->getPadByIndex(0);
if (!ispSink || ispSink->links().empty()) {
LOG(MaliC55, Error) << "ISP sink pad error";
return false;
}
/*
* We could have several links pointing to the ISP's sink pad, which
* will be from entities with one of the following functions:
*
* MEDIA_ENT_F_CAM_SENSOR - The test pattern generator
* MEDIA_ENT_F_VID_IF_BRIDGE - A CSI-2 receiver
* MEDIA_ENT_F_IO_V4L - An input device
*
* The last one will be unsupported for now. The TPG is relatively easy,
* we just register a Camera for it. If we have a CSI-2 receiver we need
* to check its sink pad and register Cameras for anything connected to
* it (probably...there are some complex situations in which that might
* not be true but let's pretend they don't exist until we come across
* them)
*/
bool registered;
for (MediaLink *link : ispSink->links()) {
unsigned int function = link->source()->entity()->function();
switch (function) {
case MEDIA_ENT_F_CAM_SENSOR:
registered = registerTPGCamera(link);
if (!registered)
return registered;
break;
case MEDIA_ENT_F_VID_IF_BRIDGE:
registered = registerSensorCamera(link);
if (!registered)
return registered;
break;
case MEDIA_ENT_F_IO_V4L:
LOG(MaliC55, Warning) << "Memory input not yet supported";
break;
default:
LOG(MaliC55, Error) << "Unsupported entity function";
return false;
}
}
return true;
}
REGISTER_PIPELINE_HANDLER(PipelineHandlerMaliC55, "mali-c55")
} /* namespace libcamera */
|
0 | repos/libcamera/src/libcamera/pipeline | repos/libcamera/src/libcamera/pipeline/imx8-isi/imx8-isi.cpp | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2022 - Jacopo Mondi <[email protected]>
*
* Pipeline handler for ISI interface found on NXP i.MX8 SoC
*/
#include <algorithm>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <vector>
#include <libcamera/base/log.h>
#include <libcamera/base/utils.h>
#include <libcamera/camera_manager.h>
#include <libcamera/formats.h>
#include <libcamera/geometry.h>
#include <libcamera/stream.h>
#include "libcamera/internal/bayer_format.h"
#include "libcamera/internal/camera.h"
#include "libcamera/internal/camera_sensor.h"
#include "libcamera/internal/device_enumerator.h"
#include "libcamera/internal/media_device.h"
#include "libcamera/internal/pipeline_handler.h"
#include "libcamera/internal/v4l2_subdevice.h"
#include "libcamera/internal/v4l2_videodevice.h"
#include "linux/media-bus-format.h"
namespace libcamera {
LOG_DEFINE_CATEGORY(ISI)
class PipelineHandlerISI;
class ISICameraData : public Camera::Private
{
public:
ISICameraData(PipelineHandler *ph)
: Camera::Private(ph)
{
/*
* \todo Assume 2 channels only for now, as that's the number of
* available channels on i.MX8MP.
*/
streams_.resize(2);
}
PipelineHandlerISI *pipe();
int init();
unsigned int pipeIndex(const Stream *stream)
{
return stream - &*streams_.begin();
}
unsigned int getRawMediaBusFormat(PixelFormat *pixelFormat) const;
unsigned int getYuvMediaBusFormat(const PixelFormat &pixelFormat) const;
unsigned int getMediaBusFormat(PixelFormat *pixelFormat) const;
std::unique_ptr<CameraSensor> sensor_;
std::unique_ptr<V4L2Subdevice> csis_;
std::vector<Stream> streams_;
std::vector<Stream *> enabledStreams_;
unsigned int xbarSink_;
};
class ISICameraConfiguration : public CameraConfiguration
{
public:
ISICameraConfiguration(ISICameraData *data)
: data_(data)
{
}
Status validate() override;
static const std::map<PixelFormat, unsigned int> formatsMap_;
V4L2SubdeviceFormat sensorFormat_;
private:
CameraConfiguration::Status
validateRaw(std::set<Stream *> &availableStreams, const Size &maxResolution);
CameraConfiguration::Status
validateYuv(std::set<Stream *> &availableStreams, const Size &maxResolution);
const ISICameraData *data_;
};
class PipelineHandlerISI : public PipelineHandler
{
public:
PipelineHandlerISI(CameraManager *manager);
bool match(DeviceEnumerator *enumerator) override;
std::unique_ptr<CameraConfiguration>
generateConfiguration(Camera *camera, Span<const StreamRole> roles) override;
int configure(Camera *camera, CameraConfiguration *config) override;
int exportFrameBuffers(Camera *camera, Stream *stream,
std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
int start(Camera *camera, const ControlList *controls) override;
protected:
void stopDevice(Camera *camera) override;
int queueRequestDevice(Camera *camera, Request *request) override;
private:
static constexpr Size kPreviewSize = { 1920, 1080 };
static constexpr Size kMinISISize = { 1, 1 };
struct Pipe {
std::unique_ptr<V4L2Subdevice> isi;
std::unique_ptr<V4L2VideoDevice> capture;
};
ISICameraData *cameraData(Camera *camera)
{
return static_cast<ISICameraData *>(camera->_d());
}
Pipe *pipeFromStream(Camera *camera, const Stream *stream);
StreamConfiguration generateYUVConfiguration(Camera *camera,
const Size &size);
StreamConfiguration generateRawConfiguration(Camera *camera);
void bufferReady(FrameBuffer *buffer);
MediaDevice *isiDev_;
std::unique_ptr<V4L2Subdevice> crossbar_;
std::vector<Pipe> pipes_;
};
/* -----------------------------------------------------------------------------
* Camera Data
*/
PipelineHandlerISI *ISICameraData::pipe()
{
return static_cast<PipelineHandlerISI *>(Camera::Private::pipe());
}
/* Open and initialize pipe components. */
int ISICameraData::init()
{
int ret = sensor_->init();
if (ret)
return ret;
ret = csis_->open();
if (ret)
return ret;
properties_ = sensor_->properties();
return 0;
}
/*
* Get a RAW Bayer media bus format compatible with the requested pixelFormat.
*
* If the requested pixelFormat cannot be produced by the sensor adjust it to
* the one corresponding to the media bus format with the largest bit-depth.
*/
unsigned int ISICameraData::getRawMediaBusFormat(PixelFormat *pixelFormat) const
{
std::vector<unsigned int> mbusCodes = sensor_->mbusCodes();
static const std::map<PixelFormat, unsigned int> rawFormats = {
{ formats::SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8 },
{ formats::SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8 },
{ formats::SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8 },
{ formats::SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8 },
{ formats::SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10 },
{ formats::SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10 },
{ formats::SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10 },
{ formats::SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10 },
{ formats::SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12 },
{ formats::SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12 },
{ formats::SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12 },
{ formats::SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12 },
{ formats::SBGGR14, MEDIA_BUS_FMT_SBGGR14_1X14 },
{ formats::SGBRG14, MEDIA_BUS_FMT_SGBRG14_1X14 },
{ formats::SGRBG14, MEDIA_BUS_FMT_SGRBG14_1X14 },
{ formats::SRGGB14, MEDIA_BUS_FMT_SRGGB14_1X14 },
};
/*
* Make sure the requested PixelFormat is supported in the above
* map and the sensor can produce the compatible mbus code.
*/
auto it = rawFormats.find(*pixelFormat);
if (it != rawFormats.end() &&
std::count(mbusCodes.begin(), mbusCodes.end(), it->second))
return it->second;
if (it == rawFormats.end())
LOG(ISI, Warning) << pixelFormat
<< " not supported in ISI formats map.";
/*
* The desired pixel format cannot be produced. Adjust it to the one
* corresponding to the raw media bus format with the largest bit-depth
* the sensor provides.
*/
unsigned int sensorCode = 0;
unsigned int maxDepth = 0;
*pixelFormat = {};
for (unsigned int code : mbusCodes) {
/* Make sure the media bus format is RAW Bayer. */
const BayerFormat &bayerFormat = BayerFormat::fromMbusCode(code);
if (!bayerFormat.isValid())
continue;
/* Make sure the media format is supported. */
it = std::find_if(rawFormats.begin(), rawFormats.end(),
[code](auto &rawFormat) {
return rawFormat.second == code;
});
if (it == rawFormats.end()) {
LOG(ISI, Warning) << bayerFormat
<< " not supported in ISI formats map.";
continue;
}
/* Pick the one with the largest bit depth. */
if (bayerFormat.bitDepth > maxDepth) {
maxDepth = bayerFormat.bitDepth;
*pixelFormat = it->first;
sensorCode = code;
}
}
if (!pixelFormat->isValid())
LOG(ISI, Error) << "Cannot find a supported RAW format";
return sensorCode;
}
/*
* Get a YUV/RGB media bus format from which the ISI can produce a processed
* stream, preferring codes with the same colour encoding as the requested
* pixelformat.
*
* If the sensor does not provide any YUV/RGB media bus format the ISI cannot
* generate any processed pixel format as it cannot debayer.
*/
unsigned int ISICameraData::getYuvMediaBusFormat(const PixelFormat &pixelFormat) const
{
std::vector<unsigned int> mbusCodes = sensor_->mbusCodes();
/*
* The ISI can produce YUV/RGB pixel formats from any non-RAW Bayer
* media bus formats.
*
* Keep the list in sync with the mxc_isi_bus_formats[] array in
* the ISI driver.
*/
std::vector<unsigned int> yuvCodes = {
MEDIA_BUS_FMT_UYVY8_1X16,
MEDIA_BUS_FMT_YUV8_1X24,
MEDIA_BUS_FMT_RGB565_1X16,
MEDIA_BUS_FMT_RGB888_1X24,
};
std::sort(mbusCodes.begin(), mbusCodes.end());
std::sort(yuvCodes.begin(), yuvCodes.end());
std::vector<unsigned int> supportedCodes;
std::set_intersection(mbusCodes.begin(), mbusCodes.end(),
yuvCodes.begin(), yuvCodes.end(),
std::back_inserter(supportedCodes));
if (supportedCodes.empty()) {
LOG(ISI, Warning) << "Cannot find a supported YUV/RGB format";
return 0;
}
/* Prefer codes with the same encoding as the requested pixel format. */
const PixelFormatInfo &info = PixelFormatInfo::info(pixelFormat);
for (unsigned int code : supportedCodes) {
if (info.colourEncoding == PixelFormatInfo::ColourEncodingYUV &&
(code == MEDIA_BUS_FMT_UYVY8_1X16 ||
code == MEDIA_BUS_FMT_YUV8_1X24))
return code;
if (info.colourEncoding == PixelFormatInfo::ColourEncodingRGB &&
(code == MEDIA_BUS_FMT_RGB565_1X16 ||
code == MEDIA_BUS_FMT_RGB888_1X24))
return code;
}
/* Otherwise return the first found code. */
return supportedCodes[0];
}
unsigned int ISICameraData::getMediaBusFormat(PixelFormat *pixelFormat) const
{
if (PixelFormatInfo::info(*pixelFormat).colourEncoding ==
PixelFormatInfo::ColourEncodingRAW)
return getRawMediaBusFormat(pixelFormat);
return getYuvMediaBusFormat(*pixelFormat);
}
/* -----------------------------------------------------------------------------
* Camera Configuration
*/
/*
* ISICameraConfiguration::formatsMap_ records the association between an output
* pixel format and the ISI source pixel format to be applied to the pipeline.
*/
const std::map<PixelFormat, unsigned int> ISICameraConfiguration::formatsMap_ = {
{ formats::YUYV, MEDIA_BUS_FMT_YUV8_1X24 },
{ formats::AVUY8888, MEDIA_BUS_FMT_YUV8_1X24 },
{ formats::NV12, MEDIA_BUS_FMT_YUV8_1X24 },
{ formats::NV16, MEDIA_BUS_FMT_YUV8_1X24 },
{ formats::YUV444, MEDIA_BUS_FMT_YUV8_1X24 },
{ formats::RGB565, MEDIA_BUS_FMT_RGB888_1X24 },
{ formats::BGR888, MEDIA_BUS_FMT_RGB888_1X24 },
{ formats::RGB888, MEDIA_BUS_FMT_RGB888_1X24 },
{ formats::XRGB8888, MEDIA_BUS_FMT_RGB888_1X24 },
{ formats::ABGR8888, MEDIA_BUS_FMT_RGB888_1X24 },
{ formats::SBGGR8, MEDIA_BUS_FMT_SBGGR8_1X8 },
{ formats::SGBRG8, MEDIA_BUS_FMT_SGBRG8_1X8 },
{ formats::SGRBG8, MEDIA_BUS_FMT_SGRBG8_1X8 },
{ formats::SRGGB8, MEDIA_BUS_FMT_SRGGB8_1X8 },
{ formats::SBGGR10, MEDIA_BUS_FMT_SBGGR10_1X10 },
{ formats::SGBRG10, MEDIA_BUS_FMT_SGBRG10_1X10 },
{ formats::SGRBG10, MEDIA_BUS_FMT_SGRBG10_1X10 },
{ formats::SRGGB10, MEDIA_BUS_FMT_SRGGB10_1X10 },
{ formats::SBGGR12, MEDIA_BUS_FMT_SBGGR12_1X12 },
{ formats::SGBRG12, MEDIA_BUS_FMT_SGBRG12_1X12 },
{ formats::SGRBG12, MEDIA_BUS_FMT_SGRBG12_1X12 },
{ formats::SRGGB12, MEDIA_BUS_FMT_SRGGB12_1X12 },
};
/*
* Adjust stream configuration when the first requested stream is RAW: all the
* streams will have the same RAW pixelformat and size.
*/
CameraConfiguration::Status
ISICameraConfiguration::validateRaw(std::set<Stream *> &availableStreams,
const Size &maxResolution)
{
CameraConfiguration::Status status = Valid;
/*
* Make sure the requested RAW format is supported by the
* pipeline, otherwise adjust it.
*/
std::vector<unsigned int> mbusCodes = data_->sensor_->mbusCodes();
StreamConfiguration &rawConfig = config_[0];
PixelFormat rawFormat = rawConfig.pixelFormat;
unsigned int sensorCode = data_->getRawMediaBusFormat(&rawFormat);
if (!sensorCode) {
LOG(ISI, Error) << "Cannot adjust RAW pixelformat "
<< rawConfig.pixelFormat;
return Invalid;
}
if (rawFormat != rawConfig.pixelFormat) {
LOG(ISI, Debug) << "RAW pixelformat adjusted to "
<< rawFormat;
rawConfig.pixelFormat = rawFormat;
status = Adjusted;
}
/* Cap the RAW stream size to the maximum resolution. */
const Size configSize = rawConfig.size;
rawConfig.size.boundTo(maxResolution);
if (rawConfig.size != configSize) {
LOG(ISI, Debug) << "RAW size adjusted to "
<< rawConfig.size;
status = Adjusted;
}
/* Adjust all other streams to RAW. */
for (const auto &[i, cfg] : utils::enumerate(config_)) {
LOG(ISI, Debug) << "Stream " << i << ": " << cfg.toString();
const PixelFormat pixFmt = cfg.pixelFormat;
const Size size = cfg.size;
cfg.pixelFormat = rawConfig.pixelFormat;
cfg.size = rawConfig.size;
if (cfg.pixelFormat != pixFmt || cfg.size != size) {
LOG(ISI, Debug) << "Stream " << i << " adjusted to "
<< cfg.toString();
status = Adjusted;
}
const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);
cfg.stride = info.stride(cfg.size.width, 0);
cfg.frameSize = info.frameSize(cfg.size, info.bitsPerPixel);
/* Assign streams in the order they are presented. */
auto stream = availableStreams.extract(availableStreams.begin());
cfg.setStream(stream.value());
}
return status;
}
/*
* Adjust stream configuration when the first requested stream is not RAW: all
* the streams will be either YUV or RGB processed formats.
*/
CameraConfiguration::Status
ISICameraConfiguration::validateYuv(std::set<Stream *> &availableStreams,
const Size &maxResolution)
{
CameraConfiguration::Status status = Valid;
StreamConfiguration &yuvConfig = config_[0];
PixelFormat yuvPixelFormat = yuvConfig.pixelFormat;
/*
* Make sure the sensor can produce a compatible YUV/RGB media bus
* format. If the sensor can only produce RAW Bayer we can only fail
* here as we can't adjust to anything but RAW.
*/
unsigned int yuvMediaBusCode = data_->getYuvMediaBusFormat(yuvPixelFormat);
if (!yuvMediaBusCode) {
LOG(ISI, Error) << "Cannot adjust pixelformat "
<< yuvConfig.pixelFormat;
return Invalid;
}
/* Adjust all the other streams. */
for (const auto &[i, cfg] : utils::enumerate(config_)) {
LOG(ISI, Debug) << "Stream " << i << ": " << cfg.toString();
/* If the stream is RAW or not supported default it to YUYV. */
const PixelFormatInfo &cfgInfo = PixelFormatInfo::info(cfg.pixelFormat);
if (cfgInfo.colourEncoding == PixelFormatInfo::ColourEncodingRAW ||
!formatsMap_.count(cfg.pixelFormat)) {
LOG(ISI, Debug) << "Stream " << i << " format: "
<< cfg.pixelFormat << " adjusted to YUYV";
cfg.pixelFormat = formats::YUYV;
status = Adjusted;
}
/* Cap the streams size to the maximum accepted resolution. */
Size configSize = cfg.size;
cfg.size.boundTo(maxResolution);
if (cfg.size != configSize) {
LOG(ISI, Debug)
<< "Stream " << i << " adjusted to " << cfg.size;
status = Adjusted;
}
/* Re-fetch the pixel format info in case it has been adjusted. */
const PixelFormatInfo &info = PixelFormatInfo::info(cfg.pixelFormat);
/* \todo Multiplane ? */
cfg.stride = info.stride(cfg.size.width, 0);
cfg.frameSize = info.frameSize(cfg.size, info.bitsPerPixel);
/* Assign streams in the order they are presented. */
auto stream = availableStreams.extract(availableStreams.begin());
cfg.setStream(stream.value());
}
return status;
}
CameraConfiguration::Status ISICameraConfiguration::validate()
{
Status status = Valid;
std::set<Stream *> availableStreams;
std::transform(data_->streams_.begin(), data_->streams_.end(),
std::inserter(availableStreams, availableStreams.end()),
[](const Stream &s) { return const_cast<Stream *>(&s); });
if (config_.empty())
return Invalid;
/* Cap the number of streams to the number of available ISI pipes. */
if (config_.size() > availableStreams.size()) {
config_.resize(availableStreams.size());
status = Adjusted;
}
/*
* If more than a single stream is requested, the maximum allowed input
* image width is 2048. Cap the maximum image size accordingly.
*
* \todo The (size > 1) check only applies to i.MX8MP which has 2 ISI
* channels. SoCs with more channels than the i.MX8MP are capable of
* supporting more streams with input width > 2048 by chaining
* successive channels together. Define a policy for channels allocation
* to fully support other SoCs.
*/
CameraSensor *sensor = data_->sensor_.get();
Size maxResolution = sensor->resolution();
if (config_.size() > 1)
maxResolution.width = std::min(2048U, maxResolution.width);
/* Validate streams according to the format of the first one. */
const PixelFormatInfo info = PixelFormatInfo::info(config_[0].pixelFormat);
Status validationStatus;
if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW)
validationStatus = validateRaw(availableStreams, maxResolution);
else
validationStatus = validateYuv(availableStreams, maxResolution);
if (validationStatus == Invalid)
return Invalid;
if (validationStatus == Adjusted)
status = Adjusted;
/*
* Sensor format selection policy: the first stream selects the media
* bus code to use, the largest stream selects the size.
*
* \todo The sensor format selection policy could be changed to
* prefer operating the sensor at full resolution to prioritize
	 * image quality in exchange for a usually slower frame rate.
	 * Usage of the STILL_CAPTURE role could be considered for this.
*/
Size maxSize;
for (const auto &cfg : config_) {
if (cfg.size > maxSize)
maxSize = cfg.size;
}
PixelFormat pixelFormat = config_[0].pixelFormat;
V4L2SubdeviceFormat sensorFormat{};
sensorFormat.code = data_->getMediaBusFormat(&pixelFormat);
sensorFormat.size = maxSize;
LOG(ISI, Debug) << "Computed sensor configuration: " << sensorFormat;
/*
* We can't use CameraSensor::getFormat() as it might return a
* format larger than our strict width limit, as that function
	 * prioritizes formats with the same aspect ratio over formats with a
	 * smaller difference in size.
*
	 * Manually walk all the sensor-supported sizes, searching for the
	 * smallest larger format. The aspect ratio is not taken into account,
	 * as the ISI can freely scale.
*/
auto sizes = sensor->sizes(sensorFormat.code);
Size bestSize;
for (const Size &s : sizes) {
/* Ignore smaller sizes. */
if (s.width < sensorFormat.size.width ||
s.height < sensorFormat.size.height)
continue;
/* Make sure the width stays in the limits. */
if (s.width > maxResolution.width)
continue;
bestSize = s;
break;
}
/*
* This should happen only if the sensor can only produce formats that
* exceed the maximum allowed input width.
*/
if (bestSize.isNull()) {
LOG(ISI, Error) << "Unable to find a suitable sensor format";
return Invalid;
}
sensorFormat_.code = sensorFormat.code;
sensorFormat_.size = bestSize;
LOG(ISI, Debug) << "Selected sensor format: " << sensorFormat_;
return status;
}
/* -----------------------------------------------------------------------------
* Pipeline Handler
*/
PipelineHandlerISI::PipelineHandlerISI(CameraManager *manager)
: PipelineHandler(manager)
{
}
/*
 * Generate a StreamConfiguration for the YUV/RGB use case.
*
 * Verify if the sensor can produce a YUV/RGB media bus format and collect
* all the processed pixel formats the ISI can generate as supported stream
* configurations.
*/
StreamConfiguration PipelineHandlerISI::generateYUVConfiguration(Camera *camera,
const Size &size)
{
ISICameraData *data = cameraData(camera);
PixelFormat pixelFormat = formats::YUYV;
unsigned int mbusCode;
mbusCode = data->getYuvMediaBusFormat(pixelFormat);
if (!mbusCode)
return {};
/* Adjust the requested size to the sensor's capabilities. */
V4L2SubdeviceFormat sensorFmt;
sensorFmt.code = mbusCode;
sensorFmt.size = size;
int ret = data->sensor_->tryFormat(&sensorFmt);
if (ret) {
LOG(ISI, Error) << "Failed to try sensor format.";
return {};
}
Size sensorSize = sensorFmt.size;
/*
* Populate the StreamConfiguration.
*
* As the sensor supports at least one YUV/RGB media bus format all the
* processed ones in formatsMap_ can be generated from it.
*/
std::map<PixelFormat, std::vector<SizeRange>> streamFormats;
for (const auto &[pixFmt, pipeFmt] : ISICameraConfiguration::formatsMap_) {
const PixelFormatInfo &info = PixelFormatInfo::info(pixFmt);
if (info.colourEncoding == PixelFormatInfo::ColourEncodingRAW)
continue;
streamFormats[pixFmt] = { { kMinISISize, sensorSize } };
}
StreamFormats formats(streamFormats);
StreamConfiguration cfg(formats);
cfg.pixelFormat = pixelFormat;
cfg.size = sensorSize;
cfg.bufferCount = 4;
return cfg;
}
/*
 * Generate a StreamConfiguration for the raw Bayer use case. Verify if the
 * sensor can produce the requested RAW Bayer format and, if needed, adjust it
 * to the one with the largest bit depth the sensor can produce.
*/
StreamConfiguration PipelineHandlerISI::generateRawConfiguration(Camera *camera)
{
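	/*
	 * Bayer media bus codes supported by the pipeline, mapped to the
	 * corresponding in-memory Bayer pixel formats.
	 */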
static const std::map<unsigned int, PixelFormat> rawFormats = {
{ MEDIA_BUS_FMT_SBGGR8_1X8, formats::SBGGR8 },
{ MEDIA_BUS_FMT_SGBRG8_1X8, formats::SGBRG8 },
{ MEDIA_BUS_FMT_SGRBG8_1X8, formats::SGRBG8 },
{ MEDIA_BUS_FMT_SRGGB8_1X8, formats::SRGGB8 },
{ MEDIA_BUS_FMT_SBGGR10_1X10, formats::SBGGR10 },
{ MEDIA_BUS_FMT_SGBRG10_1X10, formats::SGBRG10 },
{ MEDIA_BUS_FMT_SGRBG10_1X10, formats::SGRBG10 },
{ MEDIA_BUS_FMT_SRGGB10_1X10, formats::SRGGB10 },
{ MEDIA_BUS_FMT_SBGGR12_1X12, formats::SBGGR12 },
{ MEDIA_BUS_FMT_SGBRG12_1X12, formats::SGBRG12 },
{ MEDIA_BUS_FMT_SGRBG12_1X12, formats::SGRBG12 },
{ MEDIA_BUS_FMT_SRGGB12_1X12, formats::SRGGB12 },
{ MEDIA_BUS_FMT_SBGGR14_1X14, formats::SBGGR14 },
{ MEDIA_BUS_FMT_SGBRG14_1X14, formats::SGBRG14 },
{ MEDIA_BUS_FMT_SGRBG14_1X14, formats::SGRBG14 },
{ MEDIA_BUS_FMT_SRGGB14_1X14, formats::SRGGB14 },
};
ISICameraData *data = cameraData(camera);
PixelFormat pixelFormat = formats::SBGGR10;
unsigned int mbusCode;
/* pixelFormat will be adjusted, if the sensor can produce RAW. */
mbusCode = data->getRawMediaBusFormat(&pixelFormat);
if (!mbusCode)
return {};
/*
* Populate the StreamConfiguration with all the supported Bayer
* formats the sensor can produce.
*/
std::map<PixelFormat, std::vector<SizeRange>> streamFormats;
const CameraSensor *sensor = data->sensor_.get();
for (unsigned int code : sensor->mbusCodes()) {
/* Find a Bayer media bus code from the sensor. */
const BayerFormat &bayerFormat = BayerFormat::fromMbusCode(code);
if (!bayerFormat.isValid())
continue;
auto it = rawFormats.find(code);
if (it == rawFormats.end()) {
LOG(ISI, Warning) << bayerFormat
<< " not supported in ISI formats map.";
continue;
}
streamFormats[it->second] = { { sensor->resolution(), sensor->resolution() } };
}
StreamFormats formats(streamFormats);
StreamConfiguration cfg(formats);
cfg.size = sensor->resolution();
cfg.pixelFormat = pixelFormat;
cfg.bufferCount = 4;
return cfg;
}
std::unique_ptr<CameraConfiguration>
PipelineHandlerISI::generateConfiguration(Camera *camera,
Span<const StreamRole> roles)
{
ISICameraData *data = cameraData(camera);
std::unique_ptr<ISICameraConfiguration> config =
std::make_unique<ISICameraConfiguration>(data);
if (roles.empty())
return config;
if (roles.size() > data->streams_.size()) {
LOG(ISI, Error) << "Only up to " << data->streams_.size()
<< " streams are supported";
return nullptr;
}
for (const auto &role : roles) {
/*
* Prefer the following formats:
* - Still Capture: Full resolution YUYV
* - ViewFinder/VideoRecording: 1080p YUYV
* - RAW: Full resolution Bayer
*/
StreamConfiguration cfg;
switch (role) {
case StreamRole::StillCapture:
case StreamRole::Viewfinder:
case StreamRole::VideoRecording: {
Size size = role == StreamRole::StillCapture
? data->sensor_->resolution()
: PipelineHandlerISI::kPreviewSize;
cfg = generateYUVConfiguration(camera, size);
if (cfg.pixelFormat.isValid())
break;
/*
			 * Fall back to a Bayer format if that's what the
* sensor supports.
*/
[[fallthrough]];
}
case StreamRole::Raw: {
cfg = generateRawConfiguration(camera);
break;
}
default:
LOG(ISI, Error) << "Requested stream role not supported: " << role;
return nullptr;
}
if (!cfg.pixelFormat.isValid()) {
LOG(ISI, Error)
<< "Cannot generate configuration for role: " << role;
return nullptr;
}
config->addConfiguration(cfg);
}
config->validate();
return config;
}
int PipelineHandlerISI::configure(Camera *camera, CameraConfiguration *c)
{
ISICameraConfiguration *camConfig = static_cast<ISICameraConfiguration *>(c);
ISICameraData *data = cameraData(camera);
/* All links are immutable except the sensor -> csis link. */
const MediaPad *sensorSrc = data->sensor_->entity()->getPadByIndex(0);
sensorSrc->links()[0]->setEnabled(true);
/*
* Reset the crossbar switch routing and enable one route for each
* requested stream configuration.
*
* \todo Handle concurrent usage of multiple cameras by adjusting the
* routing table instead of resetting it.
*/
V4L2Subdevice::Routing routing = {};
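	/*
	 * The routing below assumes the crossbar source pads numerically
	 * follow the sink pads, with one source pad feeding each ISI pipeline
	 * in stream order.
	 */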
unsigned int xbarFirstSource = crossbar_->entity()->pads().size() / 2 + 1;
for (const auto &[idx, config] : utils::enumerate(*c)) {
uint32_t sourcePad = xbarFirstSource + idx;
routing.emplace_back(V4L2Subdevice::Stream{ data->xbarSink_, 0 },
V4L2Subdevice::Stream{ sourcePad, 0 },
V4L2_SUBDEV_ROUTE_FL_ACTIVE);
}
int ret = crossbar_->setRouting(&routing, V4L2Subdevice::ActiveFormat);
if (ret)
return ret;
/* Apply format to the sensor and CSIS receiver. */
V4L2SubdeviceFormat format = camConfig->sensorFormat_;
ret = data->sensor_->setFormat(&format);
if (ret)
return ret;
ret = data->csis_->setFormat(0, &format);
if (ret)
return ret;
ret = crossbar_->setFormat(data->xbarSink_, &format);
if (ret)
return ret;
/* Now configure the ISI and video node instances, one per stream. */
data->enabledStreams_.clear();
for (const auto &config : *c) {
Pipe *pipe = pipeFromStream(camera, config.stream());
/*
* Set the format on the ISI sink pad: it must match what is
* received by the CSIS.
*/
ret = pipe->isi->setFormat(0, &format);
if (ret)
return ret;
/*
* Configure the ISI sink compose rectangle to downscale the
* image.
*
* \todo Additional cropping could be applied on the ISI source
* pad to further reduce the output image size.
*/
Rectangle isiScale(config.size);
ret = pipe->isi->setSelection(0, V4L2_SEL_TGT_COMPOSE, &isiScale);
if (ret)
return ret;
/*
		 * Set the format on the ISI source pad: only the media bus code
* is relevant as it configures format conversion, while the
* size is taken from the sink's COMPOSE (or source's CROP,
* if any) rectangles.
*/
unsigned int isiCode = ISICameraConfiguration::formatsMap_.at(config.pixelFormat);
V4L2SubdeviceFormat isiFormat{};
isiFormat.code = isiCode;
isiFormat.size = config.size;
ret = pipe->isi->setFormat(1, &isiFormat);
if (ret)
return ret;
V4L2DeviceFormat captureFmt{};
captureFmt.fourcc = pipe->capture->toV4L2PixelFormat(config.pixelFormat);
captureFmt.size = config.size;
/* \todo Set stride and format. */
ret = pipe->capture->setFormat(&captureFmt);
if (ret)
return ret;
/* Store the list of enabled streams for later use. */
data->enabledStreams_.push_back(config.stream());
}
return 0;
}
int PipelineHandlerISI::exportFrameBuffers(Camera *camera, Stream *stream,
std::vector<std::unique_ptr<FrameBuffer>> *buffers)
{
unsigned int count = stream->configuration().bufferCount;
Pipe *pipe = pipeFromStream(camera, stream);
return pipe->capture->exportBuffers(count, buffers);
}
int PipelineHandlerISI::start(Camera *camera,
[[maybe_unused]] const ControlList *controls)
{
ISICameraData *data = cameraData(camera);
for (const auto &stream : data->enabledStreams_) {
Pipe *pipe = pipeFromStream(camera, stream);
const StreamConfiguration &config = stream->configuration();
int ret = pipe->capture->importBuffers(config.bufferCount);
if (ret)
return ret;
ret = pipe->capture->streamOn();
if (ret)
return ret;
}
return 0;
}
void PipelineHandlerISI::stopDevice(Camera *camera)
{
ISICameraData *data = cameraData(camera);
for (const auto &stream : data->enabledStreams_) {
Pipe *pipe = pipeFromStream(camera, stream);
pipe->capture->streamOff();
pipe->capture->releaseBuffers();
}
}
int PipelineHandlerISI::queueRequestDevice(Camera *camera, Request *request)
{
for (auto &[stream, buffer] : request->buffers()) {
Pipe *pipe = pipeFromStream(camera, stream);
int ret = pipe->capture->queueBuffer(buffer);
if (ret)
return ret;
}
return 0;
}
bool PipelineHandlerISI::match(DeviceEnumerator *enumerator)
{
DeviceMatch dm("mxc-isi");
dm.add("crossbar");
dm.add("mxc_isi.0");
dm.add("mxc_isi.0.capture");
isiDev_ = acquireMediaDevice(enumerator, dm);
if (!isiDev_)
return false;
/*
* Acquire the subdevs and video nodes for the crossbar switch and the
* processing pipelines.
*/
crossbar_ = V4L2Subdevice::fromEntityName(isiDev_, "crossbar");
if (!crossbar_)
return false;
int ret = crossbar_->open();
if (ret)
return false;
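	/*
	 * Enumerate the ISI processing pipelines: each one pairs an
	 * mxc_isi.<i> subdevice with its mxc_isi.<i>.capture video node. Stop
	 * at the first index with no matching entity.
	 */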
for (unsigned int i = 0; ; ++i) {
std::string entityName = "mxc_isi." + std::to_string(i);
std::unique_ptr<V4L2Subdevice> isi =
V4L2Subdevice::fromEntityName(isiDev_, entityName);
if (!isi)
break;
ret = isi->open();
if (ret)
return false;
entityName += ".capture";
std::unique_ptr<V4L2VideoDevice> capture =
V4L2VideoDevice::fromEntityName(isiDev_, entityName);
if (!capture)
return false;
capture->bufferReady.connect(this, &PipelineHandlerISI::bufferReady);
ret = capture->open();
if (ret)
return ret;
pipes_.push_back({ std::move(isi), std::move(capture) });
}
if (pipes_.empty()) {
LOG(ISI, Error) << "Unable to enumerate pipes";
return false;
}
/*
* Loop over all the crossbar switch sink pads to find connected CSI-2
* receivers and camera sensors.
*/
unsigned int numCameras = 0;
unsigned int numSinks = 0;
for (MediaPad *pad : crossbar_->entity()->pads()) {
unsigned int sink = numSinks;
if (!(pad->flags() & MEDIA_PAD_FL_SINK) || pad->links().empty())
continue;
/*
* Count each crossbar sink pad to correctly configure
* routing and format for this camera.
*/
numSinks++;
MediaEntity *csi = pad->links()[0]->source()->entity();
if (csi->pads().size() != 2) {
LOG(ISI, Debug) << "Skip unsupported CSI-2 receiver "
<< csi->name();
continue;
}
pad = csi->pads()[0];
if (!(pad->flags() & MEDIA_PAD_FL_SINK) || pad->links().empty())
continue;
MediaEntity *sensor = pad->links()[0]->source()->entity();
if (sensor->function() != MEDIA_ENT_F_CAM_SENSOR) {
LOG(ISI, Debug) << "Skip unsupported subdevice "
<< sensor->name();
continue;
}
/* Create the camera data. */
std::unique_ptr<ISICameraData> data =
std::make_unique<ISICameraData>(this);
data->sensor_ = std::make_unique<CameraSensor>(sensor);
data->csis_ = std::make_unique<V4L2Subdevice>(csi);
data->xbarSink_ = sink;
ret = data->init();
if (ret) {
LOG(ISI, Error) << "Failed to initialize camera data";
return false;
}
/* Register the camera. */
const std::string &id = data->sensor_->id();
std::set<Stream *> streams;
std::transform(data->streams_.begin(), data->streams_.end(),
std::inserter(streams, streams.end()),
[](Stream &s) { return &s; });
std::shared_ptr<Camera> camera =
Camera::create(std::move(data), id, streams);
registerCamera(std::move(camera));
numCameras++;
}
return numCameras > 0;
}
PipelineHandlerISI::Pipe *PipelineHandlerISI::pipeFromStream(Camera *camera,
const Stream *stream)
{
ISICameraData *data = cameraData(camera);
unsigned int pipeIndex = data->pipeIndex(stream);
ASSERT(pipeIndex < pipes_.size());
return &pipes_[pipeIndex];
}
void PipelineHandlerISI::bufferReady(FrameBuffer *buffer)
{
Request *request = buffer->request();
/* Record the sensor's timestamp in the request metadata. */
ControlList &metadata = request->metadata();
if (!metadata.contains(controls::SensorTimestamp.id()))
metadata.set(controls::SensorTimestamp,
buffer->metadata().timestamp);
completeBuffer(request, buffer);
if (request->hasPendingBuffers())
return;
completeRequest(request);
}
REGISTER_PIPELINE_HANDLER(PipelineHandlerISI, "imx8-isi")
} /* namespace libcamera */
|
0 | repos/libcamera | repos/libcamera/utils/gen-formats.py | #!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright (C) 2020, Google Inc.
#
# Author: Laurent Pinchart <[email protected]>
#
# Generate formats definitions from YAML
import argparse
import re
import string
import sys
import yaml
class DRMFourCC(object):
format_regex = re.compile(r"#define (DRM_FORMAT_[A-Z0-9_]+)[ \t]+fourcc_code\(('.', '.', '.', '.')\)")
mod_vendor_regex = re.compile(r"#define DRM_FORMAT_MOD_VENDOR_([A-Z0-9_]+)[ \t]+([0-9a-fA-Fx]+)")
mod_regex = re.compile(r"#define ([A-Za-z0-9_]+)[ \t]+fourcc_mod_code\(([A-Z0-9_]+), ([0-9a-fA-Fx]+)\)")
def __init__(self, filename):
self.formats = {}
self.vendors = {}
self.mods = {}
for line in open(filename, 'rb').readlines():
line = line.decode('utf-8')
match = DRMFourCC.format_regex.match(line)
if match:
format, fourcc = match.groups()
self.formats[format] = fourcc
continue
match = DRMFourCC.mod_vendor_regex.match(line)
if match:
vendor, value = match.groups()
self.vendors[vendor] = int(value, 0)
continue
match = DRMFourCC.mod_regex.match(line)
if match:
mod, vendor, value = match.groups()
self.mods[mod] = (vendor, int(value, 0))
continue
def fourcc(self, name):
return self.formats[name]
def mod(self, name):
vendor, value = self.mods[name]
return self.vendors[vendor], value
def generate_h(formats, drm_fourcc):
template = string.Template('constexpr PixelFormat ${name}{ __fourcc(${fourcc}), __mod(${mod}) };')
fmts = []
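    # Emit one 'constexpr PixelFormat <name>{ ... }' definition per format,
    # substituting the DRM fourcc (and optional modifier) looked up in
    # drm_fourcc.h.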
for format in formats:
name, format = format.popitem()
fourcc = drm_fourcc.fourcc(format['fourcc'])
if format.get('big-endian'):
fourcc += '| DRM_FORMAT_BIG_ENDIAN'
data = {
'name': name,
'fourcc': fourcc,
'mod': '0, 0',
}
mod = format.get('mod')
if mod:
data['mod'] = '%u, %u' % drm_fourcc.mod(mod)
fmts.append(template.substitute(data))
return {'formats': '\n'.join(fmts)}
def fill_template(template, data):
template = open(template, 'rb').read()
template = template.decode('utf-8')
template = string.Template(template)
return template.substitute(data)
def main(argv):
# Parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('-o', dest='output', metavar='file', type=str,
help='Output file name. Defaults to standard output if not specified.')
parser.add_argument('input', type=str,
help='Input file name.')
parser.add_argument('template', type=str,
help='Template file name.')
parser.add_argument('drm_fourcc', type=str,
help='Path to drm_fourcc.h.')
args = parser.parse_args(argv[1:])
data = open(args.input, 'rb').read()
formats = yaml.safe_load(data)['formats']
drm_fourcc = DRMFourCC(args.drm_fourcc)
data = generate_h(formats, drm_fourcc)
data = fill_template(args.template, data)
if args.output:
output = open(args.output, 'wb')
output.write(data.encode('utf-8'))
output.close()
else:
sys.stdout.write(data)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
0 | repos/libcamera | repos/libcamera/utils/release.sh | #!/bin/sh
# SPDX-License-Identifier: GPL-2.0-or-later
# Prepare a project release
set -e
# Abort if we are not within the project root or the tree is not clean.
if [ ! -e utils/gen-version.sh ] || [ ! -e .git ]; then
echo "This release script must be run from the root of libcamera git tree."
exit 1
fi
if ! git diff-index --quiet HEAD; then
echo "Tree must be clean to release."
exit 1
fi
# Identify current version components
version=$(./utils/gen-version.sh)
# Decide if we are here to bump major, minor, or patch release.
case $1 in
major|minor|patch)
bump=$1;
;;
*)
echo "You must specify the version bump level: (major, minor, patch)"
exit 1
;;
esac
new_version=$(./utils/semver bump "$bump" "$version")
echo "Bumping $bump"
echo " Existing version is: $version"
echo " New version is : $new_version"
# Patch in the version to our meson.build
sed -i -E "s/ version : '.*',/ version : '$new_version',/" meson.build
# Commit the update
git commit meson.build -esm "libcamera v$new_version"
# Create a tag from that commit
git show -s --format=%B | git tag "v$new_version" -s -F -
|
0 | repos/libcamera | repos/libcamera/utils/update-mojo.sh | #!/bin/sh
# SPDX-License-Identifier: GPL-2.0-or-later
# Update mojo copy from a chromium source tree
set -e
if [ $# != 1 ] ; then
echo "Usage: $0 <chromium dir>"
exit 1
fi
ipc_dir="$(dirname "$(realpath "$0")")/ipc"
chromium_dir="$(realpath "$1")"
cd "${ipc_dir}/../../"
# Reject dirty libcamera trees
if [ -n "$(git status --porcelain -uno)" ] ; then
echo "libcamera tree is dirty"
exit 1
fi
if [ ! -d "${chromium_dir}/mojo" ] ; then
echo "Directory ${chromium_dir} doesn't contain mojo"
exit 1
fi
if [ ! -d "${chromium_dir}/.git" ] ; then
echo "Directory ${chromium_dir} doesn't contain a git tree"
exit 1
fi
# Get the chromium commit id
version=$(git -C "${chromium_dir}" rev-parse --short HEAD)
# Reject dirty chromium trees
if [ -n "$(git -C "${chromium_dir}" status --porcelain)" ] ; then
echo "Chromium tree in ${chromium_dir} is dirty"
exit 1
fi
# Remove the previously imported files.
rm -rf utils/ipc/mojo/
rm -rf utils/ipc/tools/
# Copy the diagnosis file
mkdir -p utils/ipc/tools/diagnosis/
cp "${chromium_dir}/tools/diagnosis/crbug_1001171.py" utils/ipc/tools/diagnosis/
# Copy the rest of mojo
mkdir -p utils/ipc/mojo/public/
cp "${chromium_dir}/mojo/public/LICENSE" utils/ipc/mojo/public/
(
cd "${chromium_dir}" || exit
find ./mojo/public/tools -type f \
-not -path "*/generators/*" \
-not -path "*/fuzzers/*" \
-exec cp --parents "{}" "${ipc_dir}" ";"
)
# Update the README files
readme=$(cat <<EOF
# SPDX-License-Identifier: CC0-1.0
Files in this directory are imported from ${version} of Chromium. Do not
modify them manually.
EOF
)
echo "$readme" > utils/ipc/mojo/README
echo "$readme" > utils/ipc/tools/README
# Commit the update. Use 'git commit -n' to avoid checkstyle pre-commit hook
# failures, as mojo doesn't comply with the Python coding style enforced by
# checkstyle.py.
git add utils/ipc/mojo/
git add utils/ipc/tools/
echo "utils: ipc: Update mojo
Update mojo from commit
$(git -C "${chromium_dir}" show --pretty='%H "%s"' --no-patch)
from the Chromium repository.
The update-mojo.sh script was used for this update." | \
git commit -n -s -F -
|
0 | repos/libcamera | repos/libcamera/utils/run-dist.sh | #!/bin/sh
# SPDX-License-Identifier: GPL-2.0-or-later
#
# On a meson dist run, generate the version string and store it in a file.
# This will later be picked up by the utils/gen-version.sh script and used
# instead of re-generating it. This way, if we are not building in the upstream
# git source tree, the upstream version information will be preserved.
cd "$MESON_SOURCE_ROOT" || return
./utils/gen-version.sh > "$MESON_DIST_ROOT"/.tarball-version
|
0 | repos/libcamera | repos/libcamera/utils/gen-header.sh | #!/bin/sh
src_dir="$1"
dst_file="$2"
cat <<EOF > "$dst_file"
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/* This file is auto-generated, do not edit! */
/*
* Copyright (C) 2018-2019, Google Inc.
*
* libcamera public API
*/
#pragma once
EOF
headers=$(for header in "$src_dir"/*.h "$src_dir"/*.h.in ; do
header=$(basename "$header")
header="${header%.in}"
echo "$header"
done | sort)
for header in $headers ; do
echo "#include <libcamera/$header>" >> "$dst_file"
done
|
0 | repos/libcamera | repos/libcamera/utils/update-kernel-headers.sh | #!/bin/sh
# SPDX-License-Identifier: GPL-2.0-or-later
# Update the kernel headers copy from a kernel source tree
if [ $# != 1 ] ; then
echo "Usage: $0 <kernel dir>"
exit 1
fi
header_dir="$(dirname "$(realpath "$0")")/../include/linux"
kernel_dir="$1"
# Bail out if the directory doesn't contain kernel sources
line=$(head -3 "${kernel_dir}/Kbuild" 2>/dev/null | tail -1)
if [ "$line" != "# Kbuild for top-level directory of the kernel" ] ; then
echo "Directory ${kernel_dir} doesn't contain a kernel source tree"
exit 1
fi
if [ ! -e "${kernel_dir}/.git" ] ; then
echo "Directory ${kernel_dir} doesn't contain a git tree"
exit 1
fi
# Check the kernel version, and reject dirty trees
version=$(git -C "${kernel_dir}" describe --dirty)
echo "$version"
if echo "${version}" | grep -q dirty ; then
echo "Kernel tree in ${kernel_dir} is dirty"
exit 1
fi
# Install the headers to a temporary directory
install_dir=$(mktemp -d)
if [ ! -d "${install_dir}" ] ; then
echo "Failed to create temporary directory"
exit 1
fi
trap "rm -rf ${install_dir}" EXIT
set -e
make -C "${kernel_dir}" O="${install_dir}" headers_install
set +e
# Copy the headers
headers="
drm/drm_fourcc.h
linux/dma-buf.h
linux/dma-heap.h
linux/media-bus-format.h
linux/media.h
linux/rkisp1-config.h
linux/udmabuf.h
linux/v4l2-common.h
linux/v4l2-controls.h
linux/v4l2-mediabus.h
linux/v4l2-subdev.h
linux/videodev2.h
"
for header in $headers ; do
name=$(basename "${header}")
cp "${install_dir}/usr/include/${header}" "${header_dir}/${name}"
done
# The IPU3 header is a special case, as it's stored in staging. Handle it
# manually.
(cd "${install_dir}" ; "${kernel_dir}/scripts/headers_install.sh" \
"${kernel_dir}/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h" \
"${header_dir}/intel-ipu3.h")
# Update the README file
cat <<EOF > "${header_dir}/README"
# SPDX-License-Identifier: CC0-1.0
Files in this directory are imported from ${version} of the Linux kernel. Do not
modify them manually.
EOF
# Cleanup
rm -rf "${install_dir}"
cat <<EOF
----------------------------------------------------------------------
Kernel headers updated. Please review and up-port local changes before
committing.
----------------------------------------------------------------------
EOF
|
0 | repos/libcamera | repos/libcamera/utils/abi-compat.sh | #!/bin/bash
# SPDX-License-Identifier: GPL-2.0-or-later
# Generate and compare the ABI compatibility of two libcamera versions
name=$(basename "$0")
usage() {
cat << EOF
$name: Determine the ABI/API compatibility of two build versions
$name [--help] [--abi-dir=<PATH>] [--tmp-dir=<PATH>] ARGS
The positional arguments (ARGS) determine the versions that will be compared and
take one of three forms:
- No positional arguments:
$name [optional arguments]
The current git HEAD is compared against the most recent TAG
- One positional argument:
$name [optional arguments] COMMITISH
The given COMMITISH is compared against its most recent TAG
- Two positional arguments:
$name [optional arguments] BASE COMMITISH
The given COMMITISH is compared against the given BASE.
Optional Arguments:
--abi-dir <path> Use <path> for storing (or retrieving existing) ABI data
files
--tmp-dir <path> Specify temporary build location for building ABI data.
This could be a tmpfs/RAM disk to save on disk writes.
EOF
}
dbg () {
echo "$@" >&2
}
die () {
echo "$name: $*" >&2
exit 1
}
describe () {
git describe --tags "$1" \
|| die "Failed to describe $1"
}
prev_release () {
git describe --tags --abbrev=0 "$1"^ \
|| die "Failed to identify previous release tag from $1"
}
# Make sure we exit on errors during argument parsing.
set -Eeuo pipefail
positional=()
while [[ $# -gt 0 ]] ; do
option="$1"
shift
case $option in
-h|--help)
usage
exit 0
;;
--abi-dir)
abi_dir=$1
shift
;;
--tmp-dir)
tmp=$1
shift
;;
-*)
die "Unrecognised argument $option"
;;
*) # Parse unidentified arguments based on position.
positional+=("$option")
;;
esac
done
set -- "${positional[@]}" # restore positional parameters.
# Parse positional arguments.
case $# in
0) # Check HEAD against previous 'release'.
from=$(prev_release HEAD)
to=$(describe HEAD)
;;
1) # Check COMMIT against previous release.
from=$(prev_release "$1")
to=$(describe "$1")
;;
2) # Check ABI between FROM and TO explicitly.
from=$(describe "$1")
to=$(describe "$2")
;;
*)
die "Invalid arguments"
;;
esac
if ! which abi-compliance-checker; then
die "This tool requires 'abi-compliance-checker' to be installed."
fi
abi_dir=${abi_dir:-abi}
tmp=${tmp:-"$abi_dir/tmp/"}
echo "Validating ABI compatibility between $from and $to"
mkdir -p "$abi_dir"
mkdir -p "$tmp"
# Generate an abi-compliance-checker xml description file.
create_xml() {
local output="$1"
local version="$2"
local root="$3"
echo "<version>$version</version>" > "$output"
echo "<headers>$root/usr/local/include/</headers>" >> "$output"
echo "<libs>$root/usr/local/lib/</libs>" >> "$output"
}
# Check if an ABI dump file exists, and if not create one by building a minimal
# configuration of libcamera at the specified version using a clean worktree.
create_abi_dump() {
local version="$1"
local abi_file="$abi_dir/$version.abi.dump"
local worktree="$tmp/$version"
local build="$tmp/$version-build"
# Use a fully qualified path when calling ninja -C.
install=$(realpath "$tmp/$version-install")
if [[ ! -e "$abi_file" ]] ; then
dbg "Creating ABI dump for $version in $abi_dir"
git worktree add --force "$worktree" "$version"
# Generate a minimal libcamera build. "lib" and "prefix" are
# defined explicitly to avoid system default ambiguities.
meson setup "$build" "$worktree" \
-Dlibdir=lib \
-Dprefix=/usr/local/ \
-Ddocumentation=disabled \
-Dcam=disabled \
-Dqcam=disabled \
-Dgstreamer=disabled \
-Dlc-compliance=disabled \
-Dtracing=disabled \
-Dpipelines=
ninja -C "$build"
DESTDIR="$install" ninja -C "$build" install
# Create an xml descriptor with parameters to generate the dump file.
create_xml \
"$install/libcamera-abi-dump.xml" \
"$version" \
"$install"
abi-compliance-checker \
-lib libcamera \
-v1 "$version" \
-dump "$install/libcamera-abi-dump.xml" \
-dump-path "$abi_file"
dbg Created "$abi_file"
dbg Removing Worktree "$worktree"
git worktree remove -f "$worktree"
dbg Removing "$build"
rm -r "$build"
dbg Removing "$install"
rm -r "$install"
fi
}
# Create the requested ABI dump files if they don't yet exist.
create_abi_dump "$from"
create_abi_dump "$to"
# TODO: Future iterations and extensions here could add "-stdout -xml" and
# parse the results automatically.
abi-compliance-checker -l libcamera \
-old "$abi_dir/$from.abi.dump" \
-new "$abi_dir/$to.abi.dump"
# On (far too many) occasions, the tools keep running, leaving a CPU core at
# 100% usage. Perhaps some subprocess gets launched but never rejoined. Stop
# them all.
#
# TODO: Investigate this and report upstream.
killall abi-compliance-checker 2>/dev/null
|
0 | repos/libcamera | repos/libcamera/utils/gen-version.sh | #!/bin/sh
# SPDX-License-Identifier: GPL-2.0-or-later
# Generate a version string using git describe
build_dir="$1"
src_dir="$2"
project_version="$3"
# If .tarball-version exists, output the version string from the file and exit.
# This file is auto-generated on a 'meson dist' command from the run-dist.sh
# script.
if [ -n "$src_dir" ] && [ -f "$src_dir"/.tarball-version ]
then
cat "$src_dir"/.tarball-version
exit 0
fi
# Bail out if the directory isn't under git control
git_dir=$(git rev-parse --git-dir 2>&1) || exit 1
# Derive the source directory from the git directory if not specified.
if [ -z "$src_dir" ]
then
src_dir=$(readlink -f "$git_dir/..")
fi
# Get a short description from the tree.
version=$(git describe --abbrev=8 --match "v[0-9]*" 2>/dev/null)
if [ -z "$version" ]
then
# Handle an un-tagged repository
sha=$(git describe --abbrev=8 --always 2>/dev/null)
commits=$(git log --oneline | wc -l 2>/dev/null)
version="v0.0.0-$commits-g$sha"
fi
# Append a '-dirty' suffix if the working tree is dirty. Prevent false
# positives due to changed timestamps by running git update-index.
if [ -z "$build_dir" ] || (echo "$build_dir" | grep -q "$src_dir")
then
git update-index --refresh > /dev/null 2>&1
fi
git diff-index --quiet HEAD || version="$version-dirty ($(date --iso-8601=seconds))"
# If a project version is provided, use it to replace the version number.
if [ -n "$project_version" ]
then
version=$(echo "$version" | sed -e 's/^[^-]*-//')
version="v$project_version-$version"
fi
# Replace the first '-' with a '+' to denote build metadata, strip the 'g' in
# front of the git SHA1 and remove the initial 'v'.
version=$(echo "$version" | sed -e 's/-/+/' | sed -e 's/-g/-/' | cut -c 2-)
echo "$version"
|
0 | repos/libcamera | repos/libcamera/utils/gen-ipa-pub-key.py | #!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright (C) 2020, Google Inc.
#
# Author: Laurent Pinchart <[email protected]>
#
# Generate the IPA module signing public key
import string
import subprocess
import sys
def main(argv):
if len(argv) != 4:
print('Usage: %s priv-key template output' % argv[0])
return 1
priv_key = argv[1]
template = argv[2]
output = argv[3]
try:
ret = subprocess.run(['openssl', 'rsa', '-pubout', '-in', priv_key,
'-outform', 'DER'],
stdout=subprocess.PIPE)
except FileNotFoundError:
print('Please install openssl to sign IPA modules')
return 1
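    # Format the DER-encoded public key as a C array initializer, eight bytes
    # per line, ready for substitution into the template.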
ipa_key = ['0x%02x' % c for c in ret.stdout]
ipa_key = [', '.join(ipa_key[bound:bound + 8]) for bound in range(0, len(ipa_key), 8)]
ipa_key = ',\n\t'.join(ipa_key)
data = {'ipa_key': ipa_key}
template = open(template, 'rb').read()
template = template.decode('utf-8')
template = string.Template(template)
f = open(output, 'wb')
f.write(template.substitute(data).encode('utf-8'))
f.close()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
0 | repos/libcamera | repos/libcamera/utils/gen-ipa-priv-key.sh | #!/bin/sh
# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright (C) 2020, Google Inc.
#
# Author: Laurent Pinchart <[email protected]>
#
# Generate an RSA private key to sign IPA modules
key="$1"
openssl genpkey -algorithm RSA -out "${key}" -pkeyopt rsa_keygen_bits:2048
|
0 | repos/libcamera | repos/libcamera/utils/gen-controls.py | #!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright (C) 2019, Google Inc.
#
# Author: Laurent Pinchart <[email protected]>
#
# Generate control definitions from YAML
import argparse
from functools import reduce
import operator
import string
import sys
import yaml
import os
class ControlEnum(object):
def __init__(self, data):
self.__data = data
@property
def description(self):
"""The enum description"""
return self.__data.get('description')
@property
def name(self):
"""The enum name"""
return self.__data.get('name')
@property
def value(self):
"""The enum value"""
return self.__data.get('value')
class Control(object):
def __init__(self, name, data, vendor):
self.__name = name
self.__data = data
self.__enum_values = None
self.__size = None
self.__vendor = vendor
enum_values = data.get('enum')
if enum_values is not None:
self.__enum_values = [ControlEnum(enum) for enum in enum_values]
size = self.__data.get('size')
if size is not None:
if len(size) == 0:
raise RuntimeError(f'Control `{self.__name}` size must have at least one dimension')
# Compute the total number of elements in the array. If any of the
# array dimension is a string, the array is variable-sized.
num_elems = 1
for dim in size:
if type(dim) is str:
num_elems = 0
break
dim = int(dim)
if dim <= 0:
raise RuntimeError(f'Control `{self.__name}` size must have positive values only')
num_elems *= dim
self.__size = num_elems
@property
def description(self):
"""The control description"""
return self.__data.get('description')
@property
def enum_values(self):
"""The enum values, if the control is an enumeration"""
if self.__enum_values is None:
return
for enum in self.__enum_values:
yield enum
@property
def is_enum(self):
"""Is the control an enumeration"""
return self.__enum_values is not None
@property
def vendor(self):
"""The vendor string, or None"""
return self.__vendor
@property
def name(self):
"""The control name (CamelCase)"""
return self.__name
@property
def type(self):
typ = self.__data.get('type')
size = self.__data.get('size')
if typ == 'string':
return 'std::string'
if self.__size is None:
return typ
if self.__size:
return f"Span<const {typ}, {self.__size}>"
else:
return f"Span<const {typ}>"
def snake_case(s):
return ''.join([c.isupper() and ('_' + c) or c for c in s]).strip('_')
def format_description(description):
description = description.strip('\n').split('\n')
description[0] = '\\brief ' + description[0]
return '\n'.join([(line and ' * ' or ' *') + line for line in description])
def generate_cpp(controls):
enum_doc_start_template = string.Template('''/**
* \\enum ${name}Enum
* \\brief Supported ${name} values''')
enum_doc_value_template = string.Template(''' * \\var ${value}
${description}''')
doc_template = string.Template('''/**
* \\var ${name}
${description}
*/''')
def_template = string.Template('extern const Control<${type}> ${name}(${id_name}, "${name}");')
enum_values_doc = string.Template('''/**
* \\var ${name}Values
* \\brief List of all $name supported values
*/''')
enum_values_start = string.Template('''extern const std::array<const ControlValue, ${size}> ${name}Values = {''')
enum_values_values = string.Template('''\tstatic_cast<int32_t>(${name}),''')
name_value_map_doc = string.Template('''/**
* \\var ${name}NameValueMap
* \\brief Map of all $name supported value names (in std::string format) to value
*/''')
name_value_map_start = string.Template('''extern const std::map<std::string, ${type}> ${name}NameValueMap = {''')
name_value_values = string.Template('''\t{ "${name}", ${name} },''')
ctrls_doc = {}
ctrls_def = {}
ctrls_map = []
for ctrl in controls:
id_name = snake_case(ctrl.name).upper()
vendor = ctrl.vendor
if vendor not in ctrls_doc:
ctrls_doc[vendor] = []
ctrls_def[vendor] = []
info = {
'name': ctrl.name,
'type': ctrl.type,
'description': format_description(ctrl.description),
'id_name': id_name,
}
target_doc = ctrls_doc[vendor]
target_def = ctrls_def[vendor]
if ctrl.is_enum:
enum_doc = []
enum_doc.append(enum_doc_start_template.substitute(info))
num_entries = 0
for enum in ctrl.enum_values:
value_info = {
'name': ctrl.name,
'value': enum.name,
'description': format_description(enum.description),
}
enum_doc.append(enum_doc_value_template.substitute(value_info))
num_entries += 1
enum_doc = '\n *\n'.join(enum_doc)
enum_doc += '\n */'
target_doc.append(enum_doc)
values_info = {
'name': info['name'],
'type': ctrl.type,
'size': num_entries,
}
target_doc.append(enum_values_doc.substitute(values_info))
target_def.append(enum_values_start.substitute(values_info))
for enum in ctrl.enum_values:
value_info = {
'name': enum.name
}
target_def.append(enum_values_values.substitute(value_info))
target_def.append("};")
target_doc.append(name_value_map_doc.substitute(values_info))
target_def.append(name_value_map_start.substitute(values_info))
for enum in ctrl.enum_values:
value_info = {
'name': enum.name
}
target_def.append(name_value_values.substitute(value_info))
target_def.append("};")
target_doc.append(doc_template.substitute(info))
target_def.append(def_template.substitute(info))
vendor_ns = vendor + '::' if vendor != "libcamera" else ''
ctrls_map.append('\t{ ' + vendor_ns + id_name + ', &' + vendor_ns + ctrl.name + ' },')
vendor_ctrl_doc_sub = []
vendor_ctrl_template = string.Template('''
/**
* \\brief Namespace for ${vendor} controls
*/
namespace ${vendor} {
${vendor_controls_str}
} /* namespace ${vendor} */''')
for vendor in [v for v in ctrls_doc.keys() if v not in ['libcamera']]:
vendor_ctrl_doc_sub.append(vendor_ctrl_template.substitute({'vendor': vendor, 'vendor_controls_str': '\n\n'.join(ctrls_doc[vendor])}))
vendor_ctrl_def_sub = []
for vendor in [v for v in ctrls_def.keys() if v not in ['libcamera']]:
vendor_ctrl_def_sub.append(vendor_ctrl_template.substitute({'vendor': vendor, 'vendor_controls_str': '\n'.join(ctrls_def[vendor])}))
return {
'controls_doc': '\n\n'.join(ctrls_doc['libcamera']),
'controls_def': '\n'.join(ctrls_def['libcamera']),
'controls_map': '\n'.join(ctrls_map),
'vendor_controls_doc': '\n'.join(vendor_ctrl_doc_sub),
'vendor_controls_def': '\n'.join(vendor_ctrl_def_sub),
}
def generate_h(controls, mode, ranges):
enum_template_start = string.Template('''enum ${name}Enum {''')
enum_value_template = string.Template('''\t${name} = ${value},''')
enum_values_template = string.Template('''extern const std::array<const ControlValue, ${size}> ${name}Values;''')
name_value_map_template = string.Template('''extern const std::map<std::string, ${type}> ${name}NameValueMap;''')
template = string.Template('''extern const Control<${type}> ${name};''')
ctrls = {}
ids = {}
id_value = {}
for ctrl in controls:
id_name = snake_case(ctrl.name).upper()
vendor = ctrl.vendor
if vendor not in ctrls:
if vendor not in ranges.keys():
raise RuntimeError(f'Control id range is not defined for vendor {vendor}')
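            # Control IDs are assigned sequentially, starting one above the
            # vendor's reserved range base.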
id_value[vendor] = ranges[vendor] + 1
ids[vendor] = []
ctrls[vendor] = []
target_ids = ids[vendor]
target_ids.append('\t' + id_name + ' = ' + str(id_value[vendor]) + ',')
info = {
'name': ctrl.name,
'type': ctrl.type,
}
target_ctrls = ctrls[vendor]
if ctrl.is_enum:
target_ctrls.append(enum_template_start.substitute(info))
num_entries = 0
for enum in ctrl.enum_values:
value_info = {
'name': enum.name,
'value': enum.value,
}
target_ctrls.append(enum_value_template.substitute(value_info))
num_entries += 1
target_ctrls.append("};")
values_info = {
'name': info['name'],
'type': ctrl.type,
'size': num_entries,
}
target_ctrls.append(enum_values_template.substitute(values_info))
target_ctrls.append(name_value_map_template.substitute(values_info))
target_ctrls.append(template.substitute(info))
id_value[vendor] += 1
vendor_template = string.Template('''
namespace ${vendor} {
#define LIBCAMERA_HAS_${vendor_def}_VENDOR_${mode}
enum {
${vendor_enums}
};
${vendor_controls}
} /* namespace ${vendor} */
''')
vendor_sub = []
for vendor in [v for v in ctrls.keys() if v != 'libcamera']:
vendor_sub.append(vendor_template.substitute({'mode': mode.upper(),
'vendor': vendor,
'vendor_def': vendor.upper(),
'vendor_enums': '\n'.join(ids[vendor]),
'vendor_controls': '\n'.join(ctrls[vendor])}))
return {
'ids': '\n'.join(ids['libcamera']),
'controls': '\n'.join(ctrls['libcamera']),
'vendor_controls': '\n'.join(vendor_sub)
}
def fill_template(template, data):
template = open(template, 'rb').read()
template = template.decode('utf-8')
template = string.Template(template)
return template.substitute(data)
def main(argv):
# Parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--mode', '-m', type=str, required=True, choices=['controls', 'properties'],
help='Mode of operation')
parser.add_argument('--output', '-o', metavar='file', type=str,
help='Output file name. Defaults to standard output if not specified.')
parser.add_argument('--ranges', '-r', type=str, required=True,
help='Control id range reservation file.')
parser.add_argument('--template', '-t', dest='template', type=str, required=True,
help='Template file name.')
parser.add_argument('input', type=str, nargs='+',
help='Input file name.')
args = parser.parse_args(argv[1:])
ranges = {}
with open(args.ranges, 'rb') as f:
        data = f.read()
ranges = yaml.safe_load(data)['ranges']
controls = []
for input in args.input:
with open(input, 'rb') as f:
data = f.read()
        doc = yaml.safe_load(data)
        vendor = doc['vendor']
        ctrls = doc['controls']
controls = controls + [Control(*ctrl.popitem(), vendor) for ctrl in ctrls]
if args.template.endswith('.cpp.in'):
data = generate_cpp(controls)
elif args.template.endswith('.h.in'):
data = generate_h(controls, args.mode, ranges)
else:
raise RuntimeError('Unknown template type')
data = fill_template(args.template, data)
if args.output:
output = open(args.output, 'wb')
output.write(data.encode('utf-8'))
output.close()
else:
sys.stdout.write(data)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
0 | repos/libcamera | repos/libcamera/utils/checkstyle.py | #!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright (C) 2018, Google Inc.
#
# Author: Laurent Pinchart <[email protected]>
#
# A patch style checker script based on clang-format
#
# TODO:
#
# - Support other formatting tools and checkers (cppcheck, cpplint, kwstyle, ...)
# - Split large hunks to minimize context noise
# - Improve style issues counting
#
import argparse
import difflib
import fnmatch
import os.path
import re
import shutil
import subprocess
import sys
dependencies = {
'clang-format': True,
'git': True,
}
# ------------------------------------------------------------------------------
# Colour terminal handling
#
class Colours:
Default = 0
Black = 0
Red = 31
Green = 32
Yellow = 33
Blue = 34
Magenta = 35
Cyan = 36
LightGrey = 37
DarkGrey = 90
LightRed = 91
LightGreen = 92
Lightyellow = 93
LightBlue = 94
LightMagenta = 95
LightCyan = 96
White = 97
@staticmethod
def fg(colour):
if sys.stdout.isatty():
return '\033[%um' % colour
else:
return ''
@staticmethod
def bg(colour):
if sys.stdout.isatty():
return '\033[%um' % (colour + 10)
else:
return ''
@staticmethod
def reset():
if sys.stdout.isatty():
return '\033[0m'
else:
return ''
# ------------------------------------------------------------------------------
# Diff parsing, handling and printing
#
class DiffHunkSide(object):
"""A side of a diff hunk, recording line numbers"""
def __init__(self, start):
self.start = start
self.touched = []
self.untouched = []
def __len__(self):
return len(self.touched) + len(self.untouched)
class DiffHunk(object):
diff_header_regex = re.compile(r'@@ -([0-9]+),?([0-9]+)? \+([0-9]+),?([0-9]+)? @@')
def __init__(self, line):
match = DiffHunk.diff_header_regex.match(line)
if not match:
raise RuntimeError("Malformed diff hunk header '%s'" % line)
self.__from_line = int(match.group(1))
self.__to_line = int(match.group(3))
self.__from = DiffHunkSide(self.__from_line)
self.__to = DiffHunkSide(self.__to_line)
self.lines = []
def __repr__(self):
s = '%s@@ -%u,%u +%u,%u @@\n' % \
(Colours.fg(Colours.Cyan),
self.__from.start, len(self.__from),
self.__to.start, len(self.__to))
for line in self.lines:
if line[0] == '-':
s += Colours.fg(Colours.Red)
elif line[0] == '+':
s += Colours.fg(Colours.Green)
if line[0] == '-':
spaces = 0
for i in range(len(line)):
if line[-i-1].isspace():
spaces += 1
else:
break
spaces = len(line) - spaces
line = line[0:spaces] + Colours.bg(Colours.Red) + line[spaces:]
s += line
s += Colours.reset()
s += '\n'
return s[:-1]
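    # Record the line numbers touched on each side of the hunk; they are
    # queried through intersects() and side().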
def append(self, line):
if line[0] == ' ':
self.__from.untouched.append(self.__from_line)
self.__from_line += 1
self.__to.untouched.append(self.__to_line)
self.__to_line += 1
elif line[0] == '-':
self.__from.touched.append(self.__from_line)
self.__from_line += 1
elif line[0] == '+':
self.__to.touched.append(self.__to_line)
self.__to_line += 1
self.lines.append(line.rstrip('\n'))
def intersects(self, lines):
for line in lines:
if line in self.__from.touched:
return True
return False
def side(self, side):
if side == 'from':
return self.__from
else:
return self.__to
def parse_diff(diff):
hunks = []
hunk = None
for line in diff:
if line.startswith('@@'):
if hunk:
hunks.append(hunk)
hunk = DiffHunk(line)
elif hunk is not None:
# Work around https://github.com/python/cpython/issues/46395
# See https://www.gnu.org/software/diffutils/manual/html_node/Incomplete-Lines.html
if line[-1] != '\n':
hunk.append(line + '\n')
line = '\\ No newline at end of file\n'
hunk.append(line)
if hunk:
hunks.append(hunk)
return hunks
# ------------------------------------------------------------------------------
# Commit, Staged Changes & Amendments
#
class CommitFile:
def __init__(self, name):
info = name.split()
self.__status = info[0][0]
# For renamed files, store the new name
if self.__status == 'R':
self.__filename = info[2]
else:
self.__filename = info[1]
def __repr__(self):
return f'{self.__status} {self.__filename}'
@property
def filename(self):
return self.__filename
@property
def status(self):
return self.__status
class Commit:
def __init__(self, commit):
self.commit = commit
self._trailers = []
self._parse()
def _parse_trailers(self, lines):
for index in range(1, len(lines)):
line = lines[index]
if not line:
break
self._trailers.append(line)
return index
def _parse(self):
# Get the commit title and list of files.
ret = subprocess.run(['git', 'show', '--format=%s%n%(trailers:only,unfold)', '--name-status',
self.commit],
stdout=subprocess.PIPE).stdout.decode('utf-8')
lines = ret.splitlines()
self._title = lines[0]
index = self._parse_trailers(lines)
self._files = [CommitFile(f) for f in lines[index:] if f]
def files(self, filter='AMR'):
return [f.filename for f in self._files if f.status in filter]
@property
def title(self):
return self._title
@property
def trailers(self):
return self._trailers
def get_diff(self, top_level, filename):
diff = subprocess.run(['git', 'diff', '%s~..%s' % (self.commit, self.commit),
'--', '%s/%s' % (top_level, filename)],
stdout=subprocess.PIPE).stdout.decode('utf-8')
return parse_diff(diff.splitlines(True))
def get_file(self, filename):
return subprocess.run(['git', 'show', '%s:%s' % (self.commit, filename)],
stdout=subprocess.PIPE).stdout.decode('utf-8')
class StagedChanges(Commit):
def __init__(self):
Commit.__init__(self, '')
def _parse(self):
ret = subprocess.run(['git', 'diff', '--staged', '--name-status'],
stdout=subprocess.PIPE).stdout.decode('utf-8')
self._title = 'Staged changes'
self._files = [CommitFile(f) for f in ret.splitlines()]
def get_diff(self, top_level, filename):
diff = subprocess.run(['git', 'diff', '--staged', '--',
'%s/%s' % (top_level, filename)],
stdout=subprocess.PIPE).stdout.decode('utf-8')
return parse_diff(diff.splitlines(True))
class Amendment(Commit):
def __init__(self):
Commit.__init__(self, '')
def _parse(self):
# Create a title using HEAD commit and parse the trailers.
ret = subprocess.run(['git', 'show', '--format=%H %s%n%(trailers:only,unfold)',
'--no-patch'],
stdout=subprocess.PIPE).stdout.decode('utf-8')
lines = ret.splitlines()
self._title = 'Amendment of ' + lines[0].strip()
self._parse_trailers(lines)
# Extract the list of modified files
ret = subprocess.run(['git', 'diff', '--staged', '--name-status', 'HEAD~'],
stdout=subprocess.PIPE).stdout.decode('utf-8')
self._files = [CommitFile(f) for f in ret.splitlines()]
def get_diff(self, top_level, filename):
diff = subprocess.run(['git', 'diff', '--staged', 'HEAD~', '--',
'%s/%s' % (top_level, filename)],
stdout=subprocess.PIPE).stdout.decode('utf-8')
return parse_diff(diff.splitlines(True))
# ------------------------------------------------------------------------------
# Helpers
#
class ClassRegistry(type):
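    # Metaclass registering every subclass with its direct base class and
    # keeping the list sorted by descending 'priority', so checkers and
    # formatters can be enumerated generically.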
def __new__(cls, clsname, bases, attrs):
newclass = super().__new__(cls, clsname, bases, attrs)
if bases:
bases[0].subclasses.append(newclass)
bases[0].subclasses.sort(key=lambda x: getattr(x, 'priority', 0),
reverse=True)
return newclass
# ------------------------------------------------------------------------------
# Commit Checkers
#
class CommitChecker(metaclass=ClassRegistry):
subclasses = []
def __init__(self):
pass
#
# Class methods
#
@classmethod
def checkers(cls, names):
for checker in cls.subclasses:
if names and checker.__name__ not in names:
continue
yield checker
class CommitIssue(object):
def __init__(self, msg):
self.msg = msg
class HeaderAddChecker(CommitChecker):
@classmethod
def check(cls, commit, top_level):
issues = []
meson_files = [f for f in commit.files()
if os.path.basename(f) == 'meson.build']
for filename in commit.files('AR'):
if not filename.startswith('include/libcamera/') or \
not filename.endswith('.h'):
continue
meson = os.path.dirname(filename) + '/meson.build'
header = os.path.basename(filename)
issue = CommitIssue('Header %s added without corresponding update to %s' %
(filename, meson))
if meson not in meson_files:
issues.append(issue)
continue
diff = commit.get_diff(top_level, meson)
found = False
for hunk in diff:
for line in hunk.lines:
if line[0] != '+':
continue
if line.find("'%s'" % header) != -1:
found = True
break
if found:
break
if not found:
issues.append(issue)
return issues
class TitleChecker(CommitChecker):
prefix_regex = re.compile(r'^([a-zA-Z0-9_.-]+: )+')
release_regex = re.compile(r'libcamera v[0-9]+\.[0-9]+\.[0-9]+')
@classmethod
def check(cls, commit, top_level):
title = commit.title
# Skip the check when validating staged changes (as done through a
# pre-commit hook) as there is no title to check in that case.
if isinstance(commit, StagedChanges):
return []
# Ignore release commits, they don't need a prefix.
if TitleChecker.release_regex.fullmatch(title):
return []
prefix_pos = title.find(': ')
if prefix_pos != -1 and prefix_pos != len(title) - 2:
return []
# Find prefix candidates by searching the git history
msgs = subprocess.run(['git', 'log', '--no-decorate', '--oneline', '-n100', '--'] + commit.files(),
stdout=subprocess.PIPE).stdout.decode('utf-8')
prefixes = {}
prefixes_count = 0
for msg in msgs.splitlines():
prefix = TitleChecker.prefix_regex.match(msg)
if not prefix:
continue
prefix = prefix.group(0)
if prefix in prefixes:
prefixes[prefix] += 1
else:
prefixes[prefix] = 1
prefixes_count += 1
if not prefixes:
return [CommitIssue('Commit title is missing prefix')]
# Sort the candidates by number of occurrences and pick the best ones.
# When multiple prefixes are possible without a clear winner, we want to
# display the most common options to the user, but without the most
# unlikely options to avoid too long messages. As a heuristic, select
# enough candidates to cover at least 2/3 of the possible prefixes, but
# never more than 4 candidates.
prefixes = list(prefixes.items())
prefixes.sort(key=lambda x: x[1], reverse=True)
candidates = []
candidates_count = 0
for prefix in prefixes:
candidates.append(f"`{prefix[0]}'")
candidates_count += prefix[1]
if candidates_count >= prefixes_count * 2 / 3 or \
len(candidates) == 4:
break
candidates = candidates[:-2] + [' or '.join(candidates[-2:])]
candidates = ', '.join(candidates)
return [CommitIssue('Commit title is missing prefix, '
'possible candidates are ' + candidates)]
class TrailersChecker(CommitChecker):
commit_regex = re.compile(r'[0-9a-f]{12}[0-9a-f]* \(".*"\)')
coverity_regex = re.compile(r'Coverity CID=.*')
# Simple e-mail address validator regex, with an additional trailing
# comment. The complexity of a full RFC6531 validator isn't worth the
# additional invalid addresses it would reject.
email_regex = re.compile(r'[^<]+ <[^@>]+@[^>]+>( # .*)?')
link_regex = re.compile(r'https?://.*')
@staticmethod
def validate_reported_by(value):
if TrailersChecker.email_regex.fullmatch(value):
return True
if TrailersChecker.coverity_regex.fullmatch(value):
return True
return False
known_trailers = {
'Acked-by': email_regex,
'Bug': link_regex,
'Co-developed-by': email_regex,
'Fixes': commit_regex,
'Link': link_regex,
'Reported-by': validate_reported_by,
'Reviewed-by': email_regex,
'Signed-off-by': email_regex,
'Suggested-by': email_regex,
'Tested-by': email_regex,
}
trailer_regex = re.compile(r'([A-Z][a-zA-Z-]*)\s*:\s*(.*)')
@classmethod
def check(cls, commit, top_level):
issues = []
for trailer in commit.trailers:
match = TrailersChecker.trailer_regex.fullmatch(trailer)
if not match:
issues.append(CommitIssue(f"Malformed commit trailer '{trailer}'"))
continue
key, value = match.groups()
validator = TrailersChecker.known_trailers.get(key)
if not validator:
issues.append(CommitIssue(f"Invalid commit trailer key '{key}'"))
continue
if isinstance(validator, re.Pattern):
valid = bool(validator.fullmatch(value))
else:
valid = validator(value)
if not valid:
issues.append(CommitIssue(f"Malformed value '{value}' for commit trailer '{key}'"))
continue
return issues
# ------------------------------------------------------------------------------
# Style Checkers
#
class StyleChecker(metaclass=ClassRegistry):
subclasses = []
def __init__(self):
pass
#
# Class methods
#
@classmethod
def checkers(cls, filename, names):
for checker in cls.subclasses:
if names and checker.__name__ not in names:
continue
if checker.supports(filename):
yield checker
@classmethod
def supports(cls, filename):
for pattern in cls.patterns:
if fnmatch.fnmatch(os.path.basename(filename), pattern):
return True
return False
@classmethod
def all_patterns(cls):
patterns = set()
for checker in cls.subclasses:
patterns.update(checker.patterns)
return patterns
class StyleIssue(object):
def __init__(self, line_number, position, line, msg):
self.line_number = line_number
self.position = position
self.line = line
self.msg = msg
class HexValueChecker(StyleChecker):
patterns = ('*.c', '*.cpp', '*.h')
regex = re.compile(r'\b0[xX][0-9a-fA-F]+\b')
def __init__(self, content):
super().__init__()
self.__content = content
def check(self, line_numbers):
issues = []
for line_number in line_numbers:
line = self.__content[line_number - 1]
match = HexValueChecker.regex.search(line)
if not match:
continue
value = match.group(0)
if value == value.lower():
continue
issues.append(StyleIssue(line_number, match.span(0), line,
f'Use lowercase hex constant {value.lower()}'))
return issues
class IncludeChecker(StyleChecker):
patterns = ('*.cpp', '*.h')
headers = ('cassert', 'cctype', 'cerrno', 'cfenv', 'cfloat', 'cinttypes',
'climits', 'clocale', 'csetjmp', 'csignal', 'cstdarg', 'cstddef',
'cstdint', 'cstdio', 'cstdlib', 'cstring', 'ctime', 'cuchar',
'cwchar', 'cwctype', 'math.h')
include_regex = re.compile(r'^#include <([a-z.]*)>')
def __init__(self, content):
super().__init__()
self.__content = content
def check(self, line_numbers):
issues = []
for line_number in line_numbers:
line = self.__content[line_number - 1]
match = IncludeChecker.include_regex.match(line)
if not match:
continue
header = match.group(1)
if header not in IncludeChecker.headers:
continue
if header.endswith('.h'):
header_type = 'C++'
header = 'c' + header[:-2]
else:
header_type = 'C compatibility'
header = header[1:] + '.h'
issues.append(StyleIssue(line_number, match.span(1), line,
f'{header_type} header <{header}> is preferred'))
return issues
class LogCategoryChecker(StyleChecker):
log_regex = re.compile(r'\bLOG\((Debug|Info|Warning|Error|Fatal)\)')
patterns = ('*.cpp',)
def __init__(self, content):
super().__init__()
self.__content = content
def check(self, line_numbers):
issues = []
for line_number in line_numbers:
line = self.__content[line_number-1]
match = LogCategoryChecker.log_regex.search(line)
if not match:
continue
issues.append(StyleIssue(line_number, match.span(1), line,
'LOG() should use categories'))
return issues
class MesonChecker(StyleChecker):
patterns = ('meson.build',)
def __init__(self, content):
super().__init__()
self.__content = content
def check(self, line_numbers):
issues = []
for line_number in line_numbers:
line = self.__content[line_number-1]
pos = line.find('\t')
if pos != -1:
issues.append(StyleIssue(line_number, [pos, pos], line,
'meson.build should use spaces for indentation'))
return issues
class Pep8Checker(StyleChecker):
patterns = ('*.py',)
results_regex = re.compile(r'stdin:([0-9]+):([0-9]+)(.*)')
def __init__(self, content):
super().__init__()
self.__content = content
def check(self, line_numbers):
issues = []
data = ''.join(self.__content).encode('utf-8')
try:
ret = subprocess.run(['pycodestyle', '--ignore=E501', '-'],
input=data, stdout=subprocess.PIPE)
except FileNotFoundError:
issues.append(StyleIssue(0, None, None, 'Please install pycodestyle to validate python additions'))
return issues
results = ret.stdout.decode('utf-8').splitlines()
for item in results:
search = re.search(Pep8Checker.results_regex, item)
line_number = int(search.group(1))
position = int(search.group(2))
msg = search.group(3)
if line_number in line_numbers:
line = self.__content[line_number - 1]
issues.append(StyleIssue(line_number, None, line, msg))
return issues
class ShellChecker(StyleChecker):
patterns = ('*.sh',)
results_line_regex = re.compile(r'In - line ([0-9]+):')
def __init__(self, content):
super().__init__()
self.__content = content
def check(self, line_numbers):
issues = []
data = ''.join(self.__content).encode('utf-8')
try:
ret = subprocess.run(['shellcheck', '-Cnever', '-'],
input=data, stdout=subprocess.PIPE)
except FileNotFoundError:
issues.append(StyleIssue(0, None, None, 'Please install shellcheck to validate shell script additions'))
return issues
results = ret.stdout.decode('utf-8').splitlines()
for nr, item in enumerate(results):
search = re.search(ShellChecker.results_line_regex, item)
if search is None:
continue
line_number = int(search.group(1))
line = results[nr + 1]
msg = results[nr + 2]
if line_number in line_numbers:
issues.append(StyleIssue(line_number, None, line, msg))
return issues
# ------------------------------------------------------------------------------
# Formatters
#
class Formatter(metaclass=ClassRegistry):
subclasses = []
def __init__(self):
pass
#
# Class methods
#
@classmethod
def formatters(cls, filename, names):
for formatter in cls.subclasses:
if names and formatter.__name__ not in names:
continue
if formatter.supports(filename):
yield formatter
@classmethod
def supports(cls, filename):
for pattern in cls.patterns:
if fnmatch.fnmatch(os.path.basename(filename), pattern):
return True
return False
@classmethod
def all_patterns(cls):
patterns = set()
for formatter in cls.subclasses:
patterns.update(formatter.patterns)
return patterns
class CLangFormatter(Formatter):
patterns = ('*.c', '*.cpp', '*.h')
priority = -1
@classmethod
def format(cls, filename, data):
ret = subprocess.run(['clang-format', '-style=file',
'-assume-filename=' + filename],
input=data.encode('utf-8'), stdout=subprocess.PIPE)
return ret.stdout.decode('utf-8')
class DoxygenFormatter(Formatter):
patterns = ('*.c', '*.cpp')
oneliner_regex = re.compile(r'^ +\* +\\(brief|param|return)\b.*\.$')
return_regex = re.compile(r' +\* +\\return +[a-z]')
@classmethod
def format(cls, filename, data):
lines = []
in_doxygen = False
for line in data.split('\n'):
if line.find('/**') != -1:
in_doxygen = True
if not in_doxygen:
lines.append(line)
continue
line = cls.oneliner_regex.sub(lambda m: m.group(0)[:-1], line)
line = cls.return_regex.sub(lambda m: m.group(0)[:-1] + m.group(0)[-1].upper(), line)
if line.find('*/') != -1:
in_doxygen = False
lines.append(line)
return '\n'.join(lines)
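# Example (illustrative only, not part of the formatter): within a /** ... */
# block, " * \brief Construct the object." loses its trailing period, and
# " * \return the number of bytes" becomes " * \return The number of bytes".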
class DPointerFormatter(Formatter):
# Ensure consistent naming of variables related to the d-pointer design
# pattern.
patterns = ('*.cpp', '*.h')
# The clang formatter runs first, we can thus rely on appropriate coding
# style.
declare_regex = re.compile(r'^(\t*)(const )?([a-zA-Z0-9_]+) \*( ?const )?([a-zA-Z0-9_]+) = (LIBCAMERA_[DO]_PTR)\(([a-zA-Z0-9_]+)\);$')
@classmethod
def format(cls, filename, data):
lines = []
for line in data.split('\n'):
match = cls.declare_regex.match(line)
if match:
indent = match.group(1) or ''
const = match.group(2) or ''
macro = match.group(6)
klass = match.group(7)
if macro == 'LIBCAMERA_D_PTR':
var = 'Private *const d'
else:
var = f'{klass} *const o'
line = f'{indent}{const}{var} = {macro}({klass});'
lines.append(line)
return '\n'.join(lines)
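# Example (illustrative only, not part of the formatter): a declaration such as
# "Private *d = LIBCAMERA_D_PTR(Camera);" is rewritten to
# "Private *const d = LIBCAMERA_D_PTR(Camera);", and the LIBCAMERA_O_PTR
# variant is rewritten to declare "<Class> *const o".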
class IncludeOrderFormatter(Formatter):
patterns = ('*.cpp', '*.h')
include_regex = re.compile(r'^#include (["<])([^">]*)([">])')
@classmethod
def format(cls, filename, data):
lines = []
includes = []
# Parse blocks of #include statements, and output them as a sorted list
# when we reach a non #include statement.
for line in data.split('\n'):
match = IncludeOrderFormatter.include_regex.match(line)
if match:
# If the current line is an #include statement, add it to the
# includes group and continue to the next line.
open_token = match.group(1)
file_name = match.group(2)
close_token = match.group(3)
# Ensure the "..." include style for internal headers and the
# <...> style for all other libcamera headers.
if (file_name.startswith('libcamera/internal')):
open_token = '"'
close_token = '"'
elif (file_name.startswith('libcamera/')):
open_token = '<'
close_token = '>'
line = f'#include {open_token}{file_name}{close_token}'
includes.append((line, file_name))
continue
# The current line is not an #include statement, output the sorted
# stashed includes first, and then the current line.
if len(includes):
includes.sort(key=lambda i: i[1])
for include in includes:
lines.append(include[0])
includes = []
lines.append(line)
# In the unlikely case the file ends with an #include statement, make
# sure we output the stashed includes.
if len(includes):
includes.sort(key=lambda i: i[1])
for include in includes:
lines.append(include[0])
includes = []
return '\n'.join(lines)
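# Example (illustrative only, not part of the formatter): a block such as
#
#   #include <libcamera/internal/device_enumerator.h>
#   #include <libcamera/camera.h>
#
# is emitted as
#
#   #include <libcamera/camera.h>
#   #include "libcamera/internal/device_enumerator.h"
#
# i.e. sorted by file name, with internal headers quoted and other libcamera
# headers kept in angle brackets.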
class StripTrailingSpaceFormatter(Formatter):
patterns = ('*.c', '*.cpp', '*.h', '*.py', 'meson.build')
@classmethod
def format(cls, filename, data):
lines = data.split('\n')
for i in range(len(lines)):
lines[i] = lines[i].rstrip() + '\n'
return ''.join(lines)
# ------------------------------------------------------------------------------
# Style checking
#
def check_file(top_level, commit, filename, checkers):
# Extract the line numbers touched by the commit.
commit_diff = commit.get_diff(top_level, filename)
lines = []
for hunk in commit_diff:
lines.extend(hunk.side('to').touched)
# Skip commits that don't add any line.
if len(lines) == 0:
return 0
# Format the file after the commit with all formatters and compute the diff
# between the unformatted and formatted contents.
after = commit.get_file(filename)
formatted = after
for formatter in Formatter.formatters(filename, checkers):
formatted = formatter.format(filename, formatted)
after = after.splitlines(True)
formatted = formatted.splitlines(True)
diff = difflib.unified_diff(after, formatted)
# Split the diff in hunks, recording line number ranges for each hunk, and
# filter out hunks that are not touched by the commit.
formatted_diff = parse_diff(diff)
formatted_diff = [hunk for hunk in formatted_diff if hunk.intersects(lines)]
# Check for code issues not related to formatting.
issues = []
for checker in StyleChecker.checkers(filename, checkers):
checker = checker(after)
for hunk in commit_diff:
issues += checker.check(hunk.side('to').touched)
# Print the detected issues.
if len(issues) == 0 and len(formatted_diff) == 0:
return 0
print('%s---' % Colours.fg(Colours.Red), filename)
print('%s+++' % Colours.fg(Colours.Green), filename)
if len(formatted_diff):
for hunk in formatted_diff:
print(hunk)
if len(issues):
issues = sorted(issues, key=lambda i: i.line_number)
for issue in issues:
print('%s#%u: %s%s' % (Colours.fg(Colours.Yellow), issue.line_number,
issue.msg, Colours.reset()))
if issue.line is not None:
print('%s+%s%s' % (Colours.fg(Colours.Yellow), issue.line.rstrip(),
Colours.reset()))
if issue.position is not None:
# Align the position marker by using the original line with
# all characters except for tabs replaced with spaces. This
# ensures proper alignment regardless of how the code is
# indented.
start = issue.position[0]
prefix = ''.join([c if c == '\t' else ' ' for c in issue.line[:start]])
length = issue.position[1] - start - 1
print(' ' + prefix + '^' + '~' * length)
return len(formatted_diff) + len(issues)
def check_style(top_level, commit, checkers):
title = commit.commit + ' ' + commit.title
separator = '-' * len(title)
print(separator)
print(title)
print(separator)
issues = 0
# Apply the commit checkers first.
for checker in CommitChecker.checkers(checkers):
for issue in checker.check(commit, top_level):
print('%s%s%s' % (Colours.fg(Colours.Yellow), issue.msg, Colours.reset()))
issues += 1
# Filter out files we have no checker for.
patterns = set()
patterns.update(StyleChecker.all_patterns())
patterns.update(Formatter.all_patterns())
files = [f for f in commit.files() if len([p for p in patterns if fnmatch.fnmatch(os.path.basename(f), p)])]
for f in files:
issues += check_file(top_level, commit, f, checkers)
if issues == 0:
print('No issue detected')
else:
print('---')
print('%u potential %s detected, please review' %
(issues, 'issue' if issues == 1 else 'issues'))
return issues
def extract_commits(revs):
"""Extract a list of commits on which to operate from a revision or revision
range.
"""
ret = subprocess.run(['git', 'rev-parse', revs], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if ret.returncode != 0:
print(ret.stderr.decode('utf-8').splitlines()[0])
return []
revlist = ret.stdout.decode('utf-8').splitlines()
# If the revlist contains more than one item, pass it to git rev-list to list
# each commit individually.
if len(revlist) > 1:
ret = subprocess.run(['git', 'rev-list', *revlist], stdout=subprocess.PIPE)
revlist = ret.stdout.decode('utf-8').splitlines()
revlist.reverse()
return [Commit(x) for x in revlist]
def git_top_level():
"""Get the absolute path of the git top-level directory."""
ret = subprocess.run(['git', 'rev-parse', '--show-toplevel'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if ret.returncode != 0:
print(ret.stderr.decode('utf-8').splitlines()[0])
return None
return ret.stdout.decode('utf-8').strip()
def main(argv):
# Parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--checkers', '-c', type=str,
help='Specify which checkers to run as a comma-separated list. Defaults to all checkers')
parser.add_argument('--staged', '-s', action='store_true',
help='Include the changes in the index. Defaults to False')
parser.add_argument('--amend', '-a', action='store_true',
help='Include changes in the index and the previous patch combined. Defaults to False')
parser.add_argument('revision_range', type=str, default=None, nargs='?',
help='Revision range (as defined by git rev-parse). Defaults to HEAD if not specified.')
args = parser.parse_args(argv[1:])
if args.checkers:
args.checkers = args.checkers.split(',')
# Check for required dependencies.
for command, mandatory in dependencies.items():
found = shutil.which(command)
if mandatory and not found:
print('Executable %s not found' % command)
return 1
dependencies[command] = found
# Get the top level directory to pass absolute file names to git diff
# commands, in order to support execution from subdirectories of the git
# tree.
top_level = git_top_level()
if top_level is None:
return 1
commits = []
if args.staged:
commits.append(StagedChanges())
if args.amend:
commits.append(Amendment())
# If none of --staged or --amend was passed
if len(commits) == 0:
# And no revisions were passed, then default to HEAD
if not args.revision_range:
args.revision_range = 'HEAD'
if args.revision_range:
commits += extract_commits(args.revision_range)
issues = 0
for commit in commits:
issues += check_style(top_level, commit, args.checkers)
print('')
if issues:
return 1
else:
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
0 | repos/libcamera/utils | repos/libcamera/utils/ipu3/ipu3-capture.sh | #!/bin/bash
# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright (C) 2018, Google Inc.
#
# Author: Laurent Pinchart <[email protected]>
#
# Capture raw frames from cameras based on the Intel IPU3
#
# The script makes use of the following tools, which are expected to be
# executable from the system-wide path or from the local directory:
#
# - media-ctl (from v4l-utils git://linuxtv.org/v4l-utils.git)
# - raw2pnm (from nvt https://github.com/intel/nvt.git)
# - yavta (from git://git.ideasonboard.org/yavta.git)
# Locate the sensor entity
find_sensor() {
local bus
local sensor_name=$1
bus=$(grep "$sensor_name" /sys/class/video4linux/v4l-subdev*/name | cut -d ' ' -f 2)
if [[ -z $bus ]]; then
echo "Sensor '$sensor_name' not found." >&2
exit 1
fi
echo "$sensor_name $bus"
}
# Locate the media device
find_media_device() {
local mdev
for mdev in /dev/media* ; do
media-ctl -d $mdev -p | grep -q "^driver[ \t]*ipu3-cio2$" && break
mdev=
done
if [[ -z $mdev ]] ; then
echo "IPU3 media device not found." >&2
exit 1
fi
echo $mdev
}
# Locate the CSI2 and CIO2 and get the sensor format
parse_pipeline() {
local cio2_queue
local resolution
local sensor=$1
read cio2_queue bus_format sensor_size <<< $($mediactl -p | awk "
/^- entity [0-9]*:/ {
sensor=0;
}
/^- entity [0-9]*: $sensor/ {
sensor=1;
}
/^[ \t]*(stream:0)?\[fmt:/ {
if (sensor) {
gsub(\".*fmt:\", \"\");
gsub(\"[] ].*\", \"\");
sub(\"/\", \" \");
sub(\"@[0-9]+/[0-9]+\", \"\");
format=\$0;
}
}
/^[ \t]*->/ {
if (sensor)
cio2=substr(\$3, 0, 1);
}
END {
print cio2 \" \" format;
}
")
ipu3_csi2="ipu3-csi2 $cio2_queue"
ipu3_capture="ipu3-cio2 $cio2_queue"
sensor_width=$(echo $sensor_size | cut -d 'x' -f 1)
sensor_height=$(echo $sensor_size | cut -d 'x' -f 2)
echo "Using device $mdev with IPU3 CIO2 queue $cio2_queue"
}
# Configure the pipeline
configure_pipeline() {
local format="fmt:$bus_format/$sensor_size"
echo "Configuring pipeline for $sensor in $format"
$mediactl -r
$mediactl -l "\"$sensor\":0 -> \"$ipu3_csi2\":0[1]"
$mediactl -l "\"$ipu3_csi2\":1 -> \"$ipu3_capture\":0[1]"
$mediactl -V "\"$sensor\":0 [$format]"
$mediactl -V "\"$ipu3_csi2\":1 [$format]"
}
# Capture frames
capture_frames() {
local file_op
local frame_count=$1
local ipu3_format=IPU3_${bus_format/_1X10/}
local save_file=$2
if [[ $save_file -eq 1 ]]; then
file_op="--file=/tmp/frame-#.bin"
fi
yavta -c$frame_count -n5 -I -f $ipu3_format -s $sensor_size $file_op \
$($mediactl -e "$ipu3_capture")
}
# Convert captured files to ppm
convert_files() {
local frame_count=$1
local format=${bus_format/_1X10/}
local padded_width=$(expr \( $sensor_width + 49 \) / 50 \* 50)
echo "Converting ${sensor_width}x${sensor_height} (${padded_width}x${sensor_height})"
for i in `seq -f '%06.0f' 0 $(($frame_count - 1))`; do
ipu3-unpack /tmp/frame-$i.bin /tmp/frame-$i.raw
raw2pnm -x$padded_width -y$sensor_height -f$format /tmp/frame-$i.raw /tmp/frame-$i.ppm
done
}
# Print usage message
usage() {
echo "Usage: $1 [options] sensor-name"
echo "Supported options:"
echo "-c,--count n Number of frame to capture"
echo "--no-save Do not save captured frames to disk"
}
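# Example invocation (the sensor name is hypothetical and depends on the
# hardware, e.g. "ov13858" on some IPU3 platforms):
#
#   ./ipu3-capture.sh --count 20 ov13858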
# Parse command line arguments
frame_count=10
save_file=1
while (( "$#" )) ; do
case $1 in
-c|--count)
frame_count=$2
shift 2
;;
--no-save)
save_file=0
shift
;;
-*)
echo "Unsupported option $1" >&2
usage $0
exit 1
;;
*)
break
;;
esac
done
if [[ $# -ne 1 ]] ; then
usage $0
exit 1
fi
sensor_name=$1
sensor=$(find_sensor $sensor_name) || exit
mdev=$(find_media_device) || exit
mediactl="media-ctl -d $mdev"
parse_pipeline $sensor
configure_pipeline
capture_frames $frame_count $save_file
[[ $save_file -eq 1 ]] && convert_files $frame_count
|
0 | repos/libcamera/utils | repos/libcamera/utils/ipu3/ipu3-unpack.c | /* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* ipu3-unpack - Unpack IPU3 raw Bayer format to 16-bit Bayer
*
* Copyright 2018 Laurent Pinchart <[email protected]>
*/
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
static void usage(char *argv0)
{
printf("Usage: %s input-file output-file\n", basename(argv0));
printf("Unpack the IPU3 raw Bayer format to 16-bit Bayer\n");
}
int main(int argc, char *argv[])
{
int in_fd;
int out_fd;
int ret;
if (argc != 3) {
usage(argv[0]);
return 1;
}
in_fd = open(argv[1], O_RDONLY);
if (in_fd == -1) {
fprintf(stderr, "Failed to open input file '%s': %s\n",
argv[1], strerror(errno));
return 1;
}
out_fd = open(argv[2], O_WRONLY | O_TRUNC | O_CREAT, 0644);
if (out_fd == -1) {
fprintf(stderr, "Failed to open output file '%s': %s\n",
argv[2], strerror(errno));
return 1;
}
while (1) {
uint8_t in_data[32];
uint8_t out_data[50];
unsigned int i;
ret = read(in_fd, in_data, 32);
if (ret == -1) {
fprintf(stderr, "Failed to read input data: %s\n",
strerror(errno));
goto done;
}
if (ret < 32) {
if (ret != 0)
fprintf(stderr, "%u bytes of stray data at end of input\n",
ret);
break;
}
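/*
 * Layout note (derived from the loop below): each 32-byte input block
 * packs 25 10-bit pixels; the 6 most significant bits of the final byte
 * are padding. Every pixel is expanded to a little-endian 16-bit sample
 * with the value in the low 10 bits, giving 50 output bytes per block.
 */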
for (i = 0; i < 25; ++i) {
unsigned int index = (i * 10) / 8;
unsigned int lsb_shift = (i * 10) % 8;
unsigned int msb_shift = 8 - lsb_shift;
uint16_t pixel;
pixel = ((in_data[index+1] << msb_shift) & 0x3ff)
| ((in_data[index+0] >> lsb_shift) & 0x3ff);
out_data[i*2+0] = (pixel >> 0) & 0xff;
out_data[i*2+1] = (pixel >> 8) & 0xff;
}
ret = write(out_fd, out_data, 50);
if (ret == -1) {
fprintf(stderr, "Failed to write output data: %s\n",
strerror(errno));
goto done;
}
}
done:
close(in_fd);
close(out_fd);
return ret ? 1 : 0;
}
|
0 | repos/libcamera/utils | repos/libcamera/utils/ipu3/ipu3-pack.c | /* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* ipu3-pack - Convert unpacked RAW10 Bayer data to the IPU3 packed Bayer formats
*
* Copyright 2022 Umang Jain <[email protected]>
*/
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
static void usage(char *argv0)
{
printf("Usage: %s input-file output-file\n", basename(argv0));
printf("Convert unpacked RAW10 Bayer data to the IPU3 packed Bayer formats\n");
printf("If the output-file '-', output data will be written to standard output\n");
}
int main(int argc, char *argv[])
{
int in_fd;
int out_fd;
int ret;
if (argc != 3) {
usage(argv[0]);
return 1;
}
in_fd = open(argv[1], O_RDONLY);
if (in_fd == -1) {
fprintf(stderr, "Failed to open input file '%s': %s\n",
argv[1], strerror(errno));
return 1;
}
if (strcmp(argv[2], "-") == 0) {
out_fd = STDOUT_FILENO;
} else {
out_fd = open(argv[2], O_WRONLY | O_TRUNC | O_CREAT, 0644);
if (out_fd == -1) {
fprintf(stderr, "Failed to open output file '%s': %s\n",
argv[2], strerror(errno));
close(in_fd);
return 1;
}
}
while (1) {
uint16_t in_data[25];
uint8_t out_data[32];
unsigned int i;
ret = read(in_fd, in_data, sizeof(in_data));
if (ret < 0) {
fprintf(stderr, "Failed to read input data: %s\n",
strerror(errno));
goto done;
}
if ((unsigned)ret < sizeof(in_data)) {
if (ret != 0)
fprintf(stderr, "%u bytes of stray data at end of input\n",
ret);
goto done;
}
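/*
 * Layout note (derived from the loop below): 25 input pixels of 10
 * significant bits each (stored as 16-bit words) are packed into one
 * 32-byte block. Pixels 0-23 fill bytes 0-29; the final pixel occupies
 * bytes 30-31, leaving the top 6 bits of the last byte unused.
 */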
for (i = 0; i < 30; ++i) {
unsigned int index = (i * 8) / 10;
unsigned int msb_shift = (i * 8) % 10;
unsigned int lsb_shift = 10 - msb_shift;
out_data[i] = ((in_data[index] >> msb_shift) & 0xff)
| ((in_data[index+1] << lsb_shift) & 0xff);
}
out_data[30] = (in_data[24] >> 0) & 0xff;
out_data[31] = (in_data[24] >> 8) & 0x03;
ret = write(out_fd, out_data, sizeof(out_data));
if (ret < 0) {
fprintf(stderr, "Failed to write output data: %s\n",
strerror(errno));
goto done;
}
}
done:
close(in_fd);
if (out_fd != STDOUT_FILENO)
close(out_fd);
return ret ? 1 : 0;
}
|
0 | repos/libcamera/utils | repos/libcamera/utils/ipu3/ipu3-process.sh | #!/bin/bash
# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright (C) 2018, Google Inc.
#
# Author: Laurent Pinchart <[email protected]>
#
# Process raw frames with the Intel IPU3
#
# The script makes use of the following tools, which are expected to be
# found in $PATH:
#
# - media-ctl (from v4l-utils git://linuxtv.org/v4l-utils.git)
# - raw2pnm (from nvt https://github.com/intel/nvt.git)
# - yavta (from git://git.ideasonboard.org/yavta.git)
imgu_entity="ipu3-imgu 0"
# Locate the media device
find_media_device() {
local mdev
for mdev in /dev/media* ; do
media-ctl -d $mdev -p | grep -q "^driver[ \t]*ipu3-imgu$" && break
mdev=
done
if [[ -z $mdev ]] ; then
echo "IPU3 media device not found." >&2
exit 1
fi
echo $mdev
}
# Configure the pipeline
configure_pipeline() {
local enable_3a=1
local enable_out=1
local enable_vf=1
local mode=0
# Configure the links
$mediactl -r
$mediactl -l "\"$imgu_entity input\":0 -> \"$imgu_entity\":0[1]"
$mediactl -l "\"$imgu_entity\":2 -> \"$imgu_entity output\":0[$enable_out]"
$mediactl -l "\"$imgu_entity\":3 -> \"$imgu_entity viewfinder\":0[$enable_vf]"
$mediactl -l "\"$imgu_entity\":4 -> \"$imgu_entity 3a stat\":0[$enable_3a]"
# Select processing mode (0 for video, 1 for still image)
yavta --no-query -w "0x009819c1 $mode" $($mediactl -e "$imgu_entity")
# Set formats. The media bus code doesn't matter as it is ignored by the
# driver. We should use the FIXED format, but media-ctl doesn't support
# it.
$mediactl -V "\"$imgu_entity\":0 [fmt:SBGGR10_1X10/$out_size crop:(0,0)/$in_size compose:(0,0)/$in_size]"
$mediactl -V "\"$imgu_entity\":2 [fmt:SBGGR10_1X10/$out_size]"
$mediactl -V "\"$imgu_entity\":3 [fmt:SBGGR10_1X10/$vf_size]"
$mediactl -V "\"$imgu_entity\":4 [fmt:SBGGR10_1X10/$out_size]"
}
# Perform frame processing through the IMGU
process_frames() {
configure_pipeline
local yavta="yavta -n $nbufs -c$frame_count"
# Save the main and viewfinder outputs to disk, capture and drop 3A
# statistics. Sleep 500ms between each execution of yavta to keep the
# stdout messages readable.
$yavta -f $IMGU_OUT_PIXELFORMAT -s $out_size "-F$output_dir/frame-out-#.bin" \
$($mediactl -e "$imgu_entity output") &
sleep 0.5
$yavta -f $IMGU_VF_PIXELFORMAT -s $vf_size "-F$output_dir/frame-vf-#.bin" \
$($mediactl -e "$imgu_entity viewfinder") &
sleep 0.5
$yavta $($mediactl -e "$imgu_entity 3a stat") &
sleep 0.5
# Feed the IMGU input.
$yavta -f $IMGU_IN_PIXELFORMAT -s $in_size "-F$in_file" \
$($mediactl -e "$imgu_entity input")
}
# Convert captured files to ppm
convert_files() {
local index=$1
local type=$2
local size=$3
local format=$4
local width=$(echo $size | awk -F 'x' '{print $1}')
local height=$(echo $size | awk -F 'x' '{print $2}')
local padded_width=$(expr $(expr $width + 63) / 64 \* 64)
raw2pnm -x$padded_width -y$height -f$format \
$output_dir/frame-$type-$index.bin \
$output_dir/frame-$type-$index.ppm
}
run_test() {
IMGU_IN_PIXELFORMAT=IPU3_SGRBG10
IMGU_OUT_PIXELFORMAT=NV12
IMGU_VF_PIXELFORMAT=NV12
echo "==== Test ===="
echo "input: $in_file"
echo "output: $IMGU_OUT_PIXELFORMAT/$out_size"
echo "vf: $IMGU_VF_PIXELFORMAT/$vf_size"
process_frames
for i in `seq -f '%06.0f' 0 $(($frame_count - 1))`; do
convert_files $i out $out_size $IMGU_OUT_PIXELFORMAT
convert_files $i vf $vf_size $IMGU_VF_PIXELFORMAT
done
}
validate_size() {
local size=$1
local width=$(echo $size | awk -F 'x' '{print $1}')
local height=$(echo $size | awk -F 'x' '{print $2}')
[[ "x${size}" == "x${width}x${height}" ]]
}
# Print usage message
usage() {
echo "Usage: $(basename $1) [options] <input-file>"
echo "Supported options:"
echo "--out size output frame size (defaults to input size)"
echo "--vf size viewfinder frame size (defaults to input size)"
echo ""
echo "Where the input file name and size are"
echo ""
echo "input-file = prefix '-' width 'x' height '.' extension"
echo "size = width 'x' height"
}
# Parse command line arguments
while (( "$#" )) ; do
case $1 in
--out)
out_size=$2
if ! validate_size $out_size ; then
echo "Invalid size '$out_size'"
usage $0
exit 1
fi
shift 2
;;
--vf)
vf_size=$2
if ! validate_size $vf_size ; then
echo "Invalid size '$vf_size'"
usage $0
exit 1
fi
shift 2
;;
-*)
echo "Unsupported option $1" >&2
usage $0
exit 1
;;
*)
break
;;
esac
done
if [ $# != 1 ] ; then
usage $0
exit 1
fi
in_file=$1
# Parse the size from the input file name and perform minimal sanity
# checks.
in_size=$(echo $in_file | sed 's/.*-\([0-9]*\)x\([0-9]*\)\.[a-z0-9]*$/\1x\2/')
validate_size $in_size
if [[ $? != 0 ]] ; then
echo "Invalid input file name $in_file" >&2
usage $0
exit 1
fi
out_size=${out_size:-$in_size}
vf_size=${vf_size:-$in_size}
mdev=$(find_media_device) || exit
mediactl="media-ctl -d $mdev"
echo "Using device $mdev"
output_dir="/tmp"
frame_count=5
nbufs=7
run_test
|
0 | repos/libcamera/utils | repos/libcamera/utils/tuning/raspberrypi_alsc_only.py | #!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2022, Paul Elder <[email protected]>
#
# Tuning script for raspberrypi, ALSC only
import sys
import libtuning as lt
from libtuning.parsers import RaspberryPiParser
from libtuning.generators import RaspberryPiOutput
from raspberrypi.alsc import ALSC
tuner = lt.Tuner('Raspberry Pi (ALSC only)')
tuner.add(ALSC)
tuner.set_input_parser(RaspberryPiParser())
tuner.set_output_formatter(RaspberryPiOutput())
tuner.set_output_order([ALSC])
if __name__ == '__main__':
sys.exit(tuner.run(sys.argv))
|
0 | repos/libcamera/utils | repos/libcamera/utils/tuning/rkisp1.py | #!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2022, Paul Elder <[email protected]>
#
# Tuning script for rkisp1
import sys
import libtuning as lt
from libtuning.parsers import YamlParser
from libtuning.generators import YamlOutput
from libtuning.modules.lsc import LSCRkISP1
from libtuning.modules.agc import AGCRkISP1
tuner = lt.Tuner('RkISP1')
tuner.add(LSCRkISP1(
debug=[lt.Debug.Plot],
# This is for the actual LSC tuning, and is part of the base LSC
# module. rkisp1's table sector sizes (16x16 programmed as mirrored
# 8x8) are separate, and are hardcoded in its specific LSC tuning
# module.
sector_shape=(17, 17),
sector_x_gradient=lt.gradient.Linear(lt.Remainder.DistributeFront),
sector_y_gradient=lt.gradient.Linear(lt.Remainder.DistributeFront),
# This is the function that will be used to average the pixels in
# each sector. This can also be a custom function.
sector_average_function=lt.average.Mean(),
# This is the function that will be used to smooth the color ratio
# values. This can also be a custom function.
smoothing_function=lt.smoothing.MedianBlur(3),
))
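# The sector_average_function and smoothing_function parameters above are
# duck-typed: any object exposing the same method as lt.Average.average() or
# lt.Smoothing.smoothing() can be passed instead. A minimal, hypothetical
# sketch (assumes "import numpy as np"; not part of this script):
#
#     class Median(lt.Average):
#         def average(self, np_array):
#             return np.median(np_array)
#
# which could then be passed as sector_average_function=Median().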
tuner.add(AGCRkISP1(debug=[lt.Debug.Plot]))
tuner.set_input_parser(YamlParser())
tuner.set_output_formatter(YamlOutput())
tuner.set_output_order([AGCRkISP1, LSCRkISP1])
if __name__ == '__main__':
sys.exit(tuner.run(sys.argv))
|
0 | repos/libcamera/utils/tuning | repos/libcamera/utils/tuning/libtuning/macbeth.py | # SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
# Locate and extract Macbeth charts from images
# (Copied from: ctt_macbeth_locator.py)
# \todo Add debugging
import cv2
import os
from pathlib import Path
import numpy as np
import warnings
from sklearn import cluster
from libtuning.image import Image
# Reshape the image to a fixed width without distorting it. Returns the image
# and the scale factor.
def reshape(img, width):
factor = width / img.shape[0]
return cv2.resize(img, None, fx=factor, fy=factor), factor
# Correlation function to quantify match
def correlate(im1, im2):
f1 = im1.flatten()
f2 = im2.flatten()
cor = np.corrcoef(f1, f2)
return cor[0][1]
# @brief Compute coordinates of macbeth chart vertices and square centres
# @return (max_cor, best_map_col_norm, fit_coords, success)
#
# Also returns an error/success message for debugging purposes. Additionally,
# it scores the match with a confidence value.
#
# Brief explanation of the macbeth chart locating algorithm:
# - Find rectangles within image
# - Take rectangles within percentage offset of median perimeter. The
# assumption is that these will be the macbeth squares
# - For each potential square, find the 24 possible macbeth centre locations
# that would produce a square in that location
# - Find clusters of potential macbeth chart centres to find the potential
# macbeth centres with the most votes, i.e. the most likely ones
# - For each potential macbeth centre, use the centres of the squares that
# voted for it to find macbeth chart corners
# - For each set of corners, transform the possible match into normalised
# space and correlate with a reference chart to evaluate the match
# - Select the highest correlation as the macbeth chart match, returning the
# correlation as the confidence score
#
# \todo Clean this up
def get_macbeth_chart(img, ref_data):
ref, ref_w, ref_h, ref_corns = ref_data
# The code will raise and catch a MacbethError in case of a problem, trying
# to give some likely reasons why the problem occurred, hence the try/except
try:
# Obtain image, convert to grayscale and normalise
src = img
src, factor = reshape(src, 200)
original = src.copy()
a = 125 / np.average(src)
src_norm = cv2.convertScaleAbs(src, alpha=a, beta=0)
# This code checks if there are separate colour channels. In the past the
# macbeth locator ran on jpgs and this makes it robust to different
# filetypes. Note that running it on a jpg has 4x the pixels of the
# average bayer channel so coordinates must be doubled.
# This is best done in img_load.py in the get_patches method. The
# coordinates and image width, height must be divided by two if the
# macbeth locator has been run on a demosaicked image.
if len(src_norm.shape) == 3:
src_bw = cv2.cvtColor(src_norm, cv2.COLOR_BGR2GRAY)
else:
src_bw = src_norm
original_bw = src_bw.copy()
# Obtain image edges
sigma = 2
src_bw = cv2.GaussianBlur(src_bw, (0, 0), sigma)
t1, t2 = 50, 100
edges = cv2.Canny(src_bw, t1, t2)
# Dilate edges to prevent self-intersections in contours
k_size = 2
kernel = np.ones((k_size, k_size))
its = 1
edges = cv2.dilate(edges, kernel, iterations=its)
# Find contours in image
conts, _ = cv2.findContours(edges, cv2.RETR_TREE,
cv2.CHAIN_APPROX_NONE)
if len(conts) == 0:
raise MacbethError(
'\nWARNING: No macbeth chart found!'
'\nNo contours found in image\n'
'Possible problems:\n'
'- Macbeth chart is too dark or bright\n'
'- Macbeth chart is occluded\n'
)
# Find quadrilateral contours
epsilon = 0.07
conts_per = []
for i in range(len(conts)):
per = cv2.arcLength(conts[i], True)
poly = cv2.approxPolyDP(conts[i], epsilon * per, True)
if len(poly) == 4 and cv2.isContourConvex(poly):
conts_per.append((poly, per))
if len(conts_per) == 0:
raise MacbethError(
'\nWARNING: No macbeth chart found!'
'\nNo quadrilateral contours found'
'\nPossible problems:\n'
'- Macbeth chart is too dark or bright\n'
'- Macbeth chart is occluded\n'
'- Macbeth chart is out of camera plane\n'
)
# Sort contours by perimeter and get perimeters within percent of median
conts_per = sorted(conts_per, key=lambda x: x[1])
med_per = conts_per[int(len(conts_per) / 2)][1]
side = med_per / 4
perc = 0.1
med_low, med_high = med_per * (1 - perc), med_per * (1 + perc)
squares = []
for i in conts_per:
if med_low <= i[1] and med_high >= i[1]:
squares.append(i[0])
# Obtain coordinates of normalised macbeth and squares
square_verts, mac_norm = get_square_verts(0.06)
# For each square guess, find 24 possible macbeth chart centres
mac_mids = []
squares_raw = []
for i in range(len(squares)):
square = squares[i]
squares_raw.append(square)
# Convert quads to rotated rectangles. This is required as the
# 'squares' are usually quite irregular quadrilaterals, so
# performing a transform would result in exaggerated warping and
# inaccurate macbeth chart centre placement
rect = cv2.minAreaRect(square)
square = cv2.boxPoints(rect).astype(np.float32)
# Reorder vertices to prevent 'hourglass shape'
square = sorted(square, key=lambda x: x[0])
square_1 = sorted(square[:2], key=lambda x: x[1])
square_2 = sorted(square[2:], key=lambda x: -x[1])
square = np.array(np.concatenate((square_1, square_2)), np.float32)
square = np.reshape(square, (4, 2)).astype(np.float32)
squares[i] = square
# Find 24 possible macbeth chart centres by transforming normalised
# macbeth square vertices onto candidate square vertices found in image
for j in range(len(square_verts)):
verts = square_verts[j]
p_mat = cv2.getPerspectiveTransform(verts, square)
mac_guess = cv2.perspectiveTransform(mac_norm, p_mat)
mac_guess = np.round(mac_guess).astype(np.int32)
mac_mid = np.mean(mac_guess, axis=1)
mac_mids.append([mac_mid, (i, j)])
if len(mac_mids) == 0:
raise MacbethError(
'\nWARNING: No macbeth chart found!'
'\nNo possible macbeth charts found within image'
'\nPossible problems:\n'
'- Part of the macbeth chart is outside the image\n'
'- Quadrilaterals in image background\n'
)
# Reshape data
for i in range(len(mac_mids)):
mac_mids[i][0] = mac_mids[i][0][0]
# Find where midpoints cluster to identify most likely macbeth centres
clustering = cluster.AgglomerativeClustering(
n_clusters=None,
compute_full_tree=True,
distance_threshold=side * 2
)
mac_mids_list = [x[0] for x in mac_mids]
if len(mac_mids_list) == 1:
# Special case of only one valid centre found (probably not needed)
clus_list = []
clus_list.append([mac_mids, len(mac_mids)])
else:
clustering.fit(mac_mids_list)
# Create list of all clusters
clus_list = []
if clustering.n_clusters_ > 1:
for i in range(clustering.labels_.max() + 1):
indices = [j for j, x in enumerate(clustering.labels_) if x == i]
clus = []
for index in indices:
clus.append(mac_mids[index])
clus_list.append([clus, len(clus)])
clus_list.sort(key=lambda x: -x[1])
elif clustering.n_clusters_ == 1:
# Special case of only one cluster found
clus_list.append([mac_mids, len(mac_mids)])
else:
raise MacbethError(
'\nWARNING: No macbeth chart found!'
'\nNo clusters found'
'\nPossible problems:\n'
'- NA\n'
)
# Keep only clusters with enough votes
clus_len_max = clus_list[0][1]
clus_tol = 0.7
for i in range(len(clus_list)):
if clus_list[i][1] < clus_len_max * clus_tol:
clus_list = clus_list[:i]
break
cent = np.mean(clus_list[i][0], axis=0)[0]
clus_list[i].append(cent)
# Get centres of each normalised square
reference = get_square_centres(0.06)
# For each possible macbeth chart, transform image into
# normalised space and find correlation with reference
max_cor = 0
best_map = None
best_fit = None
best_cen_fit = None
best_ref_mat = None
for clus in clus_list:
clus = clus[0]
sq_cents = []
ref_cents = []
i_list = [p[1][0] for p in clus]
for point in clus:
i, j = point[1]
# Remove any square that voted for two different points within
# the same cluster. This causes the same point in the image to be
# mapped to two different reference square centres, resulting in
# a very distorted perspective transform since cv2.findHomography
# simply minimises error.
# This phenomenon is not particularly likely to occur due to the
# enforced distance threshold in the clustering fit but it is
# best to keep this in just in case.
if i_list.count(i) == 1:
square = squares_raw[i]
sq_cent = np.mean(square, axis=0)
ref_cent = reference[j]
sq_cents.append(sq_cent)
ref_cents.append(ref_cent)
# At least four squares need to have voted for a centre in
# order for a transform to be found
if len(sq_cents) < 4:
raise MacbethError(
'\nWARNING: No macbeth chart found!'
'\nNot enough squares found'
'\nPossible problems:\n'
'- Macbeth chart is occluded\n'
'- Macbeth chart is too dark or bright\n'
)
ref_cents = np.array(ref_cents)
sq_cents = np.array(sq_cents)
# Find best fit transform from normalised centres to image
h_mat, mask = cv2.findHomography(ref_cents, sq_cents)
if 'None' in str(type(h_mat)):
raise MacbethError(
'\nERROR\n'
)
# Transform normalised corners and centres into image space
mac_fit = cv2.perspectiveTransform(mac_norm, h_mat)
mac_cen_fit = cv2.perspectiveTransform(np.array([reference]), h_mat)
# Transform located corners into reference space
ref_mat = cv2.getPerspectiveTransform(
mac_fit,
np.array([ref_corns])
)
map_to_ref = cv2.warpPerspective(
original_bw, ref_mat,
(ref_w, ref_h)
)
# Normalise brightness
a = 125 / np.average(map_to_ref)
map_to_ref = cv2.convertScaleAbs(map_to_ref, alpha=a, beta=0)
# Find correlation with bw reference macbeth
cor = correlate(map_to_ref, ref)
# Keep only if best correlation
if cor > max_cor:
max_cor = cor
best_map = map_to_ref
best_fit = mac_fit
best_cen_fit = mac_cen_fit
best_ref_mat = ref_mat
# Rotate macbeth by pi and recorrelate in case macbeth chart is
# upside-down
mac_fit_inv = np.array(
([[mac_fit[0][2], mac_fit[0][3],
mac_fit[0][0], mac_fit[0][1]]])
)
mac_cen_fit_inv = np.flip(mac_cen_fit, axis=1)
ref_mat = cv2.getPerspectiveTransform(
mac_fit_inv,
np.array([ref_corns])
)
map_to_ref = cv2.warpPerspective(
original_bw, ref_mat,
(ref_w, ref_h)
)
a = 125 / np.average(map_to_ref)
map_to_ref = cv2.convertScaleAbs(map_to_ref, alpha=a, beta=0)
cor = correlate(map_to_ref, ref)
if cor > max_cor:
max_cor = cor
best_map = map_to_ref
best_fit = mac_fit_inv
best_cen_fit = mac_cen_fit_inv
best_ref_mat = ref_mat
# Check best match is above threshold
cor_thresh = 0.6
if max_cor < cor_thresh:
raise MacbethError(
'\nWARNING: Correlation too low'
'\nPossible problems:\n'
'- Bad lighting conditions\n'
'- Macbeth chart is occluded\n'
'- Background is too noisy\n'
'- Macbeth chart is out of camera plane\n'
)
# Represent coloured macbeth in reference space
best_map_col = cv2.warpPerspective(
original, best_ref_mat, (ref_w, ref_h)
)
best_map_col = cv2.resize(
best_map_col, None, fx=4, fy=4
)
a = 125 / np.average(best_map_col)
best_map_col_norm = cv2.convertScaleAbs(
best_map_col, alpha=a, beta=0
)
# Rescale coordinates to original image size
fit_coords = (best_fit / factor, best_cen_fit / factor)
return (max_cor, best_map_col_norm, fit_coords, True)
# Catch macbeth errors and continue with code
except MacbethError as error:
eprint(error)
return (0, None, None, False)
def find_macbeth(img, mac_config):
small_chart = mac_config['small']
show = mac_config['show']
# Catch the warnings
warnings.simplefilter("ignore")
warnings.warn("runtime", RuntimeWarning)
# Reference macbeth chart is created that will be correlated with the
# located macbeth chart guess to produce a confidence value for the match.
script_dir = Path(os.path.realpath(os.path.dirname(__file__)))
macbeth_ref_path = script_dir.joinpath('macbeth_ref.pgm')
ref = cv2.imread(str(macbeth_ref_path), flags=cv2.IMREAD_GRAYSCALE)
ref_w = 120
ref_h = 80
rc1 = (0, 0)
rc2 = (0, ref_h)
rc3 = (ref_w, ref_h)
rc4 = (ref_w, 0)
ref_corns = np.array((rc1, rc2, rc3, rc4), np.float32)
ref_data = (ref, ref_w, ref_h, ref_corns)
# Locate macbeth chart
cor, mac, coords, ret = get_macbeth_chart(img, ref_data)
# Following bits of code try to fix common problems with simple techniques.
# If now or at any point the best correlation is above 0.75, then
# nothing more is tried as this is a high enough confidence to ensure
# reliable macbeth square centre placement.
for brightness in [2, 4]:
if cor >= 0.75:
break
img_br = cv2.convertScaleAbs(img, alpha=brightness, beta=0)
cor_b, mac_b, coords_b, ret_b = get_macbeth_chart(img_br, ref_data)
if cor_b > cor:
cor, mac, coords, ret = cor_b, mac_b, coords_b, ret_b
# In case macbeth chart is too small, take a selection of the image and
# attempt to locate macbeth chart within that. The scale increment is
# root 2
# These variables will be used to transform the found coordinates at
# smaller scales back into the original. If ii is still -1 after this
# section that means it was not successful
ii = -1
w_best = 0
h_best = 0
d_best = 100
# d_best records the scale of the best match. Macbeth charts are only looked
# for at one scale increment smaller than the current best match in order to avoid
# unnecessarily searching for macbeth charts at small scales.
# If a macbeth chart has already been found then set d_best to 0
if cor != 0:
d_best = 0
for index, pair in enumerate([{'sel': 2 / 3, 'inc': 1 / 6},
{'sel': 1 / 2, 'inc': 1 / 8},
{'sel': 1 / 3, 'inc': 1 / 12},
{'sel': 1 / 4, 'inc': 1 / 16}]):
if cor >= 0.75:
break
# Check if we need to check macbeth charts at even smaller scales. This
# slows the code down significantly and has therefore been omitted by
# default, however it is not unusably slow so might be useful if the
# macbeth chart is too small to be picked up by the current
# subselections. Use this for macbeth charts with side lengths around
# 1/5 image dimensions (and smaller...?) it is, however, recommended
# that macbeth charts take up as large as possible a proportion of the
# image.
if index >= 2 and (not small_chart or d_best <= index - 1):
break
w, h = list(img.shape[:2])
# Set dimensions of the subselection and the step along each axis
# between selections
w_sel = int(w * pair['sel'])
h_sel = int(h * pair['sel'])
w_inc = int(w * pair['inc'])
h_inc = int(h * pair['inc'])
loop = int(((1 - pair['sel']) / pair['inc']) + 1)
# For each subselection, look for a macbeth chart
for i in range(loop):
for j in range(loop):
w_s, h_s = i * w_inc, j * h_inc
img_sel = img[w_s:w_s + w_sel, h_s:h_s + h_sel]
cor_ij, mac_ij, coords_ij, ret_ij = get_macbeth_chart(img_sel, ref_data)
# If the correlation is better than the best then record the
# scale and current subselection at which macbeth chart was
# found. Also record the coordinates, macbeth chart and message.
if cor_ij > cor:
cor = cor_ij
mac, coords, ret = mac_ij, coords_ij, ret_ij
ii, jj = i, j
w_best, h_best = w_inc, h_inc
d_best = index + 1
# Transform coordinates from subselection to original image
if ii != -1:
for a in range(len(coords)):
for b in range(len(coords[a][0])):
coords[a][0][b][1] += ii * w_best
coords[a][0][b][0] += jj * h_best
if not ret:
return None
coords_fit = coords
if cor < 0.75:
eprint(f'Warning: Low confidence {cor:.3f} for macbeth chart in {img.path.name}')
if show:
draw_macbeth_results(img, coords_fit)
return coords_fit
def locate_macbeth(image: Image, config: dict):
# Find macbeth centres
av_chan = (np.mean(np.array(image.channels), axis=0) / (2**16))
av_val = np.mean(av_chan)
if av_val < image.blacklevel_16 / (2**16) + 1 / 64:
eprint(f'Image {image.path.name} too dark')
return None
macbeth = find_macbeth(av_chan, config['general']['macbeth'])
if macbeth is None:
eprint(f'No macbeth chart found in {image.path.name}')
return None
mac_cen_coords = macbeth[1]
if not image.get_patches(mac_cen_coords):
eprint(f'Macbeth patches have saturated in {image.path.name}')
return None
return macbeth
|
0 | repos/libcamera/utils/tuning | repos/libcamera/utils/tuning/libtuning/__init__.py | # SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2022, Paul Elder <[email protected]>
from libtuning.utils import *
from libtuning.libtuning import *
from libtuning.image import *
from libtuning.macbeth import *
from libtuning.average import *
from libtuning.gradient import *
from libtuning.smoothing import *
|
0 | repos/libcamera/utils/tuning | repos/libcamera/utils/tuning/libtuning/image.py | # SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
#
# Container for an image and associated metadata
import binascii
import numpy as np
from pathlib import Path
import pyexiv2 as pyexif
import rawpy as raw
import re
import libtuning as lt
import libtuning.utils as utils
class Image:
def __init__(self, path: Path):
self.path = path
self.lsc_only = False
self.color = -1
self.lux = -1
try:
self._load_metadata_exif()
except Exception as e:
utils.eprint(f'Failed to load metadata from {self.path}: {e}')
raise e
try:
self._read_image_dng()
except Exception as e:
utils.eprint(f'Failed to load image data from {self.path}: {e}')
raise e
@property
def name(self):
return self.path.name
# May raise KeyError as there are too many to check
def _load_metadata_exif(self):
# RawPy doesn't load all the image tags that we need, so we use py3exiv2
metadata = pyexif.ImageMetadata(str(self.path))
metadata.read()
# The DNG and TIFF/EP specifications use different IFDs to store the
# raw image data and the Exif tags. DNG stores them in a SubIFD and in
# an Exif IFD respectively (named "SubImage1" and "Photo" by pyexiv2),
# while TIFF/EP stores them both in IFD0 (name "Image"). Both are used
# in "DNG" files, with libcamera-apps following the DNG recommendation
# and applications based on picamera2 following TIFF/EP.
#
# This code detects which tags are being used, and therefore extracts the
# correct values.
try:
self.w = metadata['Exif.SubImage1.ImageWidth'].value
subimage = 'SubImage1'
photo = 'Photo'
except KeyError:
self.w = metadata['Exif.Image.ImageWidth'].value
subimage = 'Image'
photo = 'Image'
self.pad = 0
self.h = metadata[f'Exif.{subimage}.ImageLength'].value
white = metadata[f'Exif.{subimage}.WhiteLevel'].value
self.sigbits = int(white).bit_length()
self.fmt = (self.sigbits - 4) // 2
self.exposure = int(metadata[f'Exif.{photo}.ExposureTime'].value * 1000000)
self.againQ8 = metadata[f'Exif.{photo}.ISOSpeedRatings'].value * 256 / 100
self.againQ8_norm = self.againQ8 / 256
self.camName = metadata['Exif.Image.Model'].value
self.blacklevel = int(metadata[f'Exif.{subimage}.BlackLevel'].value[0])
self.blacklevel_16 = self.blacklevel << (16 - self.sigbits)
# Channel order depending on bayer pattern
# The key is the order given by exif, where 0 is R, 1 is G, and 2 is B
# The value is the index where the color can be found, where the first
# is R, then G, then G, then B.
bayer_case = {
'0 1 1 2': (lt.Color.R, lt.Color.GR, lt.Color.GB, lt.Color.B),
'1 2 0 1': (lt.Color.GB, lt.Color.R, lt.Color.B, lt.Color.GR),
'2 1 1 0': (lt.Color.B, lt.Color.GB, lt.Color.GR, lt.Color.R),
'1 0 2 1': (lt.Color.GR, lt.Color.R, lt.Color.B, lt.Color.GB)
}
# Note: This needs to be in IFD0
cfa_pattern = metadata[f'Exif.{subimage}.CFAPattern'].value
self.order = bayer_case[cfa_pattern]
def _read_image_dng(self):
raw_im = raw.imread(str(self.path))
raw_data = raw_im.raw_image
shift = 16 - self.sigbits
c0 = np.left_shift(raw_data[0::2, 0::2].astype(np.int64), shift)
c1 = np.left_shift(raw_data[0::2, 1::2].astype(np.int64), shift)
c2 = np.left_shift(raw_data[1::2, 0::2].astype(np.int64), shift)
c3 = np.left_shift(raw_data[1::2, 1::2].astype(np.int64), shift)
self.channels = [c0, c1, c2, c3]
# Reorder the channels into R, GR, GB, B
self.channels = [self.channels[i] for i in self.order]
# \todo Move this to macbeth.py
def get_patches(self, cen_coords, size=16):
saturated = False
# Obtain channel widths and heights
ch_w, ch_h = self.w, self.h
cen_coords = list(np.array((cen_coords[0])).astype(np.int32))
self.cen_coords = cen_coords
# Squares are ordered by stacking macbeth chart columns from left to
# right. Some useful patch indices:
# white = 3
# black = 23
# 'reds' = 9, 10
# 'blues' = 2, 5, 8, 20, 22
# 'greens' = 6, 12, 17
# greyscale = 3, 7, 11, 15, 19, 23
all_patches = []
for ch in self.channels:
ch_patches = []
for cen in cen_coords:
# Macbeth centre is placed at top left of central 2x2 patch to
# account for rounding. Patch pixels are sorted by pixel
# brightness so spatial information is lost.
patch = ch[cen[1] - 7:cen[1] + 9, cen[0] - 7:cen[0] + 9].flatten()
patch.sort()
if patch[-5] == (2**self.sigbits - 1) * 2**(16 - self.sigbits):
saturated = True
ch_patches.append(patch)
all_patches.append(ch_patches)
self.patches = all_patches
return not saturated
|
0 | repos/libcamera/utils/tuning | repos/libcamera/utils/tuning/libtuning/gradient.py | # SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2022, Paul Elder <[email protected]>
#
# Gradients that can be used to distribute or map numbers
import libtuning as lt
import math
from numbers import Number
# @brief Gradient for how to allocate pixels to sectors
# @description There are no parameters for the gradients as the domain is the
# number of pixels and the range is the number of sectors, and
# there is only one curve that has a startpoint and endpoint at
# (0, 0) and at (#pixels, #sectors). The exception is for curves
# that *do* have multiple solutions for only two points, such as
# gaussian, and curves of higher polynomial orders if we had them.
#
# \todo There will probably be a helper in the Gradient class, as I have a
# feeling that all the other curves (besides Linear and Gaussian) can be
# implemented in the same way.
class Gradient(object):
def __init__(self):
pass
# @brief Distribute pixels into sectors (only in one dimension)
# @param domain Number of pixels
# @param sectors Number of sectors
# @return A list of number of pixels in each sector
def distribute(self, domain: list, sectors: list) -> list:
raise NotImplementedError
# @brief Map a number on a curve
# @param domain Domain of the curve
# @param rang Range of the curve
# @param x Input on the domain of the curve
# @return y from the range of the curve
def map(self, domain: tuple, rang: tuple, x: Number) -> Number:
raise NotImplementedError
class Linear(Gradient):
# @param remainder Mode of handling remainder
def __init__(self, remainder: lt.Remainder = lt.Remainder.Float):
self.remainder = remainder
def distribute(self, domain: list, sectors: list) -> list:
size = domain / sectors
rem = domain % sectors
if rem == 0:
return [int(size)] * sectors
size = math.ceil(size)
rem = domain % size
output_sectors = [int(size)] * (sectors - 1)
if self.remainder == lt.Remainder.Float:
size = domain / sectors
output_sectors = [size] * sectors
elif self.remainder == lt.Remainder.DistributeFront:
output_sectors.append(int(rem))
elif self.remainder == lt.Remainder.DistributeBack:
output_sectors.insert(0, int(rem))
else:
raise ValueError
return output_sectors
def map(self, domain: tuple, rang: tuple, x: Number) -> Number:
m = (rang[1] - rang[0]) / (domain[1] - domain[0])
b = rang[0] - m * domain[0]
return m * x + b
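# Example (computed from the implementation above, not part of the module):
# Linear(lt.Remainder.DistributeFront).distribute(100, 8) returns
# [13, 13, 13, 13, 13, 13, 13, 9], while Linear(lt.Remainder.Float) returns
# [12.5] * 8 for the same arguments.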
|
0 | repos/libcamera/utils/tuning | repos/libcamera/utils/tuning/libtuning/average.py | # SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2022, Paul Elder <[email protected]>
#
# Wrapper for numpy averaging functions to enable duck-typing
import numpy as np
# @brief Wrapper for np averaging functions so that they can be duck-typed
class Average(object):
def __init__(self):
pass
def average(self, np_array):
raise NotImplementedError
class Mean(Average):
def average(self, np_array):
return np.mean(np_array)
|
0 | repos/libcamera/utils/tuning | repos/libcamera/utils/tuning/libtuning/libtuning.py | # SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2022, Paul Elder <[email protected]>
#
# An infrastructure for camera tuning tools
import argparse
import libtuning as lt
import libtuning.utils as utils
from libtuning.utils import eprint
from enum import Enum, IntEnum
class Color(IntEnum):
R = 0
GR = 1
GB = 2
B = 3
class Debug(Enum):
Plot = 1
# @brief What to do with the leftover pixels after dividing them into ALSC
# sectors, when the division gradient is uniform
# @var Float Force floating point division so all sectors divide equally
# @var DistributeFront Divide the remainder equally (until running out,
# obviously) into the existing sectors, starting from the front
# @var DistributeBack Same as DistributeFront but starting from the back
class Remainder(Enum):
Float = 0
DistributeFront = 1
DistributeBack = 2
# @brief A helper class to contain a default value for a module configuration
# parameter
class Param(object):
# @var Required The value contained in this instance is irrelevant, and the
# value must be provided by the tuning configuration file.
# @var Optional If the value is not provided by the tuning configuration
# file, then the value contained in this instance will be used instead.
# @var Hardcode The value contained in this instance will be used
class Mode(Enum):
Required = 0
Optional = 1
Hardcode = 2
# @param name Name of the parameter. Shall match the name used in the
# configuration file for the parameter
# @param required Whether or not a value is required in the config
# parameter of get_value()
# @param val Default value (only relevant if mode is Optional)
def __init__(self, name: str, required: Mode, val=None):
self.name = name
self.__required = required
self.val = val
def get_value(self, config: dict):
if self.__required is self.Mode.Hardcode:
return self.val
if self.__required is self.Mode.Required and self.name not in config:
raise ValueError(f'Parameter {self.name} is required but not provided in the configuration')
return config[self.name] if self.required else self.val
@property
def required(self):
return self.__required is self.Mode.Required
# @brief Used by libtuning to auto-generate help information for the tuning
# script on the available parameters for the configuration file
# \todo Implement this
@property
def info(self):
raise NotImplementedError
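# Hypothetical usage sketch (the parameter name below is made up and not one a
# real module defines):
#
#     shape = Param('sector_shape', Param.Mode.Required)
#     value = shape.get_value(config['lsc'])  # raises ValueError if missing
#
# With Mode.Hardcode, get_value() always returns the hardcoded val instead.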
class Tuner(object):
# External functions
def __init__(self, platform_name):
self.name = platform_name
self.modules = []
self.parser = None
self.generator = None
self.output_order = []
self.config = {}
self.output = {}
def add(self, module):
self.modules.append(module)
def set_input_parser(self, parser):
self.parser = parser
def set_output_formatter(self, output):
self.generator = output
def set_output_order(self, modules):
self.output_order = modules
# @brief Convert classes in self.output_order to the instances in self.modules
def _prepare_output_order(self):
output_order = self.output_order
self.output_order = []
for module_type in output_order:
modules = [module for module in self.modules if module.type == module_type.type]
if len(modules) > 1:
eprint(f'Multiple modules found for module type "{module_type.type}"')
return False
if len(modules) < 1:
eprint(f'No module found for module type "{module_type.type}"')
return False
self.output_order.append(modules[0])
return True
# \todo Validate parser and generator at Tuner construction time?
def _validate_settings(self):
if self.parser is None:
eprint('Missing parser')
return False
if self.generator is None:
eprint('Missing generator')
return False
if len(self.modules) == 0:
eprint('No modules added')
return False
if len(self.output_order) != len(self.modules):
eprint('Number of outputs does not match number of modules')
return False
return True
def _process_args(self, argv, platform_name):
parser = argparse.ArgumentParser(description=f'Camera Tuning for {platform_name}')
parser.add_argument('-i', '--input', type=str, required=True,
help='''Directory containing calibration images (required).
Images for ALSC must be named "alsc_{Color Temperature}k_1[u].dng",
and all other images must be named "{Color Temperature}k_{Lux Level}l.dng"''')
parser.add_argument('-o', '--output', type=str, required=True,
help='Output file (required)')
# It is not our duty to scan all modules to figure out their default
# options, so simply return an empty configuration if none is provided.
parser.add_argument('-c', '--config', type=str, default='',
help='Config file (optional)')
# \todo Check if we really need this or if stderr is good enough, or if
# we want a better logging infrastructure with log levels
parser.add_argument('-l', '--log', type=str, default=None,
help='Output log file (optional)')
return parser.parse_args(argv[1:])
def run(self, argv):
args = self._process_args(argv, self.name)
if args is None:
return -1
if not self._validate_settings():
return -1
if not self._prepare_output_order():
return -1
if len(args.config) > 0:
self.config, disable = self.parser.parse(args.config, self.modules)
else:
self.config = {'general': {}}
disable = []
# Remove disabled modules
for module in disable:
if module in self.modules:
self.modules.remove(module)
for module in self.modules:
if not module.validate_config(self.config):
eprint(f'Config is invalid for module {module.type}')
return -1
has_lsc = any(isinstance(m, lt.modules.lsc.LSC) for m in self.modules)
# Only one LSC module allowed
has_only_lsc = has_lsc and len(self.modules) == 1
images = utils.load_images(args.input, self.config, not has_only_lsc, has_lsc)
if images is None or len(images) == 0:
eprint('No images were found, or none could be loaded')
return -1
# Do the tuning
for module in self.modules:
out = module.process(self.config, images, self.output)
if out is None:
eprint(f'Module {module.name} failed to process, aborting')
break
self.output[module] = out
self.generator.write(args.output, self.output, self.output_order)
return 0
|
0 | repos/libcamera/utils/tuning | repos/libcamera/utils/tuning/libtuning/utils.py | # SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
# Copyright (C) 2022, Paul Elder <[email protected]>
#
# Utilities for libtuning
import decimal
import math
import numpy as np
import os
from pathlib import Path
import re
import sys
import libtuning as lt
from libtuning.image import Image
from libtuning.macbeth import locate_macbeth
# Utility functions
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def get_module_by_type_name(modules, name):
for module in modules:
if module.type == name:
return module
return None
# Private utility functions
def _list_image_files(directory):
d = Path(directory)
files = [d.joinpath(f) for f in os.listdir(d)
if re.search(r'\.(jp[e]g$)|(dng$)', f)]
files.sort()
return files
def _parse_image_filename(fn: Path):
result = re.search(r'^(alsc_)?(\d+)[kK]_(\d+)?[lLuU]?.\w{3,4}$', fn.name)
if result is None:
eprint(f'The file name of {fn.name} is incorrectly formatted')
return None, None, None
color = int(result.group(2))
lsc_only = result.group(1) is not None
lux = None if lsc_only else int(result.group(3))
return color, lux, lsc_only
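# Example (derived from the regex above): 'alsc_3000k_1u.dng' parses to
# color=3000, lux=None, lsc_only=True, while '5000k_500l.dng' parses to
# color=5000, lux=500, lsc_only=False.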
# \todo Implement this from check_imgs() in ctt.py
def _validate_images(images):
return True
# Public utility functions
# @brief Load images into a single list of Image instances
# @param input_dir Directory from which to load image files
# @param config Configuration dictionary
# @param load_nonlsc Whether or not to load non-lsc images
# @param load_lsc Whether or not to load lsc-only images
# @return A list of Image instances
def load_images(input_dir: str, config: dict, load_nonlsc: bool, load_lsc: bool) -> list:
files = _list_image_files(input_dir)
if len(files) == 0:
eprint(f'No images found in {input_dir}')
return None
images = []
for f in files:
color, lux, lsc_only = _parse_image_filename(f)
if color is None:
continue
# Skip lsc image if we don't need it
if lsc_only and not load_lsc:
eprint(f'Skipping {f.name} as this tuner has no LSC module')
continue
# Skip non-lsc image if we don't need it
if not lsc_only and not load_nonlsc:
eprint(f'Skipping {f.name} as this tuner only has an LSC module')
continue
# Load image
try:
image = Image(f)
except Exception as e:
eprint(f'Failed to load image {f.name}: {e}')
continue
# Populate simple fields
image.lsc_only = lsc_only
image.color = color
image.lux = lux
# The black level comes from the TIFF tags, but it can be overridden by
# the config file.
if 'blacklevel' in config['general']:
image.blacklevel_16 = config['general']['blacklevel']
if lsc_only:
images.append(image)
continue
# Handle macbeth
macbeth = locate_macbeth(config)
if macbeth is None:
continue
images.append(image)
if not _validate_images(images):
return None
return images
|
0 | repos/libcamera/utils/tuning | repos/libcamera/utils/tuning/libtuning/smoothing.py | # SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2022, Paul Elder <[email protected]>
#
# Wrapper for cv2 smoothing functions to enable duck-typing
import cv2
# @brief Wrapper for cv2 smoothing functions so that they can be duck-typed
class Smoothing(object):
def __init__(self):
pass
def smoothing(self, src):
raise NotImplementedError
class MedianBlur(Smoothing):
def __init__(self, ksize):
self.ksize = ksize
def smoothing(self, src):
return cv2.medianBlur(src.astype('float32'), self.ksize).astype('float64')
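# A minimal usage sketch, assuming a 2D numpy gain table (the table shape and
# kernel size are illustrative):
#
#     import numpy as np
#     table = np.random.rand(12, 16)
#     smoothed = MedianBlur(3).smoothing(table)
#
# Any object exposing a smoothing(src) method can be passed wherever a
# Smoothing instance is expected; that is the duck-typing referred to above.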
|
0 | repos/libcamera/utils/tuning/libtuning | repos/libcamera/utils/tuning/libtuning/modules/__init__.py | # SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2022, Paul Elder <[email protected]>
|
0 | repos/libcamera/utils/tuning/libtuning | repos/libcamera/utils/tuning/libtuning/modules/module.py | # SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2022, Paul Elder <[email protected]>
#
# Base class for algorithm-specific tuning modules
# @var type Type of the module. Defined in the base module.
# @var out_name The key that will be used for the algorithm in the algorithms
# dictionary in the tuning output file
# @var hr_name Human-readable module name, mostly for debugging
class Module(object):
type = 'base'
hr_name = 'Base Module'
out_name = 'GenericAlgorithm'
def __init__(self):
pass
def validate_config(self, config: dict) -> bool:
raise NotImplementedError
# @brief Do the module's processing
# @param config Full configuration from the input configuration file
# @param images List of images to process
# @param outputs The outputs of all modules that were executed before this
# module. Note that this is an input parameter, and the
# output of this module should be returned directly
# @return Result of the module's processing. It may be empty. None
# indicates failure and that the result should not be used.
def process(self, config: dict, images: list, outputs: dict) -> dict:
raise NotImplementedError
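# A minimal sketch of a hypothetical subclass (all names are illustrative):
#
#     class ExampleModule(Module):
#         type = 'example'
#         hr_name = 'Example Module'
#         out_name = 'ExampleAlgorithm'
#
#         def validate_config(self, config: dict) -> bool:
#             # This module needs no configuration entries.
#             return True
#
#         def process(self, config: dict, images: list, outputs: dict) -> dict:
#             # Return an empty but valid result; returning None would signal
#             # failure to the caller.
#             return {}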
|
0 | repos/libcamera/utils/tuning/libtuning/modules | repos/libcamera/utils/tuning/libtuning/modules/agc/__init__.py | # SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2024, Paul Elder <[email protected]>
from libtuning.modules.agc.agc import AGC
from libtuning.modules.agc.rkisp1 import AGCRkISP1
|
0 | repos/libcamera/utils/tuning/libtuning/modules | repos/libcamera/utils/tuning/libtuning/modules/agc/agc.py | # SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
# Copyright (C) 2024, Paul Elder <[email protected]>
from ..module import Module
import libtuning as lt
class AGC(Module):
type = 'agc'
hr_name = 'AGC (Base)'
out_name = 'GenericAGC'
# \todo Add sector shapes and stuff just like lsc
def __init__(self, *,
debug: list):
super().__init__()
self.debug = debug
|
0 | repos/libcamera/utils/tuning/libtuning/modules | repos/libcamera/utils/tuning/libtuning/modules/agc/rkisp1.py | # SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
# Copyright (C) 2024, Paul Elder <[email protected]>
#
# rkisp1.py - AGC module for tuning rkisp1
from .agc import AGC
import libtuning as lt
class AGCRkISP1(AGC):
hr_name = 'AGC (RkISP1)'
out_name = 'Agc'
def __init__(self, **kwargs):
super().__init__(**kwargs)
# We don't actually need anything from the config file
def validate_config(self, config: dict) -> bool:
return True
def _generate_metering_modes(self) -> dict:
centre_weighted = [
0, 0, 0, 0, 0,
0, 6, 8, 6, 0,
0, 8, 16, 8, 0,
0, 6, 8, 6, 0,
0, 0, 0, 0, 0
]
spot = [
0, 0, 0, 0, 0,
0, 2, 4, 2, 0,
0, 4, 16, 4, 0,
0, 2, 4, 2, 0,
0, 0, 0, 0, 0
]
matrix = [1 for i in range(0, 25)]
return {
'MeteringCentreWeighted': centre_weighted,
'MeteringSpot': spot,
'MeteringMatrix': matrix
}
def _generate_exposure_modes(self) -> dict:
normal = {'shutter': [100, 10000, 30000, 60000, 120000],
'gain': [2.0, 4.0, 6.0, 6.0, 6.0]}
short = {'shutter': [100, 5000, 10000, 20000, 120000],
'gain': [2.0, 4.0, 6.0, 6.0, 6.0]}
return {'ExposureNormal': normal, 'ExposureShort': short}
def _generate_constraint_modes(self) -> dict:
normal = {'lower': {'qLo': 0.98, 'qHi': 1.0, 'yTarget': 0.5}}
highlight = {
'lower': {'qLo': 0.98, 'qHi': 1.0, 'yTarget': 0.5},
'upper': {'qLo': 0.98, 'qHi': 1.0, 'yTarget': 0.8}
}
return {'ConstraintNormal': normal, 'ConstraintHighlight': highlight}
def _generate_y_target(self) -> list:
return 0.16
def process(self, config: dict, images: list, outputs: dict) -> dict:
output = {}
output['AeMeteringMode'] = self._generate_metering_modes()
output['AeExposureMode'] = self._generate_exposure_modes()
output['AeConstraintMode'] = self._generate_constraint_modes()
output['relativeLuminanceTarget'] = self._generate_y_target()
# \todo Debug functionality
return output
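# With the YamlOutput generator, the dictionary returned by process() above
# would land in the tuning file roughly as follows (abridged, values as
# generated by the helpers above):
#
#     algorithms:
#       - Agc:
#           AeMeteringMode:
#             MeteringCentreWeighted: [ 0, 0, 0, ... ]
#           AeExposureMode:
#             ExposureNormal:
#               shutter: [ 100, 10000, 30000, 60000, 120000 ]
#               gain: [ 2.0, 4.0, 6.0, 6.0, 6.0 ]
#           relativeLuminanceTarget: 0.16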
|
0 | repos/libcamera/utils/tuning/libtuning/modules | repos/libcamera/utils/tuning/libtuning/modules/lsc/raspberrypi.py | # SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
# Copyright (C) 2022, Paul Elder <[email protected]>
#
# ALSC module for tuning Raspberry Pi
from .lsc import LSC
import libtuning as lt
import libtuning.utils as utils
from numbers import Number
import numpy as np
class ALSCRaspberryPi(LSC):
# Override the type name so that the parser can match the entry in the
# config file.
type = 'alsc'
hr_name = 'ALSC (Raspberry Pi)'
out_name = 'rpi.alsc'
compatible = ['raspberrypi']
def __init__(self, *,
do_color: lt.Param,
luminance_strength: lt.Param,
**kwargs):
super().__init__(**kwargs)
self.do_color = do_color
self.luminance_strength = luminance_strength
self.output_range = (0, 3.999)
def validate_config(self, config: dict) -> bool:
if self not in config:
utils.eprint(f'{self.type} not in config')
return False
valid = True
conf = config[self]
lum_key = self.luminance_strength.name
color_key = self.do_color.name
if lum_key not in conf and self.luminance_strength.required:
utils.eprint(f'{lum_key} is not in config')
valid = False
if lum_key in conf and (conf[lum_key] < 0 or conf[lum_key] > 1):
utils.eprint(f'Warning: {lum_key} is not in range [0, 1]; defaulting to 0.5')
if color_key not in conf and self.do_color.required:
utils.eprint(f'{color_key} is not in config')
valid = False
return valid
# @return Image color temperature, flattened array of red calibration table
# (containing {sector size} elements), flattened array of blue
# calibration table, flattened array of green calibration
# table
def _do_single_alsc(self, image: lt.Image, do_alsc_colour: bool):
average_green = np.mean((image.channels[lt.Color.GR:lt.Color.GB + 1]), axis=0)
cg, g = self._lsc_single_channel(average_green, image)
if not do_alsc_colour:
return image.color, None, None, cg.flatten()
cr, _ = self._lsc_single_channel(image.channels[lt.Color.R], image, g)
cb, _ = self._lsc_single_channel(image.channels[lt.Color.B], image, g)
# \todo implement debug
return image.color, cr.flatten(), cb.flatten(), cg.flatten()
# @return Red shading table, Blue shading table, Green shading table,
# number of images processed
def _do_all_alsc(self, images: list, do_alsc_colour: bool, general_conf: dict) -> (list, list, list, int):
# List of colour temperatures
list_col = []
# Associated calibration tables
list_cr = []
list_cb = []
list_cg = []
count = 0
for image in self._enumerate_lsc_images(images):
col, cr, cb, cg = self._do_single_alsc(image, do_alsc_colour)
list_col.append(col)
list_cr.append(cr)
list_cb.append(cb)
list_cg.append(cg)
count += 1
# Convert to numpy array for data manipulation
list_col = np.array(list_col)
list_cr = np.array(list_cr)
list_cb = np.array(list_cb)
list_cg = np.array(list_cg)
cal_cr_list = []
cal_cb_list = []
# Note: The calculation of the average corners and centre of the shading
# tables, which ctt had, has been removed, as it was unused.
# Average all values for luminance shading and return one table for all temperatures
lum_lut = list(np.round(np.mean(list_cg, axis=0), 3))
if not do_alsc_colour:
return None, None, lum_lut, count
for ct in sorted(set(list_col)):
# Average tables for the same colour temperature
indices = np.where(list_col == ct)
ct = int(ct)
t_r = np.round(np.mean(list_cr[indices], axis=0), 3)
t_b = np.round(np.mean(list_cb[indices], axis=0), 3)
cr_dict = {
'ct': ct,
'table': list(t_r)
}
cb_dict = {
'ct': ct,
'table': list(t_b)
}
cal_cr_list.append(cr_dict)
cal_cb_list.append(cb_dict)
return cal_cr_list, cal_cb_list, lum_lut, count
# @brief Calculate sigma from two adjacent gain tables
def _calcSigma(self, g1, g2):
g1 = np.reshape(g1, self.sector_shape[::-1])
g2 = np.reshape(g2, self.sector_shape[::-1])
# Apply gains to gain table
gg = g1 / g2
if np.mean(gg) < 1:
gg = 1 / gg
# For each internal patch, compute average difference between it and
# its 4 neighbours, then append to list
diffs = []
for i in range(self.sector_shape[1] - 2):
for j in range(self.sector_shape[0] - 2):
# Indexing is incremented by 1 since all patches on borders are
# not counted
diff = np.abs(gg[i + 1][j + 1] - gg[i][j + 1])
diff += np.abs(gg[i + 1][j + 1] - gg[i + 2][j + 1])
diff += np.abs(gg[i + 1][j + 1] - gg[i + 1][j])
diff += np.abs(gg[i + 1][j + 1] - gg[i + 1][j + 2])
diffs.append(diff / 4)
mean_diff = np.mean(diffs)
return np.round(mean_diff, 5)
# @brief Obtains sigmas for red and blue, effectively a measure of the
# 'error'
def _get_sigma(self, cal_cr_list, cal_cb_list):
# Provided that the colour alsc tables were generated for at least two
# different colour temperatures, sigma is calculated by comparing two
# calibration tables adjacent in colour space.
color_temps = [cal['ct'] for cal in cal_cr_list]
# Calculate a sigma for each pair of adjacent colour temperatures and return the worst one
sigma_rs = []
sigma_bs = []
for i in range(len(color_temps) - 1):
sigma_rs.append(self._calcSigma(cal_cr_list[i]['table'], cal_cr_list[i + 1]['table']))
sigma_bs.append(self._calcSigma(cal_cb_list[i]['table'], cal_cb_list[i + 1]['table']))
# Return maximum sigmas, not necessarily from the same colour
# temperature interval
sigma_r = max(sigma_rs) if sigma_rs else 0.005
sigma_b = max(sigma_bs) if sigma_bs else 0.005
return sigma_r, sigma_b
def process(self, config: dict, images: list, outputs: dict) -> dict:
output = {
'omega': 1.3,
'n_iter': 100,
'luminance_strength': 0.7
}
conf = config[self]
general_conf = config['general']
do_alsc_colour = self.do_color.get_value(conf)
# \todo I have no idea where this input parameter is used
luminance_strength = self.luminance_strength.get_value(conf)
if luminance_strength < 0 or luminance_strength > 1:
luminance_strength = 0.5
output['luminance_strength'] = luminance_strength
# \todo Validate images from greyscale camera and force greyscale mode
# \todo Debug functionality
alsc_out = self._do_all_alsc(images, do_alsc_colour, general_conf)
# \todo Handle the second green lut
cal_cr_list, cal_cb_list, luminance_lut, count = alsc_out
if not do_alsc_colour:
output['luminance_lut'] = luminance_lut
output['n_iter'] = 0
return output
output['calibrations_Cr'] = cal_cr_list
output['calibrations_Cb'] = cal_cb_list
output['luminance_lut'] = luminance_lut
# The sigmas determine the strength of the adaptive algorithm, which
# cleans up any lens shading that has slipped through the alsc. These
# are determined by measuring a 'worst-case' difference between two
# alsc tables that are adjacent in colour space. If, however, only one
# colour temperature has been provided, then this difference cannot be
# computed as only one table is available.
# To determine the sigmas you would have to estimate the error of an
# alsc table with only the image it was taken on as a check. To avoid
# circularity, default exaggerated sigmas are used instead, which can
# result in too much alsc and is therefore not advised.
# In general, just take another alsc picture at another colour
# temperature!
if count == 1:
output['sigma'] = 0.005
output['sigma_Cb'] = 0.005
utils.eprint('Warning: Only one alsc calibration found; standard sigmas used for adaptive algorithm.')
return output
# Obtain worst-case scenario residual sigmas
sigma_r, sigma_b = self._get_sigma(cal_cr_list, cal_cb_list)
output['sigma'] = np.round(sigma_r, 5)
output['sigma_Cb'] = np.round(sigma_b, 5)
return output
|
0 | repos/libcamera/utils/tuning/libtuning/modules | repos/libcamera/utils/tuning/libtuning/modules/lsc/__init__.py | # SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2022, Paul Elder <[email protected]>
from libtuning.modules.lsc.lsc import LSC
from libtuning.modules.lsc.raspberrypi import ALSCRaspberryPi
from libtuning.modules.lsc.rkisp1 import LSCRkISP1
|
0 | repos/libcamera/utils/tuning/libtuning/modules | repos/libcamera/utils/tuning/libtuning/modules/lsc/lsc.py | # SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
# Copyright (C) 2022, Paul Elder <[email protected]>
from ..module import Module
import libtuning as lt
import libtuning.utils as utils
import numpy as np
class LSC(Module):
type = 'lsc'
hr_name = 'LSC (Base)'
out_name = 'GenericLSC'
def __init__(self, *,
debug: list,
sector_shape: tuple,
sector_x_gradient: lt.Gradient,
sector_y_gradient: lt.Gradient,
sector_average_function: lt.Average,
smoothing_function: lt.Smoothing):
super().__init__()
self.debug = debug
self.sector_shape = sector_shape
self.sector_x_gradient = sector_x_gradient
self.sector_y_gradient = sector_y_gradient
self.sector_average_function = sector_average_function
self.smoothing_function = smoothing_function
def _enumerate_lsc_images(self, images):
for image in images:
if image.lsc_only:
yield image
def _get_grid(self, channel, img_w, img_h):
# List of number of pixels in each sector
sectors_x = self.sector_x_gradient.distribute(img_w / 2, self.sector_shape[0])
sectors_y = self.sector_y_gradient.distribute(img_h / 2, self.sector_shape[1])
grid = []
r = 0
for y in sectors_y:
c = 0
for x in sectors_x:
grid.append(self.sector_average_function.average(channel[r:r + y, c:c + x]))
c += x
r += y
return np.array(grid)
def _lsc_single_channel(self, channel: np.array,
image: lt.Image, green_grid: np.array = None):
grid = self._get_grid(channel, image.w, image.h)
grid -= image.blacklevel_16
if green_grid is None:
table = np.reshape(1 / grid, self.sector_shape[::-1])
else:
table = np.reshape(green_grid / grid, self.sector_shape[::-1])
table = self.smoothing_function.smoothing(table)
if green_grid is None:
table = table / np.min(table)
return table, grid
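# A small worked example of the gain maths above (illustrative numbers,
# ignoring the smoothing step): if a sector averages 200 counts after black
# level subtraction while the brightest sector averages 400, then with no
# green reference the normalised entry is (1 / 200) / (1 / 400) = 2.0, i.e.
# that sector needs twice the gain of the brightest one. With a green
# reference grid the entry is instead green / channel for that sector,
# giving a per-channel colour shading ratio rather than a luminance gain.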
|
0 | repos/libcamera/utils/tuning/libtuning/modules | repos/libcamera/utils/tuning/libtuning/modules/lsc/rkisp1.py | # SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (C) 2019, Raspberry Pi Ltd
# Copyright (C) 2022, Paul Elder <[email protected]>
#
# LSC module for tuning rkisp1
from .lsc import LSC
import libtuning as lt
import libtuning.utils as utils
from numbers import Number
import numpy as np
class LSCRkISP1(LSC):
hr_name = 'LSC (RkISP1)'
out_name = 'LensShadingCorrection'
# \todo Not sure if this is useful. Probably will remove later.
compatible = ['rkisp1']
def __init__(self, *args, **kwargs):
super().__init__(**kwargs)
# We don't actually need anything from the config file
def validate_config(self, config: dict) -> bool:
return True
# @return Image color temperature, flattened array of red calibration table
# (containing {sector size} elements), flattened array of blue
# calibration table, flattened array of (red's) green calibration
# table, flattened array of (blue's) green calibration table
def _do_single_lsc(self, image: lt.Image):
cgr, gr = self._lsc_single_channel(image.channels[lt.Color.GR], image)
cgb, gb = self._lsc_single_channel(image.channels[lt.Color.GB], image)
# \todo Should these ratio against the average of both greens or just
# each green like we've done here?
cr, _ = self._lsc_single_channel(image.channels[lt.Color.R], image, gr)
cb, _ = self._lsc_single_channel(image.channels[lt.Color.B], image, gb)
return image.color, cr.flatten(), cb.flatten(), cgr.flatten(), cgb.flatten()
# @return List of dictionaries of color temperature, red table, red's green
# table, blue's green table, and blue table
def _do_all_lsc(self, images: list) -> list:
output_list = []
output_map_func = lt.gradient.Linear().map
# List of colour temperatures
list_col = []
# Associated calibration tables
list_cr = []
list_cb = []
list_cgr = []
list_cgb = []
for image in self._enumerate_lsc_images(images):
col, cr, cb, cgr, cgb = self._do_single_lsc(image)
list_col.append(col)
list_cr.append(cr)
list_cb.append(cb)
list_cgr.append(cgr)
list_cgb.append(cgb)
# Convert to numpy array for data manipulation
list_col = np.array(list_col)
list_cr = np.array(list_cr)
list_cb = np.array(list_cb)
list_cgr = np.array(list_cgr)
list_cgb = np.array(list_cgb)
for color_temperature in sorted(set(list_col)):
# Average tables for the same colour temperature
indices = np.where(list_col == color_temperature)
color_temperature = int(color_temperature)
tables = []
for lis in [list_cr, list_cgr, list_cgb, list_cb]:
table = np.mean(lis[indices], axis=0)
table = output_map_func((1, 3.999), (1024, 4095), table)
table = np.round(table).astype('int32').tolist()
tables.append(table)
entry = {
'ct': color_temperature,
'r': tables[0],
'gr': tables[1],
'gb': tables[2],
'b': tables[3],
}
output_list.append(entry)
return output_list
def process(self, config: dict, images: list, outputs: dict) -> dict:
output = {}
# \todo This should actually come from self.sector_{x,y}_gradient
size_gradient = lt.gradient.Linear(lt.Remainder.Float)
output['x-size'] = size_gradient.distribute(0.5, 8)
output['y-size'] = size_gradient.distribute(0.5, 8)
output['sets'] = self._do_all_lsc(images)
# \todo Validate images from greyscale camera and force greyscale mode
# \todo Debug functionality
return output
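# A sketch of how a platform tuning script might construct this module. The
# helper classes referenced here (lt.gradient.Linear, lt.Remainder.Float,
# lt.average.Mean, lt.smoothing.MedianBlur) are assumed to exist in
# libtuning; the sector shape and kernel size are illustrative:
#
#     import libtuning as lt
#
#     lsc = LSCRkISP1(
#         debug=[],
#         sector_shape=(17, 17),
#         sector_x_gradient=lt.gradient.Linear(lt.Remainder.Float),
#         sector_y_gradient=lt.gradient.Linear(lt.Remainder.Float),
#         sector_average_function=lt.average.Mean(),
#         smoothing_function=lt.smoothing.MedianBlur(3),
#     )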
|
0 | repos/libcamera/utils/tuning/libtuning | repos/libcamera/utils/tuning/libtuning/parsers/__init__.py | # SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2022, Paul Elder <[email protected]>
from libtuning.parsers.raspberrypi_parser import RaspberryPiParser
from libtuning.parsers.yaml_parser import YamlParser
|
0 | repos/libcamera/utils/tuning/libtuning | repos/libcamera/utils/tuning/libtuning/parsers/raspberrypi_parser.py | # SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2022, Paul Elder <[email protected]>
#
# Parser for Raspberry Pi config file format
from .parser import Parser
import json
import numbers
import libtuning.utils as utils
class RaspberryPiParser(Parser):
def __init__(self):
super().__init__()
# The strings in the 'disable' and 'plot' lists are formatted as
# 'rpi.{module_name}'.
# @brief Enumerate the entries of @a listt as modules, provided their names
# exist in @a dictt and correspond to a valid module in @a modules
def _enumerate_rpi_modules(self, listt, dictt, modules):
for x in listt:
name = x.replace('rpi.', '')
if name not in dictt:
continue
module = utils.get_module_by_type_name(modules, name)
if module is not None:
yield module
def _valid_macbeth_option(self, value):
if not isinstance(value, dict):
return False
if list(value.keys()) != ['small', 'show']:
return False
for val in value.values():
if not isinstance(val, numbers.Number):
return False
return True
def parse(self, config_file: str, modules: list) -> (dict, list):
with open(config_file, 'r') as config_json:
config = json.load(config_json)
disable = []
for module in self._enumerate_rpi_modules(config['disable'], config, modules):
disable.append(module)
# Remove the disabled module's config too
config.pop(module.type)
config.pop('disable')
# The raspberrypi config format has 'plot' map to a list of module
# names which should be plotted. libtuning instead has each module carry
# its own plot information, so do this conversion here.
for module in self._enumerate_rpi_modules(config['plot'], config, modules):
# It's fine to set the value of a potentially disabled module, as
# the object still exists at this point
module.appendValue('debug', 'plot')
config.pop('plot')
# Convert the keys from module name to module instance
new_config = {}
for module_name in config:
module = utils.get_module_by_type_name(modules, module_name)
if module is not None:
new_config[module] = config[module_name]
new_config['general'] = {}
if 'blacklevel' in config:
if not isinstance(config['blacklevel'], numbers.Number):
raise TypeError('Config "blacklevel" must be a number')
# Raspberry Pi's ctt config has magic blacklevel value -1 to mean
# "get it from the image metadata". Since we already do that in
# Image, don't save it to the config here.
if config['blacklevel'] >= 0:
new_config['general']['blacklevel'] = config['blacklevel']
if 'macbeth' in config:
if not self._valid_macbeth_option(config['macbeth']):
raise TypeError('Config "macbeth" must be a dict: {"small": number, "show": number}')
new_config['general']['macbeth'] = config['macbeth']
else:
new_config['general']['macbeth'] = {'small': 0, 'show': 0}
return new_config, disable
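# A sketch of the ctt-style JSON this parser expects (keys and values are
# illustrative; module entries use the bare module name, while the 'disable'
# and 'plot' lists use the 'rpi.' prefix):
#
#     {
#         "disable": [],
#         "plot": [],
#         "blacklevel": -1,
#         "macbeth": { "small": 0, "show": 0 },
#         "alsc": { "do_alsc_colour": 1, "luminance_strength": 0.5 }
#     }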
|
0 | repos/libcamera/utils/tuning/libtuning | repos/libcamera/utils/tuning/libtuning/parsers/parser.py | # SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2022, Paul Elder <[email protected]>
#
# Base class for a parser for a specific format of config file
class Parser(object):
def __init__(self):
pass
# @brief Parse a config file into a config dict
# @details The config dict shall have one key 'general' with a dict value
# for general configuration options, and all other entries shall
# have the module as the key with its configuration options (as a
# dict) as the value. The config dict shall prune entries that are
# for modules that are not in @a modules.
# @param config (str) Path to config file
# @param modules (list) List of modules
# @return (dict, list) Configuration and list of modules to disable
def parse(self, config_file: str, modules: list) -> (dict, list):
raise NotImplementedError
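# A sketch of the expected return shape. Module instances are used as keys,
# so the names below are purely illustrative:
#
#     config = {
#         'general': {'blacklevel': 64},
#         alsc_module: {'luminance_strength': 0.5},
#     }
#     disable = []
#     return config, disable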
|
0 | repos/libcamera/utils/tuning/libtuning | repos/libcamera/utils/tuning/libtuning/parsers/yaml_parser.py | # SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2022, Paul Elder <[email protected]>
#
# Parser for YAML format config file
from .parser import Parser
class YamlParser(Parser):
def __init__(self):
super().__init__()
# \todo Implement this (it's fine for now as we don't need a config for
# rkisp1 LSC, which is the only user of this so far)
def parse(self, config_file: str, modules: list) -> (dict, list):
return {}, []
|
0 | repos/libcamera/utils/tuning/libtuning | repos/libcamera/utils/tuning/libtuning/generators/__init__.py | # SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2022, Paul Elder <[email protected]>
from libtuning.generators.raspberrypi_output import RaspberryPiOutput
from libtuning.generators.yaml_output import YamlOutput
|
0 | repos/libcamera/utils/tuning/libtuning | repos/libcamera/utils/tuning/libtuning/generators/raspberrypi_output.py | # SPDX-License-Identifier: BSD-2-Clause
#
# Copyright 2022 Raspberry Pi Ltd
#
# Generate tuning file in Raspberry Pi's json format
#
# (Copied from ctt_pretty_print_json.py)
from .generator import Generator
import json
from pathlib import Path
import textwrap
class Encoder(json.JSONEncoder):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.indentation_level = 0
self.hard_break = 120
self.custom_elems = {
'table': 16,
'luminance_lut': 16,
'ct_curve': 3,
'ccm': 3,
'gamma_curve': 2,
'y_target': 2,
'prior': 2
}
def encode(self, o, node_key=None):
if isinstance(o, (list, tuple)):
# Check if we are a flat list of numbers.
if not any(isinstance(el, (list, tuple, dict)) for el in o):
s = ', '.join(json.dumps(el) for el in o)
if node_key in self.custom_elems.keys():
# Special case handling to specify number of elements in a row for tables, ccm, etc.
self.indentation_level += 1
sl = s.split(', ')
num = self.custom_elems[node_key]
chunk = [self.indent_str + ', '.join(sl[x:x + num]) for x in range(0, len(sl), num)]
t = ',\n'.join(chunk)
self.indentation_level -= 1
output = f'\n{self.indent_str}[\n{t}\n{self.indent_str}]'
elif len(s) > self.hard_break - len(self.indent_str):
# Break a long list with wraps.
self.indentation_level += 1
t = textwrap.fill(s, self.hard_break, break_long_words=False,
initial_indent=self.indent_str, subsequent_indent=self.indent_str)
self.indentation_level -= 1
output = f'\n{self.indent_str}[\n{t}\n{self.indent_str}]'
else:
# Smaller lists can remain on a single line.
output = f' [ {s} ]'
return output
else:
# Sub-structures in the list case.
self.indentation_level += 1
output = [self.indent_str + self.encode(el) for el in o]
self.indentation_level -= 1
output = ',\n'.join(output)
return f' [\n{output}\n{self.indent_str}]'
elif isinstance(o, dict):
self.indentation_level += 1
output = []
for k, v in o.items():
if isinstance(v, dict) and len(v) == 0:
# Empty config block special case.
output.append(self.indent_str + f'{json.dumps(k)}: {{ }}')
else:
# Only linebreak if the next node is a config block.
sep = f'\n{self.indent_str}' if isinstance(v, dict) else ''
output.append(self.indent_str + f'{json.dumps(k)}:{sep}{self.encode(v, k)}')
output = ',\n'.join(output)
self.indentation_level -= 1
return f'{{\n{output}\n{self.indent_str}}}'
else:
return ' ' + json.dumps(o)
@property
def indent_str(self) -> str:
return ' ' * self.indentation_level * self.indent
def iterencode(self, o, **kwargs):
return self.encode(o)
class RaspberryPiOutput(Generator):
def __init__(self):
super().__init__()
def _pretty_print(self, in_json: dict) -> str:
if 'version' not in in_json or \
'target' not in in_json or \
'algorithms' not in in_json or \
in_json['version'] < 2.0:
raise RuntimeError('Incompatible JSON dictionary has been provided')
return json.dumps(in_json, cls=Encoder, indent=4, sort_keys=False)
def write(self, output_file: Path, output_dict: dict, output_order: list):
# Write json dictionary to file using ctt's version 2 format
out_json = {
"version": 2.0,
'target': 'bcm2835',
"algorithms": [{f'{module.out_name}': output_dict[module]} for module in output_order]
}
with open(output_file, 'w') as f:
f.write(self._pretty_print(out_json))
|
0 | repos/libcamera/utils/tuning/libtuning | repos/libcamera/utils/tuning/libtuning/generators/yaml_output.py | # SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright 2022 Paul Elder <[email protected]>
#
# Generate tuning file in YAML format
from .generator import Generator
from numbers import Number
from pathlib import Path
import libtuning.utils as utils
class YamlOutput(Generator):
def __init__(self):
super().__init__()
def _stringify_number_list(self, listt: list):
line_wrap = 80
line = '[ ' + ', '.join([str(x) for x in listt]) + ' ]'
if len(line) <= line_wrap:
return [line]
out_lines = ['[']
line = ' '
for x in listt:
x_str = str(x)
# If the first number is longer than line_wrap, it'll add an extra line
if len(line) + len(x_str) > line_wrap:
out_lines.append(line)
line = f' {x_str},'
continue
line += f' {x_str},'
out_lines.append(line)
out_lines.append(']')
return out_lines
# @return Array of lines, and boolean of if all elements were numbers
def _stringify_list(self, listt: list):
out_lines = []
all_numbers = all(isinstance(x, Number) for x in listt)
if all_numbers:
return self._stringify_number_list(listt), True
for value in listt:
if isinstance(value, Number):
out_lines.append(f'- {str(value)}')
elif isinstance(value, str):
out_lines.append(f'- "{value}"')
elif isinstance(value, list):
lines, all_numbers = self._stringify_list(value)
if all_numbers:
out_lines.append( f'- {lines[0]}')
out_lines += [f' {line}' for line in lines[1:]]
else:
out_lines.append( f'-')
out_lines += [f' {line}' for line in lines]
elif isinstance(value, dict):
lines = self._stringify_dict(value)
out_lines.append( f'- {lines[0]}')
out_lines += [f' {line}' for line in lines[1:]]
return out_lines, False
def _stringify_dict(self, dictt: dict):
out_lines = []
for key in dictt:
value = dictt[key]
if isinstance(value, Number):
out_lines.append(f'{key}: {str(value)}')
elif isinstance(value, str):
out_lines.append(f'{key}: "{value}"')
elif isinstance(value, list):
lines, all_numbers = self._stringify_list(value)
if all_numbers:
out_lines.append( f'{key}: {lines[0]}')
out_lines += [f'{" " * (len(key) + 2)}{line}' for line in lines[1:]]
else:
out_lines.append( f'{key}:')
out_lines += [f' {line}' for line in lines]
elif isinstance(value, dict):
lines = self._stringify_dict(value)
out_lines.append( f'{key}:')
out_lines += [f' {line}' for line in lines]
return out_lines
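# A small worked example of the stringification above (illustrative input):
#
#     self._stringify_dict({'a': 1, 'b': [1, 2], 'c': {'d': 'x'}})
#
# produces the lines:
#
#     a: 1
#     b: [ 1, 2 ]
#     c:
#       d: "x"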
def write(self, output_file: Path, output_dict: dict, output_order: list):
out_lines = [
'%YAML 1.1',
'---',
'version: 1',
# No need to condition this, as libtuning already guarantees that
# we have at least one module. Even if the module has no output,
# its presence is meaningful.
'algorithms:'
]
for module in output_order:
out_lines.append(f' - {module.out_name}:')
if len(output_dict[module]) == 0:
continue
if not isinstance(output_dict[module], dict):
utils.eprint(f'Error: Output of {module.type} is not a dictionary')
continue
lines = self._stringify_dict(output_dict[module])
out_lines += [f' {line}' for line in lines]
with open(output_file, 'w', encoding='utf-8') as f:
for line in out_lines:
f.write(f'{line}\n')
|