//
// Copyright (c) 2009-2010 Mikko Mononen [email protected]
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
//
#ifndef RECAST_H
#define RECAST_H
/// The value of PI used by Recast.
static const float RC_PI = 3.14159265f;
/// Used to ignore unused function parameters and silence any compiler warnings.
template<class T> void rcIgnoreUnused(const T&) { }
/// Recast log categories.
/// @see rcContext
enum rcLogCategory
{
RC_LOG_PROGRESS = 1, ///< A progress log entry.
RC_LOG_WARNING, ///< A warning log entry.
RC_LOG_ERROR ///< An error log entry.
};
/// Recast performance timer categories.
/// @see rcContext
enum rcTimerLabel
{
/// The user defined total time of the build.
RC_TIMER_TOTAL,
/// A user defined build time.
RC_TIMER_TEMP,
/// The time to rasterize the triangles. (See: #rcRasterizeTriangle)
RC_TIMER_RASTERIZE_TRIANGLES,
/// The time to build the compact heightfield. (See: #rcBuildCompactHeightfield)
RC_TIMER_BUILD_COMPACTHEIGHTFIELD,
/// The total time to build the contours. (See: #rcBuildContours)
RC_TIMER_BUILD_CONTOURS,
/// The time to trace the boundaries of the contours. (See: #rcBuildContours)
RC_TIMER_BUILD_CONTOURS_TRACE,
/// The time to simplify the contours. (See: #rcBuildContours)
RC_TIMER_BUILD_CONTOURS_SIMPLIFY,
/// The time to filter ledge spans. (See: #rcFilterLedgeSpans)
RC_TIMER_FILTER_BORDER,
/// The time to filter low height spans. (See: #rcFilterWalkableLowHeightSpans)
RC_TIMER_FILTER_WALKABLE,
/// The time to apply the median filter. (See: #rcMedianFilterWalkableArea)
RC_TIMER_MEDIAN_AREA,
/// The time to filter low obstacles. (See: #rcFilterLowHangingWalkableObstacles)
RC_TIMER_FILTER_LOW_OBSTACLES,
/// The time to build the polygon mesh. (See: #rcBuildPolyMesh)
RC_TIMER_BUILD_POLYMESH,
/// The time to merge polygon meshes. (See: #rcMergePolyMeshes)
RC_TIMER_MERGE_POLYMESH,
/// The time to erode the walkable area. (See: #rcErodeWalkableArea)
RC_TIMER_ERODE_AREA,
/// The time to mark a box area. (See: #rcMarkBoxArea)
RC_TIMER_MARK_BOX_AREA,
/// The time to mark a cylinder area. (See: #rcMarkCylinderArea)
RC_TIMER_MARK_CYLINDER_AREA,
/// The time to mark a convex polygon area. (See: #rcMarkConvexPolyArea)
RC_TIMER_MARK_CONVEXPOLY_AREA,
/// The total time to build the distance field. (See: #rcBuildDistanceField)
RC_TIMER_BUILD_DISTANCEFIELD,
/// The time to build the distances of the distance field. (See: #rcBuildDistanceField)
RC_TIMER_BUILD_DISTANCEFIELD_DIST,
/// The time to blur the distance field. (See: #rcBuildDistanceField)
RC_TIMER_BUILD_DISTANCEFIELD_BLUR,
/// The total time to build the regions. (See: #rcBuildRegions, #rcBuildRegionsMonotone)
RC_TIMER_BUILD_REGIONS,
/// The total time to apply the watershed algorithm. (See: #rcBuildRegions)
RC_TIMER_BUILD_REGIONS_WATERSHED,
/// The time to expand regions while applying the watershed algorithm. (See: #rcBuildRegions)
RC_TIMER_BUILD_REGIONS_EXPAND,
/// The time to flood regions while applying the watershed algorithm. (See: #rcBuildRegions)
RC_TIMER_BUILD_REGIONS_FLOOD,
/// The time to filter out small regions. (See: #rcBuildRegions, #rcBuildRegionsMonotone)
RC_TIMER_BUILD_REGIONS_FILTER,
/// The time to build heightfield layers. (See: #rcBuildHeightfieldLayers)
RC_TIMER_BUILD_LAYERS,
/// The time to build the polygon mesh detail. (See: #rcBuildPolyMeshDetail)
RC_TIMER_BUILD_POLYMESHDETAIL,
/// The time to merge polygon mesh details. (See: #rcMergePolyMeshDetails)
RC_TIMER_MERGE_POLYMESHDETAIL,
/// The maximum number of timers. (Used for iterating timers.)
RC_MAX_TIMERS
};
/// Provides an interface for optional logging and performance tracking of the Recast
/// build process.
///
/// This class does not provide logging or timer functionality on its
/// own. Both must be provided by a concrete implementation
/// by overriding the protected member functions. Also, this class does not
/// provide an interface for extracting log messages. (Only adding them.)
/// So concrete implementations must provide one.
///
/// If no logging or timers are required, just pass an instance of this
/// class through the Recast build process.
///
/// @ingroup recast
class rcContext
{
public:
/// Constructor.
/// @param[in] state TRUE if the logging and performance timers should be enabled. [Default: true]
inline rcContext(bool state = true) : m_logEnabled(state), m_timerEnabled(state) {}
virtual ~rcContext() {}
/// Enables or disables logging.
/// @param[in] state TRUE if logging should be enabled.
inline void enableLog(bool state) { m_logEnabled = state; }
/// Clears all log entries.
inline void resetLog() { if (m_logEnabled) doResetLog(); }
/// Logs a message.
///
/// Example:
/// @code
/// // Where ctx is an instance of rcContext and filepath is a char array.
/// ctx->log(RC_LOG_ERROR, "buildTiledNavigation: Could not load '%s'", filepath);
/// @endcode
///
/// @param[in] category The category of the message.
/// @param[in] format The message.
void log(const rcLogCategory category, const char* format, ...);
/// Enables or disables the performance timers.
/// @param[in] state TRUE if timers should be enabled.
inline void enableTimer(bool state) { m_timerEnabled = state; }
/// Clears all performance timers. (Resets all to unused.)
inline void resetTimers() { if (m_timerEnabled) doResetTimers(); }
/// Starts the specified performance timer.
/// @param label The category of the timer.
inline void startTimer(const rcTimerLabel label) { if (m_timerEnabled) doStartTimer(label); }
/// Stops the specified performance timer.
/// @param label The category of the timer.
inline void stopTimer(const rcTimerLabel label) { if (m_timerEnabled) doStopTimer(label); }
/// Returns the total accumulated time of the specified performance timer.
/// @param label The category of the timer.
/// @return The accumulated time of the timer, or -1 if timers are disabled or the timer has never been started.
inline int getAccumulatedTime(const rcTimerLabel label) const { return m_timerEnabled ? doGetAccumulatedTime(label) : -1; }
protected:
/// Clears all log entries.
virtual void doResetLog();
/// Logs a message.
/// @param[in] category The category of the message.
/// @param[in] msg The formatted message.
/// @param[in] len The length of the formatted message.
virtual void doLog(const rcLogCategory category, const char* msg, const int len) { rcIgnoreUnused(category); rcIgnoreUnused(msg); rcIgnoreUnused(len); }
/// Clears all timers. (Resets all to unused.)
virtual void doResetTimers() {}
/// Starts the specified performance timer.
/// @param[in] label The category of timer.
virtual void doStartTimer(const rcTimerLabel label) { rcIgnoreUnused(label); }
/// Stops the specified performance timer.
/// @param[in] label The category of the timer.
virtual void doStopTimer(const rcTimerLabel label) { rcIgnoreUnused(label); }
/// Returns the total accumulated time of the specified performance timer.
/// @param[in] label The category of the timer.
/// @return The accumulated time of the timer, or -1 if timers are disabled or the timer has never been started.
virtual int doGetAccumulatedTime(const rcTimerLabel label) const { rcIgnoreUnused(label); return -1; }
/// True if logging is enabled.
bool m_logEnabled;
/// True if the performance timers are enabled.
bool m_timerEnabled;
};
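/// Example: a minimal concrete implementation (a sketch; SimpleLogContext is an
/// illustrative name, not part of the Recast API). Only doLog() is overridden;
/// the timer hooks keep their no-op defaults.
/// @code
/// #include <cstdio>
/// class SimpleLogContext : public rcContext
/// {
/// protected:
/// 	virtual void doLog(const rcLogCategory category, const char* msg, const int len)
/// 	{
/// 		rcIgnoreUnused(len);
/// 		printf("[%d] %s\n", (int)category, msg);
/// 	}
/// };
/// @endcode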
/// A helper to first start a timer and then stop it when this helper goes out of scope.
/// @see rcContext
class rcScopedTimer
{
public:
/// Constructs an instance and starts the timer.
/// @param[in] ctx The context to use.
/// @param[in] label The category of the timer.
inline rcScopedTimer(rcContext* ctx, const rcTimerLabel label) : m_ctx(ctx), m_label(label) { m_ctx->startTimer(m_label); }
inline ~rcScopedTimer() { m_ctx->stopTimer(m_label); }
private:
// Explicitly disabled copy constructor and copy assignment operator.
rcScopedTimer(const rcScopedTimer&);
rcScopedTimer& operator=(const rcScopedTimer&);
rcContext* const m_ctx;
const rcTimerLabel m_label;
};
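/// Example usage (sketch): scope a timer around a unit of work and read the
/// accumulated time afterwards. Assumes ctx is a non-null rcContext*.
/// @code
/// {
/// 	rcScopedTimer timer(ctx, RC_TIMER_RASTERIZE_TRIANGLES);
/// 	// ... rasterization work; the timer stops when 'timer' goes out of scope.
/// }
/// const int elapsed = ctx->getAccumulatedTime(RC_TIMER_RASTERIZE_TRIANGLES);
/// @endcode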
/// Specifies a configuration to use when performing Recast builds.
/// @ingroup recast
struct rcConfig
{
/// The width of the field along the x-axis. [Limit: >= 0] [Units: vx]
int width;
/// The height of the field along the z-axis. [Limit: >= 0] [Units: vx]
int height;
/// The width/height size of tiles on the xz-plane. [Limit: >= 0] [Units: vx]
int tileSize;
/// The size of the non-navigable border around the heightfield. [Limit: >=0] [Units: vx]
int borderSize;
/// The xz-plane cell size to use for fields. [Limit: > 0] [Units: wu]
float cs;
/// The y-axis cell size to use for fields. [Limit: > 0] [Units: wu]
float ch;
/// The minimum bounds of the field's AABB. [(x, y, z)] [Units: wu]
float bmin[3];
/// The maximum bounds of the field's AABB. [(x, y, z)] [Units: wu]
float bmax[3];
/// The maximum slope that is considered walkable. [Limits: 0 <= value < 90] [Units: Degrees]
float walkableSlopeAngle;
/// Minimum floor to 'ceiling' height that will still allow the floor area to
/// be considered walkable. [Limit: >= 3] [Units: vx]
int walkableHeight;
/// Maximum ledge height that is considered to still be traversable. [Limit: >=0] [Units: vx]
int walkableClimb;
/// The distance to erode/shrink the walkable area of the heightfield away from
/// obstructions. [Limit: >=0] [Units: vx]
int walkableRadius;
/// The maximum allowed length for contour edges along the border of the mesh. [Limit: >=0] [Units: vx]
int maxEdgeLen;
/// The maximum distance a simplified contour's border edges should deviate
/// from the original raw contour. [Limit: >=0] [Units: vx]
float maxSimplificationError;
/// The minimum number of cells allowed to form isolated island areas. [Limit: >=0] [Units: vx]
int minRegionArea;
/// Any regions with a span count smaller than this value will, if possible,
/// be merged with larger regions. [Limit: >=0] [Units: vx]
int mergeRegionArea;
/// The maximum number of vertices allowed for polygons generated during the
/// contour to polygon conversion process. [Limit: >= 3]
int maxVertsPerPoly;
/// Sets the sampling distance to use when generating the detail mesh.
/// (For height detail only.) [Limits: 0 or >= 0.9] [Units: wu]
float detailSampleDist;
/// The maximum distance the detail mesh surface should deviate from heightfield
/// data. (For height detail only.) [Limit: >=0] [Units: wu]
float detailSampleMaxError;
};
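/// Example: deriving the voxel-unit fields from world-unit agent parameters, in
/// the spirit of the Recast demo. This is a sketch; agentHeight, agentMaxClimb,
/// agentRadius, meshBMin, and meshBMax are illustrative inputs, and <math.h>
/// and <string.h> are assumed.
/// @code
/// rcConfig cfg;
/// memset(&cfg, 0, sizeof(cfg));
/// cfg.cs = 0.3f;                  // xz-plane cell size [wu]
/// cfg.ch = 0.2f;                  // y-axis cell size [wu]
/// cfg.walkableSlopeAngle = 45.0f; // [degrees]
/// cfg.walkableHeight = (int)ceilf(agentHeight / cfg.ch);
/// cfg.walkableClimb = (int)floorf(agentMaxClimb / cfg.ch);
/// cfg.walkableRadius = (int)ceilf(agentRadius / cfg.cs);
/// rcVcopy(cfg.bmin, meshBMin);
/// rcVcopy(cfg.bmax, meshBMax);
/// rcCalcGridSize(cfg.bmin, cfg.bmax, cfg.cs, &cfg.width, &cfg.height);
/// @endcode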
/// Defines the number of bits allocated to rcSpan::smin and rcSpan::smax.
static const int RC_SPAN_HEIGHT_BITS = 13;
/// Defines the maximum value for rcSpan::smin and rcSpan::smax.
static const int RC_SPAN_MAX_HEIGHT = (1 << RC_SPAN_HEIGHT_BITS) - 1;
/// The number of spans allocated per span spool.
/// @see rcSpanPool
static const int RC_SPANS_PER_POOL = 2048;
/// Represents a span in a heightfield.
/// @see rcHeightfield
struct rcSpan
{
unsigned int smin : RC_SPAN_HEIGHT_BITS; ///< The lower limit of the span. [Limit: < #smax]
unsigned int smax : RC_SPAN_HEIGHT_BITS; ///< The upper limit of the span. [Limit: <= #RC_SPAN_MAX_HEIGHT]
unsigned int area : 6; ///< The area id assigned to the span.
rcSpan* next; ///< The next span higher up in column.
};
/// A memory pool used for quick allocation of spans within a heightfield.
/// @see rcHeightfield
struct rcSpanPool
{
rcSpanPool* next; ///< The next span pool.
rcSpan items[RC_SPANS_PER_POOL]; ///< Array of spans in the pool.
};
/// A dynamic heightfield representing obstructed space.
/// @ingroup recast
struct rcHeightfield
{
rcHeightfield();
~rcHeightfield();
int width; ///< The width of the heightfield. (Along the x-axis in cell units.)
int height; ///< The height of the heightfield. (Along the z-axis in cell units.)
float bmin[3]; ///< The minimum bounds in world space. [(x, y, z)]
float bmax[3]; ///< The maximum bounds in world space. [(x, y, z)]
float cs; ///< The size of each cell. (On the xz-plane.)
float ch; ///< The height of each cell. (The minimum increment along the y-axis.)
rcSpan** spans; ///< Heightfield of spans (width*height).
rcSpanPool* pools; ///< Linked list of span pools.
rcSpan* freelist; ///< The next free span.
private:
// Explicitly-disabled copy constructor and copy assignment operator.
rcHeightfield(const rcHeightfield&);
rcHeightfield& operator=(const rcHeightfield&);
};
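/// Example: walking every span in a heightfield (sketch). The spans of a column
/// form a singly linked list ordered from lowest to highest.
/// @code
/// for (int z = 0; z < hf.height; ++z)
/// {
/// 	for (int x = 0; x < hf.width; ++x)
/// 	{
/// 		for (const rcSpan* s = hf.spans[x + z * hf.width]; s; s = s->next)
/// 		{
/// 			// Inspect s->smin, s->smax, s->area here.
/// 		}
/// 	}
/// }
/// @endcode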
/// Provides information on the content of a cell column in a compact heightfield.
struct rcCompactCell
{
unsigned int index : 24; ///< Index to the first span in the column.
unsigned int count : 8; ///< Number of spans in the column.
};
/// Represents a span of unobstructed space within a compact heightfield.
struct rcCompactSpan
{
unsigned short y; ///< The lower extent of the span. (Measured from the heightfield's base.)
unsigned short reg; ///< The id of the region the span belongs to. (Or zero if not in a region.)
unsigned int con : 24; ///< Packed neighbor connection data.
unsigned int h : 8; ///< The height of the span. (Measured from #y.)
};
/// A compact, static heightfield representing unobstructed space.
/// @ingroup recast
struct rcCompactHeightfield
{
rcCompactHeightfield();
~rcCompactHeightfield();
int width; ///< The width of the heightfield. (Along the x-axis in cell units.)
int height; ///< The height of the heightfield. (Along the z-axis in cell units.)
int spanCount; ///< The number of spans in the heightfield.
int walkableHeight; ///< The walkable height used during the build of the field. (See: rcConfig::walkableHeight)
int walkableClimb; ///< The walkable climb used during the build of the field. (See: rcConfig::walkableClimb)
int borderSize; ///< The AABB border size used during the build of the field. (See: rcConfig::borderSize)
unsigned short maxDistance; ///< The maximum distance value of any span within the field.
unsigned short maxRegions; ///< The maximum region id of any span within the field.
float bmin[3]; ///< The minimum bounds in world space. [(x, y, z)]
float bmax[3]; ///< The maximum bounds in world space. [(x, y, z)]
float cs; ///< The size of each cell. (On the xz-plane.)
float ch; ///< The height of each cell. (The minimum increment along the y-axis.)
rcCompactCell* cells; ///< Array of cells. [Size: #width*#height]
rcCompactSpan* spans; ///< Array of spans. [Size: #spanCount]
unsigned short* dist; ///< Array containing border distance data. [Size: #spanCount]
unsigned char* areas; ///< Array containing area id data. [Size: #spanCount]
private:
// Explicitly-disabled copy constructor and copy assignment operator.
rcCompactHeightfield(const rcCompactHeightfield&);
rcCompactHeightfield& operator=(const rcCompactHeightfield&);
};
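/// Example: iterating the spans of a compact heightfield (sketch). Each cell
/// stores the index of its first span and the span count for that column.
/// @code
/// for (int z = 0; z < chf.height; ++z)
/// {
/// 	for (int x = 0; x < chf.width; ++x)
/// 	{
/// 		const rcCompactCell& c = chf.cells[x + z * chf.width];
/// 		for (int i = (int)c.index, ni = (int)(c.index + c.count); i < ni; ++i)
/// 		{
/// 			const rcCompactSpan& s = chf.spans[i];
/// 			// Inspect s.y, s.h, s.reg, and the parallel chf.areas[i] here.
/// 		}
/// 	}
/// }
/// @endcode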
/// Represents a heightfield layer within a layer set.
/// @see rcHeightfieldLayerSet
struct rcHeightfieldLayer
{
float bmin[3]; ///< The minimum bounds in world space. [(x, y, z)]
float bmax[3]; ///< The maximum bounds in world space. [(x, y, z)]
float cs; ///< The size of each cell. (On the xz-plane.)
float ch; ///< The height of each cell. (The minimum increment along the y-axis.)
int width; ///< The width of the heightfield. (Along the x-axis in cell units.)
int height; ///< The height of the heightfield. (Along the z-axis in cell units.)
int minx; ///< The minimum x-bounds of usable data.
int maxx; ///< The maximum x-bounds of usable data.
int miny; ///< The minimum y-bounds of usable data. (Along the z-axis.)
int maxy; ///< The maximum y-bounds of usable data. (Along the z-axis.)
int hmin; ///< The minimum height bounds of usable data. (Along the y-axis.)
int hmax; ///< The maximum height bounds of usable data. (Along the y-axis.)
unsigned char* heights; ///< The heightfield. [Size: width * height]
unsigned char* areas; ///< Area ids. [Size: Same as #heights]
unsigned char* cons; ///< Packed neighbor connection information. [Size: Same as #heights]
};
/// Represents a set of heightfield layers.
/// @ingroup recast
/// @see rcAllocHeightfieldLayerSet, rcFreeHeightfieldLayerSet
struct rcHeightfieldLayerSet
{
rcHeightfieldLayerSet();
~rcHeightfieldLayerSet();
rcHeightfieldLayer* layers; ///< The layers in the set. [Size: #nlayers]
int nlayers; ///< The number of layers in the set.
private:
// Explicitly-disabled copy constructor and copy assignment operator.
rcHeightfieldLayerSet(const rcHeightfieldLayerSet&);
rcHeightfieldLayerSet& operator=(const rcHeightfieldLayerSet&);
};
/// Represents a simple, non-overlapping contour in field space.
struct rcContour
{
int* verts; ///< Simplified contour vertex and connection data. [Size: 4 * #nverts]
int nverts; ///< The number of vertices in the simplified contour.
int* rverts; ///< Raw contour vertex and connection data. [Size: 4 * #nrverts]
int nrverts; ///< The number of vertices in the raw contour.
unsigned short reg; ///< The region id of the contour.
unsigned char area; ///< The area id of the contour.
};
/// Represents a group of related contours.
/// @ingroup recast
struct rcContourSet
{
rcContourSet();
~rcContourSet();
rcContour* conts; ///< An array of the contours in the set. [Size: #nconts]
int nconts; ///< The number of contours in the set.
float bmin[3]; ///< The minimum bounds in world space. [(x, y, z)]
float bmax[3]; ///< The maximum bounds in world space. [(x, y, z)]
float cs; ///< The size of each cell. (On the xz-plane.)
float ch; ///< The height of each cell. (The minimum increment along the y-axis.)
int width; ///< The width of the set. (Along the x-axis in cell units.)
int height; ///< The height of the set. (Along the z-axis in cell units.)
int borderSize; ///< The AABB border size used to generate the source data from which the contours were derived.
float maxError; ///< The max edge error that this contour set was simplified with.
private:
// Explicitly-disabled copy constructor and copy assignment operator.
rcContourSet(const rcContourSet&);
rcContourSet& operator=(const rcContourSet&);
};
/// Represents a polygon mesh suitable for use in building a navigation mesh.
/// @ingroup recast
struct rcPolyMesh
{
rcPolyMesh();
~rcPolyMesh();
unsigned short* verts; ///< The mesh vertices. [Form: (x, y, z) * #nverts]
unsigned short* polys; ///< Polygon and neighbor data. [Length: #maxpolys * 2 * #nvp]
unsigned short* regs; ///< The region id assigned to each polygon. [Length: #maxpolys]
unsigned short* flags; ///< The user defined flags for each polygon. [Length: #maxpolys]
unsigned char* areas; ///< The area id assigned to each polygon. [Length: #maxpolys]
int nverts; ///< The number of vertices.
int npolys; ///< The number of polygons.
int maxpolys; ///< The number of allocated polygons.
int nvp; ///< The maximum number of vertices per polygon.
float bmin[3]; ///< The minimum bounds in world space. [(x, y, z)]
float bmax[3]; ///< The maximum bounds in world space. [(x, y, z)]
float cs; ///< The size of each cell. (On the xz-plane.)
float ch; ///< The height of each cell. (The minimum increment along the y-axis.)
int borderSize; ///< The AABB border size used to generate the source data from which the mesh was derived.
float maxEdgeError; ///< The max error of the polygon edges in the mesh.
private:
// Explicitly-disabled copy constructor and copy assignment operator.
rcPolyMesh(const rcPolyMesh&);
rcPolyMesh& operator=(const rcPolyMesh&);
};
/// Contains triangle meshes that represent detailed height data associated
/// with the polygons in its associated polygon mesh object.
/// @ingroup recast
struct rcPolyMeshDetail
{
rcPolyMeshDetail();
unsigned int* meshes; ///< The sub-mesh data. [Size: 4*#nmeshes]
float* verts; ///< The mesh vertices. [Size: 3*#nverts]
unsigned char* tris; ///< The mesh triangles. [Size: 4*#ntris]
int nmeshes; ///< The number of sub-meshes defined by #meshes.
int nverts; ///< The number of vertices in #verts.
int ntris; ///< The number of triangles in #tris.
private:
// Explicitly-disabled copy constructor and copy assignment operator.
rcPolyMeshDetail(const rcPolyMeshDetail&);
rcPolyMeshDetail& operator=(const rcPolyMeshDetail&);
};
/// @name Allocation Functions
/// Functions used to allocate and de-allocate Recast objects.
/// @see rcAllocSetCustom
/// @{
/// Allocates a heightfield object using the Recast allocator.
/// @return A heightfield that is ready for initialization, or null on failure.
/// @ingroup recast
/// @see rcCreateHeightfield, rcFreeHeightField
rcHeightfield* rcAllocHeightfield();
/// Frees the specified heightfield object using the Recast allocator.
/// @param[in] heightfield A heightfield allocated using #rcAllocHeightfield
/// @ingroup recast
/// @see rcAllocHeightfield
void rcFreeHeightField(rcHeightfield* heightfield);
/// Allocates a compact heightfield object using the Recast allocator.
/// @return A compact heightfield that is ready for initialization, or null on failure.
/// @ingroup recast
/// @see rcBuildCompactHeightfield, rcFreeCompactHeightfield
rcCompactHeightfield* rcAllocCompactHeightfield();
/// Frees the specified compact heightfield object using the Recast allocator.
/// @param[in] compactHeightfield A compact heightfield allocated using #rcAllocCompactHeightfield
/// @ingroup recast
/// @see rcAllocCompactHeightfield
void rcFreeCompactHeightfield(rcCompactHeightfield* compactHeightfield);
/// Allocates a heightfield layer set using the Recast allocator.
/// @return A heightfield layer set that is ready for initialization, or null on failure.
/// @ingroup recast
/// @see rcBuildHeightfieldLayers, rcFreeHeightfieldLayerSet
rcHeightfieldLayerSet* rcAllocHeightfieldLayerSet();
/// Frees the specified heightfield layer set using the Recast allocator.
/// @param[in] layerSet A heightfield layer set allocated using #rcAllocHeightfieldLayerSet
/// @ingroup recast
/// @see rcAllocHeightfieldLayerSet
void rcFreeHeightfieldLayerSet(rcHeightfieldLayerSet* layerSet);
/// Allocates a contour set object using the Recast allocator.
/// @return A contour set that is ready for initialization, or null on failure.
/// @ingroup recast
/// @see rcBuildContours, rcFreeContourSet
rcContourSet* rcAllocContourSet();
/// Frees the specified contour set using the Recast allocator.
/// @param[in] contourSet A contour set allocated using #rcAllocContourSet
/// @ingroup recast
/// @see rcAllocContourSet
void rcFreeContourSet(rcContourSet* contourSet);
/// Allocates a polygon mesh object using the Recast allocator.
/// @return A polygon mesh that is ready for initialization, or null on failure.
/// @ingroup recast
/// @see rcBuildPolyMesh, rcFreePolyMesh
rcPolyMesh* rcAllocPolyMesh();
/// Frees the specified polygon mesh using the Recast allocator.
/// @param[in] polyMesh A polygon mesh allocated using #rcAllocPolyMesh
/// @ingroup recast
/// @see rcAllocPolyMesh
void rcFreePolyMesh(rcPolyMesh* polyMesh);
/// Allocates a detail mesh object using the Recast allocator.
/// @return A detail mesh that is ready for initialization, or null on failure.
/// @ingroup recast
/// @see rcBuildPolyMeshDetail, rcFreePolyMeshDetail
rcPolyMeshDetail* rcAllocPolyMeshDetail();
/// Frees the specified detail mesh using the Recast allocator.
/// @param[in] detailMesh A detail mesh allocated using #rcAllocPolyMeshDetail
/// @ingroup recast
/// @see rcAllocPolyMeshDetail
void rcFreePolyMeshDetail(rcPolyMeshDetail* detailMesh);
/// @}
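/// Example: the allocate / build / free pattern shared by the functions above
/// (sketch; error handling reduced to early returns).
/// @code
/// rcHeightfield* hf = rcAllocHeightfield();
/// if (!hf)
/// 	return false; // Allocation failed.
/// if (!rcCreateHeightfield(ctx, *hf, cfg.width, cfg.height, cfg.bmin, cfg.bmax, cfg.cs, cfg.ch))
/// {
/// 	rcFreeHeightField(hf);
/// 	return false;
/// }
/// // ... rasterize, filter, and build from the heightfield ...
/// rcFreeHeightField(hf);
/// @endcode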
/// Heightfield border flag.
/// If a heightfield region ID has this bit set, then the region is a border
/// region and its spans are considered un-walkable.
/// (Used during the region and contour build process.)
/// @see rcCompactSpan::reg
static const unsigned short RC_BORDER_REG = 0x8000;
/// Polygon touches multiple regions.
/// If a polygon has this region ID it was merged with or created
/// from polygons of different regions during the polymesh
/// build step that removes redundant border vertices.
/// (Used during the polymesh and detail polymesh build processes)
/// @see rcPolyMesh::regs
static const unsigned short RC_MULTIPLE_REGS = 0;
/// Border vertex flag.
/// If a region ID has this bit set, then the associated element lies on
/// a tile border. If a contour vertex's region ID has this bit set, the
/// vertex will later be removed in order to match the segments and vertices
/// at tile boundaries.
/// (Used during the build process.)
/// @see rcCompactSpan::reg, #rcContour::verts, #rcContour::rverts
static const int RC_BORDER_VERTEX = 0x10000;
/// Area border flag.
/// If a region ID has this bit set, then the associated element lies on
/// the border of an area.
/// (Used during the region and contour build process.)
/// @see rcCompactSpan::reg, #rcContour::verts, #rcContour::rverts
static const int RC_AREA_BORDER = 0x20000;
/// Contour build flags.
/// @see rcBuildContours
enum rcBuildContoursFlags
{
RC_CONTOUR_TESS_WALL_EDGES = 0x01, ///< Tessellate solid (impassable) edges during contour simplification.
RC_CONTOUR_TESS_AREA_EDGES = 0x02 ///< Tessellate edges between areas during contour simplification.
};
/// Applied to the region id field of contour vertices in order to extract the region id.
/// The region id field of a vertex may have several flags applied to it, so the
/// field's value can't be used directly.
/// @see rcContour::verts, rcContour::rverts
static const int RC_CONTOUR_REG_MASK = 0xffff;
/// A value that indicates an invalid index within a mesh.
/// @note This does not necessarily indicate an error.
/// @see rcPolyMesh::polys
static const unsigned short RC_MESH_NULL_IDX = 0xffff;
/// Represents the null area.
/// When a data element is given this value it is considered to no longer be
/// assigned to a usable area. (E.g. It is un-walkable.)
static const unsigned char RC_NULL_AREA = 0;
/// The default area id used to indicate a walkable polygon.
/// This is also the maximum allowed area id, and the only non-null area id
/// recognized by some steps in the build process.
static const unsigned char RC_WALKABLE_AREA = 63;
/// The value returned by #rcGetCon if the specified direction is not connected
/// to another span. (Has no neighbor.)
static const int RC_NOT_CONNECTED = 0x3f;
/// @name General helper functions
/// @{
/// Swaps the values of the two parameters.
/// @param[in,out] a Value A
/// @param[in,out] b Value B
template<class T> inline void rcSwap(T& a, T& b) { T t = a; a = b; b = t; }
/// Returns the minimum of two values.
/// @param[in] a Value A
/// @param[in] b Value B
/// @return The minimum of the two values.
template<class T> inline T rcMin(T a, T b) { return a < b ? a : b; }
/// Returns the maximum of two values.
/// @param[in] a Value A
/// @param[in] b Value B
/// @return The maximum of the two values.
template<class T> inline T rcMax(T a, T b) { return a > b ? a : b; }
/// Returns the absolute value.
/// @param[in] a The value.
/// @return The absolute value of the specified value.
template<class T> inline T rcAbs(T a) { return a < 0 ? -a : a; }
/// Returns the square of the value.
/// @param[in] a The value.
/// @return The square of the value.
template<class T> inline T rcSqr(T a) { return a * a; }
/// Clamps the value to the specified range.
/// @param[in] value The value to clamp.
/// @param[in] minInclusive The minimum permitted return value.
/// @param[in] maxInclusive The maximum permitted return value.
/// @return The value, clamped to the specified range.
template<class T> inline T rcClamp(T value, T minInclusive, T maxInclusive)
{
return value < minInclusive ? minInclusive : (value > maxInclusive ? maxInclusive : value);
}
/// Returns the square root of the value.
/// @param[in] x The value.
/// @return The square root of the value.
float rcSqrt(float x);
/// @}
/// @name Vector helper functions.
/// @{
/// Derives the cross product of two vectors. (@p v1 x @p v2)
/// @param[out] dest The cross product. [(x, y, z)]
/// @param[in] v1 A vector [(x, y, z)]
/// @param[in] v2 A vector [(x, y, z)]
inline void rcVcross(float* dest, const float* v1, const float* v2)
{
dest[0] = v1[1]*v2[2] - v1[2]*v2[1];
dest[1] = v1[2]*v2[0] - v1[0]*v2[2];
dest[2] = v1[0]*v2[1] - v1[1]*v2[0];
}
/// Derives the dot product of two vectors. (@p v1 . @p v2)
/// @param[in] v1 A vector [(x, y, z)]
/// @param[in] v2 A vector [(x, y, z)]
/// @return The dot product.
inline float rcVdot(const float* v1, const float* v2)
{
return v1[0]*v2[0] + v1[1]*v2[1] + v1[2]*v2[2];
}
/// Performs a scaled vector addition. (@p v1 + (@p v2 * @p s))
/// @param[out] dest The result vector. [(x, y, z)]
/// @param[in] v1 The base vector. [(x, y, z)]
/// @param[in] v2 The vector to scale and add to @p v1. [(x, y, z)]
/// @param[in] s The amount to scale @p v2 by before adding to @p v1.
inline void rcVmad(float* dest, const float* v1, const float* v2, const float s)
{
dest[0] = v1[0]+v2[0]*s;
dest[1] = v1[1]+v2[1]*s;
dest[2] = v1[2]+v2[2]*s;
}
/// Performs a vector addition. (@p v1 + @p v2)
/// @param[out] dest The result vector. [(x, y, z)]
/// @param[in] v1 The base vector. [(x, y, z)]
/// @param[in] v2 The vector to add to @p v1. [(x, y, z)]
inline void rcVadd(float* dest, const float* v1, const float* v2)
{
dest[0] = v1[0]+v2[0];
dest[1] = v1[1]+v2[1];
dest[2] = v1[2]+v2[2];
}
/// Performs a vector subtraction. (@p v1 - @p v2)
/// @param[out] dest The result vector. [(x, y, z)]
/// @param[in] v1 The base vector. [(x, y, z)]
/// @param[in] v2 The vector to subtract from @p v1. [(x, y, z)]
inline void rcVsub(float* dest, const float* v1, const float* v2)
{
dest[0] = v1[0]-v2[0];
dest[1] = v1[1]-v2[1];
dest[2] = v1[2]-v2[2];
}
/// Selects the minimum value of each element from the specified vectors.
/// @param[in,out] mn A vector. (Will be updated with the result.) [(x, y, z)]
/// @param[in] v A vector. [(x, y, z)]
inline void rcVmin(float* mn, const float* v)
{
mn[0] = rcMin(mn[0], v[0]);
mn[1] = rcMin(mn[1], v[1]);
mn[2] = rcMin(mn[2], v[2]);
}
/// Selects the maximum value of each element from the specified vectors.
/// @param[in,out] mx A vector. (Will be updated with the result.) [(x, y, z)]
/// @param[in] v A vector. [(x, y, z)]
inline void rcVmax(float* mx, const float* v)
{
mx[0] = rcMax(mx[0], v[0]);
mx[1] = rcMax(mx[1], v[1]);
mx[2] = rcMax(mx[2], v[2]);
}
/// Performs a vector copy.
/// @param[out] dest The result. [(x, y, z)]
/// @param[in] v The vector to copy. [(x, y, z)]
inline void rcVcopy(float* dest, const float* v)
{
dest[0] = v[0];
dest[1] = v[1];
dest[2] = v[2];
}
/// Returns the distance between two points.
/// @param[in] v1 A point. [(x, y, z)]
/// @param[in] v2 A point. [(x, y, z)]
/// @return The distance between the two points.
inline float rcVdist(const float* v1, const float* v2)
{
float dx = v2[0] - v1[0];
float dy = v2[1] - v1[1];
float dz = v2[2] - v1[2];
return rcSqrt(dx*dx + dy*dy + dz*dz);
}
/// Returns the square of the distance between two points.
/// @param[in] v1 A point. [(x, y, z)]
/// @param[in] v2 A point. [(x, y, z)]
/// @return The square of the distance between the two points.
inline float rcVdistSqr(const float* v1, const float* v2)
{
float dx = v2[0] - v1[0];
float dy = v2[1] - v1[1];
float dz = v2[2] - v1[2];
return dx*dx + dy*dy + dz*dz;
}
/// Normalizes the vector.
/// @param[in,out] v The vector to normalize. [(x, y, z)]
inline void rcVnormalize(float* v)
{
float d = 1.0f / rcSqrt(rcSqr(v[0]) + rcSqr(v[1]) + rcSqr(v[2]));
v[0] *= d;
v[1] *= d;
v[2] *= d;
}
/// @}
/// @name Heightfield Functions
/// @see rcHeightfield
/// @{
/// Calculates the bounding box of an array of vertices.
/// @ingroup recast
/// @param[in] verts An array of vertices. [(x, y, z) * @p nv]
/// @param[in] numVerts The number of vertices in the @p verts array.
/// @param[out] minBounds The minimum bounds of the AABB. [(x, y, z)] [Units: wu]
/// @param[out] maxBounds The maximum bounds of the AABB. [(x, y, z)] [Units: wu]
void rcCalcBounds(const float* verts, int numVerts, float* minBounds, float* maxBounds);
/// Calculates the grid size based on the bounding box and grid cell size.
/// @ingroup recast
/// @param[in] minBounds The minimum bounds of the AABB. [(x, y, z)] [Units: wu]
/// @param[in] maxBounds The maximum bounds of the AABB. [(x, y, z)] [Units: wu]
/// @param[in] cellSize The xz-plane cell size. [Limit: > 0] [Units: wu]
/// @param[out] sizeX The width along the x-axis. [Limit: >= 0] [Units: vx]
/// @param[out] sizeZ The height along the z-axis. [Limit: >= 0] [Units: vx]
void rcCalcGridSize(const float* minBounds, const float* maxBounds, float cellSize, int* sizeX, int* sizeZ);
/// Initializes a new heightfield.
/// See the #rcConfig documentation for more information on the configuration parameters.
///
/// @see rcAllocHeightfield, rcHeightfield
/// @ingroup recast
///
/// @param[in,out] context The build context to use during the operation.
/// @param[in,out] heightfield The allocated heightfield to initialize.
/// @param[in] sizeX The width of the field along the x-axis. [Limit: >= 0] [Units: vx]
/// @param[in] sizeZ The height of the field along the z-axis. [Limit: >= 0] [Units: vx]
/// @param[in] minBounds The minimum bounds of the field's AABB. [(x, y, z)] [Units: wu]
/// @param[in] maxBounds The maximum bounds of the field's AABB. [(x, y, z)] [Units: wu]
/// @param[in] cellSize The xz-plane cell size to use for the field. [Limit: > 0] [Units: wu]
/// @param[in] cellHeight The y-axis cell size to use for field. [Limit: > 0] [Units: wu]
/// @returns True if the operation completed successfully.
bool rcCreateHeightfield(rcContext* context, rcHeightfield& heightfield, int sizeX, int sizeZ,
const float* minBounds, const float* maxBounds,
float cellSize, float cellHeight);
/// Sets the area id of all triangles with a slope below the specified value
/// to #RC_WALKABLE_AREA.
///
/// Only sets the area id's for the walkable triangles. Does not alter the
/// area id's for un-walkable triangles.
///
/// See the #rcConfig documentation for more information on the configuration parameters.
///
/// @see rcHeightfield, rcClearUnwalkableTriangles, rcRasterizeTriangles
///
/// @ingroup recast
/// @param[in,out] context The build context to use during the operation.
/// @param[in] walkableSlopeAngle The maximum slope that is considered walkable.
/// [Limits: 0 <= value < 90] [Units: Degrees]
/// @param[in] verts The vertices. [(x, y, z) * @p nv]
/// @param[in] numVerts The number of vertices.
/// @param[in] tris The triangle vertex indices. [(vertA, vertB, vertC) * @p nt]
/// @param[in] numTris The number of triangles.
/// @param[out] triAreaIDs The triangle area ids. [Length: >= @p nt]
void rcMarkWalkableTriangles(rcContext* context, float walkableSlopeAngle, const float* verts, int numVerts,
const int* tris, int numTris, unsigned char* triAreaIDs);
/// Sets the area id of all triangles with a slope greater than or equal to the specified value to #RC_NULL_AREA.
///
/// Only sets the area id's for the un-walkable triangles. Does not alter the
/// area id's for walkable triangles.
///
/// See the #rcConfig documentation for more information on the configuration parameters.
///
/// @see rcHeightfield, rcClearUnwalkableTriangles, rcRasterizeTriangles
///
/// @ingroup recast
/// @param[in,out] context The build context to use during the operation.
/// @param[in] walkableSlopeAngle The maximum slope that is considered walkable.
/// [Limits: 0 <= value < 90] [Units: Degrees]
/// @param[in] verts The vertices. [(x, y, z) * @p nv]
/// @param[in] numVerts The number of vertices.
/// @param[in] tris The triangle vertex indices. [(vertA, vertB, vertC) * @p nt]
/// @param[in] numTris The number of triangles.
/// @param[out] triAreaIDs The triangle area ids. [Length: >= @p nt]
void rcClearUnwalkableTriangles(rcContext* context, float walkableSlopeAngle, const float* verts, int numVerts,
const int* tris, int numTris, unsigned char* triAreaIDs);
/// Adds a span to the specified heightfield.
///
/// The span addition can be set to favor flags. If the span is merged with
/// another span and the new @p spanMax is within @p flagMergeThreshold units
/// from the existing span, the span flags are merged.
///
/// @ingroup recast
/// @param[in,out] context The build context to use during the operation.
/// @param[in,out] heightfield An initialized heightfield.
/// @param[in] x The column x index where the span is to be added.
/// [Limits: 0 <= value < rcHeightfield::width]
/// @param[in] z The column z index where the span is to be added.
/// [Limits: 0 <= value < rcHeightfield::height]
/// @param[in] spanMin The minimum height of the span. [Limit: < @p spanMax] [Units: vx]
/// @param[in] spanMax The maximum height of the span. [Limit: <= #RC_SPAN_MAX_HEIGHT] [Units: vx]
/// @param[in] areaID The area id of the span. [Limit: <= #RC_WALKABLE_AREA]
/// @param[in] flagMergeThreshold The merge threshold. [Limit: >= 0] [Units: vx]
/// @returns True if the operation completed successfully.
bool rcAddSpan(rcContext* context, rcHeightfield& heightfield,
int x, int z,
unsigned short spanMin, unsigned short spanMax,
unsigned char areaID, int flagMergeThreshold);
/// Rasterizes a single triangle into the specified heightfield.
///
/// Calling this for each triangle in a mesh is less efficient than calling #rcRasterizeTriangles.
///
/// No spans will be added if the triangle does not overlap the heightfield grid.
///
/// @see rcHeightfield
/// @ingroup recast
/// @param[in,out] context The build context to use during the operation.
/// @param[in] v0 Triangle vertex 0 [(x, y, z)]
/// @param[in] v1 Triangle vertex 1 [(x, y, z)]
/// @param[in] v2 Triangle vertex 2 [(x, y, z)]
/// @param[in] areaID The area id of the triangle. [Limit: <= #RC_WALKABLE_AREA]
/// @param[in,out] heightfield An initialized heightfield.
/// @param[in] flagMergeThreshold The distance where the walkable flag is favored over the non-walkable flag.
/// [Limit: >= 0] [Units: vx]
/// @returns True if the operation completed successfully.
bool rcRasterizeTriangle(rcContext* context,
const float* v0, const float* v1, const float* v2,
unsigned char areaID, rcHeightfield& heightfield, int flagMergeThreshold = 1);
/// Rasterizes an indexed triangle mesh into the specified heightfield.
///
/// Spans will only be added for triangles that overlap the heightfield grid.
///
/// @see rcHeightfield
/// @ingroup recast
/// @param[in,out] context The build context to use during the operation.
/// @param[in] verts The vertices. [(x, y, z) * @p nv]
/// @param[in] numVerts The number of vertices. (unused) TODO (graham): Remove in next major release
/// @param[in] tris The triangle indices. [(vertA, vertB, vertC) * @p nt]
/// @param[in] triAreaIDs The area id's of the triangles. [Limit: <= #RC_WALKABLE_AREA] [Size: @p nt]
/// @param[in] numTris The number of triangles.
/// @param[in,out] heightfield An initialized heightfield.
/// @param[in] flagMergeThreshold The distance where the walkable flag is favored over the non-walkable flag.
/// [Limit: >= 0] [Units: vx]
/// @returns True if the operation completed successfully.
bool rcRasterizeTriangles(rcContext* context,
const float* verts, int numVerts,
const int* tris, const unsigned char* triAreaIDs, int numTris,
rcHeightfield& heightfield, int flagMergeThreshold = 1);
/// Rasterizes an indexed triangle mesh into the specified heightfield.
///
/// Spans will only be added for triangles that overlap the heightfield grid.
///
/// @see rcHeightfield
/// @ingroup recast
/// @param[in,out] context The build context to use during the operation.
/// @param[in] verts The vertices. [(x, y, z) * @p nv]
/// @param[in] numVerts The number of vertices. (unused) TODO (graham): Remove in next major release
/// @param[in] tris The triangle indices. [(vertA, vertB, vertC) * @p nt]
/// @param[in] triAreaIDs The area id's of the triangles. [Limit: <= #RC_WALKABLE_AREA] [Size: @p nt]
/// @param[in] numTris The number of triangles.
/// @param[in,out] heightfield An initialized heightfield.
/// @param[in] flagMergeThreshold The distance where the walkable flag is favored over the non-walkable flag.
/// [Limit: >= 0] [Units: vx]
/// @returns True if the operation completed successfully.
bool rcRasterizeTriangles(rcContext* context,
const float* verts, int numVerts,
const unsigned short* tris, const unsigned char* triAreaIDs, int numTris,
rcHeightfield& heightfield, int flagMergeThreshold = 1);
/// Rasterizes a triangle list into the specified heightfield.
///
/// Expects each triangle to be specified as three sequential vertices of 3 floats.
///
/// Spans will only be added for triangles that overlap the heightfield grid.
///
/// @see rcHeightfield
/// @ingroup recast
/// @param[in,out] context The build context to use during the operation.
/// @param[in] verts The triangle vertices. [(ax, ay, az, bx, by, bz, cx, cy, cz) * @p nt]
/// @param[in] triAreaIDs The area id's of the triangles. [Limit: <= #RC_WALKABLE_AREA] [Size: @p nt]
/// @param[in] numTris The number of triangles.
/// @param[in,out] heightfield An initialized heightfield.
/// @param[in] flagMergeThreshold The distance where the walkable flag is favored over the non-walkable flag.
/// [Limit: >= 0] [Units: vx]
/// @returns True if the operation completed successfully.
bool rcRasterizeTriangles(rcContext* context,
const float* verts, const unsigned char* triAreaIDs, int numTris,
rcHeightfield& heightfield, int flagMergeThreshold = 1);
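/// Example: classifying triangles and rasterizing an indexed mesh (sketch;
/// assumes <vector>). The area buffer must be zero-initialized because
/// #rcMarkWalkableTriangles only writes entries for walkable triangles. Using
/// cfg.walkableClimb as the merge threshold follows the Recast demo convention.
/// @code
/// std::vector<unsigned char> triAreas(numTris, 0);
/// rcMarkWalkableTriangles(ctx, cfg.walkableSlopeAngle, verts, numVerts,
///                         tris, numTris, triAreas.data());
/// if (!rcRasterizeTriangles(ctx, verts, numVerts, tris, triAreas.data(),
///                           numTris, *hf, cfg.walkableClimb))
/// 	return false;
/// @endcode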
/// Marks non-walkable spans as walkable if their maximum is within @p walkableClimb of a walkable neighbor.
///
/// Allows the formation of walkable regions that will flow over low lying
/// objects such as curbs, and up structures such as stairways.
///
/// Two neighboring spans are walkable if: <tt>rcAbs(currentSpan.smax - neighborSpan.smax) < walkableClimb</tt>
///
/// @warning Will override the effect of #rcFilterLedgeSpans. So if both filters are used, call
/// #rcFilterLedgeSpans after calling this filter.
///
/// @see rcHeightfield, rcConfig
///
/// @ingroup recast
/// @param[in,out] context The build context to use during the operation.
/// @param[in] walkableClimb Maximum ledge height that is considered to still be traversable.
/// [Limit: >=0] [Units: vx]
/// @param[in,out] heightfield A fully built heightfield. (All spans have been added.)
void rcFilterLowHangingWalkableObstacles(rcContext* context, int walkableClimb, rcHeightfield& heightfield);
/// Marks spans that are ledges as not-walkable.
///
/// A ledge is a span with one or more neighbors whose maximum is further away than @p walkableClimb
/// from the current span's maximum.
/// This method removes the impact of the overestimation of conservative voxelization
/// so the resulting mesh will not have regions hanging in the air over ledges.
///
/// A span is a ledge if: <tt>rcAbs(currentSpan.smax - neighborSpan.smax) > walkableClimb</tt>
///
/// @see rcHeightfield, rcConfig
///
/// @ingroup recast
/// @param[in,out] context The build context to use during the operation.
/// @param[in] walkableHeight Minimum floor to 'ceiling' height that will still allow the floor area to
/// be considered walkable. [Limit: >= 3] [Units: vx]
/// @param[in] walkableClimb Maximum ledge height that is considered to still be traversable.
/// [Limit: >=0] [Units: vx]
/// @param[in,out] heightfield A fully built heightfield. (All spans have been added.)
void rcFilterLedgeSpans(rcContext* context, int walkableHeight, int walkableClimb, rcHeightfield& heightfield);
/// Marks walkable spans as not walkable if the clearance above the span is less than the specified height.
///
/// For this filter, the clearance above the span is the distance from the span's
/// maximum to the next higher span's minimum. (Same grid column.)
///
/// @see rcHeightfield, rcConfig
/// @ingroup recast
///
/// @param[in,out] context The build context to use during the operation.
/// @param[in] walkableHeight Minimum floor to 'ceiling' height that will still allow the floor area to
/// be considered walkable. [Limit: >= 3] [Units: vx]
/// @param[in,out] heightfield A fully built heightfield. (All spans have been added.)
void rcFilterWalkableLowHeightSpans(rcContext* context, int walkableHeight, rcHeightfield& heightfield);
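/// Example: the filter ordering implied by the warning on
/// #rcFilterLowHangingWalkableObstacles, with the ledge filter applied after it
/// (sketch).
/// @code
/// rcFilterLowHangingWalkableObstacles(ctx, cfg.walkableClimb, *hf);
/// rcFilterLedgeSpans(ctx, cfg.walkableHeight, cfg.walkableClimb, *hf);
/// rcFilterWalkableLowHeightSpans(ctx, cfg.walkableHeight, *hf);
/// @endcode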
/// Returns the number of spans contained in the specified heightfield.
/// @ingroup recast
/// @param[in,out] context The build context to use during the operation.
/// @param[in] heightfield An initialized heightfield.
/// @returns The number of spans in the heightfield.
int rcGetHeightFieldSpanCount(rcContext* context, const rcHeightfield& heightfield);
/// @}
/// @name Compact Heightfield Functions
/// @see rcCompactHeightfield
/// @{
/// Builds a compact heightfield representing open space, from a heightfield representing solid space.
///
/// This is just the beginning of the process of fully building a compact heightfield.
/// Various filters may be applied, then the distance field and regions built.
/// E.g: #rcBuildDistanceField and #rcBuildRegions
///
/// See the #rcConfig documentation for more information on the configuration parameters.
///
/// @see rcAllocCompactHeightfield, rcHeightfield, rcCompactHeightfield, rcConfig
/// @ingroup recast
///
/// @param[in,out] context The build context to use during the operation.
/// @param[in] walkableHeight Minimum floor to 'ceiling' height that will still allow the floor area
/// to be considered walkable. [Limit: >= 3] [Units: vx]
/// @param[in] walkableClimb Maximum ledge height that is considered to still be traversable.
/// [Limit: >=0] [Units: vx]
/// @param[in] heightfield The heightfield to be compacted.
/// @param[out] compactHeightfield The resulting compact heightfield. (Must be pre-allocated.)
/// @returns True if the operation completed successfully.
bool rcBuildCompactHeightfield(rcContext* context, int walkableHeight, int walkableClimb,
const rcHeightfield& heightfield, rcCompactHeightfield& compactHeightfield);
/// Erodes the walkable area within the heightfield by the specified radius.
///
/// Basically, any spans that are closer to a boundary or obstruction than the specified radius
/// are marked as un-walkable.
///
/// This method is usually called immediately after the heightfield has been built.
///
/// @see rcCompactHeightfield, rcBuildCompactHeightfield, rcConfig::walkableRadius
/// @ingroup recast
///
/// @param[in,out] context The build context to use during the operation.
/// @param[in] erosionRadius The radius of erosion. [Limits: 0 < value < 255] [Units: vx]
/// @param[in,out] compactHeightfield The populated compact heightfield to erode.
/// @returns True if the operation completed successfully.
bool rcErodeWalkableArea(rcContext* context, int erosionRadius, rcCompactHeightfield& compactHeightfield);
/// Applies a median filter to walkable area types (based on area id), removing noise.
///
/// This filter is usually applied after applying area id's using functions
/// such as #rcMarkBoxArea, #rcMarkConvexPolyArea, and #rcMarkCylinderArea.
///
/// @see rcCompactHeightfield
/// @ingroup recast
///
/// @param[in,out] context The build context to use during the operation.
/// @param[in,out] compactHeightfield A populated compact heightfield.
/// @returns True if the operation completed successfully.
bool rcMedianFilterWalkableArea(rcContext* context, rcCompactHeightfield& compactHeightfield);
/// Applies an area id to all spans within the specified bounding box. (AABB)
///
/// @see rcCompactHeightfield, rcMedianFilterWalkableArea
/// @ingroup recast
///
/// @param[in,out] context The build context to use during the operation.
/// @param[in] boxMinBounds The minimum extents of the bounding box. [(x, y, z)] [Units: wu]
/// @param[in] boxMaxBounds The maximum extents of the bounding box. [(x, y, z)] [Units: wu]
/// @param[in] areaId The area id to apply. [Limit: <= #RC_WALKABLE_AREA]
/// @param[in,out] compactHeightfield A populated compact heightfield.
void rcMarkBoxArea(rcContext* context, const float* boxMinBounds, const float* boxMaxBounds, unsigned char areaId,
rcCompactHeightfield& compactHeightfield);
/// Applies the area id to the all spans within the specified convex polygon.
///
/// The values of spatial parameters are in world units.
///
/// The y-values of the polygon vertices are ignored. So the polygon is effectively
/// projected onto the xz-plane, translated to @p minY, and extruded to @p maxY.
///
/// @see rcCompactHeightfield, rcMedianFilterWalkableArea
/// @ingroup recast
///
/// @param[in,out] context The build context to use during the operation.
/// @param[in] verts The vertices of the polygon [Form: (x, y, z) * @p numVerts]
/// @param[in] numVerts The number of vertices in the polygon.
/// @param[in] minY The height of the base of the polygon. [Units: wu]
/// @param[in] maxY The height of the top of the polygon. [Units: wu]
/// @param[in] areaId The area id to apply. [Limit: <= #RC_WALKABLE_AREA]
/// @param[in,out] compactHeightfield A populated compact heightfield.
void rcMarkConvexPolyArea(rcContext* context, const float* verts, int numVerts,
float minY, float maxY, unsigned char areaId,
rcCompactHeightfield& compactHeightfield);
/// Expands a convex polygon along its vertex normals by the given offset amount.
/// Inserts extra vertices to bevel sharp corners.
///
/// Helper function to offset convex polygons for rcMarkConvexPolyArea.
///
/// @ingroup recast
///
/// @param[in] verts The vertices of the polygon [Form: (x, y, z) * @p numVerts]
/// @param[in] numVerts The number of vertices in the polygon.
/// @param[in] offset How much to offset the polygon by. [Units: wu]
/// @param[out] outVerts The offset vertices (should hold up to 2 * @p numVerts) [Form: (x, y, z) * return value]
/// @param[in] maxOutVerts The max number of vertices that can be stored to @p outVerts.
/// @returns Number of vertices in the offset polygon or 0 if too few vertices in @p outVerts.
int rcOffsetPoly(const float* verts, int numVerts, float offset, float* outVerts, int maxOutVerts);
/// Applies the area id to all spans within the specified y-axis-aligned cylinder.
///
/// @see rcCompactHeightfield, rcMedianFilterWalkableArea
///
/// @ingroup recast
///
/// @param[in,out] context The build context to use during the operation.
/// @param[in] position The center of the base of the cylinder. [Form: (x, y, z)] [Units: wu]
/// @param[in] radius The radius of the cylinder. [Units: wu] [Limit: > 0]
/// @param[in] height The height of the cylinder. [Units: wu] [Limit: > 0]
/// @param[in] areaId The area id to apply. [Limit: <= #RC_WALKABLE_AREA]
/// @param[in,out] compactHeightfield A populated compact heightfield.
void rcMarkCylinderArea(rcContext* context, const float* position, float radius, float height,
unsigned char areaId, rcCompactHeightfield& compactHeightfield);
/// Builds the distance field for the specified compact heightfield.
/// @ingroup recast
/// @param[in,out] ctx The build context to use during the operation.
/// @param[in,out] chf A populated compact heightfield.
/// @returns True if the operation completed successfully.
bool rcBuildDistanceField(rcContext* ctx, rcCompactHeightfield& chf);
/// Builds region data for the heightfield using watershed partitioning.
/// @ingroup recast
/// @param[in,out] ctx The build context to use during the operation.
/// @param[in,out] chf A populated compact heightfield.
/// @param[in] borderSize The size of the non-navigable border around the heightfield.
/// [Limit: >=0] [Units: vx]
/// @param[in] minRegionArea The minimum number of cells allowed to form isolated island areas.
/// [Limit: >=0] [Units: vx].
/// @param[in] mergeRegionArea Any regions with a span count smaller than this value will, if possible,
/// be merged with larger regions. [Limit: >=0] [Units: vx]
/// @returns True if the operation completed successfully.
bool rcBuildRegions(rcContext* ctx, rcCompactHeightfield& chf, int borderSize, int minRegionArea, int mergeRegionArea);
/// Builds region data for the heightfield by partitioning the heightfield in non-overlapping layers.
/// @ingroup recast
/// @param[in,out] ctx The build context to use during the operation.
/// @param[in,out] chf A populated compact heightfield.
/// @param[in] borderSize The size of the non-navigable border around the heightfield.
/// [Limit: >=0] [Units: vx]
/// @param[in] minRegionArea The minimum number of cells allowed to form isolated island areas.
/// [Limit: >=0] [Units: vx].
/// @returns True if the operation completed successfully.
bool rcBuildLayerRegions(rcContext* ctx, rcCompactHeightfield& chf, int borderSize, int minRegionArea);
/// Builds region data for the heightfield using simple monotone partitioning.
/// @ingroup recast
/// @param[in,out] ctx The build context to use during the operation.
/// @param[in,out] chf A populated compact heightfield.
/// @param[in] borderSize The size of the non-navigable border around the heightfield.
/// [Limit: >=0] [Units: vx]
/// @param[in] minRegionArea The minimum number of cells allowed to form isolated island areas.
/// [Limit: >=0] [Units: vx].
/// @param[in] mergeRegionArea Any regions with a span count smaller than this value will, if possible,
/// be merged with larger regions. [Limit: >=0] [Units: vx]
/// @returns True if the operation completed successfully.
bool rcBuildRegionsMonotone(rcContext* ctx, rcCompactHeightfield& chf,
int borderSize, int minRegionArea, int mergeRegionArea);
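/// Example: the watershed path builds the distance field first; the monotone
/// and layer variants skip that step (sketch).
/// @code
/// if (!rcBuildDistanceField(ctx, *chf))
/// 	return false;
/// if (!rcBuildRegions(ctx, *chf, cfg.borderSize, cfg.minRegionArea, cfg.mergeRegionArea))
/// 	return false;
/// @endcode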
/// Sets the neighbor connection data for the specified direction.
/// @param[in] span The span to update.
/// @param[in] direction The direction to set. [Limits: 0 <= value < 4]
/// @param[in] neighborIndex The index of the neighbor span.
inline void rcSetCon(rcCompactSpan& span, int direction, int neighborIndex)
{
const unsigned int shift = (unsigned int)direction * 6;
const unsigned int con = span.con;
span.con = (con & ~(0x3f << shift)) | (((unsigned int)neighborIndex & 0x3f) << shift);
}
/// Gets neighbor connection data for the specified direction.
/// @param[in] span The span to check.
/// @param[in] direction The direction to check. [Limits: 0 <= value < 4]
/// @return The neighbor connection data for the specified direction, or #RC_NOT_CONNECTED if there is no connection.
inline int rcGetCon(const rcCompactSpan& span, int direction)
{
const unsigned int shift = (unsigned int)direction * 6;
return (span.con >> shift) & 0x3f;
}
/// Gets the standard width (x-axis) offset for the specified direction.
/// @param[in] direction The direction. [Limits: 0 <= value < 4]
/// @return The width offset to apply to the current cell position to move in the direction.
inline int rcGetDirOffsetX(int direction)
{
static const int offset[4] = { -1, 0, 1, 0, };
return offset[direction & 0x03];
}
// TODO (graham): Rename this to rcGetDirOffsetZ
/// Gets the standard height (z-axis) offset for the specified direction.
/// @param[in] direction The direction. [Limits: 0 <= value < 4]
/// @return The height offset to apply to the current cell position to move in the direction.
inline int rcGetDirOffsetY(int direction)
{
static const int offset[4] = { 0, 1, 0, -1 };
return offset[direction & 0x03];
}
/// Gets the direction for the specified offset. One of @p offsetX and @p offsetZ should be 0.
/// @param[in] offsetX The x offset. [Limits: -1 <= value <= 1]
/// @param[in] offsetZ The z offset. [Limits: -1 <= value <= 1]
/// @return The direction that represents the offset.
inline int rcGetDirForOffset(int offsetX, int offsetZ)
{
static const int dirs[5] = { 3, 0, -1, 2, 1 };
return dirs[((offsetZ + 1) << 1) + offsetX];
}
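/// Example: visiting the four axis-neighbors of a compact span using the
/// connection and offset helpers above (sketch; s, x, z, and chf come from a
/// compact-heightfield iteration as shown earlier).
/// @code
/// for (int dir = 0; dir < 4; ++dir)
/// {
/// 	if (rcGetCon(s, dir) != RC_NOT_CONNECTED)
/// 	{
/// 		const int nx = x + rcGetDirOffsetX(dir);
/// 		const int nz = z + rcGetDirOffsetY(dir);
/// 		const int ni = (int)chf.cells[nx + nz * chf.width].index + rcGetCon(s, dir);
/// 		const rcCompactSpan& neighbor = chf.spans[ni];
/// 		// Process the neighbor span here.
/// 	}
/// }
/// @endcode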
/// @}
/// @name Layer, Contour, Polymesh, and Detail Mesh Functions
/// @see rcHeightfieldLayer, rcContourSet, rcPolyMesh, rcPolyMeshDetail
/// @{
/// Builds a layer set from the specified compact heightfield.
/// @ingroup recast
/// @param[in,out] ctx The build context to use during the operation.
/// @param[in] chf A fully built compact heightfield.
/// @param[in] borderSize The size of the non-navigable border around the heightfield. [Limit: >=0]
/// [Units: vx]
/// @param[in] walkableHeight Minimum floor to 'ceiling' height that will still allow the floor area
/// to be considered walkable. [Limit: >= 3] [Units: vx]
/// @param[out] lset The resulting layer set. (Must be pre-allocated.)
/// @returns True if the operation completed successfully.
bool rcBuildHeightfieldLayers(rcContext* ctx, const rcCompactHeightfield& chf,
int borderSize, int walkableHeight,
rcHeightfieldLayerSet& lset);
/// Builds a contour set from the region outlines in the provided compact heightfield.
/// @ingroup recast
/// @param[in,out] ctx The build context to use during the operation.
/// @param[in] chf A fully built compact heightfield.
/// @param[in] maxError The maximum distance a simplified contour's border edges should deviate from
/// the original raw contour. [Limit: >=0] [Units: wu]
/// @param[in] maxEdgeLen The maximum allowed length for contour edges along the border of the mesh.
/// [Limit: >=0] [Units: vx]
/// @param[out] cset The resulting contour set. (Must be pre-allocated.)
/// @param[in] buildFlags The build flags. (See: #rcBuildContoursFlags)
/// @returns True if the operation completed successfully.
bool rcBuildContours(rcContext* ctx, const rcCompactHeightfield& chf,
float maxError, int maxEdgeLen,
rcContourSet& cset, int buildFlags = RC_CONTOUR_TESS_WALL_EDGES);
/// Builds a polygon mesh from the provided contours.
/// @ingroup recast
/// @param[in,out] ctx The build context to use during the operation.
/// @param[in] cset A fully built contour set.
/// @param[in] nvp The maximum number of vertices allowed for polygons generated during the
/// contour to polygon conversion process. [Limit: >= 3]
/// @param[out] mesh The resulting polygon mesh. (Must be pre-allocated.)
/// @returns True if the operation completed successfully.
bool rcBuildPolyMesh(rcContext* ctx, const rcContourSet& cset, const int nvp, rcPolyMesh& mesh);
/// Merges multiple polygon meshes into a single mesh.
/// @ingroup recast
/// @param[in,out] ctx The build context to use during the operation.
/// @param[in] meshes An array of polygon meshes to merge. [Size: @p nmeshes]
/// @param[in] nmeshes The number of polygon meshes in the meshes array.
/// @param[out] mesh The resulting polygon mesh. (Must be pre-allocated.)
/// @returns True if the operation completed successfully.
bool rcMergePolyMeshes(rcContext* ctx, rcPolyMesh** meshes, const int nmeshes, rcPolyMesh& mesh);
/// Builds a detail mesh from the provided polygon mesh.
/// @ingroup recast
/// @param[in,out] ctx The build context to use during the operation.
/// @param[in] mesh A fully built polygon mesh.
/// @param[in] chf The compact heightfield used to build the polygon mesh.
/// @param[in] sampleDist Sets the distance to use when sampling the heightfield. [Limit: >=0] [Units: wu]
/// @param[in] sampleMaxError The maximum distance the detail mesh surface should deviate from
/// heightfield data. [Limit: >=0] [Units: wu]
/// @param[out] dmesh The resulting detail mesh. (Must be pre-allocated.)
/// @returns True if the operation completed successfully.
bool rcBuildPolyMeshDetail(rcContext* ctx, const rcPolyMesh& mesh, const rcCompactHeightfield& chf,
float sampleDist, float sampleMaxError,
rcPolyMeshDetail& dmesh);
/// Copies the poly mesh data from src to dst.
/// @ingroup recast
/// @param[in,out] ctx The build context to use during the operation.
/// @param[in] src The source mesh to copy from.
/// @param[out] dst The resulting poly mesh. (Must be pre-allocated and empty.)
/// @returns True if the operation completed successfully.
bool rcCopyPolyMesh(rcContext* ctx, const rcPolyMesh& src, rcPolyMesh& dst);
/// Merges multiple detail meshes into a single detail mesh.
/// @ingroup recast
/// @param[in,out] ctx The build context to use during the operation.
/// @param[in] meshes An array of detail meshes to merge. [Size: @p nmeshes]
/// @param[in] nmeshes The number of detail meshes in the meshes array.
/// @param[out] mesh The resulting detail mesh. (Must be pre-allocated.)
/// @returns True if the operation completed successfully.
bool rcMergePolyMeshDetails(rcContext* ctx, rcPolyMeshDetail** meshes, const int nmeshes, rcPolyMeshDetail& mesh);
/// @}
#endif // RECAST_H
///////////////////////////////////////////////////////////////////////////
// Due to the large amount of detailed documentation for this file,
// the content normally located at the end of the header file has been
// separated out to a file in /Docs/Extern.
|
0 | repos/c2z/use_cases | repos/c2z/use_cases/msdfgen/BitmapRef.zig | const std = @import("std");
pub const byte = u8;
pub fn BitmapRef(comptime T: type, comptime N: c_int) type {
return extern struct {
const Self = @This();
pixels: [*c]T,
width: c_int,
height: c_int,
pub inline fn call(self: *const Self, x: c_int, y: c_int) [*c]T {
return self.pixels + N * (self.width * y + x);
}
};
}
pub fn BitmapConstRef(comptime T: type, comptime N: c_int) type {
return extern struct {
const Self = @This();
pixels: [*c]const T,
width: c_int,
height: c_int,
pub inline fn call(self: *const Self, x: c_int, y: c_int) [*c]const T {
return self.pixels + N * (self.width * y + x);
}
};
}
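// A minimal sketch (added for illustration, not generated by c2z): pixel (x, y)
// of an N-channel bitmap starts at pixels + N * (width * y + x), so
// call(x, y)[c] addresses channel c of that pixel.
test "BitmapRef pixel addressing" {
    var data = [_]f32{0} ** 12; // 2x2 bitmap with 3 channels per pixel
    const ref = BitmapRef(f32, 3){ .pixels = &data, .width = 2, .height = 2 };
    ref.call(1, 0)[1] = 0.5; // green channel of pixel (1, 0)
    try std.testing.expectEqual(@as(f32, 0.5), data[4]); // 3 * (2*0 + 1) + 1
}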
|
0 | repos/c2z/use_cases | repos/c2z/use_cases/msdfgen/Contour.zig | const std = @import("std");
const cpp = @import("cpp");
const EdgeHolder = @import("EdgeHolder.zig").EdgeHolder;
pub const Contour = extern struct {
edges: cpp.Vector(EdgeHolder),
extern fn _ZN7msdfgen7Contour7addEdgeERKNS_10EdgeHolderE(self: *Contour, edge: *const EdgeHolder) void;
pub const addEdge = _ZN7msdfgen7Contour7addEdgeERKNS_10EdgeHolderE;
extern fn _ZN7msdfgen7Contour7addEdgeEv(self: *Contour) *EdgeHolder;
pub const addEdge__Overload2 = _ZN7msdfgen7Contour7addEdgeEv;
extern fn _ZNK7msdfgen7Contour5boundERdS1_S1_S1_(self: *const Contour, l: *f64, b: *f64, r: *f64, t: *f64) void;
pub const bound = _ZNK7msdfgen7Contour5boundERdS1_S1_S1_;
extern fn _ZNK7msdfgen7Contour11boundMitersERdS1_S1_S1_ddi(self: *const Contour, l: *f64, b: *f64, r: *f64, t: *f64, border: f64, miterLimit: f64, polarity: c_int) void;
pub const boundMiters = _ZNK7msdfgen7Contour11boundMitersERdS1_S1_S1_ddi;
extern fn _ZNK7msdfgen7Contour7windingEv(self: *const Contour) c_int;
pub const winding = _ZNK7msdfgen7Contour7windingEv;
extern fn _ZN7msdfgen7Contour7reverseEv(self: *Contour) void;
pub const reverse = _ZN7msdfgen7Contour7reverseEv;
};
|
0 | repos/c2z/use_cases | repos/c2z/use_cases/msdfgen/Vector2.zig | const std = @import("std");
pub const Vector2 = extern struct {
x: f64 = 0.0,
y: f64 = 0.0,
extern fn _ZN7msdfgen7Vector2C1Ed(self: *Vector2, val: f64) void;
pub inline fn init(val: f64) Vector2 {
var self: Vector2 = undefined;
_ZN7msdfgen7Vector2C1Ed(&self, val);
return self;
}
extern fn _ZN7msdfgen7Vector2C1Edd(self: *Vector2, x: f64, y: f64) void;
pub inline fn initXY(x: f64, y: f64) Vector2 {
var self: Vector2 = undefined;
_ZN7msdfgen7Vector2C1Edd(&self, x, y);
return self;
}
extern fn _ZN7msdfgen7Vector25resetEv(self: *Vector2) void;
pub const reset = _ZN7msdfgen7Vector25resetEv;
extern fn _ZN7msdfgen7Vector23setEdd(self: *Vector2, x: f64, y: f64) void;
pub const set = _ZN7msdfgen7Vector23setEdd;
extern fn _ZNK7msdfgen7Vector26lengthEv(self: *const Vector2) f64;
pub const length = _ZNK7msdfgen7Vector26lengthEv;
extern fn _ZNK7msdfgen7Vector29directionEv(self: *const Vector2) f64;
pub const direction = _ZNK7msdfgen7Vector29directionEv;
extern fn _ZNK7msdfgen7Vector29normalizeEb(self: *const Vector2, allowZero: bool) Vector2;
pub const normalize = _ZNK7msdfgen7Vector29normalizeEb;
extern fn _ZNK7msdfgen7Vector213getOrthogonalEb(self: *const Vector2, polarity: bool) Vector2;
pub const getOrthogonal = _ZNK7msdfgen7Vector213getOrthogonalEb;
extern fn _ZNK7msdfgen7Vector214getOrthonormalEbb(self: *const Vector2, polarity: bool, allowZero: bool) Vector2;
pub const getOrthonormal = _ZNK7msdfgen7Vector214getOrthonormalEbb;
extern fn _ZNK7msdfgen7Vector27projectERKS0_b(self: *const Vector2, vector: *const Vector2, positive: bool) Vector2;
pub const project = _ZNK7msdfgen7Vector27projectERKS0_b;
extern fn _ZNK7msdfgen7Vector2ntEv(self: *const Vector2) bool;
pub const not = _ZNK7msdfgen7Vector2ntEv;
extern fn _ZNK7msdfgen7Vector2eqERKS0_(self: *const Vector2, other: *const Vector2) bool;
pub const eql = _ZNK7msdfgen7Vector2eqERKS0_;
extern fn _ZNK7msdfgen7Vector2neERKS0_(self: *const Vector2, other: *const Vector2) bool;
pub const notEql = _ZNK7msdfgen7Vector2neERKS0_;
// extern fn _ZNK7msdfgen7Vector2psEv(self: *const Vector2) Vector2;
// pub const add = _ZNK7msdfgen7Vector2psEv;
// extern fn _ZNK7msdfgen7Vector2ngEv(self: *const Vector2) Vector2;
// pub const sub = _ZNK7msdfgen7Vector2ngEv;
extern fn _ZNK7msdfgen7Vector2plERKS0_(self: *const Vector2, other: *const Vector2) Vector2;
pub const add = _ZNK7msdfgen7Vector2plERKS0_;
extern fn _ZNK7msdfgen7Vector2miERKS0_(self: *const Vector2, other: *const Vector2) Vector2;
pub const sub = _ZNK7msdfgen7Vector2miERKS0_;
extern fn _ZNK7msdfgen7Vector2mlERKS0_(self: *const Vector2, other: *const Vector2) Vector2;
pub const mul = _ZNK7msdfgen7Vector2mlERKS0_;
extern fn _ZNK7msdfgen7Vector2dvERKS0_(self: *const Vector2, other: *const Vector2) Vector2;
pub const div = _ZNK7msdfgen7Vector2dvERKS0_;
extern fn _ZNK7msdfgen7Vector2mlEd(self: *const Vector2, value: f64) Vector2;
pub const mulScalar = _ZNK7msdfgen7Vector2mlEd;
extern fn _ZNK7msdfgen7Vector2dvEd(self: *const Vector2, value: f64) Vector2;
pub const divScalar = _ZNK7msdfgen7Vector2dvEd;
extern fn _ZN7msdfgen7Vector2pLERKS0_(self: *Vector2, other: *const Vector2) *Vector2;
pub const addInto = _ZN7msdfgen7Vector2pLERKS0_;
extern fn _ZN7msdfgen7Vector2mIERKS0_(self: *Vector2, other: *const Vector2) *Vector2;
pub const subInto = _ZN7msdfgen7Vector2mIERKS0_;
extern fn _ZN7msdfgen7Vector2mLERKS0_(self: *Vector2, other: *const Vector2) *Vector2;
pub const mulInto = _ZN7msdfgen7Vector2mLERKS0_;
extern fn _ZN7msdfgen7Vector2dVERKS0_(self: *Vector2, other: *const Vector2) *Vector2;
pub const divInto = _ZN7msdfgen7Vector2dVERKS0_;
extern fn _ZN7msdfgen7Vector2mLEd(self: *Vector2, value: f64) *Vector2;
pub const mulScalarInto = _ZN7msdfgen7Vector2mLEd;
extern fn _ZN7msdfgen7Vector2dVEd(self: *Vector2, value: f64) *Vector2;
pub const divScalarInto = _ZN7msdfgen7Vector2dVEd;
};
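// A usage sketch (illustrative; requires linking against the msdfgen C++
// library that provides the mangled symbols above):
//
//   const a = Vector2.initXY(3.0, 4.0);
//   const b = Vector2.init(1.0);
//   const len = a.length(); // 5.0
//   const sum = a.add(&b);  // (4.0, 5.0)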
|
0 | repos/c2z/use_cases | repos/c2z/use_cases/msdfgen/Shape.zig | const std = @import("std");
const cpp = @import("cpp");
const Contour = @import("Contour.zig").Contour;
const Scanline = @import("Scanline.zig").Scanline;
pub const Shape = extern struct {
pub const Bounds = extern struct {
l: f64,
b: f64,
r: f64,
t: f64,
};
contours: cpp.Vector(Contour),
inverseYAxis: bool,
extern fn _ZN7msdfgen5ShapeC1Ev(self: *Shape) void;
pub inline fn init() Shape {
var self: Shape = undefined;
_ZN7msdfgen5ShapeC1Ev(&self);
return self;
}
extern fn _ZN7msdfgen5Shape10addContourERKNS_7ContourE(self: *Shape, contour: *const Contour) void;
pub const addContour = _ZN7msdfgen5Shape10addContourERKNS_7ContourE;
extern fn _ZN7msdfgen5Shape10addContourEv(self: *Shape) *Contour;
pub const addContour__Overload2 = _ZN7msdfgen5Shape10addContourEv;
extern fn _ZN7msdfgen5Shape9normalizeEv(self: *Shape) void;
pub const normalize = _ZN7msdfgen5Shape9normalizeEv;
extern fn _ZNK7msdfgen5Shape8validateEv(self: *const Shape) bool;
pub const validate = _ZNK7msdfgen5Shape8validateEv;
extern fn _ZNK7msdfgen5Shape5boundERdS1_S1_S1_(self: *const Shape, l: *f64, b: *f64, r: *f64, t: *f64) void;
pub const bound = _ZNK7msdfgen5Shape5boundERdS1_S1_S1_;
extern fn _ZNK7msdfgen5Shape11boundMitersERdS1_S1_S1_ddi(self: *const Shape, l: *f64, b: *f64, r: *f64, t: *f64, border: f64, miterLimit: f64, polarity: c_int) void;
pub const boundMiters = _ZNK7msdfgen5Shape11boundMitersERdS1_S1_S1_ddi;
extern fn _ZNK7msdfgen5Shape9getBoundsEddi(self: *const Shape, border: f64, miterLimit: f64, polarity: c_int) Bounds;
pub const getBounds = _ZNK7msdfgen5Shape9getBoundsEddi;
extern fn _ZNK7msdfgen5Shape8scanlineERNS_8ScanlineEd(self: *const Shape, line: *Scanline, y: f64) void;
pub const scanline = _ZNK7msdfgen5Shape8scanlineERNS_8ScanlineEd;
extern fn _ZNK7msdfgen5Shape9edgeCountEv(self: *const Shape) c_int;
pub const edgeCount = _ZNK7msdfgen5Shape9edgeCountEv;
extern fn _ZN7msdfgen5Shape14orientContoursEv(self: *Shape) void;
pub const orientContours = _ZN7msdfgen5Shape14orientContoursEv;
};
|
0 | repos/c2z/use_cases | repos/c2z/use_cases/msdfgen/Scanline.zig | const std = @import("std");
const cpp = @import("cpp");
pub const FillRule = extern struct {
bits: c_int = 0,
pub const FILL_NONZERO: FillRule = .{ .bits = 0 };
pub const FILL_ODD: FillRule = .{ .bits = 1 };
pub const FILL_POSITIVE: FillRule = .{ .bits = 2 };
pub const FILL_NEGATIVE: FillRule = .{ .bits = 3 };
// pub usingnamespace cpp.FlagsMixin(FillRule);
};
extern fn _ZN7msdfgen17interpretFillRuleEiNS_8FillRuleE(intersections: c_int, fillRule: FillRule) bool;
pub const interpretFillRule = _ZN7msdfgen17interpretFillRuleEiNS_8FillRuleE;
pub const Scanline = extern struct {
pub const Intersection = extern struct {
x: f64,
direction: c_int,
};
intersections: cpp.Vector(Intersection),
lastIndex: c_int,
extern fn _ZN7msdfgen8Scanline7overlapERKS0_S2_ddNS_8FillRuleE(self: *Scanline, a: *const Scanline, b: *const Scanline, xFrom: f64, xTo: f64, fillRule: FillRule) f64;
pub const overlap = _ZN7msdfgen8Scanline7overlapERKS0_S2_ddNS_8FillRuleE;
extern fn _ZN7msdfgen8ScanlineC1Ev(self: *Scanline) void;
pub inline fn init() Scanline {
var self: Scanline = undefined;
_ZN7msdfgen8ScanlineC1Ev(&self);
return self;
}
extern fn _ZN7msdfgen8Scanline16setIntersectionsERKNSt3__16vectorINS0_12IntersectionENS1_9allocatorIS3_EEEE(self: *Scanline, intersections: *const cpp.Vector(Intersection)) void;
pub const setIntersections = _ZN7msdfgen8Scanline16setIntersectionsERKNSt3__16vectorINS0_12IntersectionENS1_9allocatorIS3_EEEE;
extern fn _ZNK7msdfgen8Scanline18countIntersectionsEd(self: *const Scanline, x: f64) c_int;
pub const countIntersections = _ZNK7msdfgen8Scanline18countIntersectionsEd;
extern fn _ZNK7msdfgen8Scanline16sumIntersectionsEd(self: *const Scanline, x: f64) c_int;
pub const sumIntersections = _ZNK7msdfgen8Scanline16sumIntersectionsEd;
extern fn _ZNK7msdfgen8Scanline6filledEdNS_8FillRuleE(self: *const Scanline, x: f64, fillRule: FillRule) bool;
pub const filled = _ZNK7msdfgen8Scanline6filledEdNS_8FillRuleE;
extern fn _ZN7msdfgen8Scanline10preprocessEv(self: *Scanline) void;
pub const preprocess = _ZN7msdfgen8Scanline10preprocessEv;
extern fn _ZNK7msdfgen8Scanline6moveToEd(self: *const Scanline, x: f64) c_int;
pub const moveTo = _ZNK7msdfgen8Scanline6moveToEd;
};
|
0 | repos/c2z/use_cases | repos/c2z/use_cases/msdfgen/msdfgen.zig | const std = @import("std");
const BitmapRef = @import("BitmapRef.zig").BitmapRef;
const Shape = @import("Shape.zig").Shape;
const Projection = @import("Projection.zig").Projection;
const GeneratorConfig = @import("generator-config.zig").GeneratorConfig;
const MSDFGeneratorConfig = @import("generator-config.zig").MSDFGeneratorConfig;
extern fn _ZN7msdfgen11generateSDFERKNS_9BitmapRefIfLi1EEERKNS_5ShapeERKNS_10ProjectionEdRKNS_15GeneratorConfigE(output: *const BitmapRef(f32, 1), shape: *const Shape, projection: *const Projection, range: f64, config: *const GeneratorConfig) void;
/// Generates a conventional single-channel signed distance field.
pub const generateSDF = _ZN7msdfgen11generateSDFERKNS_9BitmapRefIfLi1EEERKNS_5ShapeERKNS_10ProjectionEdRKNS_15GeneratorConfigE;
extern fn _ZN7msdfgen17generatePseudoSDFERKNS_9BitmapRefIfLi1EEERKNS_5ShapeERKNS_10ProjectionEdRKNS_15GeneratorConfigE(output: *const BitmapRef(f32, 1), shape: *const Shape, projection: *const Projection, range: f64, config: *const GeneratorConfig) void;
/// Generates a single-channel signed pseudo-distance field.
pub const generatePseudoSDF = _ZN7msdfgen17generatePseudoSDFERKNS_9BitmapRefIfLi1EEERKNS_5ShapeERKNS_10ProjectionEdRKNS_15GeneratorConfigE;
extern fn _ZN7msdfgen12generateMSDFERKNS_9BitmapRefIfLi3EEERKNS_5ShapeERKNS_10ProjectionEdRKNS_19MSDFGeneratorConfigE(output: *const BitmapRef(f32, 3), shape: *const Shape, projection: *const Projection, range: f64, config: *const MSDFGeneratorConfig) void;
/// Generates a multi-channel signed distance field. Edge colors must be assigned first! (See edgeColoringSimple)
pub const generateMSDF = _ZN7msdfgen12generateMSDFERKNS_9BitmapRefIfLi3EEERKNS_5ShapeERKNS_10ProjectionEdRKNS_19MSDFGeneratorConfigE;
extern fn _ZN7msdfgen13generateMTSDFERKNS_9BitmapRefIfLi4EEERKNS_5ShapeERKNS_10ProjectionEdRKNS_19MSDFGeneratorConfigE(output: *const BitmapRef(f32, 4), shape: *const Shape, projection: *const Projection, range: f64, config: *const MSDFGeneratorConfig) void;
/// Generates a multi-channel signed distance field with true distance in the alpha channel. Edge colors must be assigned first.
pub const generateMTSDF = _ZN7msdfgen13generateMTSDFERKNS_9BitmapRefIfLi4EEERKNS_5ShapeERKNS_10ProjectionEdRKNS_19MSDFGeneratorConfigE;
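// A pipeline sketch (illustrative; assumes the msdfgen C++ library is linked
// and the shape/bitmap setup is done by the caller):
//
//   var shape = Shape.init();
//   // ... add contours/edges, then:
//   shape.normalize();
//   // assign edge colors (e.g. via edgeColoringSimple) before generateMSDF
//   var pixels: [32 * 32 * 3]f32 = undefined;
//   const output = BitmapRef(f32, 3){ .pixels = &pixels, .width = 32, .height = 32 };
//   const proj = Projection.init();
//   const config = MSDFGeneratorConfig.init();
//   generateMSDF(&output, &shape, &proj, 4.0, &config);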
|
0 | repos/c2z/use_cases | repos/c2z/use_cases/msdfgen/EdgeHolder.zig | const std = @import("std");
const EdgeSegment = @import("edge-segments.zig").EdgeSegment;
const EdgeColor = @import("EdgeColor.zig").EdgeColor;
const Vector2 = @import("Vector2.zig").Vector2;
const Point2 = Vector2;
pub const EdgeHolder = extern struct {
edgeSegment: [*c]EdgeSegment,
extern fn _ZN7msdfgen10EdgeHolder4swapERS0_S1_(self: *EdgeHolder, a: *EdgeHolder, b: *EdgeHolder) void;
pub const swap = _ZN7msdfgen10EdgeHolder4swapERS0_S1_;
extern fn _ZN7msdfgen10EdgeHolderC1Ev(self: *EdgeHolder) void;
pub inline fn init() EdgeHolder {
var self: EdgeHolder = undefined;
_ZN7msdfgen10EdgeHolderC1Ev(&self);
return self;
}
extern fn _ZN7msdfgen10EdgeHolderC1EPNS_11EdgeSegmentE(self: *EdgeHolder, segment: [*c]EdgeSegment) void;
pub inline fn init1(segment: [*c]EdgeSegment) EdgeHolder {
var self: EdgeHolder = undefined;
_ZN7msdfgen10EdgeHolderC1EPNS_11EdgeSegmentE(&self, segment);
return self;
}
extern fn _ZN7msdfgen10EdgeHolderC1ENS_7Vector2ES1_NS_9EdgeColorE(self: *EdgeHolder, p0: Point2, p1: Point2, edgeColor: EdgeColor) void;
pub inline fn init2(p0: Point2, p1: Point2, edgeColor: EdgeColor) EdgeHolder {
var self: EdgeHolder = undefined;
_ZN7msdfgen10EdgeHolderC1ENS_7Vector2ES1_NS_9EdgeColorE(&self, p0, p1, edgeColor);
return self;
}
extern fn _ZN7msdfgen10EdgeHolderC1ENS_7Vector2ES1_S1_NS_9EdgeColorE(self: *EdgeHolder, p0: Point2, p1: Point2, p2: Point2, edgeColor: EdgeColor) void;
pub inline fn init3(p0: Point2, p1: Point2, p2: Point2, edgeColor: EdgeColor) EdgeHolder {
var self: EdgeHolder = undefined;
_ZN7msdfgen10EdgeHolderC1ENS_7Vector2ES1_S1_NS_9EdgeColorE(&self, p0, p1, p2, edgeColor);
return self;
}
extern fn _ZN7msdfgen10EdgeHolderC1ENS_7Vector2ES1_S1_S1_NS_9EdgeColorE(self: *EdgeHolder, p0: Point2, p1: Point2, p2: Point2, p3: Point2, edgeColor: EdgeColor) void;
pub inline fn init4(p0: Point2, p1: Point2, p2: Point2, p3: Point2, edgeColor: EdgeColor) EdgeHolder {
var self: EdgeHolder = undefined;
_ZN7msdfgen10EdgeHolderC1ENS_7Vector2ES1_S1_S1_NS_9EdgeColorE(&self, p0, p1, p2, p3, edgeColor);
return self;
}
extern fn _ZN7msdfgen10EdgeHolderC1ERKS0_(self: *EdgeHolder, orig: *const EdgeHolder) void;
pub inline fn init5(orig: *const EdgeHolder) EdgeHolder {
var self: EdgeHolder = undefined;
_ZN7msdfgen10EdgeHolderC1ERKS0_(&self, orig);
return self;
}
extern fn _ZN7msdfgen10EdgeHolderD1Ev(self: *EdgeHolder) void;
pub inline fn deinit(self: *EdgeHolder) void {
self._ZN7msdfgen10EdgeHolderD1Ev();
}
extern fn _ZN7msdfgen10EdgeHolderaSERKS0_(self: *EdgeHolder, orig: *const EdgeHolder) *EdgeHolder;
pub const copyFrom = _ZN7msdfgen10EdgeHolderaSERKS0_;
};
|
0 | repos/c2z/use_cases | repos/c2z/use_cases/msdfgen/SignedDistance.zig | const std = @import("std");
/// Represents a signed distance and alignment, which together can be compared to uniquely determine the closest edge segment.
pub const SignedDistance = extern struct {
distance: f64,
dot: f64,
extern fn _ZN7msdfgen14SignedDistanceC1Ev(self: *SignedDistance) void;
pub inline fn init() SignedDistance {
var self: SignedDistance = undefined;
_ZN7msdfgen14SignedDistanceC1Ev(&self);
return self;
}
extern fn _ZN7msdfgen14SignedDistanceC1Edd(self: *SignedDistance, dist: f64, d: f64) void;
pub inline fn init1(dist: f64, d: f64) SignedDistance {
var self: SignedDistance = undefined;
_ZN7msdfgen14SignedDistanceC1Edd(&self, dist, d);
return self;
}
extern fn _ZN7msdfgenltENS_14SignedDistanceES0_(a: SignedDistance, b: SignedDistance) bool;
pub const lessThan = _ZN7msdfgenltENS_14SignedDistanceES0_;
extern fn _ZN7msdfgengtENS_14SignedDistanceES0_(a: SignedDistance, b: SignedDistance) bool;
pub const greaterThan = _ZN7msdfgengtENS_14SignedDistanceES0_;
extern fn _ZN7msdfgenleENS_14SignedDistanceES0_(a: SignedDistance, b: SignedDistance) bool;
pub const lessEqThan = _ZN7msdfgenleENS_14SignedDistanceES0_;
extern fn _ZN7msdfgengeENS_14SignedDistanceES0_(a: SignedDistance, b: SignedDistance) bool;
pub const greaterEqThan = _ZN7msdfgengeENS_14SignedDistanceES0_;
};
|
0 | repos/c2z/use_cases | repos/c2z/use_cases/msdfgen/EdgeColor.zig | const std = @import("std");
pub const EdgeColor = extern struct {
bits: c_int = 0,
pub const BLACK: EdgeColor = .{ .bits = 0 };
pub const RED: EdgeColor = .{ .bits = 1 };
pub const GREEN: EdgeColor = .{ .bits = 2 };
pub const YELLOW: EdgeColor = .{ .bits = 3 };
pub const BLUE: EdgeColor = .{ .bits = 4 };
pub const MAGENTA: EdgeColor = .{ .bits = 5 };
pub const CYAN: EdgeColor = .{ .bits = 6 };
pub const WHITE: EdgeColor = .{ .bits = 7 };
// pub usingnamespace cpp.FlagsMixin(EdgeColor);
};
|
0 | repos/c2z/use_cases | repos/c2z/use_cases/msdfgen/generator-config.zig | const std = @import("std");
/// The configuration of the MSDF error correction pass.
pub const ErrorCorrectionConfig = extern struct {
/// Mode of operation.
pub const Mode = extern struct {
bits: c_int = 0,
/// Skips error correction pass.
pub const DISABLED: Mode = .{ .bits = 0 };
/// Corrects all discontinuities of the distance field regardless if edges are adversely affected.
pub const INDISCRIMINATE: Mode = .{ .bits = 1 };
/// Corrects artifacts at edges and other discontinuous distances only if it does not affect edges or corners.
pub const EDGE_PRIORITY: Mode = .{ .bits = 2 };
/// Only corrects artifacts at edges.
pub const EDGE_ONLY: Mode = .{ .bits = 3 };
// pub usingnamespace cpp.FlagsMixin(Mode);
};
/// Configuration of whether to use an algorithm that computes the exact shape distance at the positions of suspected artifacts. This algorithm can be much slower.
pub const DistanceCheckMode = extern struct {
bits: c_int = 0,
/// Never computes exact shape distance.
pub const DO_NOT_CHECK_DISTANCE: DistanceCheckMode = .{ .bits = 0 };
/// Only computes exact shape distance at edges. Provides a good balance between speed and precision.
pub const CHECK_DISTANCE_AT_EDGE: DistanceCheckMode = .{ .bits = 1 };
/// Computes and compares the exact shape distance for each suspected artifact.
pub const ALWAYS_CHECK_DISTANCE: DistanceCheckMode = .{ .bits = 2 };
// pub usingnamespace cpp.FlagsMixin(DistanceCheckMode);
};
mode: Mode,
distanceCheckMode: DistanceCheckMode,
/// The minimum ratio between the actual and maximum expected distance delta to be considered an error.
minDeviationRatio: f64,
/// The minimum ratio between the pre-correction distance error and the post-correction distance error. Has no effect for DO_NOT_CHECK_DISTANCE.
minImproveRatio: f64,
/// An optional buffer to avoid dynamic allocation. Must have at least as many bytes as the MSDF has pixels.
buffer: [*c]u8,
extern const _ZN7msdfgen21ErrorCorrectionConfig24defaultMinDeviationRatioE: f64;
pub inline fn defaultMinDeviationRatio() f64 {
return _ZN7msdfgen21ErrorCorrectionConfig24defaultMinDeviationRatioE;
}
extern const _ZN7msdfgen21ErrorCorrectionConfig22defaultMinImproveRatioE: f64;
pub inline fn defaultMinImproveRatio() f64 {
return _ZN7msdfgen21ErrorCorrectionConfig22defaultMinImproveRatioE;
}
extern fn _ZN7msdfgen21ErrorCorrectionConfigC1ENS0_4ModeENS0_17DistanceCheckModeEddPh(self: *ErrorCorrectionConfig, mode: Mode, distanceCheckMode: DistanceCheckMode, minDeviationRatio: f64, minImproveRatio: f64, buffer: [*c]u8) void;
pub inline fn init(mode: Mode, distanceCheckMode: DistanceCheckMode, minDeviationRatio: f64, minImproveRatio: f64, buffer: [*c]u8) ErrorCorrectionConfig {
var self: ErrorCorrectionConfig = undefined;
_ZN7msdfgen21ErrorCorrectionConfigC1ENS0_4ModeENS0_17DistanceCheckModeEddPh(&self, mode, distanceCheckMode, minDeviationRatio, minImproveRatio, buffer);
return self;
}
};
/// The configuration of the distance field generator algorithm.
pub const GeneratorConfig = extern struct {
/// Specifies whether to use the version of the algorithm that supports overlapping contours with the same winding. May be set to false to improve performance when no such contours are present.
overlapSupport: bool,
extern fn _ZN7msdfgen15GeneratorConfigC1Eb(self: *GeneratorConfig, overlapSupport: bool) void;
pub inline fn init(overlapSupport: bool) GeneratorConfig {
var self: GeneratorConfig = undefined;
_ZN7msdfgen15GeneratorConfigC1Eb(&self, overlapSupport);
return self;
}
};
/// The configuration of the multi-channel distance field generator algorithm.
pub const MSDFGeneratorConfig = extern struct {
base: GeneratorConfig,
/// Configuration of the error correction pass.
errorCorrection: ErrorCorrectionConfig,
extern fn _ZN7msdfgen19MSDFGeneratorConfigC1Ev(self: *MSDFGeneratorConfig) void;
pub inline fn init() MSDFGeneratorConfig {
var self: MSDFGeneratorConfig = undefined;
_ZN7msdfgen19MSDFGeneratorConfigC1Ev(&self);
return self;
}
extern fn _ZN7msdfgen19MSDFGeneratorConfigC1EbRKNS_21ErrorCorrectionConfigE(self: *MSDFGeneratorConfig, overlapSupport: bool, errorCorrection: *const ErrorCorrectionConfig) void;
pub inline fn init1(overlapSupport: bool, errorCorrection: *const ErrorCorrectionConfig) MSDFGeneratorConfig {
var self: MSDFGeneratorConfig = undefined;
_ZN7msdfgen19MSDFGeneratorConfigC1EbRKNS_21ErrorCorrectionConfigE(&self, overlapSupport, errorCorrection);
return self;
}
};
|
0 | repos/c2z/use_cases | repos/c2z/use_cases/msdfgen/edge-segments.zig | const std = @import("std");
const EdgeColor = @import("EdgeColor.zig").EdgeColor;
const SignedDistance = @import("SignedDistance.zig").SignedDistance;
const Vector2 = @import("Vector2.zig").Vector2;
const Point2 = Vector2;
pub const EdgeSegment = extern struct {
vtable: *const anyopaque,
color: EdgeColor,
extern fn _ZN7msdfgen11EdgeSegmentC1ENS_9EdgeColorE(self: *EdgeSegment, edgeColor: EdgeColor) void;
pub inline fn init(edgeColor: EdgeColor) EdgeSegment {
var self: EdgeSegment = undefined;
_ZN7msdfgen11EdgeSegmentC1ENS_9EdgeColorE(&self, edgeColor);
return self;
}
extern fn _ZN7msdfgen11EdgeSegmentD1Ev(self: *EdgeSegment) void;
pub inline fn deinit(self: *EdgeSegment) void {
self._ZN7msdfgen11EdgeSegmentD1Ev();
}
extern fn _ZNK7msdfgen11EdgeSegment5cloneEv(self: *const EdgeSegment) [*c]EdgeSegment;
pub const clone = _ZNK7msdfgen11EdgeSegment5cloneEv;
extern fn _ZNK7msdfgen11EdgeSegment5pointEd(self: *const EdgeSegment, param: f64) Point2;
pub const point = _ZNK7msdfgen11EdgeSegment5pointEd;
extern fn _ZNK7msdfgen11EdgeSegment9directionEd(self: *const EdgeSegment, param: f64) Vector2;
pub const direction = _ZNK7msdfgen11EdgeSegment9directionEd;
extern fn _ZNK7msdfgen11EdgeSegment15directionChangeEd(self: *const EdgeSegment, param: f64) Vector2;
pub const directionChange = _ZNK7msdfgen11EdgeSegment15directionChangeEd;
extern fn _ZNK7msdfgen11EdgeSegment14signedDistanceENS_7Vector2ERd(self: *const EdgeSegment, origin: Point2, param: *f64) SignedDistance;
pub const signedDistance = _ZNK7msdfgen11EdgeSegment14signedDistanceENS_7Vector2ERd;
extern fn _ZNK7msdfgen11EdgeSegment24distanceToPseudoDistanceERNS_14SignedDistanceENS_7Vector2Ed(self: *const EdgeSegment, distance: *SignedDistance, origin: Point2, param: f64) void;
pub const distanceToPseudoDistance = _ZNK7msdfgen11EdgeSegment24distanceToPseudoDistanceERNS_14SignedDistanceENS_7Vector2Ed;
extern fn _ZNK7msdfgen11EdgeSegment21scanlineIntersectionsEPdPid(self: *const EdgeSegment, x: [*c]f64, dy: [*c]c_int, y: f64) c_int;
pub const scanlineIntersections = _ZNK7msdfgen11EdgeSegment21scanlineIntersectionsEPdPid;
extern fn _ZNK7msdfgen11EdgeSegment5boundERdS1_S1_S1_(self: *const EdgeSegment, l: *f64, b: *f64, r: *f64, t: *f64) void;
pub const bound = _ZNK7msdfgen11EdgeSegment5boundERdS1_S1_S1_;
extern fn _ZN7msdfgen11EdgeSegment7reverseEv(self: *EdgeSegment) void;
pub const reverse = _ZN7msdfgen11EdgeSegment7reverseEv;
extern fn _ZN7msdfgen11EdgeSegment14moveStartPointENS_7Vector2E(self: *EdgeSegment, to: Point2) void;
pub const moveStartPoint = _ZN7msdfgen11EdgeSegment14moveStartPointENS_7Vector2E;
extern fn _ZN7msdfgen11EdgeSegment12moveEndPointENS_7Vector2E(self: *EdgeSegment, to: Point2) void;
pub const moveEndPoint = _ZN7msdfgen11EdgeSegment12moveEndPointENS_7Vector2E;
extern fn _ZNK7msdfgen11EdgeSegment13splitInThirdsERPS0_S2_S2_(self: *const EdgeSegment, part1: *[*c]EdgeSegment, part2: *[*c]EdgeSegment, part3: *[*c]EdgeSegment) void;
pub const splitInThirds = _ZNK7msdfgen11EdgeSegment13splitInThirdsERPS0_S2_S2_;
};
pub const LinearSegment = extern struct {
base: EdgeSegment,
p: [2]Point2,
extern fn _ZN7msdfgen13LinearSegmentC1ENS_7Vector2ES1_NS_9EdgeColorE(self: *LinearSegment, p0: Point2, p1: Point2, edgeColor: EdgeColor) void;
pub inline fn init(p0: Point2, p1: Point2, edgeColor: EdgeColor) LinearSegment {
var self: LinearSegment = undefined;
_ZN7msdfgen13LinearSegmentC1ENS_7Vector2ES1_NS_9EdgeColorE(&self, p0, p1, edgeColor);
return self;
}
extern fn _ZNK7msdfgen13LinearSegment5cloneEv(self: *const LinearSegment) [*c]LinearSegment;
pub const clone = _ZNK7msdfgen13LinearSegment5cloneEv;
extern fn _ZNK7msdfgen13LinearSegment5pointEd(self: *const LinearSegment, param: f64) Point2;
pub const point = _ZNK7msdfgen13LinearSegment5pointEd;
extern fn _ZNK7msdfgen13LinearSegment9directionEd(self: *const LinearSegment, param: f64) Vector2;
pub const direction = _ZNK7msdfgen13LinearSegment9directionEd;
extern fn _ZNK7msdfgen13LinearSegment15directionChangeEd(self: *const LinearSegment, param: f64) Vector2;
pub const directionChange = _ZNK7msdfgen13LinearSegment15directionChangeEd;
extern fn _ZNK7msdfgen13LinearSegment6lengthEv(self: *const LinearSegment) f64;
pub const length = _ZNK7msdfgen13LinearSegment6lengthEv;
extern fn _ZNK7msdfgen13LinearSegment14signedDistanceENS_7Vector2ERd(self: *const LinearSegment, origin: Point2, param: *f64) SignedDistance;
pub const signedDistance = _ZNK7msdfgen13LinearSegment14signedDistanceENS_7Vector2ERd;
extern fn _ZNK7msdfgen13LinearSegment21scanlineIntersectionsEPdPid(self: *const LinearSegment, x: [*c]f64, dy: [*c]c_int, y: f64) c_int;
pub const scanlineIntersections = _ZNK7msdfgen13LinearSegment21scanlineIntersectionsEPdPid;
extern fn _ZNK7msdfgen13LinearSegment5boundERdS1_S1_S1_(self: *const LinearSegment, l: *f64, b: *f64, r: *f64, t: *f64) void;
pub const bound = _ZNK7msdfgen13LinearSegment5boundERdS1_S1_S1_;
extern fn _ZN7msdfgen13LinearSegment7reverseEv(self: *LinearSegment) void;
pub const reverse = _ZN7msdfgen13LinearSegment7reverseEv;
extern fn _ZN7msdfgen13LinearSegment14moveStartPointENS_7Vector2E(self: *LinearSegment, to: Point2) void;
pub const moveStartPoint = _ZN7msdfgen13LinearSegment14moveStartPointENS_7Vector2E;
extern fn _ZN7msdfgen13LinearSegment12moveEndPointENS_7Vector2E(self: *LinearSegment, to: Point2) void;
pub const moveEndPoint = _ZN7msdfgen13LinearSegment12moveEndPointENS_7Vector2E;
extern fn _ZNK7msdfgen13LinearSegment13splitInThirdsERPNS_11EdgeSegmentES3_S3_(self: *const LinearSegment, part1: *[*c]EdgeSegment, part2: *[*c]EdgeSegment, part3: *[*c]EdgeSegment) void;
pub const splitInThirds = _ZNK7msdfgen13LinearSegment13splitInThirdsERPNS_11EdgeSegmentES3_S3_;
};
pub const QuadraticSegment = extern struct {
base: EdgeSegment,
p: [3]Point2,
extern fn _ZN7msdfgen16QuadraticSegmentC1ENS_7Vector2ES1_S1_NS_9EdgeColorE(self: *QuadraticSegment, p0: Point2, p1: Point2, p2: Point2, edgeColor: EdgeColor) void;
pub inline fn init(p0: Point2, p1: Point2, p2: Point2, edgeColor: EdgeColor) QuadraticSegment {
var self: QuadraticSegment = undefined;
_ZN7msdfgen16QuadraticSegmentC1ENS_7Vector2ES1_S1_NS_9EdgeColorE(&self, p0, p1, p2, edgeColor);
return self;
}
extern fn _ZNK7msdfgen16QuadraticSegment5cloneEv(self: *const QuadraticSegment) [*c]QuadraticSegment;
pub const clone = _ZNK7msdfgen16QuadraticSegment5cloneEv;
extern fn _ZNK7msdfgen16QuadraticSegment5pointEd(self: *const QuadraticSegment, param: f64) Point2;
pub const point = _ZNK7msdfgen16QuadraticSegment5pointEd;
extern fn _ZNK7msdfgen16QuadraticSegment9directionEd(self: *const QuadraticSegment, param: f64) Vector2;
pub const direction = _ZNK7msdfgen16QuadraticSegment9directionEd;
extern fn _ZNK7msdfgen16QuadraticSegment15directionChangeEd(self: *const QuadraticSegment, param: f64) Vector2;
pub const directionChange = _ZNK7msdfgen16QuadraticSegment15directionChangeEd;
extern fn _ZNK7msdfgen16QuadraticSegment6lengthEv(self: *const QuadraticSegment) f64;
pub const length = _ZNK7msdfgen16QuadraticSegment6lengthEv;
extern fn _ZNK7msdfgen16QuadraticSegment14signedDistanceENS_7Vector2ERd(self: *const QuadraticSegment, origin: Point2, param: *f64) SignedDistance;
pub const signedDistance = _ZNK7msdfgen16QuadraticSegment14signedDistanceENS_7Vector2ERd;
extern fn _ZNK7msdfgen16QuadraticSegment21scanlineIntersectionsEPdPid(self: *const QuadraticSegment, x: [*c]f64, dy: [*c]c_int, y: f64) c_int;
pub const scanlineIntersections = _ZNK7msdfgen16QuadraticSegment21scanlineIntersectionsEPdPid;
extern fn _ZNK7msdfgen16QuadraticSegment5boundERdS1_S1_S1_(self: *const QuadraticSegment, l: *f64, b: *f64, r: *f64, t: *f64) void;
pub const bound = _ZNK7msdfgen16QuadraticSegment5boundERdS1_S1_S1_;
extern fn _ZN7msdfgen16QuadraticSegment7reverseEv(self: *QuadraticSegment) void;
pub const reverse = _ZN7msdfgen16QuadraticSegment7reverseEv;
extern fn _ZN7msdfgen16QuadraticSegment14moveStartPointENS_7Vector2E(self: *QuadraticSegment, to: Point2) void;
pub const moveStartPoint = _ZN7msdfgen16QuadraticSegment14moveStartPointENS_7Vector2E;
extern fn _ZN7msdfgen16QuadraticSegment12moveEndPointENS_7Vector2E(self: *QuadraticSegment, to: Point2) void;
pub const moveEndPoint = _ZN7msdfgen16QuadraticSegment12moveEndPointENS_7Vector2E;
extern fn _ZNK7msdfgen16QuadraticSegment13splitInThirdsERPNS_11EdgeSegmentES3_S3_(self: *const QuadraticSegment, part1: *[*c]EdgeSegment, part2: *[*c]EdgeSegment, part3: *[*c]EdgeSegment) void;
pub const splitInThirds = _ZNK7msdfgen16QuadraticSegment13splitInThirdsERPNS_11EdgeSegmentES3_S3_;
extern fn _ZNK7msdfgen16QuadraticSegment14convertToCubicEv(self: *const QuadraticSegment) [*c]EdgeSegment;
pub const convertToCubic = _ZNK7msdfgen16QuadraticSegment14convertToCubicEv;
};
pub const CubicSegment = extern struct {
base: EdgeSegment,
p: [4]Point2,
extern fn _ZN7msdfgen12CubicSegmentC1ENS_7Vector2ES1_S1_S1_NS_9EdgeColorE(self: *CubicSegment, p0: Point2, p1: Point2, p2: Point2, p3: Point2, edgeColor: EdgeColor) void;
pub inline fn init(p0: Point2, p1: Point2, p2: Point2, p3: Point2, edgeColor: EdgeColor) CubicSegment {
var self: CubicSegment = undefined;
_ZN7msdfgen12CubicSegmentC1ENS_7Vector2ES1_S1_S1_NS_9EdgeColorE(&self, p0, p1, p2, p3, edgeColor);
return self;
}
extern fn _ZNK7msdfgen12CubicSegment5cloneEv(self: *const CubicSegment) [*c]CubicSegment;
pub const clone = _ZNK7msdfgen12CubicSegment5cloneEv;
extern fn _ZNK7msdfgen12CubicSegment5pointEd(self: *const CubicSegment, param: f64) Point2;
pub const point = _ZNK7msdfgen12CubicSegment5pointEd;
extern fn _ZNK7msdfgen12CubicSegment9directionEd(self: *const CubicSegment, param: f64) Vector2;
pub const direction = _ZNK7msdfgen12CubicSegment9directionEd;
extern fn _ZNK7msdfgen12CubicSegment15directionChangeEd(self: *const CubicSegment, param: f64) Vector2;
pub const directionChange = _ZNK7msdfgen12CubicSegment15directionChangeEd;
extern fn _ZNK7msdfgen12CubicSegment14signedDistanceENS_7Vector2ERd(self: *const CubicSegment, origin: Point2, param: *f64) SignedDistance;
pub const signedDistance = _ZNK7msdfgen12CubicSegment14signedDistanceENS_7Vector2ERd;
extern fn _ZNK7msdfgen12CubicSegment21scanlineIntersectionsEPdPid(self: *const CubicSegment, x: [*c]f64, dy: [*c]c_int, y: f64) c_int;
pub const scanlineIntersections = _ZNK7msdfgen12CubicSegment21scanlineIntersectionsEPdPid;
extern fn _ZNK7msdfgen12CubicSegment5boundERdS1_S1_S1_(self: *const CubicSegment, l: *f64, b: *f64, r: *f64, t: *f64) void;
pub const bound = _ZNK7msdfgen12CubicSegment5boundERdS1_S1_S1_;
extern fn _ZN7msdfgen12CubicSegment7reverseEv(self: *CubicSegment) void;
pub const reverse = _ZN7msdfgen12CubicSegment7reverseEv;
extern fn _ZN7msdfgen12CubicSegment14moveStartPointENS_7Vector2E(self: *CubicSegment, to: Point2) void;
pub const moveStartPoint = _ZN7msdfgen12CubicSegment14moveStartPointENS_7Vector2E;
extern fn _ZN7msdfgen12CubicSegment12moveEndPointENS_7Vector2E(self: *CubicSegment, to: Point2) void;
pub const moveEndPoint = _ZN7msdfgen12CubicSegment12moveEndPointENS_7Vector2E;
extern fn _ZNK7msdfgen12CubicSegment13splitInThirdsERPNS_11EdgeSegmentES3_S3_(self: *const CubicSegment, part1: *[*c]EdgeSegment, part2: *[*c]EdgeSegment, part3: *[*c]EdgeSegment) void;
pub const splitInThirds = _ZNK7msdfgen12CubicSegment13splitInThirdsERPNS_11EdgeSegmentES3_S3_;
extern fn _ZN7msdfgen12CubicSegment10deconvergeEid(self: *CubicSegment, param: c_int, amount: f64) void;
pub const deconverge = _ZN7msdfgen12CubicSegment10deconvergeEid;
};
|
0 | repos/c2z/use_cases | repos/c2z/use_cases/msdfgen/Projection.zig | const std = @import("std");
const Vector2 = @import("Vector2.zig").Vector2;
const Point2 = Vector2;
/// A transformation from shape coordinates to pixel coordinates.
pub const Projection = extern struct {
scale: Vector2,
translate: Vector2,
extern fn _ZN7msdfgen10ProjectionC1Ev(self: *Projection) void;
pub inline fn init() Projection {
var self: Projection = undefined;
_ZN7msdfgen10ProjectionC1Ev(&self);
return self;
}
extern fn _ZN7msdfgen10ProjectionC1ERKNS_7Vector2ES3_(self: *Projection, scale: *const Vector2, translate: *const Vector2) void;
pub inline fn init1(scale: *const Vector2, translate: *const Vector2) Projection {
var self: Projection = undefined;
_ZN7msdfgen10ProjectionC1ERKNS_7Vector2ES3_(&self, scale, translate);
return self;
}
extern fn _ZNK7msdfgen10Projection7projectERKNS_7Vector2E(self: *const Projection, coord: *const Point2) Point2;
/// Converts the shape coordinate to pixel coordinate.
pub const project = _ZNK7msdfgen10Projection7projectERKNS_7Vector2E;
extern fn _ZNK7msdfgen10Projection9unprojectERKNS_7Vector2E(self: *const Projection, coord: *const Point2) Point2;
/// Converts the pixel coordinate to shape coordinate.
pub const unproject = _ZNK7msdfgen10Projection9unprojectERKNS_7Vector2E;
extern fn _ZNK7msdfgen10Projection13projectVectorERKNS_7Vector2E(self: *const Projection, vector: *const Vector2) Vector2;
/// Converts the vector to pixel coordinate space.
pub const projectVector = _ZNK7msdfgen10Projection13projectVectorERKNS_7Vector2E;
extern fn _ZNK7msdfgen10Projection15unprojectVectorERKNS_7Vector2E(self: *const Projection, vector: *const Vector2) Vector2;
/// Converts the vector from pixel coordinate space.
pub const unprojectVector = _ZNK7msdfgen10Projection15unprojectVectorERKNS_7Vector2E;
extern fn _ZNK7msdfgen10Projection8projectXEd(self: *const Projection, x: f64) f64;
/// Converts the X-coordinate from shape to pixel coordinate space.
pub const projectX = _ZNK7msdfgen10Projection8projectXEd;
extern fn _ZNK7msdfgen10Projection8projectYEd(self: *const Projection, y: f64) f64;
/// Converts the Y-coordinate from shape to pixel coordinate space.
pub const projectY = _ZNK7msdfgen10Projection8projectYEd;
extern fn _ZNK7msdfgen10Projection10unprojectXEd(self: *const Projection, x: f64) f64;
/// Converts the X-coordinate from pixel to shape coordinate space.
pub const unprojectX = _ZNK7msdfgen10Projection10unprojectXEd;
extern fn _ZNK7msdfgen10Projection10unprojectYEd(self: *const Projection, y: f64) f64;
/// Converts the Y-coordinate from pixel to shape coordinate space.
pub const unprojectY = _ZNK7msdfgen10Projection10unprojectYEd;
};
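// Round-trip sketch (illustrative; `scale`, `translate`, and `shapeCoord` are
// assumed caller-provided values, and the msdfgen C++ library must be linked):
//
//   const p = Projection.init1(&scale, &translate);
//   const px = p.project(&shapeCoord); // shape -> pixel space
//   const back = p.unproject(&px);     // pixel -> shape space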
|
0 | repos/c2z/use_cases/msdfgen | repos/c2z/use_cases/msdfgen/include/msdfgen-ext.h |
#pragma once
/*
* MULTI-CHANNEL SIGNED DISTANCE FIELD GENERATOR
* ---------------------------------------------
* A utility by Viktor Chlumsky, (c) 2014 - 2023
*
* The extension module provides ways to easily load input and save output using popular formats.
*
* Third party dependencies in extension module:
* - Skia by Google
* (to resolve self-intersecting paths)
* - FreeType 2
* (to load input font files)
* - TinyXML 2 by Lee Thomason
* (to aid in parsing input SVG files)
* - libpng by the PNG Development Group
* - or LodePNG by Lode Vandevenne
* (to save output PNG images)
*
*/
#include "ext/resolve-shape-geometry.h"
#include "ext/save-png.h"
#include "ext/import-svg.h"
#include "ext/import-font.h"
|
0 | repos/c2z/use_cases/msdfgen | repos/c2z/use_cases/msdfgen/include/msdfgen.h |
#pragma once
/*
* MULTI-CHANNEL SIGNED DISTANCE FIELD GENERATOR
* ---------------------------------------------
* A utility by Viktor Chlumsky, (c) 2014 - 2023
*
* The technique used to generate multi-channel distance fields in this code
* has been developed by Viktor Chlumsky in 2014 for his master's thesis,
* "Shape Decomposition for Multi-Channel Distance Fields". It provides improved
* quality of sharp corners in glyphs and other 2D shapes compared to monochrome
* distance fields. To reconstruct an image of the shape, apply the median of three
* operation on the triplet of sampled signed distance values.
*
*/
#include "core/arithmetics.hpp"
#include "core/Vector2.h"
#include "core/Projection.h"
#include "core/Scanline.h"
#include "core/Shape.h"
#include "core/BitmapRef.hpp"
#include "core/Bitmap.h"
#include "core/bitmap-interpolation.hpp"
#include "core/pixel-conversion.hpp"
#include "core/edge-coloring.h"
#include "core/generator-config.h"
#include "core/msdf-error-correction.h"
#include "core/render-sdf.h"
#include "core/rasterization.h"
#include "core/sdf-error-estimation.h"
#include "core/save-bmp.h"
#include "core/save-tiff.h"
#include "core/shape-description.h"
namespace msdfgen {
/// Generates a conventional single-channel signed distance field.
void generateSDF(const BitmapRef<float, 1> &output, const Shape &shape, const Projection &projection, double range, const GeneratorConfig &config = GeneratorConfig());
/// Generates a single-channel signed pseudo-distance field.
void generatePseudoSDF(const BitmapRef<float, 1> &output, const Shape &shape, const Projection &projection, double range, const GeneratorConfig &config = GeneratorConfig());
/// Generates a multi-channel signed distance field. Edge colors must be assigned first! (See edgeColoringSimple)
void generateMSDF(const BitmapRef<float, 3> &output, const Shape &shape, const Projection &projection, double range, const MSDFGeneratorConfig &config = MSDFGeneratorConfig());
/// Generates a multi-channel signed distance field with true distance in the alpha channel. Edge colors must be assigned first.
void generateMTSDF(const BitmapRef<float, 4> &output, const Shape &shape, const Projection &projection, double range, const MSDFGeneratorConfig &config = MSDFGeneratorConfig());
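// Illustrative usage sketch for the Projection-based API above (glyph loading
// and PNG output live in the extension module; loadGlyph here is an assumption):
//
//   Shape shape; // e.g. filled via loadGlyph from msdfgen-ext
//   shape.normalize();
//   edgeColoringSimple(shape, 3.0);
//   Bitmap<float, 3> msdf(32, 32);
//   generateMSDF(msdf, shape, Projection(Vector2(1.0), Vector2(4.0)), 4.0);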
// Old versions of the function API, kept for backwards compatibility
void generateSDF(const BitmapRef<float, 1> &output, const Shape &shape, double range, const Vector2 &scale, const Vector2 &translate, bool overlapSupport = true);
void generatePseudoSDF(const BitmapRef<float, 1> &output, const Shape &shape, double range, const Vector2 &scale, const Vector2 &translate, bool overlapSupport = true);
void generateMSDF(const BitmapRef<float, 3> &output, const Shape &shape, double range, const Vector2 &scale, const Vector2 &translate, const ErrorCorrectionConfig &errorCorrectionConfig = ErrorCorrectionConfig(), bool overlapSupport = true);
void generateMTSDF(const BitmapRef<float, 4> &output, const Shape &shape, double range, const Vector2 &scale, const Vector2 &translate, const ErrorCorrectionConfig &errorCorrectionConfig = ErrorCorrectionConfig(), bool overlapSupport = true);
// Original simpler versions of the previous functions, which work well under normal circumstances, but cannot deal with overlapping contours.
void generateSDF_legacy(const BitmapRef<float, 1> &output, const Shape &shape, double range, const Vector2 &scale, const Vector2 &translate);
void generatePseudoSDF_legacy(const BitmapRef<float, 1> &output, const Shape &shape, double range, const Vector2 &scale, const Vector2 &translate);
void generateMSDF_legacy(const BitmapRef<float, 3> &output, const Shape &shape, double range, const Vector2 &scale, const Vector2 &translate, ErrorCorrectionConfig errorCorrectionConfig = ErrorCorrectionConfig());
void generateMTSDF_legacy(const BitmapRef<float, 4> &output, const Shape &shape, double range, const Vector2 &scale, const Vector2 &translate, ErrorCorrectionConfig errorCorrectionConfig = ErrorCorrectionConfig());
}
|
0 | repos/c2z/use_cases/msdfgen/include | repos/c2z/use_cases/msdfgen/include/core/arithmetics.hpp |
#pragma once
#include <cmath>
namespace msdfgen {
/// Returns the smaller of the arguments.
template <typename T>
inline T min(T a, T b) {
return b < a ? b : a;
}
/// Returns the larger of the arguments.
template <typename T>
inline T max(T a, T b) {
return a < b ? b : a;
}
/// Returns the middle of the three values (the median).
template <typename T>
inline T median(T a, T b, T c) {
return max(min(a, b), min(max(a, b), c));
}
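// Example (illustrative): applying median to an MSDF's RGB channels
// reconstructs the signed distance, e.g. float sd = median(r, g, b);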
/// Returns the weighted average of a and b.
template <typename T, typename S>
inline T mix(T a, T b, S weight) {
return T((S(1)-weight)*a+weight*b);
}
/// Clamps the number to the interval from 0 to 1.
template <typename T>
inline T clamp(T n) {
return n >= T(0) && n <= T(1) ? n : T(n > T(0));
}
/// Clamps the number to the interval from 0 to b.
template <typename T>
inline T clamp(T n, T b) {
return n >= T(0) && n <= b ? n : T(n > T(0))*b;
}
/// Clamps the number to the interval from a to b.
template <typename T>
inline T clamp(T n, T a, T b) {
return n >= a && n <= b ? n : n < a ? a : b;
}
/// Returns 1 for positive values, -1 for negative values, and 0 for zero.
template <typename T>
inline int sign(T n) {
return (T(0) < n)-(n < T(0));
}
/// Returns 1 for non-negative values and -1 for negative values.
template <typename T>
inline int nonZeroSign(T n) {
return 2*(n > T(0))-1;
}
}
|
0 | repos/c2z/use_cases/msdfgen/include | repos/c2z/use_cases/msdfgen/include/core/render-sdf.h |
#pragma once
#include "Vector2.h"
#include "BitmapRef.hpp"
namespace msdfgen {
/// Reconstructs the shape's appearance into output from the distance field sdf.
void renderSDF(const BitmapRef<float, 1> &output, const BitmapConstRef<float, 1> &sdf, double pxRange = 0, float midValue = .5f);
void renderSDF(const BitmapRef<float, 3> &output, const BitmapConstRef<float, 1> &sdf, double pxRange = 0, float midValue = .5f);
void renderSDF(const BitmapRef<float, 1> &output, const BitmapConstRef<float, 3> &sdf, double pxRange = 0, float midValue = .5f);
void renderSDF(const BitmapRef<float, 3> &output, const BitmapConstRef<float, 3> &sdf, double pxRange = 0, float midValue = .5f);
void renderSDF(const BitmapRef<float, 1> &output, const BitmapConstRef<float, 4> &sdf, double pxRange = 0, float midValue = .5f);
void renderSDF(const BitmapRef<float, 4> &output, const BitmapConstRef<float, 4> &sdf, double pxRange = 0, float midValue = .5f);
/// Snaps the values of the floating-point bitmaps into one of the 256 values representable in a standard 8-bit bitmap.
void simulate8bit(const BitmapRef<float, 1> &bitmap);
void simulate8bit(const BitmapRef<float, 3> &bitmap);
void simulate8bit(const BitmapRef<float, 4> &bitmap);
}
|
0 | repos/c2z/use_cases/msdfgen/include | repos/c2z/use_cases/msdfgen/include/core/edge-coloring.h |
#pragma once
#include "Shape.h"
#define MSDFGEN_EDGE_LENGTH_PRECISION 4
namespace msdfgen {
/** Assigns colors to edges of the shape in accordance to the multi-channel distance field technique.
* May split some edges if necessary.
* angleThreshold specifies the maximum angle (in radians) to be considered a corner, for example 3 (~172 degrees).
* Values below 1/2 PI will be treated as the external angle.
*/
void edgeColoringSimple(Shape &shape, double angleThreshold, unsigned long long seed = 0);
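// Illustrative call (sketch): color a normalized shape before generating an MSDF:
//
//   shape.normalize();
//   edgeColoringSimple(shape, 3.0); // ~3 rad corner threshold, default seed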
/** The alternative "ink trap" coloring strategy is designed for better results with typefaces
* that use ink traps as a design feature. It guarantees that even if all edges that are shorter than
* both their neighboring edges are removed, the coloring remains consistent with the established rules.
*/
void edgeColoringInkTrap(Shape &shape, double angleThreshold, unsigned long long seed = 0);
/** The alternative coloring by distance tries to use different colors for edges that are close together.
* This should theoretically be the best strategy on average. However, since it needs to compute the distance
* between all pairs of edges, and perform a graph optimization task, it is much slower than the rest.
*/
void edgeColoringByDistance(Shape &shape, double angleThreshold, unsigned long long seed = 0);
}
|
0 | repos/c2z/use_cases/msdfgen/include | repos/c2z/use_cases/msdfgen/include/core/Bitmap.hpp |
#include "Bitmap.h"
#include <cstdlib>
#include <cstring>
namespace msdfgen {
template <typename T, int N>
Bitmap<T, N>::Bitmap() : pixels(NULL), w(0), h(0) { }
template <typename T, int N>
Bitmap<T, N>::Bitmap(int width, int height) : w(width), h(height) {
pixels = new T[N*w*h];
}
template <typename T, int N>
Bitmap<T, N>::Bitmap(const BitmapConstRef<T, N> &orig) : w(orig.width), h(orig.height) {
pixels = new T[N*w*h];
memcpy(pixels, orig.pixels, sizeof(T)*N*w*h);
}
template <typename T, int N>
Bitmap<T, N>::Bitmap(const Bitmap<T, N> &orig) : w(orig.w), h(orig.h) {
pixels = new T[N*w*h];
memcpy(pixels, orig.pixels, sizeof(T)*N*w*h);
}
#ifdef MSDFGEN_USE_CPP11
template <typename T, int N>
Bitmap<T, N>::Bitmap(Bitmap<T, N> &&orig) : pixels(orig.pixels), w(orig.w), h(orig.h) {
orig.pixels = NULL;
orig.w = 0, orig.h = 0;
}
#endif
template <typename T, int N>
Bitmap<T, N>::~Bitmap() {
delete [] pixels;
}
template <typename T, int N>
Bitmap<T, N> & Bitmap<T, N>::operator=(const BitmapConstRef<T, N> &orig) {
if (pixels != orig.pixels) {
delete [] pixels;
w = orig.width, h = orig.height;
pixels = new T[N*w*h];
memcpy(pixels, orig.pixels, sizeof(T)*N*w*h);
}
return *this;
}
template <typename T, int N>
Bitmap<T, N> & Bitmap<T, N>::operator=(const Bitmap<T, N> &orig) {
if (this != &orig) {
delete [] pixels;
w = orig.w, h = orig.h;
pixels = new T[N*w*h];
memcpy(pixels, orig.pixels, sizeof(T)*N*w*h);
}
return *this;
}
#ifdef MSDFGEN_USE_CPP11
template <typename T, int N>
Bitmap<T, N> & Bitmap<T, N>::operator=(Bitmap<T, N> &&orig) {
if (this != &orig) {
delete [] pixels;
pixels = orig.pixels;
w = orig.w, h = orig.h;
orig.pixels = NULL;
}
return *this;
}
#endif
template <typename T, int N>
int Bitmap<T, N>::width() const {
return w;
}
template <typename T, int N>
int Bitmap<T, N>::height() const {
return h;
}
template <typename T, int N>
T * Bitmap<T, N>::operator()(int x, int y) {
return pixels+N*(w*y+x);
}
template <typename T, int N>
const T * Bitmap<T, N>::operator()(int x, int y) const {
return pixels+N*(w*y+x);
}
template <typename T, int N>
Bitmap<T, N>::operator T *() {
return pixels;
}
template <typename T, int N>
Bitmap<T, N>::operator const T *() const {
return pixels;
}
template <typename T, int N>
Bitmap<T, N>::operator BitmapRef<T, N>() {
return BitmapRef<T, N>(pixels, w, h);
}
template <typename T, int N>
Bitmap<T, N>::operator BitmapConstRef<T, N>() const {
return BitmapConstRef<T, N>(pixels, w, h);
}
}
|
0 | repos/c2z/use_cases/msdfgen/include | repos/c2z/use_cases/msdfgen/include/core/save-tiff.h |
#pragma once
#include "BitmapRef.hpp"
namespace msdfgen {
/// Saves the bitmap as an uncompressed floating-point TIFF file.
bool saveTiff(const BitmapConstRef<float, 1> &bitmap, const char *filename);
bool saveTiff(const BitmapConstRef<float, 3> &bitmap, const char *filename);
bool saveTiff(const BitmapConstRef<float, 4> &bitmap, const char *filename);
}
|
0 | repos/c2z/use_cases/msdfgen/include | repos/c2z/use_cases/msdfgen/include/core/equation-solver.h |
#pragma once
namespace msdfgen {
// ax^2 + bx + c = 0
int solveQuadratic(double x[2], double a, double b, double c);
// ax^3 + bx^2 + cx + d = 0
int solveCubic(double x[3], double a, double b, double c, double d);
}
|
0 | repos/c2z/use_cases/msdfgen/include | repos/c2z/use_cases/msdfgen/include/core/rasterization.h |
#pragma once
#include "Vector2.h"
#include "Shape.h"
#include "Projection.h"
#include "Scanline.h"
#include "BitmapRef.hpp"
namespace msdfgen {
/// Rasterizes the shape into a monochrome bitmap.
void rasterize(const BitmapRef<float, 1> &output, const Shape &shape, const Projection &projection, FillRule fillRule = FILL_NONZERO);
/// Fixes the sign of the input signed distance field, so that it matches the shape's rasterized fill.
void distanceSignCorrection(const BitmapRef<float, 1> &sdf, const Shape &shape, const Projection &projection, FillRule fillRule = FILL_NONZERO);
void distanceSignCorrection(const BitmapRef<float, 3> &sdf, const Shape &shape, const Projection &projection, FillRule fillRule = FILL_NONZERO);
void distanceSignCorrection(const BitmapRef<float, 4> &sdf, const Shape &shape, const Projection &projection, FillRule fillRule = FILL_NONZERO);
// Old versions of the function API, kept for backwards compatibility
void rasterize(const BitmapRef<float, 1> &output, const Shape &shape, const Vector2 &scale, const Vector2 &translate, FillRule fillRule = FILL_NONZERO);
void distanceSignCorrection(const BitmapRef<float, 1> &sdf, const Shape &shape, const Vector2 &scale, const Vector2 &translate, FillRule fillRule = FILL_NONZERO);
void distanceSignCorrection(const BitmapRef<float, 3> &sdf, const Shape &shape, const Vector2 &scale, const Vector2 &translate, FillRule fillRule = FILL_NONZERO);
void distanceSignCorrection(const BitmapRef<float, 4> &sdf, const Shape &shape, const Vector2 &scale, const Vector2 &translate, FillRule fillRule = FILL_NONZERO);
}
|
0 | repos/c2z/use_cases/msdfgen/include | repos/c2z/use_cases/msdfgen/include/core/Contour.h |
#pragma once
#include <vector>
#include "EdgeHolder.h"
namespace msdfgen {
/// A single closed contour of a shape.
class Contour {
public:
/// The sequence of edges that make up the contour.
std::vector<EdgeHolder> edges;
/// Adds an edge to the contour.
void addEdge(const EdgeHolder &edge);
#ifdef MSDFGEN_USE_CPP11
void addEdge(EdgeHolder &&edge);
#endif
/// Creates a new edge in the contour and returns its reference.
EdgeHolder & addEdge();
/// Adjusts the bounding box to fit the contour.
void bound(double &l, double &b, double &r, double &t) const;
/// Adjusts the bounding box to fit the contour border's mitered corners.
void boundMiters(double &l, double &b, double &r, double &t, double border, double miterLimit, int polarity) const;
/// Computes the winding of the contour. Returns 1 if positive, -1 if negative.
int winding() const;
/// Reverses the sequence of edges on the contour.
void reverse();
};
}
|
0 | repos/c2z/use_cases/msdfgen/include | repos/c2z/use_cases/msdfgen/include/core/edge-selectors.h |
#pragma once
#include "Vector2.h"
#include "SignedDistance.h"
#include "edge-segments.h"
namespace msdfgen {
struct MultiDistance {
double r, g, b;
};
struct MultiAndTrueDistance : MultiDistance {
double a;
};
/// Selects the nearest edge by its true distance.
class TrueDistanceSelector {
public:
typedef double DistanceType;
struct EdgeCache {
Point2 point;
double absDistance;
EdgeCache();
};
void reset(const Point2 &p);
void addEdge(EdgeCache &cache, const EdgeSegment *prevEdge, const EdgeSegment *edge, const EdgeSegment *nextEdge);
void merge(const TrueDistanceSelector &other);
DistanceType distance() const;
private:
Point2 p;
SignedDistance minDistance;
};
class PseudoDistanceSelectorBase {
public:
struct EdgeCache {
Point2 point;
double absDistance;
double aDomainDistance, bDomainDistance;
double aPseudoDistance, bPseudoDistance;
EdgeCache();
};
static bool getPseudoDistance(double &distance, const Vector2 &ep, const Vector2 &edgeDir);
PseudoDistanceSelectorBase();
void reset(double delta);
bool isEdgeRelevant(const EdgeCache &cache, const EdgeSegment *edge, const Point2 &p) const;
void addEdgeTrueDistance(const EdgeSegment *edge, const SignedDistance &distance, double param);
void addEdgePseudoDistance(double distance);
void merge(const PseudoDistanceSelectorBase &other);
double computeDistance(const Point2 &p) const;
SignedDistance trueDistance() const;
private:
SignedDistance minTrueDistance;
double minNegativePseudoDistance;
double minPositivePseudoDistance;
const EdgeSegment *nearEdge;
double nearEdgeParam;
};
/// Selects the nearest edge by its pseudo-distance.
class PseudoDistanceSelector : public PseudoDistanceSelectorBase {
public:
typedef double DistanceType;
void reset(const Point2 &p);
void addEdge(EdgeCache &cache, const EdgeSegment *prevEdge, const EdgeSegment *edge, const EdgeSegment *nextEdge);
DistanceType distance() const;
private:
Point2 p;
};
/// Selects the nearest edge for each of the three channels by its pseudo-distance.
class MultiDistanceSelector {
public:
typedef MultiDistance DistanceType;
typedef PseudoDistanceSelectorBase::EdgeCache EdgeCache;
void reset(const Point2 &p);
void addEdge(EdgeCache &cache, const EdgeSegment *prevEdge, const EdgeSegment *edge, const EdgeSegment *nextEdge);
void merge(const MultiDistanceSelector &other);
DistanceType distance() const;
SignedDistance trueDistance() const;
private:
Point2 p;
PseudoDistanceSelectorBase r, g, b;
};
/// Selects the nearest edge for each of the three color channels by its pseudo-distance and by true distance for the alpha channel.
class MultiAndTrueDistanceSelector : public MultiDistanceSelector {
public:
typedef MultiAndTrueDistance DistanceType;
DistanceType distance() const;
};
}
|
0 | repos/c2z/use_cases/msdfgen/include | repos/c2z/use_cases/msdfgen/include/core/EdgeHolder.h |
#pragma once
#include "edge-segments.h"
namespace msdfgen {
/// Container for a single edge of dynamic type.
class EdgeHolder {
public:
/// Swaps the edges held by a and b.
static void swap(EdgeHolder &a, EdgeHolder &b);
EdgeHolder();
EdgeHolder(EdgeSegment *segment);
EdgeHolder(Point2 p0, Point2 p1, EdgeColor edgeColor = WHITE);
EdgeHolder(Point2 p0, Point2 p1, Point2 p2, EdgeColor edgeColor = WHITE);
EdgeHolder(Point2 p0, Point2 p1, Point2 p2, Point2 p3, EdgeColor edgeColor = WHITE);
EdgeHolder(const EdgeHolder &orig);
#ifdef MSDFGEN_USE_CPP11
EdgeHolder(EdgeHolder &&orig);
#endif
~EdgeHolder();
EdgeHolder & operator=(const EdgeHolder &orig);
#ifdef MSDFGEN_USE_CPP11
EdgeHolder & operator=(EdgeHolder &&orig);
#endif
EdgeSegment & operator*();
const EdgeSegment & operator*() const;
EdgeSegment * operator->();
const EdgeSegment * operator->() const;
operator EdgeSegment *();
operator const EdgeSegment *() const;
private:
EdgeSegment *edgeSegment;
};
}
|
0 | repos/c2z/use_cases/msdfgen/include | repos/c2z/use_cases/msdfgen/include/core/generator-config.h |
#pragma once
#include <cstddef>
#include "BitmapRef.hpp"
#ifndef MSDFGEN_PUBLIC
#define MSDFGEN_PUBLIC // for DLL import/export
#endif
namespace msdfgen {
/// The configuration of the MSDF error correction pass.
struct ErrorCorrectionConfig {
/// The default value of minDeviationRatio.
static MSDFGEN_PUBLIC const double defaultMinDeviationRatio;
/// The default value of minImproveRatio.
static MSDFGEN_PUBLIC const double defaultMinImproveRatio;
/// Mode of operation.
enum Mode {
/// Skips error correction pass.
DISABLED,
/// Corrects all discontinuities of the distance field regardless of whether edges are adversely affected.
INDISCRIMINATE,
/// Corrects artifacts at edges and other discontinuous distances only if it does not affect edges or corners.
EDGE_PRIORITY,
/// Only corrects artifacts at edges.
EDGE_ONLY
} mode;
/// Configuration of whether to use an algorithm that computes the exact shape distance at the positions of suspected artifacts. This algorithm can be much slower.
enum DistanceCheckMode {
/// Never computes exact shape distance.
DO_NOT_CHECK_DISTANCE,
/// Only computes exact shape distance at edges. Provides a good balance between speed and precision.
CHECK_DISTANCE_AT_EDGE,
/// Computes and compares the exact shape distance for each suspected artifact.
ALWAYS_CHECK_DISTANCE
} distanceCheckMode;
/// The minimum ratio between the actual and maximum expected distance delta to be considered an error.
double minDeviationRatio;
/// The minimum ratio between the pre-correction distance error and the post-correction distance error. Has no effect for DO_NOT_CHECK_DISTANCE.
double minImproveRatio;
/// An optional buffer to avoid dynamic allocation. Must have at least as many bytes as the MSDF has pixels.
byte *buffer;
inline explicit ErrorCorrectionConfig(Mode mode = EDGE_PRIORITY, DistanceCheckMode distanceCheckMode = CHECK_DISTANCE_AT_EDGE, double minDeviationRatio = defaultMinDeviationRatio, double minImproveRatio = defaultMinImproveRatio, byte *buffer = NULL) : mode(mode), distanceCheckMode(distanceCheckMode), minDeviationRatio(minDeviationRatio), minImproveRatio(minImproveRatio), buffer(buffer) { }
};
/// The configuration of the distance field generator algorithm.
struct GeneratorConfig {
/// Specifies whether to use the version of the algorithm that supports overlapping contours with the same winding. May be set to false to improve performance when no such contours are present.
bool overlapSupport;
inline explicit GeneratorConfig(bool overlapSupport = true) : overlapSupport(overlapSupport) { }
};
/// The configuration of the multi-channel distance field generator algorithm.
struct MSDFGeneratorConfig : GeneratorConfig {
/// Configuration of the error correction pass.
ErrorCorrectionConfig errorCorrection;
inline MSDFGeneratorConfig() { }
inline explicit MSDFGeneratorConfig(bool overlapSupport, const ErrorCorrectionConfig &errorCorrection = ErrorCorrectionConfig()) : GeneratorConfig(overlapSupport), errorCorrection(errorCorrection) { }
};
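// A minimal usage sketch (hypothetical values): full error correction with
// overlap support disabled, for shapes known to have no overlapping contours.
//
//   ErrorCorrectionConfig ec(ErrorCorrectionConfig::EDGE_PRIORITY, ErrorCorrectionConfig::CHECK_DISTANCE_AT_EDGE);
//   MSDFGeneratorConfig config(false, ec);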
}
|
0 | repos/c2z/use_cases/msdfgen/include | repos/c2z/use_cases/msdfgen/include/core/edge-segments.h |
#pragma once
#include "Vector2.h"
#include "SignedDistance.h"
#include "EdgeColor.h"
namespace msdfgen {
// Parameters for iterative search of closest point on a cubic Bezier curve. Increase for higher precision.
#define MSDFGEN_CUBIC_SEARCH_STARTS 4
#define MSDFGEN_CUBIC_SEARCH_STEPS 4
/// An abstract edge segment.
class EdgeSegment {
public:
EdgeColor color;
EdgeSegment(EdgeColor edgeColor = WHITE) : color(edgeColor) { }
virtual ~EdgeSegment() { }
/// Creates a copy of the edge segment.
virtual EdgeSegment * clone() const = 0;
/// Returns the point on the edge specified by the parameter (between 0 and 1).
virtual Point2 point(double param) const = 0;
/// Returns the direction the edge has at the point specified by the parameter.
virtual Vector2 direction(double param) const = 0;
/// Returns the change of direction (second derivative) at the point specified by the parameter.
virtual Vector2 directionChange(double param) const = 0;
/// Returns the minimum signed distance between origin and the edge.
virtual SignedDistance signedDistance(Point2 origin, double &param) const = 0;
/// Converts a previously retrieved signed distance from origin to pseudo-distance.
virtual void distanceToPseudoDistance(SignedDistance &distance, Point2 origin, double param) const;
/// Outputs a list of (at most three) intersections (their X coordinates) with an infinite horizontal scanline at y and returns how many there are.
virtual int scanlineIntersections(double x[3], int dy[3], double y) const = 0;
/// Adjusts the bounding box to fit the edge segment.
virtual void bound(double &l, double &b, double &r, double &t) const = 0;
/// Reverses the edge (swaps its start point and end point).
virtual void reverse() = 0;
/// Moves the start point of the edge segment.
virtual void moveStartPoint(Point2 to) = 0;
/// Moves the end point of the edge segment.
virtual void moveEndPoint(Point2 to) = 0;
/// Splits the edge segment into thirds which together represent the original edge.
virtual void splitInThirds(EdgeSegment *&part1, EdgeSegment *&part2, EdgeSegment *&part3) const = 0;
};
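// Usage sketch (assumes an existing edge segment): sampling the midpoint and
// unit tangent of an edge through the virtual interface.
//
//   const EdgeSegment *e = ...; // e.g. a LinearSegment or CubicSegment
//   Point2 mid = e->point(.5);
//   Vector2 tangent = e->direction(.5).normalize();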
/// A line segment.
class LinearSegment : public EdgeSegment {
public:
Point2 p[2];
LinearSegment(Point2 p0, Point2 p1, EdgeColor edgeColor = WHITE);
LinearSegment * clone() const;
Point2 point(double param) const;
Vector2 direction(double param) const;
Vector2 directionChange(double param) const;
double length() const;
SignedDistance signedDistance(Point2 origin, double &param) const;
int scanlineIntersections(double x[3], int dy[3], double y) const;
void bound(double &l, double &b, double &r, double &t) const;
void reverse();
void moveStartPoint(Point2 to);
void moveEndPoint(Point2 to);
void splitInThirds(EdgeSegment *&part1, EdgeSegment *&part2, EdgeSegment *&part3) const;
};
/// A quadratic Bezier curve.
class QuadraticSegment : public EdgeSegment {
public:
Point2 p[3];
QuadraticSegment(Point2 p0, Point2 p1, Point2 p2, EdgeColor edgeColor = WHITE);
QuadraticSegment * clone() const;
Point2 point(double param) const;
Vector2 direction(double param) const;
Vector2 directionChange(double param) const;
double length() const;
SignedDistance signedDistance(Point2 origin, double &param) const;
int scanlineIntersections(double x[3], int dy[3], double y) const;
void bound(double &l, double &b, double &r, double &t) const;
void reverse();
void moveStartPoint(Point2 to);
void moveEndPoint(Point2 to);
void splitInThirds(EdgeSegment *&part1, EdgeSegment *&part2, EdgeSegment *&part3) const;
EdgeSegment * convertToCubic() const;
};
/// A cubic Bezier curve.
class CubicSegment : public EdgeSegment {
public:
Point2 p[4];
CubicSegment(Point2 p0, Point2 p1, Point2 p2, Point2 p3, EdgeColor edgeColor = WHITE);
CubicSegment * clone() const;
Point2 point(double param) const;
Vector2 direction(double param) const;
Vector2 directionChange(double param) const;
SignedDistance signedDistance(Point2 origin, double &param) const;
int scanlineIntersections(double x[3], int dy[3], double y) const;
void bound(double &l, double &b, double &r, double &t) const;
void reverse();
void moveStartPoint(Point2 to);
void moveEndPoint(Point2 to);
void splitInThirds(EdgeSegment *&part1, EdgeSegment *&part2, EdgeSegment *&part3) const;
void deconverge(int param, double amount);
};
}
|
0 | repos/c2z/use_cases/msdfgen/include | repos/c2z/use_cases/msdfgen/include/core/Bitmap.h |
#pragma once
#include "BitmapRef.hpp"
namespace msdfgen {
/// A 2D image bitmap with N channels of type T. Pixel memory is managed by the class.
template <typename T, int N = 1>
class Bitmap {
public:
Bitmap();
Bitmap(int width, int height);
Bitmap(const BitmapConstRef<T, N> &orig);
Bitmap(const Bitmap<T, N> &orig);
#ifdef MSDFGEN_USE_CPP11
Bitmap(Bitmap<T, N> &&orig);
#endif
~Bitmap();
Bitmap<T, N> & operator=(const BitmapConstRef<T, N> &orig);
Bitmap<T, N> & operator=(const Bitmap<T, N> &orig);
#ifdef MSDFGEN_USE_CPP11
Bitmap<T, N> & operator=(Bitmap<T, N> &&orig);
#endif
/// Bitmap width in pixels.
int width() const;
/// Bitmap height in pixels.
int height() const;
T * operator()(int x, int y);
const T * operator()(int x, int y) const;
#ifdef MSDFGEN_USE_CPP11
explicit operator T *();
explicit operator const T *() const;
#else
operator T *();
operator const T *() const;
#endif
operator BitmapRef<T, N>();
operator BitmapConstRef<T, N>() const;
private:
T *pixels;
int w, h;
};
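// Usage sketch (hypothetical dimensions): allocate a 3-channel float bitmap
// and address a pixel; the conversion operators let it be passed wherever a
// BitmapRef<float, 3> or BitmapConstRef<float, 3> is expected.
//
//   Bitmap<float, 3> msdf(32, 32);
//   msdf(0, 0)[0] = 1.f; // first channel of pixel (0, 0)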
}
#include "Bitmap.hpp"
|
0 | repos/c2z/use_cases/msdfgen/include | repos/c2z/use_cases/msdfgen/include/core/pixel-conversion.hpp |
#pragma once
#include "arithmetics.hpp"
namespace msdfgen {
typedef unsigned char byte;
inline byte pixelFloatToByte(float x) {
return byte(clamp(256.f*x, 255.f));
}
inline float pixelByteToFloat(byte x) {
return 1.f/255.f*float(x);
}
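// For example, pixelFloatToByte(1.f) == 255 and pixelByteToFloat(255) == 1.f;
// float values outside [0, 1] are clamped to the representable byte range.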
}
|
0 | repos/c2z/use_cases/msdfgen/include | repos/c2z/use_cases/msdfgen/include/core/Projection.h |
#pragma once
#include "Vector2.h"
namespace msdfgen {
/// A transformation from shape coordinates to pixel coordinates.
class Projection {
public:
Projection();
Projection(const Vector2 &scale, const Vector2 &translate);
/// Converts the shape coordinate to pixel coordinate.
Point2 project(const Point2 &coord) const;
/// Converts the pixel coordinate to shape coordinate.
Point2 unproject(const Point2 &coord) const;
/// Converts the vector to pixel coordinate space.
Vector2 projectVector(const Vector2 &vector) const;
/// Converts the vector from pixel coordinate space.
Vector2 unprojectVector(const Vector2 &vector) const;
/// Converts the X-coordinate from shape to pixel coordinate space.
double projectX(double x) const;
/// Converts the Y-coordinate from shape to pixel coordinate space.
double projectY(double y) const;
/// Converts the X-coordinate from pixel to shape coordinate space.
double unprojectX(double x) const;
/// Converts the Y-coordinate from pixel to shape coordinate space.
double unprojectY(double y) const;
private:
Vector2 scale;
Vector2 translate;
};
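// Usage sketch (assuming the convention project(p) == scale*(p+translate)):
//
//   Projection proj(Vector2(1/64., 1/64.), Vector2(4, 4));
//   Point2 px = proj.project(Point2(60, 60)); // -> (1, 1)
//   Point2 shapeCoord = proj.unproject(px);   // -> (60, 60)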
}
|
0 | repos/c2z/use_cases/msdfgen/include | repos/c2z/use_cases/msdfgen/include/core/msdf-error-correction.h |
#pragma once
#include "Vector2.h"
#include "Projection.h"
#include "Shape.h"
#include "BitmapRef.hpp"
#include "generator-config.h"
namespace msdfgen {
/// Predicts potential artifacts caused by the interpolation of the MSDF and corrects them by converting nearby texels to single-channel.
void msdfErrorCorrection(const BitmapRef<float, 3> &sdf, const Shape &shape, const Projection &projection, double range, const MSDFGeneratorConfig &config = MSDFGeneratorConfig());
void msdfErrorCorrection(const BitmapRef<float, 4> &sdf, const Shape &shape, const Projection &projection, double range, const MSDFGeneratorConfig &config = MSDFGeneratorConfig());
/// Applies the simplified error correction to all discontinuous distances (INDISCRIMINATE mode). Does not need shape or translation.
void msdfFastDistanceErrorCorrection(const BitmapRef<float, 3> &sdf, const Projection &projection, double range, double minDeviationRatio = ErrorCorrectionConfig::defaultMinDeviationRatio);
void msdfFastDistanceErrorCorrection(const BitmapRef<float, 4> &sdf, const Projection &projection, double range, double minDeviationRatio = ErrorCorrectionConfig::defaultMinDeviationRatio);
/// Applies the simplified error correction to edges only (EDGE_ONLY mode). Does not need shape or translation.
void msdfFastEdgeErrorCorrection(const BitmapRef<float, 3> &sdf, const Projection &projection, double range, double minDeviationRatio = ErrorCorrectionConfig::defaultMinDeviationRatio);
void msdfFastEdgeErrorCorrection(const BitmapRef<float, 4> &sdf, const Projection &projection, double range, double minDeviationRatio = ErrorCorrectionConfig::defaultMinDeviationRatio);
/// The original version of the error correction algorithm.
void msdfErrorCorrection_legacy(const BitmapRef<float, 3> &output, const Vector2 &threshold);
void msdfErrorCorrection_legacy(const BitmapRef<float, 4> &output, const Vector2 &threshold);
}
|
0 | repos/c2z/use_cases/msdfgen/include | repos/c2z/use_cases/msdfgen/include/core/sdf-error-estimation.h |
#pragma once
#include "Vector2.h"
#include "Shape.h"
#include "Projection.h"
#include "Scanline.h"
#include "BitmapRef.hpp"
namespace msdfgen {
/// Analytically constructs a scanline at y evaluating fill by linear interpolation of the SDF.
void scanlineSDF(Scanline &line, const BitmapConstRef<float, 1> &sdf, const Projection &projection, double y, bool inverseYAxis = false);
void scanlineSDF(Scanline &line, const BitmapConstRef<float, 3> &sdf, const Projection &projection, double y, bool inverseYAxis = false);
void scanlineSDF(Scanline &line, const BitmapConstRef<float, 4> &sdf, const Projection &projection, double y, bool inverseYAxis = false);
/// Estimates the portion of the area that will be filled incorrectly when rendering using the SDF.
double estimateSDFError(const BitmapConstRef<float, 1> &sdf, const Shape &shape, const Projection &projection, int scanlinesPerRow, FillRule fillRule = FILL_NONZERO);
double estimateSDFError(const BitmapConstRef<float, 3> &sdf, const Shape &shape, const Projection &projection, int scanlinesPerRow, FillRule fillRule = FILL_NONZERO);
double estimateSDFError(const BitmapConstRef<float, 4> &sdf, const Shape &shape, const Projection &projection, int scanlinesPerRow, FillRule fillRule = FILL_NONZERO);
// Old versions of the function API, kept for backwards compatibility
void scanlineSDF(Scanline &line, const BitmapConstRef<float, 1> &sdf, const Vector2 &scale, const Vector2 &translate, bool inverseYAxis, double y);
void scanlineSDF(Scanline &line, const BitmapConstRef<float, 3> &sdf, const Vector2 &scale, const Vector2 &translate, bool inverseYAxis, double y);
void scanlineSDF(Scanline &line, const BitmapConstRef<float, 4> &sdf, const Vector2 &scale, const Vector2 &translate, bool inverseYAxis, double y);
double estimateSDFError(const BitmapConstRef<float, 1> &sdf, const Shape &shape, const Vector2 &scale, const Vector2 &translate, int scanlinesPerRow, FillRule fillRule = FILL_NONZERO);
double estimateSDFError(const BitmapConstRef<float, 3> &sdf, const Shape &shape, const Vector2 &scale, const Vector2 &translate, int scanlinesPerRow, FillRule fillRule = FILL_NONZERO);
double estimateSDFError(const BitmapConstRef<float, 4> &sdf, const Shape &shape, const Vector2 &scale, const Vector2 &translate, int scanlinesPerRow, FillRule fillRule = FILL_NONZERO);
}
|
0 | repos/c2z/use_cases/msdfgen/include | repos/c2z/use_cases/msdfgen/include/core/SignedDistance.h |
#pragma once
namespace msdfgen {
/// Represents a signed distance and alignment, which together can be compared to uniquely determine the closest edge segment.
class SignedDistance {
public:
double distance;
double dot;
SignedDistance();
SignedDistance(double dist, double d);
friend bool operator<(SignedDistance a, SignedDistance b);
friend bool operator>(SignedDistance a, SignedDistance b);
friend bool operator<=(SignedDistance a, SignedDistance b);
friend bool operator>=(SignedDistance a, SignedDistance b);
};
}
|
0 | repos/c2z/use_cases/msdfgen/include | repos/c2z/use_cases/msdfgen/include/core/Scanline.h |
#pragma once
#include <vector>
namespace msdfgen {
/// Fill rule dictates how the intersection total is interpreted during rasterization.
enum FillRule {
FILL_NONZERO,
FILL_ODD, // "even-odd"
FILL_POSITIVE,
FILL_NEGATIVE
};
/// Resolves the number of intersections into a binary fill value based on the fill rule.
bool interpretFillRule(int intersections, FillRule fillRule);
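// For example, assuming the usual semantics of each rule, an intersection
// total of 2 is filled under FILL_NONZERO and FILL_POSITIVE, but not under
// FILL_ODD or FILL_NEGATIVE.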
/// Represents a horizontal scanline intersecting a shape.
class Scanline {
public:
/// An intersection with the scanline.
struct Intersection {
/// X coordinate.
double x;
/// Normalized Y direction of the oriented edge at the point of intersection.
int direction;
};
static double overlap(const Scanline &a, const Scanline &b, double xFrom, double xTo, FillRule fillRule);
Scanline();
/// Populates the intersection list.
void setIntersections(const std::vector<Intersection> &intersections);
#ifdef MSDFGEN_USE_CPP11
void setIntersections(std::vector<Intersection> &&intersections);
#endif
/// Returns the number of intersections left of x.
int countIntersections(double x) const;
/// Returns the total sign of intersections left of x.
int sumIntersections(double x) const;
/// Decides whether the scanline is filled at x based on fill rule.
bool filled(double x, FillRule fillRule) const;
private:
std::vector<Intersection> intersections;
mutable int lastIndex;
void preprocess();
int moveTo(double x) const;
};
}
|
0 | repos/c2z/use_cases/msdfgen/include | repos/c2z/use_cases/msdfgen/include/core/MSDFErrorCorrection.h |
#pragma once
#include "Projection.h"
#include "Shape.h"
#include "BitmapRef.hpp"
namespace msdfgen {
/// Performs error correction on a computed MSDF to eliminate interpolation artifacts. This is a low-level class; you may want to use the API in msdf-error-correction.h instead.
class MSDFErrorCorrection {
public:
/// Stencil flags.
enum Flags {
/// Texel marked as potentially causing interpolation errors.
ERROR = 1,
/// Texel marked as protected. Protected texels are only given the error flag if they cause inversion artifacts.
PROTECTED = 2
};
MSDFErrorCorrection();
explicit MSDFErrorCorrection(const BitmapRef<byte, 1> &stencil, const Projection &projection, double range);
/// Sets the minimum ratio between the actual and maximum expected distance delta to be considered an error.
void setMinDeviationRatio(double minDeviationRatio);
/// Sets the minimum ratio between the pre-correction distance error and the post-correction distance error.
void setMinImproveRatio(double minImproveRatio);
/// Flags all texels that are interpolated at corners as protected.
void protectCorners(const Shape &shape);
/// Flags all texels that contribute to edges as protected.
template <int N>
void protectEdges(const BitmapConstRef<float, N> &sdf);
/// Flags all texels as protected.
void protectAll();
/// Flags texels that are expected to cause interpolation artifacts based on analysis of the SDF only.
template <int N>
void findErrors(const BitmapConstRef<float, N> &sdf);
/// Flags texels that are expected to cause interpolation artifacts based on analysis of the SDF and comparison with the exact shape distance.
template <template <typename> class ContourCombiner, int N>
void findErrors(const BitmapConstRef<float, N> &sdf, const Shape &shape);
/// Modifies the MSDF so that all texels with the error flag are converted to single-channel.
template <int N>
void apply(const BitmapRef<float, N> &sdf) const;
/// Returns the stencil in its current state (see Flags).
BitmapConstRef<byte, 1> getStencil() const;
private:
BitmapRef<byte, 1> stencil;
Projection projection;
double invRange;
double minDeviationRatio;
double minImproveRatio;
};
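// A sketch of the typical invocation order (see msdf-error-correction.h for
// the high-level API; stencil, projection, range, shape and msdf are assumed
// to exist):
//
//   MSDFErrorCorrection ec(stencil, projection, range);
//   ec.protectCorners(shape);
//   ec.protectEdges<3>(msdf);
//   ec.findErrors<3>(msdf);
//   ec.apply(msdf);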
}
|
0 | repos/c2z/use_cases/msdfgen/include | repos/c2z/use_cases/msdfgen/include/core/BitmapRef.hpp |
#pragma once
#include <cstddef>
namespace msdfgen {
typedef unsigned char byte;
/// Reference to a 2D image bitmap or a buffer acting as one. Pixel storage not owned or managed by the object.
template <typename T, int N = 1>
struct BitmapRef {
T *pixels;
int width, height;
inline BitmapRef() : pixels(NULL), width(0), height(0) { }
inline BitmapRef(T *pixels, int width, int height) : pixels(pixels), width(width), height(height) { }
inline T * operator()(int x, int y) const {
return pixels+N*(width*y+x);
}
};
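// For example, given BitmapRef<float, 3> ref, ref(x, y) points at the three
// consecutive floats of the pixel in column x of row y (rows are stored
// contiguously, N values per pixel).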
/// Constant reference to a 2D image bitmap or a buffer acting as one. Pixel storage not owned or managed by the object.
template <typename T, int N = 1>
struct BitmapConstRef {
const T *pixels;
int width, height;
inline BitmapConstRef() : pixels(NULL), width(0), height(0) { }
inline BitmapConstRef(const T *pixels, int width, int height) : pixels(pixels), width(width), height(height) { }
inline BitmapConstRef(const BitmapRef<T, N> &orig) : pixels(orig.pixels), width(orig.width), height(orig.height) { }
inline const T * operator()(int x, int y) const {
return pixels+N*(width*y+x);
}
};
}
|
0 | repos/c2z/use_cases/msdfgen/include | repos/c2z/use_cases/msdfgen/include/core/shape-description.h |
#pragma once
#include <cstdio>
#include "Shape.h"
namespace msdfgen {
/// Deserializes a text description of a vector shape into output.
bool readShapeDescription(FILE *input, Shape &output, bool *colorsSpecified = NULL);
bool readShapeDescription(const char *input, Shape &output, bool *colorsSpecified = NULL);
/// Serializes a shape object into a text description.
bool writeShapeDescription(FILE *output, const Shape &shape);
}
|
0 | repos/c2z/use_cases/msdfgen/include | repos/c2z/use_cases/msdfgen/include/core/Vector2.h |
#pragma once
namespace msdfgen {
/**
* A 2-dimensional Euclidean vector with double precision.
* Implementation based on the Vector2 template from Artery Engine.
* @author Viktor Chlumsky
*/
struct Vector2 {
double x, y;
Vector2(double val = 0);
Vector2(double x, double y);
/// Sets the vector to zero.
void reset();
/// Sets individual elements of the vector.
void set(double x, double y);
/// Returns the vector's length.
double length() const;
/// Returns the angle of the vector in radians (atan2).
double direction() const;
/// Returns the normalized vector - one that has the same direction but unit length.
Vector2 normalize(bool allowZero = false) const;
/// Returns a vector with the same length that is orthogonal to this one.
Vector2 getOrthogonal(bool polarity = true) const;
/// Returns a vector with unit length that is orthogonal to this one.
Vector2 getOrthonormal(bool polarity = true, bool allowZero = false) const;
/// Returns a vector projected along this one.
Vector2 project(const Vector2 &vector, bool positive = false) const;
operator const void *() const;
bool operator!() const;
bool operator==(const Vector2 &other) const;
bool operator!=(const Vector2 &other) const;
Vector2 operator+() const;
Vector2 operator-() const;
Vector2 operator+(const Vector2 &other) const;
Vector2 operator-(const Vector2 &other) const;
Vector2 operator*(const Vector2 &other) const;
Vector2 operator/(const Vector2 &other) const;
Vector2 operator*(double value) const;
Vector2 operator/(double value) const;
Vector2 & operator+=(const Vector2 &other);
Vector2 & operator-=(const Vector2 &other);
Vector2 & operator*=(const Vector2 &other);
Vector2 & operator/=(const Vector2 &other);
Vector2 & operator*=(double value);
Vector2 & operator/=(double value);
/// Dot product of two vectors.
friend double dotProduct(const Vector2 &a, const Vector2 &b);
/// A special version of the cross product for 2D vectors (returns scalar value).
friend double crossProduct(const Vector2 &a, const Vector2 &b);
friend Vector2 operator*(double value, const Vector2 &vector);
friend Vector2 operator/(double value, const Vector2 &vector);
};
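// For example, dotProduct(Vector2(1, 0), Vector2(0, 1)) == 0 and
// crossProduct(Vector2(1, 0), Vector2(0, 1)) == 1.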
/// A vector may also represent a point, which shall be differentiated semantically using the alias Point2.
typedef Vector2 Point2;
}
|
0 | repos/c2z/use_cases/msdfgen/include | repos/c2z/use_cases/msdfgen/include/core/bitmap-interpolation.hpp |
#pragma once
#include "arithmetics.hpp"
#include "Vector2.h"
#include "BitmapRef.hpp"
namespace msdfgen {
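/// Bilinearly interpolates the bitmap at a (possibly fractional) pixel position,
/// clamping the sample coordinates to the bitmap's edges; writes N channel values to output.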
template <typename T, int N>
static void interpolate(T *output, const BitmapConstRef<T, N> &bitmap, Point2 pos) {
pos -= .5;
int l = (int) floor(pos.x);
int b = (int) floor(pos.y);
int r = l+1;
int t = b+1;
double lr = pos.x-l;
double bt = pos.y-b;
l = clamp(l, bitmap.width-1), r = clamp(r, bitmap.width-1);
b = clamp(b, bitmap.height-1), t = clamp(t, bitmap.height-1);
for (int i = 0; i < N; ++i)
output[i] = mix(mix(bitmap(l, b)[i], bitmap(r, b)[i], lr), mix(bitmap(l, t)[i], bitmap(r, t)[i], lr), bt);
}
}
|
0 | repos/c2z/use_cases/msdfgen/include | repos/c2z/use_cases/msdfgen/include/core/EdgeColor.h |
#pragma once
namespace msdfgen {
/// Edge color specifies which color channels an edge belongs to.
enum EdgeColor {
BLACK = 0,
RED = 1,
GREEN = 2,
YELLOW = 3,
BLUE = 4,
MAGENTA = 5,
CYAN = 6,
WHITE = 7
};
}
|
0 | repos/c2z/use_cases/msdfgen/include | repos/c2z/use_cases/msdfgen/include/core/Shape.h |
#pragma once
#include <vector>
#include "Contour.h"
#include "Scanline.h"
namespace msdfgen {
// Threshold of the dot product of adjacent edge directions to be considered convergent.
#define MSDFGEN_CORNER_DOT_EPSILON .000001
// The proportional amount by which a curve's control point will be adjusted to eliminate convergent corners.
#define MSDFGEN_DECONVERGENCE_FACTOR .000001
/// Vector shape representation.
class Shape {
public:
struct Bounds {
double l, b, r, t;
};
/// The list of contours the shape consists of.
std::vector<Contour> contours;
/// Specifies whether the shape uses bottom-to-top (false) or top-to-bottom (true) Y coordinates.
bool inverseYAxis;
Shape();
/// Adds a contour.
void addContour(const Contour &contour);
#ifdef MSDFGEN_USE_CPP11
void addContour(Contour &&contour);
#endif
/// Adds a blank contour and returns its reference.
Contour & addContour();
/// Normalizes the shape geometry for distance field generation.
void normalize();
/// Performs basic checks to determine if the object represents a valid shape.
bool validate() const;
/// Adjusts the bounding box to fit the shape.
void bound(double &l, double &b, double &r, double &t) const;
/// Adjusts the bounding box to fit the shape border's mitered corners.
void boundMiters(double &l, double &b, double &r, double &t, double border, double miterLimit, int polarity) const;
/// Computes the minimum bounding box that fits the shape, optionally with a (mitered) border.
Bounds getBounds(double border = 0, double miterLimit = 0, int polarity = 0) const;
/// Outputs the scanline that intersects the shape at y.
void scanline(Scanline &line, double y) const;
/// Returns the total number of edge segments.
int edgeCount() const;
/// Assumes its contours are unoriented (even-odd fill rule). Attempts to orient them to conform to the non-zero winding rule.
void orientContours();
};
}
|
0 | repos/c2z/use_cases/msdfgen/include | repos/c2z/use_cases/msdfgen/include/core/save-bmp.h |
#pragma once
#include "BitmapRef.hpp"
namespace msdfgen {
/// Saves the bitmap as a BMP file.
bool saveBmp(const BitmapConstRef<byte, 1> &bitmap, const char *filename);
bool saveBmp(const BitmapConstRef<byte, 3> &bitmap, const char *filename);
bool saveBmp(const BitmapConstRef<byte, 4> &bitmap, const char *filename);
bool saveBmp(const BitmapConstRef<float, 1> &bitmap, const char *filename);
bool saveBmp(const BitmapConstRef<float, 3> &bitmap, const char *filename);
bool saveBmp(const BitmapConstRef<float, 4> &bitmap, const char *filename);
}
|
0 | repos/c2z/use_cases/msdfgen/include | repos/c2z/use_cases/msdfgen/include/core/ShapeDistanceFinder.hpp |
#include "ShapeDistanceFinder.h"
namespace msdfgen {
template <class ContourCombiner>
ShapeDistanceFinder<ContourCombiner>::ShapeDistanceFinder(const Shape &shape) : shape(shape), contourCombiner(shape), shapeEdgeCache(shape.edgeCount()) { }
template <class ContourCombiner>
typename ShapeDistanceFinder<ContourCombiner>::DistanceType ShapeDistanceFinder<ContourCombiner>::distance(const Point2 &origin) {
contourCombiner.reset(origin);
typename ContourCombiner::EdgeSelectorType::EdgeCache *edgeCache = &shapeEdgeCache[0];
for (std::vector<Contour>::const_iterator contour = shape.contours.begin(); contour != shape.contours.end(); ++contour) {
if (!contour->edges.empty()) {
typename ContourCombiner::EdgeSelectorType &edgeSelector = contourCombiner.edgeSelector(int(contour-shape.contours.begin()));
const EdgeSegment *prevEdge = contour->edges.size() >= 2 ? *(contour->edges.end()-2) : *contour->edges.begin();
const EdgeSegment *curEdge = contour->edges.back();
for (std::vector<EdgeHolder>::const_iterator edge = contour->edges.begin(); edge != contour->edges.end(); ++edge) {
const EdgeSegment *nextEdge = *edge;
edgeSelector.addEdge(*edgeCache++, prevEdge, curEdge, nextEdge);
prevEdge = curEdge;
curEdge = nextEdge;
}
}
}
return contourCombiner.distance();
}
template <class ContourCombiner>
typename ShapeDistanceFinder<ContourCombiner>::DistanceType ShapeDistanceFinder<ContourCombiner>::oneShotDistance(const Shape &shape, const Point2 &origin) {
ContourCombiner contourCombiner(shape);
contourCombiner.reset(origin);
for (std::vector<Contour>::const_iterator contour = shape.contours.begin(); contour != shape.contours.end(); ++contour) {
if (!contour->edges.empty()) {
typename ContourCombiner::EdgeSelectorType &edgeSelector = contourCombiner.edgeSelector(int(contour-shape.contours.begin()));
const EdgeSegment *prevEdge = contour->edges.size() >= 2 ? *(contour->edges.end()-2) : *contour->edges.begin();
const EdgeSegment *curEdge = contour->edges.back();
for (std::vector<EdgeHolder>::const_iterator edge = contour->edges.begin(); edge != contour->edges.end(); ++edge) {
const EdgeSegment *nextEdge = *edge;
typename ContourCombiner::EdgeSelectorType::EdgeCache dummy;
edgeSelector.addEdge(dummy, prevEdge, curEdge, nextEdge);
prevEdge = curEdge;
curEdge = nextEdge;
}
}
}
return contourCombiner.distance();
}
}
|
0 | repos/c2z/use_cases/msdfgen/include | repos/c2z/use_cases/msdfgen/include/core/ShapeDistanceFinder.h |
#pragma once
#include <vector>
#include "Vector2.h"
#include "edge-selectors.h"
#include "contour-combiners.h"
namespace msdfgen {
/// Finds the distance between a point and a Shape. ContourCombiner dictates the distance metric and its data type.
template <class ContourCombiner>
class ShapeDistanceFinder {
public:
typedef typename ContourCombiner::DistanceType DistanceType;
// Passed shape object must persist until the distance finder is destroyed!
explicit ShapeDistanceFinder(const Shape &shape);
/// Finds the distance from origin. Not thread-safe! Fastest when subsequent queries are close together.
DistanceType distance(const Point2 &origin);
/// Finds the distance between shape and origin. Does not allocate the result cache used to optimize the performance of multiple queries.
static DistanceType oneShotDistance(const Shape &shape, const Point2 &origin);
private:
const Shape &shape;
ContourCombiner contourCombiner;
std::vector<typename ContourCombiner::EdgeSelectorType::EdgeCache> shapeEdgeCache;
};
typedef ShapeDistanceFinder<SimpleContourCombiner<TrueDistanceSelector> > SimpleTrueShapeDistanceFinder;
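// Usage sketch (hypothetical shape and query point):
//
//   SimpleTrueShapeDistanceFinder finder(shape); // shape must outlive finder
//   double d = finder.distance(Point2(.5, .5));
//   // or, without a persistent cache:
//   double d2 = SimpleTrueShapeDistanceFinder::oneShotDistance(shape, Point2(.5, .5));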
}
#include "ShapeDistanceFinder.hpp"
|
0 | repos/c2z/use_cases/msdfgen/include | repos/c2z/use_cases/msdfgen/include/core/contour-combiners.h |
#pragma once
#include "Shape.h"
#include "edge-selectors.h"
namespace msdfgen {
/// Simply selects the nearest contour.
template <class EdgeSelector>
class SimpleContourCombiner {
public:
typedef EdgeSelector EdgeSelectorType;
typedef typename EdgeSelector::DistanceType DistanceType;
explicit SimpleContourCombiner(const Shape &shape);
void reset(const Point2 &p);
EdgeSelector & edgeSelector(int i);
DistanceType distance() const;
private:
EdgeSelector shapeEdgeSelector;
};
/// Selects the nearest contour that actually forms a border between filled and unfilled area.
template <class EdgeSelector>
class OverlappingContourCombiner {
public:
typedef EdgeSelector EdgeSelectorType;
typedef typename EdgeSelector::DistanceType DistanceType;
explicit OverlappingContourCombiner(const Shape &shape);
void reset(const Point2 &p);
EdgeSelector & edgeSelector(int i);
DistanceType distance() const;
private:
Point2 p;
std::vector<int> windings;
std::vector<EdgeSelector> edgeSelectors;
};
}
|
0 | repos/c2z/use_cases/msdfgen/include | repos/c2z/use_cases/msdfgen/include/ext/save-png.h |
#pragma once
#include "../core/BitmapRef.hpp"
#ifndef MSDFGEN_DISABLE_PNG
namespace msdfgen {
/// Saves the bitmap as a PNG file.
bool savePng(const BitmapConstRef<byte, 1> &bitmap, const char *filename);
bool savePng(const BitmapConstRef<byte, 3> &bitmap, const char *filename);
bool savePng(const BitmapConstRef<byte, 4> &bitmap, const char *filename);
bool savePng(const BitmapConstRef<float, 1> &bitmap, const char *filename);
bool savePng(const BitmapConstRef<float, 3> &bitmap, const char *filename);
bool savePng(const BitmapConstRef<float, 4> &bitmap, const char *filename);
}
#endif
|
0 | repos/c2z/use_cases/msdfgen/include | repos/c2z/use_cases/msdfgen/include/ext/import-font.h |
#pragma once
#include <cstddef>
#include "../core/Shape.h"
namespace msdfgen {
typedef unsigned char byte;
typedef unsigned unicode_t;
class FreetypeHandle;
class FontHandle;
class GlyphIndex {
public:
explicit GlyphIndex(unsigned index = 0);
unsigned getIndex() const;
private:
unsigned index;
};
/// Global metrics of a typeface (in font units).
struct FontMetrics {
/// The size of one EM.
double emSize;
/// The vertical position of the ascender and descender relative to the baseline.
double ascenderY, descenderY;
/// The vertical difference between consecutive baselines.
double lineHeight;
/// The vertical position and thickness of the underline.
double underlineY, underlineThickness;
};
/// A structure to model a given axis of a variable font.
struct FontVariationAxis {
/// The name of the variation axis.
const char *name;
/// The axis's minimum coordinate value.
double minValue;
/// The axis's maximum coordinate value.
double maxValue;
/// The axis's default coordinate value. FreeType computes meaningful default values for Adobe MM fonts.
double defaultValue;
};
/// Initializes the FreeType library.
FreetypeHandle * initializeFreetype();
/// Deinitializes the FreeType library.
void deinitializeFreetype(FreetypeHandle *library);
#ifdef FT_LOAD_DEFAULT // FreeType included
/// Creates a FontHandle from FT_Face that was loaded by the user. destroyFont must still be called but will not affect the FT_Face.
FontHandle * adoptFreetypeFont(FT_Face ftFace);
/// Converts the geometry of FreeType's FT_Outline to a Shape object.
FT_Error readFreetypeOutline(Shape &output, FT_Outline *outline);
#endif
/// Loads a font file and returns its handle.
FontHandle * loadFont(FreetypeHandle *library, const char *filename);
/// Loads a font from binary data and returns its handle.
FontHandle * loadFontData(FreetypeHandle *library, const byte *data, int length);
/// Unloads a font file.
void destroyFont(FontHandle *font);
/// Outputs the metrics of a font file.
bool getFontMetrics(FontMetrics &metrics, FontHandle *font);
/// Outputs the width of the space and tab characters.
bool getFontWhitespaceWidth(double &spaceAdvance, double &tabAdvance, FontHandle *font);
/// Outputs the glyph index corresponding to the specified Unicode character.
bool getGlyphIndex(GlyphIndex &glyphIndex, FontHandle *font, unicode_t unicode);
/// Loads the geometry of a glyph from a font file.
bool loadGlyph(Shape &output, FontHandle *font, GlyphIndex glyphIndex, double *advance = NULL);
bool loadGlyph(Shape &output, FontHandle *font, unicode_t unicode, double *advance = NULL);
/// Outputs the kerning distance adjustment between two specific glyphs.
bool getKerning(double &output, FontHandle *font, GlyphIndex glyphIndex1, GlyphIndex glyphIndex2);
bool getKerning(double &output, FontHandle *font, unicode_t unicode1, unicode_t unicode2);
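// A minimal end-to-end sketch (error handling omitted; "font.ttf" is a
// hypothetical file name):
//
//   FreetypeHandle *ft = initializeFreetype();
//   FontHandle *font = loadFont(ft, "font.ttf");
//   Shape glyph;
//   double advance;
//   if (loadGlyph(glyph, font, unicode_t('A'), &advance))
//       glyph.normalize();
//   destroyFont(font);
//   deinitializeFreetype(ft);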
#ifndef MSDFGEN_DISABLE_VARIABLE_FONTS
/// Sets a single variation axis of a variable font.
bool setFontVariationAxis(FreetypeHandle *library, FontHandle *font, const char *name, double coordinate);
/// Lists names and ranges of variation axes of a variable font.
bool listFontVariationAxes(std::vector<FontVariationAxis> &axes, FreetypeHandle *library, FontHandle *font);
#endif
}
|
0 | repos/c2z/use_cases/msdfgen/include | repos/c2z/use_cases/msdfgen/include/ext/resolve-shape-geometry.h |
#pragma once
#include "../core/Shape.h"
#ifdef MSDFGEN_USE_SKIA
namespace msdfgen {
/// Resolves any intersections within the shape by subdividing its contours using the Skia library and makes sure its contours have a consistent winding.
bool resolveShapeGeometry(Shape &shape);
}
#endif
|
0 | repos/c2z/use_cases/msdfgen/include | repos/c2z/use_cases/msdfgen/include/ext/import-svg.h |
#pragma once
#include <cstddef>
#include "../core/Shape.h"
#ifndef MSDFGEN_DISABLE_SVG
#ifndef MSDFGEN_EXT_PUBLIC
#define MSDFGEN_EXT_PUBLIC // for DLL import/export
#endif
namespace msdfgen {
extern MSDFGEN_EXT_PUBLIC const int SVG_IMPORT_FAILURE;
extern MSDFGEN_EXT_PUBLIC const int SVG_IMPORT_SUCCESS_FLAG;
extern MSDFGEN_EXT_PUBLIC const int SVG_IMPORT_PARTIAL_FAILURE_FLAG;
extern MSDFGEN_EXT_PUBLIC const int SVG_IMPORT_INCOMPLETE_FLAG;
extern MSDFGEN_EXT_PUBLIC const int SVG_IMPORT_UNSUPPORTED_FEATURE_FLAG;
extern MSDFGEN_EXT_PUBLIC const int SVG_IMPORT_TRANSFORMATION_IGNORED_FLAG;
/// Builds a shape from an SVG path string.
bool buildShapeFromSvgPath(Shape &shape, const char *pathDef, double endpointSnapRange = 0);
/// Reads a single <path> element found in the specified SVG file and converts it to the output Shape.
bool loadSvgShape(Shape &output, const char *filename, int pathIndex = 0, Vector2 *dimensions = NULL);
/// Newer version: if Skia is available, reads the entire geometry of the SVG file into the output Shape; otherwise it may only read one path. Returns SVG import flags.
int loadSvgShape(Shape &output, Shape::Bounds &viewBox, const char *filename);
}
#endif
|
0 | repos | repos/libcamera/meson_options.txt | # SPDX-License-Identifier: CC0-1.0
option('android',
type : 'feature',
value : 'disabled',
description : 'Compile libcamera with Android Camera3 HAL interface')
option('android_platform',
type : 'combo',
choices : ['cros', 'generic'],
value : 'generic',
description : 'Select the Android platform to compile for')
option('cam',
type : 'feature',
value : 'auto',
description : 'Compile the cam test application')
option('documentation',
type : 'feature',
description : 'Generate the project documentation')
option('doc_werror',
type : 'boolean',
value : false,
description : 'Treat documentation warnings as errors')
option('gstreamer',
type : 'feature',
value : 'auto',
description : 'Compile libcamera GStreamer plugin')
option('ipas',
type : 'array',
choices : ['ipu3', 'rkisp1', 'rpi/vc4', 'simple', 'vimc'],
description : 'Select which IPA modules to build')
option('lc-compliance',
type : 'feature',
value : 'auto',
description : 'Compile the lc-compliance test application')
option('pipelines',
type : 'array',
value : ['auto'],
choices : [
'all',
'auto',
'imx8-isi',
'ipu3',
'mali-c55',
'rkisp1',
'rpi/vc4',
'simple',
'uvcvideo',
'vimc'
],
description : 'Select which pipeline handlers to build. If this is set to "auto", all the pipelines applicable to the target architecture will be built. If this is set to "all", all the pipelines will be built. If both are selected then "all" will take precedence.')
option('pycamera',
type : 'feature',
value : 'auto',
description : 'Enable libcamera Python bindings (experimental)')
option('qcam',
type : 'feature',
value : 'auto',
description : 'Compile the qcam test application')
option('test',
type : 'boolean',
value : false,
description : 'Compile and include the tests')
option('tracing',
type : 'feature',
value : 'auto',
description : 'Enable tracing (based on lttng)')
option('udev',
type : 'feature',
value : 'auto',
description : 'Enable udev support for hotplug')
option('v4l2',
type : 'boolean',
value : false,
description : 'Compile the V4L2 compatibility layer')
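# Example (hypothetical) configuration using these options:
#   meson setup build -Dpipelines=simple -Dipas=simple -Dv4l2=true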
|
0 | repos/libcamera/include | repos/libcamera/include/linux/v4l2-common.h | /* SPDX-License-Identifier: ((GPL-2.0+ WITH Linux-syscall-note) OR BSD-3-Clause) */
/*
* include/linux/v4l2-common.h
*
* Common V4L2 and V4L2 subdev definitions.
*
* Users are advised to #include this file either through videodev2.h
* (V4L2) or through v4l2-subdev.h (V4L2 subdev) rather than to refer
* to this file directly.
*
* Copyright (C) 2012 Nokia Corporation
* Contact: Sakari Ailus <[email protected]>
*/
#ifndef __V4L2_COMMON__
#define __V4L2_COMMON__
#include <linux/types.h>
/*
*
* Selection interface definitions
*
*/
/* Current cropping area */
#define V4L2_SEL_TGT_CROP 0x0000
/* Default cropping area */
#define V4L2_SEL_TGT_CROP_DEFAULT 0x0001
/* Cropping bounds */
#define V4L2_SEL_TGT_CROP_BOUNDS 0x0002
/* Native frame size */
#define V4L2_SEL_TGT_NATIVE_SIZE 0x0003
/* Current composing area */
#define V4L2_SEL_TGT_COMPOSE 0x0100
/* Default composing area */
#define V4L2_SEL_TGT_COMPOSE_DEFAULT 0x0101
/* Composing bounds */
#define V4L2_SEL_TGT_COMPOSE_BOUNDS 0x0102
/* Current composing area plus all padding pixels */
#define V4L2_SEL_TGT_COMPOSE_PADDED 0x0103
/* Selection flags */
#define V4L2_SEL_FLAG_GE (1 << 0)
#define V4L2_SEL_FLAG_LE (1 << 1)
#define V4L2_SEL_FLAG_KEEP_CONFIG (1 << 2)
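/* For example, a driver may grow the requested rectangle when V4L2_SEL_FLAG_GE
 * is set, or shrink it when V4L2_SEL_FLAG_LE is set; setting both flags
 * requests an exact match.
 */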
struct v4l2_edid {
__u32 pad;
__u32 start_block;
__u32 blocks;
__u32 reserved[5];
__u8 *edid;
};
/* Backward compatibility target definitions --- to be removed. */
#define V4L2_SEL_TGT_CROP_ACTIVE V4L2_SEL_TGT_CROP
#define V4L2_SEL_TGT_COMPOSE_ACTIVE V4L2_SEL_TGT_COMPOSE
#define V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL V4L2_SEL_TGT_CROP
#define V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL V4L2_SEL_TGT_COMPOSE
#define V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS V4L2_SEL_TGT_CROP_BOUNDS
#define V4L2_SUBDEV_SEL_TGT_COMPOSE_BOUNDS V4L2_SEL_TGT_COMPOSE_BOUNDS
/* Backward compatibility flag definitions --- to be removed. */
#define V4L2_SUBDEV_SEL_FLAG_SIZE_GE V4L2_SEL_FLAG_GE
#define V4L2_SUBDEV_SEL_FLAG_SIZE_LE V4L2_SEL_FLAG_LE
#define V4L2_SUBDEV_SEL_FLAG_KEEP_CONFIG V4L2_SEL_FLAG_KEEP_CONFIG
#endif /* __V4L2_COMMON__ */
|
0 | repos/libcamera/include | repos/libcamera/include/linux/dma-buf.h | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Framework for buffer objects that can be shared across devices/subsystems.
*
* Copyright(C) 2015 Intel Ltd
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _DMA_BUF_UAPI_H_
#define _DMA_BUF_UAPI_H_
#include <linux/types.h>
/**
* struct dma_buf_sync - Synchronize with CPU access.
*
* When a DMA buffer is accessed from the CPU via mmap, it is not always
* possible to guarantee coherency between the CPU-visible map and underlying
* memory. To manage coherency, DMA_BUF_IOCTL_SYNC must be used to bracket
* any CPU access to give the kernel the chance to shuffle memory around if
* needed.
*
* Prior to accessing the map, the client must call DMA_BUF_IOCTL_SYNC
* with DMA_BUF_SYNC_START and the appropriate read/write flags. Once the
* access is complete, the client should call DMA_BUF_IOCTL_SYNC with
* DMA_BUF_SYNC_END and the same read/write flags.
*
* The synchronization provided via DMA_BUF_IOCTL_SYNC only provides cache
* coherency. It does not prevent other processes or devices from
* accessing the memory at the same time. If synchronization with a GPU or
* other device driver is required, it is the client's responsibility to
* wait for buffer to be ready for reading or writing before calling this
* ioctl with DMA_BUF_SYNC_START. Likewise, the client must ensure that
* follow-up work is not submitted to GPU or other device driver until
* after this ioctl has been called with DMA_BUF_SYNC_END.
*
* If the driver or API with which the client is interacting uses implicit
* synchronization, waiting for prior work to complete can be done via
* poll() on the DMA buffer file descriptor. If the driver or API requires
* explicit synchronization, the client may have to wait on a sync_file or
* other synchronization primitive outside the scope of the DMA buffer API.
*/
struct dma_buf_sync {
/**
* @flags: Set of access flags
*
* DMA_BUF_SYNC_START:
* Indicates the start of a map access session.
*
* DMA_BUF_SYNC_END:
* Indicates the end of a map access session.
*
* DMA_BUF_SYNC_READ:
* Indicates that the mapped DMA buffer will be read by the
* client via the CPU map.
*
* DMA_BUF_SYNC_WRITE:
* Indicates that the mapped DMA buffer will be written by the
* client via the CPU map.
*
* DMA_BUF_SYNC_RW:
* An alias for DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE.
*/
__u64 flags;
};
#define DMA_BUF_SYNC_READ (1 << 0)
#define DMA_BUF_SYNC_WRITE (2 << 0)
#define DMA_BUF_SYNC_RW (DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE)
#define DMA_BUF_SYNC_START (0 << 2)
#define DMA_BUF_SYNC_END (1 << 2)
#define DMA_BUF_SYNC_VALID_FLAGS_MASK \
(DMA_BUF_SYNC_RW | DMA_BUF_SYNC_END)
#define DMA_BUF_NAME_LEN 32
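/* A usage sketch (error handling omitted): bracketing CPU access to a
 * mmap()ed dma-buf with DMA_BUF_IOCTL_SYNC, as described above.
 *
 *   struct dma_buf_sync sync = { .flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW };
 *   ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *   ... CPU reads/writes through the mapped view ...
 *   sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
 *   ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 */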
/**
* struct dma_buf_export_sync_file - Get a sync_file from a dma-buf
*
* Userspace can perform a DMA_BUF_IOCTL_EXPORT_SYNC_FILE to retrieve the
* current set of fences on a dma-buf file descriptor as a sync_file. CPU
* waits via poll() or other driver-specific mechanisms typically wait on
* whatever fences are on the dma-buf at the time the wait begins. This
* is similar except that it takes a snapshot of the current fences on the
* dma-buf for waiting later instead of waiting immediately. This is
* useful for modern graphics APIs such as Vulkan which assume an explicit
* synchronization model but still need to inter-operate with dma-buf.
*
* The intended usage pattern is the following:
*
* 1. Export a sync_file with flags corresponding to the expected GPU usage
* via DMA_BUF_IOCTL_EXPORT_SYNC_FILE.
*
* 2. Submit rendering work which uses the dma-buf. The work should wait on
* the exported sync file before rendering and produce another sync_file
* when complete.
*
* 3. Import the rendering-complete sync_file into the dma-buf with flags
* corresponding to the GPU usage via DMA_BUF_IOCTL_IMPORT_SYNC_FILE.
*
* Unlike doing implicit synchronization via a GPU kernel driver's exec ioctl,
* the above is not a single atomic operation. If userspace wants to ensure
* ordering via these fences, it is the responsibility of userspace to use
* locks or other mechanisms to ensure that no other context adds fences or
* submits work between steps 1 and 3 above.
*/
struct dma_buf_export_sync_file {
/**
* @flags: Read/write flags
*
* Must be DMA_BUF_SYNC_READ, DMA_BUF_SYNC_WRITE, or both.
*
* If DMA_BUF_SYNC_READ is set and DMA_BUF_SYNC_WRITE is not set,
* the returned sync file waits on any writers of the dma-buf to
* complete. Waiting on the returned sync file is equivalent to
* poll() with POLLIN.
*
* If DMA_BUF_SYNC_WRITE is set, the returned sync file waits on
* any users of the dma-buf (read or write) to complete. Waiting
* on the returned sync file is equivalent to poll() with POLLOUT.
* If both DMA_BUF_SYNC_WRITE and DMA_BUF_SYNC_READ are set, this
* is equivalent to just DMA_BUF_SYNC_WRITE.
*/
__u32 flags;
/** @fd: Returned sync file descriptor */
__s32 fd;
};
/**
* struct dma_buf_import_sync_file - Insert a sync_file into a dma-buf
*
* Userspace can perform a DMA_BUF_IOCTL_IMPORT_SYNC_FILE to insert a
* sync_file into a dma-buf for the purposes of implicit synchronization
* with other dma-buf consumers. This allows clients using explicitly
* synchronized APIs such as Vulkan to inter-op with dma-buf consumers
* which expect implicit synchronization such as OpenGL or most media
* drivers/video.
*/
struct dma_buf_import_sync_file {
/**
* @flags: Read/write flags
*
* Must be DMA_BUF_SYNC_READ, DMA_BUF_SYNC_WRITE, or both.
*
* If DMA_BUF_SYNC_READ is set and DMA_BUF_SYNC_WRITE is not set,
* this inserts the sync_file as a read-only fence. Any subsequent
* implicitly synchronized writes to this dma-buf will wait on this
* fence but reads will not.
*
* If DMA_BUF_SYNC_WRITE is set, this inserts the sync_file as a
* write fence. All subsequent implicitly synchronized access to
* this dma-buf will wait on this fence.
*/
__u32 flags;
/** @fd: Sync file descriptor */
__s32 fd;
};
#define DMA_BUF_BASE 'b'
#define DMA_BUF_IOCTL_SYNC _IOW(DMA_BUF_BASE, 0, struct dma_buf_sync)
/* The 32/64-bitness of this uapi was botched in Android; there is no
 * difference between the variants in the actual uapi, they are just
 * different numbers.
 */
#define DMA_BUF_SET_NAME _IOW(DMA_BUF_BASE, 1, const char *)
#define DMA_BUF_SET_NAME_A _IOW(DMA_BUF_BASE, 1, __u32)
#define DMA_BUF_SET_NAME_B _IOW(DMA_BUF_BASE, 1, __u64)
#define DMA_BUF_IOCTL_EXPORT_SYNC_FILE _IOWR(DMA_BUF_BASE, 2, struct dma_buf_export_sync_file)
#define DMA_BUF_IOCTL_IMPORT_SYNC_FILE _IOW(DMA_BUF_BASE, 3, struct dma_buf_import_sync_file)
#endif
|
0 | repos/libcamera/include | repos/libcamera/include/linux/media-bus-format.h | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Media Bus API header
*
* Copyright (C) 2009, Guennadi Liakhovetski <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __LINUX_MEDIA_BUS_FORMAT_H
#define __LINUX_MEDIA_BUS_FORMAT_H
/*
* These bus formats uniquely identify data formats on the data bus. Format 0
* is reserved, MEDIA_BUS_FMT_FIXED shall be used by host-client pairs, where
* the data format is fixed. Additionally, "2X8" means that one pixel is
* transferred in two 8-bit samples, "BE" or "LE" specify in which order those
* samples are transferred over the bus: "LE" means that the least significant
* bits are transferred first, "BE" means that the most significant bits are
* transferred first, and "PADHI" and "PADLO" define which bits - low or high,
* in the incomplete high byte, are filled with padding bits.
*
* The bus formats are grouped by type, bus_width, bits per component, samples
* per pixel and order of subsamples. Numerical values are sorted using generic
* numerical sort order (8 thus comes before 10).
*
* As their value can't change when a new bus format is inserted in the
* enumeration, the bus formats are explicitly given a numerical value. The next
* free values for each category are listed below, update them when inserting
* new pixel codes.
*/
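/* As a worked example of the naming scheme, MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE
 * is 10-bit Bayer BGGR data transferred as two 8-bit samples per pixel, most
 * significant bits first, with the unused bits of the incomplete byte filled
 * with padding in the high bits.
 */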
#define MEDIA_BUS_FMT_FIXED 0x0001
/* RGB - next is 0x1026 */
#define MEDIA_BUS_FMT_RGB444_1X12 0x1016
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE 0x1001
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE 0x1002
#define MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE 0x1003
#define MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE 0x1004
#define MEDIA_BUS_FMT_RGB565_1X16 0x1017
#define MEDIA_BUS_FMT_BGR565_2X8_BE 0x1005
#define MEDIA_BUS_FMT_BGR565_2X8_LE 0x1006
#define MEDIA_BUS_FMT_RGB565_2X8_BE 0x1007
#define MEDIA_BUS_FMT_RGB565_2X8_LE 0x1008
#define MEDIA_BUS_FMT_RGB666_1X18 0x1009
#define MEDIA_BUS_FMT_RGB666_2X9_BE 0x1025
#define MEDIA_BUS_FMT_BGR666_1X18 0x1023
#define MEDIA_BUS_FMT_RBG888_1X24 0x100e
#define MEDIA_BUS_FMT_RGB666_1X24_CPADHI 0x1015
#define MEDIA_BUS_FMT_BGR666_1X24_CPADHI 0x1024
#define MEDIA_BUS_FMT_RGB565_1X24_CPADHI 0x1022
#define MEDIA_BUS_FMT_RGB666_1X7X3_SPWG 0x1010
#define MEDIA_BUS_FMT_BGR888_1X24 0x1013
#define MEDIA_BUS_FMT_BGR888_3X8 0x101b
#define MEDIA_BUS_FMT_GBR888_1X24 0x1014
#define MEDIA_BUS_FMT_RGB888_1X24 0x100a
#define MEDIA_BUS_FMT_RGB888_2X12_BE 0x100b
#define MEDIA_BUS_FMT_RGB888_2X12_LE 0x100c
#define MEDIA_BUS_FMT_RGB888_3X8 0x101c
#define MEDIA_BUS_FMT_RGB888_3X8_DELTA 0x101d
#define MEDIA_BUS_FMT_RGB888_1X7X4_SPWG 0x1011
#define MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA 0x1012
#define MEDIA_BUS_FMT_RGB666_1X30_CPADLO 0x101e
#define MEDIA_BUS_FMT_RGB888_1X30_CPADLO 0x101f
#define MEDIA_BUS_FMT_ARGB8888_1X32 0x100d
#define MEDIA_BUS_FMT_RGB888_1X32_PADHI 0x100f
#define MEDIA_BUS_FMT_RGB101010_1X30 0x1018
#define MEDIA_BUS_FMT_RGB666_1X36_CPADLO 0x1020
#define MEDIA_BUS_FMT_RGB888_1X36_CPADLO 0x1021
#define MEDIA_BUS_FMT_RGB121212_1X36 0x1019
#define MEDIA_BUS_FMT_RGB161616_1X48 0x101a
/* YUV (including grey) - next is 0x202f */
#define MEDIA_BUS_FMT_Y8_1X8 0x2001
#define MEDIA_BUS_FMT_UV8_1X8 0x2015
#define MEDIA_BUS_FMT_UYVY8_1_5X8 0x2002
#define MEDIA_BUS_FMT_VYUY8_1_5X8 0x2003
#define MEDIA_BUS_FMT_YUYV8_1_5X8 0x2004
#define MEDIA_BUS_FMT_YVYU8_1_5X8 0x2005
#define MEDIA_BUS_FMT_UYVY8_2X8 0x2006
#define MEDIA_BUS_FMT_VYUY8_2X8 0x2007
#define MEDIA_BUS_FMT_YUYV8_2X8 0x2008
#define MEDIA_BUS_FMT_YVYU8_2X8 0x2009
#define MEDIA_BUS_FMT_Y10_1X10 0x200a
#define MEDIA_BUS_FMT_Y10_2X8_PADHI_LE 0x202c
#define MEDIA_BUS_FMT_UYVY10_2X10 0x2018
#define MEDIA_BUS_FMT_VYUY10_2X10 0x2019
#define MEDIA_BUS_FMT_YUYV10_2X10 0x200b
#define MEDIA_BUS_FMT_YVYU10_2X10 0x200c
#define MEDIA_BUS_FMT_Y12_1X12 0x2013
#define MEDIA_BUS_FMT_UYVY12_2X12 0x201c
#define MEDIA_BUS_FMT_VYUY12_2X12 0x201d
#define MEDIA_BUS_FMT_YUYV12_2X12 0x201e
#define MEDIA_BUS_FMT_YVYU12_2X12 0x201f
#define MEDIA_BUS_FMT_Y14_1X14 0x202d
#define MEDIA_BUS_FMT_Y16_1X16 0x202e
#define MEDIA_BUS_FMT_UYVY8_1X16 0x200f
#define MEDIA_BUS_FMT_VYUY8_1X16 0x2010
#define MEDIA_BUS_FMT_YUYV8_1X16 0x2011
#define MEDIA_BUS_FMT_YVYU8_1X16 0x2012
#define MEDIA_BUS_FMT_YDYUYDYV8_1X16 0x2014
#define MEDIA_BUS_FMT_UYVY10_1X20 0x201a
#define MEDIA_BUS_FMT_VYUY10_1X20 0x201b
#define MEDIA_BUS_FMT_YUYV10_1X20 0x200d
#define MEDIA_BUS_FMT_YVYU10_1X20 0x200e
#define MEDIA_BUS_FMT_VUY8_1X24 0x2024
#define MEDIA_BUS_FMT_YUV8_1X24 0x2025
#define MEDIA_BUS_FMT_UYYVYY8_0_5X24 0x2026
#define MEDIA_BUS_FMT_UYVY12_1X24 0x2020
#define MEDIA_BUS_FMT_VYUY12_1X24 0x2021
#define MEDIA_BUS_FMT_YUYV12_1X24 0x2022
#define MEDIA_BUS_FMT_YVYU12_1X24 0x2023
#define MEDIA_BUS_FMT_YUV10_1X30 0x2016
#define MEDIA_BUS_FMT_UYYVYY10_0_5X30 0x2027
#define MEDIA_BUS_FMT_AYUV8_1X32 0x2017
#define MEDIA_BUS_FMT_UYYVYY12_0_5X36 0x2028
#define MEDIA_BUS_FMT_YUV12_1X36 0x2029
#define MEDIA_BUS_FMT_YUV16_1X48 0x202a
#define MEDIA_BUS_FMT_UYYVYY16_0_5X48 0x202b
/* Bayer - next is 0x3021 */
#define MEDIA_BUS_FMT_SBGGR8_1X8 0x3001
#define MEDIA_BUS_FMT_SGBRG8_1X8 0x3013
#define MEDIA_BUS_FMT_SGRBG8_1X8 0x3002
#define MEDIA_BUS_FMT_SRGGB8_1X8 0x3014
#define MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8 0x3015
#define MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8 0x3016
#define MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8 0x3017
#define MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8 0x3018
#define MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8 0x300b
#define MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8 0x300c
#define MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8 0x3009
#define MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8 0x300d
#define MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE 0x3003
#define MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE 0x3004
#define MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE 0x3005
#define MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE 0x3006
#define MEDIA_BUS_FMT_SBGGR10_1X10 0x3007
#define MEDIA_BUS_FMT_SGBRG10_1X10 0x300e
#define MEDIA_BUS_FMT_SGRBG10_1X10 0x300a
#define MEDIA_BUS_FMT_SRGGB10_1X10 0x300f
#define MEDIA_BUS_FMT_SBGGR12_1X12 0x3008
#define MEDIA_BUS_FMT_SGBRG12_1X12 0x3010
#define MEDIA_BUS_FMT_SGRBG12_1X12 0x3011
#define MEDIA_BUS_FMT_SRGGB12_1X12 0x3012
#define MEDIA_BUS_FMT_SBGGR14_1X14 0x3019
#define MEDIA_BUS_FMT_SGBRG14_1X14 0x301a
#define MEDIA_BUS_FMT_SGRBG14_1X14 0x301b
#define MEDIA_BUS_FMT_SRGGB14_1X14 0x301c
#define MEDIA_BUS_FMT_SBGGR16_1X16 0x301d
#define MEDIA_BUS_FMT_SGBRG16_1X16 0x301e
#define MEDIA_BUS_FMT_SGRBG16_1X16 0x301f
#define MEDIA_BUS_FMT_SRGGB16_1X16 0x3020
/* JPEG compressed formats - next is 0x4002 */
#define MEDIA_BUS_FMT_JPEG_1X8 0x4001
/* Vendor specific formats - next is 0x5002 */
/* S5C73M3 sensor specific interleaved UYVY and JPEG */
#define MEDIA_BUS_FMT_S5C_UYVY_JPEG_1X8 0x5001
/* HSV - next is 0x6002 */
#define MEDIA_BUS_FMT_AHSV8888_1X32 0x6001
/*
* This format should be used when the same driver handles
* both sides of the link and the bus format is a fixed
* metadata format that is not configurable from userspace.
* Width and height will be set to 0 for this format.
*/
#define MEDIA_BUS_FMT_METADATA_FIXED 0x7001
/* Generic line based metadata formats for serial buses. Next is 0x8008. */
#define MEDIA_BUS_FMT_META_8 0x8001
#define MEDIA_BUS_FMT_META_10 0x8002
#define MEDIA_BUS_FMT_META_12 0x8003
#define MEDIA_BUS_FMT_META_14 0x8004
#define MEDIA_BUS_FMT_META_16 0x8005
#define MEDIA_BUS_FMT_META_20 0x8006
#define MEDIA_BUS_FMT_META_24 0x8007
#endif /* __LINUX_MEDIA_BUS_FORMAT_H */
|
0 | repos/libcamera/include | repos/libcamera/include/linux/rkisp1-config.h | /* SPDX-License-Identifier: ((GPL-2.0+ WITH Linux-syscall-note) OR MIT) */
/*
* Rockchip ISP1 userspace API
* Copyright (C) 2017 Rockchip Electronics Co., Ltd.
*/
#ifndef _RKISP1_CONFIG_H
#define _RKISP1_CONFIG_H
#include <linux/types.h>
/* Defect Pixel Cluster Detection */
#define RKISP1_CIF_ISP_MODULE_DPCC (1U << 0)
/* Black Level Subtraction */
#define RKISP1_CIF_ISP_MODULE_BLS (1U << 1)
/* Sensor De-gamma */
#define RKISP1_CIF_ISP_MODULE_SDG (1U << 2)
/* Histogram statistics configuration */
#define RKISP1_CIF_ISP_MODULE_HST (1U << 3)
/* Lens Shade Control */
#define RKISP1_CIF_ISP_MODULE_LSC (1U << 4)
/* Auto White Balance Gain */
#define RKISP1_CIF_ISP_MODULE_AWB_GAIN (1U << 5)
/* Filter */
#define RKISP1_CIF_ISP_MODULE_FLT (1U << 6)
/* Bayer Demosaic */
#define RKISP1_CIF_ISP_MODULE_BDM (1U << 7)
/* Cross Talk */
#define RKISP1_CIF_ISP_MODULE_CTK (1U << 8)
/* Gamma Out Curve */
#define RKISP1_CIF_ISP_MODULE_GOC (1U << 9)
/* Color Processing */
#define RKISP1_CIF_ISP_MODULE_CPROC (1U << 10)
/* Auto Focus Control statistics configuration */
#define RKISP1_CIF_ISP_MODULE_AFC (1U << 11)
/* Auto White Balancing statistics configuration */
#define RKISP1_CIF_ISP_MODULE_AWB (1U << 12)
/* Image Effect */
#define RKISP1_CIF_ISP_MODULE_IE (1U << 13)
/* Auto Exposure Control statistics configuration */
#define RKISP1_CIF_ISP_MODULE_AEC (1U << 14)
/* Wide Dynamic Range */
#define RKISP1_CIF_ISP_MODULE_WDR (1U << 15)
/* Denoise Pre-Filter */
#define RKISP1_CIF_ISP_MODULE_DPF (1U << 16)
/* Denoise Pre-Filter Strength */
#define RKISP1_CIF_ISP_MODULE_DPF_STRENGTH (1U << 17)
#define RKISP1_CIF_ISP_CTK_COEFF_MAX 0x100
#define RKISP1_CIF_ISP_CTK_OFFSET_MAX 0x800
#define RKISP1_CIF_ISP_AE_MEAN_MAX_V10 25
#define RKISP1_CIF_ISP_AE_MEAN_MAX_V12 81
#define RKISP1_CIF_ISP_AE_MEAN_MAX RKISP1_CIF_ISP_AE_MEAN_MAX_V12
#define RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10 16
#define RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12 32
#define RKISP1_CIF_ISP_HIST_BIN_N_MAX RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12
#define RKISP1_CIF_ISP_AFM_MAX_WINDOWS 3
#define RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE 17
#define RKISP1_CIF_ISP_BDM_MAX_TH 0xff
/*
* Black level compensation
*/
/* maximum value for horizontal start address */
#define RKISP1_CIF_ISP_BLS_START_H_MAX 0x00000fff
/* maximum value for horizontal stop address */
#define RKISP1_CIF_ISP_BLS_STOP_H_MAX 0x00000fff
/* maximum value for vertical start address */
#define RKISP1_CIF_ISP_BLS_START_V_MAX 0x00000fff
/* maximum value for vertical stop address */
#define RKISP1_CIF_ISP_BLS_STOP_V_MAX 0x00000fff
/* maximum is 2^18 = 262144; the stored value 0x12 = 18 is the exponent */
#define RKISP1_CIF_ISP_BLS_SAMPLES_MAX 0x00000012
/* maximum value for fixed black level */
#define RKISP1_CIF_ISP_BLS_FIX_SUB_MAX 0x00000fff
/* minimum value for fixed black level */
#define RKISP1_CIF_ISP_BLS_FIX_SUB_MIN 0xfffff000
/* 13 bit range (signed) */
#define RKISP1_CIF_ISP_BLS_FIX_MASK 0x00001fff
/*
* Automatic white balance measurements
*/
#define RKISP1_CIF_ISP_AWB_MAX_GRID 1
#define RKISP1_CIF_ISP_AWB_MAX_FRAMES 7
/*
* Gamma out
*/
/* Maximum number of color samples supported */
#define RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10 17
#define RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12 34
#define RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12
/*
* Lens shade correction
*/
#define RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE 8
/*
* The following matches the tuning process,
* not the max capabilities of the chip.
*/
#define RKISP1_CIF_ISP_LSC_SAMPLES_MAX 17
/*
* Histogram calculation
*/
#define RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V10 25
#define RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V12 81
#define RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V12
/*
* Defect Pixel Cluster Correction
*/
#define RKISP1_CIF_ISP_DPCC_METHODS_MAX 3
#define RKISP1_CIF_ISP_DPCC_MODE_STAGE1_ENABLE (1U << 2)
#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_INCL_G_CENTER (1U << 0)
#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_INCL_RB_CENTER (1U << 1)
#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_G_3X3 (1U << 2)
#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_STAGE1_RB_3X3 (1U << 3)
/* 0-2 for sets 1-3 */
#define RKISP1_CIF_ISP_DPCC_SET_USE_STAGE1_USE_SET(n) ((n) << 0)
#define RKISP1_CIF_ISP_DPCC_SET_USE_STAGE1_USE_FIX_SET (1U << 3)
#define RKISP1_CIF_ISP_DPCC_METHODS_SET_PG_GREEN_ENABLE (1U << 0)
#define RKISP1_CIF_ISP_DPCC_METHODS_SET_LC_GREEN_ENABLE (1U << 1)
#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RO_GREEN_ENABLE (1U << 2)
#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RND_GREEN_ENABLE (1U << 3)
#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RG_GREEN_ENABLE (1U << 4)
#define RKISP1_CIF_ISP_DPCC_METHODS_SET_PG_RED_BLUE_ENABLE (1U << 8)
#define RKISP1_CIF_ISP_DPCC_METHODS_SET_LC_RED_BLUE_ENABLE (1U << 9)
#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RO_RED_BLUE_ENABLE (1U << 10)
#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RND_RED_BLUE_ENABLE (1U << 11)
#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RG_RED_BLUE_ENABLE (1U << 12)
#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_G(v) ((v) << 0)
#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_RB(v) ((v) << 8)
#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_G(v) ((v) << 0)
#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_RB(v) ((v) << 8)
#define RKISP1_CIF_ISP_DPCC_PG_FAC_G(v) ((v) << 0)
#define RKISP1_CIF_ISP_DPCC_PG_FAC_RB(v) ((v) << 8)
#define RKISP1_CIF_ISP_DPCC_RND_THRESH_G(v) ((v) << 0)
#define RKISP1_CIF_ISP_DPCC_RND_THRESH_RB(v) ((v) << 8)
#define RKISP1_CIF_ISP_DPCC_RG_FAC_G(v) ((v) << 0)
#define RKISP1_CIF_ISP_DPCC_RG_FAC_RB(v) ((v) << 8)
#define RKISP1_CIF_ISP_DPCC_RO_LIMITS_n_G(n, v) ((v) << ((n) * 4))
#define RKISP1_CIF_ISP_DPCC_RO_LIMITS_n_RB(n, v) ((v) << ((n) * 4 + 2))
#define RKISP1_CIF_ISP_DPCC_RND_OFFS_n_G(n, v) ((v) << ((n) * 4))
#define RKISP1_CIF_ISP_DPCC_RND_OFFS_n_RB(n, v) ((v) << ((n) * 4 + 2))
/*
* Denoising pre filter
*/
#define RKISP1_CIF_ISP_DPF_MAX_NLF_COEFFS 17
#define RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS 6
/*
* Measurement types
*/
#define RKISP1_CIF_ISP_STAT_AWB (1U << 0)
#define RKISP1_CIF_ISP_STAT_AUTOEXP (1U << 1)
#define RKISP1_CIF_ISP_STAT_AFM (1U << 2)
#define RKISP1_CIF_ISP_STAT_HIST (1U << 3)
/**
* enum rkisp1_cif_isp_version - ISP variants
*
* @RKISP1_V10: Used at least in RK3288 and RK3399.
* @RKISP1_V11: Declared in the original vendor code, but not used. Same number
* of entries in grids and histogram as v10.
* @RKISP1_V12: Used at least in RK3326 and PX30.
* @RKISP1_V13: Used at least in RK1808. Same number of entries in grids and
* histogram as v12.
* @RKISP1_V_IMX8MP: Used in at least i.MX8MP. Same number of entries in grids
* and histogram as v10.
*/
enum rkisp1_cif_isp_version {
RKISP1_V10 = 10,
RKISP1_V11,
RKISP1_V12,
RKISP1_V13,
RKISP1_V_IMX8MP,
};
enum rkisp1_cif_isp_histogram_mode {
RKISP1_CIF_ISP_HISTOGRAM_MODE_DISABLE,
RKISP1_CIF_ISP_HISTOGRAM_MODE_RGB_COMBINED,
RKISP1_CIF_ISP_HISTOGRAM_MODE_R_HISTOGRAM,
RKISP1_CIF_ISP_HISTOGRAM_MODE_G_HISTOGRAM,
RKISP1_CIF_ISP_HISTOGRAM_MODE_B_HISTOGRAM,
RKISP1_CIF_ISP_HISTOGRAM_MODE_Y_HISTOGRAM
};
enum rkisp1_cif_isp_awb_mode_type {
RKISP1_CIF_ISP_AWB_MODE_MANUAL,
RKISP1_CIF_ISP_AWB_MODE_RGB,
RKISP1_CIF_ISP_AWB_MODE_YCBCR
};
enum rkisp1_cif_isp_flt_mode {
RKISP1_CIF_ISP_FLT_STATIC_MODE,
RKISP1_CIF_ISP_FLT_DYNAMIC_MODE
};
/**
* enum rkisp1_cif_isp_exp_ctrl_autostop - stop modes
* @RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP_0: continuous measurement
* @RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP_1: stop measuring after a complete frame
*/
enum rkisp1_cif_isp_exp_ctrl_autostop {
RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP_0 = 0,
RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP_1 = 1,
};
/**
* enum rkisp1_cif_isp_exp_meas_mode - Exposure measure mode
* @RKISP1_CIF_ISP_EXP_MEASURING_MODE_0: Y = 16 + 0.25R + 0.5G + 0.1094B
* @RKISP1_CIF_ISP_EXP_MEASURING_MODE_1: Y = (R + G + B) x (85/256)
*/
enum rkisp1_cif_isp_exp_meas_mode {
RKISP1_CIF_ISP_EXP_MEASURING_MODE_0,
RKISP1_CIF_ISP_EXP_MEASURING_MODE_1,
};
/*---------- PART1: Input Parameters ------------*/
/**
* struct rkisp1_cif_isp_window - measurement window.
*
* Measurements are calculated per window inside the frame.
* This struct represents a window for a measurement.
*
* @h_offs: the horizontal offset of the window from the left of the frame in pixels.
* @v_offs: the vertical offset of the window from the top of the frame in pixels.
* @h_size: the horizontal size of the window in pixels
* @v_size: the vertical size of the window in pixels.
*/
struct rkisp1_cif_isp_window {
__u16 h_offs;
__u16 v_offs;
__u16 h_size;
__u16 v_size;
};
/**
* struct rkisp1_cif_isp_bls_fixed_val - BLS fixed subtraction values
*
* The values will be subtracted from the sensor
* values. Therefore a negative value means addition instead of subtraction!
*
* @r: Fixed (signed!) subtraction value for Bayer pattern R
* @gr: Fixed (signed!) subtraction value for Bayer pattern Gr
* @gb: Fixed (signed!) subtraction value for Bayer pattern Gb
* @b: Fixed (signed!) subtraction value for Bayer pattern B
*/
struct rkisp1_cif_isp_bls_fixed_val {
__s16 r;
__s16 gr;
__s16 gb;
__s16 b;
};
/**
* struct rkisp1_cif_isp_bls_config - Configuration used by black level subtraction
*
* @enable_auto: In automatic mode the measured values are subtracted.
* Otherwise the fixed subtraction values will be subtracted.
* @en_windows: enabled window
* @bls_window1: Measurement window 1 size
* @bls_window2: Measurement window 2 size
* @bls_samples: Set amount of measured pixels for each Bayer position
* (A, B, C and D) to 2^bls_samples.
* @fixed_val: Fixed subtraction values
*/
struct rkisp1_cif_isp_bls_config {
__u8 enable_auto;
__u8 en_windows;
struct rkisp1_cif_isp_window bls_window1;
struct rkisp1_cif_isp_window bls_window2;
__u8 bls_samples;
struct rkisp1_cif_isp_bls_fixed_val fixed_val;
};
/**
* struct rkisp1_cif_isp_dpcc_methods_config - DPCC methods set configuration
*
* This structure stores the configuration of one set of methods for the DPCC
* algorithm. Multiple methods can be selected in each set (independently for
* the Green and Red/Blue components) through the @method field; the result is
* the logical AND of all enabled methods. The remaining fields set thresholds
* and factors for each method.
*
* @method: Method enable bits (RKISP1_CIF_ISP_DPCC_METHODS_SET_*)
* @line_thresh: Line threshold (RKISP1_CIF_ISP_DPCC_LINE_THRESH_*)
* @line_mad_fac: Line Mean Absolute Difference factor (RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_*)
* @pg_fac: Peak gradient factor (RKISP1_CIF_ISP_DPCC_PG_FAC_*)
* @rnd_thresh: Rank Neighbor Difference threshold (RKISP1_CIF_ISP_DPCC_RND_THRESH_*)
* @rg_fac: Rank gradient factor (RKISP1_CIF_ISP_DPCC_RG_FAC_*)
*/
struct rkisp1_cif_isp_dpcc_methods_config {
__u32 method;
__u32 line_thresh;
__u32 line_mad_fac;
__u32 pg_fac;
__u32 rnd_thresh;
__u32 rg_fac;
};
/**
* struct rkisp1_cif_isp_dpcc_config - Configuration used by DPCC
*
* Configuration used by Defect Pixel Cluster Correction. Three sets of methods
* can be configured and selected through the @set_use field. The result is the
* logical OR of all enabled sets.
*
* @mode: DPCC mode (RKISP1_CIF_ISP_DPCC_MODE_*)
* @output_mode: Interpolation output mode (RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_*)
* @set_use: Methods sets selection (RKISP1_CIF_ISP_DPCC_SET_USE_*)
* @methods: Methods sets configuration
* @ro_limits: Rank order limits (RKISP1_CIF_ISP_DPCC_RO_LIMITS_*)
* @rnd_offs: Differential rank offsets for rank neighbor difference (RKISP1_CIF_ISP_DPCC_RND_OFFS_*)
*/
struct rkisp1_cif_isp_dpcc_config {
__u32 mode;
__u32 output_mode;
__u32 set_use;
struct rkisp1_cif_isp_dpcc_methods_config methods[RKISP1_CIF_ISP_DPCC_METHODS_MAX];
__u32 ro_limits;
__u32 rnd_offs;
};
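/*
 * Illustrative usage sketch, not part of the UAPI: select method set 1 for
 * stage 1 and enable the line-check and peak-gradient methods on both the
 * green and red/blue channels. The helper name and the numeric thresholds
 * are placeholders chosen for the example.
 */
static inline void example_dpcc_setup(struct rkisp1_cif_isp_dpcc_config *dpcc)
{
	struct rkisp1_cif_isp_dpcc_methods_config *m = &dpcc->methods[0];

	dpcc->mode = RKISP1_CIF_ISP_DPCC_MODE_STAGE1_ENABLE;
	dpcc->set_use = RKISP1_CIF_ISP_DPCC_SET_USE_STAGE1_USE_SET(0);

	m->method = RKISP1_CIF_ISP_DPCC_METHODS_SET_LC_GREEN_ENABLE |
		    RKISP1_CIF_ISP_DPCC_METHODS_SET_PG_GREEN_ENABLE |
		    RKISP1_CIF_ISP_DPCC_METHODS_SET_LC_RED_BLUE_ENABLE |
		    RKISP1_CIF_ISP_DPCC_METHODS_SET_PG_RED_BLUE_ENABLE;
	m->line_thresh = RKISP1_CIF_ISP_DPCC_LINE_THRESH_G(8) |
			 RKISP1_CIF_ISP_DPCC_LINE_THRESH_RB(8);
	m->pg_fac = RKISP1_CIF_ISP_DPCC_PG_FAC_G(6) |
		    RKISP1_CIF_ISP_DPCC_PG_FAC_RB(6);
}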
/**
* struct rkisp1_cif_isp_gamma_corr_curve - gamma curve point definition y-axis (output).
*
* The reset values define a linear curve which has the same effect as bypass. Reset values are:
* gamma_y[0] = 0x0000, gamma_y[1] = 0x0100, ... gamma_y[15] = 0x0f00, gamma_y[16] = 0x0fff
*
* @gamma_y: the values for the y-axis of gamma curve points. Each value is 12 bit.
*/
struct rkisp1_cif_isp_gamma_corr_curve {
__u16 gamma_y[RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE];
};
/**
* struct rkisp1_cif_isp_gamma_curve_x_axis_pnts - De-Gamma Curve definition x increments
* (sampling points). gamma_dx0 is for the lower samples (1-8), gamma_dx1 is for the
* higher samples (9-16). The reset value for both fields is 0x44444444. This means
* that each sample is 4 units away from the previous one on the x-axis.
*
* @gamma_dx0: gamma curve sample points definitions. Bits 0:2 for sample 1. Bit 3 unused.
* Bits 4:6 for sample 2. bit 7 unused ... Bits 28:30 for sample 8. Bit 31 unused
* @gamma_dx1: gamma curve sample points definitions. Bits 0:2 for sample 9. Bit 3 unused.
* Bits 4:6 for sample 10. bit 7 unused ... Bits 28:30 for sample 16. Bit 31 unused
*/
struct rkisp1_cif_isp_gamma_curve_x_axis_pnts {
__u32 gamma_dx0;
__u32 gamma_dx1;
};
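/*
 * Illustrative helper, not part of the UAPI: extract the 3-bit x-axis
 * increment for sample index n (0-based, 0..15) from the packed
 * gamma_dx0/gamma_dx1 words, following the bit layout documented above.
 */
static inline __u32
example_sdg_dx(const struct rkisp1_cif_isp_gamma_curve_x_axis_pnts *pnts,
	       unsigned int n)
{
	__u32 word = (n < 8) ? pnts->gamma_dx0 : pnts->gamma_dx1;

	/* Each sample occupies bits (n % 8) * 4 .. (n % 8) * 4 + 2. */
	return (word >> ((n % 8) * 4)) & 0x7;
}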
/**
* struct rkisp1_cif_isp_sdg_config - Configuration used by sensor degamma
*
* @curve_r: gamma curve point definition axis for red
* @curve_g: gamma curve point definition axis for green
* @curve_b: gamma curve point definition axis for blue
* @xa_pnts: x axis increments
*/
struct rkisp1_cif_isp_sdg_config {
struct rkisp1_cif_isp_gamma_corr_curve curve_r;
struct rkisp1_cif_isp_gamma_corr_curve curve_g;
struct rkisp1_cif_isp_gamma_corr_curve curve_b;
struct rkisp1_cif_isp_gamma_curve_x_axis_pnts xa_pnts;
};
/**
* struct rkisp1_cif_isp_lsc_config - Configuration used by Lens shading correction
*
* @r_data_tbl: sample table red
* @gr_data_tbl: sample table green (red)
* @gb_data_tbl: sample table green (blue)
* @b_data_tbl: sample table blue
* @x_grad_tbl: gradient table x
* @y_grad_tbl: gradient table y
* @x_size_tbl: size table x
* @y_size_tbl: size table y
* @config_width: not used at the moment
* @config_height: not used at the moment
*/
struct rkisp1_cif_isp_lsc_config {
__u16 r_data_tbl[RKISP1_CIF_ISP_LSC_SAMPLES_MAX][RKISP1_CIF_ISP_LSC_SAMPLES_MAX];
__u16 gr_data_tbl[RKISP1_CIF_ISP_LSC_SAMPLES_MAX][RKISP1_CIF_ISP_LSC_SAMPLES_MAX];
__u16 gb_data_tbl[RKISP1_CIF_ISP_LSC_SAMPLES_MAX][RKISP1_CIF_ISP_LSC_SAMPLES_MAX];
__u16 b_data_tbl[RKISP1_CIF_ISP_LSC_SAMPLES_MAX][RKISP1_CIF_ISP_LSC_SAMPLES_MAX];
__u16 x_grad_tbl[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE];
__u16 y_grad_tbl[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE];
__u16 x_size_tbl[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE];
__u16 y_size_tbl[RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE];
__u16 config_width;
__u16 config_height;
};
/**
* struct rkisp1_cif_isp_ie_config - Configuration used by image effects
*
* @effect: values from 'enum v4l2_colorfx'. Possible values are: V4L2_COLORFX_SEPIA,
* V4L2_COLORFX_SET_CBCR, V4L2_COLORFX_AQUA, V4L2_COLORFX_EMBOSS,
* V4L2_COLORFX_SKETCH, V4L2_COLORFX_BW, V4L2_COLORFX_NEGATIVE
* @color_sel: bits 0:2 - colors bitmask (001 - blue, 010 - green, 100 - red).
* bits 8:15 - Threshold value of the RGB colors for the color selection effect.
* @eff_mat_1: 3x3 Matrix Coefficients for Emboss Effect 1
* @eff_mat_2: 3x3 Matrix Coefficients for Emboss Effect 2
* @eff_mat_3: 3x3 Matrix Coefficients for Emboss 3/Sketch 1
* @eff_mat_4: 3x3 Matrix Coefficients for Sketch Effect 2
* @eff_mat_5: 3x3 Matrix Coefficients for Sketch Effect 3
* @eff_tint: Chrominance increment values of tint (used for sepia effect)
*/
struct rkisp1_cif_isp_ie_config {
__u16 effect;
__u16 color_sel;
__u16 eff_mat_1;
__u16 eff_mat_2;
__u16 eff_mat_3;
__u16 eff_mat_4;
__u16 eff_mat_5;
__u16 eff_tint;
};
/**
* struct rkisp1_cif_isp_cproc_config - Configuration used by Color Processing
*
* @c_out_range: Chrominance pixel clipping range at output.
* (0 for limit, 1 for full)
* @y_in_range: Luminance pixel clipping range at input.
* @y_out_range: Luminance pixel clipping range at output.
* @contrast: 00~ff, 0.0~1.992
* @brightness: 80~7F, -128~+127
* @sat: saturation, 00~FF, 0.0~1.992
* @hue: 80~7F, -90~+87.188
*/
struct rkisp1_cif_isp_cproc_config {
__u8 c_out_range;
__u8 y_in_range;
__u8 y_out_range;
__u8 contrast;
__u8 brightness;
__u8 sat;
__u8 hue;
};
/**
* struct rkisp1_cif_isp_awb_meas_config - Configuration for the AWB statistics
*
* @awb_mode: the awb meas mode. From enum rkisp1_cif_isp_awb_mode_type.
* @awb_wnd: white balance measurement window (in pixels)
* @max_y: only pixel values < max_y contribute to the awb measurement, set to 0
* to disable this feature
* @min_y: only pixel values > min_y contribute to the awb measurement
* @max_csum: Chrominance sum maximum value, only consider pixels with Cb+Cr
* smaller than the threshold for awb measurements
* @min_c: Chrominance minimum value, only consider pixels with Cb/Cr
* each greater than threshold value for awb measurements
* @frames: number of frames - 1 used for mean value calculation
* (ucFrames=0 means 1 Frame)
* @awb_ref_cr: reference Cr value for AWB regulation, target for AWB
* @awb_ref_cb: reference Cb value for AWB regulation, target for AWB
* @enable_ymax_cmp: enable Y_MAX compare (Not valid in RGB measurement mode.)
*/
struct rkisp1_cif_isp_awb_meas_config {
/*
* Note: currently the h and v offsets are mapped to grid offsets
*/
struct rkisp1_cif_isp_window awb_wnd;
__u32 awb_mode;
__u8 max_y;
__u8 min_y;
__u8 max_csum;
__u8 min_c;
__u8 frames;
__u8 awb_ref_cr;
__u8 awb_ref_cb;
__u8 enable_ymax_cmp;
};
/**
* struct rkisp1_cif_isp_awb_gain_config - Configuration used by auto white balance gain
*
* All fields in this struct are 10 bit, where:
* 0x100 = 1.0, unsigned integer value, range 0 to 4 with an 8 bit fractional part.
*
* out_data_x = ( AWB_GAIN_X * in_data + 128) >> 8
*
* @gain_red: gain value for red component.
* @gain_green_r: gain value for green component in red line.
* @gain_blue: gain value for blue component.
* @gain_green_b: gain value for green component in blue line.
*/
struct rkisp1_cif_isp_awb_gain_config {
__u16 gain_red;
__u16 gain_green_r;
__u16 gain_blue;
__u16 gain_green_b;
};
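/*
 * Illustrative helper, not part of the UAPI: apply one AWB gain to a pixel
 * component using the formula documented above. The gain has 8 fractional
 * bits, so 0x100 corresponds to a gain of 1.0.
 */
static inline __u32 example_awb_apply_gain(__u16 gain, __u32 in_data)
{
	return ((__u32)gain * in_data + 128) >> 8;
}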
/**
* struct rkisp1_cif_isp_flt_config - Configuration used by ISP filtering
*
* All 4 threshold fields (thresh_*) are 10 bits.
* All 6 factor fields (fac_*) are 6 bits.
*
* @mode: ISP_FILT_MODE register fields (from enum rkisp1_cif_isp_flt_mode)
* @grn_stage1: Green filter stage 1 select (range 0x0...0x8)
* @chr_h_mode: Chroma filter horizontal mode
* @chr_v_mode: Chroma filter vertical mode
* @thresh_bl0: If thresh_bl1 < sum_grad < thresh_bl0 then fac_bl0 is selected (blurring th)
* @thresh_bl1: If sum_grad < thresh_bl1 then fac_bl1 is selected (blurring th)
* @thresh_sh0: If thresh_sh0 < sum_grad < thresh_sh1 then fac_sh0 is selected (sharpening th)
* @thresh_sh1: If thresh_sh1 < sum_grad then fac_sh1 is selected (sharpening th)
* @lum_weight: Parameters for luminance weight function.
* @fac_sh1: filter factor for sharp1 level
* @fac_sh0: filter factor for sharp0 level
* @fac_mid: filter factor for mid level and for static filter mode
* @fac_bl0: filter factor for blur 0 level
* @fac_bl1: filter factor for blur 1 level (max blur)
*/
struct rkisp1_cif_isp_flt_config {
__u32 mode;
__u8 grn_stage1;
__u8 chr_h_mode;
__u8 chr_v_mode;
__u32 thresh_bl0;
__u32 thresh_bl1;
__u32 thresh_sh0;
__u32 thresh_sh1;
__u32 lum_weight;
__u32 fac_sh1;
__u32 fac_sh0;
__u32 fac_mid;
__u32 fac_bl0;
__u32 fac_bl1;
};
/**
* struct rkisp1_cif_isp_bdm_config - Configuration used by Bayer DeMosaic
*
* @demosaic_th: threshold for bayer demosaicing texture detection
*/
struct rkisp1_cif_isp_bdm_config {
__u8 demosaic_th;
};
/**
* struct rkisp1_cif_isp_ctk_config - Configuration used by Cross Talk correction
*
* @coeff: color correction matrix. Values are 11-bit signed fixed-point numbers with 4 bit integer
* and 7 bit fractional part, ranging from -8 (0x400) to +7.992 (0x3FF). 0 is
* represented by 0x000 and a coefficient value of 1 as 0x080.
* @ct_offset: Red, Green, Blue offsets for the crosstalk correction matrix
*/
struct rkisp1_cif_isp_ctk_config {
__u16 coeff[3][3];
__u16 ct_offset[3];
};
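/*
 * Illustrative userspace helper, not part of the UAPI: encode a
 * floating-point crosstalk coefficient into the 11-bit signed fixed-point
 * format described above (4 integer bits, 7 fractional bits, so 1.0 is
 * 0x080 and the range is -8.0 (0x400) to +7.992 (0x3ff)).
 */
static inline __u16 example_ctk_coeff(float coeff)
{
	int fixed = (int)(coeff * 128.0f); /* scale by 2^7 fractional bits */

	if (fixed < -1024) /* clamp to -8.0 */
		fixed = -1024;
	if (fixed > 1023) /* clamp to +7.992 */
		fixed = 1023;

	return (__u16)(fixed & 0x7ff); /* two's complement in 11 bits */
}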
enum rkisp1_cif_isp_goc_mode {
RKISP1_CIF_ISP_GOC_MODE_LOGARITHMIC,
RKISP1_CIF_ISP_GOC_MODE_EQUIDISTANT
};
/**
* struct rkisp1_cif_isp_goc_config - Configuration used by Gamma Out correction
*
* @mode: goc mode (from enum rkisp1_cif_isp_goc_mode)
* @gamma_y: gamma out curve y-axis for all color components
*
* The number of entries of @gamma_y depends on the hardware revision
* as is reported by the hw_revision field of the struct media_device_info
* that is returned by ioctl MEDIA_IOC_DEVICE_INFO.
*
* V10 has RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10 entries, V12 has
* RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12 entries.
* RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES is equal to the maximum of the two.
*/
struct rkisp1_cif_isp_goc_config {
__u32 mode;
__u16 gamma_y[RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES];
};
/**
* struct rkisp1_cif_isp_hst_config - Configuration for Histogram statistics
*
* @mode: histogram mode (from enum rkisp1_cif_isp_histogram_mode)
* @histogram_predivider: process every stepsize-th pixel; all other pixels are
* skipped
* @meas_window: coordinates of the measure window
* @hist_weight: weighting factor for sub-windows
*
* The number of entries of @hist_weight depends on the hardware revision
* as is reported by the hw_revision field of the struct media_device_info
* that is returned by ioctl MEDIA_IOC_DEVICE_INFO.
*
* V10 has RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V10 entries, V12 has
* RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V12 entries.
* RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE is equal to the maximum of the
* two.
*/
struct rkisp1_cif_isp_hst_config {
__u32 mode;
__u8 histogram_predivider;
struct rkisp1_cif_isp_window meas_window;
__u8 hist_weight[RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE];
};
/**
* struct rkisp1_cif_isp_aec_config - Configuration for Auto Exposure statistics
*
* @mode: Exposure measure mode (from enum rkisp1_cif_isp_exp_meas_mode)
* @autostop: stop mode (from enum rkisp1_cif_isp_exp_ctrl_autostop)
* @meas_window: coordinates of the measure window
*/
struct rkisp1_cif_isp_aec_config {
__u32 mode;
__u32 autostop;
struct rkisp1_cif_isp_window meas_window;
};
/**
* struct rkisp1_cif_isp_afc_config - Configuration for the Auto Focus statistics
*
* @num_afm_win: max RKISP1_CIF_ISP_AFM_MAX_WINDOWS
* @afm_win: coordinates of the meas window
* @thres: threshold used for minimizing the influence of noise
* @var_shift: the number of bits for the shift operation at the end of the
* calculation chain.
*/
struct rkisp1_cif_isp_afc_config {
__u8 num_afm_win;
struct rkisp1_cif_isp_window afm_win[RKISP1_CIF_ISP_AFM_MAX_WINDOWS];
__u32 thres;
__u32 var_shift;
};
/**
* enum rkisp1_cif_isp_dpf_gain_usage - dpf gain usage
* @RKISP1_CIF_ISP_DPF_GAIN_USAGE_DISABLED: don't use any gains in preprocessing stage
* @RKISP1_CIF_ISP_DPF_GAIN_USAGE_NF_GAINS: use only the noise function gains from
* registers DPF_NF_GAIN_R, ...
* @RKISP1_CIF_ISP_DPF_GAIN_USAGE_LSC_GAINS: use only the gains from LSC module
* @RKISP1_CIF_ISP_DPF_GAIN_USAGE_NF_LSC_GAINS: use the noise function gains and the
* gains from LSC module
* @RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_GAINS: use only the gains from AWB module
* @RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_LSC_GAINS: use the gains from AWB and LSC module
* @RKISP1_CIF_ISP_DPF_GAIN_USAGE_MAX: upper border (only for an internal evaluation)
*/
enum rkisp1_cif_isp_dpf_gain_usage {
RKISP1_CIF_ISP_DPF_GAIN_USAGE_DISABLED,
RKISP1_CIF_ISP_DPF_GAIN_USAGE_NF_GAINS,
RKISP1_CIF_ISP_DPF_GAIN_USAGE_LSC_GAINS,
RKISP1_CIF_ISP_DPF_GAIN_USAGE_NF_LSC_GAINS,
RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_GAINS,
RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_LSC_GAINS,
RKISP1_CIF_ISP_DPF_GAIN_USAGE_MAX
};
/**
* enum rkisp1_cif_isp_dpf_rb_filtersize - Red and blue filter sizes
* @RKISP1_CIF_ISP_DPF_RB_FILTERSIZE_13x9: red and blue filter kernel size 13x9
* (means 7x5 active pixel)
* @RKISP1_CIF_ISP_DPF_RB_FILTERSIZE_9x9: red and blue filter kernel size 9x9
* (means 5x5 active pixel)
*/
enum rkisp1_cif_isp_dpf_rb_filtersize {
RKISP1_CIF_ISP_DPF_RB_FILTERSIZE_13x9,
RKISP1_CIF_ISP_DPF_RB_FILTERSIZE_9x9,
};
/**
* enum rkisp1_cif_isp_dpf_nll_scale_mode - dpf noise level scale mode
* @RKISP1_CIF_ISP_NLL_SCALE_LINEAR: use a linear scaling
* @RKISP1_CIF_ISP_NLL_SCALE_LOGARITHMIC: use a logarithmic scaling
*/
enum rkisp1_cif_isp_dpf_nll_scale_mode {
RKISP1_CIF_ISP_NLL_SCALE_LINEAR,
RKISP1_CIF_ISP_NLL_SCALE_LOGARITHMIC,
};
/**
* struct rkisp1_cif_isp_dpf_nll - Noise level lookup
*
* @coeff: Noise level Lookup coefficient
* @scale_mode: dpf noise level scale mode (from enum rkisp1_cif_isp_dpf_nll_scale_mode)
*/
struct rkisp1_cif_isp_dpf_nll {
__u16 coeff[RKISP1_CIF_ISP_DPF_MAX_NLF_COEFFS];
__u32 scale_mode;
};
/**
* struct rkisp1_cif_isp_dpf_rb_flt - Red blue filter config
*
* @fltsize: The filter size for the red and blue pixels
* (from enum rkisp1_cif_isp_dpf_rb_filtersize)
* @spatial_coeff: Spatial weights
* @r_enable: enable filter processing for red pixels
* @b_enable: enable filter processing for blue pixels
*/
struct rkisp1_cif_isp_dpf_rb_flt {
__u32 fltsize;
__u8 spatial_coeff[RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS];
__u8 r_enable;
__u8 b_enable;
};
/**
* struct rkisp1_cif_isp_dpf_g_flt - Green filter Configuration
*
* @spatial_coeff: Spatial weights
* @gr_enable: enable filter processing for green pixels in green/red lines
* @gb_enable: enable filter processing for green pixels in green/blue lines
*/
struct rkisp1_cif_isp_dpf_g_flt {
__u8 spatial_coeff[RKISP1_CIF_ISP_DPF_MAX_SPATIAL_COEFFS];
__u8 gr_enable;
__u8 gb_enable;
};
/**
* struct rkisp1_cif_isp_dpf_gain - Noise function Configuration
*
* @mode: dpf gain usage (from enum rkisp1_cif_isp_dpf_gain_usage)
* @nf_r_gain: Noise function Gain that replaces the AWB gain for red pixels
* @nf_b_gain: Noise function Gain that replaces the AWB gain for blue pixels
* @nf_gr_gain: Noise function Gain that replaces the AWB gain
* for green pixels in a red line
* @nf_gb_gain: Noise function Gain that replaces the AWB gain
* for green pixels in a blue line
*/
struct rkisp1_cif_isp_dpf_gain {
__u32 mode;
__u16 nf_r_gain;
__u16 nf_b_gain;
__u16 nf_gr_gain;
__u16 nf_gb_gain;
};
/**
* struct rkisp1_cif_isp_dpf_config - Configuration used by De-noising pre-filter
*
* @gain: noise function gain
* @g_flt: green filter config
* @rb_flt: red blue filter config
* @nll: noise level lookup
*/
struct rkisp1_cif_isp_dpf_config {
struct rkisp1_cif_isp_dpf_gain gain;
struct rkisp1_cif_isp_dpf_g_flt g_flt;
struct rkisp1_cif_isp_dpf_rb_flt rb_flt;
struct rkisp1_cif_isp_dpf_nll nll;
};
/**
* struct rkisp1_cif_isp_dpf_strength_config - strength of the filter
*
* @r: filter strength of the RED filter
* @g: filter strength of the GREEN filter
* @b: filter strength of the BLUE filter
*/
struct rkisp1_cif_isp_dpf_strength_config {
__u8 r;
__u8 g;
__u8 b;
};
/**
* struct rkisp1_cif_isp_isp_other_cfg - Parameters for some blocks in rockchip isp1
*
* @dpcc_config: Defect Pixel Cluster Correction config
* @bls_config: Black Level Subtraction config
* @sdg_config: sensor degamma config
* @lsc_config: Lens Shade config
* @awb_gain_config: Auto White balance gain config
* @flt_config: filter config
* @bdm_config: demosaic config
* @ctk_config: cross talk config
* @goc_config: gamma out config
* @bls_config: black level subtraction config
* @dpf_config: De-noising pre-filter config
* @dpf_strength_config: dpf strength config
* @cproc_config: color process config
* @ie_config: image effects config
*/
struct rkisp1_cif_isp_isp_other_cfg {
struct rkisp1_cif_isp_dpcc_config dpcc_config;
struct rkisp1_cif_isp_bls_config bls_config;
struct rkisp1_cif_isp_sdg_config sdg_config;
struct rkisp1_cif_isp_lsc_config lsc_config;
struct rkisp1_cif_isp_awb_gain_config awb_gain_config;
struct rkisp1_cif_isp_flt_config flt_config;
struct rkisp1_cif_isp_bdm_config bdm_config;
struct rkisp1_cif_isp_ctk_config ctk_config;
struct rkisp1_cif_isp_goc_config goc_config;
struct rkisp1_cif_isp_dpf_config dpf_config;
struct rkisp1_cif_isp_dpf_strength_config dpf_strength_config;
struct rkisp1_cif_isp_cproc_config cproc_config;
struct rkisp1_cif_isp_ie_config ie_config;
};
/**
* struct rkisp1_cif_isp_isp_meas_cfg - Rockchip ISP1 Measure Parameters
*
* @awb_meas_config: auto white balance config
* @hst_config: histogram config
* @aec_config: auto exposure config
* @afc_config: auto focus config
*/
struct rkisp1_cif_isp_isp_meas_cfg {
struct rkisp1_cif_isp_awb_meas_config awb_meas_config;
struct rkisp1_cif_isp_hst_config hst_config;
struct rkisp1_cif_isp_aec_config aec_config;
struct rkisp1_cif_isp_afc_config afc_config;
};
/**
* struct rkisp1_params_cfg - Rockchip ISP1 Input Parameters Meta Data
*
* @module_en_update: mask the enable bits of which module should be updated
* @module_ens: mask the enable value of each module; only the modules
* whose corresponding bit is set in module_en_update are updated
* @module_cfg_update: mask the config bits of which module should be updated
* @meas: measurement config
* @others: other config
*/
struct rkisp1_params_cfg {
__u32 module_en_update;
__u32 module_ens;
__u32 module_cfg_update;
struct rkisp1_cif_isp_isp_meas_cfg meas;
struct rkisp1_cif_isp_isp_other_cfg others;
};
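/*
 * Hypothetical usage sketch: enable the BLS module and update its
 * configuration in a parameters buffer before queueing it to the params
 * video node. The function name and the fixed black levels are
 * placeholders, not part of the UAPI.
 */
static inline void example_enable_bls(struct rkisp1_params_cfg *cfg)
{
	/* Request an update of the BLS enable bit and set it. */
	cfg->module_en_update |= RKISP1_CIF_ISP_MODULE_BLS;
	cfg->module_ens |= RKISP1_CIF_ISP_MODULE_BLS;

	/* Mark the BLS configuration itself as updated. */
	cfg->module_cfg_update |= RKISP1_CIF_ISP_MODULE_BLS;

	cfg->others.bls_config.enable_auto = 0;
	cfg->others.bls_config.fixed_val.r = 64; /* placeholder levels */
	cfg->others.bls_config.fixed_val.gr = 64;
	cfg->others.bls_config.fixed_val.gb = 64;
	cfg->others.bls_config.fixed_val.b = 64;
}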
/*---------- PART2: Measurement Statistics ------------*/
/**
* struct rkisp1_cif_isp_awb_meas - AWB measured values
*
* @cnt: White pixel count, number of "white pixels" found during last
* measurement
* @mean_y_or_g: Mean value of Y within window and frames,
* Green if RGB is selected.
* @mean_cb_or_b: Mean value of Cb within window and frames,
* Blue if RGB is selected.
* @mean_cr_or_r: Mean value of Cr within window and frames,
* Red if RGB is selected.
*/
struct rkisp1_cif_isp_awb_meas {
__u32 cnt;
__u8 mean_y_or_g;
__u8 mean_cb_or_b;
__u8 mean_cr_or_r;
};
/**
* struct rkisp1_cif_isp_awb_stat - statistics automatic white balance data
*
* @awb_mean: Mean measured data
*/
struct rkisp1_cif_isp_awb_stat {
struct rkisp1_cif_isp_awb_meas awb_mean[RKISP1_CIF_ISP_AWB_MAX_GRID];
};
/**
* struct rkisp1_cif_isp_bls_meas_val - BLS measured values
*
* @meas_r: Mean measured value for Bayer pattern R
* @meas_gr: Mean measured value for Bayer pattern Gr
* @meas_gb: Mean measured value for Bayer pattern Gb
* @meas_b: Mean measured value for Bayer pattern B
*/
struct rkisp1_cif_isp_bls_meas_val {
__u16 meas_r;
__u16 meas_gr;
__u16 meas_gb;
__u16 meas_b;
};
/**
* struct rkisp1_cif_isp_ae_stat - statistics auto exposure data
*
* @exp_mean: Mean luminance value of each measurement block
* @bls_val: BLS measured values
*
* The number of entries of @exp_mean depends on the hardware revision
* as is reported by the hw_revision field of the struct media_device_info
* that is returned by ioctl MEDIA_IOC_DEVICE_INFO.
*
* V10 has RKISP1_CIF_ISP_AE_MEAN_MAX_V10 entries, V12 has
* RKISP1_CIF_ISP_AE_MEAN_MAX_V12 entries. RKISP1_CIF_ISP_AE_MEAN_MAX is equal
* to the maximum of the two.
*
* Image is divided into 5x5 blocks on V10 and 9x9 blocks on V12.
*/
struct rkisp1_cif_isp_ae_stat {
__u8 exp_mean[RKISP1_CIF_ISP_AE_MEAN_MAX];
struct rkisp1_cif_isp_bls_meas_val bls_val;
};
/**
* struct rkisp1_cif_isp_af_meas_val - AF measured values
*
* @sum: sharpness value
* @lum: luminance value
*/
struct rkisp1_cif_isp_af_meas_val {
__u32 sum;
__u32 lum;
};
/**
* struct rkisp1_cif_isp_af_stat - statistics auto focus data
*
* @window: AF measured value of window x
*
* The module measures the sharpness in 3 windows of selectable size via
* register settings (ISP_AFM_*_A/B/C).
*/
struct rkisp1_cif_isp_af_stat {
struct rkisp1_cif_isp_af_meas_val window[RKISP1_CIF_ISP_AFM_MAX_WINDOWS];
};
/**
* struct rkisp1_cif_isp_hist_stat - statistics histogram data
*
* @hist_bins: measured bin counters. Each bin is a 20 bits unsigned fixed point
* type. Bits 0-4 are the fractional part and bits 5-19 are the
* integer part.
*
* The window of the measurement area is divided into 5x5 sub-windows for
* V10 and into 9x9 sub-windows for V12. The histogram is then computed for each
* sub-window independently and the final result is a weighted average of the
* histogram measurements on all sub-windows. The window of the measurement
* area and the weight of each sub-window are configurable using
* struct @rkisp1_cif_isp_hst_config.
*
* The histogram contains 16 bins in V10 and 32 bins in V12.
*
* The number of entries of @hist_bins depends on the hardware revision
* as is reported by the hw_revision field of the struct media_device_info
* that is returned by ioctl MEDIA_IOC_DEVICE_INFO.
*
* V10 has RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10 entries, V12 has
* RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12 entries. RKISP1_CIF_ISP_HIST_BIN_N_MAX is
* equal to the maximum of the two.
*/
struct rkisp1_cif_isp_hist_stat {
__u32 hist_bins[RKISP1_CIF_ISP_HIST_BIN_N_MAX];
};
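/*
 * Illustrative helper, not part of the UAPI: convert a histogram bin
 * counter from its 20-bit unsigned fixed-point representation (bits 0-4
 * fractional, bits 5-19 integer) to a floating-point count.
 */
static inline float example_hist_bin_to_float(__u32 bin)
{
	return (float)(bin & 0xfffff) / 32.0f; /* divide by 2^5 */
}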
/**
* struct rkisp1_cif_isp_stat - Rockchip ISP1 Statistics Data
*
* @awb: statistics data for automatic white balance
* @ae: statistics data for auto exposure
* @af: statistics data for auto focus
* @hist: statistics histogram data
*/
struct rkisp1_cif_isp_stat {
struct rkisp1_cif_isp_awb_stat awb;
struct rkisp1_cif_isp_ae_stat ae;
struct rkisp1_cif_isp_af_stat af;
struct rkisp1_cif_isp_hist_stat hist;
};
/**
* struct rkisp1_stat_buffer - Rockchip ISP1 Statistics Meta Data
*
* @meas_type: measurement types (RKISP1_CIF_ISP_STAT_* definitions)
* @frame_id: frame ID for sync
* @params: statistics data
*/
struct rkisp1_stat_buffer {
__u32 meas_type;
__u32 frame_id;
struct rkisp1_cif_isp_stat params;
};
#endif /* _RKISP1_CONFIG_H */
|
0 | repos/libcamera/include | repos/libcamera/include/linux/videodev2.h | /* SPDX-License-Identifier: ((GPL-2.0+ WITH Linux-syscall-note) OR BSD-3-Clause) */
/*
* Video for Linux Two header file
*
* Copyright (C) 1999-2012 the contributors
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* Alternatively you can redistribute this file under the terms of the
* BSD license as stated below:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. The names of its contributors may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Header file for v4l or V4L2 drivers and applications
* with public API.
* All kernel-specific stuff was moved to media/v4l2-dev.h, so
* no #if __KERNEL tests are allowed here
*
* See https://linuxtv.org for more info
*
* Author: Bill Dirks <[email protected]>
* Justin Schoeman
* Hans Verkuil <[email protected]>
* et al.
*/
#ifndef __LINUX_VIDEODEV2_H
#define __LINUX_VIDEODEV2_H
#include <sys/time.h>
#include <linux/ioctl.h>
#include <linux/types.h>
#include <linux/v4l2-common.h>
#include <linux/v4l2-controls.h>
/*
* Common stuff for both V4L1 and V4L2
* Moved from videodev.h
*/
#define VIDEO_MAX_FRAME 32
#define VIDEO_MAX_PLANES 8
/*
* M I S C E L L A N E O U S
*/
/* Four-character-code (FOURCC) */
#define v4l2_fourcc(a, b, c, d)\
((__u32)(a) | ((__u32)(b) << 8) | ((__u32)(c) << 16) | ((__u32)(d) << 24))
#define v4l2_fourcc_be(a, b, c, d) (v4l2_fourcc(a, b, c, d) | (1U << 31))
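/*
 * Illustrative check, assuming a C11 compiler for _Static_assert: the macro
 * packs the four characters little-endian, least significant byte first, so
 * 'YUYV' becomes 0x56595559. The _be variant only sets bit 31 to flag a
 * big-endian layout of an otherwise identical format.
 */
_Static_assert(v4l2_fourcc('Y', 'U', 'Y', 'V') == 0x56595559,
	       "v4l2_fourcc() packs characters little-endian");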
/*
* E N U M S
*/
enum v4l2_field {
V4L2_FIELD_ANY = 0, /* driver can choose from none,
top, bottom, interlaced
depending on whatever it thinks
is appropriate ... */
V4L2_FIELD_NONE = 1, /* this device has no fields ... */
V4L2_FIELD_TOP = 2, /* top field only */
V4L2_FIELD_BOTTOM = 3, /* bottom field only */
V4L2_FIELD_INTERLACED = 4, /* both fields interlaced */
V4L2_FIELD_SEQ_TB = 5, /* both fields sequential into one
buffer, top-bottom order */
V4L2_FIELD_SEQ_BT = 6, /* same as above + bottom-top order */
V4L2_FIELD_ALTERNATE = 7, /* both fields alternating into
separate buffers */
V4L2_FIELD_INTERLACED_TB = 8, /* both fields interlaced, top field
first and the top field is
transmitted first */
V4L2_FIELD_INTERLACED_BT = 9, /* both fields interlaced, top field
first and the bottom field is
transmitted first */
};
#define V4L2_FIELD_HAS_TOP(field) \
((field) == V4L2_FIELD_TOP ||\
(field) == V4L2_FIELD_INTERLACED ||\
(field) == V4L2_FIELD_INTERLACED_TB ||\
(field) == V4L2_FIELD_INTERLACED_BT ||\
(field) == V4L2_FIELD_SEQ_TB ||\
(field) == V4L2_FIELD_SEQ_BT)
#define V4L2_FIELD_HAS_BOTTOM(field) \
((field) == V4L2_FIELD_BOTTOM ||\
(field) == V4L2_FIELD_INTERLACED ||\
(field) == V4L2_FIELD_INTERLACED_TB ||\
(field) == V4L2_FIELD_INTERLACED_BT ||\
(field) == V4L2_FIELD_SEQ_TB ||\
(field) == V4L2_FIELD_SEQ_BT)
#define V4L2_FIELD_HAS_BOTH(field) \
((field) == V4L2_FIELD_INTERLACED ||\
(field) == V4L2_FIELD_INTERLACED_TB ||\
(field) == V4L2_FIELD_INTERLACED_BT ||\
(field) == V4L2_FIELD_SEQ_TB ||\
(field) == V4L2_FIELD_SEQ_BT)
#define V4L2_FIELD_HAS_T_OR_B(field) \
((field) == V4L2_FIELD_BOTTOM ||\
(field) == V4L2_FIELD_TOP ||\
(field) == V4L2_FIELD_ALTERNATE)
#define V4L2_FIELD_IS_INTERLACED(field) \
((field) == V4L2_FIELD_INTERLACED ||\
(field) == V4L2_FIELD_INTERLACED_TB ||\
(field) == V4L2_FIELD_INTERLACED_BT)
#define V4L2_FIELD_IS_SEQUENTIAL(field) \
((field) == V4L2_FIELD_SEQ_TB ||\
(field) == V4L2_FIELD_SEQ_BT)
enum v4l2_buf_type {
V4L2_BUF_TYPE_VIDEO_CAPTURE = 1,
V4L2_BUF_TYPE_VIDEO_OUTPUT = 2,
V4L2_BUF_TYPE_VIDEO_OVERLAY = 3,
V4L2_BUF_TYPE_VBI_CAPTURE = 4,
V4L2_BUF_TYPE_VBI_OUTPUT = 5,
V4L2_BUF_TYPE_SLICED_VBI_CAPTURE = 6,
V4L2_BUF_TYPE_SLICED_VBI_OUTPUT = 7,
V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY = 8,
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE = 9,
V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE = 10,
V4L2_BUF_TYPE_SDR_CAPTURE = 11,
V4L2_BUF_TYPE_SDR_OUTPUT = 12,
V4L2_BUF_TYPE_META_CAPTURE = 13,
V4L2_BUF_TYPE_META_OUTPUT = 14,
/* Deprecated, do not use */
V4L2_BUF_TYPE_PRIVATE = 0x80,
};
#define V4L2_TYPE_IS_MULTIPLANAR(type) \
((type) == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE \
|| (type) == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
#define V4L2_TYPE_IS_OUTPUT(type) \
((type) == V4L2_BUF_TYPE_VIDEO_OUTPUT \
|| (type) == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE \
|| (type) == V4L2_BUF_TYPE_VIDEO_OVERLAY \
|| (type) == V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY \
|| (type) == V4L2_BUF_TYPE_VBI_OUTPUT \
|| (type) == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT \
|| (type) == V4L2_BUF_TYPE_SDR_OUTPUT \
|| (type) == V4L2_BUF_TYPE_META_OUTPUT)
#define V4L2_TYPE_IS_CAPTURE(type) (!V4L2_TYPE_IS_OUTPUT(type))
enum v4l2_tuner_type {
V4L2_TUNER_RADIO = 1,
V4L2_TUNER_ANALOG_TV = 2,
V4L2_TUNER_DIGITAL_TV = 3,
V4L2_TUNER_SDR = 4,
V4L2_TUNER_RF = 5,
};
/* Deprecated, do not use */
#define V4L2_TUNER_ADC V4L2_TUNER_SDR
enum v4l2_memory {
V4L2_MEMORY_MMAP = 1,
V4L2_MEMORY_USERPTR = 2,
V4L2_MEMORY_OVERLAY = 3,
V4L2_MEMORY_DMABUF = 4,
};
/* see also http://vektor.theorem.ca/graphics/ycbcr/ */
enum v4l2_colorspace {
/*
* Default colorspace, i.e. let the driver figure it out.
* Can only be used with video capture.
*/
V4L2_COLORSPACE_DEFAULT = 0,
/* SMPTE 170M: used for broadcast NTSC/PAL SDTV */
V4L2_COLORSPACE_SMPTE170M = 1,
/* Obsolete pre-1998 SMPTE 240M HDTV standard, superseded by Rec 709 */
V4L2_COLORSPACE_SMPTE240M = 2,
/* Rec.709: used for HDTV */
V4L2_COLORSPACE_REC709 = 3,
/*
* Deprecated, do not use. No driver will ever return this. This was
* based on a misunderstanding of the bt878 datasheet.
*/
V4L2_COLORSPACE_BT878 = 4,
/*
* NTSC 1953 colorspace. This only makes sense when dealing with
* really, really old NTSC recordings. Superseded by SMPTE 170M.
*/
V4L2_COLORSPACE_470_SYSTEM_M = 5,
/*
* EBU Tech 3213 PAL/SECAM colorspace.
*/
V4L2_COLORSPACE_470_SYSTEM_BG = 6,
/*
* Effectively shorthand for V4L2_COLORSPACE_SRGB, V4L2_YCBCR_ENC_601
* and V4L2_QUANTIZATION_FULL_RANGE. To be used for (Motion-)JPEG.
*/
V4L2_COLORSPACE_JPEG = 7,
/* For RGB colorspaces such as produces by most webcams. */
V4L2_COLORSPACE_SRGB = 8,
/* opRGB colorspace */
V4L2_COLORSPACE_OPRGB = 9,
/* BT.2020 colorspace, used for UHDTV. */
V4L2_COLORSPACE_BT2020 = 10,
/* Raw colorspace: for RAW unprocessed images */
V4L2_COLORSPACE_RAW = 11,
/* DCI-P3 colorspace, used by cinema projectors */
V4L2_COLORSPACE_DCI_P3 = 12,
};
/*
* Determine how COLORSPACE_DEFAULT should map to a proper colorspace.
* This depends on whether this is a SDTV image (use SMPTE 170M), an
* HDTV image (use Rec. 709), or something else (use sRGB).
*/
#define V4L2_MAP_COLORSPACE_DEFAULT(is_sdtv, is_hdtv) \
((is_sdtv) ? V4L2_COLORSPACE_SMPTE170M : \
((is_hdtv) ? V4L2_COLORSPACE_REC709 : V4L2_COLORSPACE_SRGB))
enum v4l2_xfer_func {
/*
* Mapping of V4L2_XFER_FUNC_DEFAULT to actual transfer functions
* for the various colorspaces:
*
* V4L2_COLORSPACE_SMPTE170M, V4L2_COLORSPACE_470_SYSTEM_M,
* V4L2_COLORSPACE_470_SYSTEM_BG, V4L2_COLORSPACE_REC709 and
* V4L2_COLORSPACE_BT2020: V4L2_XFER_FUNC_709
*
* V4L2_COLORSPACE_SRGB, V4L2_COLORSPACE_JPEG: V4L2_XFER_FUNC_SRGB
*
* V4L2_COLORSPACE_OPRGB: V4L2_XFER_FUNC_OPRGB
*
* V4L2_COLORSPACE_SMPTE240M: V4L2_XFER_FUNC_SMPTE240M
*
* V4L2_COLORSPACE_RAW: V4L2_XFER_FUNC_NONE
*
* V4L2_COLORSPACE_DCI_P3: V4L2_XFER_FUNC_DCI_P3
*/
V4L2_XFER_FUNC_DEFAULT = 0,
V4L2_XFER_FUNC_709 = 1,
V4L2_XFER_FUNC_SRGB = 2,
V4L2_XFER_FUNC_OPRGB = 3,
V4L2_XFER_FUNC_SMPTE240M = 4,
V4L2_XFER_FUNC_NONE = 5,
V4L2_XFER_FUNC_DCI_P3 = 6,
V4L2_XFER_FUNC_SMPTE2084 = 7,
};
/*
* Determine how XFER_FUNC_DEFAULT should map to a proper transfer function.
* This depends on the colorspace.
*/
#define V4L2_MAP_XFER_FUNC_DEFAULT(colsp) \
((colsp) == V4L2_COLORSPACE_OPRGB ? V4L2_XFER_FUNC_OPRGB : \
((colsp) == V4L2_COLORSPACE_SMPTE240M ? V4L2_XFER_FUNC_SMPTE240M : \
((colsp) == V4L2_COLORSPACE_DCI_P3 ? V4L2_XFER_FUNC_DCI_P3 : \
((colsp) == V4L2_COLORSPACE_RAW ? V4L2_XFER_FUNC_NONE : \
((colsp) == V4L2_COLORSPACE_SRGB || (colsp) == V4L2_COLORSPACE_JPEG ? \
V4L2_XFER_FUNC_SRGB : V4L2_XFER_FUNC_709)))))
enum v4l2_ycbcr_encoding {
/*
* Mapping of V4L2_YCBCR_ENC_DEFAULT to actual encodings for the
* various colorspaces:
*
* V4L2_COLORSPACE_SMPTE170M, V4L2_COLORSPACE_470_SYSTEM_M,
* V4L2_COLORSPACE_470_SYSTEM_BG, V4L2_COLORSPACE_SRGB,
* V4L2_COLORSPACE_OPRGB and V4L2_COLORSPACE_JPEG: V4L2_YCBCR_ENC_601
*
* V4L2_COLORSPACE_REC709 and V4L2_COLORSPACE_DCI_P3: V4L2_YCBCR_ENC_709
*
* V4L2_COLORSPACE_BT2020: V4L2_YCBCR_ENC_BT2020
*
* V4L2_COLORSPACE_SMPTE240M: V4L2_YCBCR_ENC_SMPTE240M
*/
V4L2_YCBCR_ENC_DEFAULT = 0,
/* ITU-R 601 -- SDTV */
V4L2_YCBCR_ENC_601 = 1,
/* Rec. 709 -- HDTV */
V4L2_YCBCR_ENC_709 = 2,
/* ITU-R 601/EN 61966-2-4 Extended Gamut -- SDTV */
V4L2_YCBCR_ENC_XV601 = 3,
/* Rec. 709/EN 61966-2-4 Extended Gamut -- HDTV */
V4L2_YCBCR_ENC_XV709 = 4,
/*
* sYCC (Y'CbCr encoding of sRGB), identical to ENC_601. It was added
* originally due to a misunderstanding of the sYCC standard. It should
* not be used, instead use V4L2_YCBCR_ENC_601.
*/
V4L2_YCBCR_ENC_SYCC = 5,
/* BT.2020 Non-constant Luminance Y'CbCr */
V4L2_YCBCR_ENC_BT2020 = 6,
/* BT.2020 Constant Luminance Y'CbcCrc */
V4L2_YCBCR_ENC_BT2020_CONST_LUM = 7,
/* SMPTE 240M -- Obsolete HDTV */
V4L2_YCBCR_ENC_SMPTE240M = 8,
};
/*
* enum v4l2_hsv_encoding values should not collide with the ones from
* enum v4l2_ycbcr_encoding.
*/
enum v4l2_hsv_encoding {
/* Hue mapped to 0 - 179 */
V4L2_HSV_ENC_180 = 128,
/* Hue mapped to 0-255 */
V4L2_HSV_ENC_256 = 129,
};
/*
* Determine how YCBCR_ENC_DEFAULT should map to a proper Y'CbCr encoding.
* This depends on the colorspace.
*/
#define V4L2_MAP_YCBCR_ENC_DEFAULT(colsp) \
(((colsp) == V4L2_COLORSPACE_REC709 || \
(colsp) == V4L2_COLORSPACE_DCI_P3) ? V4L2_YCBCR_ENC_709 : \
((colsp) == V4L2_COLORSPACE_BT2020 ? V4L2_YCBCR_ENC_BT2020 : \
((colsp) == V4L2_COLORSPACE_SMPTE240M ? V4L2_YCBCR_ENC_SMPTE240M : \
V4L2_YCBCR_ENC_601)))
enum v4l2_quantization {
/*
* The default for R'G'B' quantization is always full range.
* For Y'CbCr the quantization is always limited range, except
* for COLORSPACE_JPEG: this is full range.
*/
V4L2_QUANTIZATION_DEFAULT = 0,
V4L2_QUANTIZATION_FULL_RANGE = 1,
V4L2_QUANTIZATION_LIM_RANGE = 2,
};
/*
* Determine how QUANTIZATION_DEFAULT should map to a proper quantization.
* This depends on whether the image is RGB or not, and on the colorspace.
* The Y'CbCr encoding is not used anymore, but is still there for backwards
* compatibility.
*/
#define V4L2_MAP_QUANTIZATION_DEFAULT(is_rgb_or_hsv, colsp, ycbcr_enc) \
(((is_rgb_or_hsv) || (colsp) == V4L2_COLORSPACE_JPEG) ? \
V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE)
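/*
 * Illustrative sketch, not part of the UAPI: resolve the default
 * colorimetry for a JPEG format using the mapping macros above. The
 * function name is a placeholder.
 */
static inline void example_jpeg_defaults(__u32 *xfer, __u32 *enc, __u32 *quant)
{
	const __u32 colsp = V4L2_COLORSPACE_JPEG;

	*xfer = V4L2_MAP_XFER_FUNC_DEFAULT(colsp);	/* V4L2_XFER_FUNC_SRGB */
	*enc = V4L2_MAP_YCBCR_ENC_DEFAULT(colsp);	/* V4L2_YCBCR_ENC_601 */
	/* JPEG implies full range even for non-RGB data. */
	*quant = V4L2_MAP_QUANTIZATION_DEFAULT(0, colsp, *enc);
}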
/*
* Deprecated names for opRGB colorspace (IEC 61966-2-5)
*
* WARNING: Please don't use these deprecated defines in your code, as
* there is a chance we have to remove them in the future.
*/
#define V4L2_COLORSPACE_ADOBERGB V4L2_COLORSPACE_OPRGB
#define V4L2_XFER_FUNC_ADOBERGB V4L2_XFER_FUNC_OPRGB
enum v4l2_priority {
V4L2_PRIORITY_UNSET = 0, /* not initialized */
V4L2_PRIORITY_BACKGROUND = 1,
V4L2_PRIORITY_INTERACTIVE = 2,
V4L2_PRIORITY_RECORD = 3,
V4L2_PRIORITY_DEFAULT = V4L2_PRIORITY_INTERACTIVE,
};
struct v4l2_rect {
__s32 left;
__s32 top;
__u32 width;
__u32 height;
};
struct v4l2_fract {
__u32 numerator;
__u32 denominator;
};
struct v4l2_area {
__u32 width;
__u32 height;
};
/**
* struct v4l2_capability - Describes V4L2 device caps returned by VIDIOC_QUERYCAP
*
* @driver: name of the driver module (e.g. "bttv")
* @card: name of the card (e.g. "Hauppauge WinTV")
* @bus_info: name of the bus (e.g. "PCI:" + pci_name(pci_dev) )
* @version: KERNEL_VERSION
* @capabilities: capabilities of the physical device as a whole
* @device_caps: capabilities accessed via this particular device (node)
* @reserved: reserved fields for future extensions
*/
struct v4l2_capability {
__u8 driver[16];
__u8 card[32];
__u8 bus_info[32];
__u32 version;
__u32 capabilities;
__u32 device_caps;
__u32 reserved[3];
};
/* Values for 'capabilities' field */
#define V4L2_CAP_VIDEO_CAPTURE 0x00000001 /* Is a video capture device */
#define V4L2_CAP_VIDEO_OUTPUT 0x00000002 /* Is a video output device */
#define V4L2_CAP_VIDEO_OVERLAY 0x00000004 /* Can do video overlay */
#define V4L2_CAP_VBI_CAPTURE 0x00000010 /* Is a raw VBI capture device */
#define V4L2_CAP_VBI_OUTPUT 0x00000020 /* Is a raw VBI output device */
#define V4L2_CAP_SLICED_VBI_CAPTURE 0x00000040 /* Is a sliced VBI capture device */
#define V4L2_CAP_SLICED_VBI_OUTPUT 0x00000080 /* Is a sliced VBI output device */
#define V4L2_CAP_RDS_CAPTURE 0x00000100 /* RDS data capture */
#define V4L2_CAP_VIDEO_OUTPUT_OVERLAY 0x00000200 /* Can do video output overlay */
#define V4L2_CAP_HW_FREQ_SEEK 0x00000400 /* Can do hardware frequency seek */
#define V4L2_CAP_RDS_OUTPUT 0x00000800 /* Is an RDS encoder */
/* Is a video capture device that supports multiplanar formats */
#define V4L2_CAP_VIDEO_CAPTURE_MPLANE 0x00001000
/* Is a video output device that supports multiplanar formats */
#define V4L2_CAP_VIDEO_OUTPUT_MPLANE 0x00002000
/* Is a video mem-to-mem device that supports multiplanar formats */
#define V4L2_CAP_VIDEO_M2M_MPLANE 0x00004000
/* Is a video mem-to-mem device */
#define V4L2_CAP_VIDEO_M2M 0x00008000
#define V4L2_CAP_TUNER 0x00010000 /* has a tuner */
#define V4L2_CAP_AUDIO 0x00020000 /* has audio support */
#define V4L2_CAP_RADIO 0x00040000 /* is a radio device */
#define V4L2_CAP_MODULATOR 0x00080000 /* has a modulator */
#define V4L2_CAP_SDR_CAPTURE 0x00100000 /* Is a SDR capture device */
#define V4L2_CAP_EXT_PIX_FORMAT 0x00200000 /* Supports the extended pixel format */
#define V4L2_CAP_SDR_OUTPUT 0x00400000 /* Is a SDR output device */
#define V4L2_CAP_META_CAPTURE 0x00800000 /* Is a metadata capture device */
#define V4L2_CAP_READWRITE 0x01000000 /* read/write system calls */
#define V4L2_CAP_STREAMING 0x04000000 /* streaming I/O ioctls */
#define V4L2_CAP_META_OUTPUT 0x08000000 /* Is a metadata output device */
#define V4L2_CAP_TOUCH 0x10000000 /* Is a touch device */
#define V4L2_CAP_IO_MC 0x20000000 /* Is input/output controlled by the media controller */
#define V4L2_CAP_DEVICE_CAPS 0x80000000 /* sets device capabilities field */
/*
* V I D E O I M A G E F O R M A T
*/
struct v4l2_pix_format {
__u32 width;
__u32 height;
__u32 pixelformat;
__u32 field; /* enum v4l2_field */
__u32 bytesperline; /* for padding, zero if unused */
__u32 sizeimage;
__u32 colorspace; /* enum v4l2_colorspace */
__u32 priv; /* private data, depends on pixelformat */
__u32 flags; /* format flags (V4L2_PIX_FMT_FLAG_*) */
union {
/* enum v4l2_ycbcr_encoding */
__u32 ycbcr_enc;
/* enum v4l2_hsv_encoding */
__u32 hsv_enc;
};
__u32 quantization; /* enum v4l2_quantization */
__u32 xfer_func; /* enum v4l2_xfer_func */
};
/* Pixel format FOURCC depth Description */
/* RGB formats (1 or 2 bytes per pixel) */
#define V4L2_PIX_FMT_RGB332 v4l2_fourcc('R', 'G', 'B', '1') /* 8 RGB-3-3-2 */
#define V4L2_PIX_FMT_RGB444 v4l2_fourcc('R', '4', '4', '4') /* 16 xxxxrrrr ggggbbbb */
#define V4L2_PIX_FMT_ARGB444 v4l2_fourcc('A', 'R', '1', '2') /* 16 aaaarrrr ggggbbbb */
#define V4L2_PIX_FMT_XRGB444 v4l2_fourcc('X', 'R', '1', '2') /* 16 xxxxrrrr ggggbbbb */
#define V4L2_PIX_FMT_RGBA444 v4l2_fourcc('R', 'A', '1', '2') /* 16 rrrrgggg bbbbaaaa */
#define V4L2_PIX_FMT_RGBX444 v4l2_fourcc('R', 'X', '1', '2') /* 16 rrrrgggg bbbbxxxx */
#define V4L2_PIX_FMT_ABGR444 v4l2_fourcc('A', 'B', '1', '2') /* 16 aaaabbbb ggggrrrr */
#define V4L2_PIX_FMT_XBGR444 v4l2_fourcc('X', 'B', '1', '2') /* 16 xxxxbbbb ggggrrrr */
#define V4L2_PIX_FMT_BGRA444 v4l2_fourcc('G', 'A', '1', '2') /* 16 bbbbgggg rrrraaaa */
#define V4L2_PIX_FMT_BGRX444 v4l2_fourcc('B', 'X', '1', '2') /* 16 bbbbgggg rrrrxxxx */
#define V4L2_PIX_FMT_RGB555 v4l2_fourcc('R', 'G', 'B', 'O') /* 16 RGB-5-5-5 */
#define V4L2_PIX_FMT_ARGB555 v4l2_fourcc('A', 'R', '1', '5') /* 16 ARGB-1-5-5-5 */
#define V4L2_PIX_FMT_XRGB555 v4l2_fourcc('X', 'R', '1', '5') /* 16 XRGB-1-5-5-5 */
#define V4L2_PIX_FMT_RGBA555 v4l2_fourcc('R', 'A', '1', '5') /* 16 RGBA-5-5-5-1 */
#define V4L2_PIX_FMT_RGBX555 v4l2_fourcc('R', 'X', '1', '5') /* 16 RGBX-5-5-5-1 */
#define V4L2_PIX_FMT_ABGR555 v4l2_fourcc('A', 'B', '1', '5') /* 16 ABGR-1-5-5-5 */
#define V4L2_PIX_FMT_XBGR555 v4l2_fourcc('X', 'B', '1', '5') /* 16 XBGR-1-5-5-5 */
#define V4L2_PIX_FMT_BGRA555 v4l2_fourcc('B', 'A', '1', '5') /* 16 BGRA-5-5-5-1 */
#define V4L2_PIX_FMT_BGRX555 v4l2_fourcc('B', 'X', '1', '5') /* 16 BGRX-5-5-5-1 */
#define V4L2_PIX_FMT_RGB565 v4l2_fourcc('R', 'G', 'B', 'P') /* 16 RGB-5-6-5 */
#define V4L2_PIX_FMT_RGB555X v4l2_fourcc('R', 'G', 'B', 'Q') /* 16 RGB-5-5-5 BE */
#define V4L2_PIX_FMT_ARGB555X v4l2_fourcc_be('A', 'R', '1', '5') /* 16 ARGB-5-5-5 BE */
#define V4L2_PIX_FMT_XRGB555X v4l2_fourcc_be('X', 'R', '1', '5') /* 16 XRGB-5-5-5 BE */
#define V4L2_PIX_FMT_RGB565X v4l2_fourcc('R', 'G', 'B', 'R') /* 16 RGB-5-6-5 BE */
/* RGB formats (3 or 4 bytes per pixel) */
#define V4L2_PIX_FMT_BGR666 v4l2_fourcc('B', 'G', 'R', 'H') /* 18 BGR-6-6-6 */
#define V4L2_PIX_FMT_BGR24 v4l2_fourcc('B', 'G', 'R', '3') /* 24 BGR-8-8-8 */
#define V4L2_PIX_FMT_RGB24 v4l2_fourcc('R', 'G', 'B', '3') /* 24 RGB-8-8-8 */
#define V4L2_PIX_FMT_BGR32 v4l2_fourcc('B', 'G', 'R', '4') /* 32 BGR-8-8-8-8 */
#define V4L2_PIX_FMT_ABGR32 v4l2_fourcc('A', 'R', '2', '4') /* 32 BGRA-8-8-8-8 */
#define V4L2_PIX_FMT_XBGR32 v4l2_fourcc('X', 'R', '2', '4') /* 32 BGRX-8-8-8-8 */
#define V4L2_PIX_FMT_BGRA32 v4l2_fourcc('R', 'A', '2', '4') /* 32 ABGR-8-8-8-8 */
#define V4L2_PIX_FMT_BGRX32 v4l2_fourcc('R', 'X', '2', '4') /* 32 XBGR-8-8-8-8 */
#define V4L2_PIX_FMT_RGB32 v4l2_fourcc('R', 'G', 'B', '4') /* 32 RGB-8-8-8-8 */
#define V4L2_PIX_FMT_RGBA32 v4l2_fourcc('A', 'B', '2', '4') /* 32 RGBA-8-8-8-8 */
#define V4L2_PIX_FMT_RGBX32 v4l2_fourcc('X', 'B', '2', '4') /* 32 RGBX-8-8-8-8 */
#define V4L2_PIX_FMT_ARGB32 v4l2_fourcc('B', 'A', '2', '4') /* 32 ARGB-8-8-8-8 */
#define V4L2_PIX_FMT_XRGB32 v4l2_fourcc('B', 'X', '2', '4') /* 32 XRGB-8-8-8-8 */
#define V4L2_PIX_FMT_RGBX1010102 v4l2_fourcc('R', 'X', '3', '0') /* 32 RGBX-10-10-10-2 */
#define V4L2_PIX_FMT_RGBA1010102 v4l2_fourcc('R', 'A', '3', '0') /* 32 RGBA-10-10-10-2 */
#define V4L2_PIX_FMT_ARGB2101010 v4l2_fourcc('A', 'R', '3', '0') /* 32 ARGB-2-10-10-10 */
/* RGB formats (6 or 8 bytes per pixel) */
#define V4L2_PIX_FMT_BGR48_12 v4l2_fourcc('B', '3', '1', '2') /* 48 BGR 12-bit per component */
#define V4L2_PIX_FMT_BGR48 v4l2_fourcc('B', 'G', 'R', '6') /* 48 BGR 16-bit per component */
#define V4L2_PIX_FMT_RGB48 v4l2_fourcc('R', 'G', 'B', '6') /* 48 RGB 16-bit per component */
#define V4L2_PIX_FMT_ABGR64_12 v4l2_fourcc('B', '4', '1', '2') /* 64 BGRA 12-bit per component */
/* Grey formats */
#define V4L2_PIX_FMT_GREY v4l2_fourcc('G', 'R', 'E', 'Y') /* 8 Greyscale */
#define V4L2_PIX_FMT_Y4 v4l2_fourcc('Y', '0', '4', ' ') /* 4 Greyscale */
#define V4L2_PIX_FMT_Y6 v4l2_fourcc('Y', '0', '6', ' ') /* 6 Greyscale */
#define V4L2_PIX_FMT_Y10 v4l2_fourcc('Y', '1', '0', ' ') /* 10 Greyscale */
#define V4L2_PIX_FMT_Y12 v4l2_fourcc('Y', '1', '2', ' ') /* 12 Greyscale */
#define V4L2_PIX_FMT_Y012 v4l2_fourcc('Y', '0', '1', '2') /* 12 Greyscale */
#define V4L2_PIX_FMT_Y14 v4l2_fourcc('Y', '1', '4', ' ') /* 14 Greyscale */
#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */
#define V4L2_PIX_FMT_Y16_BE v4l2_fourcc_be('Y', '1', '6', ' ') /* 16 Greyscale BE */
/* Grey bit-packed formats */
#define V4L2_PIX_FMT_Y10BPACK v4l2_fourcc('Y', '1', '0', 'B') /* 10 Greyscale bit-packed */
#define V4L2_PIX_FMT_Y10P v4l2_fourcc('Y', '1', '0', 'P') /* 10 Greyscale, MIPI RAW10 packed */
#define V4L2_PIX_FMT_IPU3_Y10 v4l2_fourcc('i', 'p', '3', 'y') /* IPU3 packed 10-bit greyscale */
#define V4L2_PIX_FMT_Y12P v4l2_fourcc('Y', '1', '2', 'P') /* 12 Greyscale, MIPI RAW12 packed */
#define V4L2_PIX_FMT_Y14P v4l2_fourcc('Y', '1', '4', 'P') /* 14 Greyscale, MIPI RAW14 packed */
/* Palette formats */
#define V4L2_PIX_FMT_PAL8 v4l2_fourcc('P', 'A', 'L', '8') /* 8 8-bit palette */
/* Chrominance formats */
#define V4L2_PIX_FMT_UV8 v4l2_fourcc('U', 'V', '8', ' ') /* 8 UV 4:4 */
/* Luminance+Chrominance formats */
#define V4L2_PIX_FMT_YUYV v4l2_fourcc('Y', 'U', 'Y', 'V') /* 16 YUV 4:2:2 */
#define V4L2_PIX_FMT_YYUV v4l2_fourcc('Y', 'Y', 'U', 'V') /* 16 YUV 4:2:2 */
#define V4L2_PIX_FMT_YVYU v4l2_fourcc('Y', 'V', 'Y', 'U') /* 16 YVU 4:2:2 */
#define V4L2_PIX_FMT_UYVY v4l2_fourcc('U', 'Y', 'V', 'Y') /* 16 YUV 4:2:2 */
#define V4L2_PIX_FMT_VYUY v4l2_fourcc('V', 'Y', 'U', 'Y') /* 16 YUV 4:2:2 */
#define V4L2_PIX_FMT_Y41P v4l2_fourcc('Y', '4', '1', 'P') /* 12 YUV 4:1:1 */
#define V4L2_PIX_FMT_YUV444 v4l2_fourcc('Y', '4', '4', '4') /* 16 xxxxyyyy uuuuvvvv */
#define V4L2_PIX_FMT_YUV555 v4l2_fourcc('Y', 'U', 'V', 'O') /* 16 YUV-5-5-5 */
#define V4L2_PIX_FMT_YUV565 v4l2_fourcc('Y', 'U', 'V', 'P') /* 16 YUV-5-6-5 */
#define V4L2_PIX_FMT_YUV24 v4l2_fourcc('Y', 'U', 'V', '3') /* 24 YUV-8-8-8 */
#define V4L2_PIX_FMT_YUV32 v4l2_fourcc('Y', 'U', 'V', '4') /* 32 YUV-8-8-8-8 */
#define V4L2_PIX_FMT_AYUV32 v4l2_fourcc('A', 'Y', 'U', 'V') /* 32 AYUV-8-8-8-8 */
#define V4L2_PIX_FMT_XYUV32 v4l2_fourcc('X', 'Y', 'U', 'V') /* 32 XYUV-8-8-8-8 */
#define V4L2_PIX_FMT_VUYA32 v4l2_fourcc('V', 'U', 'Y', 'A') /* 32 VUYA-8-8-8-8 */
#define V4L2_PIX_FMT_VUYX32 v4l2_fourcc('V', 'U', 'Y', 'X') /* 32 VUYX-8-8-8-8 */
#define V4L2_PIX_FMT_YUVA32 v4l2_fourcc('Y', 'U', 'V', 'A') /* 32 YUVA-8-8-8-8 */
#define V4L2_PIX_FMT_YUVX32 v4l2_fourcc('Y', 'U', 'V', 'X') /* 32 YUVX-8-8-8-8 */
#define V4L2_PIX_FMT_M420 v4l2_fourcc('M', '4', '2', '0') /* 12 YUV 4:2:0 2 lines y, 1 line uv interleaved */
#define V4L2_PIX_FMT_YUV48_12 v4l2_fourcc('Y', '3', '1', '2') /* 48 YUV 4:4:4 12-bit per component */
/*
* YCbCr packed format. For each Y2xx format, xx bits of valid data occupy the MSBs
* of the 16 bit components, and 16-xx bits of zero padding occupy the LSBs.
*/
#define V4L2_PIX_FMT_Y210 v4l2_fourcc('Y', '2', '1', '0') /* 32 YUYV 4:2:2 */
#define V4L2_PIX_FMT_Y212 v4l2_fourcc('Y', '2', '1', '2') /* 32 YUYV 4:2:2 */
#define V4L2_PIX_FMT_Y216 v4l2_fourcc('Y', '2', '1', '6') /* 32 YUYV 4:2:2 */
/* two planes -- one Y, one Cr + Cb interleaved */
#define V4L2_PIX_FMT_NV12 v4l2_fourcc('N', 'V', '1', '2') /* 12 Y/CbCr 4:2:0 */
#define V4L2_PIX_FMT_NV21 v4l2_fourcc('N', 'V', '2', '1') /* 12 Y/CrCb 4:2:0 */
#define V4L2_PIX_FMT_NV16 v4l2_fourcc('N', 'V', '1', '6') /* 16 Y/CbCr 4:2:2 */
#define V4L2_PIX_FMT_NV61 v4l2_fourcc('N', 'V', '6', '1') /* 16 Y/CrCb 4:2:2 */
#define V4L2_PIX_FMT_NV24 v4l2_fourcc('N', 'V', '2', '4') /* 24 Y/CbCr 4:4:4 */
#define V4L2_PIX_FMT_NV42 v4l2_fourcc('N', 'V', '4', '2') /* 24 Y/CrCb 4:4:4 */
#define V4L2_PIX_FMT_P010 v4l2_fourcc('P', '0', '1', '0') /* 24 Y/CbCr 4:2:0 10-bit per component */
#define V4L2_PIX_FMT_P012 v4l2_fourcc('P', '0', '1', '2') /* 24 Y/CbCr 4:2:0 12-bit per component */
/* two non contiguous planes - one Y, one Cr + Cb interleaved */
#define V4L2_PIX_FMT_NV12M v4l2_fourcc('N', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 */
#define V4L2_PIX_FMT_NV21M    v4l2_fourcc('N', 'M', '2', '1') /* 12  Y/CrCb 4:2:0  */
#define V4L2_PIX_FMT_NV16M v4l2_fourcc('N', 'M', '1', '6') /* 16 Y/CbCr 4:2:2 */
#define V4L2_PIX_FMT_NV61M v4l2_fourcc('N', 'M', '6', '1') /* 16 Y/CrCb 4:2:2 */
#define V4L2_PIX_FMT_P012M v4l2_fourcc('P', 'M', '1', '2') /* 24 Y/CbCr 4:2:0 12-bit per component */
/* three planes - Y Cb, Cr */
#define V4L2_PIX_FMT_YUV410 v4l2_fourcc('Y', 'U', 'V', '9') /* 9 YUV 4:1:0 */
#define V4L2_PIX_FMT_YVU410 v4l2_fourcc('Y', 'V', 'U', '9') /* 9 YVU 4:1:0 */
#define V4L2_PIX_FMT_YUV411P v4l2_fourcc('4', '1', '1', 'P') /* 12  YUV411 planar */
#define V4L2_PIX_FMT_YUV420 v4l2_fourcc('Y', 'U', '1', '2') /* 12 YUV 4:2:0 */
#define V4L2_PIX_FMT_YVU420 v4l2_fourcc('Y', 'V', '1', '2') /* 12 YVU 4:2:0 */
#define V4L2_PIX_FMT_YUV422P v4l2_fourcc('4', '2', '2', 'P') /* 16  YUV422 planar */
/* three non contiguous planes - Y, Cb, Cr */
#define V4L2_PIX_FMT_YUV420M v4l2_fourcc('Y', 'M', '1', '2') /* 12 YUV420 planar */
#define V4L2_PIX_FMT_YVU420M v4l2_fourcc('Y', 'M', '2', '1') /* 12 YVU420 planar */
#define V4L2_PIX_FMT_YUV422M v4l2_fourcc('Y', 'M', '1', '6') /* 16 YUV422 planar */
#define V4L2_PIX_FMT_YVU422M v4l2_fourcc('Y', 'M', '6', '1') /* 16 YVU422 planar */
#define V4L2_PIX_FMT_YUV444M v4l2_fourcc('Y', 'M', '2', '4') /* 24 YUV444 planar */
#define V4L2_PIX_FMT_YVU444M v4l2_fourcc('Y', 'M', '4', '2') /* 24 YVU444 planar */
/* Tiled YUV formats */
#define V4L2_PIX_FMT_NV12_4L4 v4l2_fourcc('V', 'T', '1', '2') /* 12 Y/CbCr 4:2:0 4x4 tiles */
#define V4L2_PIX_FMT_NV12_16L16 v4l2_fourcc('H', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 16x16 tiles */
#define V4L2_PIX_FMT_NV12_32L32 v4l2_fourcc('S', 'T', '1', '2') /* 12 Y/CbCr 4:2:0 32x32 tiles */
#define V4L2_PIX_FMT_NV15_4L4 v4l2_fourcc('V', 'T', '1', '5') /* 15 Y/CbCr 4:2:0 10-bit 4x4 tiles */
#define V4L2_PIX_FMT_P010_4L4 v4l2_fourcc('T', '0', '1', '0') /* 12 Y/CbCr 4:2:0 10-bit 4x4 macroblocks */
#define V4L2_PIX_FMT_NV12_8L128 v4l2_fourcc('A', 'T', '1', '2') /* Y/CbCr 4:2:0 8x128 tiles */
#define V4L2_PIX_FMT_NV12_10BE_8L128 v4l2_fourcc_be('A', 'X', '1', '2') /* Y/CbCr 4:2:0 10-bit 8x128 tiles */
/* Tiled YUV formats, non contiguous planes */
#define V4L2_PIX_FMT_NV12MT v4l2_fourcc('T', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 64x32 tiles */
#define V4L2_PIX_FMT_NV12MT_16X16 v4l2_fourcc('V', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 16x16 tiles */
#define V4L2_PIX_FMT_NV12M_8L128 v4l2_fourcc('N', 'A', '1', '2') /* Y/CbCr 4:2:0 8x128 tiles */
#define V4L2_PIX_FMT_NV12M_10BE_8L128 v4l2_fourcc_be('N', 'T', '1', '2') /* Y/CbCr 4:2:0 10-bit 8x128 tiles */
/* Bayer formats - see http://www.siliconimaging.com/RGB%20Bayer.htm */
#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */
#define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G', 'B', 'R', 'G') /* 8 GBGB.. RGRG.. */
#define V4L2_PIX_FMT_SGRBG8 v4l2_fourcc('G', 'R', 'B', 'G') /* 8 GRGR.. BGBG.. */
#define V4L2_PIX_FMT_SRGGB8 v4l2_fourcc('R', 'G', 'G', 'B') /* 8 RGRG.. GBGB.. */
#define V4L2_PIX_FMT_SBGGR10 v4l2_fourcc('B', 'G', '1', '0') /* 10 BGBG.. GRGR.. */
#define V4L2_PIX_FMT_SGBRG10 v4l2_fourcc('G', 'B', '1', '0') /* 10 GBGB.. RGRG.. */
#define V4L2_PIX_FMT_SGRBG10 v4l2_fourcc('B', 'A', '1', '0') /* 10 GRGR.. BGBG.. */
#define V4L2_PIX_FMT_SRGGB10 v4l2_fourcc('R', 'G', '1', '0') /* 10 RGRG.. GBGB.. */
/* 10bit raw bayer packed, 5 bytes for every 4 pixels */
#define V4L2_PIX_FMT_SBGGR10P v4l2_fourcc('p', 'B', 'A', 'A')
#define V4L2_PIX_FMT_SGBRG10P v4l2_fourcc('p', 'G', 'A', 'A')
#define V4L2_PIX_FMT_SGRBG10P v4l2_fourcc('p', 'g', 'A', 'A')
#define V4L2_PIX_FMT_SRGGB10P v4l2_fourcc('p', 'R', 'A', 'A')
/* 10bit raw bayer a-law compressed to 8 bits */
#define V4L2_PIX_FMT_SBGGR10ALAW8 v4l2_fourcc('a', 'B', 'A', '8')
#define V4L2_PIX_FMT_SGBRG10ALAW8 v4l2_fourcc('a', 'G', 'A', '8')
#define V4L2_PIX_FMT_SGRBG10ALAW8 v4l2_fourcc('a', 'g', 'A', '8')
#define V4L2_PIX_FMT_SRGGB10ALAW8 v4l2_fourcc('a', 'R', 'A', '8')
/* 10bit raw bayer DPCM compressed to 8 bits */
#define V4L2_PIX_FMT_SBGGR10DPCM8 v4l2_fourcc('b', 'B', 'A', '8')
#define V4L2_PIX_FMT_SGBRG10DPCM8 v4l2_fourcc('b', 'G', 'A', '8')
#define V4L2_PIX_FMT_SGRBG10DPCM8 v4l2_fourcc('B', 'D', '1', '0')
#define V4L2_PIX_FMT_SRGGB10DPCM8 v4l2_fourcc('b', 'R', 'A', '8')
#define V4L2_PIX_FMT_SBGGR12 v4l2_fourcc('B', 'G', '1', '2') /* 12 BGBG.. GRGR.. */
#define V4L2_PIX_FMT_SGBRG12 v4l2_fourcc('G', 'B', '1', '2') /* 12 GBGB.. RGRG.. */
#define V4L2_PIX_FMT_SGRBG12 v4l2_fourcc('B', 'A', '1', '2') /* 12 GRGR.. BGBG.. */
#define V4L2_PIX_FMT_SRGGB12 v4l2_fourcc('R', 'G', '1', '2') /* 12 RGRG.. GBGB.. */
/* 12bit raw bayer packed, 6 bytes for every 4 pixels */
#define V4L2_PIX_FMT_SBGGR12P v4l2_fourcc('p', 'B', 'C', 'C')
#define V4L2_PIX_FMT_SGBRG12P v4l2_fourcc('p', 'G', 'C', 'C')
#define V4L2_PIX_FMT_SGRBG12P v4l2_fourcc('p', 'g', 'C', 'C')
#define V4L2_PIX_FMT_SRGGB12P v4l2_fourcc('p', 'R', 'C', 'C')
#define V4L2_PIX_FMT_SBGGR14 v4l2_fourcc('B', 'G', '1', '4') /* 14 BGBG.. GRGR.. */
#define V4L2_PIX_FMT_SGBRG14 v4l2_fourcc('G', 'B', '1', '4') /* 14 GBGB.. RGRG.. */
#define V4L2_PIX_FMT_SGRBG14 v4l2_fourcc('G', 'R', '1', '4') /* 14 GRGR.. BGBG.. */
#define V4L2_PIX_FMT_SRGGB14 v4l2_fourcc('R', 'G', '1', '4') /* 14 RGRG.. GBGB.. */
/* 14bit raw bayer packed, 7 bytes for every 4 pixels */
#define V4L2_PIX_FMT_SBGGR14P v4l2_fourcc('p', 'B', 'E', 'E')
#define V4L2_PIX_FMT_SGBRG14P v4l2_fourcc('p', 'G', 'E', 'E')
#define V4L2_PIX_FMT_SGRBG14P v4l2_fourcc('p', 'g', 'E', 'E')
#define V4L2_PIX_FMT_SRGGB14P v4l2_fourcc('p', 'R', 'E', 'E')
#define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B', 'Y', 'R', '2') /* 16 BGBG.. GRGR.. */
#define V4L2_PIX_FMT_SGBRG16 v4l2_fourcc('G', 'B', '1', '6') /* 16 GBGB.. RGRG.. */
#define V4L2_PIX_FMT_SGRBG16 v4l2_fourcc('G', 'R', '1', '6') /* 16 GRGR.. BGBG.. */
#define V4L2_PIX_FMT_SRGGB16 v4l2_fourcc('R', 'G', '1', '6') /* 16 RGRG.. GBGB.. */
/* HSV formats */
#define V4L2_PIX_FMT_HSV24 v4l2_fourcc('H', 'S', 'V', '3')
#define V4L2_PIX_FMT_HSV32 v4l2_fourcc('H', 'S', 'V', '4')
/* compressed formats */
#define V4L2_PIX_FMT_MJPEG v4l2_fourcc('M', 'J', 'P', 'G') /* Motion-JPEG */
#define V4L2_PIX_FMT_JPEG v4l2_fourcc('J', 'P', 'E', 'G') /* JFIF JPEG */
#define V4L2_PIX_FMT_DV v4l2_fourcc('d', 'v', 's', 'd') /* 1394 */
#define V4L2_PIX_FMT_MPEG v4l2_fourcc('M', 'P', 'E', 'G') /* MPEG-1/2/4 Multiplexed */
#define V4L2_PIX_FMT_H264 v4l2_fourcc('H', '2', '6', '4') /* H264 with start codes */
#define V4L2_PIX_FMT_H264_NO_SC v4l2_fourcc('A', 'V', 'C', '1') /* H264 without start codes */
#define V4L2_PIX_FMT_H264_MVC v4l2_fourcc('M', '2', '6', '4') /* H264 MVC */
#define V4L2_PIX_FMT_H263 v4l2_fourcc('H', '2', '6', '3') /* H263 */
#define V4L2_PIX_FMT_MPEG1 v4l2_fourcc('M', 'P', 'G', '1') /* MPEG-1 ES */
#define V4L2_PIX_FMT_MPEG2 v4l2_fourcc('M', 'P', 'G', '2') /* MPEG-2 ES */
#define V4L2_PIX_FMT_MPEG2_SLICE v4l2_fourcc('M', 'G', '2', 'S') /* MPEG-2 parsed slice data */
#define V4L2_PIX_FMT_MPEG4 v4l2_fourcc('M', 'P', 'G', '4') /* MPEG-4 part 2 ES */
#define V4L2_PIX_FMT_XVID v4l2_fourcc('X', 'V', 'I', 'D') /* Xvid */
#define V4L2_PIX_FMT_VC1_ANNEX_G v4l2_fourcc('V', 'C', '1', 'G') /* SMPTE 421M Annex G compliant stream */
#define V4L2_PIX_FMT_VC1_ANNEX_L v4l2_fourcc('V', 'C', '1', 'L') /* SMPTE 421M Annex L compliant stream */
#define V4L2_PIX_FMT_VP8 v4l2_fourcc('V', 'P', '8', '0') /* VP8 */
#define V4L2_PIX_FMT_VP8_FRAME v4l2_fourcc('V', 'P', '8', 'F') /* VP8 parsed frame */
#define V4L2_PIX_FMT_VP9 v4l2_fourcc('V', 'P', '9', '0') /* VP9 */
#define V4L2_PIX_FMT_VP9_FRAME v4l2_fourcc('V', 'P', '9', 'F') /* VP9 parsed frame */
#define V4L2_PIX_FMT_HEVC v4l2_fourcc('H', 'E', 'V', 'C') /* HEVC aka H.265 */
#define V4L2_PIX_FMT_FWHT v4l2_fourcc('F', 'W', 'H', 'T') /* Fast Walsh Hadamard Transform (vicodec) */
#define V4L2_PIX_FMT_FWHT_STATELESS v4l2_fourcc('S', 'F', 'W', 'H') /* Stateless FWHT (vicodec) */
#define V4L2_PIX_FMT_H264_SLICE v4l2_fourcc('S', '2', '6', '4') /* H264 parsed slices */
#define V4L2_PIX_FMT_HEVC_SLICE v4l2_fourcc('S', '2', '6', '5') /* HEVC parsed slices */
#define V4L2_PIX_FMT_AV1_FRAME v4l2_fourcc('A', 'V', '1', 'F') /* AV1 parsed frame */
#define V4L2_PIX_FMT_SPK v4l2_fourcc('S', 'P', 'K', '0') /* Sorenson Spark */
#define V4L2_PIX_FMT_RV30 v4l2_fourcc('R', 'V', '3', '0') /* RealVideo 8 */
#define V4L2_PIX_FMT_RV40 v4l2_fourcc('R', 'V', '4', '0') /* RealVideo 9 & 10 */
/* Vendor-specific formats */
#define V4L2_PIX_FMT_CPIA1 v4l2_fourcc('C', 'P', 'I', 'A') /* cpia1 YUV */
#define V4L2_PIX_FMT_WNVA v4l2_fourcc('W', 'N', 'V', 'A') /* Winnov hw compress */
#define V4L2_PIX_FMT_SN9C10X v4l2_fourcc('S', '9', '1', '0') /* SN9C10x compression */
#define V4L2_PIX_FMT_SN9C20X_I420 v4l2_fourcc('S', '9', '2', '0') /* SN9C20x YUV 4:2:0 */
#define V4L2_PIX_FMT_PWC1 v4l2_fourcc('P', 'W', 'C', '1') /* pwc older webcam */
#define V4L2_PIX_FMT_PWC2 v4l2_fourcc('P', 'W', 'C', '2') /* pwc newer webcam */
#define V4L2_PIX_FMT_ET61X251 v4l2_fourcc('E', '6', '2', '5') /* ET61X251 compression */
#define V4L2_PIX_FMT_SPCA501 v4l2_fourcc('S', '5', '0', '1') /* YUYV per line */
#define V4L2_PIX_FMT_SPCA505 v4l2_fourcc('S', '5', '0', '5') /* YYUV per line */
#define V4L2_PIX_FMT_SPCA508 v4l2_fourcc('S', '5', '0', '8') /* YUVY per line */
#define V4L2_PIX_FMT_SPCA561 v4l2_fourcc('S', '5', '6', '1') /* compressed GBRG bayer */
#define V4L2_PIX_FMT_PAC207 v4l2_fourcc('P', '2', '0', '7') /* compressed BGGR bayer */
#define V4L2_PIX_FMT_MR97310A v4l2_fourcc('M', '3', '1', '0') /* compressed BGGR bayer */
#define V4L2_PIX_FMT_JL2005BCD v4l2_fourcc('J', 'L', '2', '0') /* compressed RGGB bayer */
#define V4L2_PIX_FMT_SN9C2028 v4l2_fourcc('S', 'O', 'N', 'X') /* compressed GBRG bayer */
#define V4L2_PIX_FMT_SQ905C v4l2_fourcc('9', '0', '5', 'C') /* compressed RGGB bayer */
#define V4L2_PIX_FMT_PJPG v4l2_fourcc('P', 'J', 'P', 'G') /* Pixart 73xx JPEG */
#define V4L2_PIX_FMT_OV511 v4l2_fourcc('O', '5', '1', '1') /* ov511 JPEG */
#define V4L2_PIX_FMT_OV518 v4l2_fourcc('O', '5', '1', '8') /* ov518 JPEG */
#define V4L2_PIX_FMT_STV0680 v4l2_fourcc('S', '6', '8', '0') /* stv0680 bayer */
#define V4L2_PIX_FMT_TM6000 v4l2_fourcc('T', 'M', '6', '0') /* tm5600/tm60x0 */
#define V4L2_PIX_FMT_CIT_YYVYUY v4l2_fourcc('C', 'I', 'T', 'V') /* one line of Y then one line of VYUY */
#define V4L2_PIX_FMT_KONICA420 v4l2_fourcc('K', 'O', 'N', 'I') /* YUV420 planar in blocks of 256 pixels */
#define V4L2_PIX_FMT_JPGL v4l2_fourcc('J', 'P', 'G', 'L') /* JPEG-Lite */
#define V4L2_PIX_FMT_SE401 v4l2_fourcc('S', '4', '0', '1') /* se401 janggu compressed rgb */
#define V4L2_PIX_FMT_S5C_UYVY_JPG v4l2_fourcc('S', '5', 'C', 'I') /* S5C73M3 interleaved UYVY/JPEG */
#define V4L2_PIX_FMT_Y8I v4l2_fourcc('Y', '8', 'I', ' ') /* Greyscale 8-bit L/R interleaved */
#define V4L2_PIX_FMT_Y12I v4l2_fourcc('Y', '1', '2', 'I') /* Greyscale 12-bit L/R interleaved */
#define V4L2_PIX_FMT_Z16 v4l2_fourcc('Z', '1', '6', ' ') /* Depth data 16-bit */
#define V4L2_PIX_FMT_MT21C v4l2_fourcc('M', 'T', '2', '1') /* Mediatek compressed block mode */
#define V4L2_PIX_FMT_MM21 v4l2_fourcc('M', 'M', '2', '1') /* Mediatek 8-bit block mode, two non-contiguous planes */
#define V4L2_PIX_FMT_MT2110T v4l2_fourcc('M', 'T', '2', 'T') /* Mediatek 10-bit block tile mode */
#define V4L2_PIX_FMT_MT2110R v4l2_fourcc('M', 'T', '2', 'R') /* Mediatek 10-bit block raster mode */
#define V4L2_PIX_FMT_INZI v4l2_fourcc('I', 'N', 'Z', 'I') /* Intel Planar Greyscale 10-bit and Depth 16-bit */
#define V4L2_PIX_FMT_CNF4 v4l2_fourcc('C', 'N', 'F', '4') /* Intel 4-bit packed depth confidence information */
#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H', 'I', '2', '4') /* BTTV 8-bit dithered RGB */
#define V4L2_PIX_FMT_QC08C v4l2_fourcc('Q', '0', '8', 'C') /* Qualcomm 8-bit compressed */
#define V4L2_PIX_FMT_QC10C v4l2_fourcc('Q', '1', '0', 'C') /* Qualcomm 10-bit compressed */
#define V4L2_PIX_FMT_AJPG v4l2_fourcc('A', 'J', 'P', 'G') /* Aspeed JPEG */
#define V4L2_PIX_FMT_HEXTILE v4l2_fourcc('H', 'X', 'T', 'L') /* Hextile compressed */
/* 10bit raw packed, 32 bytes for every 25 pixels, last LSB 6 bits unused */
#define V4L2_PIX_FMT_IPU3_SBGGR10 v4l2_fourcc('i', 'p', '3', 'b') /* IPU3 packed 10-bit BGGR bayer */
#define V4L2_PIX_FMT_IPU3_SGBRG10 v4l2_fourcc('i', 'p', '3', 'g') /* IPU3 packed 10-bit GBRG bayer */
#define V4L2_PIX_FMT_IPU3_SGRBG10 v4l2_fourcc('i', 'p', '3', 'G') /* IPU3 packed 10-bit GRBG bayer */
#define V4L2_PIX_FMT_IPU3_SRGGB10 v4l2_fourcc('i', 'p', '3', 'r') /* IPU3 packed 10-bit RGGB bayer */
/* Raspberry Pi PiSP compressed formats. */
#define V4L2_PIX_FMT_PISP_COMP1_RGGB v4l2_fourcc('P', 'C', '1', 'R') /* PiSP 8-bit mode 1 compressed RGGB bayer */
#define V4L2_PIX_FMT_PISP_COMP1_GRBG v4l2_fourcc('P', 'C', '1', 'G') /* PiSP 8-bit mode 1 compressed GRBG bayer */
#define V4L2_PIX_FMT_PISP_COMP1_GBRG v4l2_fourcc('P', 'C', '1', 'g') /* PiSP 8-bit mode 1 compressed GBRG bayer */
#define V4L2_PIX_FMT_PISP_COMP1_BGGR v4l2_fourcc('P', 'C', '1', 'B') /* PiSP 8-bit mode 1 compressed BGGR bayer */
#define V4L2_PIX_FMT_PISP_COMP1_MONO v4l2_fourcc('P', 'C', '1', 'M') /* PiSP 8-bit mode 1 compressed monochrome */
#define V4L2_PIX_FMT_PISP_COMP2_RGGB v4l2_fourcc('P', 'C', '2', 'R') /* PiSP 8-bit mode 2 compressed RGGB bayer */
#define V4L2_PIX_FMT_PISP_COMP2_GRBG v4l2_fourcc('P', 'C', '2', 'G') /* PiSP 8-bit mode 2 compressed GRBG bayer */
#define V4L2_PIX_FMT_PISP_COMP2_GBRG v4l2_fourcc('P', 'C', '2', 'g') /* PiSP 8-bit mode 2 compressed GBRG bayer */
#define V4L2_PIX_FMT_PISP_COMP2_BGGR v4l2_fourcc('P', 'C', '2', 'B') /* PiSP 8-bit mode 2 compressed BGGR bayer */
#define V4L2_PIX_FMT_PISP_COMP2_MONO v4l2_fourcc('P', 'C', '2', 'M') /* PiSP 8-bit mode 2 compressed monochrome */
/* SDR formats - used only for Software Defined Radio devices */
#define V4L2_SDR_FMT_CU8 v4l2_fourcc('C', 'U', '0', '8') /* IQ u8 */
#define V4L2_SDR_FMT_CU16LE v4l2_fourcc('C', 'U', '1', '6') /* IQ u16le */
#define V4L2_SDR_FMT_CS8 v4l2_fourcc('C', 'S', '0', '8') /* complex s8 */
#define V4L2_SDR_FMT_CS14LE v4l2_fourcc('C', 'S', '1', '4') /* complex s14le */
#define V4L2_SDR_FMT_RU12LE v4l2_fourcc('R', 'U', '1', '2') /* real u12le */
#define V4L2_SDR_FMT_PCU16BE v4l2_fourcc('P', 'C', '1', '6') /* planar complex u16be */
#define V4L2_SDR_FMT_PCU18BE v4l2_fourcc('P', 'C', '1', '8') /* planar complex u18be */
#define V4L2_SDR_FMT_PCU20BE v4l2_fourcc('P', 'C', '2', '0') /* planar complex u20be */
/* Touch formats - used for Touch devices */
#define V4L2_TCH_FMT_DELTA_TD16 v4l2_fourcc('T', 'D', '1', '6') /* 16-bit signed deltas */
#define V4L2_TCH_FMT_DELTA_TD08 v4l2_fourcc('T', 'D', '0', '8') /* 8-bit signed deltas */
#define V4L2_TCH_FMT_TU16 v4l2_fourcc('T', 'U', '1', '6') /* 16-bit unsigned touch data */
#define V4L2_TCH_FMT_TU08 v4l2_fourcc('T', 'U', '0', '8') /* 8-bit unsigned touch data */
/* Meta-data formats */
#define V4L2_META_FMT_VSP1_HGO v4l2_fourcc('V', 'S', 'P', 'H') /* R-Car VSP1 1-D Histogram */
#define V4L2_META_FMT_VSP1_HGT v4l2_fourcc('V', 'S', 'P', 'T') /* R-Car VSP1 2-D Histogram */
#define V4L2_META_FMT_UVC v4l2_fourcc('U', 'V', 'C', 'H') /* UVC Payload Header metadata */
#define V4L2_META_FMT_D4XX v4l2_fourcc('D', '4', 'X', 'X') /* D4XX Payload Header metadata */
#define V4L2_META_FMT_VIVID v4l2_fourcc('V', 'I', 'V', 'D') /* Vivid Metadata */
#define V4L2_META_FMT_SENSOR_DATA v4l2_fourcc('S', 'E', 'N', 'S') /* Sensor Ancillary metadata */
#define V4L2_META_FMT_BCM2835_ISP_STATS v4l2_fourcc('B', 'S', 'T', 'A') /* BCM2835 ISP image statistics output */
/* Vendor specific - used for RK_ISP1 camera sub-system */
#define V4L2_META_FMT_RK_ISP1_PARAMS v4l2_fourcc('R', 'K', '1', 'P') /* Rockchip ISP1 3A Parameters */
#define V4L2_META_FMT_RK_ISP1_STAT_3A v4l2_fourcc('R', 'K', '1', 'S') /* Rockchip ISP1 3A Statistics */
/* The metadata format identifier for BE configuration buffers. */
#define V4L2_META_FMT_RPI_BE_CFG v4l2_fourcc('R', 'P', 'B', 'C')
/* The metadata format identifier for FE configuration buffers. */
#define V4L2_META_FMT_RPI_FE_CFG v4l2_fourcc('R', 'P', 'F', 'C')
/* The metadata format identifier for FE stats buffers. */
#define V4L2_META_FMT_RPI_FE_STATS v4l2_fourcc('R', 'P', 'F', 'S')
/* priv field value to indicate that subsequent fields are valid. */
#define V4L2_PIX_FMT_PRIV_MAGIC 0xfeedcafe
/* Flags */
#define V4L2_PIX_FMT_FLAG_PREMUL_ALPHA 0x00000001
#define V4L2_PIX_FMT_FLAG_SET_CSC 0x00000002
/*
* F O R M A T E N U M E R A T I O N
*/
struct v4l2_fmtdesc {
__u32 index; /* Format number */
__u32 type; /* enum v4l2_buf_type */
__u32 flags;
__u8 description[32]; /* Description string */
__u32 pixelformat; /* Format fourcc */
__u32 mbus_code; /* Media bus code */
__u32 reserved[3];
};
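/*
 * Example: enumerating the formats a capture queue supports. This is an
 * illustrative sketch, not part of the UAPI; it assumes a userspace program
 * with <stdio.h>, <string.h>, <sys/ioctl.h> and an already opened video
 * node descriptor 'fd'. The VIDIOC_ENUM_FMT ioctl is defined later in this
 * header; the driver fails with EINVAL once 'index' runs past the last
 * supported format.
 */
#if 0
static void enum_capture_formats(int fd)
{
	struct v4l2_fmtdesc fmt;

	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	while (ioctl(fd, VIDIOC_ENUM_FMT, &fmt) == 0) {
		printf("format %u: %s (fourcc 0x%08x)\n", fmt.index,
		       (const char *)fmt.description, fmt.pixelformat);
		fmt.index++;
	}
}
#endif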
#define V4L2_FMT_FLAG_COMPRESSED 0x0001
#define V4L2_FMT_FLAG_EMULATED 0x0002
#define V4L2_FMT_FLAG_CONTINUOUS_BYTESTREAM 0x0004
#define V4L2_FMT_FLAG_DYN_RESOLUTION 0x0008
#define V4L2_FMT_FLAG_ENC_CAP_FRAME_INTERVAL 0x0010
#define V4L2_FMT_FLAG_CSC_COLORSPACE 0x0020
#define V4L2_FMT_FLAG_CSC_XFER_FUNC 0x0040
#define V4L2_FMT_FLAG_CSC_YCBCR_ENC 0x0080
#define V4L2_FMT_FLAG_CSC_HSV_ENC V4L2_FMT_FLAG_CSC_YCBCR_ENC
#define V4L2_FMT_FLAG_CSC_QUANTIZATION 0x0100
#define V4L2_FMT_FLAG_META_LINE_BASED 0x0200
/* Frame Size and frame rate enumeration */
/*
* F R A M E S I Z E E N U M E R A T I O N
*/
enum v4l2_frmsizetypes {
V4L2_FRMSIZE_TYPE_DISCRETE = 1,
V4L2_FRMSIZE_TYPE_CONTINUOUS = 2,
V4L2_FRMSIZE_TYPE_STEPWISE = 3,
};
struct v4l2_frmsize_discrete {
__u32 width; /* Frame width [pixel] */
__u32 height; /* Frame height [pixel] */
};
struct v4l2_frmsize_stepwise {
__u32 min_width; /* Minimum frame width [pixel] */
__u32 max_width; /* Maximum frame width [pixel] */
__u32 step_width; /* Frame width step size [pixel] */
__u32 min_height; /* Minimum frame height [pixel] */
__u32 max_height; /* Maximum frame height [pixel] */
__u32 step_height; /* Frame height step size [pixel] */
};
struct v4l2_frmsizeenum {
__u32 index; /* Frame size number */
__u32 pixel_format; /* Pixel format */
__u32 type; /* Frame size type the device supports. */
union { /* Frame size */
struct v4l2_frmsize_discrete discrete;
struct v4l2_frmsize_stepwise stepwise;
};
__u32 reserved[2]; /* Reserved space for future use */
};
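/*
 * Example: enumerating frame sizes for a given pixel format. Illustrative
 * sketch only; assumes an open video node 'fd' and the
 * VIDIOC_ENUM_FRAMESIZES ioctl defined later in this header. Drivers return
 * either a list of DISCRETE sizes or a single STEPWISE/CONTINUOUS range at
 * index 0.
 */
#if 0
static void enum_frame_sizes(int fd, __u32 pixel_format)
{
	struct v4l2_frmsizeenum fsize;

	memset(&fsize, 0, sizeof(fsize));
	fsize.pixel_format = pixel_format;
	while (ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &fsize) == 0) {
		if (fsize.type == V4L2_FRMSIZE_TYPE_DISCRETE)
			printf("%ux%u\n", fsize.discrete.width,
			       fsize.discrete.height);
		else	/* CONTINUOUS or STEPWISE: one range entry */
			printf("%ux%u .. %ux%u\n",
			       fsize.stepwise.min_width,
			       fsize.stepwise.min_height,
			       fsize.stepwise.max_width,
			       fsize.stepwise.max_height);
		fsize.index++;
	}
}
#endif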
/*
* F R A M E R A T E E N U M E R A T I O N
*/
enum v4l2_frmivaltypes {
V4L2_FRMIVAL_TYPE_DISCRETE = 1,
V4L2_FRMIVAL_TYPE_CONTINUOUS = 2,
V4L2_FRMIVAL_TYPE_STEPWISE = 3,
};
struct v4l2_frmival_stepwise {
struct v4l2_fract min; /* Minimum frame interval [s] */
struct v4l2_fract max; /* Maximum frame interval [s] */
struct v4l2_fract step; /* Frame interval step size [s] */
};
struct v4l2_frmivalenum {
__u32 index; /* Frame format index */
__u32 pixel_format; /* Pixel format */
__u32 width; /* Frame width */
__u32 height; /* Frame height */
__u32 type; /* Frame interval type the device supports. */
union { /* Frame interval */
struct v4l2_fract discrete;
struct v4l2_frmival_stepwise stepwise;
};
__u32 reserved[2]; /* Reserved space for future use */
};
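/*
 * Example: enumerating frame intervals for a given format and frame size.
 * Illustrative sketch only; assumes an open video node 'fd' and the
 * VIDIOC_ENUM_FRAMEINTERVALS ioctl defined later in this header.
 */
#if 0
static void enum_frame_intervals(int fd, __u32 fourcc, __u32 w, __u32 h)
{
	struct v4l2_frmivalenum fival;

	memset(&fival, 0, sizeof(fival));
	fival.pixel_format = fourcc;
	fival.width = w;
	fival.height = h;
	while (ioctl(fd, VIDIOC_ENUM_FRAMEINTERVALS, &fival) == 0) {
		if (fival.type == V4L2_FRMIVAL_TYPE_DISCRETE)
			printf("%u/%u s per frame\n",
			       fival.discrete.numerator,
			       fival.discrete.denominator);
		fival.index++;
	}
}
#endif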
/*
* T I M E C O D E
*/
struct v4l2_timecode {
__u32 type;
__u32 flags;
__u8 frames;
__u8 seconds;
__u8 minutes;
__u8 hours;
__u8 userbits[4];
};
/* Type */
#define V4L2_TC_TYPE_24FPS 1
#define V4L2_TC_TYPE_25FPS 2
#define V4L2_TC_TYPE_30FPS 3
#define V4L2_TC_TYPE_50FPS 4
#define V4L2_TC_TYPE_60FPS 5
/* Flags */
#define V4L2_TC_FLAG_DROPFRAME 0x0001 /* "drop-frame" mode */
#define V4L2_TC_FLAG_COLORFRAME 0x0002
#define V4L2_TC_USERBITS_field 0x000C
#define V4L2_TC_USERBITS_USERDEFINED 0x0000
#define V4L2_TC_USERBITS_8BITCHARS 0x0008
/* The above is based on SMPTE timecodes */
struct v4l2_jpegcompression {
int quality;
int APPn; /* Number of APP segment to be written,
* must be 0..15 */
int APP_len; /* Length of data in JPEG APPn segment */
char APP_data[60]; /* Data in the JPEG APPn segment. */
int COM_len; /* Length of data in JPEG COM segment */
char COM_data[60]; /* Data in JPEG COM segment */
__u32 jpeg_markers; /* Which markers should go into the JPEG
* output. Unless you know exactly what
* you are doing, leave them untouched.
* Including fewer markers will make the
* resulting code smaller, but fewer
* applications will be able to read it.
* The presence of the APP and COM marker
* is influenced by APP_len and COM_len
* ONLY, not by this property! */
#define V4L2_JPEG_MARKER_DHT (1<<3) /* Define Huffman Tables */
#define V4L2_JPEG_MARKER_DQT (1<<4) /* Define Quantization Tables */
#define V4L2_JPEG_MARKER_DRI (1<<5) /* Define Restart Interval */
#define V4L2_JPEG_MARKER_COM (1<<6) /* Comment segment */
#define V4L2_JPEG_MARKER_APP (1<<7) /* App segment, driver will
* always use APP0 */
};
/*
* M E M O R Y - M A P P I N G B U F F E R S
*/
struct v4l2_requestbuffers {
__u32 count;
__u32 type; /* enum v4l2_buf_type */
__u32 memory; /* enum v4l2_memory */
__u32 capabilities;
__u8 flags;
__u8 reserved[3];
};
#define V4L2_MEMORY_FLAG_NON_COHERENT (1 << 0)
/* capabilities for struct v4l2_requestbuffers and v4l2_create_buffers */
#define V4L2_BUF_CAP_SUPPORTS_MMAP (1 << 0)
#define V4L2_BUF_CAP_SUPPORTS_USERPTR (1 << 1)
#define V4L2_BUF_CAP_SUPPORTS_DMABUF (1 << 2)
#define V4L2_BUF_CAP_SUPPORTS_REQUESTS (1 << 3)
#define V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS (1 << 4)
#define V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF (1 << 5)
#define V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS (1 << 6)
#define V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS (1 << 7)
#define V4L2_BUF_CAP_SUPPORTS_REMOVE_BUFS (1 << 8)
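/*
 * Example: requesting four memory-mapped buffers on a capture queue.
 * Illustrative sketch only; assumes an open video node 'fd' and the
 * VIDIOC_REQBUFS ioctl defined later in this header. The driver may adjust
 * 'count', so the returned value must be checked by the caller.
 */
#if 0
static int request_mmap_buffers(int fd)
{
	struct v4l2_requestbuffers req;

	memset(&req, 0, sizeof(req));
	req.count = 4;
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_MMAP;
	if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
		return -1;
	return (int)req.count; /* actual number of buffers allocated */
}
#endif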
/**
* struct v4l2_plane - plane info for multi-planar buffers
* @bytesused: number of bytes occupied by data in the plane (payload)
* @length: size of this plane (NOT the payload) in bytes
* @m.mem_offset: when memory in the associated struct v4l2_buffer is
* V4L2_MEMORY_MMAP, equals the offset from the start of
* the device memory for this plane (or is a "cookie" that
* should be passed to mmap() called on the video node)
* @m.userptr: when memory is V4L2_MEMORY_USERPTR, a userspace pointer
* pointing to this plane
* @m.fd: when memory is V4L2_MEMORY_DMABUF, a userspace file
* descriptor associated with this plane
* @m: union of @mem_offset, @userptr and @fd
* @data_offset: offset in the plane to the start of data; usually 0,
* unless there is a header in front of the data
* @reserved: drivers and applications must zero this array
*
 * Multi-planar buffers consist of one or more planes, e.g. a YCbCr buffer
* with two planes can have one plane for Y, and another for interleaved CbCr
* components. Each plane can reside in a separate memory buffer, or even in
* a completely separate memory node (e.g. in embedded devices).
*/
struct v4l2_plane {
__u32 bytesused;
__u32 length;
union {
__u32 mem_offset;
unsigned long userptr;
__s32 fd;
} m;
__u32 data_offset;
__u32 reserved[11];
};
/**
* struct v4l2_buffer - video buffer info
* @index: id number of the buffer
* @type: enum v4l2_buf_type; buffer type (type == *_MPLANE for
* multiplanar buffers);
* @bytesused: number of bytes occupied by data in the buffer (payload);
* unused (set to 0) for multiplanar buffers
* @flags: buffer informational flags
* @field: enum v4l2_field; field order of the image in the buffer
* @timestamp: frame timestamp
* @timecode: frame timecode
* @sequence: sequence count of this frame
* @memory: enum v4l2_memory; the method, in which the actual video data is
* passed
* @m.offset: for non-multiplanar buffers with memory == V4L2_MEMORY_MMAP;
* offset from the start of the device memory for this plane,
* (or a "cookie" that should be passed to mmap() as offset)
* @m.userptr: for non-multiplanar buffers with memory == V4L2_MEMORY_USERPTR;
* a userspace pointer pointing to this buffer
* @m.fd: for non-multiplanar buffers with memory == V4L2_MEMORY_DMABUF;
* a userspace file descriptor associated with this buffer
* @m.planes: for multiplanar buffers; userspace pointer to the array of plane
* info structs for this buffer
* @m: union of @offset, @userptr, @planes and @fd
* @length: size in bytes of the buffer (NOT its payload) for single-plane
* buffers (when type != *_MPLANE); number of elements in the
* planes array for multi-plane buffers
* @reserved2: drivers and applications must zero this field
* @request_fd: fd of the request that this buffer should use
* @reserved: for backwards compatibility with applications that do not know
* about @request_fd
*
* Contains data exchanged by application and driver using one of the Streaming
* I/O methods.
*/
struct v4l2_buffer {
__u32 index;
__u32 type;
__u32 bytesused;
__u32 flags;
__u32 field;
struct timeval timestamp;
struct v4l2_timecode timecode;
__u32 sequence;
/* memory location */
__u32 memory;
union {
__u32 offset;
unsigned long userptr;
struct v4l2_plane *planes;
__s32 fd;
} m;
__u32 length;
__u32 reserved2;
union {
__s32 request_fd;
__u32 reserved;
};
};
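/*
 * Example: querying a single-planar MMAP buffer and mapping it into the
 * application address space. Illustrative sketch only; assumes <sys/mman.h>,
 * an open video node 'fd' and the VIDIOC_QUERYBUF ioctl defined later in
 * this header. For V4L2_MEMORY_MMAP, m.offset is the "cookie" to pass as
 * the mmap() offset.
 */
#if 0
static void *map_buffer(int fd, __u32 index, __u32 *length)
{
	struct v4l2_buffer buf;

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_MMAP;
	buf.index = index;
	if (ioctl(fd, VIDIOC_QUERYBUF, &buf) < 0)
		return MAP_FAILED;
	*length = buf.length;
	return mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, buf.m.offset);
}
#endif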
/**
* v4l2_timeval_to_ns - Convert timeval to nanoseconds
* @tv: pointer to the timeval variable to be converted
*
* Returns the scalar nanosecond representation of the timeval
* parameter.
*/
static __inline__ __u64 v4l2_timeval_to_ns(const struct timeval *tv)
{
return (__u64)tv->tv_sec * 1000000000ULL + tv->tv_usec * 1000;
}
/* Flags for 'flags' field */
/* Buffer is mapped (flag) */
#define V4L2_BUF_FLAG_MAPPED 0x00000001
/* Buffer is queued for processing */
#define V4L2_BUF_FLAG_QUEUED 0x00000002
/* Buffer is ready */
#define V4L2_BUF_FLAG_DONE 0x00000004
/* Image is a keyframe (I-frame) */
#define V4L2_BUF_FLAG_KEYFRAME 0x00000008
/* Image is a P-frame */
#define V4L2_BUF_FLAG_PFRAME 0x00000010
/* Image is a B-frame */
#define V4L2_BUF_FLAG_BFRAME 0x00000020
/* Buffer is ready, but the data contained within is corrupted. */
#define V4L2_BUF_FLAG_ERROR 0x00000040
/* Buffer is added to an unqueued request */
#define V4L2_BUF_FLAG_IN_REQUEST 0x00000080
/* timecode field is valid */
#define V4L2_BUF_FLAG_TIMECODE 0x00000100
/* Don't return the capture buffer until OUTPUT timestamp changes */
#define V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF 0x00000200
/* Buffer is prepared for queuing */
#define V4L2_BUF_FLAG_PREPARED 0x00000400
/* Cache handling flags */
#define V4L2_BUF_FLAG_NO_CACHE_INVALIDATE 0x00000800
#define V4L2_BUF_FLAG_NO_CACHE_CLEAN 0x00001000
/* Timestamp type */
#define V4L2_BUF_FLAG_TIMESTAMP_MASK 0x0000e000
#define V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN 0x00000000
#define V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC 0x00002000
#define V4L2_BUF_FLAG_TIMESTAMP_COPY 0x00004000
/* Timestamp sources. */
#define V4L2_BUF_FLAG_TSTAMP_SRC_MASK 0x00070000
#define V4L2_BUF_FLAG_TSTAMP_SRC_EOF 0x00000000
#define V4L2_BUF_FLAG_TSTAMP_SRC_SOE 0x00010000
/* mem2mem encoder/decoder */
#define V4L2_BUF_FLAG_LAST 0x00100000
/* request_fd is valid */
#define V4L2_BUF_FLAG_REQUEST_FD 0x00800000
/**
* struct v4l2_exportbuffer - export of video buffer as DMABUF file descriptor
*
* @index: id number of the buffer
* @type: enum v4l2_buf_type; buffer type (type == *_MPLANE for
* multiplanar buffers);
* @plane: index of the plane to be exported, 0 for single plane queues
* @flags: flags for newly created file, currently only O_CLOEXEC is
* supported, refer to manual of open syscall for more details
* @fd: file descriptor associated with DMABUF (set by driver)
* @reserved: drivers and applications must zero this array
*
* Contains data used for exporting a video buffer as DMABUF file descriptor.
* The buffer is identified by a 'cookie' returned by VIDIOC_QUERYBUF
* (identical to the cookie used to mmap() the buffer to userspace). All
 * reserved fields must be set to zero. The first reserved field is expected
 * to become a structure 'type' allowing an alternative layout of the
 * structure content. Therefore this field should not be used for any other
 * extensions.
*/
struct v4l2_exportbuffer {
__u32 type; /* enum v4l2_buf_type */
__u32 index;
__u32 plane;
__u32 flags;
__s32 fd;
__u32 reserved[11];
};
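/*
 * Example: exporting plane 0 of buffer 'index' as a DMABUF file descriptor.
 * Illustrative sketch only; assumes <fcntl.h> for O_CLOEXEC, an open video
 * node 'fd' and the VIDIOC_EXPBUF ioctl defined later in this header.
 */
#if 0
static int export_dmabuf(int fd, __u32 index)
{
	struct v4l2_exportbuffer expbuf;

	memset(&expbuf, 0, sizeof(expbuf));
	expbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	expbuf.index = index;
	expbuf.plane = 0;
	expbuf.flags = O_CLOEXEC;
	if (ioctl(fd, VIDIOC_EXPBUF, &expbuf) < 0)
		return -1;
	return expbuf.fd; /* DMABUF fd set by the driver */
}
#endif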
/*
* O V E R L A Y P R E V I E W
*/
struct v4l2_framebuffer {
__u32 capability;
__u32 flags;
/* FIXME: in theory we should pass something like PCI device + memory
* region + offset instead of some physical address */
void *base;
struct {
__u32 width;
__u32 height;
__u32 pixelformat;
__u32 field; /* enum v4l2_field */
__u32 bytesperline; /* for padding, zero if unused */
__u32 sizeimage;
__u32 colorspace; /* enum v4l2_colorspace */
__u32 priv; /* reserved field, set to 0 */
} fmt;
};
/* Flags for the 'capability' field. Read only */
#define V4L2_FBUF_CAP_EXTERNOVERLAY 0x0001
#define V4L2_FBUF_CAP_CHROMAKEY 0x0002
#define V4L2_FBUF_CAP_LIST_CLIPPING 0x0004
#define V4L2_FBUF_CAP_BITMAP_CLIPPING 0x0008
#define V4L2_FBUF_CAP_LOCAL_ALPHA 0x0010
#define V4L2_FBUF_CAP_GLOBAL_ALPHA 0x0020
#define V4L2_FBUF_CAP_LOCAL_INV_ALPHA 0x0040
#define V4L2_FBUF_CAP_SRC_CHROMAKEY 0x0080
/* Flags for the 'flags' field. */
#define V4L2_FBUF_FLAG_PRIMARY 0x0001
#define V4L2_FBUF_FLAG_OVERLAY 0x0002
#define V4L2_FBUF_FLAG_CHROMAKEY 0x0004
#define V4L2_FBUF_FLAG_LOCAL_ALPHA 0x0008
#define V4L2_FBUF_FLAG_GLOBAL_ALPHA 0x0010
#define V4L2_FBUF_FLAG_LOCAL_INV_ALPHA 0x0020
#define V4L2_FBUF_FLAG_SRC_CHROMAKEY 0x0040
struct v4l2_clip {
struct v4l2_rect c;
struct v4l2_clip *next;
};
struct v4l2_window {
struct v4l2_rect w;
__u32 field; /* enum v4l2_field */
__u32 chromakey;
struct v4l2_clip *clips;
__u32 clipcount;
void *bitmap;
__u8 global_alpha;
};
/*
* C A P T U R E P A R A M E T E R S
*/
struct v4l2_captureparm {
__u32 capability; /* Supported modes */
__u32 capturemode; /* Current mode */
struct v4l2_fract timeperframe; /* Time per frame in seconds */
__u32 extendedmode; /* Driver-specific extensions */
__u32 readbuffers; /* # of buffers for read */
__u32 reserved[4];
};
/* Flags for 'capability' and 'capturemode' fields */
#define V4L2_MODE_HIGHQUALITY 0x0001 /* High quality imaging mode */
#define V4L2_CAP_TIMEPERFRAME 0x1000 /* timeperframe field is supported */
struct v4l2_outputparm {
__u32 capability; /* Supported modes */
__u32 outputmode; /* Current mode */
struct v4l2_fract timeperframe; /* Time per frame in seconds */
__u32 extendedmode; /* Driver-specific extensions */
__u32 writebuffers; /* # of buffers for write */
__u32 reserved[4];
};
/*
* I N P U T I M A G E C R O P P I N G
*/
struct v4l2_cropcap {
__u32 type; /* enum v4l2_buf_type */
struct v4l2_rect bounds;
struct v4l2_rect defrect;
struct v4l2_fract pixelaspect;
};
struct v4l2_crop {
__u32 type; /* enum v4l2_buf_type */
struct v4l2_rect c;
};
/**
* struct v4l2_selection - selection info
* @type: buffer type (do not use *_MPLANE types)
* @target: Selection target, used to choose one of possible rectangles;
* defined in v4l2-common.h; V4L2_SEL_TGT_* .
* @flags: constraints flags, defined in v4l2-common.h; V4L2_SEL_FLAG_*.
* @r: coordinates of selection window
* @reserved: for future use, rounds structure size to 64 bytes, set to zero
*
* Hardware may use multiple helper windows to process a video stream.
 * The structure is used to exchange these selection areas between
* an application and a driver.
*/
struct v4l2_selection {
__u32 type;
__u32 target;
__u32 flags;
struct v4l2_rect r;
__u32 reserved[9];
};
/*
* A N A L O G V I D E O S T A N D A R D
*/
typedef __u64 v4l2_std_id;
/*
* Attention: Keep the V4L2_STD_* bit definitions in sync with
* include/dt-bindings/display/sdtv-standards.h SDTV_STD_* bit definitions.
*/
/* one bit for each */
#define V4L2_STD_PAL_B ((v4l2_std_id)0x00000001)
#define V4L2_STD_PAL_B1 ((v4l2_std_id)0x00000002)
#define V4L2_STD_PAL_G ((v4l2_std_id)0x00000004)
#define V4L2_STD_PAL_H ((v4l2_std_id)0x00000008)
#define V4L2_STD_PAL_I ((v4l2_std_id)0x00000010)
#define V4L2_STD_PAL_D ((v4l2_std_id)0x00000020)
#define V4L2_STD_PAL_D1 ((v4l2_std_id)0x00000040)
#define V4L2_STD_PAL_K ((v4l2_std_id)0x00000080)
#define V4L2_STD_PAL_M ((v4l2_std_id)0x00000100)
#define V4L2_STD_PAL_N ((v4l2_std_id)0x00000200)
#define V4L2_STD_PAL_Nc ((v4l2_std_id)0x00000400)
#define V4L2_STD_PAL_60 ((v4l2_std_id)0x00000800)
#define V4L2_STD_NTSC_M ((v4l2_std_id)0x00001000) /* BTSC */
#define V4L2_STD_NTSC_M_JP ((v4l2_std_id)0x00002000) /* EIA-J */
#define V4L2_STD_NTSC_443 ((v4l2_std_id)0x00004000)
#define V4L2_STD_NTSC_M_KR ((v4l2_std_id)0x00008000) /* FM A2 */
#define V4L2_STD_SECAM_B ((v4l2_std_id)0x00010000)
#define V4L2_STD_SECAM_D ((v4l2_std_id)0x00020000)
#define V4L2_STD_SECAM_G ((v4l2_std_id)0x00040000)
#define V4L2_STD_SECAM_H ((v4l2_std_id)0x00080000)
#define V4L2_STD_SECAM_K ((v4l2_std_id)0x00100000)
#define V4L2_STD_SECAM_K1 ((v4l2_std_id)0x00200000)
#define V4L2_STD_SECAM_L ((v4l2_std_id)0x00400000)
#define V4L2_STD_SECAM_LC ((v4l2_std_id)0x00800000)
/* ATSC/HDTV */
#define V4L2_STD_ATSC_8_VSB ((v4l2_std_id)0x01000000)
#define V4L2_STD_ATSC_16_VSB ((v4l2_std_id)0x02000000)
/* FIXME:
Although std_id is 64 bits, there is an issue on the PPC32 architecture
that makes switch(__u64) break. So there is a hack in v4l2-common.c that
rounds this value down to 32 bits.
Since the highest value currently used, V4L2_STD_ATSC_16_VSB, is well
within 32 bits, this works fine. However, if more standards need to be
added, v4l2-common.c should be fixed.
*/
/*
 * Some macros to merge video standards in order to make life easier for
 * drivers and V4L2 applications
*/
/*
* "Common" NTSC/M - It should be noticed that V4L2_STD_NTSC_443 is
* Missing here.
*/
#define V4L2_STD_NTSC (V4L2_STD_NTSC_M |\
V4L2_STD_NTSC_M_JP |\
V4L2_STD_NTSC_M_KR)
/* Secam macros */
#define V4L2_STD_SECAM_DK (V4L2_STD_SECAM_D |\
V4L2_STD_SECAM_K |\
V4L2_STD_SECAM_K1)
/* All Secam Standards */
#define V4L2_STD_SECAM (V4L2_STD_SECAM_B |\
V4L2_STD_SECAM_G |\
V4L2_STD_SECAM_H |\
V4L2_STD_SECAM_DK |\
V4L2_STD_SECAM_L |\
V4L2_STD_SECAM_LC)
/* PAL macros */
#define V4L2_STD_PAL_BG (V4L2_STD_PAL_B |\
V4L2_STD_PAL_B1 |\
V4L2_STD_PAL_G)
#define V4L2_STD_PAL_DK (V4L2_STD_PAL_D |\
V4L2_STD_PAL_D1 |\
V4L2_STD_PAL_K)
/*
* "Common" PAL - This macro is there to be compatible with the old
* V4L1 concept of "PAL": /BGDKHI.
* Several PAL standards are missing here: /M, /N and /Nc
*/
#define V4L2_STD_PAL (V4L2_STD_PAL_BG |\
V4L2_STD_PAL_DK |\
V4L2_STD_PAL_H |\
V4L2_STD_PAL_I)
/* Chroma "agnostic" standards */
#define V4L2_STD_B (V4L2_STD_PAL_B |\
V4L2_STD_PAL_B1 |\
V4L2_STD_SECAM_B)
#define V4L2_STD_G (V4L2_STD_PAL_G |\
V4L2_STD_SECAM_G)
#define V4L2_STD_H (V4L2_STD_PAL_H |\
V4L2_STD_SECAM_H)
#define V4L2_STD_L (V4L2_STD_SECAM_L |\
V4L2_STD_SECAM_LC)
#define V4L2_STD_GH (V4L2_STD_G |\
V4L2_STD_H)
#define V4L2_STD_DK (V4L2_STD_PAL_DK |\
V4L2_STD_SECAM_DK)
#define V4L2_STD_BG (V4L2_STD_B |\
V4L2_STD_G)
#define V4L2_STD_MN (V4L2_STD_PAL_M |\
V4L2_STD_PAL_N |\
V4L2_STD_PAL_Nc |\
V4L2_STD_NTSC)
/* Standards where MTS/BTSC stereo could be found */
#define V4L2_STD_MTS (V4L2_STD_NTSC_M |\
V4L2_STD_PAL_M |\
V4L2_STD_PAL_N |\
V4L2_STD_PAL_Nc)
/* Standards for Countries with 60Hz Line frequency */
#define V4L2_STD_525_60 (V4L2_STD_PAL_M |\
V4L2_STD_PAL_60 |\
V4L2_STD_NTSC |\
V4L2_STD_NTSC_443)
/* Standards for Countries with 50Hz Line frequency */
#define V4L2_STD_625_50 (V4L2_STD_PAL |\
V4L2_STD_PAL_N |\
V4L2_STD_PAL_Nc |\
V4L2_STD_SECAM)
#define V4L2_STD_ATSC (V4L2_STD_ATSC_8_VSB |\
V4L2_STD_ATSC_16_VSB)
/* Macros with none and all analog standards */
#define V4L2_STD_UNKNOWN 0
#define V4L2_STD_ALL (V4L2_STD_525_60 |\
V4L2_STD_625_50)
struct v4l2_standard {
__u32 index;
v4l2_std_id id;
__u8 name[24];
struct v4l2_fract frameperiod; /* Frames, not fields */
__u32 framelines;
__u32 reserved[4];
};
/*
* D V B T T I M I N G S
*/
/** struct v4l2_bt_timings - BT.656/BT.1120 timing data
* @width: total width of the active video in pixels
* @height: total height of the active video in lines
* @interlaced: Interlaced or progressive
* @polarities: Positive or negative polarities
 * @pixelclock: Pixel clock in Hz, e.g. 74.25 MHz -> 74250000
* @hfrontporch:Horizontal front porch in pixels
* @hsync: Horizontal Sync length in pixels
* @hbackporch: Horizontal back porch in pixels
* @vfrontporch:Vertical front porch in lines
* @vsync: Vertical Sync length in lines
* @vbackporch: Vertical back porch in lines
* @il_vfrontporch:Vertical front porch for the even field
* (aka field 2) of interlaced field formats
* @il_vsync: Vertical Sync length for the even field
* (aka field 2) of interlaced field formats
* @il_vbackporch:Vertical back porch for the even field
* (aka field 2) of interlaced field formats
* @standards: Standards the timing belongs to
* @flags: Flags
* @picture_aspect: The picture aspect ratio (hor/vert).
* @cea861_vic: VIC code as per the CEA-861 standard.
* @hdmi_vic: VIC code as per the HDMI standard.
* @reserved: Reserved fields, must be zeroed.
*
* A note regarding vertical interlaced timings: height refers to the total
* height of the active video frame (= two fields). The blanking timings refer
* to the blanking of each field. So the height of the total frame is
* calculated as follows:
*
* tot_height = height + vfrontporch + vsync + vbackporch +
* il_vfrontporch + il_vsync + il_vbackporch
*
* The active height of each field is height / 2.
*/
struct v4l2_bt_timings {
__u32 width;
__u32 height;
__u32 interlaced;
__u32 polarities;
__u64 pixelclock;
__u32 hfrontporch;
__u32 hsync;
__u32 hbackporch;
__u32 vfrontporch;
__u32 vsync;
__u32 vbackporch;
__u32 il_vfrontporch;
__u32 il_vsync;
__u32 il_vbackporch;
__u32 standards;
__u32 flags;
struct v4l2_fract picture_aspect;
__u8 cea861_vic;
__u8 hdmi_vic;
__u8 reserved[46];
} __attribute__ ((packed));
/* Interlaced or progressive format */
#define V4L2_DV_PROGRESSIVE 0
#define V4L2_DV_INTERLACED 1
/* Polarities. If a bit is not set, the polarity is assumed to be negative */
#define V4L2_DV_VSYNC_POS_POL 0x00000001
#define V4L2_DV_HSYNC_POS_POL 0x00000002
/* Timings standards */
#define V4L2_DV_BT_STD_CEA861 (1 << 0) /* CEA-861 Digital TV Profile */
#define V4L2_DV_BT_STD_DMT (1 << 1) /* VESA Discrete Monitor Timings */
#define V4L2_DV_BT_STD_CVT (1 << 2) /* VESA Coordinated Video Timings */
#define V4L2_DV_BT_STD_GTF (1 << 3) /* VESA Generalized Timings Formula */
#define V4L2_DV_BT_STD_SDI (1 << 4) /* SDI Timings */
/* Flags */
/*
* CVT/GTF specific: timing uses reduced blanking (CVT) or the 'Secondary
* GTF' curve (GTF). In both cases the horizontal and/or vertical blanking
* intervals are reduced, allowing a higher resolution over the same
* bandwidth. This is a read-only flag.
*/
#define V4L2_DV_FL_REDUCED_BLANKING (1 << 0)
/*
 * CEA-861 specific: set for CEA-861 formats whose frame rate is a multiple
 * of six. These formats can optionally be played at 1 / 1.001 speed.
* This is a read-only flag.
*/
#define V4L2_DV_FL_CAN_REDUCE_FPS (1 << 1)
/*
 * CEA-861 specific: only valid for video transmitters; the flag is cleared
 * by receivers.
 * If the frame rate of the format is a multiple of six, then the pixelclock
 * used to set up the transmitter is divided by 1.001 to make it compatible
 * with 60 Hz based standards such as NTSC and PAL-M that use a frame rate of
 * 29.97 fps. Otherwise this flag is cleared. If the transmitter can't generate
 * such frequencies, then the flag will also be cleared.
*/
#define V4L2_DV_FL_REDUCED_FPS (1 << 2)
/*
* Specific to interlaced formats: if set, then field 1 is really one half-line
* longer and field 2 is really one half-line shorter, so each field has
* exactly the same number of half-lines. Whether half-lines can be detected
* or used depends on the hardware.
*/
#define V4L2_DV_FL_HALF_LINE (1 << 3)
/*
* If set, then this is a Consumer Electronics (CE) video format. Such formats
* differ from other formats (commonly called IT formats) in that if RGB
* encoding is used then by default the RGB values use limited range (i.e.
* use the range 16-235) as opposed to 0-255. All formats defined in CEA-861
* except for the 640x480 format are CE formats.
*/
#define V4L2_DV_FL_IS_CE_VIDEO (1 << 4)
/* Some formats like SMPTE-125M have an interlaced signal with an odd
* total height. For these formats, if this flag is set, the first
* field has the extra line. If not, it is the second field.
*/
#define V4L2_DV_FL_FIRST_FIELD_EXTRA_LINE (1 << 5)
/*
* If set, then the picture_aspect field is valid. Otherwise assume that the
* pixels are square, so the picture aspect ratio is the same as the width to
* height ratio.
*/
#define V4L2_DV_FL_HAS_PICTURE_ASPECT (1 << 6)
/*
* If set, then the cea861_vic field is valid and contains the Video
* Identification Code as per the CEA-861 standard.
*/
#define V4L2_DV_FL_HAS_CEA861_VIC (1 << 7)
/*
* If set, then the hdmi_vic field is valid and contains the Video
* Identification Code as per the HDMI standard (HDMI Vendor Specific
* InfoFrame).
*/
#define V4L2_DV_FL_HAS_HDMI_VIC (1 << 8)
/*
* CEA-861 specific: only valid for video receivers.
* If set, then HW can detect the difference between regular FPS and
* 1000/1001 FPS. Note: This flag is only valid for HDMI VIC codes with
* the V4L2_DV_FL_CAN_REDUCE_FPS flag set.
*/
#define V4L2_DV_FL_CAN_DETECT_REDUCED_FPS (1 << 9)
/* A few useful defines to calculate the total blanking and frame sizes */
#define V4L2_DV_BT_BLANKING_WIDTH(bt) \
((bt)->hfrontporch + (bt)->hsync + (bt)->hbackporch)
#define V4L2_DV_BT_FRAME_WIDTH(bt) \
((bt)->width + V4L2_DV_BT_BLANKING_WIDTH(bt))
#define V4L2_DV_BT_BLANKING_HEIGHT(bt) \
((bt)->vfrontporch + (bt)->vsync + (bt)->vbackporch + \
((bt)->interlaced ? \
((bt)->il_vfrontporch + (bt)->il_vsync + (bt)->il_vbackporch) : 0))
#define V4L2_DV_BT_FRAME_HEIGHT(bt) \
((bt)->height + V4L2_DV_BT_BLANKING_HEIGHT(bt))
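/*
 * Worked example (CEA-861 1080p60): width 1920, hfrontporch 88, hsync 44
 * and hbackporch 148 give V4L2_DV_BT_FRAME_WIDTH == 2200; height 1080,
 * vfrontporch 4, vsync 5 and vbackporch 36 (progressive, so no il_* terms)
 * give V4L2_DV_BT_FRAME_HEIGHT == 1125. The pixel clock is then
 * 2200 * 1125 * 60 == 148500000 Hz (148.5 MHz).
 */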
/** struct v4l2_dv_timings - DV timings
* @type: the type of the timings
* @bt: BT656/1120 timings
*/
struct v4l2_dv_timings {
__u32 type;
union {
struct v4l2_bt_timings bt;
__u32 reserved[32];
};
} __attribute__ ((packed));
/* Values for the type field */
#define V4L2_DV_BT_656_1120 0 /* BT.656/1120 timing type */
/** struct v4l2_enum_dv_timings - DV timings enumeration
* @index: enumeration index
* @pad: the pad number for which to enumerate timings (used with
* v4l-subdev nodes only)
* @reserved: must be zeroed
* @timings: the timings for the given index
*/
struct v4l2_enum_dv_timings {
__u32 index;
__u32 pad;
__u32 reserved[2];
struct v4l2_dv_timings timings;
};
/** struct v4l2_bt_timings_cap - BT.656/BT.1120 timing capabilities
 * @min_width: minimum width in pixels
 * @max_width: maximum width in pixels
 * @min_height: minimum height in lines
 * @max_height: maximum height in lines
 * @min_pixelclock: minimum pixel clock in Hz, e.g. 74.25 MHz -> 74250000
 * @max_pixelclock: maximum pixel clock in Hz, e.g. 74.25 MHz -> 74250000
* @standards: Supported standards
* @capabilities: Supported capabilities
* @reserved: Must be zeroed
*/
struct v4l2_bt_timings_cap {
__u32 min_width;
__u32 max_width;
__u32 min_height;
__u32 max_height;
__u64 min_pixelclock;
__u64 max_pixelclock;
__u32 standards;
__u32 capabilities;
__u32 reserved[16];
} __attribute__ ((packed));
/* Supports interlaced formats */
#define V4L2_DV_BT_CAP_INTERLACED (1 << 0)
/* Supports progressive formats */
#define V4L2_DV_BT_CAP_PROGRESSIVE (1 << 1)
/* Supports CVT/GTF reduced blanking */
#define V4L2_DV_BT_CAP_REDUCED_BLANKING (1 << 2)
/* Supports custom formats */
#define V4L2_DV_BT_CAP_CUSTOM (1 << 3)
/** struct v4l2_dv_timings_cap - DV timings capabilities
* @type: the type of the timings (same as in struct v4l2_dv_timings)
* @pad: the pad number for which to query capabilities (used with
* v4l-subdev nodes only)
* @bt: the BT656/1120 timings capabilities
*/
struct v4l2_dv_timings_cap {
__u32 type;
__u32 pad;
__u32 reserved[2];
union {
struct v4l2_bt_timings_cap bt;
__u32 raw_data[32];
};
};
/*
* V I D E O I N P U T S
*/
struct v4l2_input {
__u32 index; /* Which input */
__u8 name[32]; /* Label */
__u32 type; /* Type of input */
__u32 audioset; /* Associated audios (bitfield) */
__u32 tuner; /* Tuner index */
v4l2_std_id std;
__u32 status;
__u32 capabilities;
__u32 reserved[3];
};
/* Values for the 'type' field */
#define V4L2_INPUT_TYPE_TUNER 1
#define V4L2_INPUT_TYPE_CAMERA 2
#define V4L2_INPUT_TYPE_TOUCH 3
/* field 'status' - general */
#define V4L2_IN_ST_NO_POWER 0x00000001 /* Attached device is off */
#define V4L2_IN_ST_NO_SIGNAL 0x00000002
#define V4L2_IN_ST_NO_COLOR 0x00000004
/* field 'status' - sensor orientation */
/* If the sensor is mounted upside down, set both bits */
#define V4L2_IN_ST_HFLIP 0x00000010 /* Frames are flipped horizontally */
#define V4L2_IN_ST_VFLIP 0x00000020 /* Frames are flipped vertically */
/* field 'status' - analog */
#define V4L2_IN_ST_NO_H_LOCK 0x00000100 /* No horizontal sync lock */
#define V4L2_IN_ST_COLOR_KILL 0x00000200 /* Color killer is active */
#define V4L2_IN_ST_NO_V_LOCK 0x00000400 /* No vertical sync lock */
#define V4L2_IN_ST_NO_STD_LOCK 0x00000800 /* No standard format lock */
/* field 'status' - digital */
#define V4L2_IN_ST_NO_SYNC 0x00010000 /* No synchronization lock */
#define V4L2_IN_ST_NO_EQU 0x00020000 /* No equalizer lock */
#define V4L2_IN_ST_NO_CARRIER 0x00040000 /* Carrier recovery failed */
/* field 'status' - VCR and set-top box */
#define V4L2_IN_ST_MACROVISION 0x01000000 /* Macrovision detected */
#define V4L2_IN_ST_NO_ACCESS 0x02000000 /* Conditional access denied */
#define V4L2_IN_ST_VTR 0x04000000 /* VTR time constant */
/* capabilities flags */
#define V4L2_IN_CAP_DV_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */
#define V4L2_IN_CAP_CUSTOM_TIMINGS V4L2_IN_CAP_DV_TIMINGS /* For compatibility */
#define V4L2_IN_CAP_STD 0x00000004 /* Supports S_STD */
#define V4L2_IN_CAP_NATIVE_SIZE 0x00000008 /* Supports setting native size */
/*
* V I D E O O U T P U T S
*/
struct v4l2_output {
__u32 index; /* Which output */
__u8 name[32]; /* Label */
__u32 type; /* Type of output */
__u32 audioset; /* Associated audios (bitfield) */
__u32 modulator; /* Associated modulator */
v4l2_std_id std;
__u32 capabilities;
__u32 reserved[3];
};
/* Values for the 'type' field */
#define V4L2_OUTPUT_TYPE_MODULATOR 1
#define V4L2_OUTPUT_TYPE_ANALOG 2
#define V4L2_OUTPUT_TYPE_ANALOGVGAOVERLAY 3
/* capabilities flags */
#define V4L2_OUT_CAP_DV_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */
#define V4L2_OUT_CAP_CUSTOM_TIMINGS V4L2_OUT_CAP_DV_TIMINGS /* For compatibility */
#define V4L2_OUT_CAP_STD 0x00000004 /* Supports S_STD */
#define V4L2_OUT_CAP_NATIVE_SIZE 0x00000008 /* Supports setting native size */
/*
* C O N T R O L S
*/
struct v4l2_control {
__u32 id;
__s32 value;
};
struct v4l2_ext_control {
__u32 id;
__u32 size;
__u32 reserved2[1];
union {
__s32 value;
__s64 value64;
char *string;
__u8 *p_u8;
__u16 *p_u16;
__u32 *p_u32;
__s32 *p_s32;
__s64 *p_s64;
struct v4l2_area *p_area;
struct v4l2_ctrl_h264_sps *p_h264_sps;
struct v4l2_ctrl_h264_pps *p_h264_pps;
struct v4l2_ctrl_h264_scaling_matrix *p_h264_scaling_matrix;
struct v4l2_ctrl_h264_pred_weights *p_h264_pred_weights;
struct v4l2_ctrl_h264_slice_params *p_h264_slice_params;
struct v4l2_ctrl_h264_decode_params *p_h264_decode_params;
struct v4l2_ctrl_fwht_params *p_fwht_params;
struct v4l2_ctrl_vp8_frame *p_vp8_frame;
struct v4l2_ctrl_mpeg2_sequence *p_mpeg2_sequence;
struct v4l2_ctrl_mpeg2_picture *p_mpeg2_picture;
struct v4l2_ctrl_mpeg2_quantisation *p_mpeg2_quantisation;
struct v4l2_ctrl_vp9_compressed_hdr *p_vp9_compressed_hdr_probs;
struct v4l2_ctrl_vp9_frame *p_vp9_frame;
struct v4l2_ctrl_hevc_sps *p_hevc_sps;
struct v4l2_ctrl_hevc_pps *p_hevc_pps;
struct v4l2_ctrl_hevc_slice_params *p_hevc_slice_params;
struct v4l2_ctrl_hevc_scaling_matrix *p_hevc_scaling_matrix;
struct v4l2_ctrl_hevc_decode_params *p_hevc_decode_params;
struct v4l2_ctrl_av1_sequence *p_av1_sequence;
struct v4l2_ctrl_av1_tile_group_entry *p_av1_tile_group_entry;
struct v4l2_ctrl_av1_frame *p_av1_frame;
struct v4l2_ctrl_av1_film_grain *p_av1_film_grain;
struct v4l2_ctrl_hdr10_cll_info *p_hdr10_cll_info;
struct v4l2_ctrl_hdr10_mastering_display *p_hdr10_mastering_display;
void *ptr;
} __attribute__ ((packed));
} __attribute__ ((packed));
struct v4l2_ext_controls {
union {
__u32 ctrl_class;
__u32 which;
};
__u32 count;
__u32 error_idx;
__s32 request_fd;
__u32 reserved[1];
struct v4l2_ext_control *controls;
};
#define V4L2_CTRL_ID_MASK (0x0fffffff)
#define V4L2_CTRL_ID2CLASS(id) ((id) & 0x0fff0000UL)
#define V4L2_CTRL_ID2WHICH(id) ((id) & 0x0fff0000UL)
#define V4L2_CTRL_DRIVER_PRIV(id) (((id) & 0xffff) >= 0x1000)
#define V4L2_CTRL_MAX_DIMS (4)
#define V4L2_CTRL_WHICH_CUR_VAL 0
#define V4L2_CTRL_WHICH_DEF_VAL 0x0f000000
#define V4L2_CTRL_WHICH_REQUEST_VAL 0x0f010000
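/*
 * Example: setting a single 32-bit control through the extended control
 * API. Illustrative sketch only; assumes an open video node 'fd' and the
 * VIDIOC_S_EXT_CTRLS ioctl defined later in this header. 'id' would be one
 * of the V4L2_CID_* control IDs from <linux/v4l2-controls.h>.
 */
#if 0
static int set_s32_control(int fd, __u32 id, __s32 value)
{
	struct v4l2_ext_control ctrl;
	struct v4l2_ext_controls ctrls;

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.id = id;
	ctrl.value = value;

	memset(&ctrls, 0, sizeof(ctrls));
	ctrls.which = V4L2_CTRL_WHICH_CUR_VAL;
	ctrls.count = 1;
	ctrls.controls = &ctrl;
	return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
}
#endif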
enum v4l2_ctrl_type {
V4L2_CTRL_TYPE_INTEGER = 1,
V4L2_CTRL_TYPE_BOOLEAN = 2,
V4L2_CTRL_TYPE_MENU = 3,
V4L2_CTRL_TYPE_BUTTON = 4,
V4L2_CTRL_TYPE_INTEGER64 = 5,
V4L2_CTRL_TYPE_CTRL_CLASS = 6,
V4L2_CTRL_TYPE_STRING = 7,
V4L2_CTRL_TYPE_BITMASK = 8,
V4L2_CTRL_TYPE_INTEGER_MENU = 9,
/* Compound types are >= 0x0100 */
V4L2_CTRL_COMPOUND_TYPES = 0x0100,
V4L2_CTRL_TYPE_U8 = 0x0100,
V4L2_CTRL_TYPE_U16 = 0x0101,
V4L2_CTRL_TYPE_U32 = 0x0102,
V4L2_CTRL_TYPE_AREA = 0x0106,
V4L2_CTRL_TYPE_HDR10_CLL_INFO = 0x0110,
V4L2_CTRL_TYPE_HDR10_MASTERING_DISPLAY = 0x0111,
V4L2_CTRL_TYPE_H264_SPS = 0x0200,
V4L2_CTRL_TYPE_H264_PPS = 0x0201,
V4L2_CTRL_TYPE_H264_SCALING_MATRIX = 0x0202,
V4L2_CTRL_TYPE_H264_SLICE_PARAMS = 0x0203,
V4L2_CTRL_TYPE_H264_DECODE_PARAMS = 0x0204,
V4L2_CTRL_TYPE_H264_PRED_WEIGHTS = 0x0205,
V4L2_CTRL_TYPE_FWHT_PARAMS = 0x0220,
V4L2_CTRL_TYPE_VP8_FRAME = 0x0240,
V4L2_CTRL_TYPE_MPEG2_QUANTISATION = 0x0250,
V4L2_CTRL_TYPE_MPEG2_SEQUENCE = 0x0251,
V4L2_CTRL_TYPE_MPEG2_PICTURE = 0x0252,
V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR = 0x0260,
V4L2_CTRL_TYPE_VP9_FRAME = 0x0261,
V4L2_CTRL_TYPE_HEVC_SPS = 0x0270,
V4L2_CTRL_TYPE_HEVC_PPS = 0x0271,
V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS = 0x0272,
V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX = 0x0273,
V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS = 0x0274,
V4L2_CTRL_TYPE_AV1_SEQUENCE = 0x280,
V4L2_CTRL_TYPE_AV1_TILE_GROUP_ENTRY = 0x281,
V4L2_CTRL_TYPE_AV1_FRAME = 0x282,
V4L2_CTRL_TYPE_AV1_FILM_GRAIN = 0x283,
};
/* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */
struct v4l2_queryctrl {
__u32 id;
__u32 type; /* enum v4l2_ctrl_type */
__u8 name[32]; /* Control name */
__s32 minimum; /* Note signedness */
__s32 maximum;
__s32 step;
__s32 default_value;
__u32 flags;
__u32 reserved[2];
};
/* Used in the VIDIOC_QUERY_EXT_CTRL ioctl for querying extended controls */
struct v4l2_query_ext_ctrl {
__u32 id;
__u32 type;
char name[32];
__s64 minimum;
__s64 maximum;
__u64 step;
__s64 default_value;
__u32 flags;
__u32 elem_size;
__u32 elems;
__u32 nr_of_dims;
__u32 dims[V4L2_CTRL_MAX_DIMS];
__u32 reserved[32];
};
/* Used in the VIDIOC_QUERYMENU ioctl for querying menu items */
struct v4l2_querymenu {
__u32 id;
__u32 index;
union {
__u8 name[32]; /* Menu item name */
__s64 value;
};
__u32 reserved;
} __attribute__ ((packed));
/* Control flags */
#define V4L2_CTRL_FLAG_DISABLED 0x0001
#define V4L2_CTRL_FLAG_GRABBED 0x0002
#define V4L2_CTRL_FLAG_READ_ONLY 0x0004
#define V4L2_CTRL_FLAG_UPDATE 0x0008
#define V4L2_CTRL_FLAG_INACTIVE 0x0010
#define V4L2_CTRL_FLAG_SLIDER 0x0020
#define V4L2_CTRL_FLAG_WRITE_ONLY 0x0040
#define V4L2_CTRL_FLAG_VOLATILE 0x0080
#define V4L2_CTRL_FLAG_HAS_PAYLOAD 0x0100
#define V4L2_CTRL_FLAG_EXECUTE_ON_WRITE 0x0200
#define V4L2_CTRL_FLAG_MODIFY_LAYOUT 0x0400
#define V4L2_CTRL_FLAG_DYNAMIC_ARRAY 0x0800
/* Query flags, to be ORed with the control ID */
#define V4L2_CTRL_FLAG_NEXT_CTRL 0x80000000
#define V4L2_CTRL_FLAG_NEXT_COMPOUND 0x40000000
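/*
 * Example: enumerating all controls by ORing V4L2_CTRL_FLAG_NEXT_CTRL into
 * the control ID, which asks the driver for the next supported control.
 * Illustrative sketch only; assumes an open video node 'fd' and the
 * VIDIOC_QUERYCTRL ioctl defined later in this header.
 */
#if 0
static void enum_controls(int fd)
{
	struct v4l2_queryctrl qc;

	memset(&qc, 0, sizeof(qc));
	qc.id = V4L2_CTRL_FLAG_NEXT_CTRL;
	while (ioctl(fd, VIDIOC_QUERYCTRL, &qc) == 0) {
		if (!(qc.flags & V4L2_CTRL_FLAG_DISABLED))
			printf("%s: [%d..%d] default %d\n",
			       (const char *)qc.name, qc.minimum,
			       qc.maximum, qc.default_value);
		qc.id |= V4L2_CTRL_FLAG_NEXT_CTRL;
	}
}
#endif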
/* User-class control IDs defined by V4L2 */
#define V4L2_CID_MAX_CTRLS 1024
/* IDs reserved for driver specific controls */
#define V4L2_CID_PRIVATE_BASE 0x08000000
/*
* T U N I N G
*/
struct v4l2_tuner {
__u32 index;
__u8 name[32];
__u32 type; /* enum v4l2_tuner_type */
__u32 capability;
__u32 rangelow;
__u32 rangehigh;
__u32 rxsubchans;
__u32 audmode;
__s32 signal;
__s32 afc;
__u32 reserved[4];
};
struct v4l2_modulator {
__u32 index;
__u8 name[32];
__u32 capability;
__u32 rangelow;
__u32 rangehigh;
__u32 txsubchans;
__u32 type; /* enum v4l2_tuner_type */
__u32 reserved[3];
};
/* Flags for the 'capability' field */
#define V4L2_TUNER_CAP_LOW 0x0001
#define V4L2_TUNER_CAP_NORM 0x0002
#define V4L2_TUNER_CAP_HWSEEK_BOUNDED 0x0004
#define V4L2_TUNER_CAP_HWSEEK_WRAP 0x0008
#define V4L2_TUNER_CAP_STEREO 0x0010
#define V4L2_TUNER_CAP_LANG2 0x0020
#define V4L2_TUNER_CAP_SAP 0x0020
#define V4L2_TUNER_CAP_LANG1 0x0040
#define V4L2_TUNER_CAP_RDS 0x0080
#define V4L2_TUNER_CAP_RDS_BLOCK_IO 0x0100
#define V4L2_TUNER_CAP_RDS_CONTROLS 0x0200
#define V4L2_TUNER_CAP_FREQ_BANDS 0x0400
#define V4L2_TUNER_CAP_HWSEEK_PROG_LIM 0x0800
#define V4L2_TUNER_CAP_1HZ 0x1000
/* Flags for the 'rxsubchans' field */
#define V4L2_TUNER_SUB_MONO 0x0001
#define V4L2_TUNER_SUB_STEREO 0x0002
#define V4L2_TUNER_SUB_LANG2 0x0004
#define V4L2_TUNER_SUB_SAP 0x0004
#define V4L2_TUNER_SUB_LANG1 0x0008
#define V4L2_TUNER_SUB_RDS 0x0010
/* Values for the 'audmode' field */
#define V4L2_TUNER_MODE_MONO 0x0000
#define V4L2_TUNER_MODE_STEREO 0x0001
#define V4L2_TUNER_MODE_LANG2 0x0002
#define V4L2_TUNER_MODE_SAP 0x0002
#define V4L2_TUNER_MODE_LANG1 0x0003
#define V4L2_TUNER_MODE_LANG1_LANG2 0x0004
struct v4l2_frequency {
__u32 tuner;
__u32 type; /* enum v4l2_tuner_type */
__u32 frequency;
__u32 reserved[8];
};
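/*
 * Example: tuning an FM radio device. Illustrative sketch only; assumes an
 * open radio node 'fd', the VIDIOC_S_FREQUENCY ioctl defined later in this
 * header, and a tuner that reports V4L2_TUNER_CAP_LOW, so 'frequency' is
 * expressed in units of 62.5 Hz (without CAP_LOW the unit is 62.5 kHz).
 */
#if 0
static int tune_fm(int fd, unsigned long hz)
{
	struct v4l2_frequency freq;

	memset(&freq, 0, sizeof(freq));
	freq.tuner = 0;
	freq.type = V4L2_TUNER_RADIO;
	freq.frequency = hz * 16 / 1000;	/* Hz -> 62.5 Hz units */
	return ioctl(fd, VIDIOC_S_FREQUENCY, &freq);
}
#endif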
#define V4L2_BAND_MODULATION_VSB (1 << 1)
#define V4L2_BAND_MODULATION_FM (1 << 2)
#define V4L2_BAND_MODULATION_AM (1 << 3)
struct v4l2_frequency_band {
__u32 tuner;
__u32 type; /* enum v4l2_tuner_type */
__u32 index;
__u32 capability;
__u32 rangelow;
__u32 rangehigh;
__u32 modulation;
__u32 reserved[9];
};
struct v4l2_hw_freq_seek {
__u32 tuner;
__u32 type; /* enum v4l2_tuner_type */
__u32 seek_upward;
__u32 wrap_around;
__u32 spacing;
__u32 rangelow;
__u32 rangehigh;
__u32 reserved[5];
};
/*
* R D S
*/
struct v4l2_rds_data {
__u8 lsb;
__u8 msb;
__u8 block;
} __attribute__ ((packed));
#define V4L2_RDS_BLOCK_MSK 0x7
#define V4L2_RDS_BLOCK_A 0
#define V4L2_RDS_BLOCK_B 1
#define V4L2_RDS_BLOCK_C 2
#define V4L2_RDS_BLOCK_D 3
#define V4L2_RDS_BLOCK_C_ALT 4
#define V4L2_RDS_BLOCK_INVALID 7
#define V4L2_RDS_BLOCK_CORRECTED 0x40
#define V4L2_RDS_BLOCK_ERROR 0x80
/*
* A U D I O
*/
struct v4l2_audio {
__u32 index;
__u8 name[32];
__u32 capability;
__u32 mode;
__u32 reserved[2];
};
/* Flags for the 'capability' field */
#define V4L2_AUDCAP_STEREO 0x00001
#define V4L2_AUDCAP_AVL 0x00002
/* Flags for the 'mode' field */
#define V4L2_AUDMODE_AVL 0x00001
struct v4l2_audioout {
__u32 index;
__u8 name[32];
__u32 capability;
__u32 mode;
__u32 reserved[2];
};
/*
* M P E G S E R V I C E S
*/
#if 1
#define V4L2_ENC_IDX_FRAME_I (0)
#define V4L2_ENC_IDX_FRAME_P (1)
#define V4L2_ENC_IDX_FRAME_B (2)
#define V4L2_ENC_IDX_FRAME_MASK (0xf)
struct v4l2_enc_idx_entry {
__u64 offset;
__u64 pts;
__u32 length;
__u32 flags;
__u32 reserved[2];
};
#define V4L2_ENC_IDX_ENTRIES (64)
struct v4l2_enc_idx {
__u32 entries;
__u32 entries_cap;
__u32 reserved[4];
struct v4l2_enc_idx_entry entry[V4L2_ENC_IDX_ENTRIES];
};
#define V4L2_ENC_CMD_START (0)
#define V4L2_ENC_CMD_STOP (1)
#define V4L2_ENC_CMD_PAUSE (2)
#define V4L2_ENC_CMD_RESUME (3)
/* Flags for V4L2_ENC_CMD_STOP */
#define V4L2_ENC_CMD_STOP_AT_GOP_END (1 << 0)
struct v4l2_encoder_cmd {
__u32 cmd;
__u32 flags;
union {
struct {
__u32 data[8];
} raw;
};
};
/* Decoder commands */
#define V4L2_DEC_CMD_START (0)
#define V4L2_DEC_CMD_STOP (1)
#define V4L2_DEC_CMD_PAUSE (2)
#define V4L2_DEC_CMD_RESUME (3)
#define V4L2_DEC_CMD_FLUSH (4)
/* Flags for V4L2_DEC_CMD_START */
#define V4L2_DEC_CMD_START_MUTE_AUDIO (1 << 0)
/* Flags for V4L2_DEC_CMD_PAUSE */
#define V4L2_DEC_CMD_PAUSE_TO_BLACK (1 << 0)
/* Flags for V4L2_DEC_CMD_STOP */
#define V4L2_DEC_CMD_STOP_TO_BLACK (1 << 0)
#define V4L2_DEC_CMD_STOP_IMMEDIATELY (1 << 1)
/* Play format requirements (returned by the driver): */
/* The decoder has no special format requirements */
#define V4L2_DEC_START_FMT_NONE (0)
/* The decoder requires full GOPs */
#define V4L2_DEC_START_FMT_GOP (1)
/* The structure must be zeroed before use by the application.
   This ensures it can be extended safely in the future. */
struct v4l2_decoder_cmd {
__u32 cmd;
__u32 flags;
union {
struct {
__u64 pts;
} stop;
struct {
/* 0 or 1000 specifies normal speed,
1 specifies forward single stepping,
-1 specifies backward single stepping,
>1: playback at speed/1000 of the normal speed,
<-1: reverse playback at (-speed/1000) of the normal speed. */
__s32 speed;
__u32 format;
} start;
struct {
__u32 data[16];
} raw;
};
};
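/*
 * Usage sketch (illustrative): starting decoder playback at half speed with
 * audio muted, per the 'speed' semantics documented above ('fd' is an
 * assumed open decoder node):
 *
 *	struct v4l2_decoder_cmd cmd;
 *	memset(&cmd, 0, sizeof(cmd));	// zeroing is mandatory, see above
 *	cmd.cmd = V4L2_DEC_CMD_START;
 *	cmd.flags = V4L2_DEC_CMD_START_MUTE_AUDIO;
 *	cmd.start.speed = 500;		// 500/1000 of normal speed
 *	if (ioctl(fd, VIDIOC_DECODER_CMD, &cmd) < 0)
 *		perror("VIDIOC_DECODER_CMD");
 */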
#endif
/*
* D A T A S E R V I C E S ( V B I )
*
* Data services API by Michael Schimek
*/
/* Raw VBI */
struct v4l2_vbi_format {
__u32 sampling_rate; /* in 1 Hz */
__u32 offset;
__u32 samples_per_line;
__u32 sample_format; /* V4L2_PIX_FMT_* */
__s32 start[2];
__u32 count[2];
__u32 flags; /* V4L2_VBI_* */
__u32 reserved[2]; /* must be zero */
};
/* VBI flags */
#define V4L2_VBI_UNSYNC (1 << 0)
#define V4L2_VBI_INTERLACED (1 << 1)
/* ITU-R start lines for each field */
#define V4L2_VBI_ITU_525_F1_START (1)
#define V4L2_VBI_ITU_525_F2_START (264)
#define V4L2_VBI_ITU_625_F1_START (1)
#define V4L2_VBI_ITU_625_F2_START (314)
/* Sliced VBI
 *
 * This implements a proposed V4L2 API for the sliced VBI support
 * required by some hardware encoders. It may change without
 * notice in the definitive implementation.
 */
struct v4l2_sliced_vbi_format {
__u16 service_set;
/* service_lines[0][...] specifies lines 0-23 (1-23 used) of the first field
service_lines[1][...] specifies lines 0-23 (1-23 used) of the second field
(equals frame lines 313-336 for 625 line video
standards, 263-286 for 525 line standards) */
__u16 service_lines[2][24];
__u32 io_size;
__u32 reserved[2]; /* must be zero */
};
/* Teletext World System Teletext
   (WST), defined in ITU-R BT.653-2 */
#define V4L2_SLICED_TELETEXT_B (0x0001)
/* Video Program System, defined in ETS 300 231 */
#define V4L2_SLICED_VPS (0x0400)
/* Closed Caption, defined in EIA-608 */
#define V4L2_SLICED_CAPTION_525 (0x1000)
/* Wide Screen System, defined in ITU-R BT.1119 */
#define V4L2_SLICED_WSS_625 (0x4000)
#define V4L2_SLICED_VBI_525 (V4L2_SLICED_CAPTION_525)
#define V4L2_SLICED_VBI_625 (V4L2_SLICED_TELETEXT_B | V4L2_SLICED_VPS | V4L2_SLICED_WSS_625)
struct v4l2_sliced_vbi_cap {
__u16 service_set;
/* service_lines[0][...] specifies lines 0-23 (1-23 used) of the first field
service_lines[1][...] specifies lines 0-23 (1-23 used) of the second field
(equals frame lines 313-336 for 625 line video
standards, 263-286 for 525 line standards) */
__u16 service_lines[2][24];
__u32 type; /* enum v4l2_buf_type */
__u32 reserved[3]; /* must be 0 */
};
struct v4l2_sliced_vbi_data {
__u32 id;
__u32 field; /* 0: first field, 1: second field */
__u32 line; /* 1-23 */
__u32 reserved; /* must be 0 */
__u8 data[48];
};
/*
* Sliced VBI data inserted into MPEG Streams
*/
/*
* V4L2_MPEG_STREAM_VBI_FMT_IVTV:
*
* Structure of payload contained in an MPEG 2 Private Stream 1 PES Packet in an
* MPEG-2 Program Pack that contains V4L2_MPEG_STREAM_VBI_FMT_IVTV Sliced VBI
* data
*
* Note, the MPEG-2 Program Pack and Private Stream 1 PES packet header
* definitions are not included here. See the MPEG-2 specifications for details
* on these headers.
*/
/* Line type IDs */
#define V4L2_MPEG_VBI_IVTV_TELETEXT_B (1)
#define V4L2_MPEG_VBI_IVTV_CAPTION_525 (4)
#define V4L2_MPEG_VBI_IVTV_WSS_625 (5)
#define V4L2_MPEG_VBI_IVTV_VPS (7)
struct v4l2_mpeg_vbi_itv0_line {
__u8 id; /* One of V4L2_MPEG_VBI_IVTV_* above */
__u8 data[42]; /* Sliced VBI data for the line */
} __attribute__ ((packed));
struct v4l2_mpeg_vbi_itv0 {
__le32 linemask[2]; /* Bitmasks of VBI service lines present */
struct v4l2_mpeg_vbi_itv0_line line[35];
} __attribute__ ((packed));
struct v4l2_mpeg_vbi_ITV0 {
struct v4l2_mpeg_vbi_itv0_line line[36];
} __attribute__ ((packed));
#define V4L2_MPEG_VBI_IVTV_MAGIC0 "itv0"
#define V4L2_MPEG_VBI_IVTV_MAGIC1 "ITV0"
struct v4l2_mpeg_vbi_fmt_ivtv {
__u8 magic[4];
union {
struct v4l2_mpeg_vbi_itv0 itv0;
struct v4l2_mpeg_vbi_ITV0 ITV0;
};
} __attribute__ ((packed));
/*
* A G G R E G A T E S T R U C T U R E S
*/
/**
* struct v4l2_plane_pix_format - additional, per-plane format definition
* @sizeimage: maximum size in bytes required for data, for which
* this plane will be used
* @bytesperline: distance in bytes between the leftmost pixels in two
* adjacent lines
* @reserved: drivers and applications must zero this array
*/
struct v4l2_plane_pix_format {
__u32 sizeimage;
__u32 bytesperline;
__u16 reserved[6];
} __attribute__ ((packed));
/**
* struct v4l2_pix_format_mplane - multiplanar format definition
* @width: image width in pixels
* @height: image height in pixels
* @pixelformat: little endian four character code (fourcc)
* @field: enum v4l2_field; field order (for interlaced video)
* @colorspace: enum v4l2_colorspace; supplemental to pixelformat
* @plane_fmt: per-plane information
* @num_planes: number of planes for this format
* @flags: format flags (V4L2_PIX_FMT_FLAG_*)
* @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding
* @hsv_enc: enum v4l2_hsv_encoding, HSV encoding
* @quantization: enum v4l2_quantization, colorspace quantization
* @xfer_func: enum v4l2_xfer_func, colorspace transfer function
* @reserved: drivers and applications must zero this array
*/
struct v4l2_pix_format_mplane {
__u32 width;
__u32 height;
__u32 pixelformat;
__u32 field;
__u32 colorspace;
struct v4l2_plane_pix_format plane_fmt[VIDEO_MAX_PLANES];
__u8 num_planes;
__u8 flags;
union {
__u8 ycbcr_enc;
__u8 hsv_enc;
};
__u8 quantization;
__u8 xfer_func;
__u8 reserved[7];
} __attribute__ ((packed));
/**
* struct v4l2_sdr_format - SDR format definition
* @pixelformat: little endian four character code (fourcc)
* @buffersize: maximum size in bytes required for data
* @reserved: drivers and applications must zero this array
*/
struct v4l2_sdr_format {
__u32 pixelformat;
__u32 buffersize;
__u8 reserved[24];
} __attribute__ ((packed));
/**
* struct v4l2_meta_format - metadata format definition
* @dataformat: little endian four character code (fourcc)
* @buffersize: maximum size in bytes required for data
* @width: number of data units of data per line (valid for line
* based formats only, see format documentation)
* @height: number of lines of data per buffer (valid for line based
* formats only)
* @bytesperline: offset between the beginnings of two adjacent lines in
* bytes (valid for line based formats only)
*/
struct v4l2_meta_format {
__u32 dataformat;
__u32 buffersize;
__u32 width;
__u32 height;
__u32 bytesperline;
} __attribute__ ((packed));
/**
* struct v4l2_format - stream data format
* @type: enum v4l2_buf_type; type of the data stream
* @fmt.pix: definition of an image format
* @fmt.pix_mp: definition of a multiplanar image format
* @fmt.win: definition of an overlaid image
* @fmt.vbi: raw VBI capture or output parameters
* @fmt.sliced: sliced VBI capture or output parameters
* @fmt.raw_data: placeholder for future extensions and custom formats
* @fmt: union of @pix, @pix_mp, @win, @vbi, @sliced, @sdr,
* @meta and @raw_data
*/
struct v4l2_format {
__u32 type;
union {
struct v4l2_pix_format pix; /* V4L2_BUF_TYPE_VIDEO_CAPTURE */
struct v4l2_pix_format_mplane pix_mp; /* V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE */
struct v4l2_window win; /* V4L2_BUF_TYPE_VIDEO_OVERLAY */
struct v4l2_vbi_format vbi; /* V4L2_BUF_TYPE_VBI_CAPTURE */
struct v4l2_sliced_vbi_format sliced; /* V4L2_BUF_TYPE_SLICED_VBI_CAPTURE */
struct v4l2_sdr_format sdr; /* V4L2_BUF_TYPE_SDR_CAPTURE */
struct v4l2_meta_format meta; /* V4L2_BUF_TYPE_META_CAPTURE */
__u8 raw_data[200]; /* user-defined */
} fmt;
};
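/*
 * Usage sketch (illustrative): negotiating a single-planar capture format.
 * Drivers adjust the request to the closest supported values, so the
 * structure should be re-read after the ioctl returns:
 *
 *	struct v4l2_format fmt;
 *	memset(&fmt, 0, sizeof(fmt));
 *	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	fmt.fmt.pix.width = 640;
 *	fmt.fmt.pix.height = 480;
 *	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
 *	fmt.fmt.pix.field = V4L2_FIELD_NONE;
 *	if (ioctl(fd, VIDIOC_S_FMT, &fmt) == 0)
 *		printf("granted %ux%u\n", fmt.fmt.pix.width, fmt.fmt.pix.height);
 */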
/* Stream type-dependent parameters
*/
struct v4l2_streamparm {
__u32 type; /* enum v4l2_buf_type */
union {
struct v4l2_captureparm capture;
struct v4l2_outputparm output;
__u8 raw_data[200]; /* user-defined */
} parm;
};
/*
* E V E N T S
*/
#define V4L2_EVENT_ALL 0
#define V4L2_EVENT_VSYNC 1
#define V4L2_EVENT_EOS 2
#define V4L2_EVENT_CTRL 3
#define V4L2_EVENT_FRAME_SYNC 4
#define V4L2_EVENT_SOURCE_CHANGE 5
#define V4L2_EVENT_MOTION_DET 6
#define V4L2_EVENT_PRIVATE_START 0x08000000
/* Payload for V4L2_EVENT_VSYNC */
struct v4l2_event_vsync {
/* Can be V4L2_FIELD_ANY, _NONE, _TOP or _BOTTOM */
__u8 field;
} __attribute__ ((packed));
/* Payload for V4L2_EVENT_CTRL */
#define V4L2_EVENT_CTRL_CH_VALUE (1 << 0)
#define V4L2_EVENT_CTRL_CH_FLAGS (1 << 1)
#define V4L2_EVENT_CTRL_CH_RANGE (1 << 2)
#define V4L2_EVENT_CTRL_CH_DIMENSIONS (1 << 3)
struct v4l2_event_ctrl {
__u32 changes;
__u32 type;
union {
__s32 value;
__s64 value64;
};
__u32 flags;
__s32 minimum;
__s32 maximum;
__s32 step;
__s32 default_value;
};
struct v4l2_event_frame_sync {
__u32 frame_sequence;
};
#define V4L2_EVENT_SRC_CH_RESOLUTION (1 << 0)
struct v4l2_event_src_change {
__u32 changes;
};
#define V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ (1 << 0)
/**
* struct v4l2_event_motion_det - motion detection event
* @flags: if V4L2_EVENT_MD_FL_HAVE_FRAME_SEQ is set, then the
* frame_sequence field is valid.
* @frame_sequence: the frame sequence number associated with this event.
* @region_mask: which regions detected motion.
*/
struct v4l2_event_motion_det {
__u32 flags;
__u32 frame_sequence;
__u32 region_mask;
};
struct v4l2_event {
__u32 type;
union {
struct v4l2_event_vsync vsync;
struct v4l2_event_ctrl ctrl;
struct v4l2_event_frame_sync frame_sync;
struct v4l2_event_src_change src_change;
struct v4l2_event_motion_det motion_det;
__u8 data[64];
} u;
__u32 pending;
__u32 sequence;
struct timespec timestamp;
__u32 id;
__u32 reserved[8];
};
#define V4L2_EVENT_SUB_FL_SEND_INITIAL (1 << 0)
#define V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK (1 << 1)
struct v4l2_event_subscription {
__u32 type;
__u32 id;
__u32 flags;
__u32 reserved[5];
};
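/*
 * Usage sketch (illustrative): subscribing to control-change events and
 * draining them with VIDIOC_DQEVENT once poll() reports POLLPRI
 * (V4L2_CID_BRIGHTNESS is just an example control id):
 *
 *	struct v4l2_event_subscription sub;
 *	struct v4l2_event ev;
 *	memset(&sub, 0, sizeof(sub));
 *	sub.type = V4L2_EVENT_CTRL;
 *	sub.id = V4L2_CID_BRIGHTNESS;
 *	sub.flags = V4L2_EVENT_SUB_FL_SEND_INITIAL;
 *	ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
 *	// ... wait for POLLPRI on fd, then:
 *	if (ioctl(fd, VIDIOC_DQEVENT, &ev) == 0 && ev.type == V4L2_EVENT_CTRL)
 *		;	// ev.u.ctrl.value holds the new control value
 */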
/*
* A D V A N C E D D E B U G G I N G
*
* NOTE: EXPERIMENTAL API, NEVER RELY ON THIS IN APPLICATIONS!
* FOR DEBUGGING, TESTING AND INTERNAL USE ONLY!
*/
/* VIDIOC_DBG_G_REGISTER and VIDIOC_DBG_S_REGISTER */
#define V4L2_CHIP_MATCH_BRIDGE 0 /* Match against chip ID on the bridge (0 for the bridge) */
#define V4L2_CHIP_MATCH_SUBDEV 4 /* Match against subdev index */
/* The following four defines are no longer in use */
#define V4L2_CHIP_MATCH_HOST V4L2_CHIP_MATCH_BRIDGE
#define V4L2_CHIP_MATCH_I2C_DRIVER 1 /* Match against I2C driver name */
#define V4L2_CHIP_MATCH_I2C_ADDR 2 /* Match against I2C 7-bit address */
#define V4L2_CHIP_MATCH_AC97 3 /* Match against ancillary AC97 chip */
struct v4l2_dbg_match {
__u32 type; /* Match type */
union { /* Match this chip, meaning determined by type */
__u32 addr;
char name[32];
};
} __attribute__ ((packed));
struct v4l2_dbg_register {
struct v4l2_dbg_match match;
__u32 size; /* register size in bytes */
__u64 reg;
__u64 val;
} __attribute__ ((packed));
#define V4L2_CHIP_FL_READABLE (1 << 0)
#define V4L2_CHIP_FL_WRITABLE (1 << 1)
/* VIDIOC_DBG_G_CHIP_INFO */
struct v4l2_dbg_chip_info {
struct v4l2_dbg_match match;
char name[32];
__u32 flags;
__u32 reserved[32];
} __attribute__ ((packed));
/**
* struct v4l2_create_buffers - VIDIOC_CREATE_BUFS argument
* @index: on return, index of the first created buffer
* @count: entry: number of requested buffers,
* return: number of created buffers
* @memory: enum v4l2_memory; buffer memory type
* @format: frame format, for which buffers are requested
* @capabilities: capabilities of this buffer type.
* @flags: additional buffer management attributes (ignored unless the
* queue has V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS capability
* and configured for MMAP streaming I/O).
 * @max_num_buffers: if the V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS capability
 *		flag is set, this field indicates the maximum possible
 *		number of buffers for this queue.
* @reserved: future extensions
*/
struct v4l2_create_buffers {
__u32 index;
__u32 count;
__u32 memory;
struct v4l2_format format;
__u32 capabilities;
__u32 flags;
__u32 max_num_buffers;
__u32 reserved[5];
};
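/*
 * Usage sketch (illustrative): allocating additional MMAP buffers for an
 * already negotiated format. On return, 'index' is the first new buffer
 * and 'count' holds how many buffers were actually created:
 *
 *	struct v4l2_create_buffers cb;
 *	memset(&cb, 0, sizeof(cb));
 *	cb.count = 4;
 *	cb.memory = V4L2_MEMORY_MMAP;
 *	cb.format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	ioctl(fd, VIDIOC_G_FMT, &cb.format);	// reuse the current format
 *	if (ioctl(fd, VIDIOC_CREATE_BUFS, &cb) == 0)
 *		printf("buffers %u..%u\n", cb.index, cb.index + cb.count - 1);
 */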
/**
* struct v4l2_remove_buffers - VIDIOC_REMOVE_BUFS argument
* @index: the first buffer to be removed
 * @count: number of buffers to be removed
* @type: enum v4l2_buf_type
* @reserved: future extensions
*/
struct v4l2_remove_buffers {
__u32 index;
__u32 count;
__u32 type;
__u32 reserved[13];
};
/*
* I O C T L C O D E S F O R V I D E O D E V I C E S
*
*/
#define VIDIOC_QUERYCAP _IOR('V', 0, struct v4l2_capability)
#define VIDIOC_ENUM_FMT _IOWR('V', 2, struct v4l2_fmtdesc)
#define VIDIOC_G_FMT _IOWR('V', 4, struct v4l2_format)
#define VIDIOC_S_FMT _IOWR('V', 5, struct v4l2_format)
#define VIDIOC_REQBUFS _IOWR('V', 8, struct v4l2_requestbuffers)
#define VIDIOC_QUERYBUF _IOWR('V', 9, struct v4l2_buffer)
#define VIDIOC_G_FBUF _IOR('V', 10, struct v4l2_framebuffer)
#define VIDIOC_S_FBUF _IOW('V', 11, struct v4l2_framebuffer)
#define VIDIOC_OVERLAY _IOW('V', 14, int)
#define VIDIOC_QBUF _IOWR('V', 15, struct v4l2_buffer)
#define VIDIOC_EXPBUF _IOWR('V', 16, struct v4l2_exportbuffer)
#define VIDIOC_DQBUF _IOWR('V', 17, struct v4l2_buffer)
#define VIDIOC_STREAMON _IOW('V', 18, int)
#define VIDIOC_STREAMOFF _IOW('V', 19, int)
#define VIDIOC_G_PARM _IOWR('V', 21, struct v4l2_streamparm)
#define VIDIOC_S_PARM _IOWR('V', 22, struct v4l2_streamparm)
#define VIDIOC_G_STD _IOR('V', 23, v4l2_std_id)
#define VIDIOC_S_STD _IOW('V', 24, v4l2_std_id)
#define VIDIOC_ENUMSTD _IOWR('V', 25, struct v4l2_standard)
#define VIDIOC_ENUMINPUT _IOWR('V', 26, struct v4l2_input)
#define VIDIOC_G_CTRL _IOWR('V', 27, struct v4l2_control)
#define VIDIOC_S_CTRL _IOWR('V', 28, struct v4l2_control)
#define VIDIOC_G_TUNER _IOWR('V', 29, struct v4l2_tuner)
#define VIDIOC_S_TUNER _IOW('V', 30, struct v4l2_tuner)
#define VIDIOC_G_AUDIO _IOR('V', 33, struct v4l2_audio)
#define VIDIOC_S_AUDIO _IOW('V', 34, struct v4l2_audio)
#define VIDIOC_QUERYCTRL _IOWR('V', 36, struct v4l2_queryctrl)
#define VIDIOC_QUERYMENU _IOWR('V', 37, struct v4l2_querymenu)
#define VIDIOC_G_INPUT _IOR('V', 38, int)
#define VIDIOC_S_INPUT _IOWR('V', 39, int)
#define VIDIOC_G_EDID _IOWR('V', 40, struct v4l2_edid)
#define VIDIOC_S_EDID _IOWR('V', 41, struct v4l2_edid)
#define VIDIOC_G_OUTPUT _IOR('V', 46, int)
#define VIDIOC_S_OUTPUT _IOWR('V', 47, int)
#define VIDIOC_ENUMOUTPUT _IOWR('V', 48, struct v4l2_output)
#define VIDIOC_G_AUDOUT _IOR('V', 49, struct v4l2_audioout)
#define VIDIOC_S_AUDOUT _IOW('V', 50, struct v4l2_audioout)
#define VIDIOC_G_MODULATOR _IOWR('V', 54, struct v4l2_modulator)
#define VIDIOC_S_MODULATOR _IOW('V', 55, struct v4l2_modulator)
#define VIDIOC_G_FREQUENCY _IOWR('V', 56, struct v4l2_frequency)
#define VIDIOC_S_FREQUENCY _IOW('V', 57, struct v4l2_frequency)
#define VIDIOC_CROPCAP _IOWR('V', 58, struct v4l2_cropcap)
#define VIDIOC_G_CROP _IOWR('V', 59, struct v4l2_crop)
#define VIDIOC_S_CROP _IOW('V', 60, struct v4l2_crop)
#define VIDIOC_G_JPEGCOMP _IOR('V', 61, struct v4l2_jpegcompression)
#define VIDIOC_S_JPEGCOMP _IOW('V', 62, struct v4l2_jpegcompression)
#define VIDIOC_QUERYSTD _IOR('V', 63, v4l2_std_id)
#define VIDIOC_TRY_FMT _IOWR('V', 64, struct v4l2_format)
#define VIDIOC_ENUMAUDIO _IOWR('V', 65, struct v4l2_audio)
#define VIDIOC_ENUMAUDOUT _IOWR('V', 66, struct v4l2_audioout)
#define VIDIOC_G_PRIORITY _IOR('V', 67, __u32) /* enum v4l2_priority */
#define VIDIOC_S_PRIORITY _IOW('V', 68, __u32) /* enum v4l2_priority */
#define VIDIOC_G_SLICED_VBI_CAP _IOWR('V', 69, struct v4l2_sliced_vbi_cap)
#define VIDIOC_LOG_STATUS _IO('V', 70)
#define VIDIOC_G_EXT_CTRLS _IOWR('V', 71, struct v4l2_ext_controls)
#define VIDIOC_S_EXT_CTRLS _IOWR('V', 72, struct v4l2_ext_controls)
#define VIDIOC_TRY_EXT_CTRLS _IOWR('V', 73, struct v4l2_ext_controls)
#define VIDIOC_ENUM_FRAMESIZES _IOWR('V', 74, struct v4l2_frmsizeenum)
#define VIDIOC_ENUM_FRAMEINTERVALS _IOWR('V', 75, struct v4l2_frmivalenum)
#define VIDIOC_G_ENC_INDEX _IOR('V', 76, struct v4l2_enc_idx)
#define VIDIOC_ENCODER_CMD _IOWR('V', 77, struct v4l2_encoder_cmd)
#define VIDIOC_TRY_ENCODER_CMD _IOWR('V', 78, struct v4l2_encoder_cmd)
/*
* Experimental, meant for debugging, testing and internal use.
* Only implemented if CONFIG_VIDEO_ADV_DEBUG is defined.
* You must be root to use these ioctls. Never use these in applications!
*/
#define VIDIOC_DBG_S_REGISTER _IOW('V', 79, struct v4l2_dbg_register)
#define VIDIOC_DBG_G_REGISTER _IOWR('V', 80, struct v4l2_dbg_register)
#define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek)
#define VIDIOC_S_DV_TIMINGS _IOWR('V', 87, struct v4l2_dv_timings)
#define VIDIOC_G_DV_TIMINGS _IOWR('V', 88, struct v4l2_dv_timings)
#define VIDIOC_DQEVENT _IOR('V', 89, struct v4l2_event)
#define VIDIOC_SUBSCRIBE_EVENT _IOW('V', 90, struct v4l2_event_subscription)
#define VIDIOC_UNSUBSCRIBE_EVENT _IOW('V', 91, struct v4l2_event_subscription)
#define VIDIOC_CREATE_BUFS _IOWR('V', 92, struct v4l2_create_buffers)
#define VIDIOC_PREPARE_BUF _IOWR('V', 93, struct v4l2_buffer)
#define VIDIOC_G_SELECTION _IOWR('V', 94, struct v4l2_selection)
#define VIDIOC_S_SELECTION _IOWR('V', 95, struct v4l2_selection)
#define VIDIOC_DECODER_CMD _IOWR('V', 96, struct v4l2_decoder_cmd)
#define VIDIOC_TRY_DECODER_CMD _IOWR('V', 97, struct v4l2_decoder_cmd)
#define VIDIOC_ENUM_DV_TIMINGS _IOWR('V', 98, struct v4l2_enum_dv_timings)
#define VIDIOC_QUERY_DV_TIMINGS _IOR('V', 99, struct v4l2_dv_timings)
#define VIDIOC_DV_TIMINGS_CAP _IOWR('V', 100, struct v4l2_dv_timings_cap)
#define VIDIOC_ENUM_FREQ_BANDS _IOWR('V', 101, struct v4l2_frequency_band)
/*
* Experimental, meant for debugging, testing and internal use.
* Never use this in applications!
*/
#define VIDIOC_DBG_G_CHIP_INFO _IOWR('V', 102, struct v4l2_dbg_chip_info)
#define VIDIOC_QUERY_EXT_CTRL _IOWR('V', 103, struct v4l2_query_ext_ctrl)
#define VIDIOC_REMOVE_BUFS _IOWR('V', 104, struct v4l2_remove_buffers)
/* Reminder: when adding new ioctls please add support for them to
drivers/media/v4l2-core/v4l2-compat-ioctl32.c as well! */
#define BASE_VIDIOC_PRIVATE 192 /* 192-255 are private */
/* Deprecated definitions kept for backwards compatibility */
#define V4L2_PIX_FMT_HM12 V4L2_PIX_FMT_NV12_16L16
#define V4L2_PIX_FMT_SUNXI_TILED_NV12 V4L2_PIX_FMT_NV12_32L32
/*
* This capability was never implemented, anyone using this cap should drop it
* from their code.
*/
#define V4L2_CAP_ASYNCIO 0x02000000
#endif /* __LINUX_VIDEODEV2_H */
|
0 | repos/libcamera/include | repos/libcamera/include/linux/media.h | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Multimedia device API
*
* Copyright (C) 2010 Nokia Corporation
*
* Contacts: Laurent Pinchart <[email protected]>
* Sakari Ailus <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __LINUX_MEDIA_H
#define __LINUX_MEDIA_H
#include <linux/ioctl.h>
#include <linux/types.h>
struct media_device_info {
char driver[16];
char model[32];
char serial[40];
char bus_info[32];
__u32 media_version;
__u32 hw_revision;
__u32 driver_version;
__u32 reserved[31];
};
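/*
 * Usage sketch (illustrative): identifying a media controller device
 * ('fd' is an assumed open /dev/mediaN node). media_version packs the
 * kernel version as (major << 16) | (minor << 8) | patch:
 *
 *	struct media_device_info info;
 *	memset(&info, 0, sizeof(info));
 *	if (ioctl(fd, MEDIA_IOC_DEVICE_INFO, &info) == 0)
 *		printf("%s on %s, media API %u.%u.%u\n",
 *		       info.model, info.bus_info,
 *		       info.media_version >> 16,
 *		       (info.media_version >> 8) & 0xff,
 *		       info.media_version & 0xff);
 */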
/*
* Base number ranges for entity functions
*
* NOTE: Userspace should not rely on these ranges to identify a group
* of function types, as newer functions can be added with any name within
* the full u32 range.
*
* Some older functions use the MEDIA_ENT_F_OLD_*_BASE range. Do not
* change this, this is for backwards compatibility. When adding new
* functions always use MEDIA_ENT_F_BASE.
*/
#define MEDIA_ENT_F_BASE 0x00000000
#define MEDIA_ENT_F_OLD_BASE 0x00010000
#define MEDIA_ENT_F_OLD_SUBDEV_BASE 0x00020000
/*
* Initial value to be used when a new entity is created
* Drivers should change it to something useful.
*/
#define MEDIA_ENT_F_UNKNOWN MEDIA_ENT_F_BASE
/*
* Subdevs are initialized with MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN in order
* to preserve backward compatibility. Drivers must change to the proper
* subdev type before registering the entity.
*/
#define MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN MEDIA_ENT_F_OLD_SUBDEV_BASE
/*
* DVB entity functions
*/
#define MEDIA_ENT_F_DTV_DEMOD (MEDIA_ENT_F_BASE + 0x00001)
#define MEDIA_ENT_F_TS_DEMUX (MEDIA_ENT_F_BASE + 0x00002)
#define MEDIA_ENT_F_DTV_CA (MEDIA_ENT_F_BASE + 0x00003)
#define MEDIA_ENT_F_DTV_NET_DECAP (MEDIA_ENT_F_BASE + 0x00004)
/*
* I/O entity functions
*/
#define MEDIA_ENT_F_IO_V4L (MEDIA_ENT_F_OLD_BASE + 1)
#define MEDIA_ENT_F_IO_DTV (MEDIA_ENT_F_BASE + 0x01001)
#define MEDIA_ENT_F_IO_VBI (MEDIA_ENT_F_BASE + 0x01002)
#define MEDIA_ENT_F_IO_SWRADIO (MEDIA_ENT_F_BASE + 0x01003)
/*
* Sensor functions
*/
#define MEDIA_ENT_F_CAM_SENSOR (MEDIA_ENT_F_OLD_SUBDEV_BASE + 1)
#define MEDIA_ENT_F_FLASH (MEDIA_ENT_F_OLD_SUBDEV_BASE + 2)
#define MEDIA_ENT_F_LENS (MEDIA_ENT_F_OLD_SUBDEV_BASE + 3)
/*
* Digital TV, analog TV, radio and/or software defined radio tuner functions.
*
 * It is the responsibility of the master/bridge drivers to add connectors
 * and links for MEDIA_ENT_F_TUNER. Note that some old tuners
 * may require separate I2C chips to decode analog TV signals
 * when the master/bridge chipset doesn't have its own TV standard decoder.
 * In such cases, the IF-PLL stage is mapped via one or two entities:
 * MEDIA_ENT_F_IF_VID_DECODER and/or MEDIA_ENT_F_IF_AUD_DECODER.
*/
#define MEDIA_ENT_F_TUNER (MEDIA_ENT_F_OLD_SUBDEV_BASE + 5)
/*
* Analog TV IF-PLL decoder functions
*
 * It is the responsibility of the master/bridge drivers to create links
* for MEDIA_ENT_F_IF_VID_DECODER and MEDIA_ENT_F_IF_AUD_DECODER.
*/
#define MEDIA_ENT_F_IF_VID_DECODER (MEDIA_ENT_F_BASE + 0x02001)
#define MEDIA_ENT_F_IF_AUD_DECODER (MEDIA_ENT_F_BASE + 0x02002)
/*
* Audio entity functions
*/
#define MEDIA_ENT_F_AUDIO_CAPTURE (MEDIA_ENT_F_BASE + 0x03001)
#define MEDIA_ENT_F_AUDIO_PLAYBACK (MEDIA_ENT_F_BASE + 0x03002)
#define MEDIA_ENT_F_AUDIO_MIXER (MEDIA_ENT_F_BASE + 0x03003)
/*
* Processing entity functions
*/
#define MEDIA_ENT_F_PROC_VIDEO_COMPOSER (MEDIA_ENT_F_BASE + 0x4001)
#define MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER (MEDIA_ENT_F_BASE + 0x4002)
#define MEDIA_ENT_F_PROC_VIDEO_PIXEL_ENC_CONV (MEDIA_ENT_F_BASE + 0x4003)
#define MEDIA_ENT_F_PROC_VIDEO_LUT (MEDIA_ENT_F_BASE + 0x4004)
#define MEDIA_ENT_F_PROC_VIDEO_SCALER (MEDIA_ENT_F_BASE + 0x4005)
#define MEDIA_ENT_F_PROC_VIDEO_STATISTICS (MEDIA_ENT_F_BASE + 0x4006)
#define MEDIA_ENT_F_PROC_VIDEO_ENCODER (MEDIA_ENT_F_BASE + 0x4007)
#define MEDIA_ENT_F_PROC_VIDEO_DECODER (MEDIA_ENT_F_BASE + 0x4008)
#define MEDIA_ENT_F_PROC_VIDEO_ISP (MEDIA_ENT_F_BASE + 0x4009)
/*
* Switch and bridge entity functions
*/
#define MEDIA_ENT_F_VID_MUX (MEDIA_ENT_F_BASE + 0x5001)
#define MEDIA_ENT_F_VID_IF_BRIDGE (MEDIA_ENT_F_BASE + 0x5002)
/*
* Video decoder/encoder functions
*/
#define MEDIA_ENT_F_ATV_DECODER (MEDIA_ENT_F_OLD_SUBDEV_BASE + 4)
#define MEDIA_ENT_F_DV_DECODER (MEDIA_ENT_F_BASE + 0x6001)
#define MEDIA_ENT_F_DV_ENCODER (MEDIA_ENT_F_BASE + 0x6002)
/* Entity flags */
#define MEDIA_ENT_FL_DEFAULT (1U << 0)
#define MEDIA_ENT_FL_CONNECTOR (1U << 1)
/* OR with the entity id value to find the next entity */
#define MEDIA_ENT_ID_FLAG_NEXT (1U << 31)
struct media_entity_desc {
__u32 id;
char name[32];
__u32 type;
__u32 revision;
__u32 flags;
__u32 group_id;
__u16 pads;
__u16 links;
__u32 reserved[4];
union {
/* Node specifications */
struct {
__u32 major;
__u32 minor;
} dev;
/*
* TODO: this shouldn't have been added without
* actual drivers that use this. When the first real driver
 * appears that sets this information, special attention
 * should be given to whether this information is 1) enough, and
* 2) can deal with udev rules that rename devices. The struct
* dev would not be sufficient for this since that does not
* contain the subdevice information. In addition, struct dev
* can only refer to a single device, and not to multiple (e.g.
* pcm and mixer devices).
*/
struct {
__u32 card;
__u32 device;
__u32 subdevice;
} alsa;
/*
* DEPRECATED: previous node specifications. Kept just to
* avoid breaking compilation. Use media_entity_desc.dev
* instead.
*/
struct {
__u32 major;
__u32 minor;
} v4l;
struct {
__u32 major;
__u32 minor;
} fb;
int dvb;
/* Sub-device specifications */
/* Nothing needed yet */
__u8 raw[184];
};
};
#define MEDIA_PAD_FL_SINK (1U << 0)
#define MEDIA_PAD_FL_SOURCE (1U << 1)
#define MEDIA_PAD_FL_MUST_CONNECT (1U << 2)
struct media_pad_desc {
__u32 entity; /* entity ID */
__u16 index; /* pad index */
__u32 flags; /* pad flags */
__u32 reserved[2];
};
#define MEDIA_LNK_FL_ENABLED (1U << 0)
#define MEDIA_LNK_FL_IMMUTABLE (1U << 1)
#define MEDIA_LNK_FL_DYNAMIC (1U << 2)
#define MEDIA_LNK_FL_LINK_TYPE (0xf << 28)
# define MEDIA_LNK_FL_DATA_LINK (0U << 28)
# define MEDIA_LNK_FL_INTERFACE_LINK (1U << 28)
# define MEDIA_LNK_FL_ANCILLARY_LINK (2U << 28)
struct media_link_desc {
struct media_pad_desc source;
struct media_pad_desc sink;
__u32 flags;
__u32 reserved[2];
};
struct media_links_enum {
__u32 entity;
/* Should have enough room for pads elements */
struct media_pad_desc *pads;
/* Should have enough room for links elements */
struct media_link_desc *links;
__u32 reserved[4];
};
/* Interface type ranges */
#define MEDIA_INTF_T_DVB_BASE 0x00000100
#define MEDIA_INTF_T_V4L_BASE 0x00000200
/* Interface types */
#define MEDIA_INTF_T_DVB_FE (MEDIA_INTF_T_DVB_BASE)
#define MEDIA_INTF_T_DVB_DEMUX (MEDIA_INTF_T_DVB_BASE + 1)
#define MEDIA_INTF_T_DVB_DVR (MEDIA_INTF_T_DVB_BASE + 2)
#define MEDIA_INTF_T_DVB_CA (MEDIA_INTF_T_DVB_BASE + 3)
#define MEDIA_INTF_T_DVB_NET (MEDIA_INTF_T_DVB_BASE + 4)
#define MEDIA_INTF_T_V4L_VIDEO (MEDIA_INTF_T_V4L_BASE)
#define MEDIA_INTF_T_V4L_VBI (MEDIA_INTF_T_V4L_BASE + 1)
#define MEDIA_INTF_T_V4L_RADIO (MEDIA_INTF_T_V4L_BASE + 2)
#define MEDIA_INTF_T_V4L_SUBDEV (MEDIA_INTF_T_V4L_BASE + 3)
#define MEDIA_INTF_T_V4L_SWRADIO (MEDIA_INTF_T_V4L_BASE + 4)
#define MEDIA_INTF_T_V4L_TOUCH (MEDIA_INTF_T_V4L_BASE + 5)
#define MEDIA_INTF_T_ALSA_BASE 0x00000300
#define MEDIA_INTF_T_ALSA_PCM_CAPTURE (MEDIA_INTF_T_ALSA_BASE)
#define MEDIA_INTF_T_ALSA_PCM_PLAYBACK (MEDIA_INTF_T_ALSA_BASE + 1)
#define MEDIA_INTF_T_ALSA_CONTROL (MEDIA_INTF_T_ALSA_BASE + 2)
/*
* MC next gen API definitions
*/
/*
* Appeared in 4.19.0.
*
* The media_version argument comes from the media_version field in
* struct media_device_info.
*/
#define MEDIA_V2_ENTITY_HAS_FLAGS(media_version) \
((media_version) >= ((4U << 16) | (19U << 8) | 0U))
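/*
 * Usage sketch (illustrative): gate access to the 'flags' field on the
 * version reported by MEDIA_IOC_DEVICE_INFO ('handle_flags' is a
 * hypothetical application helper):
 *
 *	if (MEDIA_V2_ENTITY_HAS_FLAGS(info.media_version))
 *		handle_flags(entity->flags);
 */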
struct media_v2_entity {
__u32 id;
char name[64];
__u32 function; /* Main function of the entity */
__u32 flags;
__u32 reserved[5];
} __attribute__ ((packed));
/* Should match the specific fields at media_intf_devnode */
struct media_v2_intf_devnode {
__u32 major;
__u32 minor;
} __attribute__ ((packed));
struct media_v2_interface {
__u32 id;
__u32 intf_type;
__u32 flags;
__u32 reserved[9];
union {
struct media_v2_intf_devnode devnode;
__u32 raw[16];
};
} __attribute__ ((packed));
/*
* Appeared in 4.19.0.
*
* The media_version argument comes from the media_version field in
* struct media_device_info.
*/
#define MEDIA_V2_PAD_HAS_INDEX(media_version) \
((media_version) >= ((4U << 16) | (19U << 8) | 0U))
struct media_v2_pad {
__u32 id;
__u32 entity_id;
__u32 flags;
__u32 index;
__u32 reserved[4];
} __attribute__ ((packed));
struct media_v2_link {
__u32 id;
__u32 source_id;
__u32 sink_id;
__u32 flags;
__u32 reserved[6];
} __attribute__ ((packed));
struct media_v2_topology {
__u64 topology_version;
__u32 num_entities;
__u32 reserved1;
__u64 ptr_entities;
__u32 num_interfaces;
__u32 reserved2;
__u64 ptr_interfaces;
__u32 num_pads;
__u32 reserved3;
__u64 ptr_pads;
__u32 num_links;
__u32 reserved4;
__u64 ptr_links;
} __attribute__ ((packed));
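/*
 * Usage sketch (illustrative): MEDIA_IOC_G_TOPOLOGY is typically issued
 * twice - first with NULL pointers to learn the element counts, then with
 * userspace arrays passed as __u64-encoded pointers:
 *
 *	struct media_v2_topology topo;
 *	memset(&topo, 0, sizeof(topo));
 *	ioctl(fd, MEDIA_IOC_G_TOPOLOGY, &topo);		// counts only
 *	struct media_v2_entity *ents =
 *		calloc(topo.num_entities, sizeof(*ents));
 *	topo.ptr_entities = (__u64)(uintptr_t)ents;
 *	ioctl(fd, MEDIA_IOC_G_TOPOLOGY, &topo);		// fill entity array
 */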
/* ioctls */
#define MEDIA_IOC_DEVICE_INFO _IOWR('|', 0x00, struct media_device_info)
#define MEDIA_IOC_ENUM_ENTITIES _IOWR('|', 0x01, struct media_entity_desc)
#define MEDIA_IOC_ENUM_LINKS _IOWR('|', 0x02, struct media_links_enum)
#define MEDIA_IOC_SETUP_LINK _IOWR('|', 0x03, struct media_link_desc)
#define MEDIA_IOC_G_TOPOLOGY _IOWR('|', 0x04, struct media_v2_topology)
#define MEDIA_IOC_REQUEST_ALLOC _IOR ('|', 0x05, int)
/*
* These ioctls are called on the request file descriptor as returned
* by MEDIA_IOC_REQUEST_ALLOC.
*/
#define MEDIA_REQUEST_IOC_QUEUE _IO('|', 0x80)
#define MEDIA_REQUEST_IOC_REINIT _IO('|', 0x81)
/*
* Legacy symbols used to avoid userspace compilation breakages.
* Do not use any of this in new applications!
*
 * These symbols map the entity functions into types and should be
 * used only in legacy programs for legacy hardware. Don't rely
 * on them for MEDIA_IOC_G_TOPOLOGY.
*/
#define MEDIA_ENT_TYPE_SHIFT 16
#define MEDIA_ENT_TYPE_MASK 0x00ff0000
#define MEDIA_ENT_SUBTYPE_MASK 0x0000ffff
#define MEDIA_ENT_T_DEVNODE_UNKNOWN (MEDIA_ENT_F_OLD_BASE | \
MEDIA_ENT_SUBTYPE_MASK)
#define MEDIA_ENT_T_DEVNODE MEDIA_ENT_F_OLD_BASE
#define MEDIA_ENT_T_DEVNODE_V4L MEDIA_ENT_F_IO_V4L
#define MEDIA_ENT_T_DEVNODE_FB (MEDIA_ENT_F_OLD_BASE + 2)
#define MEDIA_ENT_T_DEVNODE_ALSA (MEDIA_ENT_F_OLD_BASE + 3)
#define MEDIA_ENT_T_DEVNODE_DVB (MEDIA_ENT_F_OLD_BASE + 4)
#define MEDIA_ENT_T_UNKNOWN MEDIA_ENT_F_UNKNOWN
#define MEDIA_ENT_T_V4L2_VIDEO MEDIA_ENT_F_IO_V4L
#define MEDIA_ENT_T_V4L2_SUBDEV MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN
#define MEDIA_ENT_T_V4L2_SUBDEV_SENSOR MEDIA_ENT_F_CAM_SENSOR
#define MEDIA_ENT_T_V4L2_SUBDEV_FLASH MEDIA_ENT_F_FLASH
#define MEDIA_ENT_T_V4L2_SUBDEV_LENS MEDIA_ENT_F_LENS
#define MEDIA_ENT_T_V4L2_SUBDEV_DECODER MEDIA_ENT_F_ATV_DECODER
#define MEDIA_ENT_T_V4L2_SUBDEV_TUNER MEDIA_ENT_F_TUNER
#define MEDIA_ENT_F_DTV_DECODER MEDIA_ENT_F_DV_DECODER
/*
* There is still no full ALSA support in the media controller. These
* defines should not have been added and we leave them here only
* in case some application tries to use these defines.
*
* The ALSA defines that are in use have been moved into __KERNEL__
* scope. As support gets added to these interface types, they should
* be moved into __KERNEL__ scope with the code that uses them.
*/
#define MEDIA_INTF_T_ALSA_COMPRESS (MEDIA_INTF_T_ALSA_BASE + 3)
#define MEDIA_INTF_T_ALSA_RAWMIDI (MEDIA_INTF_T_ALSA_BASE + 4)
#define MEDIA_INTF_T_ALSA_HWDEP (MEDIA_INTF_T_ALSA_BASE + 5)
#define MEDIA_INTF_T_ALSA_SEQUENCER (MEDIA_INTF_T_ALSA_BASE + 6)
#define MEDIA_INTF_T_ALSA_TIMER (MEDIA_INTF_T_ALSA_BASE + 7)
/* Obsolete symbol for media_version, no longer used in the kernel */
#define MEDIA_API_VERSION ((0U << 16) | (1U << 8) | 0U)
#endif /* __LINUX_MEDIA_H */
|
0 | repos/libcamera/include | repos/libcamera/include/linux/udmabuf.h | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _LINUX_UDMABUF_H
#define _LINUX_UDMABUF_H
#include <linux/types.h>
#include <linux/ioctl.h>
#define UDMABUF_FLAGS_CLOEXEC 0x01
struct udmabuf_create {
__u32 memfd;
__u32 flags;
__u64 offset;
__u64 size;
};
struct udmabuf_create_item {
__u32 memfd;
__u32 __pad;
__u64 offset;
__u64 size;
};
struct udmabuf_create_list {
__u32 flags;
__u32 count;
struct udmabuf_create_item list[];
};
#define UDMABUF_CREATE _IOW('u', 0x42, struct udmabuf_create)
#define UDMABUF_CREATE_LIST _IOW('u', 0x43, struct udmabuf_create_list)
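/*
 * Usage sketch (illustrative): turning part of a sealed memfd into a
 * dma-buf via the /dev/udmabuf character device. Both offset and size
 * must be page-aligned, and the memfd needs F_SEAL_SHRINK set:
 *
 *	struct udmabuf_create create;
 *	memset(&create, 0, sizeof(create));
 *	create.memfd = memfd;			// assumed sealed memfd
 *	create.flags = UDMABUF_FLAGS_CLOEXEC;
 *	create.offset = 0;
 *	create.size = 4096;
 *	int buf_fd = ioctl(udmabuf_fd, UDMABUF_CREATE, &create);
 */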
#endif /* _LINUX_UDMABUF_H */
|
0 | repos/libcamera/include | repos/libcamera/include/linux/intel-ipu3.h | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/* Copyright (C) 2017 - 2018 Intel Corporation */
#ifndef __IPU3_UAPI_H
#define __IPU3_UAPI_H
#include <linux/types.h>
/* from /drivers/staging/media/ipu3/include/videodev2.h */
/* Vendor specific - used for IPU3 camera sub-system */
/* IPU3 processing parameters */
#define V4L2_META_FMT_IPU3_PARAMS v4l2_fourcc('i', 'p', '3', 'p')
/* IPU3 3A statistics */
#define V4L2_META_FMT_IPU3_STAT_3A v4l2_fourcc('i', 'p', '3', 's')
/* from include/uapi/linux/v4l2-controls.h */
#define V4L2_CID_INTEL_IPU3_BASE (V4L2_CID_USER_BASE + 0x10c0)
#define V4L2_CID_INTEL_IPU3_MODE (V4L2_CID_INTEL_IPU3_BASE + 1)
/******************* ipu3_uapi_stats_3a *******************/
#define IPU3_UAPI_MAX_STRIPES 2
#define IPU3_UAPI_MAX_BUBBLE_SIZE 10
#define IPU3_UAPI_GRID_START_MASK ((1 << 12) - 1)
#define IPU3_UAPI_GRID_Y_START_EN (1 << 15)
/* controls generation of meta_data (like FF enable/disable) */
#define IPU3_UAPI_AWB_RGBS_THR_B_EN (1 << 14)
#define IPU3_UAPI_AWB_RGBS_THR_B_INCL_SAT (1 << 15)
/**
* struct ipu3_uapi_grid_config - Grid plane config
*
* @width: Grid horizontal dimensions, in number of grid blocks(cells).
* For AWB, the range is (16, 80).
* For AF/AE, the range is (16, 32).
* @height: Grid vertical dimensions, in number of grid cells.
* For AWB, the range is (16, 60).
* For AF/AE, the range is (16, 24).
* @block_width_log2: Log2 of the width of each cell in pixels.
* For AWB, the range is [3, 6].
* For AF/AE, the range is [3, 7].
* @block_height_log2: Log2 of the height of each cell in pixels.
* For AWB, the range is [3, 6].
* For AF/AE, the range is [3, 7].
* @height_per_slice: The number of blocks in vertical axis per slice.
* Default 2.
* @x_start: X value of top left corner of Region of Interest(ROI).
* @y_start: Y value of top left corner of ROI
* @x_end: X value of bottom right corner of ROI
* @y_end: Y value of bottom right corner of ROI
*
 * Due to the total amount of collected data, most statistics
 * produce grid-based output, and the data is then divided into "slices".
*/
struct ipu3_uapi_grid_config {
__u8 width;
__u8 height;
__u16 block_width_log2:3;
__u16 block_height_log2:3;
__u16 height_per_slice:8;
__u16 x_start;
__u16 y_start;
__u16 x_end;
__u16 y_end;
} __attribute__((packed));
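/*
 * Worked example (illustrative): the pixel area covered by the grid follows
 * directly from the log2 cell sizes. For an AWB grid of width = 80,
 * height = 60 with 16x16 cells (block_*_log2 = 4):
 *
 *	covered_w = width << block_width_log2;		// 80 << 4 = 1280 px
 *	covered_h = height << block_height_log2;	// 60 << 4 = 960 px
 */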
/**
* struct ipu3_uapi_awb_set_item - Memory layout for each cell in AWB
*
* @Gr_avg: Green average for red lines in the cell.
* @R_avg: Red average in the cell.
* @B_avg: Blue average in the cell.
* @Gb_avg: Green average for blue lines in the cell.
* @sat_ratio: Percentage of pixels over the thresholds specified in
* ipu3_uapi_awb_config_s, coded from 0 to 255.
* @padding0: Unused byte for padding.
* @padding1: Unused byte for padding.
* @padding2: Unused byte for padding.
*/
struct ipu3_uapi_awb_set_item {
__u8 Gr_avg;
__u8 R_avg;
__u8 B_avg;
__u8 Gb_avg;
__u8 sat_ratio;
__u8 padding0;
__u8 padding1;
__u8 padding2;
} __attribute__((packed));
/*
* The grid based data is divided into "slices" called set, each slice of setX
* refers to ipu3_uapi_grid_config width * height_per_slice.
*/
#define IPU3_UAPI_AWB_MAX_SETS 60
/* Based on grid size 80 * 60 and cell size 16 x 16 */
#define IPU3_UAPI_AWB_SET_SIZE 160
#define IPU3_UAPI_AWB_SPARE_FOR_BUBBLES \
(IPU3_UAPI_MAX_BUBBLE_SIZE * IPU3_UAPI_MAX_STRIPES)
#define IPU3_UAPI_AWB_MAX_BUFFER_SIZE \
(IPU3_UAPI_AWB_MAX_SETS * \
(IPU3_UAPI_AWB_SET_SIZE + IPU3_UAPI_AWB_SPARE_FOR_BUBBLES))
/**
* struct ipu3_uapi_awb_raw_buffer - AWB raw buffer
*
* @meta_data: buffer to hold auto white balance meta data which is
* the average values for each color channel.
*/
struct ipu3_uapi_awb_raw_buffer {
struct ipu3_uapi_awb_set_item meta_data[IPU3_UAPI_AWB_MAX_BUFFER_SIZE]
__attribute__((aligned(32)));
} __attribute__((packed));
/**
* struct ipu3_uapi_awb_config_s - AWB config
*
* @rgbs_thr_gr: gr threshold value.
* @rgbs_thr_r: Red threshold value.
* @rgbs_thr_gb: gb threshold value.
* @rgbs_thr_b: Blue threshold value.
* @grid: &ipu3_uapi_grid_config, the default grid resolution is 16x16 cells.
*
* The threshold is a saturation measure range [0, 8191], 8191 is default.
* Values over threshold may be optionally rejected for averaging.
*/
struct ipu3_uapi_awb_config_s {
__u16 rgbs_thr_gr;
__u16 rgbs_thr_r;
__u16 rgbs_thr_gb;
__u16 rgbs_thr_b;
struct ipu3_uapi_grid_config grid;
} __attribute__((aligned(32))) __attribute__((packed));
/**
* struct ipu3_uapi_awb_config - AWB config wrapper
*
* @config: config for auto white balance as defined by &ipu3_uapi_awb_config_s
*/
struct ipu3_uapi_awb_config {
struct ipu3_uapi_awb_config_s config __attribute__((aligned(32)));
} __attribute__((packed));
#define IPU3_UAPI_AE_COLORS 4 /* R, G, B, Y */
#define IPU3_UAPI_AE_BINS 256
#define IPU3_UAPI_AE_WEIGHTS 96
/**
* struct ipu3_uapi_ae_raw_buffer - AE global weighted histogram
*
* @vals: Sum of IPU3_UAPI_AE_COLORS in cell
*
* Each histogram contains IPU3_UAPI_AE_BINS bins. Each bin has 24 bit unsigned
* for counting the number of the pixel.
*/
struct ipu3_uapi_ae_raw_buffer {
__u32 vals[IPU3_UAPI_AE_BINS * IPU3_UAPI_AE_COLORS];
} __attribute__((packed));
/**
* struct ipu3_uapi_ae_raw_buffer_aligned - AE raw buffer
*
* @buff: &ipu3_uapi_ae_raw_buffer to hold full frame meta data.
*/
struct ipu3_uapi_ae_raw_buffer_aligned {
struct ipu3_uapi_ae_raw_buffer buff __attribute__((aligned(32)));
} __attribute__((packed));
/**
* struct ipu3_uapi_ae_grid_config - AE weight grid
*
* @width: Grid horizontal dimensions. Value: [16, 32], default 16.
* @height: Grid vertical dimensions. Value: [16, 24], default 16.
* @block_width_log2: Log2 of the width of the grid cell, value: [3, 7].
* @block_height_log2: Log2 of the height of the grid cell, value: [3, 7].
* default is 3 (cell size 8x8), 4 cell per grid.
* @reserved0: reserved
* @ae_en: 0: does not write to &ipu3_uapi_ae_raw_buffer_aligned array,
* 1: write normally.
* @rst_hist_array: write 1 to trigger histogram array reset.
* @done_rst_hist_array: flag for histogram array reset done.
* @x_start: X value of top left corner of ROI, default 0.
* @y_start: Y value of top left corner of ROI, default 0.
* @x_end: X value of bottom right corner of ROI
* @y_end: Y value of bottom right corner of ROI
*
* The AE block accumulates 4 global weighted histograms(R, G, B, Y) over
* a defined ROI within the frame. The contribution of each pixel into the
* histogram, defined by &ipu3_uapi_ae_weight_elem LUT, is indexed by a grid.
*/
struct ipu3_uapi_ae_grid_config {
__u8 width;
__u8 height;
__u8 block_width_log2:4;
__u8 block_height_log2:4;
__u8 reserved0:5;
__u8 ae_en:1;
__u8 rst_hist_array:1;
__u8 done_rst_hist_array:1;
__u16 x_start;
__u16 y_start;
__u16 x_end;
__u16 y_end;
} __attribute__((packed));
/**
* struct ipu3_uapi_ae_weight_elem - AE weights LUT
*
* @cell0: weighted histogram grid value.
* @cell1: weighted histogram grid value.
* @cell2: weighted histogram grid value.
* @cell3: weighted histogram grid value.
* @cell4: weighted histogram grid value.
* @cell5: weighted histogram grid value.
* @cell6: weighted histogram grid value.
* @cell7: weighted histogram grid value.
*
* Use weighted grid value to give a different contribution factor to each cell.
* Precision u4, range [0, 15].
*/
struct ipu3_uapi_ae_weight_elem {
__u32 cell0:4;
__u32 cell1:4;
__u32 cell2:4;
__u32 cell3:4;
__u32 cell4:4;
__u32 cell5:4;
__u32 cell6:4;
__u32 cell7:4;
} __attribute__((packed));
/**
* struct ipu3_uapi_ae_ccm - AE coefficients for WB and CCM
*
* @gain_gr: WB gain factor for the gr channels. Default 256.
* @gain_r: WB gain factor for the r channel. Default 256.
* @gain_b: WB gain factor for the b channel. Default 256.
* @gain_gb: WB gain factor for the gb channels. Default 256.
* @mat: 4x4 matrix that transforms Bayer quad output from WB to RGB+Y.
*
* Default:
* 128, 0, 0, 0,
* 0, 128, 0, 0,
* 0, 0, 128, 0,
* 0, 0, 0, 128,
*
* As part of the raw frame pre-process stage, the WB and color conversion need
* to be applied to expose the impact of these gain operations.
*/
struct ipu3_uapi_ae_ccm {
__u16 gain_gr;
__u16 gain_r;
__u16 gain_b;
__u16 gain_gb;
__s16 mat[16];
} __attribute__((packed));
/**
* struct ipu3_uapi_ae_config - AE config
*
 * @grid_cfg: config for the auto exposure statistics grid. See struct
 *		&ipu3_uapi_ae_grid_config. As the ImgU does not output auto
 *		exposure statistics, the user can ignore this configuration
 *		and use the RGB table in the auto white balance statistics
 *		instead.
* @weights: &IPU3_UAPI_AE_WEIGHTS is based on 32x24 blocks in the grid.
* Each grid cell has a corresponding value in weights LUT called
* grid value, global histogram is updated based on grid value and
* pixel value.
* @ae_ccm: Color convert matrix pre-processing block.
*
* Calculate AE grid from image resolution, resample ae weights.
*/
struct ipu3_uapi_ae_config {
struct ipu3_uapi_ae_grid_config grid_cfg __attribute__((aligned(32)));
struct ipu3_uapi_ae_weight_elem weights[IPU3_UAPI_AE_WEIGHTS]
__attribute__((aligned(32)));
struct ipu3_uapi_ae_ccm ae_ccm __attribute__((aligned(32)));
} __attribute__((packed));
/**
* struct ipu3_uapi_af_filter_config - AF 2D filter for contrast measurements
*
* @y1_coeff_0: filter Y1, structure: 3x11, support both symmetry and
* anti-symmetry type. A12 is center, A1-A11 are neighbours.
* for analyzing low frequency content, used to calculate sum
* of gradients in x direction.
* @y1_coeff_0.a1: filter1 coefficients A1, u8, default 0.
* @y1_coeff_0.a2: filter1 coefficients A2, u8, default 0.
* @y1_coeff_0.a3: filter1 coefficients A3, u8, default 0.
* @y1_coeff_0.a4: filter1 coefficients A4, u8, default 0.
* @y1_coeff_1: Struct
* @y1_coeff_1.a5: filter1 coefficients A5, u8, default 0.
* @y1_coeff_1.a6: filter1 coefficients A6, u8, default 0.
* @y1_coeff_1.a7: filter1 coefficients A7, u8, default 0.
* @y1_coeff_1.a8: filter1 coefficients A8, u8, default 0.
* @y1_coeff_2: Struct
* @y1_coeff_2.a9: filter1 coefficients A9, u8, default 0.
* @y1_coeff_2.a10: filter1 coefficients A10, u8, default 0.
* @y1_coeff_2.a11: filter1 coefficients A11, u8, default 0.
* @y1_coeff_2.a12: filter1 coefficients A12, u8, default 128.
* @y1_sign_vec: Each bit corresponds to one coefficient sign bit,
* 0: positive, 1: negative, default 0.
* @y2_coeff_0: Y2, same structure as Y1. For analyzing high frequency content.
* @y2_coeff_0.a1: filter2 coefficients A1, u8, default 0.
* @y2_coeff_0.a2: filter2 coefficients A2, u8, default 0.
* @y2_coeff_0.a3: filter2 coefficients A3, u8, default 0.
* @y2_coeff_0.a4: filter2 coefficients A4, u8, default 0.
* @y2_coeff_1: Struct
* @y2_coeff_1.a5: filter2 coefficients A5, u8, default 0.
* @y2_coeff_1.a6: filter2 coefficients A6, u8, default 0.
* @y2_coeff_1.a7: filter2 coefficients A7, u8, default 0.
* @y2_coeff_1.a8: filter2 coefficients A8, u8, default 0.
* @y2_coeff_2: Struct
* @y2_coeff_2.a9: filter1 coefficients A9, u8, default 0.
* @y2_coeff_2.a10: filter1 coefficients A10, u8, default 0.
* @y2_coeff_2.a11: filter1 coefficients A11, u8, default 0.
* @y2_coeff_2.a12: filter1 coefficients A12, u8, default 128.
* @y2_sign_vec: Each bit corresponds to one coefficient sign bit,
* 0: positive, 1: negative, default 0.
* @y_calc: Pre-processing that converts Bayer quad to RGB+Y values to be
* used for building histogram. Range [0, 32], default 8.
* Rule:
* y_gen_rate_gr + y_gen_rate_r + y_gen_rate_b + y_gen_rate_gb = 32
* A single Y is calculated based on sum of Gr/R/B/Gb based on
* their contribution ratio.
* @y_calc.y_gen_rate_gr: Contribution ratio Gr for Y
* @y_calc.y_gen_rate_r: Contribution ratio R for Y
* @y_calc.y_gen_rate_b: Contribution ratio B for Y
* @y_calc.y_gen_rate_gb: Contribution ratio Gb for Y
* @nf: The shift right value that should be applied during the Y1/Y2 filter to
* make sure the total memory needed is 2 bytes per grid cell.
* @nf.reserved0: reserved
* @nf.y1_nf: Normalization factor for the convolution coeffs of y1,
* should be log2 of the sum of the abs values of the filter
* coeffs, default 7 (2^7 = 128).
* @nf.reserved1: reserved
* @nf.y2_nf: Normalization factor for y2, should be log2 of the sum of the
* abs values of the filter coeffs.
* @nf.reserved2: reserved
*/
struct ipu3_uapi_af_filter_config {
struct {
__u8 a1;
__u8 a2;
__u8 a3;
__u8 a4;
} y1_coeff_0;
struct {
__u8 a5;
__u8 a6;
__u8 a7;
__u8 a8;
} y1_coeff_1;
struct {
__u8 a9;
__u8 a10;
__u8 a11;
__u8 a12;
} y1_coeff_2;
__u32 y1_sign_vec;
struct {
__u8 a1;
__u8 a2;
__u8 a3;
__u8 a4;
} y2_coeff_0;
struct {
__u8 a5;
__u8 a6;
__u8 a7;
__u8 a8;
} y2_coeff_1;
struct {
__u8 a9;
__u8 a10;
__u8 a11;
__u8 a12;
} y2_coeff_2;
__u32 y2_sign_vec;
struct {
__u8 y_gen_rate_gr;
__u8 y_gen_rate_r;
__u8 y_gen_rate_b;
__u8 y_gen_rate_gb;
} y_calc;
struct {
__u32 reserved0:8;
__u32 y1_nf:4;
__u32 reserved1:4;
__u32 y2_nf:4;
__u32 reserved2:12;
} nf;
} __attribute__((packed));
#define IPU3_UAPI_AF_MAX_SETS 24
#define IPU3_UAPI_AF_MD_ITEM_SIZE 4
#define IPU3_UAPI_AF_SPARE_FOR_BUBBLES \
(IPU3_UAPI_MAX_BUBBLE_SIZE * IPU3_UAPI_MAX_STRIPES * \
IPU3_UAPI_AF_MD_ITEM_SIZE)
#define IPU3_UAPI_AF_Y_TABLE_SET_SIZE 128
#define IPU3_UAPI_AF_Y_TABLE_MAX_SIZE \
(IPU3_UAPI_AF_MAX_SETS * \
(IPU3_UAPI_AF_Y_TABLE_SET_SIZE + IPU3_UAPI_AF_SPARE_FOR_BUBBLES) * \
IPU3_UAPI_MAX_STRIPES)
/**
* struct ipu3_uapi_af_raw_buffer - AF meta data
*
* @y_table: Each color component will be convolved separately with filter1
* and filter2 and the result will be summed out and averaged for
* each cell.
*/
struct ipu3_uapi_af_raw_buffer {
__u8 y_table[IPU3_UAPI_AF_Y_TABLE_MAX_SIZE] __attribute__((aligned(32)));
} __attribute__((packed));
/**
* struct ipu3_uapi_af_config_s - AF config
*
* @filter_config: AF uses Y1 and Y2 filters as configured in
* &ipu3_uapi_af_filter_config
* @padding: paddings
* @grid_cfg: See &ipu3_uapi_grid_config, default resolution 16x16. Use large
* grid size for large image and vice versa.
*/
struct ipu3_uapi_af_config_s {
struct ipu3_uapi_af_filter_config filter_config __attribute__((aligned(32)));
__u8 padding[4];
struct ipu3_uapi_grid_config grid_cfg __attribute__((aligned(32)));
} __attribute__((packed));
#define IPU3_UAPI_AWB_FR_MAX_SETS 24
#define IPU3_UAPI_AWB_FR_MD_ITEM_SIZE 8
#define IPU3_UAPI_AWB_FR_BAYER_TBL_SIZE 256
#define IPU3_UAPI_AWB_FR_SPARE_FOR_BUBBLES \
(IPU3_UAPI_MAX_BUBBLE_SIZE * IPU3_UAPI_MAX_STRIPES * \
IPU3_UAPI_AWB_FR_MD_ITEM_SIZE)
#define IPU3_UAPI_AWB_FR_BAYER_TABLE_MAX_SIZE \
(IPU3_UAPI_AWB_FR_MAX_SETS * \
(IPU3_UAPI_AWB_FR_BAYER_TBL_SIZE + \
IPU3_UAPI_AWB_FR_SPARE_FOR_BUBBLES) * IPU3_UAPI_MAX_STRIPES)
/**
* struct ipu3_uapi_awb_fr_raw_buffer - AWB filter response meta data
*
* @meta_data: Statistics output on the grid after convolving with 1D filter.
*/
struct ipu3_uapi_awb_fr_raw_buffer {
__u8 meta_data[IPU3_UAPI_AWB_FR_BAYER_TABLE_MAX_SIZE]
__attribute__((aligned(32)));
} __attribute__((packed));
/**
* struct ipu3_uapi_awb_fr_config_s - AWB filter response config
*
* @grid_cfg: grid config, default 16x16.
* @bayer_coeff: 1D Filter 1x11 center symmetry/anti-symmetry.
* coefficients defaults { 0, 0, 0, 0, 0, 128 }.
* Applied on whole image for each Bayer channel separately
* by a weighted sum of its 11x1 neighbors.
* @reserved1: reserved
* @bayer_sign: sign of filter coefficients, default 0.
* @bayer_nf: normalization factor for the convolution coeffs, to make sure
* total memory needed is within pre-determined range.
* NF should be the log2 of the sum of the abs values of the
* filter coeffs, range [7, 14], default 7.
* @reserved2: reserved
*/
struct ipu3_uapi_awb_fr_config_s {
struct ipu3_uapi_grid_config grid_cfg;
__u8 bayer_coeff[6];
__u16 reserved1;
__u32 bayer_sign;
__u8 bayer_nf;
__u8 reserved2[7];
} __attribute__((packed));
/**
* struct ipu3_uapi_4a_config - 4A config
*
* @awb_config: &ipu3_uapi_awb_config_s, default resolution 16x16
* @ae_grd_config: auto exposure statistics &ipu3_uapi_ae_grid_config
* @padding: paddings
* @af_config: auto focus config &ipu3_uapi_af_config_s
* @awb_fr_config: &ipu3_uapi_awb_fr_config_s, default resolution 16x16
*/
struct ipu3_uapi_4a_config {
struct ipu3_uapi_awb_config_s awb_config __attribute__((aligned(32)));
struct ipu3_uapi_ae_grid_config ae_grd_config;
__u8 padding[20];
struct ipu3_uapi_af_config_s af_config;
struct ipu3_uapi_awb_fr_config_s awb_fr_config
__attribute__((aligned(32)));
} __attribute__((packed));
/**
* struct ipu3_uapi_bubble_info - Bubble info for host side debugging
*
 * @num_of_stripes: A single frame is divided into several parts called stripes
 *		due to a limitation on line buffer memory.
* The separation between the stripes is vertical. Each such
* stripe is processed as a single frame by the ISP pipe.
* @padding: padding bytes.
* @num_sets: number of sets.
* @padding1: padding bytes.
* @size_of_set: set size.
* @padding2: padding bytes.
 * @bubble_size: the amount of padding in the bubble, expressed in "sets".
* @padding3: padding bytes.
*/
struct ipu3_uapi_bubble_info {
__u32 num_of_stripes __attribute__((aligned(32)));
__u8 padding[28];
__u32 num_sets;
__u8 padding1[28];
__u32 size_of_set;
__u8 padding2[28];
__u32 bubble_size;
__u8 padding3[28];
} __attribute__((packed));
/*
* struct ipu3_uapi_stats_3a_bubble_info_per_stripe
*/
struct ipu3_uapi_stats_3a_bubble_info_per_stripe {
struct ipu3_uapi_bubble_info awb[IPU3_UAPI_MAX_STRIPES];
struct ipu3_uapi_bubble_info af[IPU3_UAPI_MAX_STRIPES];
struct ipu3_uapi_bubble_info awb_fr[IPU3_UAPI_MAX_STRIPES];
} __attribute__((packed));
/**
* struct ipu3_uapi_ff_status - Enable bits for each 3A fixed function
*
* @awb_en: auto white balance enable
* @padding: padding config
* @ae_en: auto exposure enable
* @padding1: padding config
* @af_en: auto focus enable
* @padding2: padding config
* @awb_fr_en: awb filter response enable bit
* @padding3: padding config
*/
struct ipu3_uapi_ff_status {
__u32 awb_en __attribute__((aligned(32)));
__u8 padding[28];
__u32 ae_en;
__u8 padding1[28];
__u32 af_en;
__u8 padding2[28];
__u32 awb_fr_en;
__u8 padding3[28];
} __attribute__((packed));
/**
* struct ipu3_uapi_stats_3a - 3A statistics
*
* @awb_raw_buffer: auto white balance meta data &ipu3_uapi_awb_raw_buffer
* @ae_raw_buffer: auto exposure raw data &ipu3_uapi_ae_raw_buffer_aligned
 *		the current ImgU does not output auto exposure statistics
 *		to ae_raw_buffer; users such as a 3A algorithm can use the
 *		RGB table in &ipu3_uapi_awb_raw_buffer for auto exposure instead.
* @af_raw_buffer: &ipu3_uapi_af_raw_buffer for auto focus meta data
* @awb_fr_raw_buffer: value as specified by &ipu3_uapi_awb_fr_raw_buffer
* @stats_4a_config: 4a statistics config as defined by &ipu3_uapi_4a_config.
* @ae_join_buffers: 1 to use ae_raw_buffer.
* @padding: padding config
* @stats_3a_bubble_per_stripe: a &ipu3_uapi_stats_3a_bubble_info_per_stripe
* @stats_3a_status: 3a statistics status set in &ipu3_uapi_ff_status
*/
struct ipu3_uapi_stats_3a {
struct ipu3_uapi_awb_raw_buffer awb_raw_buffer;
struct ipu3_uapi_ae_raw_buffer_aligned
ae_raw_buffer[IPU3_UAPI_MAX_STRIPES];
struct ipu3_uapi_af_raw_buffer af_raw_buffer;
struct ipu3_uapi_awb_fr_raw_buffer awb_fr_raw_buffer;
struct ipu3_uapi_4a_config stats_4a_config;
__u32 ae_join_buffers;
__u8 padding[28];
struct ipu3_uapi_stats_3a_bubble_info_per_stripe
stats_3a_bubble_per_stripe;
struct ipu3_uapi_ff_status stats_3a_status;
} __attribute__((packed));
/******************* ipu3_uapi_acc_param *******************/
#define IPU3_UAPI_ISP_VEC_ELEMS 64
#define IPU3_UAPI_ISP_TNR3_VMEM_LEN 9
#define IPU3_UAPI_BNR_LUT_SIZE 32
/* number of elements in gamma correction LUT */
#define IPU3_UAPI_GAMMA_CORR_LUT_ENTRIES 256
/* largest grid is 73x56, for grid_height_per_slice of 2, 73x2 = 146 */
#define IPU3_UAPI_SHD_MAX_CELLS_PER_SET 146
#define IPU3_UAPI_SHD_MAX_CFG_SETS 28
/* Normalization shift aka nf */
#define IPU3_UAPI_SHD_BLGR_NF_SHIFT 13
#define IPU3_UAPI_SHD_BLGR_NF_MASK 7
#define IPU3_UAPI_YUVP2_TCC_MACC_TABLE_ELEMENTS 16
#define IPU3_UAPI_YUVP2_TCC_INV_Y_LUT_ELEMENTS 14
#define IPU3_UAPI_YUVP2_TCC_GAIN_PCWL_LUT_ELEMENTS 258
#define IPU3_UAPI_YUVP2_TCC_R_SQR_LUT_ELEMENTS 24
#define IPU3_UAPI_ANR_LUT_SIZE 26
#define IPU3_UAPI_ANR_PYRAMID_SIZE 22
#define IPU3_UAPI_LIN_LUT_SIZE 64
/* Bayer Noise Reduction related structs */
/**
* struct ipu3_uapi_bnr_static_config_wb_gains_config - White balance gains
*
* @gr: white balance gain for Gr channel.
* @r: white balance gain for R channel.
* @b: white balance gain for B channel.
* @gb: white balance gain for Gb channel.
*
 * For the BNR parameters, WB gain factors for the four channels
 * [Ggr, Ggb, Gb, Gr]. Their precision is U3.13, the range is (0, 8),
 * and the actual gain is Gx + 1; typically Gx = 1.
*
* Pout = {Pin * (1 + Gx)}.
*/
struct ipu3_uapi_bnr_static_config_wb_gains_config {
__u16 gr;
__u16 r;
__u16 b;
__u16 gb;
} __attribute__((packed));
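/*
 * Worked example (illustrative): with U3.13 precision, 1.0 corresponds to
 * 1 << 13 = 8192. Since the applied gain is Gx + 1, a total white balance
 * gain of 1.5 on the red channel means programming Gx = 0.5:
 *
 *	wb_gains.r = 4096;	// 0.5 in U3.13, applied gain = 1.5
 */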
/**
* struct ipu3_uapi_bnr_static_config_wb_gains_thr_config - Threshold config
*
* @gr: white balance threshold gain for Gr channel.
* @r: white balance threshold gain for R channel.
* @b: white balance threshold gain for B channel.
* @gb: white balance threshold gain for Gb channel.
*
 * Defines the threshold that specifies how different a defect pixel can be
 * from its neighbors (used by the dynamic defect pixel correction sub-block).
* Precision u4.4 range [0, 8].
*/
struct ipu3_uapi_bnr_static_config_wb_gains_thr_config {
__u8 gr;
__u8 r;
__u8 b;
__u8 gb;
} __attribute__((packed));
/**
* struct ipu3_uapi_bnr_static_config_thr_coeffs_config - Noise model
* coefficients that controls noise threshold
*
* @cf: Free coefficient for threshold calculation, range [0, 8191], default 0.
* @reserved0: reserved
* @cg: Gain coefficient for threshold calculation, [0, 31], default 8.
* @ci: Intensity coefficient for threshold calculation. range [0, 0x1f]
* default 6.
* format: u3.2 (3 most significant bits represent whole number,
* 2 least significant bits represent the fractional part
* with each count representing 0.25)
* e.g. 6 in binary format is 00110, that translates to 1.5
* @reserved1: reserved
* @r_nf: Normalization shift value for r^2 calculation, range [12, 20]
 *	where r is the radius of pixel [row, col] from the center of the sensor.
 *	Default 14.
*
* Threshold used to distinguish between noise and details.
*/
struct ipu3_uapi_bnr_static_config_thr_coeffs_config {
__u32 cf:13;
__u32 reserved0:3;
__u32 cg:5;
__u32 ci:5;
__u32 reserved1:1;
__u32 r_nf:5;
} __attribute__((packed));
/**
* struct ipu3_uapi_bnr_static_config_thr_ctrl_shd_config - Shading config
*
* @gr: Coefficient defines lens shading gain approximation for gr channel
* @r: Coefficient defines lens shading gain approximation for r channel
* @b: Coefficient defines lens shading gain approximation for b channel
* @gb: Coefficient defines lens shading gain approximation for gb channel
*
* Parameters for noise model (NM) adaptation of BNR due to shading correction.
* All above have precision of u3.3, default to 0.
*/
struct ipu3_uapi_bnr_static_config_thr_ctrl_shd_config {
__u8 gr;
__u8 r;
__u8 b;
__u8 gb;
} __attribute__((packed));
/**
* struct ipu3_uapi_bnr_static_config_opt_center_config - Optical center config
*
* @x_reset: Reset value of X (col start - X center). Precision s12.0.
* @reserved0: reserved
* @y_reset: Reset value of Y (row start - Y center). Precision s12.0.
* @reserved2: reserved
*
* Distance from corner to optical center for NM adaptation due to shading
* correction (should be calculated based on shading tables)
*/
struct ipu3_uapi_bnr_static_config_opt_center_config {
__s32 x_reset:13;
__u32 reserved0:3;
__s32 y_reset:13;
__u32 reserved2:3;
} __attribute__((packed));
/**
* struct ipu3_uapi_bnr_static_config_lut_config - BNR square root lookup table
*
* @values: pre-calculated values of square root function.
*
* LUT implementation of square root operation.
*/
struct ipu3_uapi_bnr_static_config_lut_config {
__u8 values[IPU3_UAPI_BNR_LUT_SIZE];
} __attribute__((packed));
/**
* struct ipu3_uapi_bnr_static_config_bp_ctrl_config - Detect bad pixels (bp)
*
* @bp_thr_gain: Defines the threshold that specifies how different a
* defect pixel can be from its neighbors. Threshold is
* dependent on de-noise threshold calculated by algorithm.
* Range [4, 31], default 4.
* @reserved0: reserved
* @defect_mode: Mode of addressed defect pixels,
* 0 - single defect pixel is expected,
* 1 - 2 adjacent defect pixels are expected, default 1.
* @bp_gain: Defines how 2nd derivation that passes through a defect pixel
* is different from 2nd derivations that pass through
* neighbor pixels. u4.2, range [0, 256], default 8.
* @reserved1: reserved
* @w0_coeff: Blending coefficient of defect pixel correction.
* Precision u4, range [0, 8], default 8.
* @reserved2: reserved
* @w1_coeff: Enable influence of incorrect defect pixel correction to be
* avoided. Precision u4, range [1, 8], default 8.
* @reserved3: reserved
*/
struct ipu3_uapi_bnr_static_config_bp_ctrl_config {
__u32 bp_thr_gain:5;
__u32 reserved0:2;
__u32 defect_mode:1;
__u32 bp_gain:6;
__u32 reserved1:18;
__u32 w0_coeff:4;
__u32 reserved2:4;
__u32 w1_coeff:4;
__u32 reserved3:20;
} __attribute__((packed));
/**
* struct ipu3_uapi_bnr_static_config_dn_detect_ctrl_config - Denoising config
*
* @alpha: Weight of central element of smoothing filter.
* @beta: Weight of peripheral elements of smoothing filter, default 4.
* @gamma: Weight of diagonal elements of smoothing filter, default 4.
*
 * The beta and gamma parameters define the strength of the noise removal
 * filter. All of the above have precision u0.4, range [0, 0xf]
* format: u0.4 (no / zero bits represent whole number,
* 4 bits represent the fractional part
* with each count representing 0.0625)
* e.g. 0xf translates to 0.0625x15 = 0.9375
*
* @reserved0: reserved
* @max_inf: Maximum increase of peripheral or diagonal element influence
* relative to the pre-defined value range: [0x5, 0xa]
* @reserved1: reserved
* @gd_enable: Green disparity enable control, 0 - disable, 1 - enable.
* @bpc_enable: Bad pixel correction enable control, 0 - disable, 1 - enable.
* @bnr_enable: Bayer noise removal enable control, 0 - disable, 1 - enable.
* @ff_enable: Fixed function enable, 0 - disable, 1 - enable.
* @reserved2: reserved
*/
struct ipu3_uapi_bnr_static_config_dn_detect_ctrl_config {
__u32 alpha:4;
__u32 beta:4;
__u32 gamma:4;
__u32 reserved0:4;
__u32 max_inf:4;
__u32 reserved1:7;
__u32 gd_enable:1;
__u32 bpc_enable:1;
__u32 bnr_enable:1;
__u32 ff_enable:1;
__u32 reserved2:1;
} __attribute__((packed));
/**
* struct ipu3_uapi_bnr_static_config_opt_center_sqr_config - BNR optical square
*
* @x_sqr_reset: Reset value of X^2.
* @y_sqr_reset: Reset value of Y^2.
*
 * Please note:
 *
 * #. X and Y refer to
 * &ipu3_uapi_bnr_static_config_opt_center_config
 * #. Both structs are used in the threshold formula to calculate r^2, where
 * r is the radius of pixel [row, col] from the center of the sensor.
*/
struct ipu3_uapi_bnr_static_config_opt_center_sqr_config {
__u32 x_sqr_reset;
__u32 y_sqr_reset;
} __attribute__((packed));
/**
* struct ipu3_uapi_bnr_static_config - BNR static config
*
* @wb_gains: white balance gains &ipu3_uapi_bnr_static_config_wb_gains_config
* @wb_gains_thr: white balance gains threshold as defined by
* &ipu3_uapi_bnr_static_config_wb_gains_thr_config
* @thr_coeffs: coefficients of threshold
* &ipu3_uapi_bnr_static_config_thr_coeffs_config
* @thr_ctrl_shd: control of shading threshold
* &ipu3_uapi_bnr_static_config_thr_ctrl_shd_config
* @opt_center: optical center &ipu3_uapi_bnr_static_config_opt_center_config
*
* Above parameters and opt_center_sqr are used for white balance and shading.
*
* @lut: lookup table &ipu3_uapi_bnr_static_config_lut_config
* @bp_ctrl: detect and remove bad pixels as defined in struct
* &ipu3_uapi_bnr_static_config_bp_ctrl_config
* @dn_detect_ctrl: detect and remove noise.
* &ipu3_uapi_bnr_static_config_dn_detect_ctrl_config
* @column_size: The number of pixels in column.
* @opt_center_sqr: Reset value of r^2 to optical center, see
* &ipu3_uapi_bnr_static_config_opt_center_sqr_config.
*/
struct ipu3_uapi_bnr_static_config {
struct ipu3_uapi_bnr_static_config_wb_gains_config wb_gains;
struct ipu3_uapi_bnr_static_config_wb_gains_thr_config wb_gains_thr;
struct ipu3_uapi_bnr_static_config_thr_coeffs_config thr_coeffs;
struct ipu3_uapi_bnr_static_config_thr_ctrl_shd_config thr_ctrl_shd;
struct ipu3_uapi_bnr_static_config_opt_center_config opt_center;
struct ipu3_uapi_bnr_static_config_lut_config lut;
struct ipu3_uapi_bnr_static_config_bp_ctrl_config bp_ctrl;
struct ipu3_uapi_bnr_static_config_dn_detect_ctrl_config dn_detect_ctrl;
__u32 column_size;
struct ipu3_uapi_bnr_static_config_opt_center_sqr_config opt_center_sqr;
} __attribute__((packed));
/**
* struct ipu3_uapi_bnr_static_config_green_disparity - Correct green disparity
*
* @gd_red: Shading gain coeff for gr disparity level in bright red region.
* Precision u0.6, default 4(0.0625).
* @reserved0: reserved
* @gd_green: Shading gain coeff for gr disparity level in bright green
* region. Precision u0.6, default 4(0.0625).
* @reserved1: reserved
* @gd_blue: Shading gain coeff for gr disparity level in bright blue region.
* Precision u0.6, default 4(0.0625).
* @reserved2: reserved
* @gd_black: Maximal green disparity level in dark region (stronger disparity
* assumed to be image detail). Precision u14, default 80.
* @reserved3: reserved
* @gd_shading: Change maximal green disparity level according to square
* distance from image center.
* @reserved4: reserved
* @gd_support: Lower bound for the number of second green color pixels in
* current pixel neighborhood with less than threshold difference
* from it.
*
* The shading gain coeff of red, green, blue and black are used to calculate
* threshold given a pixel's color value and its coordinates in the image.
*
* @reserved5: reserved
* @gd_clip: Turn green disparity clip on/off, [0, 1], default 1.
* @gd_central_weight: Central pixel weight in 9 pixels weighted sum.
*/
struct ipu3_uapi_bnr_static_config_green_disparity {
__u32 gd_red:6;
__u32 reserved0:2;
__u32 gd_green:6;
__u32 reserved1:2;
__u32 gd_blue:6;
__u32 reserved2:10;
__u32 gd_black:14;
__u32 reserved3:2;
__u32 gd_shading:7;
__u32 reserved4:1;
__u32 gd_support:2;
__u32 reserved5:1;
__u32 gd_clip:1;
__u32 gd_central_weight:4;
} __attribute__((packed));
/**
* struct ipu3_uapi_dm_config - De-mosaic parameters
*
* @dm_en: de-mosaic enable.
* @ch_ar_en: Checker artifacts removal enable flag. Default 0.
* @fcc_en: False color correction (FCC) enable flag. Default 0.
* @reserved0: reserved
* @frame_width: do not care
* @gamma_sc: Sharpening coefficient (coefficient of 2-d derivation of
* complementary color in Hamilton-Adams interpolation).
* u5, range [0, 31], default 8.
* @reserved1: reserved
* @lc_ctrl: Parameter that controls weights of Chroma Homogeneity metric
* in calculation of final homogeneity metric.
* u5, range [0, 31], default 7.
* @reserved2: reserved
* @cr_param1: First parameter that defines Checker artifact removal
* feature gain. Precision u5, range [0, 31], default 8.
* @reserved3: reserved
* @cr_param2: Second parameter that defines Checker artifact removal
* feature gain. Precision u5, range [0, 31], default 8.
* @reserved4: reserved
 * @coring_param: Defines the power of the false color correction operation.
 * Low for preserving edge colors, high for preserving gray
 * edge artifacts.
 * Precision u1.4, range [0, 1.9375], default 4 (0.25).
* @reserved5: reserved
*
 * The demosaic fixed function block is responsible for converting Bayer
 * (mosaiced) images into color images based on a demosaicing algorithm.
*/
struct ipu3_uapi_dm_config {
__u32 dm_en:1;
__u32 ch_ar_en:1;
__u32 fcc_en:1;
__u32 reserved0:13;
__u32 frame_width:16;
__u32 gamma_sc:5;
__u32 reserved1:3;
__u32 lc_ctrl:5;
__u32 reserved2:3;
__u32 cr_param1:5;
__u32 reserved3:3;
__u32 cr_param2:5;
__u32 reserved4:3;
__u32 coring_param:5;
__u32 reserved5:27;
} __attribute__((packed));
/**
* struct ipu3_uapi_ccm_mat_config - Color correction matrix
*
 * @coeff_m11: CCM 3x3 coefficient, range [-65536, 65535]
 * @coeff_m12: CCM 3x3 coefficient, range [-8192, 8191]
 * @coeff_m13: CCM 3x3 coefficient, range [-32768, 32767]
 * @coeff_o_r: Bias 3x1 coefficient, range [-8191, 8191]
 * @coeff_m21: CCM 3x3 coefficient, range [-32768, 32767]
 * @coeff_m22: CCM 3x3 coefficient, range [-8192, 8191]
 * @coeff_m23: CCM 3x3 coefficient, range [-32768, 32767]
 * @coeff_o_g: Bias 3x1 coefficient, range [-8191, 8191]
 * @coeff_m31: CCM 3x3 coefficient, range [-32768, 32767]
 * @coeff_m32: CCM 3x3 coefficient, range [-8192, 8191]
 * @coeff_m33: CCM 3x3 coefficient, range [-32768, 32767]
 * @coeff_o_b: Bias 3x1 coefficient, range [-8191, 8191]
*
 * Transform the sensor-specific color space to standard sRGB by applying a
 * 3x3 matrix and adding a bias vector O. The transformation is basically a
 * rotation and translation in the 3-dimensional color space. Here are the defaults:
*
* 9775, -2671, 1087, 0
* -1071, 8303, 815, 0
* -23, -7887, 16103, 0
*/
struct ipu3_uapi_ccm_mat_config {
__s16 coeff_m11;
__s16 coeff_m12;
__s16 coeff_m13;
__s16 coeff_o_r;
__s16 coeff_m21;
__s16 coeff_m22;
__s16 coeff_m23;
__s16 coeff_o_g;
__s16 coeff_m31;
__s16 coeff_m32;
__s16 coeff_m33;
__s16 coeff_o_b;
} __attribute__((packed));
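/*
 * Illustrative sketch, not part of the UAPI: applying one row of the CCM
 * plus its bias term. Each default row above sums to roughly 8192, which
 * suggests a 2^13 fixed-point scale; that scale, and the helper name, are
 * assumptions.
 */
static inline __s32 ipu3_example_ccm_row(__s16 m1, __s16 m2, __s16 m3, __s16 o,
					 __s32 r, __s32 g, __s32 b)
{
	/* weighted sum of the input channels, then renormalize and bias */
	return ((m1 * r + m2 * g + m3 * b) >> 13) + o;
}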
/**
* struct ipu3_uapi_gamma_corr_ctrl - Gamma correction
*
* @enable: gamma correction enable.
* @reserved: reserved
*/
struct ipu3_uapi_gamma_corr_ctrl {
__u32 enable:1;
__u32 reserved:31;
} __attribute__((packed));
/**
* struct ipu3_uapi_gamma_corr_lut - Per-pixel tone mapping implemented as LUT.
*
* @lut: 256 tabulated values of the gamma function. LUT[1].. LUT[256]
* format u13.0, range [0, 8191].
*
 * The tone mapping operation is done by a piecewise linear graph
 * that is implemented as a lookup table (LUT). The pixel component input
 * intensity is the X-axis of the graph and serves as the table index.
*/
struct ipu3_uapi_gamma_corr_lut {
__u16 lut[IPU3_UAPI_GAMMA_CORR_LUT_ENTRIES];
} __attribute__((packed));
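/*
 * Illustrative sketch, not part of the UAPI: evaluating the piecewise
 * linear tone mapping curve in software. A 13-bit input intensity spread
 * evenly over 256 LUT segments is an assumption; only the entry count and
 * the u13.0 output range come from the comment above.
 */
static inline __u16 ipu3_example_gamma_apply(const struct ipu3_uapi_gamma_corr_lut *lut,
					     __u32 pix)
{
	__u32 seg = pix >> 5;		/* 8192 / 256 = 32 inputs per segment */
	int frac = pix & 0x1f;		/* position inside the segment */
	int y0 = lut->lut[seg];
	int y1 = lut->lut[seg < 255 ? seg + 1 : seg];

	return (__u16)(y0 + (((y1 - y0) * frac) >> 5));	/* linear interpolation */
}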
/**
* struct ipu3_uapi_gamma_config - Gamma config
*
* @gc_ctrl: control of gamma correction &ipu3_uapi_gamma_corr_ctrl
* @gc_lut: lookup table of gamma correction &ipu3_uapi_gamma_corr_lut
*/
struct ipu3_uapi_gamma_config {
struct ipu3_uapi_gamma_corr_ctrl gc_ctrl __attribute__((aligned(32)));
struct ipu3_uapi_gamma_corr_lut gc_lut __attribute__((aligned(32)));
} __attribute__((packed));
/**
* struct ipu3_uapi_csc_mat_config - Color space conversion matrix config
*
* @coeff_c11: Conversion matrix value, format s0.14, range [-16384, 16383].
* @coeff_c12: Conversion matrix value, format s0.14, range [-8192, 8191].
* @coeff_c13: Conversion matrix value, format s0.14, range [-16384, 16383].
* @coeff_b1: Bias 3x1 coefficient, s13.0 range [-8192, 8191].
* @coeff_c21: Conversion matrix value, format s0.14, range [-16384, 16383].
* @coeff_c22: Conversion matrix value, format s0.14, range [-8192, 8191].
* @coeff_c23: Conversion matrix value, format s0.14, range [-16384, 16383].
* @coeff_b2: Bias 3x1 coefficient, s13.0 range [-8192, 8191].
* @coeff_c31: Conversion matrix value, format s0.14, range [-16384, 16383].
* @coeff_c32: Conversion matrix value, format s0.14, range [-8192, 8191].
* @coeff_c33: Conversion matrix value, format s0.14, range [-16384, 16383].
* @coeff_b3: Bias 3x1 coefficient, s13.0 range [-8192, 8191].
*
 * Transform each pixel from RGB to YUV (Y - brightness/luminance,
 * UV - chroma) by multiplying the pixel's values by a 3x3 matrix and adding
 * an optional bias 3x1 vector. Here are the default values for the matrix:
*
* 4898, 9617, 1867, 0,
* -2410, -4732, 7143, 0,
* 10076, -8437, -1638, 0,
*
* (i.e. for real number 0.299, 0.299 * 2^14 becomes 4898.)
*/
struct ipu3_uapi_csc_mat_config {
__s16 coeff_c11;
__s16 coeff_c12;
__s16 coeff_c13;
__s16 coeff_b1;
__s16 coeff_c21;
__s16 coeff_c22;
__s16 coeff_c23;
__s16 coeff_b2;
__s16 coeff_c31;
__s16 coeff_c32;
__s16 coeff_c33;
__s16 coeff_b3;
} __attribute__((packed));
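/*
 * Illustrative sketch, not part of the UAPI: computing the Y row of the
 * RGB to YUV conversion. The 2^14 coefficient scale is stated above
 * ("0.299 * 2^14 becomes 4898"); the helper name is hypothetical.
 */
static inline __s32 ipu3_example_csc_y(const struct ipu3_uapi_csc_mat_config *csc,
				       __s32 r, __s32 g, __s32 b)
{
	/* with the defaults: Y = ((4898*R + 9617*G + 1867*B) >> 14) + b1 */
	return ((csc->coeff_c11 * r + csc->coeff_c12 * g +
		 csc->coeff_c13 * b) >> 14) + csc->coeff_b1;
}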
/**
* struct ipu3_uapi_cds_params - Chroma down-scaling
*
* @ds_c00: range [0, 3]
* @ds_c01: range [0, 3]
* @ds_c02: range [0, 3]
* @ds_c03: range [0, 3]
* @ds_c10: range [0, 3]
* @ds_c11: range [0, 3]
* @ds_c12: range [0, 3]
* @ds_c13: range [0, 3]
*
 * If the user does not provide values, the above 4x2 filter will use the
 * following defaults:
 * 1, 3, 3, 1,
 * 1, 3, 3, 1,
 *
 * @ds_nf: Normalization factor for the chroma output downscaling filter,
 * range [0, 4], default 2.
 * @reserved0: reserved
 * @csc_en: Color space conversion enable
 * @uv_bin_output: 0 - output YUV 4:2:0, 1 - output YUV 4:2:2 (default).
* @reserved1: reserved
*/
struct ipu3_uapi_cds_params {
__u32 ds_c00:2;
__u32 ds_c01:2;
__u32 ds_c02:2;
__u32 ds_c03:2;
__u32 ds_c10:2;
__u32 ds_c11:2;
__u32 ds_c12:2;
__u32 ds_c13:2;
__u32 ds_nf:5;
__u32 reserved0:3;
__u32 csc_en:1;
__u32 uv_bin_output:1;
__u32 reserved1:6;
} __attribute__((packed));
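/*
 * Illustrative sketch, not part of the UAPI: the 4x2 chroma downscaling
 * filter as a weighted sum followed by the ds_nf shift. The mapping of
 * taps to pixel positions (p[row][col]) is an assumption for illustration.
 */
static inline __u32 ipu3_example_cds_filter(const struct ipu3_uapi_cds_params *cds,
					    const __u32 p[2][4])
{
	__u32 acc;

	acc  = cds->ds_c00 * p[0][0] + cds->ds_c01 * p[0][1] +
	       cds->ds_c02 * p[0][2] + cds->ds_c03 * p[0][3];
	acc += cds->ds_c10 * p[1][0] + cds->ds_c11 * p[1][1] +
	       cds->ds_c12 * p[1][2] + cds->ds_c13 * p[1][3];

	return acc >> cds->ds_nf;	/* normalization shift */
}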
/**
* struct ipu3_uapi_shd_grid_config - Bayer shading(darkening) correction
*
* @width: Grid horizontal dimensions, u8, [8, 128], default 73
* @height: Grid vertical dimensions, u8, [8, 128], default 56
* @block_width_log2: Log2 of the width of the grid cell in pixel count
* u4, [0, 15], default value 5.
* @reserved0: reserved
* @block_height_log2: Log2 of the height of the grid cell in pixel count
* u4, [0, 15], default value 6.
* @reserved1: reserved
* @grid_height_per_slice: SHD_MAX_CELLS_PER_SET/width.
* (with SHD_MAX_CELLS_PER_SET = 146).
* @x_start: X value of top left corner of sensor relative to ROI
* s13, [-4096, 0], default 0, only negative values.
* @y_start: Y value of top left corner of sensor relative to ROI
* s13, [-4096, 0], default 0, only negative values.
*/
struct ipu3_uapi_shd_grid_config {
/* reg 0 */
__u8 width;
__u8 height;
__u8 block_width_log2:3;
__u8 reserved0:1;
__u8 block_height_log2:3;
__u8 reserved1:1;
__u8 grid_height_per_slice;
/* reg 1 */
__s16 x_start;
__s16 y_start;
} __attribute__((packed));
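/*
 * Illustrative sketch, not part of the UAPI: locating the shading grid
 * cell that covers a pixel, using the log2 cell dimensions above. x and y
 * are assumed to be already offset by x_start/y_start.
 */
static inline unsigned int ipu3_example_shd_cell(const struct ipu3_uapi_shd_grid_config *grid,
						 unsigned int x, unsigned int y)
{
	unsigned int cx = x >> grid->block_width_log2;
	unsigned int cy = y >> grid->block_height_log2;

	return cy * grid->width + cx;	/* row-major cell index */
}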
/**
* struct ipu3_uapi_shd_general_config - Shading general config
*
 * @init_set_vrt_offst_ul: set vertical offset,
 * (y_start >> block_height_log2) % grid_height_per_slice.
 * @shd_enable: shading enable.
 * @gain_factor: Gain factor. Shifts the calculated anti-shading value.
 * Precision u2.
 * 0x0 - gain factor [1, 5], no shift of the interpolated value.
 * 0x1 - gain factor [1, 9], interpolated value shifted by 1.
 * 0x2 - gain factor [1, 17], interpolated value shifted by 2.
* @reserved: reserved
*
 * Correction is performed by multiplying each of the 4 Bayer channels by a
 * gain factor that is a function of the pixel location in the sensor.
*/
struct ipu3_uapi_shd_general_config {
__u32 init_set_vrt_offst_ul:8;
__u32 shd_enable:1;
__u32 gain_factor:2;
__u32 reserved:21;
} __attribute__((packed));
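/*
 * Illustrative sketch, not part of the UAPI: deriving init_set_vrt_offst_ul
 * from the grid config following the formula in the comment above. The
 * negation of y_start (which is documented as negative-only) is an
 * assumption about the intended sign handling.
 */
static inline __u32 ipu3_example_shd_vrt_offset(const struct ipu3_uapi_shd_grid_config *grid)
{
	/* grid_height_per_slice must be non-zero */
	return ((-grid->y_start) >> grid->block_height_log2) %
		grid->grid_height_per_slice;
}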
/**
* struct ipu3_uapi_shd_black_level_config - Black level correction
*
 * @bl_r: Bias value for red. s11 range [-2048, 2047].
 * @bl_gr: Bias value for green (red row). s11 range [-2048, 2047].
 * @bl_gb: Bias value for green (blue row). s11 range [-2048, 2047].
 * @bl_b: Bias value for blue. s11 range [-2048, 2047].
*/
struct ipu3_uapi_shd_black_level_config {
__s16 bl_r;
__s16 bl_gr;
__s16 bl_gb;
__s16 bl_b;
} __attribute__((packed));
/**
* struct ipu3_uapi_shd_config_static - Shading config static
*
* @grid: shading grid config &ipu3_uapi_shd_grid_config
* @general: shading general config &ipu3_uapi_shd_general_config
* @black_level: black level config for shading correction as defined by
* &ipu3_uapi_shd_black_level_config
*/
struct ipu3_uapi_shd_config_static {
struct ipu3_uapi_shd_grid_config grid;
struct ipu3_uapi_shd_general_config general;
struct ipu3_uapi_shd_black_level_config black_level;
} __attribute__((packed));
/**
* struct ipu3_uapi_shd_lut - Shading gain factor lookup table.
*
* @sets: array
* @sets.r_and_gr: Red and GreenR Lookup table.
* @sets.r_and_gr.r: Red shading factor.
* @sets.r_and_gr.gr: GreenR shading factor.
* @sets.reserved1: reserved
* @sets.gb_and_b: GreenB and Blue Lookup table.
* @sets.gb_and_b.gb: GreenB shading factor.
* @sets.gb_and_b.b: Blue shading factor.
* @sets.reserved2: reserved
*
* Map to shading correction LUT register set.
*/
struct ipu3_uapi_shd_lut {
struct {
struct {
__u16 r;
__u16 gr;
} r_and_gr[IPU3_UAPI_SHD_MAX_CELLS_PER_SET];
__u8 reserved1[24];
struct {
__u16 gb;
__u16 b;
} gb_and_b[IPU3_UAPI_SHD_MAX_CELLS_PER_SET];
__u8 reserved2[24];
} sets[IPU3_UAPI_SHD_MAX_CFG_SETS];
} __attribute__((packed));
/**
* struct ipu3_uapi_shd_config - Shading config
*
* @shd: shading static config, see &ipu3_uapi_shd_config_static
* @shd_lut: shading lookup table &ipu3_uapi_shd_lut
*/
struct ipu3_uapi_shd_config {
struct ipu3_uapi_shd_config_static shd __attribute__((aligned(32)));
struct ipu3_uapi_shd_lut shd_lut __attribute__((aligned(32)));
} __attribute__((packed));
/* Image Enhancement Filter directed */
/**
* struct ipu3_uapi_iefd_cux2 - IEFd Config Unit 2 parameters
*
* @x0: X0 point of Config Unit, u9.0, default 0.
* @x1: X1 point of Config Unit, u9.0, default 0.
* @a01: Slope A of Config Unit, s4.4, default 0.
* @b01: Slope B, always 0.
*
* Calculate weight for blending directed and non-directed denoise elements
*
* Note:
* Each instance of Config Unit needs X coordinate of n points and
* slope A factor between points calculated by driver based on calibration
* parameters.
*
 * All CU inputs are unsigned; they will be converted to signed when written
 * to the register, i.e. a01 will be written to a 9-bit register in s4.4
 * format. The data precision s4.4 means 4 bits for the integer part and
 * 4 bits for the fractional part, with the most significant bit indicating
 * the sign.
 * For userspace software (commonly the imaging library), the computation of
 * the CU slope values should be based on the slope resolution 1/16 (binary
 * 0.0001 - the minimal interval value); the slope value range is [-256, +255].
 * This applies to &ipu3_uapi_iefd_cux6_ed, &ipu3_uapi_iefd_cux2_1,
 * &ipu3_uapi_iefd_cux4 and &ipu3_uapi_iefd_cux6_rad.
*/
struct ipu3_uapi_iefd_cux2 {
__u32 x0:9;
__u32 x1:9;
__u32 a01:9;
__u32 b01:5;
} __attribute__((packed));
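/*
 * Illustrative sketch, not part of the UAPI: converting a floating point
 * CU slope to its s4.4 register value, based on the 1/16 slope resolution
 * and [-256, +255] encoded range stated above. The helper name is
 * hypothetical; this is the userspace-side computation the comment
 * describes.
 */
static inline __s32 ipu3_example_cu_slope_s44(float slope)
{
	__s32 v = (__s32)(slope * 16.0f);	/* 1/16 resolution */

	if (v < -256)
		v = -256;
	if (v > 255)
		v = 255;
	return v;
}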
/**
* struct ipu3_uapi_iefd_cux6_ed - Calculate power of non-directed sharpening
* element, Config Unit 6 for edge detail (ED).
*
* @x0: X coordinate of point 0, u9.0, default 0.
* @x1: X coordinate of point 1, u9.0, default 0.
* @x2: X coordinate of point 2, u9.0, default 0.
* @reserved0: reserved
* @x3: X coordinate of point 3, u9.0, default 0.
* @x4: X coordinate of point 4, u9.0, default 0.
* @x5: X coordinate of point 5, u9.0, default 0.
* @reserved1: reserved
* @a01: slope A points 01, s4.4, default 0.
* @a12: slope A points 12, s4.4, default 0.
* @a23: slope A points 23, s4.4, default 0.
* @reserved2: reserved
* @a34: slope A points 34, s4.4, default 0.
* @a45: slope A points 45, s4.4, default 0.
* @reserved3: reserved
* @b01: slope B points 01, s4.4, default 0.
* @b12: slope B points 12, s4.4, default 0.
* @b23: slope B points 23, s4.4, default 0.
* @reserved4: reserved
* @b34: slope B points 34, s4.4, default 0.
* @b45: slope B points 45, s4.4, default 0.
* @reserved5: reserved.
*/
struct ipu3_uapi_iefd_cux6_ed {
__u32 x0:9;
__u32 x1:9;
__u32 x2:9;
__u32 reserved0:5;
__u32 x3:9;
__u32 x4:9;
__u32 x5:9;
__u32 reserved1:5;
__u32 a01:9;
__u32 a12:9;
__u32 a23:9;
__u32 reserved2:5;
__u32 a34:9;
__u32 a45:9;
__u32 reserved3:14;
__u32 b01:9;
__u32 b12:9;
__u32 b23:9;
__u32 reserved4:5;
__u32 b34:9;
__u32 b45:9;
__u32 reserved5:14;
} __attribute__((packed));
/**
* struct ipu3_uapi_iefd_cux2_1 - Calculate power of non-directed denoise
* element apply.
* @x0: X0 point of Config Unit, u9.0, default 0.
* @x1: X1 point of Config Unit, u9.0, default 0.
* @a01: Slope A of Config Unit, s4.4, default 0.
* @reserved1: reserved
* @b01: offset B0 of Config Unit, u7.0, default 0.
* @reserved2: reserved
*/
struct ipu3_uapi_iefd_cux2_1 {
__u32 x0:9;
__u32 x1:9;
__u32 a01:9;
__u32 reserved1:5;
__u32 b01:8;
__u32 reserved2:24;
} __attribute__((packed));
/**
* struct ipu3_uapi_iefd_cux4 - Calculate power of non-directed sharpening
* element.
*
* @x0: X0 point of Config Unit, u9.0, default 0.
* @x1: X1 point of Config Unit, u9.0, default 0.
* @x2: X2 point of Config Unit, u9.0, default 0.
* @reserved0: reserved
* @x3: X3 point of Config Unit, u9.0, default 0.
* @a01: Slope A0 of Config Unit, s4.4, default 0.
* @a12: Slope A1 of Config Unit, s4.4, default 0.
* @reserved1: reserved
* @a23: Slope A2 of Config Unit, s4.4, default 0.
* @b01: Offset B0 of Config Unit, s7.0, default 0.
* @b12: Offset B1 of Config Unit, s7.0, default 0.
* @reserved2: reserved
* @b23: Offset B2 of Config Unit, s7.0, default 0.
* @reserved3: reserved
*/
struct ipu3_uapi_iefd_cux4 {
__u32 x0:9;
__u32 x1:9;
__u32 x2:9;
__u32 reserved0:5;
__u32 x3:9;
__u32 a01:9;
__u32 a12:9;
__u32 reserved1:5;
__u32 a23:9;
__u32 b01:8;
__u32 b12:8;
__u32 reserved2:7;
__u32 b23:8;
__u32 reserved3:24;
} __attribute__((packed));
/**
* struct ipu3_uapi_iefd_cux6_rad - Radial Config Unit (CU)
*
* @x0: x0 points of Config Unit radial, u8.0
* @x1: x1 points of Config Unit radial, u8.0
* @x2: x2 points of Config Unit radial, u8.0
* @x3: x3 points of Config Unit radial, u8.0
* @x4: x4 points of Config Unit radial, u8.0
* @x5: x5 points of Config Unit radial, u8.0
* @reserved1: reserved
* @a01: Slope A of Config Unit radial, s7.8
* @a12: Slope A of Config Unit radial, s7.8
* @a23: Slope A of Config Unit radial, s7.8
* @a34: Slope A of Config Unit radial, s7.8
* @a45: Slope A of Config Unit radial, s7.8
* @reserved2: reserved
* @b01: Slope B of Config Unit radial, s9.0
* @b12: Slope B of Config Unit radial, s9.0
* @b23: Slope B of Config Unit radial, s9.0
* @reserved4: reserved
* @b34: Slope B of Config Unit radial, s9.0
* @b45: Slope B of Config Unit radial, s9.0
* @reserved5: reserved
*/
struct ipu3_uapi_iefd_cux6_rad {
__u32 x0:8;
__u32 x1:8;
__u32 x2:8;
__u32 x3:8;
__u32 x4:8;
__u32 x5:8;
__u32 reserved1:16;
__u32 a01:16;
__u32 a12:16;
__u32 a23:16;
__u32 a34:16;
__u32 a45:16;
__u32 reserved2:16;
__u32 b01:10;
__u32 b12:10;
__u32 b23:10;
__u32 reserved4:2;
__u32 b34:10;
__u32 b45:10;
__u32 reserved5:12;
} __attribute__((packed));
/**
* struct ipu3_uapi_yuvp1_iefd_cfg_units - IEFd Config Units parameters
*
* @cu_1: calculate weight for blending directed and
* non-directed denoise elements. See &ipu3_uapi_iefd_cux2
* @cu_ed: calculate power of non-directed sharpening element, see
* &ipu3_uapi_iefd_cux6_ed
* @cu_3: calculate weight for blending directed and
* non-directed denoise elements. A &ipu3_uapi_iefd_cux2
* @cu_5: calculate power of non-directed denoise element apply, use
* &ipu3_uapi_iefd_cux2_1
* @cu_6: calculate power of non-directed sharpening element. See
* &ipu3_uapi_iefd_cux4
* @cu_7: calculate weight for blending directed and
* non-directed denoise elements. Use &ipu3_uapi_iefd_cux2
* @cu_unsharp: Config Unit of unsharp &ipu3_uapi_iefd_cux4
* @cu_radial: Config Unit of radial &ipu3_uapi_iefd_cux6_rad
* @cu_vssnlm: Config Unit of vssnlm &ipu3_uapi_iefd_cux2
*/
struct ipu3_uapi_yuvp1_iefd_cfg_units {
struct ipu3_uapi_iefd_cux2 cu_1;
struct ipu3_uapi_iefd_cux6_ed cu_ed;
struct ipu3_uapi_iefd_cux2 cu_3;
struct ipu3_uapi_iefd_cux2_1 cu_5;
struct ipu3_uapi_iefd_cux4 cu_6;
struct ipu3_uapi_iefd_cux2 cu_7;
struct ipu3_uapi_iefd_cux4 cu_unsharp;
struct ipu3_uapi_iefd_cux6_rad cu_radial;
struct ipu3_uapi_iefd_cux2 cu_vssnlm;
} __attribute__((packed));
/**
* struct ipu3_uapi_yuvp1_iefd_config_s - IEFd config
*
* @horver_diag_coeff: Gradient compensation. Compared with vertical /
* horizontal (0 / 90 degree), coefficient of diagonal (45 /
* 135 degree) direction should be corrected by approx.
* 1/sqrt(2).
* @reserved0: reserved
* @clamp_stitch: Slope to stitch between clamped and unclamped edge values
* @reserved1: reserved
* @direct_metric_update: Update coeff for direction metric
* @reserved2: reserved
* @ed_horver_diag_coeff: Radial Coefficient that compensates for
* different distance for vertical/horizontal and
* diagonal gradient calculation (approx. 1/sqrt(2))
* @reserved3: reserved
*/
struct ipu3_uapi_yuvp1_iefd_config_s {
__u32 horver_diag_coeff:7;
__u32 reserved0:1;
__u32 clamp_stitch:6;
__u32 reserved1:2;
__u32 direct_metric_update:5;
__u32 reserved2:3;
__u32 ed_horver_diag_coeff:7;
__u32 reserved3:1;
} __attribute__((packed));
/**
* struct ipu3_uapi_yuvp1_iefd_control - IEFd control
*
* @iefd_en: Enable IEFd
* @denoise_en: Enable denoise
* @direct_smooth_en: Enable directional smooth
* @rad_en: Enable radial update
* @vssnlm_en: Enable VSSNLM output filter
* @reserved: reserved
*/
struct ipu3_uapi_yuvp1_iefd_control {
__u32 iefd_en:1;
__u32 denoise_en:1;
__u32 direct_smooth_en:1;
__u32 rad_en:1;
__u32 vssnlm_en:1;
__u32 reserved:27;
} __attribute__((packed));
/**
* struct ipu3_uapi_sharp_cfg - Sharpening config
*
* @nega_lmt_txt: Sharpening limit for negative overshoots for texture.
* @reserved0: reserved
* @posi_lmt_txt: Sharpening limit for positive overshoots for texture.
* @reserved1: reserved
* @nega_lmt_dir: Sharpening limit for negative overshoots for direction (edge).
* @reserved2: reserved
* @posi_lmt_dir: Sharpening limit for positive overshoots for direction (edge).
* @reserved3: reserved
*
* Fixed point type u13.0, range [0, 8191].
*/
struct ipu3_uapi_sharp_cfg {
__u32 nega_lmt_txt:13;
__u32 reserved0:19;
__u32 posi_lmt_txt:13;
__u32 reserved1:19;
__u32 nega_lmt_dir:13;
__u32 reserved2:19;
__u32 posi_lmt_dir:13;
__u32 reserved3:19;
} __attribute__((packed));
/**
* struct ipu3_uapi_far_w - Sharpening config for far sub-group
*
* @dir_shrp: Weight of wide direct sharpening, u1.6, range [0, 64], default 64.
* @reserved0: reserved
* @dir_dns: Weight of wide direct denoising, u1.6, range [0, 64], default 0.
* @reserved1: reserved
* @ndir_dns_powr: Power of non-direct denoising,
* Precision u1.6, range [0, 64], default 64.
* @reserved2: reserved
*/
struct ipu3_uapi_far_w {
__u32 dir_shrp:7;
__u32 reserved0:1;
__u32 dir_dns:7;
__u32 reserved1:1;
__u32 ndir_dns_powr:7;
__u32 reserved2:9;
} __attribute__((packed));
/**
* struct ipu3_uapi_unsharp_cfg - Unsharp config
*
* @unsharp_weight: Unsharp mask blending weight.
* u1.6, range [0, 64], default 16.
* 0 - disabled, 64 - use only unsharp.
* @reserved0: reserved
* @unsharp_amount: Unsharp mask amount, u4.5, range [0, 511], default 0.
* @reserved1: reserved
*/
struct ipu3_uapi_unsharp_cfg {
__u32 unsharp_weight:7;
__u32 reserved0:1;
__u32 unsharp_amount:9;
__u32 reserved1:15;
} __attribute__((packed));
/**
* struct ipu3_uapi_yuvp1_iefd_shrp_cfg - IEFd sharpness config
*
* @cfg: sharpness config &ipu3_uapi_sharp_cfg
* @far_w: wide range config, value as specified by &ipu3_uapi_far_w:
* The 5x5 environment is separated into 2 sub-groups, the 3x3 nearest
* neighbors (8 pixels called Near), and the second order neighborhood
* around them (16 pixels called Far).
* @unshrp_cfg: unsharpness config. &ipu3_uapi_unsharp_cfg
*/
struct ipu3_uapi_yuvp1_iefd_shrp_cfg {
struct ipu3_uapi_sharp_cfg cfg;
struct ipu3_uapi_far_w far_w;
struct ipu3_uapi_unsharp_cfg unshrp_cfg;
} __attribute__((packed));
/**
* struct ipu3_uapi_unsharp_coef0 - Unsharp mask coefficients
*
* @c00: Coeff11, s0.8, range [-255, 255], default 1.
* @c01: Coeff12, s0.8, range [-255, 255], default 5.
* @c02: Coeff13, s0.8, range [-255, 255], default 9.
* @reserved: reserved
*
* Configurable registers for common sharpening support.
*/
struct ipu3_uapi_unsharp_coef0 {
__u32 c00:9;
__u32 c01:9;
__u32 c02:9;
__u32 reserved:5;
} __attribute__((packed));
/**
* struct ipu3_uapi_unsharp_coef1 - Unsharp mask coefficients
*
* @c11: Coeff22, s0.8, range [-255, 255], default 29.
* @c12: Coeff23, s0.8, range [-255, 255], default 55.
* @c22: Coeff33, s0.8, range [-255, 255], default 96.
* @reserved: reserved
*/
struct ipu3_uapi_unsharp_coef1 {
__u32 c11:9;
__u32 c12:9;
__u32 c22:9;
__u32 reserved:5;
} __attribute__((packed));
/**
* struct ipu3_uapi_yuvp1_iefd_unshrp_cfg - Unsharp mask config
*
* @unsharp_coef0: unsharp coefficient 0 config. See &ipu3_uapi_unsharp_coef0
* @unsharp_coef1: unsharp coefficient 1 config. See &ipu3_uapi_unsharp_coef1
*/
struct ipu3_uapi_yuvp1_iefd_unshrp_cfg {
struct ipu3_uapi_unsharp_coef0 unsharp_coef0;
struct ipu3_uapi_unsharp_coef1 unsharp_coef1;
} __attribute__((packed));
/**
* struct ipu3_uapi_radial_reset_xy - Radial coordinate reset
*
* @x: Radial reset of x coordinate. Precision s12, [-4095, 4095], default 0.
* @reserved0: reserved
* @y: Radial center y coordinate. Precision s12, [-4095, 4095], default 0.
* @reserved1: reserved
*/
struct ipu3_uapi_radial_reset_xy {
__s32 x:13;
__u32 reserved0:3;
__s32 y:13;
__u32 reserved1:3;
} __attribute__((packed));
/**
* struct ipu3_uapi_radial_reset_x2 - Radial X^2 reset
*
* @x2: Radial reset of x^2 coordinate. Precision u24, default 0.
* @reserved: reserved
*/
struct ipu3_uapi_radial_reset_x2 {
__u32 x2:24;
__u32 reserved:8;
} __attribute__((packed));
/**
* struct ipu3_uapi_radial_reset_y2 - Radial Y^2 reset
*
* @y2: Radial reset of y^2 coordinate. Precision u24, default 0.
* @reserved: reserved
*/
struct ipu3_uapi_radial_reset_y2 {
__u32 y2:24;
__u32 reserved:8;
} __attribute__((packed));
/**
* struct ipu3_uapi_radial_cfg - Radial config
*
 * @rad_nf: Radial. The R^2 normalization factor is scaled down by
 * 2^-(15 + scale).
 * @reserved0: reserved
 * @rad_inv_r2: Radial R^-2 normalized to (0.5..1).
* Precision u7, range [0, 127].
* @reserved1: reserved
*/
struct ipu3_uapi_radial_cfg {
__u32 rad_nf:4;
__u32 reserved0:4;
__u32 rad_inv_r2:7;
__u32 reserved1:17;
} __attribute__((packed));
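/*
 * Illustrative note, not part of the UAPI: the x, x^2 and y^2 reset values
 * seed an incremental computation of r^2 = x^2 + y^2, which can then be
 * updated per pixel without a multiplier, since (x+1)^2 = x^2 + 2x + 1.
 * A software model of one horizontal step:
 */
static inline void ipu3_example_radial_step_x(__s32 *x, __u32 *x_sqr)
{
	*x_sqr += 2 * *x + 1;	/* (x+1)^2 = x^2 + 2x + 1, also correct for x < 0 */
	(*x)++;
}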
/**
* struct ipu3_uapi_rad_far_w - Radial FAR sub-group
*
* @rad_dir_far_sharp_w: Weight of wide direct sharpening, u1.6, range [0, 64],
* default 64.
* @rad_dir_far_dns_w: Weight of wide direct denoising, u1.6, range [0, 64],
* default 0.
 * @rad_ndir_far_dns_power: Power of non-direct denoising, u1.6, range [0, 64],
 * default 0.
* @reserved: reserved
*/
struct ipu3_uapi_rad_far_w {
__u32 rad_dir_far_sharp_w:8;
__u32 rad_dir_far_dns_w:8;
__u32 rad_ndir_far_dns_power:8;
__u32 reserved:8;
} __attribute__((packed));
/**
* struct ipu3_uapi_cu_cfg0 - Radius Config Unit cfg0 register
*
* @cu6_pow: Power of CU6. Power of non-direct sharpening, u3.4.
* @reserved0: reserved
* @cu_unsharp_pow: Power of unsharp mask, u2.4.
* @reserved1: reserved
* @rad_cu6_pow: Radial/corner CU6. Directed sharpening power, u3.4.
* @reserved2: reserved
* @rad_cu_unsharp_pow: Radial power of unsharp mask, u2.4.
* @reserved3: reserved
*/
struct ipu3_uapi_cu_cfg0 {
__u32 cu6_pow:7;
__u32 reserved0:1;
__u32 cu_unsharp_pow:7;
__u32 reserved1:1;
__u32 rad_cu6_pow:7;
__u32 reserved2:1;
__u32 rad_cu_unsharp_pow:6;
__u32 reserved3:2;
} __attribute__((packed));
/**
* struct ipu3_uapi_cu_cfg1 - Radius Config Unit cfg1 register
*
* @rad_cu6_x1: X1 point of Config Unit 6, precision u9.0.
* @reserved0: reserved
* @rad_cu_unsharp_x1: X1 point for Config Unit unsharp for radial/corner point
* precision u9.0.
* @reserved1: reserved
*/
struct ipu3_uapi_cu_cfg1 {
__u32 rad_cu6_x1:9;
__u32 reserved0:1;
__u32 rad_cu_unsharp_x1:9;
__u32 reserved1:13;
} __attribute__((packed));
/**
* struct ipu3_uapi_yuvp1_iefd_rad_cfg - IEFd parameters changed radially over
* the picture plane.
*
* @reset_xy: reset xy value in radial calculation. &ipu3_uapi_radial_reset_xy
* @reset_x2: reset x square value in radial calculation. See struct
* &ipu3_uapi_radial_reset_x2
* @reset_y2: reset y square value in radial calculation. See struct
* &ipu3_uapi_radial_reset_y2
* @cfg: radial config defined in &ipu3_uapi_radial_cfg
* @rad_far_w: weight for wide range radial. &ipu3_uapi_rad_far_w
* @cu_cfg0: configuration unit 0. See &ipu3_uapi_cu_cfg0
* @cu_cfg1: configuration unit 1. See &ipu3_uapi_cu_cfg1
*/
struct ipu3_uapi_yuvp1_iefd_rad_cfg {
struct ipu3_uapi_radial_reset_xy reset_xy;
struct ipu3_uapi_radial_reset_x2 reset_x2;
struct ipu3_uapi_radial_reset_y2 reset_y2;
struct ipu3_uapi_radial_cfg cfg;
struct ipu3_uapi_rad_far_w rad_far_w;
struct ipu3_uapi_cu_cfg0 cu_cfg0;
struct ipu3_uapi_cu_cfg1 cu_cfg1;
} __attribute__((packed));
/* Vssnlm - Very small scale non-local mean algorithm */
/**
* struct ipu3_uapi_vss_lut_x - Vssnlm LUT x0/x1/x2
*
* @vs_x0: Vssnlm LUT x0, precision u8, range [0, 255], default 16.
* @vs_x1: Vssnlm LUT x1, precision u8, range [0, 255], default 32.
* @vs_x2: Vssnlm LUT x2, precision u8, range [0, 255], default 64.
* @reserved2: reserved
*/
struct ipu3_uapi_vss_lut_x {
__u32 vs_x0:8;
__u32 vs_x1:8;
__u32 vs_x2:8;
__u32 reserved2:8;
} __attribute__((packed));
/**
* struct ipu3_uapi_vss_lut_y - Vssnlm LUT y0/y1/y2
*
* @vs_y1: Vssnlm LUT y1, precision u4, range [0, 8], default 1.
* @reserved0: reserved
* @vs_y2: Vssnlm LUT y2, precision u4, range [0, 8], default 3.
* @reserved1: reserved
* @vs_y3: Vssnlm LUT y3, precision u4, range [0, 8], default 8.
* @reserved2: reserved
*/
struct ipu3_uapi_vss_lut_y {
__u32 vs_y1:4;
__u32 reserved0:4;
__u32 vs_y2:4;
__u32 reserved1:4;
__u32 vs_y3:4;
__u32 reserved2:12;
} __attribute__((packed));
/**
* struct ipu3_uapi_yuvp1_iefd_vssnlm_cfg - IEFd Vssnlm Lookup table
*
* @vss_lut_x: vss lookup table. See &ipu3_uapi_vss_lut_x description
* @vss_lut_y: vss lookup table. See &ipu3_uapi_vss_lut_y description
*/
struct ipu3_uapi_yuvp1_iefd_vssnlm_cfg {
struct ipu3_uapi_vss_lut_x vss_lut_x;
struct ipu3_uapi_vss_lut_y vss_lut_y;
} __attribute__((packed));
/**
* struct ipu3_uapi_yuvp1_iefd_config - IEFd config
*
* @units: configuration unit setting, &ipu3_uapi_yuvp1_iefd_cfg_units
* @config: configuration, as defined by &ipu3_uapi_yuvp1_iefd_config_s
* @control: control setting, as defined by &ipu3_uapi_yuvp1_iefd_control
* @sharp: sharpness setting, as defined by &ipu3_uapi_yuvp1_iefd_shrp_cfg
* @unsharp: unsharpness setting, as defined by &ipu3_uapi_yuvp1_iefd_unshrp_cfg
* @rad: radial setting, as defined by &ipu3_uapi_yuvp1_iefd_rad_cfg
* @vsslnm: vsslnm setting, as defined by &ipu3_uapi_yuvp1_iefd_vssnlm_cfg
*/
struct ipu3_uapi_yuvp1_iefd_config {
struct ipu3_uapi_yuvp1_iefd_cfg_units units;
struct ipu3_uapi_yuvp1_iefd_config_s config;
struct ipu3_uapi_yuvp1_iefd_control control;
struct ipu3_uapi_yuvp1_iefd_shrp_cfg sharp;
struct ipu3_uapi_yuvp1_iefd_unshrp_cfg unsharp;
struct ipu3_uapi_yuvp1_iefd_rad_cfg rad;
struct ipu3_uapi_yuvp1_iefd_vssnlm_cfg vsslnm;
} __attribute__((packed));
/**
* struct ipu3_uapi_yuvp1_yds_config - Y Down-Sampling config
*
* @c00: range [0, 3], default 0x0
* @c01: range [0, 3], default 0x1
* @c02: range [0, 3], default 0x1
* @c03: range [0, 3], default 0x0
* @c10: range [0, 3], default 0x0
* @c11: range [0, 3], default 0x1
* @c12: range [0, 3], default 0x1
* @c13: range [0, 3], default 0x0
*
* Above are 4x2 filter coefficients for chroma output downscaling.
*
* @norm_factor: Normalization factor, range [0, 4], default 2
* 0 - divide by 1
* 1 - divide by 2
* 2 - divide by 4
* 3 - divide by 8
* 4 - divide by 16
* @reserved0: reserved
 * @bin_output: Down sampling on the luma channel in two optional modes:
 * 0 - bin output YUV 4:2:0 (default), 1 - output YUV 4:2:2.
* @reserved1: reserved
*/
struct ipu3_uapi_yuvp1_yds_config {
__u32 c00:2;
__u32 c01:2;
__u32 c02:2;
__u32 c03:2;
__u32 c10:2;
__u32 c11:2;
__u32 c12:2;
__u32 c13:2;
__u32 norm_factor:5;
__u32 reserved0:4;
__u32 bin_output:1;
__u32 reserved1:6;
} __attribute__((packed));
/* Chroma Noise Reduction */
/**
* struct ipu3_uapi_yuvp1_chnr_enable_config - Chroma noise reduction enable
*
* @enable: enable/disable chroma noise reduction
* @yuv_mode: 0 - YUV420, 1 - YUV422
* @reserved0: reserved
* @col_size: number of columns in the frame, max width is 2560
* @reserved1: reserved
*/
struct ipu3_uapi_yuvp1_chnr_enable_config {
__u32 enable:1;
__u32 yuv_mode:1;
__u32 reserved0:14;
__u32 col_size:12;
__u32 reserved1:4;
} __attribute__((packed));
/**
* struct ipu3_uapi_yuvp1_chnr_coring_config - Coring thresholds for UV
*
* @u: U coring level, u0.13, range [0.0, 1.0], default 0.0
* @reserved0: reserved
* @v: V coring level, u0.13, range [0.0, 1.0], default 0.0
* @reserved1: reserved
*/
struct ipu3_uapi_yuvp1_chnr_coring_config {
__u32 u:13;
__u32 reserved0:3;
__u32 v:13;
__u32 reserved1:3;
} __attribute__((packed));
/**
* struct ipu3_uapi_yuvp1_chnr_sense_gain_config - Chroma noise reduction gains
*
* All sensitivity gain parameters have precision u13.0, range [0, 8191].
*
* @vy: Sensitivity of horizontal edge of Y, default 100
* @vu: Sensitivity of horizontal edge of U, default 100
* @vv: Sensitivity of horizontal edge of V, default 100
* @reserved0: reserved
* @hy: Sensitivity of vertical edge of Y, default 50
* @hu: Sensitivity of vertical edge of U, default 50
* @hv: Sensitivity of vertical edge of V, default 50
* @reserved1: reserved
*/
struct ipu3_uapi_yuvp1_chnr_sense_gain_config {
__u32 vy:8;
__u32 vu:8;
__u32 vv:8;
__u32 reserved0:8;
__u32 hy:8;
__u32 hu:8;
__u32 hv:8;
__u32 reserved1:8;
} __attribute__((packed));
/**
* struct ipu3_uapi_yuvp1_chnr_iir_fir_config - Chroma IIR/FIR filter config
*
* @fir_0h: Value of center tap in horizontal FIR, range [0, 32], default 8.
* @reserved0: reserved
* @fir_1h: Value of distance 1 in horizontal FIR, range [0, 32], default 12.
* @reserved1: reserved
* @fir_2h: Value of distance 2 tap in horizontal FIR, range [0, 32], default 0.
* @dalpha_clip_val: weight for previous row in IIR, range [1, 256], default 0.
* @reserved2: reserved
*/
struct ipu3_uapi_yuvp1_chnr_iir_fir_config {
__u32 fir_0h:6;
__u32 reserved0:2;
__u32 fir_1h:6;
__u32 reserved1:2;
__u32 fir_2h:6;
__u32 dalpha_clip_val:9;
__u32 reserved2:1;
} __attribute__((packed));
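/*
 * Illustrative sketch, not part of the UAPI: the horizontal FIR above read
 * as a symmetric 5-tap filter. With the defaults (8, 12, 0) the taps sum
 * to 8 + 2*12 + 2*0 = 32, so normalizing by 32 is an assumption based on
 * those defaults.
 */
static inline __u32 ipu3_example_chnr_fir(const struct ipu3_uapi_yuvp1_chnr_iir_fir_config *c,
					  const __u32 p[5])
{
	__u32 acc = c->fir_2h * (p[0] + p[4]) +	/* distance 2 taps */
		    c->fir_1h * (p[1] + p[3]) +	/* distance 1 taps */
		    c->fir_0h * p[2];		/* center tap */

	return acc >> 5;	/* divide by 32, assumed normalization */
}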
/**
* struct ipu3_uapi_yuvp1_chnr_config - Chroma noise reduction config
*
* @enable: chroma noise reduction enable, see
* &ipu3_uapi_yuvp1_chnr_enable_config
* @coring: coring config for chroma noise reduction, see
* &ipu3_uapi_yuvp1_chnr_coring_config
* @sense_gain: sensitivity config for chroma noise reduction, see
* ipu3_uapi_yuvp1_chnr_sense_gain_config
* @iir_fir: iir and fir config for chroma noise reduction, see
* ipu3_uapi_yuvp1_chnr_iir_fir_config
*/
struct ipu3_uapi_yuvp1_chnr_config {
struct ipu3_uapi_yuvp1_chnr_enable_config enable;
struct ipu3_uapi_yuvp1_chnr_coring_config coring;
struct ipu3_uapi_yuvp1_chnr_sense_gain_config sense_gain;
struct ipu3_uapi_yuvp1_chnr_iir_fir_config iir_fir;
} __attribute__((packed));
/* Edge Enhancement and Noise Reduction */
/**
* struct ipu3_uapi_yuvp1_y_ee_nr_lpf_config - Luma(Y) edge enhancement low-pass
* filter coefficients
*
* @a_diag: Smoothing diagonal coefficient, u5.0.
* @reserved0: reserved
 * @a_periph: Image smoothing peripheral coefficient, u5.0.
* @reserved1: reserved
* @a_cent: Image Smoothing center coefficient, u5.0.
* @reserved2: reserved
* @enable: 0: Y_EE_NR disabled, output = input; 1: Y_EE_NR enabled.
*/
struct ipu3_uapi_yuvp1_y_ee_nr_lpf_config {
__u32 a_diag:5;
__u32 reserved0:3;
__u32 a_periph:5;
__u32 reserved1:3;
__u32 a_cent:5;
__u32 reserved2:9;
__u32 enable:1;
} __attribute__((packed));
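/*
 * Illustrative note, not part of the UAPI: the three coefficients above
 * are assumed to describe a 3x3 smoothing kernel of the form
 *
 *	a_diag    a_periph  a_diag
 *	a_periph  a_cent    a_periph
 *	a_diag    a_periph  a_diag
 *
 * applied to the luma plane when @enable is set.
 */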
/**
* struct ipu3_uapi_yuvp1_y_ee_nr_sense_config - Luma(Y) edge enhancement
* noise reduction sensitivity gains
*
* @edge_sense_0: Sensitivity of edge in dark area. u13.0, default 8191.
* @reserved0: reserved
* @delta_edge_sense: Difference in the sensitivity of edges between
* the bright and dark areas. u13.0, default 0.
* @reserved1: reserved
* @corner_sense_0: Sensitivity of corner in dark area. u13.0, default 0.
* @reserved2: reserved
* @delta_corner_sense: Difference in the sensitivity of corners between
* the bright and dark areas. u13.0, default 8191.
* @reserved3: reserved
*/
struct ipu3_uapi_yuvp1_y_ee_nr_sense_config {
__u32 edge_sense_0:13;
__u32 reserved0:3;
__u32 delta_edge_sense:13;
__u32 reserved1:3;
__u32 corner_sense_0:13;
__u32 reserved2:3;
__u32 delta_corner_sense:13;
__u32 reserved3:3;
} __attribute__((packed));
/**
* struct ipu3_uapi_yuvp1_y_ee_nr_gain_config - Luma(Y) edge enhancement
* noise reduction gain config
*
* @gain_pos_0: Gain for positive edge in dark area. u5.0, [0, 16], default 2.
* @reserved0: reserved
* @delta_gain_posi: Difference in the gain of edges between the bright and
* dark areas for positive edges. u5.0, [0, 16], default 0.
* @reserved1: reserved
* @gain_neg_0: Gain for negative edge in dark area. u5.0, [0, 16], default 8.
* @reserved2: reserved
* @delta_gain_neg: Difference in the gain of edges between the bright and
* dark areas for negative edges. u5.0, [0, 16], default 0.
* @reserved3: reserved
*/
struct ipu3_uapi_yuvp1_y_ee_nr_gain_config {
__u32 gain_pos_0:5;
__u32 reserved0:3;
__u32 delta_gain_posi:5;
__u32 reserved1:3;
__u32 gain_neg_0:5;
__u32 reserved2:3;
__u32 delta_gain_neg:5;
__u32 reserved3:3;
} __attribute__((packed));
/**
* struct ipu3_uapi_yuvp1_y_ee_nr_clip_config - Luma(Y) edge enhancement
* noise reduction clipping config
*
* @clip_pos_0: Limit of positive edge in dark area
* u5, value [0, 16], default 8.
* @reserved0: reserved
* @delta_clip_posi: Difference in the limit of edges between the bright
* and dark areas for positive edges.
* u5, value [0, 16], default 8.
* @reserved1: reserved
* @clip_neg_0: Limit of negative edge in dark area
* u5, value [0, 16], default 8.
* @reserved2: reserved
* @delta_clip_neg: Difference in the limit of edges between the bright
* and dark areas for negative edges.
* u5, value [0, 16], default 8.
* @reserved3: reserved
*/
struct ipu3_uapi_yuvp1_y_ee_nr_clip_config {
__u32 clip_pos_0:5;
__u32 reserved0:3;
__u32 delta_clip_posi:5;
__u32 reserved1:3;
__u32 clip_neg_0:5;
__u32 reserved2:3;
__u32 delta_clip_neg:5;
__u32 reserved3:3;
} __attribute__((packed));
/**
* struct ipu3_uapi_yuvp1_y_ee_nr_frng_config - Luma(Y) edge enhancement
* noise reduction fringe config
*
* @gain_exp: Common exponent of gains, u4, [0, 8], default 2.
* @reserved0: reserved
* @min_edge: Threshold for edge and smooth stitching, u13.
* @reserved1: reserved
* @lin_seg_param: Power of LinSeg, u4.
* @reserved2: reserved
* @t1: Parameter for enabling/disabling the edge enhancement, u1.0, [0, 1],
* default 1.
* @t2: Parameter for enabling/disabling the smoothing, u1.0, [0, 1],
* default 1.
* @reserved3: reserved
*/
struct ipu3_uapi_yuvp1_y_ee_nr_frng_config {
__u32 gain_exp:4;
__u32 reserved0:28;
__u32 min_edge:13;
__u32 reserved1:3;
__u32 lin_seg_param:4;
__u32 reserved2:4;
__u32 t1:1;
__u32 t2:1;
__u32 reserved3:6;
} __attribute__((packed));
/**
* struct ipu3_uapi_yuvp1_y_ee_nr_diag_config - Luma(Y) edge enhancement
* noise reduction diagonal config
*
 * @diag_disc_g: Coefficient that prioritizes the diagonal edge direction
 * over horizontal or vertical for the final enhancement.
 * u4.0, [1, 15], default 1.
* @reserved0: reserved
* @hvw_hor: Weight of horizontal/vertical edge enhancement for hv edge.
* u2.2, [1, 15], default 4.
* @dw_hor: Weight of diagonal edge enhancement for hv edge.
* u2.2, [1, 15], default 1.
* @hvw_diag: Weight of horizontal/vertical edge enhancement for diagonal edge.
* u2.2, [1, 15], default 1.
* @dw_diag: Weight of diagonal edge enhancement for diagonal edge.
* u2.2, [1, 15], default 4.
* @reserved1: reserved
*/
struct ipu3_uapi_yuvp1_y_ee_nr_diag_config {
__u32 diag_disc_g:4;
__u32 reserved0:4;
__u32 hvw_hor:4;
__u32 dw_hor:4;
__u32 hvw_diag:4;
__u32 dw_diag:4;
__u32 reserved1:8;
} __attribute__((packed));
/**
* struct ipu3_uapi_yuvp1_y_ee_nr_fc_coring_config - Luma(Y) edge enhancement
* noise reduction false color correction (FCC) coring config
*
 * @pos_0: Gain for positive edge in dark area, u13.0, [0, 16], default 0.
 * @reserved0: reserved
 * @pos_delta: Gain for positive edge in bright area; pos_0 + pos_delta <= 16,
 * u13.0, default 0.
 * @reserved1: reserved
 * @neg_0: Gain for negative edge in dark area, u13.0, range [0, 16], default 0.
 * @reserved2: reserved
 * @neg_delta: Gain for negative edge in bright area; neg_0 + neg_delta <= 16,
 * u13.0, default 0.
* @reserved3: reserved
*
* Coring is a simple soft thresholding technique.
*/
struct ipu3_uapi_yuvp1_y_ee_nr_fc_coring_config {
__u32 pos_0:13;
__u32 reserved0:3;
__u32 pos_delta:13;
__u32 reserved1:3;
__u32 neg_0:13;
__u32 reserved2:3;
__u32 neg_delta:13;
__u32 reserved3:3;
} __attribute__((packed));
/**
* struct ipu3_uapi_yuvp1_y_ee_nr_config - Edge enhancement and noise reduction
*
* @lpf: low-pass filter config. See &ipu3_uapi_yuvp1_y_ee_nr_lpf_config
* @sense: sensitivity config. See &ipu3_uapi_yuvp1_y_ee_nr_sense_config
* @gain: gain config as defined in &ipu3_uapi_yuvp1_y_ee_nr_gain_config
* @clip: clip config as defined in &ipu3_uapi_yuvp1_y_ee_nr_clip_config
* @frng: fringe config as defined in &ipu3_uapi_yuvp1_y_ee_nr_frng_config
* @diag: diagonal edge config. See &ipu3_uapi_yuvp1_y_ee_nr_diag_config
* @fc_coring: coring config for fringe control. See
* &ipu3_uapi_yuvp1_y_ee_nr_fc_coring_config
*/
struct ipu3_uapi_yuvp1_y_ee_nr_config {
struct ipu3_uapi_yuvp1_y_ee_nr_lpf_config lpf;
struct ipu3_uapi_yuvp1_y_ee_nr_sense_config sense;
struct ipu3_uapi_yuvp1_y_ee_nr_gain_config gain;
struct ipu3_uapi_yuvp1_y_ee_nr_clip_config clip;
struct ipu3_uapi_yuvp1_y_ee_nr_frng_config frng;
struct ipu3_uapi_yuvp1_y_ee_nr_diag_config diag;
struct ipu3_uapi_yuvp1_y_ee_nr_fc_coring_config fc_coring;
} __attribute__((packed));
/* Total Color Correction */
/**
* struct ipu3_uapi_yuvp2_tcc_gen_control_static_config - Total color correction
* general control config
*
 * @en: 0 - TCC disabled (output = input), 1 - TCC enabled.
 * @blend_shift: blend shift, range [3, 4], default NA.
 * @gain_according_to_y_only: 0 - gain is calculated according to YUV,
 * 1 - gain is calculated according to Y only
 * @reserved0: reserved
 * @gamma: Final blending coefficient. Values [-16, 16], default NA.
 * @reserved1: reserved
 * @delta: Final blending coefficient. Values [-16, 16], default NA.
* @reserved2: reserved
*/
struct ipu3_uapi_yuvp2_tcc_gen_control_static_config {
__u32 en:1;
__u32 blend_shift:3;
__u32 gain_according_to_y_only:1;
__u32 reserved0:11;
__s32 gamma:5;
__u32 reserved1:3;
__s32 delta:5;
__u32 reserved2:3;
} __attribute__((packed));
/**
* struct ipu3_uapi_yuvp2_tcc_macc_elem_static_config - Total color correction
* multi-axis color control (MACC) config
*
* @a: a coefficient for 2x2 MACC conversion matrix.
* @reserved0: reserved
 * @b: b coefficient for 2x2 MACC conversion matrix.
* @reserved1: reserved
* @c: c coefficient for 2x2 MACC conversion matrix.
* @reserved2: reserved
* @d: d coefficient for 2x2 MACC conversion matrix.
* @reserved3: reserved
*/
struct ipu3_uapi_yuvp2_tcc_macc_elem_static_config {
__s32 a:12;
__u32 reserved0:4;
__s32 b:12;
__u32 reserved1:4;
__s32 c:12;
__u32 reserved2:4;
__s32 d:12;
__u32 reserved3:4;
} __attribute__((packed));
/**
* struct ipu3_uapi_yuvp2_tcc_macc_table_static_config - Total color correction
* multi-axis color control (MACC) table array
*
* @entries: config for multi axis color correction, as specified by
* &ipu3_uapi_yuvp2_tcc_macc_elem_static_config
*/
struct ipu3_uapi_yuvp2_tcc_macc_table_static_config {
struct ipu3_uapi_yuvp2_tcc_macc_elem_static_config
entries[IPU3_UAPI_YUVP2_TCC_MACC_TABLE_ELEMENTS];
} __attribute__((packed));
/**
* struct ipu3_uapi_yuvp2_tcc_inv_y_lut_static_config - Total color correction
* inverse y lookup table
*
 * @entries: lookup table for inverse Y estimation, used to estimate the
 * ratio between chroma and luma. Chroma is approximated by the
 * absolute value of the radius on the chroma plane
 * (R = sqrt(u^2 + v^2)) and luma is approximated by 1/Y.
*/
struct ipu3_uapi_yuvp2_tcc_inv_y_lut_static_config {
__u16 entries[IPU3_UAPI_YUVP2_TCC_INV_Y_LUT_ELEMENTS];
} __attribute__((packed));
/**
* struct ipu3_uapi_yuvp2_tcc_gain_pcwl_lut_static_config - Total color
* correction lookup table for PCWL
*
 * @entries: lookup table for the gain piecewise linear transformation (PCWL)
*/
struct ipu3_uapi_yuvp2_tcc_gain_pcwl_lut_static_config {
__u16 entries[IPU3_UAPI_YUVP2_TCC_GAIN_PCWL_LUT_ELEMENTS];
} __attribute__((packed));
/**
* struct ipu3_uapi_yuvp2_tcc_r_sqr_lut_static_config - Total color correction
* lookup table for r square root
*
* @entries: lookup table for r square root estimation
*/
struct ipu3_uapi_yuvp2_tcc_r_sqr_lut_static_config {
__s16 entries[IPU3_UAPI_YUVP2_TCC_R_SQR_LUT_ELEMENTS];
} __attribute__((packed));
/**
* struct ipu3_uapi_yuvp2_tcc_static_config- Total color correction static
*
* @gen_control: general config for Total Color Correction
* @macc_table: config for multi axis color correction
* @inv_y_lut: lookup table for inverse y estimation
* @gain_pcwl: lookup table for gain PCWL
* @r_sqr_lut: lookup table for r square root estimation.
*/
struct ipu3_uapi_yuvp2_tcc_static_config {
struct ipu3_uapi_yuvp2_tcc_gen_control_static_config gen_control;
struct ipu3_uapi_yuvp2_tcc_macc_table_static_config macc_table;
struct ipu3_uapi_yuvp2_tcc_inv_y_lut_static_config inv_y_lut;
struct ipu3_uapi_yuvp2_tcc_gain_pcwl_lut_static_config gain_pcwl;
struct ipu3_uapi_yuvp2_tcc_r_sqr_lut_static_config r_sqr_lut;
} __attribute__((packed));
/* Advanced Noise Reduction related structs */
/*
* struct ipu3_uapi_anr_alpha - Advanced noise reduction alpha
*
* Tunable parameters that are subject to modification according to the
* total gain used.
*/
struct ipu3_uapi_anr_alpha {
__u16 gr;
__u16 r;
__u16 b;
__u16 gb;
__u16 dc_gr;
__u16 dc_r;
__u16 dc_b;
__u16 dc_gb;
} __attribute__((packed));
/*
* struct ipu3_uapi_anr_beta - Advanced noise reduction beta
*
* Tunable parameters that are subject to modification according to the
* total gain used.
*/
struct ipu3_uapi_anr_beta {
__u16 beta_gr;
__u16 beta_r;
__u16 beta_b;
__u16 beta_gb;
} __attribute__((packed));
/*
* struct ipu3_uapi_anr_plane_color - Advanced noise reduction per plane R, Gr,
* Gb and B register settings
*
* Tunable parameters that are subject to modification according to the
* total gain used.
*/
struct ipu3_uapi_anr_plane_color {
__u16 reg_w_gr[16];
__u16 reg_w_r[16];
__u16 reg_w_b[16];
__u16 reg_w_gb[16];
} __attribute__((packed));
/**
* struct ipu3_uapi_anr_transform_config - Advanced noise reduction transform
*
* @enable: advanced noise reduction enabled.
* @adaptive_treshhold_en: On IPU3, adaptive threshold is always enabled.
* @reserved1: reserved
* @reserved2: reserved
 * @alpha: use the following defaults:
 * 13, 13, 13, 13, 0, 0, 0, 0
 * 11, 11, 11, 11, 0, 0, 0, 0
 * 14, 14, 14, 14, 0, 0, 0, 0
 * @beta: use the following defaults:
 * 24, 24, 24, 24
 * 21, 20, 20, 21
 * 25, 25, 25, 25
 * @color: use the defaults defined in drivers/media/pci/intel/ipu3-tables.c
 * @sqrt_lut: 11 bits per element, values =
 * [724 768 810 849 887
 * 923 958 991 1024 1056
 * 1086 1116 1145 1173 1201
 * 1228 1254 1280 1305 1330
 * 1355 1379 1402 1425 1448]
 * @xreset: Reset value of X for r^2 calculation. Value: col_start - X_center.
 * Constraint: Xreset + FrameWidth = 4095, Xreset = -4095, default -1632.
 * @reserved3: reserved
 * @yreset: Reset value of Y for r^2 calculation. Value: row_start - Y_center.
 * Constraint: Yreset + FrameHeight = 4095, Yreset = -4095, default -1224.
* @reserved4: reserved
* @x_sqr_reset: Reset value of X^2 for r^2 calculation Value = (Xreset)^2
* @r_normfactor: Normalization factor for R. Default 14.
* @reserved5: reserved
* @y_sqr_reset: Reset value of Y^2 for r^2 calculation Value = (Yreset)^2
* @gain_scale: Parameter describing shading gain as a function of distance
* from the image center.
* A single value per frame, loaded by the driver. Default 115.
*/
struct ipu3_uapi_anr_transform_config {
__u32 enable:1; /* 0 or 1, disabled or enabled */
__u32 adaptive_treshhold_en:1; /* On IPU3, always enabled */
__u32 reserved1:30;
__u8 reserved2[44];
struct ipu3_uapi_anr_alpha alpha[3];
struct ipu3_uapi_anr_beta beta[3];
struct ipu3_uapi_anr_plane_color color[3];
__u16 sqrt_lut[IPU3_UAPI_ANR_LUT_SIZE]; /* 11 bits per element */
__s16 xreset:13;
__u16 reserved3:3;
__s16 yreset:13;
__u16 reserved4:3;
__u32 x_sqr_reset:24;
__u32 r_normfactor:5;
__u32 reserved5:3;
__u32 y_sqr_reset:24;
__u32 gain_scale:8;
} __attribute__((packed));
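/*
 * Illustrative sketch, not part of the UAPI: deriving the ANR radial reset
 * values from the formulas in the comment above (xreset = col_start -
 * X_center, x_sqr_reset = xreset^2). The center coordinates are
 * hypothetical inputs a driver would know.
 */
static inline void ipu3_example_anr_reset(struct ipu3_uapi_anr_transform_config *tf,
					  int col_start, int x_center,
					  int row_start, int y_center)
{
	tf->xreset = col_start - x_center;
	tf->yreset = row_start - y_center;
	tf->x_sqr_reset = tf->xreset * tf->xreset;
	tf->y_sqr_reset = tf->yreset * tf->yreset;
}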
/**
* struct ipu3_uapi_anr_stitch_pyramid - ANR stitch pyramid
*
* @entry0: pyramid LUT entry0, range [0x0, 0x3f]
* @entry1: pyramid LUT entry1, range [0x0, 0x3f]
* @entry2: pyramid LUT entry2, range [0x0, 0x3f]
* @reserved: reserved
*/
struct ipu3_uapi_anr_stitch_pyramid {
__u32 entry0:6;
__u32 entry1:6;
__u32 entry2:6;
__u32 reserved:14;
} __attribute__((packed));
/**
* struct ipu3_uapi_anr_stitch_config - ANR stitch config
*
 * @anr_stitch_en: enable stitching; set to 1 to enable.
* @reserved: reserved
* @pyramid: pyramid table as defined by &ipu3_uapi_anr_stitch_pyramid
* default values:
* { 1, 3, 5 }, { 7, 7, 5 }, { 3, 1, 3 },
* { 9, 15, 21 }, { 21, 15, 9 }, { 3, 5, 15 },
* { 25, 35, 35 }, { 25, 15, 5 }, { 7, 21, 35 },
* { 49, 49, 35 }, { 21, 7, 7 }, { 21, 35, 49 },
* { 49, 35, 21 }, { 7, 5, 15 }, { 25, 35, 35 },
* { 25, 15, 5 }, { 3, 9, 15 }, { 21, 21, 15 },
* { 9, 3, 1 }, { 3, 5, 7 }, { 7, 5, 3}, { 1 }
*/
struct ipu3_uapi_anr_stitch_config {
__u32 anr_stitch_en;
__u8 reserved[44];
struct ipu3_uapi_anr_stitch_pyramid pyramid[IPU3_UAPI_ANR_PYRAMID_SIZE];
} __attribute__((packed));
/**
* struct ipu3_uapi_anr_config - ANR config
*
* @transform: advanced noise reduction transform config as specified by
* &ipu3_uapi_anr_transform_config
 * @stitch: creates a 4x4 patch from 4 surrounding 8x8 patches.
*/
struct ipu3_uapi_anr_config {
struct ipu3_uapi_anr_transform_config transform __attribute__((aligned(32)));
struct ipu3_uapi_anr_stitch_config stitch __attribute__((aligned(32)));
} __attribute__((packed));
/**
* struct ipu3_uapi_acc_param - Accelerator cluster parameters
*
* ACC refers to the HW cluster containing all Fixed Functions (FFs). Each FF
* implements a specific algorithm.
*
* @bnr: parameters for bayer noise reduction static config. See
* &ipu3_uapi_bnr_static_config
* @green_disparity: disparity static config between gr and gb channel.
* See &ipu3_uapi_bnr_static_config_green_disparity
* @dm: de-mosaic config. See &ipu3_uapi_dm_config
* @ccm: color correction matrix. See &ipu3_uapi_ccm_mat_config
* @gamma: gamma correction config. See &ipu3_uapi_gamma_config
* @csc: color space conversion matrix. See &ipu3_uapi_csc_mat_config
* @cds: color down sample config. See &ipu3_uapi_cds_params
* @shd: lens shading correction config. See &ipu3_uapi_shd_config
* @iefd: Image enhancement filter and denoise config.
* &ipu3_uapi_yuvp1_iefd_config
* @yds_c0: y down scaler config. &ipu3_uapi_yuvp1_yds_config
* @chnr_c0: chroma noise reduction config. &ipu3_uapi_yuvp1_chnr_config
* @y_ee_nr: y edge enhancement and noise reduction config.
* &ipu3_uapi_yuvp1_y_ee_nr_config
* @yds: y down scaler config. See &ipu3_uapi_yuvp1_yds_config
* @chnr: chroma noise reduction config. See &ipu3_uapi_yuvp1_chnr_config
* @yds2: y channel down scaler config. See &ipu3_uapi_yuvp1_yds_config
* @tcc: total color correction config as defined in struct
* &ipu3_uapi_yuvp2_tcc_static_config
* @anr: advanced noise reduction config.See &ipu3_uapi_anr_config
 * @awb_fr: AWB filter response config. See &ipu3_uapi_awb_fr_config_s
 * @ae: auto exposure config, as specified by &ipu3_uapi_ae_config
 * @af: auto focus config, as specified by &ipu3_uapi_af_config_s
 * @awb: auto white balance config, as specified by &ipu3_uapi_awb_config
*/
struct ipu3_uapi_acc_param {
struct ipu3_uapi_bnr_static_config bnr;
struct ipu3_uapi_bnr_static_config_green_disparity
green_disparity __attribute__((aligned(32)));
struct ipu3_uapi_dm_config dm __attribute__((aligned(32)));
struct ipu3_uapi_ccm_mat_config ccm __attribute__((aligned(32)));
struct ipu3_uapi_gamma_config gamma __attribute__((aligned(32)));
struct ipu3_uapi_csc_mat_config csc __attribute__((aligned(32)));
struct ipu3_uapi_cds_params cds __attribute__((aligned(32)));
struct ipu3_uapi_shd_config shd __attribute__((aligned(32)));
struct ipu3_uapi_yuvp1_iefd_config iefd __attribute__((aligned(32)));
struct ipu3_uapi_yuvp1_yds_config yds_c0 __attribute__((aligned(32)));
struct ipu3_uapi_yuvp1_chnr_config chnr_c0 __attribute__((aligned(32)));
struct ipu3_uapi_yuvp1_y_ee_nr_config y_ee_nr __attribute__((aligned(32)));
struct ipu3_uapi_yuvp1_yds_config yds __attribute__((aligned(32)));
struct ipu3_uapi_yuvp1_chnr_config chnr __attribute__((aligned(32)));
struct ipu3_uapi_yuvp1_yds_config yds2 __attribute__((aligned(32)));
struct ipu3_uapi_yuvp2_tcc_static_config tcc __attribute__((aligned(32)));
struct ipu3_uapi_anr_config anr;
struct ipu3_uapi_awb_fr_config_s awb_fr;
struct ipu3_uapi_ae_config ae;
struct ipu3_uapi_af_config_s af;
struct ipu3_uapi_awb_config awb;
} __attribute__((packed));
/**
* struct ipu3_uapi_isp_lin_vmem_params - Linearization parameters
*
* @lin_lutlow_gr: linearization look-up table for GR channel interpolation.
* @lin_lutlow_r: linearization look-up table for R channel interpolation.
* @lin_lutlow_b: linearization look-up table for B channel interpolation.
* @lin_lutlow_gb: linearization look-up table for GB channel interpolation.
* lin_lutlow_gr / lin_lutlow_r / lin_lutlow_b /
* lin_lutlow_gb <= LIN_MAX_VALUE - 1.
* @lin_lutdif_gr: lin_lutlow_gr[i+1] - lin_lutlow_gr[i].
* @lin_lutdif_r: lin_lutlow_r[i+1] - lin_lutlow_r[i].
* @lin_lutdif_b: lin_lutlow_b[i+1] - lin_lutlow_b[i].
* @lin_lutdif_gb: lin_lutlow_gb[i+1] - lin_lutlow_gb[i].
*/
struct ipu3_uapi_isp_lin_vmem_params {
__s16 lin_lutlow_gr[IPU3_UAPI_LIN_LUT_SIZE];
__s16 lin_lutlow_r[IPU3_UAPI_LIN_LUT_SIZE];
__s16 lin_lutlow_b[IPU3_UAPI_LIN_LUT_SIZE];
__s16 lin_lutlow_gb[IPU3_UAPI_LIN_LUT_SIZE];
__s16 lin_lutdif_gr[IPU3_UAPI_LIN_LUT_SIZE];
__s16 lin_lutdif_r[IPU3_UAPI_LIN_LUT_SIZE];
__s16 lin_lutdif_b[IPU3_UAPI_LIN_LUT_SIZE];
__s16 lin_lutdif_gb[IPU3_UAPI_LIN_LUT_SIZE];
} __attribute__((packed));
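/*
 * Illustrative sketch, not part of the UAPI: filling the difference LUTs
 * exactly as defined above (lin_lutdif_X[i] = lin_lutlow_X[i+1] -
 * lin_lutlow_X[i]). The last element has no successor and is left to the
 * caller.
 */
static inline void ipu3_example_lin_fill_dif(struct ipu3_uapi_isp_lin_vmem_params *p)
{
	unsigned int i;

	for (i = 0; i < IPU3_UAPI_LIN_LUT_SIZE - 1; i++) {
		p->lin_lutdif_gr[i] = p->lin_lutlow_gr[i + 1] - p->lin_lutlow_gr[i];
		p->lin_lutdif_r[i]  = p->lin_lutlow_r[i + 1]  - p->lin_lutlow_r[i];
		p->lin_lutdif_b[i]  = p->lin_lutlow_b[i + 1]  - p->lin_lutlow_b[i];
		p->lin_lutdif_gb[i] = p->lin_lutlow_gb[i + 1] - p->lin_lutlow_gb[i];
	}
}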
/* Temporal Noise Reduction */
/**
* struct ipu3_uapi_isp_tnr3_vmem_params - Temporal noise reduction vector
* memory parameters
*
* @slope: slope setting in interpolation curve for temporal noise reduction.
* @reserved1: reserved
* @sigma: knee point setting in interpolation curve for temporal
* noise reduction.
* @reserved2: reserved
*/
struct ipu3_uapi_isp_tnr3_vmem_params {
__u16 slope[IPU3_UAPI_ISP_TNR3_VMEM_LEN];
__u16 reserved1[IPU3_UAPI_ISP_VEC_ELEMS
- IPU3_UAPI_ISP_TNR3_VMEM_LEN];
__u16 sigma[IPU3_UAPI_ISP_TNR3_VMEM_LEN];
__u16 reserved2[IPU3_UAPI_ISP_VEC_ELEMS
- IPU3_UAPI_ISP_TNR3_VMEM_LEN];
} __attribute__((packed));
/**
* struct ipu3_uapi_isp_tnr3_params - Temporal noise reduction v3 parameters
*
 * @knee_y1: Knee point Y1. TNR3 assumes the standard deviations of Y, U and
 * V at Y1 are TnrY1_Sigma_Y, U and V.
 * @knee_y2: Knee point Y2. TNR3 assumes the standard deviations of Y, U and
 * V at Y2 are TnrY2_Sigma_Y, U and V.
* @maxfb_y: Max feedback gain for Y
* @maxfb_u: Max feedback gain for U
* @maxfb_v: Max feedback gain for V
* @round_adj_y: rounding Adjust for Y
* @round_adj_u: rounding Adjust for U
* @round_adj_v: rounding Adjust for V
* @ref_buf_select: selection of the reference frame buffer to be used.
*/
struct ipu3_uapi_isp_tnr3_params {
__u32 knee_y1;
__u32 knee_y2;
__u32 maxfb_y;
__u32 maxfb_u;
__u32 maxfb_v;
__u32 round_adj_y;
__u32 round_adj_u;
__u32 round_adj_v;
__u32 ref_buf_select;
} __attribute__((packed));
/* Extreme Noise Reduction version 3 */
/**
* struct ipu3_uapi_isp_xnr3_vmem_params - Extreme noise reduction v3
* vector memory parameters
*
* @x: xnr3 parameters.
* @a: xnr3 parameters.
* @b: xnr3 parameters.
* @c: xnr3 parameters.
*/
struct ipu3_uapi_isp_xnr3_vmem_params {
__u16 x[IPU3_UAPI_ISP_VEC_ELEMS];
__u16 a[IPU3_UAPI_ISP_VEC_ELEMS];
__u16 b[IPU3_UAPI_ISP_VEC_ELEMS];
__u16 c[IPU3_UAPI_ISP_VEC_ELEMS];
} __attribute__((packed));
/**
* struct ipu3_uapi_xnr3_alpha_params - Extreme noise reduction v3
* alpha tuning parameters
*
* @y0: Sigma for Y range similarity in dark area.
* @u0: Sigma for U range similarity in dark area.
* @v0: Sigma for V range similarity in dark area.
* @ydiff: Sigma difference for Y between bright area and dark area.
* @udiff: Sigma difference for U between bright area and dark area.
* @vdiff: Sigma difference for V between bright area and dark area.
*/
struct ipu3_uapi_xnr3_alpha_params {
__u32 y0;
__u32 u0;
__u32 v0;
__u32 ydiff;
__u32 udiff;
__u32 vdiff;
} __attribute__((packed));
/**
* struct ipu3_uapi_xnr3_coring_params - Extreme noise reduction v3
* coring parameters
*
* @u0: Coring Threshold of U channel in dark area.
* @v0: Coring Threshold of V channel in dark area.
* @udiff: Threshold difference of U channel between bright and dark area.
* @vdiff: Threshold difference of V channel between bright and dark area.
*/
struct ipu3_uapi_xnr3_coring_params {
__u32 u0;
__u32 v0;
__u32 udiff;
__u32 vdiff;
} __attribute__((packed));
/**
* struct ipu3_uapi_xnr3_blending_params - Blending factor
*
* @strength: The factor for blending output with input. This is a tuning
* parameter; higher values lead to more aggressive XNR operation.
*/
struct ipu3_uapi_xnr3_blending_params {
__u32 strength;
} __attribute__((packed));
/**
* struct ipu3_uapi_isp_xnr3_params - Extreme noise reduction v3 parameters
*
* @alpha: parameters for xnr3 alpha. See &ipu3_uapi_xnr3_alpha_params
* @coring: parameters for xnr3 coring. See &ipu3_uapi_xnr3_coring_params
* @blending: parameters for xnr3 blending. See &ipu3_uapi_xnr3_blending_params
*/
struct ipu3_uapi_isp_xnr3_params {
struct ipu3_uapi_xnr3_alpha_params alpha;
struct ipu3_uapi_xnr3_coring_params coring;
struct ipu3_uapi_xnr3_blending_params blending;
} __attribute__((packed));
/***** Obgrid (optical black level compensation) table entry *****/
/**
* struct ipu3_uapi_obgrid_param - Optical black level compensation parameters
*
* @gr: Grid table values for color GR
* @r: Grid table values for color R
* @b: Grid table values for color B
* @gb: Grid table values for color GB
*
* Black level is different for red, green, and blue channels. So black level
* compensation is different per channel.
*/
struct ipu3_uapi_obgrid_param {
__u16 gr;
__u16 r;
__u16 b;
__u16 gb;
} __attribute__((packed));
/******************* V4L2_META_FMT_IPU3_PARAMS *******************/
/**
* struct ipu3_uapi_flags - bits to indicate which pipeline needs update
*
* @gdc: 0 = no update, 1 = update.
* @obgrid: 0 = no update, 1 = update.
* @reserved1: Not used.
* @acc_bnr: 0 = no update, 1 = update.
* @acc_green_disparity: 0 = no update, 1 = update.
* @acc_dm: 0 = no update, 1 = update.
* @acc_ccm: 0 = no update, 1 = update.
* @acc_gamma: 0 = no update, 1 = update.
* @acc_csc: 0 = no update, 1 = update.
* @acc_cds: 0 = no update, 1 = update.
* @acc_shd: 0 = no update, 1 = update.
* @reserved2: Not used.
* @acc_iefd: 0 = no update, 1 = update.
* @acc_yds_c0: 0 = no update, 1 = update.
* @acc_chnr_c0: 0 = no update, 1 = update.
* @acc_y_ee_nr: 0 = no update, 1 = update.
* @acc_yds: 0 = no update, 1 = update.
* @acc_chnr: 0 = no update, 1 = update.
* @acc_ytm: 0 = no update, 1 = update.
* @acc_yds2: 0 = no update, 1 = update.
* @acc_tcc: 0 = no update, 1 = update.
* @acc_dpc: 0 = no update, 1 = update.
* @acc_bds: 0 = no update, 1 = update.
* @acc_anr: 0 = no update, 1 = update.
* @acc_awb_fr: 0 = no update, 1 = update.
* @acc_ae: 0 = no update, 1 = update.
* @acc_af: 0 = no update, 1 = update.
* @acc_awb: 0 = no update, 1 = update.
* @reserved3: Not used.
* @lin_vmem_params: 0 = no update, 1 = update.
* @tnr3_vmem_params: 0 = no update, 1 = update.
* @xnr3_vmem_params: 0 = no update, 1 = update.
* @tnr3_dmem_params: 0 = no update, 1 = update.
* @xnr3_dmem_params: 0 = no update, 1 = update.
* @reserved4: Not used.
* @obgrid_param: 0 = no update, 1 = update.
* @reserved5: Not used.
*/
struct ipu3_uapi_flags {
__u32 gdc:1;
__u32 obgrid:1;
__u32 reserved1:30;
__u32 acc_bnr:1;
__u32 acc_green_disparity:1;
__u32 acc_dm:1;
__u32 acc_ccm:1;
__u32 acc_gamma:1;
__u32 acc_csc:1;
__u32 acc_cds:1;
__u32 acc_shd:1;
__u32 reserved2:2;
__u32 acc_iefd:1;
__u32 acc_yds_c0:1;
__u32 acc_chnr_c0:1;
__u32 acc_y_ee_nr:1;
__u32 acc_yds:1;
__u32 acc_chnr:1;
__u32 acc_ytm:1;
__u32 acc_yds2:1;
__u32 acc_tcc:1;
__u32 acc_dpc:1;
__u32 acc_bds:1;
__u32 acc_anr:1;
__u32 acc_awb_fr:1;
__u32 acc_ae:1;
__u32 acc_af:1;
__u32 acc_awb:1;
__u32 reserved3:4;
__u32 lin_vmem_params:1;
__u32 tnr3_vmem_params:1;
__u32 xnr3_vmem_params:1;
__u32 tnr3_dmem_params:1;
__u32 xnr3_dmem_params:1;
__u32 reserved4:1;
__u32 obgrid_param:1;
__u32 reserved5:25;
} __attribute__((packed));
/**
* struct ipu3_uapi_params - V4L2_META_FMT_IPU3_PARAMS
*
* @use: select which parameters to apply, see &ipu3_uapi_flags
* @acc_param: ACC parameters, as specified by &ipu3_uapi_acc_param
* @lin_vmem_params: linearization VMEM, as specified by
* &ipu3_uapi_isp_lin_vmem_params
* @tnr3_vmem_params: tnr3 VMEM as specified by
* &ipu3_uapi_isp_tnr3_vmem_params
* @xnr3_vmem_params: xnr3 VMEM as specified by
* &ipu3_uapi_isp_xnr3_vmem_params
* @tnr3_dmem_params: tnr3 DMEM as specified by &ipu3_uapi_isp_tnr3_params
* @xnr3_dmem_params: xnr3 DMEM as specified by &ipu3_uapi_isp_xnr3_params
* @obgrid_param: obgrid parameters as specified by
* &ipu3_uapi_obgrid_param
*
* The video queue "parameters" is of format V4L2_META_FMT_IPU3_PARAMS.
* This is a "single plane" v4l2_meta_format using V4L2_BUF_TYPE_META_OUTPUT.
*
* struct ipu3_uapi_params as defined below contains a lot of parameters and
* ipu3_uapi_flags selects which parameters to apply.
*/
struct ipu3_uapi_params {
/* Flags which of the settings below are to be applied */
struct ipu3_uapi_flags use __attribute__((aligned(32)));
/* Accelerator cluster parameters */
struct ipu3_uapi_acc_param acc_param;
/* ISP vector address space parameters */
struct ipu3_uapi_isp_lin_vmem_params lin_vmem_params;
struct ipu3_uapi_isp_tnr3_vmem_params tnr3_vmem_params;
struct ipu3_uapi_isp_xnr3_vmem_params xnr3_vmem_params;
/* ISP data memory (DMEM) parameters */
struct ipu3_uapi_isp_tnr3_params tnr3_dmem_params;
struct ipu3_uapi_isp_xnr3_params xnr3_dmem_params;
/* Optical black level compensation */
struct ipu3_uapi_obgrid_param obgrid_param;
} __attribute__((packed));
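/*
 * Example (editor's sketch, not part of the UAPI): updating only the
 * optical black level grid. Only the parameter groups whose bit is set in
 * @use are applied; the rest of the buffer is left alone.
 *
 *	struct ipu3_uapi_params params = { 0 };
 *
 *	params.use.obgrid_param = 1;
 *	params.obgrid_param.gr = 64;
 *	params.obgrid_param.r = 64;
 *	params.obgrid_param.b = 64;
 *	params.obgrid_param.gb = 64;
 *
 * The filled structure is then copied into a buffer queued on the
 * V4L2_BUF_TYPE_META_OUTPUT "parameters" node. The pedestal value 64 is
 * arbitrary, chosen for illustration only.
 */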
#endif /* __IPU3_UAPI_H */
|
0 | repos/libcamera/include | repos/libcamera/include/linux/bcm2835-isp.h | /* SPDX-License-Identifier: ((GPL-2.0+ WITH Linux-syscall-note) OR BSD-3-Clause) */
/*
* bcm2835-isp.h
*
* BCM2835 ISP driver - user space header file.
*
* Copyright © 2019-2020 Raspberry Pi Ltd
*
* Author: Naushir Patuck ([email protected])
*
*/
#ifndef __BCM2835_ISP_H_
#define __BCM2835_ISP_H_
#include <linux/v4l2-controls.h>
#define V4L2_CID_USER_BCM2835_ISP_CC_MATRIX \
(V4L2_CID_USER_BCM2835_ISP_BASE + 0x0001)
#define V4L2_CID_USER_BCM2835_ISP_LENS_SHADING \
(V4L2_CID_USER_BCM2835_ISP_BASE + 0x0002)
#define V4L2_CID_USER_BCM2835_ISP_BLACK_LEVEL \
(V4L2_CID_USER_BCM2835_ISP_BASE + 0x0003)
#define V4L2_CID_USER_BCM2835_ISP_GEQ \
(V4L2_CID_USER_BCM2835_ISP_BASE + 0x0004)
#define V4L2_CID_USER_BCM2835_ISP_GAMMA \
(V4L2_CID_USER_BCM2835_ISP_BASE + 0x0005)
#define V4L2_CID_USER_BCM2835_ISP_DENOISE \
(V4L2_CID_USER_BCM2835_ISP_BASE + 0x0006)
#define V4L2_CID_USER_BCM2835_ISP_SHARPEN \
(V4L2_CID_USER_BCM2835_ISP_BASE + 0x0007)
#define V4L2_CID_USER_BCM2835_ISP_DPC \
(V4L2_CID_USER_BCM2835_ISP_BASE + 0x0008)
#define V4L2_CID_USER_BCM2835_ISP_CDN \
(V4L2_CID_USER_BCM2835_ISP_BASE + 0x0009)
/*
* All structs below are directly mapped onto the equivalent structs in
* drivers/staging/vc04_services/vchiq-mmal/mmal-parameters.h
* for convenience.
*/
/**
* struct bcm2835_isp_rational - Rational value type.
*
* @num: Numerator.
* @den: Denominator.
*/
struct bcm2835_isp_rational {
__s32 num;
__u32 den;
};
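/*
 * Editor's note: as an example, the value 1.5 is expressed as
 * (struct bcm2835_isp_rational){ .num = 3, .den = 2 }.
 */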
/**
* struct bcm2835_isp_ccm - Colour correction matrix.
*
* @ccm: 3x3 correction matrix coefficients.
* @offsets: 1x3 correction offsets.
*/
struct bcm2835_isp_ccm {
struct bcm2835_isp_rational ccm[3][3];
__s32 offsets[3];
};
/**
* struct bcm2835_isp_custom_ccm - Custom CCM applied with the
* V4L2_CID_USER_BCM2835_ISP_CC_MATRIX ctrl.
*
* @enabled: Enable custom CCM.
* @ccm: Custom CCM coefficients and offsets.
*/
struct bcm2835_isp_custom_ccm {
__u32 enabled;
struct bcm2835_isp_ccm ccm;
};
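/*
 * Example (editor's sketch, not part of this header): programming an
 * identity matrix through the V4L2 extended-control interface. `fd' is
 * assumed to be an open handle on the ISP node, <sys/ioctl.h> and
 * <linux/videodev2.h> are assumed included, and error handling is omitted.
 *
 *	struct bcm2835_isp_custom_ccm ccm = { .enabled = 1 };
 *	struct v4l2_ext_control ctrl = {
 *		.id = V4L2_CID_USER_BCM2835_ISP_CC_MATRIX,
 *		.size = sizeof(ccm),
 *		.ptr = &ccm,
 *	};
 *	struct v4l2_ext_controls ctrls = { .count = 1, .controls = &ctrl };
 *	int i;
 *
 *	for (i = 0; i < 3; i++)
 *		ccm.ccm.ccm[i][i] = (struct bcm2835_isp_rational){ 1, 1 };
 *	ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
 */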
/**
* enum bcm2835_isp_gain_format - format of the gains in the lens shading
* tables used with the
* V4L2_CID_USER_BCM2835_ISP_LENS_SHADING ctrl.
*
* @GAIN_FORMAT_U0P8_1: Gains are u0.8 format, starting at 1.0
* @GAIN_FORMAT_U1P7_0: Gains are u1.7 format, starting at 0.0
* @GAIN_FORMAT_U1P7_1: Gains are u1.7 format, starting at 1.0
* @GAIN_FORMAT_U2P6_0: Gains are u2.6 format, starting at 0.0
* @GAIN_FORMAT_U2P6_1: Gains are u2.6 format, starting at 1.0
* @GAIN_FORMAT_U3P5_0: Gains are u3.5 format, starting at 0.0
* @GAIN_FORMAT_U3P5_1: Gains are u3.5 format, starting at 1.0
* @GAIN_FORMAT_U4P10: Gains are u4.10 format, starting at 0.0
*/
enum bcm2835_isp_gain_format {
GAIN_FORMAT_U0P8_1 = 0,
GAIN_FORMAT_U1P7_0 = 1,
GAIN_FORMAT_U1P7_1 = 2,
GAIN_FORMAT_U2P6_0 = 3,
GAIN_FORMAT_U2P6_1 = 4,
GAIN_FORMAT_U3P5_0 = 5,
GAIN_FORMAT_U3P5_1 = 6,
GAIN_FORMAT_U4P10 = 7,
};
/**
* struct bcm2835_isp_lens_shading - Lens shading tables supplied with the
* V4L2_CID_USER_BCM2835_ISP_LENS_SHADING
* ctrl.
*
* @enabled: Enable lens shading.
* @grid_cell_size: Size of grid cells in samples (16, 32, 64, 128 or 256).
* @grid_width: Width of lens shading tables in grid cells.
* @grid_stride: Row to row distance (in grid cells) between grid cells
* in the same horizontal location.
* @grid_height: Height of lens shading tables in grid cells.
* @dmabuf: dmabuf file handle containing the table.
* @ref_transform: Reference transform - unsupported, please pass zero.
* @corner_sampled: Whether the gains are sampled at the corner points
* of the grid cells or in the cell centres.
* @gain_format: Format of the gains (see enum &bcm2835_isp_gain_format).
*/
struct bcm2835_isp_lens_shading {
__u32 enabled;
__u32 grid_cell_size;
__u32 grid_width;
__u32 grid_stride;
__u32 grid_height;
__s32 dmabuf;
__u32 ref_transform;
__u32 corner_sampled;
__u32 gain_format;
};
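/*
 * Example (editor's sketch): sizing the grid to cover an image plane. The
 * +1 for corner-sampled tables is an assumption following the
 * @corner_sampled description (gains at cell corners need one extra sample
 * per axis); `ls', `image_width' and `image_height' are assumed declared
 * by the caller.
 *
 *	unsigned int cells_x = (image_width + ls.grid_cell_size - 1) /
 *			       ls.grid_cell_size;
 *	unsigned int cells_y = (image_height + ls.grid_cell_size - 1) /
 *			       ls.grid_cell_size;
 *
 *	ls.grid_width = ls.corner_sampled ? cells_x + 1 : cells_x;
 *	ls.grid_height = ls.corner_sampled ? cells_y + 1 : cells_y;
 *	ls.grid_stride = ls.grid_width;
 */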
/**
* struct bcm2835_isp_black_level - Sensor black level set with the
* V4L2_CID_USER_BCM2835_ISP_BLACK_LEVEL ctrl.
*
* @enabled: Enable black level.
* @black_level_r: Black level for red channel.
* @black_level_g: Black level for green channels.
* @black_level_b: Black level for blue channel.
*/
struct bcm2835_isp_black_level {
__u32 enabled;
__u16 black_level_r;
__u16 black_level_g;
__u16 black_level_b;
__u8 padding[2]; /* Unused */
};
/**
* struct bcm2835_isp_geq - Green equalisation parameters set with the
* V4L2_CID_USER_BCM2835_ISP_GEQ ctrl.
*
* @enabled: Enable green equalisation.
* @offset: Fixed offset of the green equalisation threshold.
* @slope: Slope of the green equalisation threshold.
*/
struct bcm2835_isp_geq {
__u32 enabled;
__u32 offset;
struct bcm2835_isp_rational slope;
};
#define BCM2835_NUM_GAMMA_PTS 33
/**
* struct bcm2835_isp_gamma - Gamma parameters set with the
* V4L2_CID_USER_BCM2835_ISP_GAMMA ctrl.
*
* @enabled: Enable gamma adjustment.
* @x: X values of the points defining the gamma curve.
* Values should be scaled to 16 bits.
* @y: Y values of the points defining the gamma curve.
* Values should be scaled to 16 bits.
*/
struct bcm2835_isp_gamma {
__u32 enabled;
__u16 x[BCM2835_NUM_GAMMA_PTS];
__u16 y[BCM2835_NUM_GAMMA_PTS];
};
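/*
 * Example (editor's sketch): filling the 33-point table with a plain
 * power-law curve. Both axes use the full 16-bit range, as the field
 * descriptions above require; pow() comes from <math.h>.
 *
 *	struct bcm2835_isp_gamma g = { .enabled = 1 };
 *	int i;
 *
 *	for (i = 0; i < BCM2835_NUM_GAMMA_PTS; i++) {
 *		double in = (double)i / (BCM2835_NUM_GAMMA_PTS - 1);
 *
 *		g.x[i] = (__u16)(in * 65535.0);
 *		g.y[i] = (__u16)(pow(in, 1.0 / 2.2) * 65535.0);
 *	}
 */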
/**
* enum bcm2835_isp_cdn_mode - Mode of operation for colour denoise.
*
* @CDN_MODE_FAST: Fast (but lower quality) colour denoise
* algorithm, typically used for video recording.
* @CDN_MODE_HIGH_QUALITY: High quality (but slower) colour denoise
* algorithm, typically used for stills capture.
*/
enum bcm2835_isp_cdn_mode {
CDN_MODE_FAST = 0,
CDN_MODE_HIGH_QUALITY = 1,
};
/**
* struct bcm2835_isp_cdn - Colour denoise parameters set with the
* V4L2_CID_USER_BCM2835_ISP_CDN ctrl.
*
* @enabled: Enable colour denoise.
* @mode: Colour denoise operating mode (see enum &bcm2835_isp_cdn_mode)
*/
struct bcm2835_isp_cdn {
__u32 enabled;
__u32 mode;
};
/**
* struct bcm2835_isp_denoise - Denoise parameters set with the
* V4L2_CID_USER_BCM2835_ISP_DENOISE ctrl.
*
* @enabled: Enable denoise.
* @constant: Fixed offset of the noise threshold.
* @slope: Slope of the noise threshold.
* @strength: Denoise strength between 0.0 (off) and 1.0 (maximum).
*/
struct bcm2835_isp_denoise {
__u32 enabled;
__u32 constant;
struct bcm2835_isp_rational slope;
struct bcm2835_isp_rational strength;
};
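/*
 * Editor's note (assumption): @constant and @slope together appear to
 * describe an affine noise threshold, threshold(x) = constant + slope * x,
 * with x a pixel intensity; evaluated in C as, e.g.:
 *
 *	double thr = d->constant +
 *		     (double)d->slope.num / d->slope.den * x;
 */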
/**
* struct bcm2835_isp_sharpen - Sharpen parameters set with the
* V4L2_CID_USER_BCM2835_ISP_SHARPEN ctrl.
*
* @enabled: Enable sharpening.
* @threshold: Threshold at which to start sharpening pixels.
* @strength: Strength with which pixel sharpening increases.
* @limit: Limit to the amount of sharpening applied.
*/
struct bcm2835_isp_sharpen {
__u32 enabled;
struct bcm2835_isp_rational threshold;
struct bcm2835_isp_rational strength;
struct bcm2835_isp_rational limit;
};
/**
* enum bcm2835_isp_dpc_mode - defective pixel correction (DPC) strength.
*
* @DPC_MODE_OFF: No DPC.
* @DPC_MODE_NORMAL: Normal DPC.
* @DPC_MODE_STRONG: Strong DPC.
*/
enum bcm2835_isp_dpc_mode {
DPC_MODE_OFF = 0,
DPC_MODE_NORMAL = 1,
DPC_MODE_STRONG = 2,
};
/**
* struct bcm2835_isp_dpc - Defective pixel correction (DPC) parameters set
* with the V4L2_CID_USER_BCM2835_ISP_DPC ctrl.
*
* @enabled: Enable DPC.
* @strength: DPC strength (see enum &bcm2835_isp_dpc_mode).
*/
struct bcm2835_isp_dpc {
__u32 enabled;
__u32 strength;
};
/*
* ISP statistics structures.
*
* The bcm2835_isp_stats structure is generated at the output of the
* statistics node. Note that this does not directly map onto the statistics
* output of the ISP HW. Instead, the MMAL firmware code maps the HW statistics
* to the bcm2835_isp_stats structure.
*/
#define DEFAULT_AWB_REGIONS_X 16
#define DEFAULT_AWB_REGIONS_Y 12
#define NUM_HISTOGRAMS 2
#define NUM_HISTOGRAM_BINS 128
#define AWB_REGIONS (DEFAULT_AWB_REGIONS_X * DEFAULT_AWB_REGIONS_Y)
#define FLOATING_REGIONS 16
#define AGC_REGIONS 16
#define FOCUS_REGIONS 12
/**
* struct bcm2835_isp_stats_hist - Histogram statistics
*
* @r_hist: Red channel histogram.
* @g_hist: Combined green channel histogram.
* @b_hist: Blue channel histogram.
*/
struct bcm2835_isp_stats_hist {
__u32 r_hist[NUM_HISTOGRAM_BINS];
__u32 g_hist[NUM_HISTOGRAM_BINS];
__u32 b_hist[NUM_HISTOGRAM_BINS];
};
/**
* struct bcm2835_isp_stats_region - Region sums.
*
* @counted: The number of 2x2 bayer tiles accumulated.
* @notcounted: The number of 2x2 bayer tiles not accumulated.
* @r_sum: Total sum of counted pixels in the red channel for a region.
* @g_sum: Total sum of counted pixels in the green channel for a region.
* @b_sum: Total sum of counted pixels in the blue channel for a region.
*/
struct bcm2835_isp_stats_region {
__u32 counted;
__u32 notcounted;
__u64 r_sum;
__u64 g_sum;
__u64 b_sum;
};
/**
* struct bcm2835_isp_stats_focus - Focus statistics.
*
* @contrast_val: Focus measure - accumulated output of the focus filter.
* In the first dimension, index [0] counts pixels below a
* preset threshold, and index [1] counts pixels above the
* threshold. In the second dimension, index [0] uses the
* first predefined filter, and index [1] uses the second
* predefined filter.
* @contrast_val_num: The number of counted pixels in the above accumulation.
*/
struct bcm2835_isp_stats_focus {
__u64 contrast_val[2][2];
__u32 contrast_val_num[2][2];
};
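/*
 * Example (editor's sketch): one possible per-region focus figure of merit
 * using the first predefined filter: the above-threshold accumulation
 * normalised by its pixel count. `f' is assumed to point at one entry of
 * the focus_stats array below.
 *
 *	double fom = f->contrast_val_num[1][0] ?
 *		     (double)f->contrast_val[1][0] /
 *		     f->contrast_val_num[1][0] : 0.0;
 */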
/**
* struct bcm2835_isp_stats - ISP statistics.
*
* @version: Version of the bcm2835_isp_stats structure.
* @size: Size of the bcm2835_isp_stats structure.
* @hist: Histogram statistics for the entire image.
* @awb_stats: Statistics for the regions defined for AWB calculations.
* @floating_stats: Statistics for arbitrarily placed (floating) regions.
* @agc_stats: Statistics for the regions defined for AGC calculations.
* @focus_stats: Focus filter statistics for the focus regions.
*/
struct bcm2835_isp_stats {
__u32 version;
__u32 size;
struct bcm2835_isp_stats_hist hist[NUM_HISTOGRAMS];
struct bcm2835_isp_stats_region awb_stats[AWB_REGIONS];
struct bcm2835_isp_stats_region floating_stats[FLOATING_REGIONS];
struct bcm2835_isp_stats_region agc_stats[AGC_REGIONS];
struct bcm2835_isp_stats_focus focus_stats[FOCUS_REGIONS];
};
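/*
 * Example (editor's sketch): a naive grey-world white balance estimate
 * from the AWB region sums. Purely illustrative; a real algorithm would
 * weight and filter the regions. `stats' is assumed to point at a
 * dequeued statistics buffer.
 *
 *	__u64 r = 0, g = 0, b = 0;
 *	int i;
 *
 *	for (i = 0; i < AWB_REGIONS; i++) {
 *		r += stats->awb_stats[i].r_sum;
 *		g += stats->awb_stats[i].g_sum;
 *		b += stats->awb_stats[i].b_sum;
 *	}
 *
 *	double gain_r = r ? (double)g / r : 1.0;
 *	double gain_b = b ? (double)g / b : 1.0;
 */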
#endif /* __BCM2835_ISP_H_ */
|
0 | repos/libcamera/include | repos/libcamera/include/linux/v4l2-controls.h | /* SPDX-License-Identifier: ((GPL-2.0+ WITH Linux-syscall-note) OR BSD-3-Clause) */
/*
* Video for Linux Two controls header file
*
* Copyright (C) 1999-2012 the contributors
*
* The contents of this header were split off from videodev2.h. All control
* definitions should be added to this header, which is included by
* videodev2.h.
*/
#ifndef __LINUX_V4L2_CONTROLS_H
#define __LINUX_V4L2_CONTROLS_H
#include <linux/const.h>
#include <linux/types.h>
/* Control classes */
#define V4L2_CTRL_CLASS_USER 0x00980000 /* Old-style 'user' controls */
#define V4L2_CTRL_CLASS_CODEC 0x00990000 /* Stateful codec controls */
#define V4L2_CTRL_CLASS_CAMERA 0x009a0000 /* Camera class controls */
#define V4L2_CTRL_CLASS_FM_TX 0x009b0000 /* FM Modulator controls */
#define V4L2_CTRL_CLASS_FLASH 0x009c0000 /* Camera flash controls */
#define V4L2_CTRL_CLASS_JPEG 0x009d0000 /* JPEG-compression controls */
#define V4L2_CTRL_CLASS_IMAGE_SOURCE 0x009e0000 /* Image source controls */
#define V4L2_CTRL_CLASS_IMAGE_PROC 0x009f0000 /* Image processing controls */
#define V4L2_CTRL_CLASS_DV 0x00a00000 /* Digital Video controls */
#define V4L2_CTRL_CLASS_FM_RX 0x00a10000 /* FM Receiver controls */
#define V4L2_CTRL_CLASS_RF_TUNER 0x00a20000 /* RF tuner controls */
#define V4L2_CTRL_CLASS_DETECT 0x00a30000 /* Detection controls */
#define V4L2_CTRL_CLASS_CODEC_STATELESS 0x00a40000 /* Stateless codecs controls */
#define V4L2_CTRL_CLASS_COLORIMETRY 0x00a50000 /* Colorimetry controls */
/* User-class control IDs */
#define V4L2_CID_BASE (V4L2_CTRL_CLASS_USER | 0x900)
#define V4L2_CID_USER_BASE V4L2_CID_BASE
#define V4L2_CID_USER_CLASS (V4L2_CTRL_CLASS_USER | 1)
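/*
 * Editor's note: an individual control ID is the class base plus a
 * per-control offset, so V4L2_CID_BRIGHTNESS below evaluates to
 * (0x00980000 | 0x900) + 0 == 0x00980900.
 */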
#define V4L2_CID_BRIGHTNESS (V4L2_CID_BASE+0)
#define V4L2_CID_CONTRAST (V4L2_CID_BASE+1)
#define V4L2_CID_SATURATION (V4L2_CID_BASE+2)
#define V4L2_CID_HUE (V4L2_CID_BASE+3)
#define V4L2_CID_AUDIO_VOLUME (V4L2_CID_BASE+5)
#define V4L2_CID_AUDIO_BALANCE (V4L2_CID_BASE+6)
#define V4L2_CID_AUDIO_BASS (V4L2_CID_BASE+7)
#define V4L2_CID_AUDIO_TREBLE (V4L2_CID_BASE+8)
#define V4L2_CID_AUDIO_MUTE (V4L2_CID_BASE+9)
#define V4L2_CID_AUDIO_LOUDNESS (V4L2_CID_BASE+10)
#define V4L2_CID_BLACK_LEVEL (V4L2_CID_BASE+11) /* Deprecated */
#define V4L2_CID_AUTO_WHITE_BALANCE (V4L2_CID_BASE+12)
#define V4L2_CID_DO_WHITE_BALANCE (V4L2_CID_BASE+13)
#define V4L2_CID_RED_BALANCE (V4L2_CID_BASE+14)
#define V4L2_CID_BLUE_BALANCE (V4L2_CID_BASE+15)
#define V4L2_CID_GAMMA (V4L2_CID_BASE+16)
#define V4L2_CID_WHITENESS (V4L2_CID_GAMMA) /* Deprecated */
#define V4L2_CID_EXPOSURE (V4L2_CID_BASE+17)
#define V4L2_CID_AUTOGAIN (V4L2_CID_BASE+18)
#define V4L2_CID_GAIN (V4L2_CID_BASE+19)
#define V4L2_CID_HFLIP (V4L2_CID_BASE+20)
#define V4L2_CID_VFLIP (V4L2_CID_BASE+21)
#define V4L2_CID_POWER_LINE_FREQUENCY (V4L2_CID_BASE+24)
enum v4l2_power_line_frequency {
V4L2_CID_POWER_LINE_FREQUENCY_DISABLED = 0,
V4L2_CID_POWER_LINE_FREQUENCY_50HZ = 1,
V4L2_CID_POWER_LINE_FREQUENCY_60HZ = 2,
V4L2_CID_POWER_LINE_FREQUENCY_AUTO = 3,
};
#define V4L2_CID_HUE_AUTO (V4L2_CID_BASE+25)
#define V4L2_CID_WHITE_BALANCE_TEMPERATURE (V4L2_CID_BASE+26)
#define V4L2_CID_SHARPNESS (V4L2_CID_BASE+27)
#define V4L2_CID_BACKLIGHT_COMPENSATION (V4L2_CID_BASE+28)
#define V4L2_CID_CHROMA_AGC (V4L2_CID_BASE+29)
#define V4L2_CID_COLOR_KILLER (V4L2_CID_BASE+30)
#define V4L2_CID_COLORFX (V4L2_CID_BASE+31)
enum v4l2_colorfx {
V4L2_COLORFX_NONE = 0,
V4L2_COLORFX_BW = 1,
V4L2_COLORFX_SEPIA = 2,
V4L2_COLORFX_NEGATIVE = 3,
V4L2_COLORFX_EMBOSS = 4,
V4L2_COLORFX_SKETCH = 5,
V4L2_COLORFX_SKY_BLUE = 6,
V4L2_COLORFX_GRASS_GREEN = 7,
V4L2_COLORFX_SKIN_WHITEN = 8,
V4L2_COLORFX_VIVID = 9,
V4L2_COLORFX_AQUA = 10,
V4L2_COLORFX_ART_FREEZE = 11,
V4L2_COLORFX_SILHOUETTE = 12,
V4L2_COLORFX_SOLARIZATION = 13,
V4L2_COLORFX_ANTIQUE = 14,
V4L2_COLORFX_SET_CBCR = 15,
V4L2_COLORFX_SET_RGB = 16,
};
#define V4L2_CID_AUTOBRIGHTNESS (V4L2_CID_BASE+32)
#define V4L2_CID_BAND_STOP_FILTER (V4L2_CID_BASE+33)
#define V4L2_CID_ROTATE (V4L2_CID_BASE+34)
#define V4L2_CID_BG_COLOR (V4L2_CID_BASE+35)
#define V4L2_CID_CHROMA_GAIN (V4L2_CID_BASE+36)
#define V4L2_CID_ILLUMINATORS_1 (V4L2_CID_BASE+37)
#define V4L2_CID_ILLUMINATORS_2 (V4L2_CID_BASE+38)
#define V4L2_CID_MIN_BUFFERS_FOR_CAPTURE (V4L2_CID_BASE+39)
#define V4L2_CID_MIN_BUFFERS_FOR_OUTPUT (V4L2_CID_BASE+40)
#define V4L2_CID_ALPHA_COMPONENT (V4L2_CID_BASE+41)
#define V4L2_CID_COLORFX_CBCR (V4L2_CID_BASE+42)
#define V4L2_CID_COLORFX_RGB (V4L2_CID_BASE+43)
/* last CID + 1 */
#define V4L2_CID_LASTP1 (V4L2_CID_BASE+44)
/* USER-class private control IDs */
/*
* The base for the meye driver controls. This driver was removed, but
* we keep this define in case any software still uses it.
*/
#define V4L2_CID_USER_MEYE_BASE (V4L2_CID_USER_BASE + 0x1000)
/* The base for the bttv driver controls.
* We reserve 32 controls for this driver. */
#define V4L2_CID_USER_BTTV_BASE (V4L2_CID_USER_BASE + 0x1010)
/* The base for the s2255 driver controls.
* We reserve 16 controls for this driver. */
#define V4L2_CID_USER_S2255_BASE (V4L2_CID_USER_BASE + 0x1030)
/*
* The base for the si476x driver controls. See include/media/drv-intf/si476x.h
* for the list of controls. Total of 16 controls is reserved for this driver
*/
#define V4L2_CID_USER_SI476X_BASE (V4L2_CID_USER_BASE + 0x1040)
/* The base for the TI VPE driver controls. Total of 16 controls is reserved for
* this driver */
#define V4L2_CID_USER_TI_VPE_BASE (V4L2_CID_USER_BASE + 0x1050)
/* The base for the saa7134 driver controls.
* We reserve 16 controls for this driver. */
#define V4L2_CID_USER_SAA7134_BASE (V4L2_CID_USER_BASE + 0x1060)
/* The base for the adv7180 driver controls.
* We reserve 16 controls for this driver. */
#define V4L2_CID_USER_ADV7180_BASE (V4L2_CID_USER_BASE + 0x1070)
/* The base for the tc358743 driver controls.
* We reserve 16 controls for this driver. */
#define V4L2_CID_USER_TC358743_BASE (V4L2_CID_USER_BASE + 0x1080)
/* The base for the max217x driver controls.
* We reserve 32 controls for this driver
*/
#define V4L2_CID_USER_MAX217X_BASE (V4L2_CID_USER_BASE + 0x1090)
/* The base for the imx driver controls.
* We reserve 16 controls for this driver. */
#define V4L2_CID_USER_IMX_BASE (V4L2_CID_USER_BASE + 0x10b0)
/*
* The base for the atmel isc driver controls.
* We reserve 32 controls for this driver.
*/
#define V4L2_CID_USER_ATMEL_ISC_BASE (V4L2_CID_USER_BASE + 0x10c0)
/*
* The base for the CODA driver controls.
* We reserve 16 controls for this driver.
*/
#define V4L2_CID_USER_CODA_BASE (V4L2_CID_USER_BASE + 0x10e0)
/*
* The base for MIPI CCS driver controls.
* We reserve 128 controls for this driver.
*/
#define V4L2_CID_USER_CCS_BASE (V4L2_CID_USER_BASE + 0x10f0)
/* The base for the bcm2835-isp driver controls.
* We reserve 16 controls for this driver. */
#define V4L2_CID_USER_BCM2835_ISP_BASE (V4L2_CID_USER_BASE + 0x10e0)
/*
* The base for Allegro driver controls.
* We reserve 16 controls for this driver.
*/
#define V4L2_CID_USER_ALLEGRO_BASE (V4L2_CID_USER_BASE + 0x1170)
/*
* The base for the isl7998x driver controls.
* We reserve 16 controls for this driver.
*/
#define V4L2_CID_USER_ISL7998X_BASE (V4L2_CID_USER_BASE + 0x1180)
/*
* The base for DW100 driver controls.
* We reserve 16 controls for this driver.
*/
#define V4L2_CID_USER_DW100_BASE (V4L2_CID_USER_BASE + 0x1190)
/*
* The base for Aspeed driver controls.
* We reserve 16 controls for this driver.
*/
#define V4L2_CID_USER_ASPEED_BASE (V4L2_CID_USER_BASE + 0x11a0)
/*
* The base for Nuvoton NPCM driver controls.
* We reserve 16 controls for this driver.
*/
#define V4L2_CID_USER_NPCM_BASE (V4L2_CID_USER_BASE + 0x11b0)
/*
* The base for THine THP7312 driver controls.
* We reserve 32 controls for this driver.
*/
#define V4L2_CID_USER_THP7312_BASE (V4L2_CID_USER_BASE + 0x11c0)
/* MPEG-class control IDs */
/* The MPEG controls are applicable to all codec controls
* and the 'MPEG' part of the define is historical */
#define V4L2_CID_CODEC_BASE (V4L2_CTRL_CLASS_CODEC | 0x900)
#define V4L2_CID_CODEC_CLASS (V4L2_CTRL_CLASS_CODEC | 1)
/* MPEG streams, specific to multiplexed streams */
#define V4L2_CID_MPEG_STREAM_TYPE (V4L2_CID_CODEC_BASE+0)
enum v4l2_mpeg_stream_type {
V4L2_MPEG_STREAM_TYPE_MPEG2_PS = 0, /* MPEG-2 program stream */
V4L2_MPEG_STREAM_TYPE_MPEG2_TS = 1, /* MPEG-2 transport stream */
V4L2_MPEG_STREAM_TYPE_MPEG1_SS = 2, /* MPEG-1 system stream */
V4L2_MPEG_STREAM_TYPE_MPEG2_DVD = 3, /* MPEG-2 DVD-compatible stream */
V4L2_MPEG_STREAM_TYPE_MPEG1_VCD = 4, /* MPEG-1 VCD-compatible stream */
V4L2_MPEG_STREAM_TYPE_MPEG2_SVCD = 5, /* MPEG-2 SVCD-compatible stream */
};
#define V4L2_CID_MPEG_STREAM_PID_PMT (V4L2_CID_CODEC_BASE+1)
#define V4L2_CID_MPEG_STREAM_PID_AUDIO (V4L2_CID_CODEC_BASE+2)
#define V4L2_CID_MPEG_STREAM_PID_VIDEO (V4L2_CID_CODEC_BASE+3)
#define V4L2_CID_MPEG_STREAM_PID_PCR (V4L2_CID_CODEC_BASE+4)
#define V4L2_CID_MPEG_STREAM_PES_ID_AUDIO (V4L2_CID_CODEC_BASE+5)
#define V4L2_CID_MPEG_STREAM_PES_ID_VIDEO (V4L2_CID_CODEC_BASE+6)
#define V4L2_CID_MPEG_STREAM_VBI_FMT (V4L2_CID_CODEC_BASE+7)
enum v4l2_mpeg_stream_vbi_fmt {
V4L2_MPEG_STREAM_VBI_FMT_NONE = 0, /* No VBI in the MPEG stream */
V4L2_MPEG_STREAM_VBI_FMT_IVTV = 1, /* VBI in private packets, IVTV format */
};
/* MPEG audio controls specific to multiplexed streams */
#define V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ (V4L2_CID_CODEC_BASE+100)
enum v4l2_mpeg_audio_sampling_freq {
V4L2_MPEG_AUDIO_SAMPLING_FREQ_44100 = 0,
V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000 = 1,
V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000 = 2,
};
#define V4L2_CID_MPEG_AUDIO_ENCODING (V4L2_CID_CODEC_BASE+101)
enum v4l2_mpeg_audio_encoding {
V4L2_MPEG_AUDIO_ENCODING_LAYER_1 = 0,
V4L2_MPEG_AUDIO_ENCODING_LAYER_2 = 1,
V4L2_MPEG_AUDIO_ENCODING_LAYER_3 = 2,
V4L2_MPEG_AUDIO_ENCODING_AAC = 3,
V4L2_MPEG_AUDIO_ENCODING_AC3 = 4,
};
#define V4L2_CID_MPEG_AUDIO_L1_BITRATE (V4L2_CID_CODEC_BASE+102)
enum v4l2_mpeg_audio_l1_bitrate {
V4L2_MPEG_AUDIO_L1_BITRATE_32K = 0,
V4L2_MPEG_AUDIO_L1_BITRATE_64K = 1,
V4L2_MPEG_AUDIO_L1_BITRATE_96K = 2,
V4L2_MPEG_AUDIO_L1_BITRATE_128K = 3,
V4L2_MPEG_AUDIO_L1_BITRATE_160K = 4,
V4L2_MPEG_AUDIO_L1_BITRATE_192K = 5,
V4L2_MPEG_AUDIO_L1_BITRATE_224K = 6,
V4L2_MPEG_AUDIO_L1_BITRATE_256K = 7,
V4L2_MPEG_AUDIO_L1_BITRATE_288K = 8,
V4L2_MPEG_AUDIO_L1_BITRATE_320K = 9,
V4L2_MPEG_AUDIO_L1_BITRATE_352K = 10,
V4L2_MPEG_AUDIO_L1_BITRATE_384K = 11,
V4L2_MPEG_AUDIO_L1_BITRATE_416K = 12,
V4L2_MPEG_AUDIO_L1_BITRATE_448K = 13,
};
#define V4L2_CID_MPEG_AUDIO_L2_BITRATE (V4L2_CID_CODEC_BASE+103)
enum v4l2_mpeg_audio_l2_bitrate {
V4L2_MPEG_AUDIO_L2_BITRATE_32K = 0,
V4L2_MPEG_AUDIO_L2_BITRATE_48K = 1,
V4L2_MPEG_AUDIO_L2_BITRATE_56K = 2,
V4L2_MPEG_AUDIO_L2_BITRATE_64K = 3,
V4L2_MPEG_AUDIO_L2_BITRATE_80K = 4,
V4L2_MPEG_AUDIO_L2_BITRATE_96K = 5,
V4L2_MPEG_AUDIO_L2_BITRATE_112K = 6,
V4L2_MPEG_AUDIO_L2_BITRATE_128K = 7,
V4L2_MPEG_AUDIO_L2_BITRATE_160K = 8,
V4L2_MPEG_AUDIO_L2_BITRATE_192K = 9,
V4L2_MPEG_AUDIO_L2_BITRATE_224K = 10,
V4L2_MPEG_AUDIO_L2_BITRATE_256K = 11,
V4L2_MPEG_AUDIO_L2_BITRATE_320K = 12,
V4L2_MPEG_AUDIO_L2_BITRATE_384K = 13,
};
#define V4L2_CID_MPEG_AUDIO_L3_BITRATE (V4L2_CID_CODEC_BASE+104)
enum v4l2_mpeg_audio_l3_bitrate {
V4L2_MPEG_AUDIO_L3_BITRATE_32K = 0,
V4L2_MPEG_AUDIO_L3_BITRATE_40K = 1,
V4L2_MPEG_AUDIO_L3_BITRATE_48K = 2,
V4L2_MPEG_AUDIO_L3_BITRATE_56K = 3,
V4L2_MPEG_AUDIO_L3_BITRATE_64K = 4,
V4L2_MPEG_AUDIO_L3_BITRATE_80K = 5,
V4L2_MPEG_AUDIO_L3_BITRATE_96K = 6,
V4L2_MPEG_AUDIO_L3_BITRATE_112K = 7,
V4L2_MPEG_AUDIO_L3_BITRATE_128K = 8,
V4L2_MPEG_AUDIO_L3_BITRATE_160K = 9,
V4L2_MPEG_AUDIO_L3_BITRATE_192K = 10,
V4L2_MPEG_AUDIO_L3_BITRATE_224K = 11,
V4L2_MPEG_AUDIO_L3_BITRATE_256K = 12,
V4L2_MPEG_AUDIO_L3_BITRATE_320K = 13,
};
#define V4L2_CID_MPEG_AUDIO_MODE (V4L2_CID_CODEC_BASE+105)
enum v4l2_mpeg_audio_mode {
V4L2_MPEG_AUDIO_MODE_STEREO = 0,
V4L2_MPEG_AUDIO_MODE_JOINT_STEREO = 1,
V4L2_MPEG_AUDIO_MODE_DUAL = 2,
V4L2_MPEG_AUDIO_MODE_MONO = 3,
};
#define V4L2_CID_MPEG_AUDIO_MODE_EXTENSION (V4L2_CID_CODEC_BASE+106)
enum v4l2_mpeg_audio_mode_extension {
V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4 = 0,
V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_8 = 1,
V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_12 = 2,
V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_16 = 3,
};
#define V4L2_CID_MPEG_AUDIO_EMPHASIS (V4L2_CID_CODEC_BASE+107)
enum v4l2_mpeg_audio_emphasis {
V4L2_MPEG_AUDIO_EMPHASIS_NONE = 0,
V4L2_MPEG_AUDIO_EMPHASIS_50_DIV_15_uS = 1,
V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17 = 2,
};
#define V4L2_CID_MPEG_AUDIO_CRC (V4L2_CID_CODEC_BASE+108)
enum v4l2_mpeg_audio_crc {
V4L2_MPEG_AUDIO_CRC_NONE = 0,
V4L2_MPEG_AUDIO_CRC_CRC16 = 1,
};
#define V4L2_CID_MPEG_AUDIO_MUTE (V4L2_CID_CODEC_BASE+109)
#define V4L2_CID_MPEG_AUDIO_AAC_BITRATE (V4L2_CID_CODEC_BASE+110)
#define V4L2_CID_MPEG_AUDIO_AC3_BITRATE (V4L2_CID_CODEC_BASE+111)
enum v4l2_mpeg_audio_ac3_bitrate {
V4L2_MPEG_AUDIO_AC3_BITRATE_32K = 0,
V4L2_MPEG_AUDIO_AC3_BITRATE_40K = 1,
V4L2_MPEG_AUDIO_AC3_BITRATE_48K = 2,
V4L2_MPEG_AUDIO_AC3_BITRATE_56K = 3,
V4L2_MPEG_AUDIO_AC3_BITRATE_64K = 4,
V4L2_MPEG_AUDIO_AC3_BITRATE_80K = 5,
V4L2_MPEG_AUDIO_AC3_BITRATE_96K = 6,
V4L2_MPEG_AUDIO_AC3_BITRATE_112K = 7,
V4L2_MPEG_AUDIO_AC3_BITRATE_128K = 8,
V4L2_MPEG_AUDIO_AC3_BITRATE_160K = 9,
V4L2_MPEG_AUDIO_AC3_BITRATE_192K = 10,
V4L2_MPEG_AUDIO_AC3_BITRATE_224K = 11,
V4L2_MPEG_AUDIO_AC3_BITRATE_256K = 12,
V4L2_MPEG_AUDIO_AC3_BITRATE_320K = 13,
V4L2_MPEG_AUDIO_AC3_BITRATE_384K = 14,
V4L2_MPEG_AUDIO_AC3_BITRATE_448K = 15,
V4L2_MPEG_AUDIO_AC3_BITRATE_512K = 16,
V4L2_MPEG_AUDIO_AC3_BITRATE_576K = 17,
V4L2_MPEG_AUDIO_AC3_BITRATE_640K = 18,
};
#define V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK (V4L2_CID_CODEC_BASE+112)
enum v4l2_mpeg_audio_dec_playback {
V4L2_MPEG_AUDIO_DEC_PLAYBACK_AUTO = 0,
V4L2_MPEG_AUDIO_DEC_PLAYBACK_STEREO = 1,
V4L2_MPEG_AUDIO_DEC_PLAYBACK_LEFT = 2,
V4L2_MPEG_AUDIO_DEC_PLAYBACK_RIGHT = 3,
V4L2_MPEG_AUDIO_DEC_PLAYBACK_MONO = 4,
V4L2_MPEG_AUDIO_DEC_PLAYBACK_SWAPPED_STEREO = 5,
};
#define V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK (V4L2_CID_CODEC_BASE+113)
/* MPEG video controls specific to multiplexed streams */
#define V4L2_CID_MPEG_VIDEO_ENCODING (V4L2_CID_CODEC_BASE+200)
enum v4l2_mpeg_video_encoding {
V4L2_MPEG_VIDEO_ENCODING_MPEG_1 = 0,
V4L2_MPEG_VIDEO_ENCODING_MPEG_2 = 1,
V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC = 2,
};
#define V4L2_CID_MPEG_VIDEO_ASPECT (V4L2_CID_CODEC_BASE+201)
enum v4l2_mpeg_video_aspect {
V4L2_MPEG_VIDEO_ASPECT_1x1 = 0,
V4L2_MPEG_VIDEO_ASPECT_4x3 = 1,
V4L2_MPEG_VIDEO_ASPECT_16x9 = 2,
V4L2_MPEG_VIDEO_ASPECT_221x100 = 3,
};
#define V4L2_CID_MPEG_VIDEO_B_FRAMES (V4L2_CID_CODEC_BASE+202)
#define V4L2_CID_MPEG_VIDEO_GOP_SIZE (V4L2_CID_CODEC_BASE+203)
#define V4L2_CID_MPEG_VIDEO_GOP_CLOSURE (V4L2_CID_CODEC_BASE+204)
#define V4L2_CID_MPEG_VIDEO_PULLDOWN (V4L2_CID_CODEC_BASE+205)
#define V4L2_CID_MPEG_VIDEO_BITRATE_MODE (V4L2_CID_CODEC_BASE+206)
enum v4l2_mpeg_video_bitrate_mode {
V4L2_MPEG_VIDEO_BITRATE_MODE_VBR = 0,
V4L2_MPEG_VIDEO_BITRATE_MODE_CBR = 1,
V4L2_MPEG_VIDEO_BITRATE_MODE_CQ = 2,
};
#define V4L2_CID_MPEG_VIDEO_BITRATE (V4L2_CID_CODEC_BASE+207)
#define V4L2_CID_MPEG_VIDEO_BITRATE_PEAK (V4L2_CID_CODEC_BASE+208)
#define V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION (V4L2_CID_CODEC_BASE+209)
#define V4L2_CID_MPEG_VIDEO_MUTE (V4L2_CID_CODEC_BASE+210)
#define V4L2_CID_MPEG_VIDEO_MUTE_YUV (V4L2_CID_CODEC_BASE+211)
#define V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE (V4L2_CID_CODEC_BASE+212)
#define V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER (V4L2_CID_CODEC_BASE+213)
#define V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB (V4L2_CID_CODEC_BASE+214)
#define V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE (V4L2_CID_CODEC_BASE+215)
#define V4L2_CID_MPEG_VIDEO_HEADER_MODE (V4L2_CID_CODEC_BASE+216)
enum v4l2_mpeg_video_header_mode {
V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE = 0,
V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME = 1,
};
#define V4L2_CID_MPEG_VIDEO_MAX_REF_PIC (V4L2_CID_CODEC_BASE+217)
#define V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE (V4L2_CID_CODEC_BASE+218)
#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES (V4L2_CID_CODEC_BASE+219)
#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB (V4L2_CID_CODEC_BASE+220)
#define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE (V4L2_CID_CODEC_BASE+221)
enum v4l2_mpeg_video_multi_slice_mode {
V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE = 0,
V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB = 1,
V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES = 2,
/* Kept for backwards compatibility reasons. Stupid typo... */
V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB = 1,
V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES = 2,
};
#define V4L2_CID_MPEG_VIDEO_VBV_SIZE (V4L2_CID_CODEC_BASE+222)
#define V4L2_CID_MPEG_VIDEO_DEC_PTS (V4L2_CID_CODEC_BASE+223)
#define V4L2_CID_MPEG_VIDEO_DEC_FRAME (V4L2_CID_CODEC_BASE+224)
#define V4L2_CID_MPEG_VIDEO_VBV_DELAY (V4L2_CID_CODEC_BASE+225)
#define V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER (V4L2_CID_CODEC_BASE+226)
#define V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE (V4L2_CID_CODEC_BASE+227)
#define V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE (V4L2_CID_CODEC_BASE+228)
#define V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME (V4L2_CID_CODEC_BASE+229)
#define V4L2_CID_MPEG_VIDEO_BASELAYER_PRIORITY_ID (V4L2_CID_CODEC_BASE+230)
#define V4L2_CID_MPEG_VIDEO_AU_DELIMITER (V4L2_CID_CODEC_BASE+231)
#define V4L2_CID_MPEG_VIDEO_LTR_COUNT (V4L2_CID_CODEC_BASE+232)
#define V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX (V4L2_CID_CODEC_BASE+233)
#define V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES (V4L2_CID_CODEC_BASE+234)
#define V4L2_CID_MPEG_VIDEO_DEC_CONCEAL_COLOR (V4L2_CID_CODEC_BASE+235)
#define V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD (V4L2_CID_CODEC_BASE+236)
#define V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE (V4L2_CID_CODEC_BASE+237)
enum v4l2_mpeg_video_intra_refresh_period_type {
V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE_RANDOM = 0,
V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE_CYCLIC = 1,
};
/* CIDs for the MPEG-2 Part 2 (H.262) codec */
#define V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL (V4L2_CID_CODEC_BASE+270)
enum v4l2_mpeg_video_mpeg2_level {
V4L2_MPEG_VIDEO_MPEG2_LEVEL_LOW = 0,
V4L2_MPEG_VIDEO_MPEG2_LEVEL_MAIN = 1,
V4L2_MPEG_VIDEO_MPEG2_LEVEL_HIGH_1440 = 2,
V4L2_MPEG_VIDEO_MPEG2_LEVEL_HIGH = 3,
};
#define V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE (V4L2_CID_CODEC_BASE+271)
enum v4l2_mpeg_video_mpeg2_profile {
V4L2_MPEG_VIDEO_MPEG2_PROFILE_SIMPLE = 0,
V4L2_MPEG_VIDEO_MPEG2_PROFILE_MAIN = 1,
V4L2_MPEG_VIDEO_MPEG2_PROFILE_SNR_SCALABLE = 2,
V4L2_MPEG_VIDEO_MPEG2_PROFILE_SPATIALLY_SCALABLE = 3,
V4L2_MPEG_VIDEO_MPEG2_PROFILE_HIGH = 4,
V4L2_MPEG_VIDEO_MPEG2_PROFILE_MULTIVIEW = 5,
};
/* CIDs for the FWHT codec as used by the vicodec driver. */
#define V4L2_CID_FWHT_I_FRAME_QP (V4L2_CID_CODEC_BASE + 290)
#define V4L2_CID_FWHT_P_FRAME_QP (V4L2_CID_CODEC_BASE + 291)
#define V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP (V4L2_CID_CODEC_BASE+300)
#define V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP (V4L2_CID_CODEC_BASE+301)
#define V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP (V4L2_CID_CODEC_BASE+302)
#define V4L2_CID_MPEG_VIDEO_H263_MIN_QP (V4L2_CID_CODEC_BASE+303)
#define V4L2_CID_MPEG_VIDEO_H263_MAX_QP (V4L2_CID_CODEC_BASE+304)
#define V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP (V4L2_CID_CODEC_BASE+350)
#define V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP (V4L2_CID_CODEC_BASE+351)
#define V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP (V4L2_CID_CODEC_BASE+352)
#define V4L2_CID_MPEG_VIDEO_H264_MIN_QP (V4L2_CID_CODEC_BASE+353)
#define V4L2_CID_MPEG_VIDEO_H264_MAX_QP (V4L2_CID_CODEC_BASE+354)
#define V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM (V4L2_CID_CODEC_BASE+355)
#define V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE (V4L2_CID_CODEC_BASE+356)
#define V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE (V4L2_CID_CODEC_BASE+357)
enum v4l2_mpeg_video_h264_entropy_mode {
V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC = 0,
V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC = 1,
};
#define V4L2_CID_MPEG_VIDEO_H264_I_PERIOD (V4L2_CID_CODEC_BASE+358)
#define V4L2_CID_MPEG_VIDEO_H264_LEVEL (V4L2_CID_CODEC_BASE+359)
enum v4l2_mpeg_video_h264_level {
V4L2_MPEG_VIDEO_H264_LEVEL_1_0 = 0,
V4L2_MPEG_VIDEO_H264_LEVEL_1B = 1,
V4L2_MPEG_VIDEO_H264_LEVEL_1_1 = 2,
V4L2_MPEG_VIDEO_H264_LEVEL_1_2 = 3,
V4L2_MPEG_VIDEO_H264_LEVEL_1_3 = 4,
V4L2_MPEG_VIDEO_H264_LEVEL_2_0 = 5,
V4L2_MPEG_VIDEO_H264_LEVEL_2_1 = 6,
V4L2_MPEG_VIDEO_H264_LEVEL_2_2 = 7,
V4L2_MPEG_VIDEO_H264_LEVEL_3_0 = 8,
V4L2_MPEG_VIDEO_H264_LEVEL_3_1 = 9,
V4L2_MPEG_VIDEO_H264_LEVEL_3_2 = 10,
V4L2_MPEG_VIDEO_H264_LEVEL_4_0 = 11,
V4L2_MPEG_VIDEO_H264_LEVEL_4_1 = 12,
V4L2_MPEG_VIDEO_H264_LEVEL_4_2 = 13,
V4L2_MPEG_VIDEO_H264_LEVEL_5_0 = 14,
V4L2_MPEG_VIDEO_H264_LEVEL_5_1 = 15,
V4L2_MPEG_VIDEO_H264_LEVEL_5_2 = 16,
V4L2_MPEG_VIDEO_H264_LEVEL_6_0 = 17,
V4L2_MPEG_VIDEO_H264_LEVEL_6_1 = 18,
V4L2_MPEG_VIDEO_H264_LEVEL_6_2 = 19,
};
#define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA (V4L2_CID_CODEC_BASE+360)
#define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA (V4L2_CID_CODEC_BASE+361)
#define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE (V4L2_CID_CODEC_BASE+362)
enum v4l2_mpeg_video_h264_loop_filter_mode {
V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED = 0,
V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED = 1,
V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY = 2,
};
#define V4L2_CID_MPEG_VIDEO_H264_PROFILE (V4L2_CID_CODEC_BASE+363)
enum v4l2_mpeg_video_h264_profile {
V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE = 0,
V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE = 1,
V4L2_MPEG_VIDEO_H264_PROFILE_MAIN = 2,
V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED = 3,
V4L2_MPEG_VIDEO_H264_PROFILE_HIGH = 4,
V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10 = 5,
V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422 = 6,
V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_PREDICTIVE = 7,
V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10_INTRA = 8,
V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422_INTRA = 9,
V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_INTRA = 10,
V4L2_MPEG_VIDEO_H264_PROFILE_CAVLC_444_INTRA = 11,
V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_BASELINE = 12,
V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH = 13,
V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH_INTRA = 14,
V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH = 15,
V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH = 16,
V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH = 17,
};
#define V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT (V4L2_CID_CODEC_BASE+364)
#define V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH (V4L2_CID_CODEC_BASE+365)
#define V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE (V4L2_CID_CODEC_BASE+366)
#define V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC (V4L2_CID_CODEC_BASE+367)
enum v4l2_mpeg_video_h264_vui_sar_idc {
V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED = 0,
V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1 = 1,
V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_12x11 = 2,
V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_10x11 = 3,
V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_16x11 = 4,
V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_40x33 = 5,
V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_24x11 = 6,
V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_20x11 = 7,
V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_32x11 = 8,
V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_80x33 = 9,
V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_18x11 = 10,
V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_15x11 = 11,
V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_64x33 = 12,
V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_160x99 = 13,
V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_4x3 = 14,
V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_3x2 = 15,
V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_2x1 = 16,
V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED = 17,
};
#define V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING (V4L2_CID_CODEC_BASE+368)
#define V4L2_CID_MPEG_VIDEO_H264_SEI_FP_CURRENT_FRAME_0 (V4L2_CID_CODEC_BASE+369)
#define V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE (V4L2_CID_CODEC_BASE+370)
enum v4l2_mpeg_video_h264_sei_fp_arrangement_type {
V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_CHECKERBOARD = 0,
V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_COLUMN = 1,
V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_ROW = 2,
V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_SIDE_BY_SIDE = 3,
V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_TOP_BOTTOM = 4,
V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_TEMPORAL = 5,
};
#define V4L2_CID_MPEG_VIDEO_H264_FMO (V4L2_CID_CODEC_BASE+371)
#define V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE (V4L2_CID_CODEC_BASE+372)
enum v4l2_mpeg_video_h264_fmo_map_type {
V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_INTERLEAVED_SLICES = 0,
V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_SCATTERED_SLICES = 1,
V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_FOREGROUND_WITH_LEFT_OVER = 2,
V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_BOX_OUT = 3,
V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_RASTER_SCAN = 4,
V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_WIPE_SCAN = 5,
V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_EXPLICIT = 6,
};
#define V4L2_CID_MPEG_VIDEO_H264_FMO_SLICE_GROUP (V4L2_CID_CODEC_BASE+373)
#define V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_DIRECTION (V4L2_CID_CODEC_BASE+374)
enum v4l2_mpeg_video_h264_fmo_change_dir {
V4L2_MPEG_VIDEO_H264_FMO_CHANGE_DIR_RIGHT = 0,
V4L2_MPEG_VIDEO_H264_FMO_CHANGE_DIR_LEFT = 1,
};
#define V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_RATE (V4L2_CID_CODEC_BASE+375)
#define V4L2_CID_MPEG_VIDEO_H264_FMO_RUN_LENGTH (V4L2_CID_CODEC_BASE+376)
#define V4L2_CID_MPEG_VIDEO_H264_ASO (V4L2_CID_CODEC_BASE+377)
#define V4L2_CID_MPEG_VIDEO_H264_ASO_SLICE_ORDER (V4L2_CID_CODEC_BASE+378)
#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING (V4L2_CID_CODEC_BASE+379)
#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE (V4L2_CID_CODEC_BASE+380)
enum v4l2_mpeg_video_h264_hierarchical_coding_type {
V4L2_MPEG_VIDEO_H264_HIERARCHICAL_CODING_B = 0,
V4L2_MPEG_VIDEO_H264_HIERARCHICAL_CODING_P = 1,
};
#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER (V4L2_CID_CODEC_BASE+381)
#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER_QP (V4L2_CID_CODEC_BASE+382)
#define V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION (V4L2_CID_CODEC_BASE+383)
#define V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET (V4L2_CID_CODEC_BASE+384)
#define V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MIN_QP (V4L2_CID_CODEC_BASE+385)
#define V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MAX_QP (V4L2_CID_CODEC_BASE+386)
#define V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP (V4L2_CID_CODEC_BASE+387)
#define V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MAX_QP (V4L2_CID_CODEC_BASE+388)
#define V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MIN_QP (V4L2_CID_CODEC_BASE+389)
#define V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MAX_QP (V4L2_CID_CODEC_BASE+390)
#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L0_BR (V4L2_CID_CODEC_BASE+391)
#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L1_BR (V4L2_CID_CODEC_BASE+392)
#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L2_BR (V4L2_CID_CODEC_BASE+393)
#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L3_BR (V4L2_CID_CODEC_BASE+394)
#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L4_BR (V4L2_CID_CODEC_BASE+395)
#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L5_BR (V4L2_CID_CODEC_BASE+396)
#define V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L6_BR (V4L2_CID_CODEC_BASE+397)
#define V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP (V4L2_CID_CODEC_BASE+400)
#define V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP (V4L2_CID_CODEC_BASE+401)
#define V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP (V4L2_CID_CODEC_BASE+402)
#define V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP (V4L2_CID_CODEC_BASE+403)
#define V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP (V4L2_CID_CODEC_BASE+404)
#define V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL (V4L2_CID_CODEC_BASE+405)
enum v4l2_mpeg_video_mpeg4_level {
V4L2_MPEG_VIDEO_MPEG4_LEVEL_0 = 0,
V4L2_MPEG_VIDEO_MPEG4_LEVEL_0B = 1,
V4L2_MPEG_VIDEO_MPEG4_LEVEL_1 = 2,
V4L2_MPEG_VIDEO_MPEG4_LEVEL_2 = 3,
V4L2_MPEG_VIDEO_MPEG4_LEVEL_3 = 4,
V4L2_MPEG_VIDEO_MPEG4_LEVEL_3B = 5,
V4L2_MPEG_VIDEO_MPEG4_LEVEL_4 = 6,
V4L2_MPEG_VIDEO_MPEG4_LEVEL_5 = 7,
};
#define V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE (V4L2_CID_CODEC_BASE+406)
enum v4l2_mpeg_video_mpeg4_profile {
V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE = 0,
V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE = 1,
V4L2_MPEG_VIDEO_MPEG4_PROFILE_CORE = 2,
V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE_SCALABLE = 3,
V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY = 4,
};
#define V4L2_CID_MPEG_VIDEO_MPEG4_QPEL (V4L2_CID_CODEC_BASE+407)
/* Control IDs for VP8 streams
* Although VP8 is not part of MPEG we add these controls to the MPEG class
* as that class is already handling other video compression standards
*/
#define V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS (V4L2_CID_CODEC_BASE+500)
enum v4l2_vp8_num_partitions {
V4L2_CID_MPEG_VIDEO_VPX_1_PARTITION = 0,
V4L2_CID_MPEG_VIDEO_VPX_2_PARTITIONS = 1,
V4L2_CID_MPEG_VIDEO_VPX_4_PARTITIONS = 2,
V4L2_CID_MPEG_VIDEO_VPX_8_PARTITIONS = 3,
};
#define V4L2_CID_MPEG_VIDEO_VPX_IMD_DISABLE_4X4 (V4L2_CID_CODEC_BASE+501)
#define V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES (V4L2_CID_CODEC_BASE+502)
enum v4l2_vp8_num_ref_frames {
V4L2_CID_MPEG_VIDEO_VPX_1_REF_FRAME = 0,
V4L2_CID_MPEG_VIDEO_VPX_2_REF_FRAME = 1,
V4L2_CID_MPEG_VIDEO_VPX_3_REF_FRAME = 2,
};
#define V4L2_CID_MPEG_VIDEO_VPX_FILTER_LEVEL (V4L2_CID_CODEC_BASE+503)
#define V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS (V4L2_CID_CODEC_BASE+504)
#define V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD (V4L2_CID_CODEC_BASE+505)
#define V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL (V4L2_CID_CODEC_BASE+506)
enum v4l2_vp8_golden_frame_sel {
V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_PREV = 0,
V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_REF_PERIOD = 1,
};
#define V4L2_CID_MPEG_VIDEO_VPX_MIN_QP (V4L2_CID_CODEC_BASE+507)
#define V4L2_CID_MPEG_VIDEO_VPX_MAX_QP (V4L2_CID_CODEC_BASE+508)
#define V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP (V4L2_CID_CODEC_BASE+509)
#define V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP (V4L2_CID_CODEC_BASE+510)
#define V4L2_CID_MPEG_VIDEO_VP8_PROFILE (V4L2_CID_CODEC_BASE+511)
enum v4l2_mpeg_video_vp8_profile {
V4L2_MPEG_VIDEO_VP8_PROFILE_0 = 0,
V4L2_MPEG_VIDEO_VP8_PROFILE_1 = 1,
V4L2_MPEG_VIDEO_VP8_PROFILE_2 = 2,
V4L2_MPEG_VIDEO_VP8_PROFILE_3 = 3,
};
/* Deprecated alias for compatibility reasons. */
#define V4L2_CID_MPEG_VIDEO_VPX_PROFILE V4L2_CID_MPEG_VIDEO_VP8_PROFILE
#define V4L2_CID_MPEG_VIDEO_VP9_PROFILE (V4L2_CID_CODEC_BASE+512)
enum v4l2_mpeg_video_vp9_profile {
V4L2_MPEG_VIDEO_VP9_PROFILE_0 = 0,
V4L2_MPEG_VIDEO_VP9_PROFILE_1 = 1,
V4L2_MPEG_VIDEO_VP9_PROFILE_2 = 2,
V4L2_MPEG_VIDEO_VP9_PROFILE_3 = 3,
};
#define V4L2_CID_MPEG_VIDEO_VP9_LEVEL (V4L2_CID_CODEC_BASE+513)
enum v4l2_mpeg_video_vp9_level {
V4L2_MPEG_VIDEO_VP9_LEVEL_1_0 = 0,
V4L2_MPEG_VIDEO_VP9_LEVEL_1_1 = 1,
V4L2_MPEG_VIDEO_VP9_LEVEL_2_0 = 2,
V4L2_MPEG_VIDEO_VP9_LEVEL_2_1 = 3,
V4L2_MPEG_VIDEO_VP9_LEVEL_3_0 = 4,
V4L2_MPEG_VIDEO_VP9_LEVEL_3_1 = 5,
V4L2_MPEG_VIDEO_VP9_LEVEL_4_0 = 6,
V4L2_MPEG_VIDEO_VP9_LEVEL_4_1 = 7,
V4L2_MPEG_VIDEO_VP9_LEVEL_5_0 = 8,
V4L2_MPEG_VIDEO_VP9_LEVEL_5_1 = 9,
V4L2_MPEG_VIDEO_VP9_LEVEL_5_2 = 10,
V4L2_MPEG_VIDEO_VP9_LEVEL_6_0 = 11,
V4L2_MPEG_VIDEO_VP9_LEVEL_6_1 = 12,
V4L2_MPEG_VIDEO_VP9_LEVEL_6_2 = 13,
};
/* CIDs for HEVC encoding. */
#define V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP (V4L2_CID_CODEC_BASE + 600)
#define V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP (V4L2_CID_CODEC_BASE + 601)
#define V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP (V4L2_CID_CODEC_BASE + 602)
#define V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP (V4L2_CID_CODEC_BASE + 603)
#define V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP (V4L2_CID_CODEC_BASE + 604)
#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_QP (V4L2_CID_CODEC_BASE + 605)
#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE (V4L2_CID_CODEC_BASE + 606)
enum v4l2_mpeg_video_hevc_hier_coding_type {
V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_B = 0,
V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_P = 1,
};
#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER (V4L2_CID_CODEC_BASE + 607)
#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_QP (V4L2_CID_CODEC_BASE + 608)
#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_QP (V4L2_CID_CODEC_BASE + 609)
#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_QP (V4L2_CID_CODEC_BASE + 610)
#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_QP (V4L2_CID_CODEC_BASE + 611)
#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_QP (V4L2_CID_CODEC_BASE + 612)
#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_QP (V4L2_CID_CODEC_BASE + 613)
#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_QP (V4L2_CID_CODEC_BASE + 614)
#define V4L2_CID_MPEG_VIDEO_HEVC_PROFILE (V4L2_CID_CODEC_BASE + 615)
enum v4l2_mpeg_video_hevc_profile {
V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN = 0,
V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE = 1,
V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10 = 2,
};
#define V4L2_CID_MPEG_VIDEO_HEVC_LEVEL (V4L2_CID_CODEC_BASE + 616)
enum v4l2_mpeg_video_hevc_level {
V4L2_MPEG_VIDEO_HEVC_LEVEL_1 = 0,
V4L2_MPEG_VIDEO_HEVC_LEVEL_2 = 1,
V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1 = 2,
V4L2_MPEG_VIDEO_HEVC_LEVEL_3 = 3,
V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1 = 4,
V4L2_MPEG_VIDEO_HEVC_LEVEL_4 = 5,
V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1 = 6,
V4L2_MPEG_VIDEO_HEVC_LEVEL_5 = 7,
V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1 = 8,
V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2 = 9,
V4L2_MPEG_VIDEO_HEVC_LEVEL_6 = 10,
V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1 = 11,
V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2 = 12,
};
#define V4L2_CID_MPEG_VIDEO_HEVC_FRAME_RATE_RESOLUTION (V4L2_CID_CODEC_BASE + 617)
#define V4L2_CID_MPEG_VIDEO_HEVC_TIER (V4L2_CID_CODEC_BASE + 618)
enum v4l2_mpeg_video_hevc_tier {
V4L2_MPEG_VIDEO_HEVC_TIER_MAIN = 0,
V4L2_MPEG_VIDEO_HEVC_TIER_HIGH = 1,
};
#define V4L2_CID_MPEG_VIDEO_HEVC_MAX_PARTITION_DEPTH (V4L2_CID_CODEC_BASE + 619)
#define V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE (V4L2_CID_CODEC_BASE + 620)
enum v4l2_cid_mpeg_video_hevc_loop_filter_mode {
V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_DISABLED = 0,
V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_ENABLED = 1,
V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY = 2,
};
#define V4L2_CID_MPEG_VIDEO_HEVC_LF_BETA_OFFSET_DIV2 (V4L2_CID_CODEC_BASE + 621)
#define V4L2_CID_MPEG_VIDEO_HEVC_LF_TC_OFFSET_DIV2 (V4L2_CID_CODEC_BASE + 622)
#define V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE (V4L2_CID_CODEC_BASE + 623)
enum v4l2_cid_mpeg_video_hevc_refresh_type {
V4L2_MPEG_VIDEO_HEVC_REFRESH_NONE = 0,
V4L2_MPEG_VIDEO_HEVC_REFRESH_CRA = 1,
V4L2_MPEG_VIDEO_HEVC_REFRESH_IDR = 2,
};
#define V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_PERIOD (V4L2_CID_CODEC_BASE + 624)
#define V4L2_CID_MPEG_VIDEO_HEVC_LOSSLESS_CU (V4L2_CID_CODEC_BASE + 625)
#define V4L2_CID_MPEG_VIDEO_HEVC_CONST_INTRA_PRED (V4L2_CID_CODEC_BASE + 626)
#define V4L2_CID_MPEG_VIDEO_HEVC_WAVEFRONT (V4L2_CID_CODEC_BASE + 627)
#define V4L2_CID_MPEG_VIDEO_HEVC_GENERAL_PB (V4L2_CID_CODEC_BASE + 628)
#define V4L2_CID_MPEG_VIDEO_HEVC_TEMPORAL_ID (V4L2_CID_CODEC_BASE + 629)
#define V4L2_CID_MPEG_VIDEO_HEVC_STRONG_SMOOTHING (V4L2_CID_CODEC_BASE + 630)
#define V4L2_CID_MPEG_VIDEO_HEVC_MAX_NUM_MERGE_MV_MINUS1 (V4L2_CID_CODEC_BASE + 631)
#define V4L2_CID_MPEG_VIDEO_HEVC_INTRA_PU_SPLIT (V4L2_CID_CODEC_BASE + 632)
#define V4L2_CID_MPEG_VIDEO_HEVC_TMV_PREDICTION (V4L2_CID_CODEC_BASE + 633)
#define V4L2_CID_MPEG_VIDEO_HEVC_WITHOUT_STARTCODE (V4L2_CID_CODEC_BASE + 634)
#define V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD (V4L2_CID_CODEC_BASE + 635)
enum v4l2_cid_mpeg_video_hevc_size_of_length_field {
V4L2_MPEG_VIDEO_HEVC_SIZE_0 = 0,
V4L2_MPEG_VIDEO_HEVC_SIZE_1 = 1,
V4L2_MPEG_VIDEO_HEVC_SIZE_2 = 2,
V4L2_MPEG_VIDEO_HEVC_SIZE_4 = 3,
};
#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR (V4L2_CID_CODEC_BASE + 636)
#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR (V4L2_CID_CODEC_BASE + 637)
#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR (V4L2_CID_CODEC_BASE + 638)
#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR (V4L2_CID_CODEC_BASE + 639)
#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR (V4L2_CID_CODEC_BASE + 640)
#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR (V4L2_CID_CODEC_BASE + 641)
#define V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_BR (V4L2_CID_CODEC_BASE + 642)
#define V4L2_CID_MPEG_VIDEO_REF_NUMBER_FOR_PFRAMES (V4L2_CID_CODEC_BASE + 643)
#define V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR (V4L2_CID_CODEC_BASE + 644)
#define V4L2_CID_MPEG_VIDEO_CONSTANT_QUALITY (V4L2_CID_CODEC_BASE + 645)
#define V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE (V4L2_CID_CODEC_BASE + 646)
enum v4l2_mpeg_video_frame_skip_mode {
V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_DISABLED = 0,
V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_LEVEL_LIMIT = 1,
V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT = 2,
};
#define V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MIN_QP (V4L2_CID_CODEC_BASE + 647)
#define V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MAX_QP (V4L2_CID_CODEC_BASE + 648)
#define V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MIN_QP (V4L2_CID_CODEC_BASE + 649)
#define V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MAX_QP (V4L2_CID_CODEC_BASE + 650)
#define V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MIN_QP (V4L2_CID_CODEC_BASE + 651)
#define V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MAX_QP (V4L2_CID_CODEC_BASE + 652)
#define V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY (V4L2_CID_CODEC_BASE + 653)
#define V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE (V4L2_CID_CODEC_BASE + 654)
#define V4L2_CID_MPEG_VIDEO_AV1_PROFILE (V4L2_CID_CODEC_BASE + 655)
/**
* enum v4l2_mpeg_video_av1_profile - AV1 profiles
*
* @V4L2_MPEG_VIDEO_AV1_PROFILE_MAIN: compliant decoders must be able to decode
* streams with seq_profile equal to 0.
* @V4L2_MPEG_VIDEO_AV1_PROFILE_HIGH: compliant decoders must be able to decode
* streams with seq_profile less than or equal to 1.
* @V4L2_MPEG_VIDEO_AV1_PROFILE_PROFESSIONAL: compliant decoders must be able to
* decode streams with seq_profile less than or equal to 2.
*
* Conveys the highest profile a decoder can work with.
*/
enum v4l2_mpeg_video_av1_profile {
V4L2_MPEG_VIDEO_AV1_PROFILE_MAIN = 0,
V4L2_MPEG_VIDEO_AV1_PROFILE_HIGH = 1,
V4L2_MPEG_VIDEO_AV1_PROFILE_PROFESSIONAL = 2,
};
#define V4L2_CID_MPEG_VIDEO_AV1_LEVEL (V4L2_CID_CODEC_BASE + 656)
/**
* enum v4l2_mpeg_video_av1_level - AV1 levels
*
* @V4L2_MPEG_VIDEO_AV1_LEVEL_2_0: Level 2.0.
* @V4L2_MPEG_VIDEO_AV1_LEVEL_2_1: Level 2.1.
* @V4L2_MPEG_VIDEO_AV1_LEVEL_2_2: Level 2.2.
* @V4L2_MPEG_VIDEO_AV1_LEVEL_2_3: Level 2.3.
* @V4L2_MPEG_VIDEO_AV1_LEVEL_3_0: Level 3.0.
* @V4L2_MPEG_VIDEO_AV1_LEVEL_3_1: Level 3.1.
* @V4L2_MPEG_VIDEO_AV1_LEVEL_3_2: Level 3.2.
* @V4L2_MPEG_VIDEO_AV1_LEVEL_3_3: Level 3.3.
* @V4L2_MPEG_VIDEO_AV1_LEVEL_4_0: Level 4.0.
* @V4L2_MPEG_VIDEO_AV1_LEVEL_4_1: Level 4.1.
* @V4L2_MPEG_VIDEO_AV1_LEVEL_4_2: Level 4.2.
* @V4L2_MPEG_VIDEO_AV1_LEVEL_4_3: Level 4.3.
* @V4L2_MPEG_VIDEO_AV1_LEVEL_5_0: Level 5.0.
* @V4L2_MPEG_VIDEO_AV1_LEVEL_5_1: Level 5.1.
* @V4L2_MPEG_VIDEO_AV1_LEVEL_5_2: Level 5.2.
* @V4L2_MPEG_VIDEO_AV1_LEVEL_5_3: Level 5.3.
* @V4L2_MPEG_VIDEO_AV1_LEVEL_6_0: Level 6.0.
* @V4L2_MPEG_VIDEO_AV1_LEVEL_6_1: Level 6.1.
* @V4L2_MPEG_VIDEO_AV1_LEVEL_6_2: Level 6.2.
* @V4L2_MPEG_VIDEO_AV1_LEVEL_6_3: Level 6.3.
* @V4L2_MPEG_VIDEO_AV1_LEVEL_7_0: Level 7.0.
* @V4L2_MPEG_VIDEO_AV1_LEVEL_7_1: Level 7.1.
* @V4L2_MPEG_VIDEO_AV1_LEVEL_7_2: Level 7.2.
* @V4L2_MPEG_VIDEO_AV1_LEVEL_7_3: Level 7.3.
*
* Conveys the highest level a decoder can work with.
*/
enum v4l2_mpeg_video_av1_level {
V4L2_MPEG_VIDEO_AV1_LEVEL_2_0 = 0,
V4L2_MPEG_VIDEO_AV1_LEVEL_2_1 = 1,
V4L2_MPEG_VIDEO_AV1_LEVEL_2_2 = 2,
V4L2_MPEG_VIDEO_AV1_LEVEL_2_3 = 3,
V4L2_MPEG_VIDEO_AV1_LEVEL_3_0 = 4,
V4L2_MPEG_VIDEO_AV1_LEVEL_3_1 = 5,
V4L2_MPEG_VIDEO_AV1_LEVEL_3_2 = 6,
V4L2_MPEG_VIDEO_AV1_LEVEL_3_3 = 7,
V4L2_MPEG_VIDEO_AV1_LEVEL_4_0 = 8,
V4L2_MPEG_VIDEO_AV1_LEVEL_4_1 = 9,
V4L2_MPEG_VIDEO_AV1_LEVEL_4_2 = 10,
V4L2_MPEG_VIDEO_AV1_LEVEL_4_3 = 11,
V4L2_MPEG_VIDEO_AV1_LEVEL_5_0 = 12,
V4L2_MPEG_VIDEO_AV1_LEVEL_5_1 = 13,
V4L2_MPEG_VIDEO_AV1_LEVEL_5_2 = 14,
V4L2_MPEG_VIDEO_AV1_LEVEL_5_3 = 15,
V4L2_MPEG_VIDEO_AV1_LEVEL_6_0 = 16,
V4L2_MPEG_VIDEO_AV1_LEVEL_6_1 = 17,
V4L2_MPEG_VIDEO_AV1_LEVEL_6_2 = 18,
V4L2_MPEG_VIDEO_AV1_LEVEL_6_3 = 19,
V4L2_MPEG_VIDEO_AV1_LEVEL_7_0 = 20,
V4L2_MPEG_VIDEO_AV1_LEVEL_7_1 = 21,
V4L2_MPEG_VIDEO_AV1_LEVEL_7_2 = 22,
V4L2_MPEG_VIDEO_AV1_LEVEL_7_3 = 23
};
/* MPEG-class control IDs specific to the CX2341x driver as defined by V4L2 */
#define V4L2_CID_CODEC_CX2341X_BASE (V4L2_CTRL_CLASS_CODEC | 0x1000)
#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE (V4L2_CID_CODEC_CX2341X_BASE+0)
enum v4l2_mpeg_cx2341x_video_spatial_filter_mode {
V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL = 0,
V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO = 1,
};
#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER (V4L2_CID_CODEC_CX2341X_BASE+1)
#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE (V4L2_CID_CODEC_CX2341X_BASE+2)
enum v4l2_mpeg_cx2341x_video_luma_spatial_filter_type {
V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_OFF = 0,
V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_HOR = 1,
V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_VERT = 2,
V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_HV_SEPARABLE = 3,
V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_SYM_NON_SEPARABLE = 4,
};
#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE (V4L2_CID_CODEC_CX2341X_BASE+3)
enum v4l2_mpeg_cx2341x_video_chroma_spatial_filter_type {
V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_OFF = 0,
V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR = 1,
};
#define V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE (V4L2_CID_CODEC_CX2341X_BASE+4)
enum v4l2_mpeg_cx2341x_video_temporal_filter_mode {
V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL = 0,
V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO = 1,
};
#define V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER (V4L2_CID_CODEC_CX2341X_BASE+5)
#define V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE (V4L2_CID_CODEC_CX2341X_BASE+6)
enum v4l2_mpeg_cx2341x_video_median_filter_type {
V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF = 0,
V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR = 1,
V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_VERT = 2,
V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR_VERT = 3,
V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_DIAG = 4,
};
#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM (V4L2_CID_CODEC_CX2341X_BASE+7)
#define V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP (V4L2_CID_CODEC_CX2341X_BASE+8)
#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM (V4L2_CID_CODEC_CX2341X_BASE+9)
#define V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP (V4L2_CID_CODEC_CX2341X_BASE+10)
#define V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS (V4L2_CID_CODEC_CX2341X_BASE+11)
/* MPEG-class control IDs specific to the Samsung MFC 5.1 driver as defined by V4L2 */
#define V4L2_CID_CODEC_MFC51_BASE (V4L2_CTRL_CLASS_CODEC | 0x1100)
#define V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY (V4L2_CID_CODEC_MFC51_BASE+0)
#define V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE (V4L2_CID_CODEC_MFC51_BASE+1)
#define V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE (V4L2_CID_CODEC_MFC51_BASE+2)
enum v4l2_mpeg_mfc51_video_frame_skip_mode {
V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_DISABLED = 0,
V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_LEVEL_LIMIT = 1,
V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT = 2,
};
#define V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE (V4L2_CID_CODEC_MFC51_BASE+3)
enum v4l2_mpeg_mfc51_video_force_frame_type {
V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_DISABLED = 0,
V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_I_FRAME = 1,
V4L2_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE_NOT_CODED = 2,
};
#define V4L2_CID_MPEG_MFC51_VIDEO_PADDING (V4L2_CID_CODEC_MFC51_BASE+4)
#define V4L2_CID_MPEG_MFC51_VIDEO_PADDING_YUV (V4L2_CID_CODEC_MFC51_BASE+5)
#define V4L2_CID_MPEG_MFC51_VIDEO_RC_FIXED_TARGET_BIT (V4L2_CID_CODEC_MFC51_BASE+6)
#define V4L2_CID_MPEG_MFC51_VIDEO_RC_REACTION_COEFF (V4L2_CID_CODEC_MFC51_BASE+7)
#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_ACTIVITY (V4L2_CID_CODEC_MFC51_BASE+50)
#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_DARK (V4L2_CID_CODEC_MFC51_BASE+51)
#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_SMOOTH (V4L2_CID_CODEC_MFC51_BASE+52)
#define V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_STATIC (V4L2_CID_CODEC_MFC51_BASE+53)
#define V4L2_CID_MPEG_MFC51_VIDEO_H264_NUM_REF_PIC_FOR_P (V4L2_CID_CODEC_MFC51_BASE+54)
/* Camera class control IDs */
#define V4L2_CID_CAMERA_CLASS_BASE (V4L2_CTRL_CLASS_CAMERA | 0x900)
#define V4L2_CID_CAMERA_CLASS (V4L2_CTRL_CLASS_CAMERA | 1)
#define V4L2_CID_EXPOSURE_AUTO (V4L2_CID_CAMERA_CLASS_BASE+1)
enum v4l2_exposure_auto_type {
V4L2_EXPOSURE_AUTO = 0,
V4L2_EXPOSURE_MANUAL = 1,
V4L2_EXPOSURE_SHUTTER_PRIORITY = 2,
V4L2_EXPOSURE_APERTURE_PRIORITY = 3
};
#define V4L2_CID_EXPOSURE_ABSOLUTE (V4L2_CID_CAMERA_CLASS_BASE+2)
#define V4L2_CID_EXPOSURE_AUTO_PRIORITY (V4L2_CID_CAMERA_CLASS_BASE+3)
#define V4L2_CID_PAN_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+4)
#define V4L2_CID_TILT_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+5)
#define V4L2_CID_PAN_RESET (V4L2_CID_CAMERA_CLASS_BASE+6)
#define V4L2_CID_TILT_RESET (V4L2_CID_CAMERA_CLASS_BASE+7)
#define V4L2_CID_PAN_ABSOLUTE (V4L2_CID_CAMERA_CLASS_BASE+8)
#define V4L2_CID_TILT_ABSOLUTE (V4L2_CID_CAMERA_CLASS_BASE+9)
#define V4L2_CID_FOCUS_ABSOLUTE (V4L2_CID_CAMERA_CLASS_BASE+10)
#define V4L2_CID_FOCUS_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+11)
#define V4L2_CID_FOCUS_AUTO (V4L2_CID_CAMERA_CLASS_BASE+12)
#define V4L2_CID_ZOOM_ABSOLUTE (V4L2_CID_CAMERA_CLASS_BASE+13)
#define V4L2_CID_ZOOM_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+14)
#define V4L2_CID_ZOOM_CONTINUOUS (V4L2_CID_CAMERA_CLASS_BASE+15)
#define V4L2_CID_PRIVACY (V4L2_CID_CAMERA_CLASS_BASE+16)
#define V4L2_CID_IRIS_ABSOLUTE (V4L2_CID_CAMERA_CLASS_BASE+17)
#define V4L2_CID_IRIS_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+18)
#define V4L2_CID_AUTO_EXPOSURE_BIAS (V4L2_CID_CAMERA_CLASS_BASE+19)
#define V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE (V4L2_CID_CAMERA_CLASS_BASE+20)
enum v4l2_auto_n_preset_white_balance {
V4L2_WHITE_BALANCE_MANUAL = 0,
V4L2_WHITE_BALANCE_AUTO = 1,
V4L2_WHITE_BALANCE_INCANDESCENT = 2,
V4L2_WHITE_BALANCE_FLUORESCENT = 3,
V4L2_WHITE_BALANCE_FLUORESCENT_H = 4,
V4L2_WHITE_BALANCE_HORIZON = 5,
V4L2_WHITE_BALANCE_DAYLIGHT = 6,
V4L2_WHITE_BALANCE_FLASH = 7,
V4L2_WHITE_BALANCE_CLOUDY = 8,
V4L2_WHITE_BALANCE_SHADE = 9,
};
#define V4L2_CID_WIDE_DYNAMIC_RANGE (V4L2_CID_CAMERA_CLASS_BASE+21)
#define V4L2_CID_IMAGE_STABILIZATION (V4L2_CID_CAMERA_CLASS_BASE+22)
#define V4L2_CID_ISO_SENSITIVITY (V4L2_CID_CAMERA_CLASS_BASE+23)
#define V4L2_CID_ISO_SENSITIVITY_AUTO (V4L2_CID_CAMERA_CLASS_BASE+24)
enum v4l2_iso_sensitivity_auto_type {
V4L2_ISO_SENSITIVITY_MANUAL = 0,
V4L2_ISO_SENSITIVITY_AUTO = 1,
};
#define V4L2_CID_EXPOSURE_METERING (V4L2_CID_CAMERA_CLASS_BASE+25)
enum v4l2_exposure_metering {
V4L2_EXPOSURE_METERING_AVERAGE = 0,
V4L2_EXPOSURE_METERING_CENTER_WEIGHTED = 1,
V4L2_EXPOSURE_METERING_SPOT = 2,
V4L2_EXPOSURE_METERING_MATRIX = 3,
};
#define V4L2_CID_SCENE_MODE (V4L2_CID_CAMERA_CLASS_BASE+26)
enum v4l2_scene_mode {
V4L2_SCENE_MODE_NONE = 0,
V4L2_SCENE_MODE_BACKLIGHT = 1,
V4L2_SCENE_MODE_BEACH_SNOW = 2,
V4L2_SCENE_MODE_CANDLE_LIGHT = 3,
V4L2_SCENE_MODE_DAWN_DUSK = 4,
V4L2_SCENE_MODE_FALL_COLORS = 5,
V4L2_SCENE_MODE_FIREWORKS = 6,
V4L2_SCENE_MODE_LANDSCAPE = 7,
V4L2_SCENE_MODE_NIGHT = 8,
V4L2_SCENE_MODE_PARTY_INDOOR = 9,
V4L2_SCENE_MODE_PORTRAIT = 10,
V4L2_SCENE_MODE_SPORTS = 11,
V4L2_SCENE_MODE_SUNSET = 12,
V4L2_SCENE_MODE_TEXT = 13,
};
#define V4L2_CID_3A_LOCK (V4L2_CID_CAMERA_CLASS_BASE+27)
#define V4L2_LOCK_EXPOSURE (1 << 0)
#define V4L2_LOCK_WHITE_BALANCE (1 << 1)
#define V4L2_LOCK_FOCUS (1 << 2)
#define V4L2_CID_AUTO_FOCUS_START (V4L2_CID_CAMERA_CLASS_BASE+28)
#define V4L2_CID_AUTO_FOCUS_STOP (V4L2_CID_CAMERA_CLASS_BASE+29)
#define V4L2_CID_AUTO_FOCUS_STATUS (V4L2_CID_CAMERA_CLASS_BASE+30)
#define V4L2_AUTO_FOCUS_STATUS_IDLE (0 << 0)
#define V4L2_AUTO_FOCUS_STATUS_BUSY (1 << 0)
#define V4L2_AUTO_FOCUS_STATUS_REACHED (1 << 1)
#define V4L2_AUTO_FOCUS_STATUS_FAILED (1 << 2)
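/*
 * Example (illustrative only): triggering a single autofocus run and
 * polling the resulting status bits. `fd` is an assumed open device node
 * and <stdio.h> is assumed included; a real application would bound the
 * polling loop.
 *
 *	struct v4l2_control start = { .id = V4L2_CID_AUTO_FOCUS_START };
 *	struct v4l2_control status = { .id = V4L2_CID_AUTO_FOCUS_STATUS };
 *
 *	ioctl(fd, VIDIOC_S_CTRL, &start);
 *	do {
 *		ioctl(fd, VIDIOC_G_CTRL, &status);
 *	} while (status.value & V4L2_AUTO_FOCUS_STATUS_BUSY);
 *	if (status.value & V4L2_AUTO_FOCUS_STATUS_FAILED)
 *		fprintf(stderr, "autofocus failed\n");
 */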
#define V4L2_CID_AUTO_FOCUS_RANGE (V4L2_CID_CAMERA_CLASS_BASE+31)
enum v4l2_auto_focus_range {
V4L2_AUTO_FOCUS_RANGE_AUTO = 0,
V4L2_AUTO_FOCUS_RANGE_NORMAL = 1,
V4L2_AUTO_FOCUS_RANGE_MACRO = 2,
V4L2_AUTO_FOCUS_RANGE_INFINITY = 3,
};
#define V4L2_CID_PAN_SPEED (V4L2_CID_CAMERA_CLASS_BASE+32)
#define V4L2_CID_TILT_SPEED (V4L2_CID_CAMERA_CLASS_BASE+33)
#define V4L2_CID_CAMERA_ORIENTATION (V4L2_CID_CAMERA_CLASS_BASE+34)
#define V4L2_CAMERA_ORIENTATION_FRONT 0
#define V4L2_CAMERA_ORIENTATION_BACK 1
#define V4L2_CAMERA_ORIENTATION_EXTERNAL 2
#define V4L2_CID_CAMERA_SENSOR_ROTATION (V4L2_CID_CAMERA_CLASS_BASE+35)
#define V4L2_CID_HDR_SENSOR_MODE (V4L2_CID_CAMERA_CLASS_BASE+36)
/* FM Modulator class control IDs */
#define V4L2_CID_FM_TX_CLASS_BASE (V4L2_CTRL_CLASS_FM_TX | 0x900)
#define V4L2_CID_FM_TX_CLASS (V4L2_CTRL_CLASS_FM_TX | 1)
#define V4L2_CID_RDS_TX_DEVIATION (V4L2_CID_FM_TX_CLASS_BASE + 1)
#define V4L2_CID_RDS_TX_PI (V4L2_CID_FM_TX_CLASS_BASE + 2)
#define V4L2_CID_RDS_TX_PTY (V4L2_CID_FM_TX_CLASS_BASE + 3)
#define V4L2_CID_RDS_TX_PS_NAME (V4L2_CID_FM_TX_CLASS_BASE + 5)
#define V4L2_CID_RDS_TX_RADIO_TEXT (V4L2_CID_FM_TX_CLASS_BASE + 6)
#define V4L2_CID_RDS_TX_MONO_STEREO (V4L2_CID_FM_TX_CLASS_BASE + 7)
#define V4L2_CID_RDS_TX_ARTIFICIAL_HEAD (V4L2_CID_FM_TX_CLASS_BASE + 8)
#define V4L2_CID_RDS_TX_COMPRESSED (V4L2_CID_FM_TX_CLASS_BASE + 9)
#define V4L2_CID_RDS_TX_DYNAMIC_PTY (V4L2_CID_FM_TX_CLASS_BASE + 10)
#define V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT (V4L2_CID_FM_TX_CLASS_BASE + 11)
#define V4L2_CID_RDS_TX_TRAFFIC_PROGRAM (V4L2_CID_FM_TX_CLASS_BASE + 12)
#define V4L2_CID_RDS_TX_MUSIC_SPEECH (V4L2_CID_FM_TX_CLASS_BASE + 13)
#define V4L2_CID_RDS_TX_ALT_FREQS_ENABLE (V4L2_CID_FM_TX_CLASS_BASE + 14)
#define V4L2_CID_RDS_TX_ALT_FREQS (V4L2_CID_FM_TX_CLASS_BASE + 15)
#define V4L2_CID_AUDIO_LIMITER_ENABLED (V4L2_CID_FM_TX_CLASS_BASE + 64)
#define V4L2_CID_AUDIO_LIMITER_RELEASE_TIME (V4L2_CID_FM_TX_CLASS_BASE + 65)
#define V4L2_CID_AUDIO_LIMITER_DEVIATION (V4L2_CID_FM_TX_CLASS_BASE + 66)
#define V4L2_CID_AUDIO_COMPRESSION_ENABLED (V4L2_CID_FM_TX_CLASS_BASE + 80)
#define V4L2_CID_AUDIO_COMPRESSION_GAIN (V4L2_CID_FM_TX_CLASS_BASE + 81)
#define V4L2_CID_AUDIO_COMPRESSION_THRESHOLD (V4L2_CID_FM_TX_CLASS_BASE + 82)
#define V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME (V4L2_CID_FM_TX_CLASS_BASE + 83)
#define V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME (V4L2_CID_FM_TX_CLASS_BASE + 84)
#define V4L2_CID_PILOT_TONE_ENABLED (V4L2_CID_FM_TX_CLASS_BASE + 96)
#define V4L2_CID_PILOT_TONE_DEVIATION (V4L2_CID_FM_TX_CLASS_BASE + 97)
#define V4L2_CID_PILOT_TONE_FREQUENCY (V4L2_CID_FM_TX_CLASS_BASE + 98)
#define V4L2_CID_TUNE_PREEMPHASIS (V4L2_CID_FM_TX_CLASS_BASE + 112)
enum v4l2_preemphasis {
V4L2_PREEMPHASIS_DISABLED = 0,
V4L2_PREEMPHASIS_50_uS = 1,
V4L2_PREEMPHASIS_75_uS = 2,
};
#define V4L2_CID_TUNE_POWER_LEVEL (V4L2_CID_FM_TX_CLASS_BASE + 113)
#define V4L2_CID_TUNE_ANTENNA_CAPACITOR (V4L2_CID_FM_TX_CLASS_BASE + 114)
/* Flash and privacy (indicator) light controls */
#define V4L2_CID_FLASH_CLASS_BASE (V4L2_CTRL_CLASS_FLASH | 0x900)
#define V4L2_CID_FLASH_CLASS (V4L2_CTRL_CLASS_FLASH | 1)
#define V4L2_CID_FLASH_LED_MODE (V4L2_CID_FLASH_CLASS_BASE + 1)
enum v4l2_flash_led_mode {
V4L2_FLASH_LED_MODE_NONE,
V4L2_FLASH_LED_MODE_FLASH,
V4L2_FLASH_LED_MODE_TORCH,
};
#define V4L2_CID_FLASH_STROBE_SOURCE (V4L2_CID_FLASH_CLASS_BASE + 2)
enum v4l2_flash_strobe_source {
V4L2_FLASH_STROBE_SOURCE_SOFTWARE,
V4L2_FLASH_STROBE_SOURCE_EXTERNAL,
};
#define V4L2_CID_FLASH_STROBE (V4L2_CID_FLASH_CLASS_BASE + 3)
#define V4L2_CID_FLASH_STROBE_STOP (V4L2_CID_FLASH_CLASS_BASE + 4)
#define V4L2_CID_FLASH_STROBE_STATUS (V4L2_CID_FLASH_CLASS_BASE + 5)
#define V4L2_CID_FLASH_TIMEOUT (V4L2_CID_FLASH_CLASS_BASE + 6)
#define V4L2_CID_FLASH_INTENSITY (V4L2_CID_FLASH_CLASS_BASE + 7)
#define V4L2_CID_FLASH_TORCH_INTENSITY (V4L2_CID_FLASH_CLASS_BASE + 8)
#define V4L2_CID_FLASH_INDICATOR_INTENSITY (V4L2_CID_FLASH_CLASS_BASE + 9)
#define V4L2_CID_FLASH_FAULT (V4L2_CID_FLASH_CLASS_BASE + 10)
#define V4L2_FLASH_FAULT_OVER_VOLTAGE (1 << 0)
#define V4L2_FLASH_FAULT_TIMEOUT (1 << 1)
#define V4L2_FLASH_FAULT_OVER_TEMPERATURE (1 << 2)
#define V4L2_FLASH_FAULT_SHORT_CIRCUIT (1 << 3)
#define V4L2_FLASH_FAULT_OVER_CURRENT (1 << 4)
#define V4L2_FLASH_FAULT_INDICATOR (1 << 5)
#define V4L2_FLASH_FAULT_UNDER_VOLTAGE (1 << 6)
#define V4L2_FLASH_FAULT_INPUT_VOLTAGE (1 << 7)
#define V4L2_FLASH_FAULT_LED_OVER_TEMPERATURE (1 << 8)
#define V4L2_CID_FLASH_CHARGE (V4L2_CID_FLASH_CLASS_BASE + 11)
#define V4L2_CID_FLASH_READY (V4L2_CID_FLASH_CLASS_BASE + 12)
/* JPEG-class control IDs */
#define V4L2_CID_JPEG_CLASS_BASE (V4L2_CTRL_CLASS_JPEG | 0x900)
#define V4L2_CID_JPEG_CLASS (V4L2_CTRL_CLASS_JPEG | 1)
#define V4L2_CID_JPEG_CHROMA_SUBSAMPLING (V4L2_CID_JPEG_CLASS_BASE + 1)
enum v4l2_jpeg_chroma_subsampling {
V4L2_JPEG_CHROMA_SUBSAMPLING_444 = 0,
V4L2_JPEG_CHROMA_SUBSAMPLING_422 = 1,
V4L2_JPEG_CHROMA_SUBSAMPLING_420 = 2,
V4L2_JPEG_CHROMA_SUBSAMPLING_411 = 3,
V4L2_JPEG_CHROMA_SUBSAMPLING_410 = 4,
V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY = 5,
};
#define V4L2_CID_JPEG_RESTART_INTERVAL (V4L2_CID_JPEG_CLASS_BASE + 2)
#define V4L2_CID_JPEG_COMPRESSION_QUALITY (V4L2_CID_JPEG_CLASS_BASE + 3)
#define V4L2_CID_JPEG_ACTIVE_MARKER (V4L2_CID_JPEG_CLASS_BASE + 4)
#define V4L2_JPEG_ACTIVE_MARKER_APP0 (1 << 0)
#define V4L2_JPEG_ACTIVE_MARKER_APP1 (1 << 1)
#define V4L2_JPEG_ACTIVE_MARKER_COM (1 << 16)
#define V4L2_JPEG_ACTIVE_MARKER_DQT (1 << 17)
#define V4L2_JPEG_ACTIVE_MARKER_DHT (1 << 18)
/* Image source controls */
#define V4L2_CID_IMAGE_SOURCE_CLASS_BASE (V4L2_CTRL_CLASS_IMAGE_SOURCE | 0x900)
#define V4L2_CID_IMAGE_SOURCE_CLASS (V4L2_CTRL_CLASS_IMAGE_SOURCE | 1)
#define V4L2_CID_VBLANK (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 1)
#define V4L2_CID_HBLANK (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 2)
#define V4L2_CID_ANALOGUE_GAIN (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 3)
#define V4L2_CID_TEST_PATTERN_RED (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 4)
#define V4L2_CID_TEST_PATTERN_GREENR (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 5)
#define V4L2_CID_TEST_PATTERN_BLUE (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 6)
#define V4L2_CID_TEST_PATTERN_GREENB (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 7)
#define V4L2_CID_UNIT_CELL_SIZE (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 8)
#define V4L2_CID_NOTIFY_GAINS (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 9)
/* Image processing controls */
#define V4L2_CID_IMAGE_PROC_CLASS_BASE (V4L2_CTRL_CLASS_IMAGE_PROC | 0x900)
#define V4L2_CID_IMAGE_PROC_CLASS (V4L2_CTRL_CLASS_IMAGE_PROC | 1)
#define V4L2_CID_LINK_FREQ (V4L2_CID_IMAGE_PROC_CLASS_BASE + 1)
#define V4L2_CID_PIXEL_RATE (V4L2_CID_IMAGE_PROC_CLASS_BASE + 2)
#define V4L2_CID_TEST_PATTERN (V4L2_CID_IMAGE_PROC_CLASS_BASE + 3)
#define V4L2_CID_DEINTERLACING_MODE (V4L2_CID_IMAGE_PROC_CLASS_BASE + 4)
#define V4L2_CID_DIGITAL_GAIN (V4L2_CID_IMAGE_PROC_CLASS_BASE + 5)
/* DV-class control IDs defined by V4L2 */
#define V4L2_CID_DV_CLASS_BASE (V4L2_CTRL_CLASS_DV | 0x900)
#define V4L2_CID_DV_CLASS (V4L2_CTRL_CLASS_DV | 1)
#define V4L2_CID_DV_TX_HOTPLUG (V4L2_CID_DV_CLASS_BASE + 1)
#define V4L2_CID_DV_TX_RXSENSE (V4L2_CID_DV_CLASS_BASE + 2)
#define V4L2_CID_DV_TX_EDID_PRESENT (V4L2_CID_DV_CLASS_BASE + 3)
#define V4L2_CID_DV_TX_MODE (V4L2_CID_DV_CLASS_BASE + 4)
enum v4l2_dv_tx_mode {
V4L2_DV_TX_MODE_DVI_D = 0,
V4L2_DV_TX_MODE_HDMI = 1,
};
#define V4L2_CID_DV_TX_RGB_RANGE (V4L2_CID_DV_CLASS_BASE + 5)
enum v4l2_dv_rgb_range {
V4L2_DV_RGB_RANGE_AUTO = 0,
V4L2_DV_RGB_RANGE_LIMITED = 1,
V4L2_DV_RGB_RANGE_FULL = 2,
};
#define V4L2_CID_DV_TX_IT_CONTENT_TYPE (V4L2_CID_DV_CLASS_BASE + 6)
enum v4l2_dv_it_content_type {
V4L2_DV_IT_CONTENT_TYPE_GRAPHICS = 0,
V4L2_DV_IT_CONTENT_TYPE_PHOTO = 1,
V4L2_DV_IT_CONTENT_TYPE_CINEMA = 2,
V4L2_DV_IT_CONTENT_TYPE_GAME = 3,
V4L2_DV_IT_CONTENT_TYPE_NO_ITC = 4,
};
#define V4L2_CID_DV_RX_POWER_PRESENT (V4L2_CID_DV_CLASS_BASE + 100)
#define V4L2_CID_DV_RX_RGB_RANGE (V4L2_CID_DV_CLASS_BASE + 101)
#define V4L2_CID_DV_RX_IT_CONTENT_TYPE (V4L2_CID_DV_CLASS_BASE + 102)
#define V4L2_CID_FM_RX_CLASS_BASE (V4L2_CTRL_CLASS_FM_RX | 0x900)
#define V4L2_CID_FM_RX_CLASS (V4L2_CTRL_CLASS_FM_RX | 1)
#define V4L2_CID_TUNE_DEEMPHASIS (V4L2_CID_FM_RX_CLASS_BASE + 1)
enum v4l2_deemphasis {
V4L2_DEEMPHASIS_DISABLED = V4L2_PREEMPHASIS_DISABLED,
V4L2_DEEMPHASIS_50_uS = V4L2_PREEMPHASIS_50_uS,
V4L2_DEEMPHASIS_75_uS = V4L2_PREEMPHASIS_75_uS,
};
#define V4L2_CID_RDS_RECEPTION (V4L2_CID_FM_RX_CLASS_BASE + 2)
#define V4L2_CID_RDS_RX_PTY (V4L2_CID_FM_RX_CLASS_BASE + 3)
#define V4L2_CID_RDS_RX_PS_NAME (V4L2_CID_FM_RX_CLASS_BASE + 4)
#define V4L2_CID_RDS_RX_RADIO_TEXT (V4L2_CID_FM_RX_CLASS_BASE + 5)
#define V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT (V4L2_CID_FM_RX_CLASS_BASE + 6)
#define V4L2_CID_RDS_RX_TRAFFIC_PROGRAM (V4L2_CID_FM_RX_CLASS_BASE + 7)
#define V4L2_CID_RDS_RX_MUSIC_SPEECH (V4L2_CID_FM_RX_CLASS_BASE + 8)
#define V4L2_CID_RF_TUNER_CLASS_BASE (V4L2_CTRL_CLASS_RF_TUNER | 0x900)
#define V4L2_CID_RF_TUNER_CLASS (V4L2_CTRL_CLASS_RF_TUNER | 1)
#define V4L2_CID_RF_TUNER_BANDWIDTH_AUTO (V4L2_CID_RF_TUNER_CLASS_BASE + 11)
#define V4L2_CID_RF_TUNER_BANDWIDTH (V4L2_CID_RF_TUNER_CLASS_BASE + 12)
#define V4L2_CID_RF_TUNER_RF_GAIN (V4L2_CID_RF_TUNER_CLASS_BASE + 32)
#define V4L2_CID_RF_TUNER_LNA_GAIN_AUTO (V4L2_CID_RF_TUNER_CLASS_BASE + 41)
#define V4L2_CID_RF_TUNER_LNA_GAIN (V4L2_CID_RF_TUNER_CLASS_BASE + 42)
#define V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO (V4L2_CID_RF_TUNER_CLASS_BASE + 51)
#define V4L2_CID_RF_TUNER_MIXER_GAIN (V4L2_CID_RF_TUNER_CLASS_BASE + 52)
#define V4L2_CID_RF_TUNER_IF_GAIN_AUTO (V4L2_CID_RF_TUNER_CLASS_BASE + 61)
#define V4L2_CID_RF_TUNER_IF_GAIN (V4L2_CID_RF_TUNER_CLASS_BASE + 62)
#define V4L2_CID_RF_TUNER_PLL_LOCK (V4L2_CID_RF_TUNER_CLASS_BASE + 91)
/* Detection-class control IDs defined by V4L2 */
#define V4L2_CID_DETECT_CLASS_BASE (V4L2_CTRL_CLASS_DETECT | 0x900)
#define V4L2_CID_DETECT_CLASS (V4L2_CTRL_CLASS_DETECT | 1)
#define V4L2_CID_DETECT_MD_MODE (V4L2_CID_DETECT_CLASS_BASE + 1)
enum v4l2_detect_md_mode {
V4L2_DETECT_MD_MODE_DISABLED = 0,
V4L2_DETECT_MD_MODE_GLOBAL = 1,
V4L2_DETECT_MD_MODE_THRESHOLD_GRID = 2,
V4L2_DETECT_MD_MODE_REGION_GRID = 3,
};
#define V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD (V4L2_CID_DETECT_CLASS_BASE + 2)
#define V4L2_CID_DETECT_MD_THRESHOLD_GRID (V4L2_CID_DETECT_CLASS_BASE + 3)
#define V4L2_CID_DETECT_MD_REGION_GRID (V4L2_CID_DETECT_CLASS_BASE + 4)
/* Stateless CODECs controls */
#define V4L2_CID_CODEC_STATELESS_BASE (V4L2_CTRL_CLASS_CODEC_STATELESS | 0x900)
#define V4L2_CID_CODEC_STATELESS_CLASS (V4L2_CTRL_CLASS_CODEC_STATELESS | 1)
#define V4L2_CID_STATELESS_H264_DECODE_MODE (V4L2_CID_CODEC_STATELESS_BASE + 0)
/**
* enum v4l2_stateless_h264_decode_mode - Decoding mode
*
* @V4L2_STATELESS_H264_DECODE_MODE_SLICE_BASED: indicates that decoding
* is performed one slice at a time. In this mode,
* V4L2_CID_STATELESS_H264_SLICE_PARAMS must contain the parsed slice
* parameters and the OUTPUT buffer must contain a single slice.
 * The V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF feature is used
 * to support multislice frames.
* @V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED: indicates that
* decoding is performed per frame. The OUTPUT buffer must contain
* all slices and also both fields. This mode is typically supported
* by device drivers that are able to parse the slice(s) header(s)
* in hardware. When this mode is selected,
* V4L2_CID_STATELESS_H264_SLICE_PARAMS is not used.
*/
enum v4l2_stateless_h264_decode_mode {
V4L2_STATELESS_H264_DECODE_MODE_SLICE_BASED,
V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
};
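/*
 * Example (illustrative only): checking which decode mode the driver
 * implements before queueing slice data. `fd` is an assumed open device
 * node; this control is a menu control and drivers may support only one
 * of the two modes.
 *
 *	struct v4l2_ext_control ctrl = {
 *		.id = V4L2_CID_STATELESS_H264_DECODE_MODE,
 *	};
 *	struct v4l2_ext_controls ctrls = {
 *		.which = V4L2_CTRL_WHICH_CUR_VAL,
 *		.count = 1,
 *		.controls = &ctrl,
 *	};
 *	ioctl(fd, VIDIOC_G_EXT_CTRLS, &ctrls);
 *	if (ctrl.value == V4L2_STATELESS_H264_DECODE_MODE_SLICE_BASED)
 *		;	/* submit one slice per OUTPUT buffer */
 */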
#define V4L2_CID_STATELESS_H264_START_CODE (V4L2_CID_CODEC_STATELESS_BASE + 1)
/**
* enum v4l2_stateless_h264_start_code - Start code
*
* @V4L2_STATELESS_H264_START_CODE_NONE: slices are passed
* to the driver without any start code.
* @V4L2_STATELESS_H264_START_CODE_ANNEX_B: slices are passed
* to the driver with an Annex B start code prefix
 * (legal start codes can be the 3-byte 0x000001 or the 4-byte 0x00000001).
* This mode is typically supported by device drivers that parse
* the start code in hardware.
*/
enum v4l2_stateless_h264_start_code {
V4L2_STATELESS_H264_START_CODE_NONE,
V4L2_STATELESS_H264_START_CODE_ANNEX_B,
};
#define V4L2_H264_SPS_CONSTRAINT_SET0_FLAG 0x01
#define V4L2_H264_SPS_CONSTRAINT_SET1_FLAG 0x02
#define V4L2_H264_SPS_CONSTRAINT_SET2_FLAG 0x04
#define V4L2_H264_SPS_CONSTRAINT_SET3_FLAG 0x08
#define V4L2_H264_SPS_CONSTRAINT_SET4_FLAG 0x10
#define V4L2_H264_SPS_CONSTRAINT_SET5_FLAG 0x20
#define V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE 0x01
#define V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS 0x02
#define V4L2_H264_SPS_FLAG_DELTA_PIC_ORDER_ALWAYS_ZERO 0x04
#define V4L2_H264_SPS_FLAG_GAPS_IN_FRAME_NUM_VALUE_ALLOWED 0x08
#define V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY 0x10
#define V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD 0x20
#define V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE 0x40
#define V4L2_H264_SPS_HAS_CHROMA_FORMAT(sps) \
((sps)->profile_idc == 100 || (sps)->profile_idc == 110 || \
(sps)->profile_idc == 122 || (sps)->profile_idc == 244 || \
(sps)->profile_idc == 44 || (sps)->profile_idc == 83 || \
(sps)->profile_idc == 86 || (sps)->profile_idc == 118 || \
(sps)->profile_idc == 128 || (sps)->profile_idc == 138 || \
(sps)->profile_idc == 139 || (sps)->profile_idc == 134 || \
(sps)->profile_idc == 135)
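/*
 * Example (illustrative only): chroma_format_idc is only coded in the
 * bitstream for the profiles matched above; per the H264 specification,
 * when it is absent it is inferred to be 1 (4:2:0). A parser might use
 * the macro like this, with `sps` a filled struct v4l2_ctrl_h264_sps:
 *
 *	__u8 chroma = V4L2_H264_SPS_HAS_CHROMA_FORMAT(&sps) ?
 *		      sps.chroma_format_idc : 1;
 */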
#define V4L2_CID_STATELESS_H264_SPS (V4L2_CID_CODEC_STATELESS_BASE + 2)
/**
* struct v4l2_ctrl_h264_sps - H264 sequence parameter set
*
 * All the members of this sequence parameter set structure match the
* sequence parameter set syntax as specified by the H264 specification.
*
* @profile_idc: see H264 specification.
* @constraint_set_flags: see H264 specification.
* @level_idc: see H264 specification.
* @seq_parameter_set_id: see H264 specification.
* @chroma_format_idc: see H264 specification.
* @bit_depth_luma_minus8: see H264 specification.
* @bit_depth_chroma_minus8: see H264 specification.
* @log2_max_frame_num_minus4: see H264 specification.
* @pic_order_cnt_type: see H264 specification.
* @log2_max_pic_order_cnt_lsb_minus4: see H264 specification.
* @max_num_ref_frames: see H264 specification.
* @num_ref_frames_in_pic_order_cnt_cycle: see H264 specification.
* @offset_for_ref_frame: see H264 specification.
* @offset_for_non_ref_pic: see H264 specification.
* @offset_for_top_to_bottom_field: see H264 specification.
* @pic_width_in_mbs_minus1: see H264 specification.
* @pic_height_in_map_units_minus1: see H264 specification.
* @flags: see V4L2_H264_SPS_FLAG_{}.
*/
struct v4l2_ctrl_h264_sps {
__u8 profile_idc;
__u8 constraint_set_flags;
__u8 level_idc;
__u8 seq_parameter_set_id;
__u8 chroma_format_idc;
__u8 bit_depth_luma_minus8;
__u8 bit_depth_chroma_minus8;
__u8 log2_max_frame_num_minus4;
__u8 pic_order_cnt_type;
__u8 log2_max_pic_order_cnt_lsb_minus4;
__u8 max_num_ref_frames;
__u8 num_ref_frames_in_pic_order_cnt_cycle;
__s32 offset_for_ref_frame[255];
__s32 offset_for_non_ref_pic;
__s32 offset_for_top_to_bottom_field;
__u16 pic_width_in_mbs_minus1;
__u16 pic_height_in_map_units_minus1;
__u32 flags;
};
#define V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE 0x0001
#define V4L2_H264_PPS_FLAG_BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT 0x0002
#define V4L2_H264_PPS_FLAG_WEIGHTED_PRED 0x0004
#define V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT 0x0008
#define V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED 0x0010
#define V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT 0x0020
#define V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE 0x0040
#define V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT 0x0080
#define V4L2_CID_STATELESS_H264_PPS (V4L2_CID_CODEC_STATELESS_BASE + 3)
/**
* struct v4l2_ctrl_h264_pps - H264 picture parameter set
*
 * Except where noted, all the members of this picture parameter set
 * structure match the picture parameter set syntax as specified
 * by the H264 specification.
 *
 * In particular, the V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT flag
 * has a specific meaning. This flag should be set if a non-flat
 * scaling matrix applies to the picture. In this case, applications
 * are expected to use V4L2_CID_STATELESS_H264_SCALING_MATRIX
 * to pass the values of the non-flat matrices.
*
* @pic_parameter_set_id: see H264 specification.
* @seq_parameter_set_id: see H264 specification.
* @num_slice_groups_minus1: see H264 specification.
* @num_ref_idx_l0_default_active_minus1: see H264 specification.
* @num_ref_idx_l1_default_active_minus1: see H264 specification.
* @weighted_bipred_idc: see H264 specification.
* @pic_init_qp_minus26: see H264 specification.
* @pic_init_qs_minus26: see H264 specification.
* @chroma_qp_index_offset: see H264 specification.
* @second_chroma_qp_index_offset: see H264 specification.
* @flags: see V4L2_H264_PPS_FLAG_{}.
*/
struct v4l2_ctrl_h264_pps {
__u8 pic_parameter_set_id;
__u8 seq_parameter_set_id;
__u8 num_slice_groups_minus1;
__u8 num_ref_idx_l0_default_active_minus1;
__u8 num_ref_idx_l1_default_active_minus1;
__u8 weighted_bipred_idc;
__s8 pic_init_qp_minus26;
__s8 pic_init_qs_minus26;
__s8 chroma_qp_index_offset;
__s8 second_chroma_qp_index_offset;
__u16 flags;
};
#define V4L2_CID_STATELESS_H264_SCALING_MATRIX (V4L2_CID_CODEC_STATELESS_BASE + 4)
/**
* struct v4l2_ctrl_h264_scaling_matrix - H264 scaling matrices
*
* @scaling_list_4x4: scaling matrix after applying the inverse
* scanning process. Expected list order is Intra Y, Intra Cb,
* Intra Cr, Inter Y, Inter Cb, Inter Cr. The values on each
* scaling list are expected in raster scan order.
* @scaling_list_8x8: scaling matrix after applying the inverse
* scanning process. Expected list order is Intra Y, Inter Y,
* Intra Cb, Inter Cb, Intra Cr, Inter Cr. The values on each
* scaling list are expected in raster scan order.
*
* Note that the list order is different for the 4x4 and 8x8
* matrices as per the H264 specification, see table 7-2 "Assignment
* of mnemonic names to scaling list indices and specification of
* fall-back rule".
*/
struct v4l2_ctrl_h264_scaling_matrix {
__u8 scaling_list_4x4[6][16];
__u8 scaling_list_8x8[6][64];
};
struct v4l2_h264_weight_factors {
__s16 luma_weight[32];
__s16 luma_offset[32];
__s16 chroma_weight[32][2];
__s16 chroma_offset[32][2];
};
#define V4L2_H264_CTRL_PRED_WEIGHTS_REQUIRED(pps, slice) \
((((pps)->flags & V4L2_H264_PPS_FLAG_WEIGHTED_PRED) && \
((slice)->slice_type == V4L2_H264_SLICE_TYPE_P || \
(slice)->slice_type == V4L2_H264_SLICE_TYPE_SP)) || \
((pps)->weighted_bipred_idc == 1 && \
(slice)->slice_type == V4L2_H264_SLICE_TYPE_B))
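/*
 * Example (illustrative only): an application only needs to submit the
 * V4L2_CID_STATELESS_H264_PRED_WEIGHTS control (defined below) when the
 * macro above evaluates true for the current PPS and slice:
 *
 *	if (V4L2_H264_CTRL_PRED_WEIGHTS_REQUIRED(&pps, &slice))
 *		set_control(V4L2_CID_STATELESS_H264_PRED_WEIGHTS,
 *			    &pred_weights, sizeof(pred_weights));
 *
 * set_control() is a hypothetical helper wrapping VIDIOC_S_EXT_CTRLS.
 */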
#define V4L2_CID_STATELESS_H264_PRED_WEIGHTS (V4L2_CID_CODEC_STATELESS_BASE + 5)
/**
* struct v4l2_ctrl_h264_pred_weights - Prediction weight table
*
* Prediction weight table, which matches the syntax specified
* by the H264 specification.
*
* @luma_log2_weight_denom: see H264 specification.
* @chroma_log2_weight_denom: see H264 specification.
* @weight_factors: luma and chroma weight factors.
*/
struct v4l2_ctrl_h264_pred_weights {
__u16 luma_log2_weight_denom;
__u16 chroma_log2_weight_denom;
struct v4l2_h264_weight_factors weight_factors[2];
};
#define V4L2_H264_SLICE_TYPE_P 0
#define V4L2_H264_SLICE_TYPE_B 1
#define V4L2_H264_SLICE_TYPE_I 2
#define V4L2_H264_SLICE_TYPE_SP 3
#define V4L2_H264_SLICE_TYPE_SI 4
#define V4L2_H264_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED 0x01
#define V4L2_H264_SLICE_FLAG_SP_FOR_SWITCH 0x02
#define V4L2_H264_TOP_FIELD_REF 0x1
#define V4L2_H264_BOTTOM_FIELD_REF 0x2
#define V4L2_H264_FRAME_REF 0x3
/**
* struct v4l2_h264_reference - H264 picture reference
*
* @fields: indicates how the picture is referenced.
* Valid values are V4L2_H264_{}_REF.
* @index: index into v4l2_ctrl_h264_decode_params.dpb[].
*/
struct v4l2_h264_reference {
__u8 fields;
__u8 index;
};
/*
* Maximum DPB size, as specified by section 'A.3.1 Level limits
* common to the Baseline, Main, and Extended profiles'.
*/
#define V4L2_H264_NUM_DPB_ENTRIES 16
#define V4L2_H264_REF_LIST_LEN (2 * V4L2_H264_NUM_DPB_ENTRIES)
#define V4L2_CID_STATELESS_H264_SLICE_PARAMS (V4L2_CID_CODEC_STATELESS_BASE + 6)
/**
* struct v4l2_ctrl_h264_slice_params - H264 slice parameters
*
* This structure holds the H264 syntax elements that are specified
* as non-invariant for the slices in a given frame.
*
* Slice invariant syntax elements are contained in struct
* v4l2_ctrl_h264_decode_params. This is done to reduce the API surface
* on frame-based decoders, where slice header parsing is done by the
* hardware.
*
* Slice invariant syntax elements are specified in specification section
* "7.4.3 Slice header semantics".
*
 * Except where noted, the members of this struct match the slice header syntax.
*
* @header_bit_size: offset in bits to slice_data() from the beginning of this slice.
* @first_mb_in_slice: see H264 specification.
* @slice_type: see H264 specification.
* @colour_plane_id: see H264 specification.
* @redundant_pic_cnt: see H264 specification.
* @cabac_init_idc: see H264 specification.
* @slice_qp_delta: see H264 specification.
* @slice_qs_delta: see H264 specification.
* @disable_deblocking_filter_idc: see H264 specification.
* @slice_alpha_c0_offset_div2: see H264 specification.
* @slice_beta_offset_div2: see H264 specification.
* @num_ref_idx_l0_active_minus1: see H264 specification.
* @num_ref_idx_l1_active_minus1: see H264 specification.
* @reserved: padding field. Should be zeroed by applications.
* @ref_pic_list0: reference picture list 0 after applying the per-slice modifications.
* @ref_pic_list1: reference picture list 1 after applying the per-slice modifications.
* @flags: see V4L2_H264_SLICE_FLAG_{}.
*/
struct v4l2_ctrl_h264_slice_params {
__u32 header_bit_size;
__u32 first_mb_in_slice;
__u8 slice_type;
__u8 colour_plane_id;
__u8 redundant_pic_cnt;
__u8 cabac_init_idc;
__s8 slice_qp_delta;
__s8 slice_qs_delta;
__u8 disable_deblocking_filter_idc;
__s8 slice_alpha_c0_offset_div2;
__s8 slice_beta_offset_div2;
__u8 num_ref_idx_l0_active_minus1;
__u8 num_ref_idx_l1_active_minus1;
__u8 reserved;
struct v4l2_h264_reference ref_pic_list0[V4L2_H264_REF_LIST_LEN];
struct v4l2_h264_reference ref_pic_list1[V4L2_H264_REF_LIST_LEN];
__u32 flags;
};
#define V4L2_H264_DPB_ENTRY_FLAG_VALID 0x01
#define V4L2_H264_DPB_ENTRY_FLAG_ACTIVE 0x02
#define V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM 0x04
#define V4L2_H264_DPB_ENTRY_FLAG_FIELD 0x08
/**
* struct v4l2_h264_dpb_entry - H264 decoded picture buffer entry
*
* @reference_ts: timestamp of the V4L2 capture buffer to use as reference.
* The timestamp refers to the timestamp field in struct v4l2_buffer.
* Use v4l2_timeval_to_ns() to convert the struct timeval to a __u64.
* @pic_num: matches PicNum variable assigned during the reference
* picture lists construction process.
* @frame_num: frame identifier which matches frame_num syntax element.
* @fields: indicates how the DPB entry is referenced. Valid values are
* V4L2_H264_{}_REF.
* @reserved: padding field. Should be zeroed by applications.
* @top_field_order_cnt: matches TopFieldOrderCnt picture value.
* @bottom_field_order_cnt: matches BottomFieldOrderCnt picture value.
* Note that picture field is indicated by v4l2_buffer.field.
* @flags: see V4L2_H264_DPB_ENTRY_FLAG_{}.
*/
struct v4l2_h264_dpb_entry {
__u64 reference_ts;
__u32 pic_num;
__u16 frame_num;
__u8 fields;
__u8 reserved[5];
__s32 top_field_order_cnt;
__s32 bottom_field_order_cnt;
__u32 flags;
};
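/*
 * Example (illustrative only): userspace fills @reference_ts from the
 * struct v4l2_buffer timestamp of the matching capture buffer. The
 * conversion below mirrors what v4l2_timeval_to_ns() does in-kernel;
 * `buf` is an assumed dequeued struct v4l2_buffer:
 *
 *	__u64 reference_ts = buf.timestamp.tv_sec * 1000000000ULL +
 *			     buf.timestamp.tv_usec * 1000ULL;
 */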
#define V4L2_H264_DECODE_PARAM_FLAG_IDR_PIC 0x01
#define V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC 0x02
#define V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD 0x04
#define V4L2_H264_DECODE_PARAM_FLAG_PFRAME 0x08
#define V4L2_H264_DECODE_PARAM_FLAG_BFRAME 0x10
#define V4L2_CID_STATELESS_H264_DECODE_PARAMS (V4L2_CID_CODEC_STATELESS_BASE + 7)
/**
* struct v4l2_ctrl_h264_decode_params - H264 decoding parameters
*
* @dpb: decoded picture buffer.
* @nal_ref_idc: slice header syntax element.
* @frame_num: slice header syntax element.
* @top_field_order_cnt: matches TopFieldOrderCnt picture value.
* @bottom_field_order_cnt: matches BottomFieldOrderCnt picture value.
* Note that picture field is indicated by v4l2_buffer.field.
* @idr_pic_id: slice header syntax element.
* @pic_order_cnt_lsb: slice header syntax element.
* @delta_pic_order_cnt_bottom: slice header syntax element.
* @delta_pic_order_cnt0: slice header syntax element.
* @delta_pic_order_cnt1: slice header syntax element.
* @dec_ref_pic_marking_bit_size: size in bits of dec_ref_pic_marking()
* syntax element.
* @pic_order_cnt_bit_size: size in bits of pic order count syntax.
* @slice_group_change_cycle: slice header syntax element.
* @reserved: padding field. Should be zeroed by applications.
* @flags: see V4L2_H264_DECODE_PARAM_FLAG_{}.
*/
struct v4l2_ctrl_h264_decode_params {
struct v4l2_h264_dpb_entry dpb[V4L2_H264_NUM_DPB_ENTRIES];
__u16 nal_ref_idc;
__u16 frame_num;
__s32 top_field_order_cnt;
__s32 bottom_field_order_cnt;
__u16 idr_pic_id;
__u16 pic_order_cnt_lsb;
__s32 delta_pic_order_cnt_bottom;
__s32 delta_pic_order_cnt0;
__s32 delta_pic_order_cnt1;
__u32 dec_ref_pic_marking_bit_size;
__u32 pic_order_cnt_bit_size;
__u32 slice_group_change_cycle;
__u32 reserved;
__u32 flags;
};
/* Stateless FWHT control, used by the vicodec driver */
/* Current FWHT version */
#define V4L2_FWHT_VERSION 3
/* Set if this is an interlaced format */
#define V4L2_FWHT_FL_IS_INTERLACED _BITUL(0)
/* Set if this is a bottom-first (NTSC) interlaced format */
#define V4L2_FWHT_FL_IS_BOTTOM_FIRST _BITUL(1)
/* Set if each 'frame' contains just one field */
#define V4L2_FWHT_FL_IS_ALTERNATE _BITUL(2)
/*
 * If V4L2_FWHT_FL_IS_ALTERNATE is set, then this flag is set if this
 * 'frame' is the bottom field; otherwise it is the top field.
*/
#define V4L2_FWHT_FL_IS_BOTTOM_FIELD _BITUL(3)
/* Set if the Y' plane is uncompressed */
#define V4L2_FWHT_FL_LUMA_IS_UNCOMPRESSED _BITUL(4)
/* Set if the Cb plane is uncompressed */
#define V4L2_FWHT_FL_CB_IS_UNCOMPRESSED _BITUL(5)
/* Set if the Cr plane is uncompressed */
#define V4L2_FWHT_FL_CR_IS_UNCOMPRESSED _BITUL(6)
/* Set if the chroma plane is full height; if cleared, it is half height */
#define V4L2_FWHT_FL_CHROMA_FULL_HEIGHT _BITUL(7)
/* Set if the chroma plane is full width; if cleared, it is half width */
#define V4L2_FWHT_FL_CHROMA_FULL_WIDTH _BITUL(8)
/* Set if the alpha plane is uncompressed */
#define V4L2_FWHT_FL_ALPHA_IS_UNCOMPRESSED _BITUL(9)
/* Set if this is an I Frame */
#define V4L2_FWHT_FL_I_FRAME _BITUL(10)
/* A four-valued field: the number of components minus 1 */
#define V4L2_FWHT_FL_COMPONENTS_NUM_MSK GENMASK(18, 16)
#define V4L2_FWHT_FL_COMPONENTS_NUM_OFFSET 16
/* A four-valued field: the pixel encoding type */
#define V4L2_FWHT_FL_PIXENC_MSK GENMASK(20, 19)
#define V4L2_FWHT_FL_PIXENC_OFFSET 19
#define V4L2_FWHT_FL_PIXENC_YUV (1 << V4L2_FWHT_FL_PIXENC_OFFSET)
#define V4L2_FWHT_FL_PIXENC_RGB (2 << V4L2_FWHT_FL_PIXENC_OFFSET)
#define V4L2_FWHT_FL_PIXENC_HSV (3 << V4L2_FWHT_FL_PIXENC_OFFSET)
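/*
 * Example (illustrative only): decoding the packed fields out of the
 * FWHT flags word using the masks and offsets above; `flags` is the
 * @flags member of struct v4l2_ctrl_fwht_params (defined below):
 *
 *	unsigned int ncomp = ((flags & V4L2_FWHT_FL_COMPONENTS_NUM_MSK) >>
 *			      V4L2_FWHT_FL_COMPONENTS_NUM_OFFSET) + 1;
 *	int is_yuv = (flags & V4L2_FWHT_FL_PIXENC_MSK) ==
 *		     V4L2_FWHT_FL_PIXENC_YUV;
 */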
#define V4L2_CID_STATELESS_FWHT_PARAMS (V4L2_CID_CODEC_STATELESS_BASE + 100)
/**
* struct v4l2_ctrl_fwht_params - FWHT parameters
*
* @backward_ref_ts: timestamp of the V4L2 capture buffer to use as reference.
* The timestamp refers to the timestamp field in struct v4l2_buffer.
* Use v4l2_timeval_to_ns() to convert the struct timeval to a __u64.
* @version: must be V4L2_FWHT_VERSION.
* @width: width of frame.
* @height: height of frame.
* @flags: FWHT flags (see V4L2_FWHT_FL_*).
* @colorspace: the colorspace (enum v4l2_colorspace).
* @xfer_func: the transfer function (enum v4l2_xfer_func).
* @ycbcr_enc: the Y'CbCr encoding (enum v4l2_ycbcr_encoding).
* @quantization: the quantization (enum v4l2_quantization).
*/
struct v4l2_ctrl_fwht_params {
__u64 backward_ref_ts;
__u32 version;
__u32 width;
__u32 height;
__u32 flags;
__u32 colorspace;
__u32 xfer_func;
__u32 ycbcr_enc;
__u32 quantization;
};
/* Stateless VP8 control */
#define V4L2_VP8_SEGMENT_FLAG_ENABLED 0x01
#define V4L2_VP8_SEGMENT_FLAG_UPDATE_MAP 0x02
#define V4L2_VP8_SEGMENT_FLAG_UPDATE_FEATURE_DATA 0x04
#define V4L2_VP8_SEGMENT_FLAG_DELTA_VALUE_MODE 0x08
/**
* struct v4l2_vp8_segment - VP8 segment-based adjustments parameters
*
* @quant_update: update values for the segment quantizer.
* @lf_update: update values for the loop filter level.
* @segment_probs: branch probabilities of the segment_id decoding tree.
* @padding: padding field. Should be zeroed by applications.
* @flags: see V4L2_VP8_SEGMENT_FLAG_{}.
*
* This structure contains segment-based adjustments related parameters.
* See the 'update_segmentation()' part of the frame header syntax,
* and section '9.3. Segment-Based Adjustments' of the VP8 specification
* for more details.
*/
struct v4l2_vp8_segment {
__s8 quant_update[4];
__s8 lf_update[4];
__u8 segment_probs[3];
__u8 padding;
__u32 flags;
};
#define V4L2_VP8_LF_ADJ_ENABLE 0x01
#define V4L2_VP8_LF_DELTA_UPDATE 0x02
#define V4L2_VP8_LF_FILTER_TYPE_SIMPLE 0x04
/**
* struct v4l2_vp8_loop_filter - VP8 loop filter parameters
*
* @ref_frm_delta: Reference frame signed delta values.
* @mb_mode_delta: MB prediction mode signed delta values.
* @sharpness_level: matches sharpness_level syntax element.
* @level: matches loop_filter_level syntax element.
* @padding: padding field. Should be zeroed by applications.
* @flags: see V4L2_VP8_LF_{}.
*
* This structure contains loop filter related parameters.
* See the 'mb_lf_adjustments()' part of the frame header syntax,
* and section '9.4. Loop Filter Type and Levels' of the VP8 specification
* for more details.
*/
struct v4l2_vp8_loop_filter {
__s8 ref_frm_delta[4];
__s8 mb_mode_delta[4];
__u8 sharpness_level;
__u8 level;
__u16 padding;
__u32 flags;
};
/**
 * struct v4l2_vp8_quantization - VP8 quantization indices
*
* @y_ac_qi: luma AC coefficient table index.
 * @y_dc_delta: luma DC delta value.
* @y2_dc_delta: y2 block DC delta value.
* @y2_ac_delta: y2 block AC delta value.
* @uv_dc_delta: chroma DC delta value.
* @uv_ac_delta: chroma AC delta value.
* @padding: padding field. Should be zeroed by applications.
*
* This structure contains the quantization indices present
* in 'quant_indices()' part of the frame header syntax.
* See section '9.6. Dequantization Indices' of the VP8 specification
* for more details.
*/
struct v4l2_vp8_quantization {
__u8 y_ac_qi;
__s8 y_dc_delta;
__s8 y2_dc_delta;
__s8 y2_ac_delta;
__s8 uv_dc_delta;
__s8 uv_ac_delta;
__u16 padding;
};
#define V4L2_VP8_COEFF_PROB_CNT 11
#define V4L2_VP8_MV_PROB_CNT 19
/**
* struct v4l2_vp8_entropy - VP8 update probabilities
*
* @coeff_probs: coefficient probability update values.
* @y_mode_probs: luma intra-prediction probabilities.
* @uv_mode_probs: chroma intra-prediction probabilities.
* @mv_probs: mv decoding probability.
* @padding: padding field. Should be zeroed by applications.
*
* This structure contains the update probabilities present in
* 'token_prob_update()' and 'mv_prob_update()' part of the frame header.
* See section '17.2. Probability Updates' of the VP8 specification
* for more details.
*/
struct v4l2_vp8_entropy {
__u8 coeff_probs[4][8][3][V4L2_VP8_COEFF_PROB_CNT];
__u8 y_mode_probs[4];
__u8 uv_mode_probs[3];
__u8 mv_probs[2][V4L2_VP8_MV_PROB_CNT];
__u8 padding[3];
};
/**
* struct v4l2_vp8_entropy_coder_state - VP8 boolean coder state
*
* @range: coder state value for "Range"
* @value: coder state value for "Value"
 * @bit_count: number of bits left in the "Value" coder state.
* @padding: padding field. Should be zeroed by applications.
*
* This structure contains the state for the boolean coder, as
* explained in section '7. Boolean Entropy Decoder' of the VP8 specification.
*/
struct v4l2_vp8_entropy_coder_state {
__u8 range;
__u8 value;
__u8 bit_count;
__u8 padding;
};
#define V4L2_VP8_FRAME_FLAG_KEY_FRAME 0x01
#define V4L2_VP8_FRAME_FLAG_EXPERIMENTAL 0x02
#define V4L2_VP8_FRAME_FLAG_SHOW_FRAME 0x04
#define V4L2_VP8_FRAME_FLAG_MB_NO_SKIP_COEFF 0x08
#define V4L2_VP8_FRAME_FLAG_SIGN_BIAS_GOLDEN 0x10
#define V4L2_VP8_FRAME_FLAG_SIGN_BIAS_ALT 0x20
#define V4L2_VP8_FRAME_IS_KEY_FRAME(hdr) \
(!!((hdr)->flags & V4L2_VP8_FRAME_FLAG_KEY_FRAME))
#define V4L2_CID_STATELESS_VP8_FRAME (V4L2_CID_CODEC_STATELESS_BASE + 200)
/**
* struct v4l2_ctrl_vp8_frame - VP8 frame parameters
*
* @segment: segmentation parameters. See &v4l2_vp8_segment for more details
* @lf: loop filter parameters. See &v4l2_vp8_loop_filter for more details
* @quant: quantization parameters. See &v4l2_vp8_quantization for more details
* @entropy: update probabilities. See &v4l2_vp8_entropy for more details
* @coder_state: boolean coder state. See &v4l2_vp8_entropy_coder_state for more details
* @width: frame width.
* @height: frame height.
* @horizontal_scale: horizontal scaling factor.
* @vertical_scale: vertical scaling factor.
* @version: bitstream version.
* @prob_skip_false: frame header syntax element.
* @prob_intra: frame header syntax element.
* @prob_last: frame header syntax element.
* @prob_gf: frame header syntax element.
* @num_dct_parts: number of DCT coefficients partitions.
* @first_part_size: size of the first partition, i.e. the control partition.
* @first_part_header_bits: size in bits of the first partition header portion.
* @dct_part_sizes: DCT coefficients sizes.
* @last_frame_ts: "last" reference buffer timestamp.
* The timestamp refers to the timestamp field in struct v4l2_buffer.
* Use v4l2_timeval_to_ns() to convert the struct timeval to a __u64.
* @golden_frame_ts: "golden" reference buffer timestamp.
* @alt_frame_ts: "alt" reference buffer timestamp.
* @flags: see V4L2_VP8_FRAME_FLAG_{}.
*/
struct v4l2_ctrl_vp8_frame {
struct v4l2_vp8_segment segment;
struct v4l2_vp8_loop_filter lf;
struct v4l2_vp8_quantization quant;
struct v4l2_vp8_entropy entropy;
struct v4l2_vp8_entropy_coder_state coder_state;
__u16 width;
__u16 height;
__u8 horizontal_scale;
__u8 vertical_scale;
__u8 version;
__u8 prob_skip_false;
__u8 prob_intra;
__u8 prob_last;
__u8 prob_gf;
__u8 num_dct_parts;
__u32 first_part_size;
__u32 first_part_header_bits;
__u32 dct_part_sizes[8];
__u64 last_frame_ts;
__u64 golden_frame_ts;
__u64 alt_frame_ts;
__u64 flags;
};
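/*
 * Example (illustrative only): a stateless VP8 frame is decoded by
 * passing this compound control together with the OUTPUT buffer holding
 * the bitstream, typically attached to a media request (request setup
 * is omitted here). The control payload itself is described like this:
 *
 *	struct v4l2_ctrl_vp8_frame frame;
 *	// ... fill `frame` from the parsed frame header ...
 *	struct v4l2_ext_control ctrl = {
 *		.id = V4L2_CID_STATELESS_VP8_FRAME,
 *		.size = sizeof(frame),
 *		.ptr = &frame,
 *	};
 */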
/* Stateless MPEG-2 controls */
#define V4L2_MPEG2_SEQ_FLAG_PROGRESSIVE 0x01
#define V4L2_CID_STATELESS_MPEG2_SEQUENCE (V4L2_CID_CODEC_STATELESS_BASE+220)
/**
* struct v4l2_ctrl_mpeg2_sequence - MPEG-2 sequence header
*
* All the members on this structure match the sequence header and sequence
* extension syntaxes as specified by the MPEG-2 specification.
*
 * The fields horizontal_size, vertical_size and vbv_buffer_size are each a
 * combination of the respective _value and _extension syntax elements,
 * as described in section 6.3.3 "Sequence header".
*
* @horizontal_size: combination of elements horizontal_size_value and
* horizontal_size_extension.
* @vertical_size: combination of elements vertical_size_value and
* vertical_size_extension.
* @vbv_buffer_size: combination of elements vbv_buffer_size_value and
* vbv_buffer_size_extension.
* @profile_and_level_indication: see MPEG-2 specification.
* @chroma_format: see MPEG-2 specification.
* @flags: see V4L2_MPEG2_SEQ_FLAG_{}.
*/
struct v4l2_ctrl_mpeg2_sequence {
__u16 horizontal_size;
__u16 vertical_size;
__u32 vbv_buffer_size;
__u16 profile_and_level_indication;
__u8 chroma_format;
__u8 flags;
};
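/*
 * Example (illustrative only): how the combined fields are assembled
 * from the parsed bitstream elements, following section 6.3.3 of the
 * MPEG-2 specification (12-bit _value plus 2-bit extension for the
 * sizes, 10-bit _value plus 8-bit extension for vbv_buffer_size). The
 * *_value and *_extension variables are assumed parser outputs:
 *
 *	seq.horizontal_size = (horizontal_size_extension << 12) |
 *			      horizontal_size_value;
 *	seq.vbv_buffer_size = (vbv_buffer_size_extension << 10) |
 *			      vbv_buffer_size_value;
 */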
#define V4L2_MPEG2_PIC_CODING_TYPE_I 1
#define V4L2_MPEG2_PIC_CODING_TYPE_P 2
#define V4L2_MPEG2_PIC_CODING_TYPE_B 3
#define V4L2_MPEG2_PIC_CODING_TYPE_D 4
#define V4L2_MPEG2_PIC_TOP_FIELD 0x1
#define V4L2_MPEG2_PIC_BOTTOM_FIELD 0x2
#define V4L2_MPEG2_PIC_FRAME 0x3
#define V4L2_MPEG2_PIC_FLAG_TOP_FIELD_FIRST 0x0001
#define V4L2_MPEG2_PIC_FLAG_FRAME_PRED_DCT 0x0002
#define V4L2_MPEG2_PIC_FLAG_CONCEALMENT_MV 0x0004
#define V4L2_MPEG2_PIC_FLAG_Q_SCALE_TYPE 0x0008
#define V4L2_MPEG2_PIC_FLAG_INTRA_VLC 0x0010
#define V4L2_MPEG2_PIC_FLAG_ALT_SCAN 0x0020
#define V4L2_MPEG2_PIC_FLAG_REPEAT_FIRST 0x0040
#define V4L2_MPEG2_PIC_FLAG_PROGRESSIVE 0x0080
#define V4L2_CID_STATELESS_MPEG2_PICTURE (V4L2_CID_CODEC_STATELESS_BASE+221)
/**
* struct v4l2_ctrl_mpeg2_picture - MPEG-2 picture header
*
* All the members on this structure match the picture header and picture
* coding extension syntaxes as specified by the MPEG-2 specification.
*
* @backward_ref_ts: timestamp of the V4L2 capture buffer to use as
* reference for backward prediction.
* @forward_ref_ts: timestamp of the V4L2 capture buffer to use as
 * reference for forward prediction. These timestamps refer to the
* timestamp field in struct v4l2_buffer. Use v4l2_timeval_to_ns()
* to convert the struct timeval to a __u64.
* @flags: see V4L2_MPEG2_PIC_FLAG_{}.
* @f_code: see MPEG-2 specification.
* @picture_coding_type: see MPEG-2 specification.
* @picture_structure: see V4L2_MPEG2_PIC_{}_FIELD.
* @intra_dc_precision: see MPEG-2 specification.
* @reserved: padding field. Should be zeroed by applications.
*/
struct v4l2_ctrl_mpeg2_picture {
__u64 backward_ref_ts;
__u64 forward_ref_ts;
__u32 flags;
__u8 f_code[2][2];
__u8 picture_coding_type;
__u8 picture_structure;
__u8 intra_dc_precision;
__u8 reserved[5];
};
#define V4L2_CID_STATELESS_MPEG2_QUANTISATION (V4L2_CID_CODEC_STATELESS_BASE+222)
/**
* struct v4l2_ctrl_mpeg2_quantisation - MPEG-2 quantisation
*
* Quantisation matrices as specified by section 6.3.7
* "Quant matrix extension".
*
* @intra_quantiser_matrix: The quantisation matrix coefficients
* for intra-coded frames, in zigzag scanning order. It is relevant
* for both luma and chroma components, although it can be superseded
* by the chroma-specific matrix for non-4:2:0 YUV formats.
* @non_intra_quantiser_matrix: The quantisation matrix coefficients
* for non-intra-coded frames, in zigzag scanning order. It is relevant
* for both luma and chroma components, although it can be superseded
* by the chroma-specific matrix for non-4:2:0 YUV formats.
* @chroma_intra_quantiser_matrix: The quantisation matrix coefficients
 * for the chrominance component of intra-coded frames, in zigzag scanning
* order. Only relevant for 4:2:2 and 4:4:4 YUV formats.
* @chroma_non_intra_quantiser_matrix: The quantisation matrix coefficients
* for the chrominance component of non-intra-coded frames, in zigzag scanning
* order. Only relevant for 4:2:2 and 4:4:4 YUV formats.
*/
struct v4l2_ctrl_mpeg2_quantisation {
__u8 intra_quantiser_matrix[64];
__u8 non_intra_quantiser_matrix[64];
__u8 chroma_intra_quantiser_matrix[64];
__u8 chroma_non_intra_quantiser_matrix[64];
};
#define V4L2_CID_STATELESS_HEVC_SPS (V4L2_CID_CODEC_STATELESS_BASE + 400)
#define V4L2_CID_STATELESS_HEVC_PPS (V4L2_CID_CODEC_STATELESS_BASE + 401)
#define V4L2_CID_STATELESS_HEVC_SLICE_PARAMS (V4L2_CID_CODEC_STATELESS_BASE + 402)
#define V4L2_CID_STATELESS_HEVC_SCALING_MATRIX (V4L2_CID_CODEC_STATELESS_BASE + 403)
#define V4L2_CID_STATELESS_HEVC_DECODE_PARAMS (V4L2_CID_CODEC_STATELESS_BASE + 404)
#define V4L2_CID_STATELESS_HEVC_DECODE_MODE (V4L2_CID_CODEC_STATELESS_BASE + 405)
#define V4L2_CID_STATELESS_HEVC_START_CODE (V4L2_CID_CODEC_STATELESS_BASE + 406)
#define V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS (V4L2_CID_CODEC_STATELESS_BASE + 407)
enum v4l2_stateless_hevc_decode_mode {
V4L2_STATELESS_HEVC_DECODE_MODE_SLICE_BASED,
V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
};
enum v4l2_stateless_hevc_start_code {
V4L2_STATELESS_HEVC_START_CODE_NONE,
V4L2_STATELESS_HEVC_START_CODE_ANNEX_B,
};
#define V4L2_HEVC_SLICE_TYPE_B 0
#define V4L2_HEVC_SLICE_TYPE_P 1
#define V4L2_HEVC_SLICE_TYPE_I 2
#define V4L2_HEVC_SPS_FLAG_SEPARATE_COLOUR_PLANE (1ULL << 0)
#define V4L2_HEVC_SPS_FLAG_SCALING_LIST_ENABLED (1ULL << 1)
#define V4L2_HEVC_SPS_FLAG_AMP_ENABLED (1ULL << 2)
#define V4L2_HEVC_SPS_FLAG_SAMPLE_ADAPTIVE_OFFSET (1ULL << 3)
#define V4L2_HEVC_SPS_FLAG_PCM_ENABLED (1ULL << 4)
#define V4L2_HEVC_SPS_FLAG_PCM_LOOP_FILTER_DISABLED (1ULL << 5)
#define V4L2_HEVC_SPS_FLAG_LONG_TERM_REF_PICS_PRESENT (1ULL << 6)
#define V4L2_HEVC_SPS_FLAG_SPS_TEMPORAL_MVP_ENABLED (1ULL << 7)
#define V4L2_HEVC_SPS_FLAG_STRONG_INTRA_SMOOTHING_ENABLED (1ULL << 8)
/**
* struct v4l2_ctrl_hevc_sps - ITU-T Rec. H.265: Sequence parameter set
*
* @video_parameter_set_id: specifies the value of the
* vps_video_parameter_set_id of the active VPS
* @seq_parameter_set_id: provides an identifier for the SPS for
* reference by other syntax elements
* @pic_width_in_luma_samples: specifies the width of each decoded picture
* in units of luma samples
* @pic_height_in_luma_samples: specifies the height of each decoded picture
* in units of luma samples
 * @bit_depth_luma_minus8: this value plus 8 specifies the bit depth of the
* samples of the luma array
* @bit_depth_chroma_minus8: this value plus 8 specifies the bit depth of the
* samples of the chroma arrays
* @log2_max_pic_order_cnt_lsb_minus4: this value plus 4 specifies the value of
* the variable MaxPicOrderCntLsb
* @sps_max_dec_pic_buffering_minus1: this value plus 1 specifies the maximum
* required size of the decoded picture
 * buffer for the coded video sequence
 * @sps_max_num_reorder_pics: indicates the maximum allowed number of pictures
 * that can precede any picture in decoding order and follow it in output order
 * @sps_max_latency_increase_plus1: when not equal to 0, used to compute the
 * value of the SpsMaxLatencyPictures array
* @log2_min_luma_coding_block_size_minus3: plus 3 specifies the minimum
* luma coding block size
* @log2_diff_max_min_luma_coding_block_size: specifies the difference between
* the maximum and minimum luma
* coding block size
* @log2_min_luma_transform_block_size_minus2: plus 2 specifies the minimum luma
* transform block size
* @log2_diff_max_min_luma_transform_block_size: specifies the difference between
* the maximum and minimum luma
* transform block size
* @max_transform_hierarchy_depth_inter: specifies the maximum hierarchy
* depth for transform units of
* coding units coded in inter
* prediction mode
* @max_transform_hierarchy_depth_intra: specifies the maximum hierarchy
* depth for transform units of
* coding units coded in intra
* prediction mode
* @pcm_sample_bit_depth_luma_minus1: this value plus 1 specifies the number of
* bits used to represent each of PCM sample
* values of the luma component
* @pcm_sample_bit_depth_chroma_minus1: this value plus 1 specifies the number
* of bits used to represent each of PCM
* sample values of the chroma components
* @log2_min_pcm_luma_coding_block_size_minus3: this value plus 3 specifies the
* minimum size of coding blocks
* @log2_diff_max_min_pcm_luma_coding_block_size: specifies the difference between
* the maximum and minimum size of
* coding blocks
* @num_short_term_ref_pic_sets: specifies the number of st_ref_pic_set()
* syntax structures included in the SPS
* @num_long_term_ref_pics_sps: specifies the number of candidate long-term
* reference pictures that are specified in the SPS
* @chroma_format_idc: specifies the chroma sampling
* @sps_max_sub_layers_minus1: this value plus 1 specifies the maximum number
* of temporal sub-layers
* @reserved: padding field. Should be zeroed by applications.
* @flags: see V4L2_HEVC_SPS_FLAG_{}
*/
struct v4l2_ctrl_hevc_sps {
__u8 video_parameter_set_id;
__u8 seq_parameter_set_id;
__u16 pic_width_in_luma_samples;
__u16 pic_height_in_luma_samples;
__u8 bit_depth_luma_minus8;
__u8 bit_depth_chroma_minus8;
__u8 log2_max_pic_order_cnt_lsb_minus4;
__u8 sps_max_dec_pic_buffering_minus1;
__u8 sps_max_num_reorder_pics;
__u8 sps_max_latency_increase_plus1;
__u8 log2_min_luma_coding_block_size_minus3;
__u8 log2_diff_max_min_luma_coding_block_size;
__u8 log2_min_luma_transform_block_size_minus2;
__u8 log2_diff_max_min_luma_transform_block_size;
__u8 max_transform_hierarchy_depth_inter;
__u8 max_transform_hierarchy_depth_intra;
__u8 pcm_sample_bit_depth_luma_minus1;
__u8 pcm_sample_bit_depth_chroma_minus1;
__u8 log2_min_pcm_luma_coding_block_size_minus3;
__u8 log2_diff_max_min_pcm_luma_coding_block_size;
__u8 num_short_term_ref_pic_sets;
__u8 num_long_term_ref_pics_sps;
__u8 chroma_format_idc;
__u8 sps_max_sub_layers_minus1;
__u8 reserved[6];
__u64 flags;
};
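/*
 * Example (illustrative only): many SPS fields are "minus"/"plus" coded
 * exactly as in the bitstream, so derived variables follow the H.265
 * equations directly; `sps` is an assumed filled struct
 * v4l2_ctrl_hevc_sps:
 *
 *	__u32 max_poc_lsb = 1 << (sps.log2_max_pic_order_cnt_lsb_minus4 + 4);
 *	__u32 min_cb_size = 1 << (sps.log2_min_luma_coding_block_size_minus3 + 3);
 */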
#define V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT_ENABLED (1ULL << 0)
#define V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT (1ULL << 1)
#define V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED (1ULL << 2)
#define V4L2_HEVC_PPS_FLAG_CABAC_INIT_PRESENT (1ULL << 3)
#define V4L2_HEVC_PPS_FLAG_CONSTRAINED_INTRA_PRED (1ULL << 4)
#define V4L2_HEVC_PPS_FLAG_TRANSFORM_SKIP_ENABLED (1ULL << 5)
#define V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED (1ULL << 6)
#define V4L2_HEVC_PPS_FLAG_PPS_SLICE_CHROMA_QP_OFFSETS_PRESENT (1ULL << 7)
#define V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED (1ULL << 8)
#define V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED (1ULL << 9)
#define V4L2_HEVC_PPS_FLAG_TRANSQUANT_BYPASS_ENABLED (1ULL << 10)
#define V4L2_HEVC_PPS_FLAG_TILES_ENABLED (1ULL << 11)
#define V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED (1ULL << 12)
#define V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED (1ULL << 13)
#define V4L2_HEVC_PPS_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED (1ULL << 14)
#define V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_OVERRIDE_ENABLED (1ULL << 15)
#define V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER (1ULL << 16)
#define V4L2_HEVC_PPS_FLAG_LISTS_MODIFICATION_PRESENT (1ULL << 17)
#define V4L2_HEVC_PPS_FLAG_SLICE_SEGMENT_HEADER_EXTENSION_PRESENT (1ULL << 18)
#define V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT (1ULL << 19)
#define V4L2_HEVC_PPS_FLAG_UNIFORM_SPACING (1ULL << 20)
/**
* struct v4l2_ctrl_hevc_pps - ITU-T Rec. H.265: Picture parameter set
*
* @pic_parameter_set_id: identifies the PPS for reference by other
* syntax elements
* @num_extra_slice_header_bits: specifies the number of extra slice header
* bits that are present in the slice header RBSP
* for coded pictures referring to the PPS.
* @num_ref_idx_l0_default_active_minus1: this value plus 1 specifies the
* inferred value of num_ref_idx_l0_active_minus1
* @num_ref_idx_l1_default_active_minus1: this value plus 1 specifies the
* inferred value of num_ref_idx_l1_active_minus1
 * @init_qp_minus26: this value plus 26 specifies the initial value of SliceQpY for
* each slice referring to the PPS
* @diff_cu_qp_delta_depth: specifies the difference between the luma coding
* tree block size and the minimum luma coding block
* size of coding units that convey cu_qp_delta_abs
* and cu_qp_delta_sign_flag
 * @pps_cb_qp_offset: specifies the offset to the luma quantization parameter for Cb
 * @pps_cr_qp_offset: specifies the offset to the luma quantization parameter for Cr
* @num_tile_columns_minus1: this value plus 1 specifies the number of tile columns
* partitioning the picture
* @num_tile_rows_minus1: this value plus 1 specifies the number of tile rows partitioning
* the picture
 * @column_width_minus1: this value plus 1 specifies the width of each tile column in
 * units of coding tree blocks
 * @row_height_minus1: this value plus 1 specifies the height of each tile row in
 * units of coding tree blocks
 * @pps_beta_offset_div2: specifies the default deblocking parameter offset for
 * beta, divided by 2
 * @pps_tc_offset_div2: specifies the default deblocking parameter offset for tC,
 * divided by 2
* @log2_parallel_merge_level_minus2: this value plus 2 specifies the value of
* the variable Log2ParMrgLevel
* @reserved: padding field. Should be zeroed by applications.
* @flags: see V4L2_HEVC_PPS_FLAG_{}
*/
struct v4l2_ctrl_hevc_pps {
__u8 pic_parameter_set_id;
__u8 num_extra_slice_header_bits;
__u8 num_ref_idx_l0_default_active_minus1;
__u8 num_ref_idx_l1_default_active_minus1;
__s8 init_qp_minus26;
__u8 diff_cu_qp_delta_depth;
__s8 pps_cb_qp_offset;
__s8 pps_cr_qp_offset;
__u8 num_tile_columns_minus1;
__u8 num_tile_rows_minus1;
__u8 column_width_minus1[20];
__u8 row_height_minus1[22];
__s8 pps_beta_offset_div2;
__s8 pps_tc_offset_div2;
__u8 log2_parallel_merge_level_minus2;
__u8 reserved;
__u64 flags;
};
#define V4L2_HEVC_DPB_ENTRY_LONG_TERM_REFERENCE 0x01
#define V4L2_HEVC_SEI_PIC_STRUCT_FRAME 0
#define V4L2_HEVC_SEI_PIC_STRUCT_TOP_FIELD 1
#define V4L2_HEVC_SEI_PIC_STRUCT_BOTTOM_FIELD 2
#define V4L2_HEVC_SEI_PIC_STRUCT_TOP_BOTTOM 3
#define V4L2_HEVC_SEI_PIC_STRUCT_BOTTOM_TOP 4
#define V4L2_HEVC_SEI_PIC_STRUCT_TOP_BOTTOM_TOP 5
#define V4L2_HEVC_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM 6
#define V4L2_HEVC_SEI_PIC_STRUCT_FRAME_DOUBLING 7
#define V4L2_HEVC_SEI_PIC_STRUCT_FRAME_TRIPLING 8
#define V4L2_HEVC_SEI_PIC_STRUCT_TOP_PAIRED_PREVIOUS_BOTTOM 9
#define V4L2_HEVC_SEI_PIC_STRUCT_BOTTOM_PAIRED_PREVIOUS_TOP 10
#define V4L2_HEVC_SEI_PIC_STRUCT_TOP_PAIRED_NEXT_BOTTOM 11
#define V4L2_HEVC_SEI_PIC_STRUCT_BOTTOM_PAIRED_NEXT_TOP 12
#define V4L2_HEVC_DPB_ENTRIES_NUM_MAX 16
/**
* struct v4l2_hevc_dpb_entry - HEVC decoded picture buffer entry
*
* @timestamp: timestamp of the V4L2 capture buffer to use as reference.
 * @flags: long-term flag for the reference frame (see V4L2_HEVC_DPB_ENTRY_LONG_TERM_REFERENCE)
* @field_pic: whether the reference is a field picture or a frame.
* @reserved: padding field. Should be zeroed by applications.
* @pic_order_cnt_val: the picture order count of the current picture.
*/
struct v4l2_hevc_dpb_entry {
__u64 timestamp;
__u8 flags;
__u8 field_pic;
__u16 reserved;
__s32 pic_order_cnt_val;
};
/**
* struct v4l2_hevc_pred_weight_table - HEVC weighted prediction parameters
*
* @delta_luma_weight_l0: the difference of the weighting factor applied
* to the luma prediction value for list 0
* @luma_offset_l0: the additive offset applied to the luma prediction value
* for list 0
* @delta_chroma_weight_l0: the difference of the weighting factor applied
* to the chroma prediction values for list 0
* @chroma_offset_l0: the difference of the additive offset applied to
* the chroma prediction values for list 0
* @delta_luma_weight_l1: the difference of the weighting factor applied
* to the luma prediction value for list 1
* @luma_offset_l1: the additive offset applied to the luma prediction value
* for list 1
* @delta_chroma_weight_l1: the difference of the weighting factor applied
* to the chroma prediction values for list 1
* @chroma_offset_l1: the difference of the additive offset applied to
* the chroma prediction values for list 1
* @luma_log2_weight_denom: the base 2 logarithm of the denominator for
* all luma weighting factors
* @delta_chroma_log2_weight_denom: the difference of the base 2 logarithm
* of the denominator for all chroma
* weighting factors
*/
struct v4l2_hevc_pred_weight_table {
__s8 delta_luma_weight_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
__s8 luma_offset_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
__s8 delta_chroma_weight_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
__s8 chroma_offset_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
__s8 delta_luma_weight_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
__s8 luma_offset_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
__s8 delta_chroma_weight_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
__s8 chroma_offset_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
__u8 luma_log2_weight_denom;
__s8 delta_chroma_log2_weight_denom;
};
#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_LUMA (1ULL << 0)
#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_CHROMA (1ULL << 1)
#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_TEMPORAL_MVP_ENABLED (1ULL << 2)
#define V4L2_HEVC_SLICE_PARAMS_FLAG_MVD_L1_ZERO (1ULL << 3)
#define V4L2_HEVC_SLICE_PARAMS_FLAG_CABAC_INIT (1ULL << 4)
#define V4L2_HEVC_SLICE_PARAMS_FLAG_COLLOCATED_FROM_L0 (1ULL << 5)
#define V4L2_HEVC_SLICE_PARAMS_FLAG_USE_INTEGER_MV (1ULL << 6)
#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED (1ULL << 7)
#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED (1ULL << 8)
#define V4L2_HEVC_SLICE_PARAMS_FLAG_DEPENDENT_SLICE_SEGMENT (1ULL << 9)
/**
* struct v4l2_ctrl_hevc_slice_params - HEVC slice parameters
*
* This control is a dynamically sized 1-dimensional array; the
* V4L2_CTRL_FLAG_DYNAMIC_ARRAY flag must be set when using it.
*
* @bit_size: size (in bits) of the current slice data
* @data_byte_offset: offset (in bytes) to the video data in the current slice data
* @num_entry_point_offsets: specifies the number of entry point offset syntax
* elements in the slice header.
* @nal_unit_type: specifies the coding type of the slice (B, P or I)
* @nuh_temporal_id_plus1: this value minus 1 specifies a temporal identifier for the NAL unit
* @slice_type: see V4L2_HEVC_SLICE_TYPE_{}
* @colour_plane_id: specifies the colour plane associated with the current slice
* @slice_pic_order_cnt: specifies the picture order count
* @num_ref_idx_l0_active_minus1: this value plus 1 specifies the maximum
* reference index for reference picture list 0
* that may be used to decode the slice
* @num_ref_idx_l1_active_minus1: this value plus 1 specifies the maximum
* reference index for reference picture list 1
* that may be used to decode the slice
* @collocated_ref_idx: specifies the reference index of the collocated picture used
* for temporal motion vector prediction
* @five_minus_max_num_merge_cand: specifies the maximum number of merging
* motion vector prediction candidates supported in
* the slice subtracted from 5
* @slice_qp_delta: specifies the initial value of QpY to be used for the coding
* blocks in the slice
* @slice_cb_qp_offset: specifies a difference to be added to the value of pps_cb_qp_offset
* @slice_cr_qp_offset: specifies a difference to be added to the value of pps_cr_qp_offset
* @slice_act_y_qp_offset: screen content extension parameters
* @slice_act_cb_qp_offset: screen content extension parameters
* @slice_act_cr_qp_offset: screen content extension parameters
* @slice_beta_offset_div2: specifies the deblocking parameter offsets for beta divided by 2
* @slice_tc_offset_div2: specifies the deblocking parameter offsets for tC divided by 2
* @pic_struct: indicates whether a picture should be displayed as a frame or as one or
* more fields
* @reserved0: padding field. Should be zeroed by applications.
* @slice_segment_addr: specifies the address of the first coding tree block in
* the slice segment
* @ref_idx_l0: the list of L0 reference elements as indices in the DPB
* @ref_idx_l1: the list of L1 reference elements as indices in the DPB
* @short_term_ref_pic_set_size: specifies the size of short-term reference
* pictures set included in the SPS
* @long_term_ref_pic_set_size: specifies the size of long-term reference
* pictures set included in the SPS
* @pred_weight_table: the prediction weight coefficients for inter-picture
* prediction
* @reserved1: padding field. Should be zeroed by applications.
* @flags: see V4L2_HEVC_SLICE_PARAMS_FLAG_{}
*/
struct v4l2_ctrl_hevc_slice_params {
__u32 bit_size;
__u32 data_byte_offset;
__u32 num_entry_point_offsets;
/* ISO/IEC 23008-2, ITU-T Rec. H.265: NAL unit header */
__u8 nal_unit_type;
__u8 nuh_temporal_id_plus1;
/* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */
__u8 slice_type;
__u8 colour_plane_id;
__s32 slice_pic_order_cnt;
__u8 num_ref_idx_l0_active_minus1;
__u8 num_ref_idx_l1_active_minus1;
__u8 collocated_ref_idx;
__u8 five_minus_max_num_merge_cand;
__s8 slice_qp_delta;
__s8 slice_cb_qp_offset;
__s8 slice_cr_qp_offset;
__s8 slice_act_y_qp_offset;
__s8 slice_act_cb_qp_offset;
__s8 slice_act_cr_qp_offset;
__s8 slice_beta_offset_div2;
__s8 slice_tc_offset_div2;
/* ISO/IEC 23008-2, ITU-T Rec. H.265: Picture timing SEI message */
__u8 pic_struct;
__u8 reserved0[3];
/* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */
__u32 slice_segment_addr;
__u8 ref_idx_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
__u8 ref_idx_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
__u16 short_term_ref_pic_set_size;
__u16 long_term_ref_pic_set_size;
/* ISO/IEC 23008-2, ITU-T Rec. H.265: Weighted prediction parameter */
struct v4l2_hevc_pred_weight_table pred_weight_table;
__u8 reserved1[2];
__u64 flags;
};
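/*
 * Editor's sketch (not part of the UAPI): because this control is a
 * dynamic array, userspace sizes it per-frame through the extended
 * control interface. Assumes struct v4l2_ext_control from
 * <linux/videodev2.h> and the V4L2_CID_STATELESS_HEVC_SLICE_PARAMS id
 * defined earlier in this header; names below are illustrative only.
 *
 *	static void fill_slice_ctrl(struct v4l2_ext_control *ctrl,
 *				    struct v4l2_ctrl_hevc_slice_params *slices,
 *				    __u32 num_slices)
 *	{
 *		ctrl->id = V4L2_CID_STATELESS_HEVC_SLICE_PARAMS;
 *		ctrl->size = num_slices * sizeof(*slices);
 *		ctrl->ptr = slices;
 *	}
 */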
#define V4L2_HEVC_DECODE_PARAM_FLAG_IRAP_PIC 0x1
#define V4L2_HEVC_DECODE_PARAM_FLAG_IDR_PIC 0x2
#define V4L2_HEVC_DECODE_PARAM_FLAG_NO_OUTPUT_OF_PRIOR 0x4
/**
* struct v4l2_ctrl_hevc_decode_params - HEVC decode parameters
*
* @pic_order_cnt_val: picture order count
* @short_term_ref_pic_set_size: specifies the size of short-term reference
* pictures set included in the SPS of the first slice
* @long_term_ref_pic_set_size: specifies the size of long-term reference
* pictures set included in the SPS of the first slice
* @num_active_dpb_entries: the number of entries in dpb
* @num_poc_st_curr_before: the number of reference pictures in the short-term
* set that come before the current frame
* @num_poc_st_curr_after: the number of reference pictures in the short-term
* set that come after the current frame
* @num_poc_lt_curr: the number of reference pictures in the long-term set
* @poc_st_curr_before: provides the index of the short term before references
* in DPB array
* @poc_st_curr_after: provides the index of the short term after references
* in DPB array
* @poc_lt_curr: provides the index of the long term references in DPB array
* @num_delta_pocs_of_ref_rps_idx: same as the derived value NumDeltaPocs[RefRpsIdx],
* can be used to parse the RPS data in slice headers
* instead of skipping it with @short_term_ref_pic_set_size.
* @reserved: padding field. Should be zeroed by applications.
* @dpb: the decoded picture buffer, for meta-data about reference frames
* @flags: see V4L2_HEVC_DECODE_PARAM_FLAG_{}
*/
struct v4l2_ctrl_hevc_decode_params {
__s32 pic_order_cnt_val;
__u16 short_term_ref_pic_set_size;
__u16 long_term_ref_pic_set_size;
__u8 num_active_dpb_entries;
__u8 num_poc_st_curr_before;
__u8 num_poc_st_curr_after;
__u8 num_poc_lt_curr;
__u8 poc_st_curr_before[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
__u8 poc_st_curr_after[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
__u8 poc_lt_curr[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
__u8 num_delta_pocs_of_ref_rps_idx;
__u8 reserved[3];
struct v4l2_hevc_dpb_entry dpb[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
__u64 flags;
};
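/*
 * Editor's sketch (not part of the UAPI): a minimal consistency check over
 * the DPB-related counters above, which all index into the 16-entry arrays
 * sized by V4L2_HEVC_DPB_ENTRIES_NUM_MAX. The function name is hypothetical.
 */
static inline int example_hevc_decode_params_valid(const struct v4l2_ctrl_hevc_decode_params *dp)
{
	return dp->num_active_dpb_entries <= V4L2_HEVC_DPB_ENTRIES_NUM_MAX &&
	       dp->num_poc_st_curr_before <= V4L2_HEVC_DPB_ENTRIES_NUM_MAX &&
	       dp->num_poc_st_curr_after <= V4L2_HEVC_DPB_ENTRIES_NUM_MAX &&
	       dp->num_poc_lt_curr <= V4L2_HEVC_DPB_ENTRIES_NUM_MAX;
}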
/**
* struct v4l2_ctrl_hevc_scaling_matrix - HEVC scaling lists parameters
*
* @scaling_list_4x4: scaling list is used for the scaling process for
* transform coefficients. The values on each scaling
* list are expected in raster scan order
* @scaling_list_8x8: scaling list is used for the scaling process for
* transform coefficients. The values on each scaling
* list are expected in raster scan order
* @scaling_list_16x16: scaling list is used for the scaling process for
* transform coefficients. The values on each scaling
* list are expected in raster scan order
* @scaling_list_32x32: scaling list is used for the scaling process for
* transform coefficients. The values on each scaling
* list are expected in raster scan order
* @scaling_list_dc_coef_16x16: scaling list is used for the scaling process
* for transform coefficients. The values on each
* scaling list are expected in raster scan order.
* @scaling_list_dc_coef_32x32: scaling list is used for the scaling process
* for transform coefficients. The values on each
* scaling list are expected in raster scan order.
*/
struct v4l2_ctrl_hevc_scaling_matrix {
__u8 scaling_list_4x4[6][16];
__u8 scaling_list_8x8[6][64];
__u8 scaling_list_16x16[6][64];
__u8 scaling_list_32x32[2][64];
__u8 scaling_list_dc_coef_16x16[6];
__u8 scaling_list_dc_coef_32x32[2];
};
#define V4L2_CID_COLORIMETRY_CLASS_BASE (V4L2_CTRL_CLASS_COLORIMETRY | 0x900)
#define V4L2_CID_COLORIMETRY_CLASS (V4L2_CTRL_CLASS_COLORIMETRY | 1)
#define V4L2_CID_COLORIMETRY_HDR10_CLL_INFO (V4L2_CID_COLORIMETRY_CLASS_BASE + 0)
struct v4l2_ctrl_hdr10_cll_info {
__u16 max_content_light_level;
__u16 max_pic_average_light_level;
};
#define V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY (V4L2_CID_COLORIMETRY_CLASS_BASE + 1)
#define V4L2_HDR10_MASTERING_PRIMARIES_X_LOW 5
#define V4L2_HDR10_MASTERING_PRIMARIES_X_HIGH 37000
#define V4L2_HDR10_MASTERING_PRIMARIES_Y_LOW 5
#define V4L2_HDR10_MASTERING_PRIMARIES_Y_HIGH 42000
#define V4L2_HDR10_MASTERING_WHITE_POINT_X_LOW 5
#define V4L2_HDR10_MASTERING_WHITE_POINT_X_HIGH 37000
#define V4L2_HDR10_MASTERING_WHITE_POINT_Y_LOW 5
#define V4L2_HDR10_MASTERING_WHITE_POINT_Y_HIGH 42000
#define V4L2_HDR10_MASTERING_MAX_LUMA_LOW 50000
#define V4L2_HDR10_MASTERING_MAX_LUMA_HIGH 100000000
#define V4L2_HDR10_MASTERING_MIN_LUMA_LOW 1
#define V4L2_HDR10_MASTERING_MIN_LUMA_HIGH 50000
struct v4l2_ctrl_hdr10_mastering_display {
__u16 display_primaries_x[3];
__u16 display_primaries_y[3];
__u16 white_point_x;
__u16 white_point_y;
__u32 max_display_mastering_luminance;
__u32 min_display_mastering_luminance;
};
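/*
 * Editor's sketch (not part of the UAPI): validating chromaticity
 * coordinates against the V4L2_HDR10_MASTERING_* bounds above. The
 * function name is hypothetical.
 */
static inline int example_hdr10_primaries_valid(const struct v4l2_ctrl_hdr10_mastering_display *m)
{
	unsigned int i;

	for (i = 0; i < 3; i++) {
		if (m->display_primaries_x[i] < V4L2_HDR10_MASTERING_PRIMARIES_X_LOW ||
		    m->display_primaries_x[i] > V4L2_HDR10_MASTERING_PRIMARIES_X_HIGH ||
		    m->display_primaries_y[i] < V4L2_HDR10_MASTERING_PRIMARIES_Y_LOW ||
		    m->display_primaries_y[i] > V4L2_HDR10_MASTERING_PRIMARIES_Y_HIGH)
			return 0;
	}
	return 1;
}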
/* Stateless VP9 controls */
#define V4L2_VP9_LOOP_FILTER_FLAG_DELTA_ENABLED 0x1
#define V4L2_VP9_LOOP_FILTER_FLAG_DELTA_UPDATE 0x2
/**
* struct v4l2_vp9_loop_filter - VP9 loop filter parameters
*
* @ref_deltas: contains the adjustment needed for the filter level based on the
* chosen reference frame. If this syntax element is not present in the bitstream,
* users should pass its last value.
* @mode_deltas: contains the adjustment needed for the filter level based on the
* chosen mode. If this syntax element is not present in the bitstream, users should
* pass its last value.
* @level: indicates the loop filter strength.
* @sharpness: indicates the sharpness level.
* @flags: combination of V4L2_VP9_LOOP_FILTER_FLAG_{} flags.
* @reserved: padding field. Should be zeroed by applications.
*
* This structure contains all loop filter related parameters. See section
* '7.2.8 Loop filter semantics' of the VP9 specification for more details.
*/
struct v4l2_vp9_loop_filter {
__s8 ref_deltas[4];
__s8 mode_deltas[2];
__u8 level;
__u8 sharpness;
__u8 flags;
__u8 reserved[7];
};
/**
* struct v4l2_vp9_quantization - VP9 quantization parameters
*
* @base_q_idx: indicates the base frame qindex.
* @delta_q_y_dc: indicates the Y DC quantizer relative to base_q_idx.
* @delta_q_uv_dc: indicates the UV DC quantizer relative to base_q_idx.
* @delta_q_uv_ac: indicates the UV AC quantizer relative to base_q_idx.
* @reserved: padding field. Should be zeroed by applications.
*
* Encodes the quantization parameters. See section '7.2.9 Quantization params
* syntax' of the VP9 specification for more details.
*/
struct v4l2_vp9_quantization {
__u8 base_q_idx;
__s8 delta_q_y_dc;
__s8 delta_q_uv_dc;
__s8 delta_q_uv_ac;
__u8 reserved[4];
};
#define V4L2_VP9_SEGMENTATION_FLAG_ENABLED 0x01
#define V4L2_VP9_SEGMENTATION_FLAG_UPDATE_MAP 0x02
#define V4L2_VP9_SEGMENTATION_FLAG_TEMPORAL_UPDATE 0x04
#define V4L2_VP9_SEGMENTATION_FLAG_UPDATE_DATA 0x08
#define V4L2_VP9_SEGMENTATION_FLAG_ABS_OR_DELTA_UPDATE 0x10
#define V4L2_VP9_SEG_LVL_ALT_Q 0
#define V4L2_VP9_SEG_LVL_ALT_L 1
#define V4L2_VP9_SEG_LVL_REF_FRAME 2
#define V4L2_VP9_SEG_LVL_SKIP 3
#define V4L2_VP9_SEG_LVL_MAX 4
#define V4L2_VP9_SEGMENT_FEATURE_ENABLED(id) (1 << (id))
#define V4L2_VP9_SEGMENT_FEATURE_ENABLED_MASK 0xf
/**
* struct v4l2_vp9_segmentation - VP9 segmentation parameters
*
* @feature_data: data attached to each feature. Data entry is only valid if
* the feature is enabled. The array shall be indexed with segment number as
* the first dimension (0..7) and one of V4L2_VP9_SEG_{} as the second dimension.
* @feature_enabled: bitmask defining which features are enabled in each segment.
* The value for each segment is a combination of V4L2_VP9_SEGMENT_FEATURE_ENABLED(id)
* values where id is one of V4L2_VP9_SEG_LVL_{}.
* @tree_probs: specifies the probability values to be used when decoding a
* Segment-ID. See '5.15. Segmentation map' section of the VP9 specification
* for more details.
* @pred_probs: specifies the probability values to be used when decoding a
* Predicted-Segment-ID. See '6.4.14. Get segment id syntax' section of the VP9
* specification for more details.
* @flags: combination of V4L2_VP9_SEGMENTATION_FLAG_{} flags.
* @reserved: padding field. Should be zeroed by applications.
*
* Encodes the segmentation parameters. See section '7.2.10 Segmentation params syntax' of
* the VP9 specification for more details.
*/
struct v4l2_vp9_segmentation {
__s16 feature_data[8][4];
__u8 feature_enabled[8];
__u8 tree_probs[7];
__u8 pred_probs[3];
__u8 flags;
__u8 reserved[5];
};
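/*
 * Editor's sketch (not part of the UAPI): testing whether a feature is
 * enabled for a given segment and fetching its data. @segment is 0..7 and
 * @feature one of V4L2_VP9_SEG_LVL_{}. The function name is hypothetical.
 */
static inline int example_vp9_seg_feature(const struct v4l2_vp9_segmentation *seg,
					  unsigned int segment, unsigned int feature,
					  __s16 *data)
{
	if (!(seg->feature_enabled[segment] & V4L2_VP9_SEGMENT_FEATURE_ENABLED(feature)))
		return 0;
	/* feature_data is only meaningful when the feature bit is set. */
	*data = seg->feature_data[segment][feature];
	return 1;
}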
#define V4L2_VP9_FRAME_FLAG_KEY_FRAME 0x001
#define V4L2_VP9_FRAME_FLAG_SHOW_FRAME 0x002
#define V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT 0x004
#define V4L2_VP9_FRAME_FLAG_INTRA_ONLY 0x008
#define V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV 0x010
#define V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX 0x020
#define V4L2_VP9_FRAME_FLAG_PARALLEL_DEC_MODE 0x040
#define V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING 0x080
#define V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING 0x100
#define V4L2_VP9_FRAME_FLAG_COLOR_RANGE_FULL_SWING 0x200
#define V4L2_VP9_SIGN_BIAS_LAST 0x1
#define V4L2_VP9_SIGN_BIAS_GOLDEN 0x2
#define V4L2_VP9_SIGN_BIAS_ALT 0x4
#define V4L2_VP9_RESET_FRAME_CTX_NONE 0
#define V4L2_VP9_RESET_FRAME_CTX_SPEC 1
#define V4L2_VP9_RESET_FRAME_CTX_ALL 2
#define V4L2_VP9_INTERP_FILTER_EIGHTTAP 0
#define V4L2_VP9_INTERP_FILTER_EIGHTTAP_SMOOTH 1
#define V4L2_VP9_INTERP_FILTER_EIGHTTAP_SHARP 2
#define V4L2_VP9_INTERP_FILTER_BILINEAR 3
#define V4L2_VP9_INTERP_FILTER_SWITCHABLE 4
#define V4L2_VP9_REFERENCE_MODE_SINGLE_REFERENCE 0
#define V4L2_VP9_REFERENCE_MODE_COMPOUND_REFERENCE 1
#define V4L2_VP9_REFERENCE_MODE_SELECT 2
#define V4L2_VP9_PROFILE_MAX 3
#define V4L2_CID_STATELESS_VP9_FRAME (V4L2_CID_CODEC_STATELESS_BASE + 300)
/**
* struct v4l2_ctrl_vp9_frame - VP9 frame decoding control
*
* @lf: loop filter parameters. See &v4l2_vp9_loop_filter for more details.
* @quant: quantization parameters. See &v4l2_vp9_quantization for more details.
* @seg: segmentation parameters. See &v4l2_vp9_segmentation for more details.
* @flags: combination of V4L2_VP9_FRAME_FLAG_{} flags.
* @compressed_header_size: compressed header size in bytes.
* @uncompressed_header_size: uncompressed header size in bytes.
* @frame_width_minus_1: add 1 to it and you'll get the frame width expressed in pixels.
* @frame_height_minus_1: add 1 to it and you'll get the frame height expressed in pixels.
* @render_width_minus_1: add 1 to it and you'll get the expected render width expressed in
* pixels. This is not used during the decoding process but might be used by HW scalers
* to prepare a frame that's ready for scanout.
* @render_height_minus_1: add 1 to it and you'll get the expected render height expressed in
* pixels. This is not used during the decoding process but might be used by HW scalers
* to prepare a frame that's ready for scanout.
* @last_frame_ts: "last" reference buffer timestamp.
* The timestamp refers to the timestamp field in struct v4l2_buffer.
* Use v4l2_timeval_to_ns() to convert the struct timeval to a __u64.
* @golden_frame_ts: "golden" reference buffer timestamp.
* The timestamp refers to the timestamp field in struct v4l2_buffer.
* Use v4l2_timeval_to_ns() to convert the struct timeval to a __u64.
* @alt_frame_ts: "alt" reference buffer timestamp.
* The timestamp refers to the timestamp field in struct v4l2_buffer.
* Use v4l2_timeval_to_ns() to convert the struct timeval to a __u64.
* @ref_frame_sign_bias: a bitfield specifying whether the sign bias is set for a given
* reference frame. Either of V4L2_VP9_SIGN_BIAS_{}.
* @reset_frame_context: specifies whether the frame context should be reset to default values.
* Either of V4L2_VP9_RESET_FRAME_CTX_{}.
* @frame_context_idx: frame context that should be used/updated.
* @profile: VP9 profile. Can be 0, 1, 2 or 3.
* @bit_depth: bits per components. Can be 8, 10 or 12. Note that not all profiles support
* 10 and/or 12 bits depths.
* @interpolation_filter: specifies the filter selection used for performing inter prediction.
* Set to one of V4L2_VP9_INTERP_FILTER_{}.
* @tile_cols_log2: specifies the base 2 logarithm of the width of each tile (where the width
* is measured in units of 8x8 blocks). Shall be less than or equal to 6.
* @tile_rows_log2: specifies the base 2 logarithm of the height of each tile (where the height
* is measured in units of 8x8 blocks).
* @reference_mode: specifies the type of inter prediction to be used.
* Set to one of V4L2_VP9_REFERENCE_MODE_{}.
* @reserved: padding field. Should be zeroed by applications.
*/
struct v4l2_ctrl_vp9_frame {
struct v4l2_vp9_loop_filter lf;
struct v4l2_vp9_quantization quant;
struct v4l2_vp9_segmentation seg;
__u32 flags;
__u16 compressed_header_size;
__u16 uncompressed_header_size;
__u16 frame_width_minus_1;
__u16 frame_height_minus_1;
__u16 render_width_minus_1;
__u16 render_height_minus_1;
__u64 last_frame_ts;
__u64 golden_frame_ts;
__u64 alt_frame_ts;
__u8 ref_frame_sign_bias;
__u8 reset_frame_context;
__u8 frame_context_idx;
__u8 profile;
__u8 bit_depth;
__u8 interpolation_filter;
__u8 tile_cols_log2;
__u8 tile_rows_log2;
__u8 reference_mode;
__u8 reserved[7];
};
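/*
 * Editor's sketch (not part of the UAPI): recovering pixel dimensions from
 * the "minus_1" fields above. The function name is hypothetical.
 */
static inline void example_vp9_frame_dims(const struct v4l2_ctrl_vp9_frame *f,
					  unsigned int *width, unsigned int *height)
{
	*width = f->frame_width_minus_1 + 1;
	*height = f->frame_height_minus_1 + 1;
}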
#define V4L2_VP9_NUM_FRAME_CTX 4
/**
* struct v4l2_vp9_mv_probs - VP9 Motion vector probability updates
* @joint: motion vector joint probability updates.
* @sign: motion vector sign probability updates.
* @classes: motion vector class probability updates.
* @class0_bit: motion vector class0 bit probability updates.
* @bits: motion vector bits probability updates.
* @class0_fr: motion vector class0 fractional bit probability updates.
* @fr: motion vector fractional bit probability updates.
* @class0_hp: motion vector class0 high precision fractional bit probability updates.
* @hp: motion vector high precision fractional bit probability updates.
*
* This structure contains new values of motion vector probabilities.
* A value of zero in an array element means there is no update of the relevant probability.
* See `struct v4l2_vp9_prob_updates` for details.
*/
struct v4l2_vp9_mv_probs {
__u8 joint[3];
__u8 sign[2];
__u8 classes[2][10];
__u8 class0_bit[2];
__u8 bits[2][10];
__u8 class0_fr[2][2][3];
__u8 fr[2][3];
__u8 class0_hp[2];
__u8 hp[2];
};
#define V4L2_CID_STATELESS_VP9_COMPRESSED_HDR (V4L2_CID_CODEC_STATELESS_BASE + 301)
#define V4L2_VP9_TX_MODE_ONLY_4X4 0
#define V4L2_VP9_TX_MODE_ALLOW_8X8 1
#define V4L2_VP9_TX_MODE_ALLOW_16X16 2
#define V4L2_VP9_TX_MODE_ALLOW_32X32 3
#define V4L2_VP9_TX_MODE_SELECT 4
/**
* struct v4l2_ctrl_vp9_compressed_hdr - VP9 probability updates control
* @tx_mode: specifies the TX mode. Set to one of V4L2_VP9_TX_MODE_{}.
* @tx8: TX 8x8 probability updates.
* @tx16: TX 16x16 probability updates.
* @tx32: TX 32x32 probability updates.
* @coef: coefficient probability updates.
* @skip: skip probability updates.
* @inter_mode: inter mode probability updates.
* @interp_filter: interpolation filter probability updates.
* @is_inter: is inter-block probability updates.
* @comp_mode: compound prediction mode probability updates.
* @single_ref: single ref probability updates.
* @comp_ref: compound ref probability updates.
* @y_mode: Y prediction mode probability updates.
* @uv_mode: UV prediction mode probability updates.
* @partition: partition probability updates.
* @mv: motion vector probability updates.
*
* This structure holds the probabilities update as parsed in the compressed
* header (Spec 6.3). These values represent the value of probability update after
* being translated with inv_map_table[] (see 6.3.5). A value of zero in an array element
* means that there is no update of the relevant probability.
*
* This control is optional and needs to be used with hardware that is not
* capable of parsing the compressed header itself. Only drivers which need it will
* implement it.
*/
struct v4l2_ctrl_vp9_compressed_hdr {
__u8 tx_mode;
__u8 tx8[2][1];
__u8 tx16[2][2];
__u8 tx32[2][3];
__u8 coef[4][2][2][6][6][3];
__u8 skip[3];
__u8 inter_mode[7][3];
__u8 interp_filter[4][2];
__u8 is_inter[4];
__u8 comp_mode[5];
__u8 single_ref[5][2];
__u8 comp_ref[5];
__u8 y_mode[4][9];
__u8 uv_mode[10][9];
__u8 partition[16][3];
struct v4l2_vp9_mv_probs mv;
};
/* Stateless AV1 controls */
#define V4L2_AV1_TOTAL_REFS_PER_FRAME 8
#define V4L2_AV1_CDEF_MAX 8
#define V4L2_AV1_NUM_PLANES_MAX 3 /* 1 if monochrome, 3 otherwise */
#define V4L2_AV1_MAX_SEGMENTS 8
#define V4L2_AV1_MAX_OPERATING_POINTS (1 << 5) /* 5 bits to encode */
#define V4L2_AV1_REFS_PER_FRAME 7
#define V4L2_AV1_MAX_NUM_Y_POINTS (1 << 4) /* 4 bits to encode */
#define V4L2_AV1_MAX_NUM_CB_POINTS (1 << 4) /* 4 bits to encode */
#define V4L2_AV1_MAX_NUM_CR_POINTS (1 << 4) /* 4 bits to encode */
#define V4L2_AV1_AR_COEFFS_SIZE 25 /* (2 * 3 * (3 + 1)) + 1 */
#define V4L2_AV1_MAX_NUM_PLANES 3
#define V4L2_AV1_MAX_TILE_COLS 64
#define V4L2_AV1_MAX_TILE_ROWS 64
#define V4L2_AV1_MAX_TILE_COUNT 512
#define V4L2_AV1_SEQUENCE_FLAG_STILL_PICTURE 0x00000001
#define V4L2_AV1_SEQUENCE_FLAG_USE_128X128_SUPERBLOCK 0x00000002
#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_FILTER_INTRA 0x00000004
#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTRA_EDGE_FILTER 0x00000008
#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTERINTRA_COMPOUND 0x00000010
#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_MASKED_COMPOUND 0x00000020
#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_WARPED_MOTION 0x00000040
#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_DUAL_FILTER 0x00000080
#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_ORDER_HINT 0x00000100
#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_JNT_COMP 0x00000200
#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_REF_FRAME_MVS 0x00000400
#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_SUPERRES 0x00000800
#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_CDEF 0x00001000
#define V4L2_AV1_SEQUENCE_FLAG_ENABLE_RESTORATION 0x00002000
#define V4L2_AV1_SEQUENCE_FLAG_MONO_CHROME 0x00004000
#define V4L2_AV1_SEQUENCE_FLAG_COLOR_RANGE 0x00008000
#define V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_X 0x00010000
#define V4L2_AV1_SEQUENCE_FLAG_SUBSAMPLING_Y 0x00020000
#define V4L2_AV1_SEQUENCE_FLAG_FILM_GRAIN_PARAMS_PRESENT 0x00040000
#define V4L2_AV1_SEQUENCE_FLAG_SEPARATE_UV_DELTA_Q 0x00080000
#define V4L2_CID_STATELESS_AV1_SEQUENCE (V4L2_CID_CODEC_STATELESS_BASE + 500)
/**
* struct v4l2_ctrl_av1_sequence - AV1 Sequence
*
* Represents an AV1 Sequence OBU. See section 5.5 "Sequence header OBU syntax"
* for more details.
*
* @flags: See V4L2_AV1_SEQUENCE_FLAG_{}.
* @seq_profile: specifies the features that can be used in the coded video
* sequence.
* @order_hint_bits: specifies the number of bits used for the order_hint field
* at each frame.
* @bit_depth: the bitdepth to use for the sequence as described in section
* 5.5.2 "Color config syntax".
* @reserved: padding field. Should be zeroed by applications.
* @max_frame_width_minus_1: specifies the maximum frame width minus 1 for the
* frames represented by this sequence header.
* @max_frame_height_minus_1: specifies the maximum frame height minus 1 for the
* frames represented by this sequence header.
*/
struct v4l2_ctrl_av1_sequence {
__u32 flags;
__u8 seq_profile;
__u8 order_hint_bits;
__u8 bit_depth;
__u8 reserved;
__u16 max_frame_width_minus_1;
__u16 max_frame_height_minus_1;
};
#define V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY (V4L2_CID_CODEC_STATELESS_BASE + 501)
/**
* struct v4l2_ctrl_av1_tile_group_entry - AV1 Tile Group entry
*
* Represents a single AV1 tile inside an AV1 Tile Group. Note that MiRowStart,
* MiRowEnd, MiColStart and MiColEnd can be retrieved from struct
* v4l2_av1_tile_info in struct v4l2_ctrl_av1_frame using tile_row and
* tile_col. See section 6.10.1 "General tile group OBU semantics" for more
* details.
*
* @tile_offset: offset from the OBU data, i.e. where the coded tile data
* actually starts.
* @tile_size: specifies the size in bytes of the coded tile. Equivalent to
* "TileSize" in the AV1 Specification.
* @tile_row: specifies the row of the current tile. Equivalent to "TileRow" in
* the AV1 Specification.
* @tile_col: specifies the col of the current tile. Equivalent to "TileCol" in
* the AV1 Specification.
*/
struct v4l2_ctrl_av1_tile_group_entry {
__u32 tile_offset;
__u32 tile_size;
__u32 tile_row;
__u32 tile_col;
};
/**
* enum v4l2_av1_warp_model - AV1 Warp Model as described in section 3
* "Symbols and abbreviated terms" of the AV1 Specification.
*
* @V4L2_AV1_WARP_MODEL_IDENTITY: Warp model is just an identity transform.
* @V4L2_AV1_WARP_MODEL_TRANSLATION: Warp model is a pure translation.
* @V4L2_AV1_WARP_MODEL_ROTZOOM: Warp model is a rotation + symmetric zoom +
* translation.
* @V4L2_AV1_WARP_MODEL_AFFINE: Warp model is a general affine transform.
*/
enum v4l2_av1_warp_model {
V4L2_AV1_WARP_MODEL_IDENTITY = 0,
V4L2_AV1_WARP_MODEL_TRANSLATION = 1,
V4L2_AV1_WARP_MODEL_ROTZOOM = 2,
V4L2_AV1_WARP_MODEL_AFFINE = 3,
};
/**
* enum v4l2_av1_reference_frame - AV1 reference frames
*
* @V4L2_AV1_REF_INTRA_FRAME: Intra Frame Reference
* @V4L2_AV1_REF_LAST_FRAME: Last Reference Frame
* @V4L2_AV1_REF_LAST2_FRAME: Last2 Reference Frame
* @V4L2_AV1_REF_LAST3_FRAME: Last3 Reference Frame
* @V4L2_AV1_REF_GOLDEN_FRAME: Golden Reference Frame
* @V4L2_AV1_REF_BWDREF_FRAME: BWD Reference Frame
* @V4L2_AV1_REF_ALTREF2_FRAME: Alternative2 Reference Frame
* @V4L2_AV1_REF_ALTREF_FRAME: Alternative Reference Frame
*/
enum v4l2_av1_reference_frame {
V4L2_AV1_REF_INTRA_FRAME = 0,
V4L2_AV1_REF_LAST_FRAME = 1,
V4L2_AV1_REF_LAST2_FRAME = 2,
V4L2_AV1_REF_LAST3_FRAME = 3,
V4L2_AV1_REF_GOLDEN_FRAME = 4,
V4L2_AV1_REF_BWDREF_FRAME = 5,
V4L2_AV1_REF_ALTREF2_FRAME = 6,
V4L2_AV1_REF_ALTREF_FRAME = 7,
};
#define V4L2_AV1_GLOBAL_MOTION_IS_INVALID(ref) (1 << (ref))
#define V4L2_AV1_GLOBAL_MOTION_FLAG_IS_GLOBAL 0x1
#define V4L2_AV1_GLOBAL_MOTION_FLAG_IS_ROT_ZOOM 0x2
#define V4L2_AV1_GLOBAL_MOTION_FLAG_IS_TRANSLATION 0x4
/**
* struct v4l2_av1_global_motion - AV1 Global Motion parameters as described in
* section 6.8.17 "Global motion params semantics" of the AV1 specification.
*
* @flags: A bitfield containing the flags per reference frame. See
* V4L2_AV1_GLOBAL_MOTION_FLAG_{}
* @type: The type of global motion transform used.
* @params: this field has the same meaning as "gm_params" in the AV1
* specification.
* @invalid: bitfield indicating whether the global motion params are invalid
* for a given reference frame. See section 7.11.3.6 Setup shear process and
* the variable "warpValid". Use V4L2_AV1_GLOBAL_MOTION_IS_INVALID(ref) to
* create a suitable mask.
* @reserved: padding field. Should be zeroed by applications.
*/
struct v4l2_av1_global_motion {
__u8 flags[V4L2_AV1_TOTAL_REFS_PER_FRAME];
enum v4l2_av1_warp_model type[V4L2_AV1_TOTAL_REFS_PER_FRAME];
__s32 params[V4L2_AV1_TOTAL_REFS_PER_FRAME][6];
__u8 invalid;
__u8 reserved[3];
};
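/*
 * Editor's sketch (not part of the UAPI): querying the @invalid bitfield
 * with the V4L2_AV1_GLOBAL_MOTION_IS_INVALID() helper above. The function
 * name is hypothetical.
 */
static inline int example_av1_gm_invalid(const struct v4l2_av1_global_motion *gm,
					 enum v4l2_av1_reference_frame ref)
{
	return !!(gm->invalid & V4L2_AV1_GLOBAL_MOTION_IS_INVALID(ref));
}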
/**
* enum v4l2_av1_frame_restoration_type - AV1 Frame Restoration Type
* @V4L2_AV1_FRAME_RESTORE_NONE: no filtering is applied.
* @V4L2_AV1_FRAME_RESTORE_WIENER: Wiener filter process is invoked.
* @V4L2_AV1_FRAME_RESTORE_SGRPROJ: self guided filter process is invoked.
* @V4L2_AV1_FRAME_RESTORE_SWITCHABLE: restoration filter is switchable.
*/
enum v4l2_av1_frame_restoration_type {
V4L2_AV1_FRAME_RESTORE_NONE = 0,
V4L2_AV1_FRAME_RESTORE_WIENER = 1,
V4L2_AV1_FRAME_RESTORE_SGRPROJ = 2,
V4L2_AV1_FRAME_RESTORE_SWITCHABLE = 3,
};
#define V4L2_AV1_LOOP_RESTORATION_FLAG_USES_LR 0x1
#define V4L2_AV1_LOOP_RESTORATION_FLAG_USES_CHROMA_LR 0x2
/**
* struct v4l2_av1_loop_restoration - AV1 Loop Restoration as described in
* section 6.10.15 "Loop restoration params semantics" of the AV1 specification.
*
* @flags: See V4L2_AV1_LOOP_RESTORATION_FLAG_{}.
* @lr_unit_shift: specifies if the luma restoration size should be halved.
* @lr_uv_shift: specifies if the chroma size should be half the luma size.
* @reserved: padding field. Should be zeroed by applications.
* @frame_restoration_type: specifies the type of restoration used for each
* plane. See enum v4l2_av1_frame_restoration_type.
* @loop_restoration_size: specifies the size of loop restoration units in units
* of samples in the current plane.
*/
struct v4l2_av1_loop_restoration {
__u8 flags;
__u8 lr_unit_shift;
__u8 lr_uv_shift;
__u8 reserved;
enum v4l2_av1_frame_restoration_type frame_restoration_type[V4L2_AV1_NUM_PLANES_MAX];
__u32 loop_restoration_size[V4L2_AV1_MAX_NUM_PLANES];
};
/**
* struct v4l2_av1_cdef - AV1 CDEF params semantics as described in section
* 6.10.14 "CDEF params semantics" of the AV1 specification
*
* @damping_minus_3: controls the amount of damping in the deringing filter.
* @bits: specifies the number of bits needed to specify which CDEF filter to
* apply.
* @y_pri_strength: specifies the strength of the primary filter.
* @y_sec_strength: specifies the strength of the secondary filter.
* @uv_pri_strength: specifies the strength of the primary filter.
* @uv_sec_strength: specifies the strength of the secondary filter.
*/
struct v4l2_av1_cdef {
__u8 damping_minus_3;
__u8 bits;
__u8 y_pri_strength[V4L2_AV1_CDEF_MAX];
__u8 y_sec_strength[V4L2_AV1_CDEF_MAX];
__u8 uv_pri_strength[V4L2_AV1_CDEF_MAX];
__u8 uv_sec_strength[V4L2_AV1_CDEF_MAX];
};
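/*
 * Editor's sketch (not part of the UAPI): the CDEF damping value is stored
 * offset by 3, so the effective damping is damping_minus_3 + 3. The
 * function name is hypothetical.
 */
static inline unsigned int example_av1_cdef_damping(const struct v4l2_av1_cdef *cdef)
{
	return cdef->damping_minus_3 + 3;
}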
#define V4L2_AV1_SEGMENTATION_FLAG_ENABLED 0x1
#define V4L2_AV1_SEGMENTATION_FLAG_UPDATE_MAP 0x2
#define V4L2_AV1_SEGMENTATION_FLAG_TEMPORAL_UPDATE 0x4
#define V4L2_AV1_SEGMENTATION_FLAG_UPDATE_DATA 0x8
#define V4L2_AV1_SEGMENTATION_FLAG_SEG_ID_PRE_SKIP 0x10
/**
* enum v4l2_av1_segment_feature - AV1 segment features as described in section
* 3 "Symbols and abbreviated terms" of the AV1 specification.
*
* @V4L2_AV1_SEG_LVL_ALT_Q: Index for quantizer segment feature.
* @V4L2_AV1_SEG_LVL_ALT_LF_Y_V: Index for vertical luma loop filter segment
* feature.
* @V4L2_AV1_SEG_LVL_REF_FRAME: Index for reference frame segment feature.
* @V4L2_AV1_SEG_LVL_REF_SKIP: Index for skip segment feature.
* @V4L2_AV1_SEG_LVL_REF_GLOBALMV: Index for global mv feature.
* @V4L2_AV1_SEG_LVL_MAX: Number of segment features.
*/
enum v4l2_av1_segment_feature {
V4L2_AV1_SEG_LVL_ALT_Q = 0,
V4L2_AV1_SEG_LVL_ALT_LF_Y_V = 1,
V4L2_AV1_SEG_LVL_REF_FRAME = 5,
V4L2_AV1_SEG_LVL_REF_SKIP = 6,
V4L2_AV1_SEG_LVL_REF_GLOBALMV = 7,
V4L2_AV1_SEG_LVL_MAX = 8
};
#define V4L2_AV1_SEGMENT_FEATURE_ENABLED(id) (1 << (id))
/**
* struct v4l2_av1_segmentation - AV1 Segmentation params as defined in section
* 6.8.13 "Segmentation params semantics" of the AV1 specification.
*
* @flags: see V4L2_AV1_SEGMENTATION_FLAG_{}.
* @last_active_seg_id: indicates the highest numbered segment id that has some
* enabled feature. This is used when decoding the segment id to only decode
* choices corresponding to used segments.
* @feature_enabled: bitmask defining which features are enabled in each
* segment. Use V4L2_AV1_SEGMENT_FEATURE_ENABLED to build a suitable mask.
* @feature_data: data attached to each feature. Data entry is only valid if the
* feature is enabled
*/
struct v4l2_av1_segmentation {
__u8 flags;
__u8 last_active_seg_id;
__u8 feature_enabled[V4L2_AV1_MAX_SEGMENTS];
__s16 feature_data[V4L2_AV1_MAX_SEGMENTS][V4L2_AV1_SEG_LVL_MAX];
};
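/*
 * Editor's sketch (not part of the UAPI): checking a per-segment feature
 * bit with V4L2_AV1_SEGMENT_FEATURE_ENABLED(). The function name is
 * hypothetical.
 */
static inline int example_av1_seg_feature_active(const struct v4l2_av1_segmentation *seg,
						 __u8 segment,
						 enum v4l2_av1_segment_feature feature)
{
	return !!(seg->feature_enabled[segment] & V4L2_AV1_SEGMENT_FEATURE_ENABLED(feature));
}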
#define V4L2_AV1_LOOP_FILTER_FLAG_DELTA_ENABLED 0x1
#define V4L2_AV1_LOOP_FILTER_FLAG_DELTA_UPDATE 0x2
#define V4L2_AV1_LOOP_FILTER_FLAG_DELTA_LF_PRESENT 0x4
#define V4L2_AV1_LOOP_FILTER_FLAG_DELTA_LF_MULTI 0x8
/**
* struct v4l2_av1_loop_filter - AV1 Loop filter params as defined in section
* 6.8.10 "Loop filter semantics" and 6.8.16 "Loop filter delta parameters
* semantics" of the AV1 specification.
*
* @flags: see V4L2_AV1_LOOP_FILTER_FLAG_{}
* @level: an array containing loop filter strength values. Different loop
* filter strength values from the array are used depending on the image plane
* being filtered, and the edge direction (vertical or horizontal) being
* filtered.
* @sharpness: indicates the sharpness level. The loop_filter_level and
* loop_filter_sharpness together determine when a block edge is filtered, and
* by how much the filtering can change the sample values. The loop filter
* process is described in section 7.14 of the AV1 specification.
* @ref_deltas: contains the adjustment needed for the filter level based on the
* chosen reference frame. If this syntax element is not present, it maintains
* its previous value.
* @mode_deltas: contains the adjustment needed for the filter level based on
* the chosen mode. If this syntax element is not present, it maintains its
* previous value.
* @delta_lf_res: specifies the left shift which should be applied to decoded
* loop filter delta values.
*/
struct v4l2_av1_loop_filter {
__u8 flags;
__u8 level[4];
__u8 sharpness;
__s8 ref_deltas[V4L2_AV1_TOTAL_REFS_PER_FRAME];
__s8 mode_deltas[2];
__u8 delta_lf_res;
};
#define V4L2_AV1_QUANTIZATION_FLAG_DIFF_UV_DELTA 0x1
#define V4L2_AV1_QUANTIZATION_FLAG_USING_QMATRIX 0x2
#define V4L2_AV1_QUANTIZATION_FLAG_DELTA_Q_PRESENT 0x4
/**
* struct v4l2_av1_quantization - AV1 Quantization params as defined in section
* 6.8.11 "Quantization params semantics" of the AV1 specification.
*
* @flags: see V4L2_AV1_QUANTIZATION_FLAG_{}
* @base_q_idx: indicates the base frame qindex. This is used for Y AC
* coefficients and as the base value for the other quantizers.
* @delta_q_y_dc: indicates the Y DC quantizer relative to base_q_idx.
* @delta_q_u_dc: indicates the U DC quantizer relative to base_q_idx.
* @delta_q_u_ac: indicates the U AC quantizer relative to base_q_idx.
* @delta_q_v_dc: indicates the V DC quantizer relative to base_q_idx.
* @delta_q_v_ac: indicates the V AC quantizer relative to base_q_idx.
* @qm_y: specifies the level in the quantizer matrix that should be used for
* luma plane decoding.
* @qm_u: specifies the level in the quantizer matrix that should be used for
* chroma U plane decoding.
* @qm_v: specifies the level in the quantizer matrix that should be used for
* chroma V plane decoding.
* @delta_q_res: specifies the left shift which should be applied to decoded
* quantizer index delta values.
*/
struct v4l2_av1_quantization {
__u8 flags;
__u8 base_q_idx;
__s8 delta_q_y_dc;
__s8 delta_q_u_dc;
__s8 delta_q_u_ac;
__s8 delta_q_v_dc;
__s8 delta_q_v_ac;
__u8 qm_y;
__u8 qm_u;
__u8 qm_v;
__u8 delta_q_res;
};
#define V4L2_AV1_TILE_INFO_FLAG_UNIFORM_TILE_SPACING 0x1
/**
* struct v4l2_av1_tile_info - AV1 Tile info as defined in section 6.8.14 "Tile
* info semantics" of the AV1 specification.
*
* @flags: see V4L2_AV1_TILE_INFO_FLAG_{}
* @context_update_tile_id: specifies which tile to use for the CDF update.
* @tile_rows: specifies the number of tiles down the frame.
* @tile_cols: specifies the number of tiles across the frame.
* @mi_col_starts: an array specifying the start column (in units of 4x4 luma
* samples) for each tile across the image.
* @mi_row_starts: an array specifying the start row (in units of 4x4 luma
* samples) for each tile down the image.
* @width_in_sbs_minus_1: specifies the width of a tile minus 1 in units of
* superblocks.
* @height_in_sbs_minus_1: specifies the height of a tile minus 1 in units of
* superblocks.
* @tile_size_bytes: specifies the number of bytes needed to code each tile
* size.
* @reserved: padding field. Should be zeroed by applications.
*/
struct v4l2_av1_tile_info {
__u8 flags;
__u8 context_update_tile_id;
__u8 tile_cols;
__u8 tile_rows;
__u32 mi_col_starts[V4L2_AV1_MAX_TILE_COLS + 1];
__u32 mi_row_starts[V4L2_AV1_MAX_TILE_ROWS + 1];
__u32 width_in_sbs_minus_1[V4L2_AV1_MAX_TILE_COLS];
__u32 height_in_sbs_minus_1[V4L2_AV1_MAX_TILE_ROWS];
__u8 tile_size_bytes;
__u8 reserved[3];
};
/**
* enum v4l2_av1_frame_type - AV1 Frame Type
*
* @V4L2_AV1_KEY_FRAME: Key frame
* @V4L2_AV1_INTER_FRAME: Inter frame
* @V4L2_AV1_INTRA_ONLY_FRAME: Intra-only frame
* @V4L2_AV1_SWITCH_FRAME: Switch frame
*/
enum v4l2_av1_frame_type {
V4L2_AV1_KEY_FRAME = 0,
V4L2_AV1_INTER_FRAME = 1,
V4L2_AV1_INTRA_ONLY_FRAME = 2,
V4L2_AV1_SWITCH_FRAME = 3
};
/**
* enum v4l2_av1_interpolation_filter - AV1 interpolation filter types
*
* @V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP: eight tap filter
* @V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP_SMOOTH: eight tap smooth filter
* @V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP_SHARP: eight tap sharp filter
* @V4L2_AV1_INTERPOLATION_FILTER_BILINEAR: bilinear filter
* @V4L2_AV1_INTERPOLATION_FILTER_SWITCHABLE: filter selection is signaled at
* the block level
*
* See section 6.8.9 "Interpolation filter semantics" of the AV1 specification
* for more details.
*/
enum v4l2_av1_interpolation_filter {
V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP = 0,
V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP_SMOOTH = 1,
V4L2_AV1_INTERPOLATION_FILTER_EIGHTTAP_SHARP = 2,
V4L2_AV1_INTERPOLATION_FILTER_BILINEAR = 3,
V4L2_AV1_INTERPOLATION_FILTER_SWITCHABLE = 4,
};
/**
* enum v4l2_av1_tx_mode - AV1 Tx mode as described in section 6.8.21 "TX mode
* semantics" of the AV1 specification.
* @V4L2_AV1_TX_MODE_ONLY_4X4: the inverse transform will use only 4x4
* transforms
* @V4L2_AV1_TX_MODE_LARGEST: the inverse transform will use the largest
* transform size that fits inside the block
* @V4L2_AV1_TX_MODE_SELECT: the choice of transform size is specified
* explicitly for each block.
*/
enum v4l2_av1_tx_mode {
V4L2_AV1_TX_MODE_ONLY_4X4 = 0,
V4L2_AV1_TX_MODE_LARGEST = 1,
V4L2_AV1_TX_MODE_SELECT = 2
};
#define V4L2_AV1_FRAME_FLAG_SHOW_FRAME 0x00000001
#define V4L2_AV1_FRAME_FLAG_SHOWABLE_FRAME 0x00000002
#define V4L2_AV1_FRAME_FLAG_ERROR_RESILIENT_MODE 0x00000004
#define V4L2_AV1_FRAME_FLAG_DISABLE_CDF_UPDATE 0x00000008
#define V4L2_AV1_FRAME_FLAG_ALLOW_SCREEN_CONTENT_TOOLS 0x00000010
#define V4L2_AV1_FRAME_FLAG_FORCE_INTEGER_MV 0x00000020
#define V4L2_AV1_FRAME_FLAG_ALLOW_INTRABC 0x00000040
#define V4L2_AV1_FRAME_FLAG_USE_SUPERRES 0x00000080
#define V4L2_AV1_FRAME_FLAG_ALLOW_HIGH_PRECISION_MV 0x00000100
#define V4L2_AV1_FRAME_FLAG_IS_MOTION_MODE_SWITCHABLE 0x00000200
#define V4L2_AV1_FRAME_FLAG_USE_REF_FRAME_MVS 0x00000400
#define V4L2_AV1_FRAME_FLAG_DISABLE_FRAME_END_UPDATE_CDF 0x00000800
#define V4L2_AV1_FRAME_FLAG_ALLOW_WARPED_MOTION 0x00001000
#define V4L2_AV1_FRAME_FLAG_REFERENCE_SELECT 0x00002000
#define V4L2_AV1_FRAME_FLAG_REDUCED_TX_SET 0x00004000
#define V4L2_AV1_FRAME_FLAG_SKIP_MODE_ALLOWED 0x00008000
#define V4L2_AV1_FRAME_FLAG_SKIP_MODE_PRESENT 0x00010000
#define V4L2_AV1_FRAME_FLAG_FRAME_SIZE_OVERRIDE 0x00020000
#define V4L2_AV1_FRAME_FLAG_BUFFER_REMOVAL_TIME_PRESENT 0x00040000
#define V4L2_AV1_FRAME_FLAG_FRAME_REFS_SHORT_SIGNALING 0x00080000
#define V4L2_CID_STATELESS_AV1_FRAME (V4L2_CID_CODEC_STATELESS_BASE + 502)
/**
* struct v4l2_ctrl_av1_frame - Represents an AV1 Frame Header OBU.
*
* @tile_info: tile info
* @quantization: quantization params
* @segmentation: segmentation params
* @superres_denom: the denominator for the upscaling ratio.
* @loop_filter: loop filter params
* @cdef: cdef params
* @skip_mode_frame: specifies the frames to use for compound prediction when
* skip_mode is equal to 1.
* @primary_ref_frame: specifies which reference frame contains the CDF values
* and other state that should be loaded at the start of the frame.
* @loop_restoration: loop restoration params
* @global_motion: global motion params
* @flags: see V4L2_AV1_FRAME_FLAG_{}
* @frame_type: specifies the AV1 frame type
* @order_hint: specifies OrderHintBits least significant bits of the expected
* output order for this frame.
* @upscaled_width: the upscaled width.
* @interpolation_filter: specifies the filter selection used for performing
* inter prediction.
* @tx_mode: specifies how the transform size is determined.
* @frame_width_minus_1: add 1 to get the frame's width.
* @frame_height_minus_1: add 1 to get the frame's height.
* @render_width_minus_1: add 1 to get the render width of the frame in luma
* samples.
* @render_height_minus_1: add 1 to get the render height of the frame in luma
* samples.
* @current_frame_id: specifies the frame id number for the current frame. Frame
* id numbers are additional information that do not affect the decoding
* process, but provide decoders with a way of detecting missing reference
* frames so that appropriate action can be taken.
* @buffer_removal_time: specifies the frame removal time in units of DecCT clock
* ticks counted from the removal time of the last random access point for
* operating point opNum.
* @reserved: padding field. Should be zeroed by applications.
* @order_hints: specifies the expected output order hint for each reference
* frame. This field corresponds to the OrderHints variable from the
* specification (section 5.9.2 "Uncompressed header syntax"). As such, this is
* only used for non-intra frames and ignored otherwise. order_hints[0] is
* always ignored.
* @reference_frame_ts: the V4L2 timestamp of the reference frame slots.
* @ref_frame_idx: used to index into @reference_frame_ts when decoding
* inter-frames. The meaning of this array is the same as in the specification.
* The timestamp refers to the timestamp field in struct v4l2_buffer. Use
* v4l2_timeval_to_ns() to convert the struct timeval to a __u64.
* @refresh_frame_flags: contains a bitmask that specifies which reference frame
* slots will be updated with the current frame after it is decoded.
*/
struct v4l2_ctrl_av1_frame {
struct v4l2_av1_tile_info tile_info;
struct v4l2_av1_quantization quantization;
__u8 superres_denom;
struct v4l2_av1_segmentation segmentation;
struct v4l2_av1_loop_filter loop_filter;
struct v4l2_av1_cdef cdef;
__u8 skip_mode_frame[2];
__u8 primary_ref_frame;
struct v4l2_av1_loop_restoration loop_restoration;
struct v4l2_av1_global_motion global_motion;
__u32 flags;
enum v4l2_av1_frame_type frame_type;
__u32 order_hint;
__u32 upscaled_width;
enum v4l2_av1_interpolation_filter interpolation_filter;
enum v4l2_av1_tx_mode tx_mode;
__u32 frame_width_minus_1;
__u32 frame_height_minus_1;
__u16 render_width_minus_1;
__u16 render_height_minus_1;
__u32 current_frame_id;
__u32 buffer_removal_time[V4L2_AV1_MAX_OPERATING_POINTS];
__u8 reserved[4];
__u32 order_hints[V4L2_AV1_TOTAL_REFS_PER_FRAME];
__u64 reference_frame_ts[V4L2_AV1_TOTAL_REFS_PER_FRAME];
__s8 ref_frame_idx[V4L2_AV1_REFS_PER_FRAME];
__u8 refresh_frame_flags;
};
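/*
 * Editor's sketch (not part of the UAPI): @refresh_frame_flags is a bitmask
 * over the 8 reference slots; bit n set means slot n is overwritten with
 * the current frame once decoded. The function name is hypothetical.
 */
static inline int example_av1_slot_refreshed(const struct v4l2_ctrl_av1_frame *f,
					     unsigned int slot)
{
	return !!(f->refresh_frame_flags & (1U << slot));
}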
#define V4L2_AV1_FILM_GRAIN_FLAG_APPLY_GRAIN 0x1
#define V4L2_AV1_FILM_GRAIN_FLAG_UPDATE_GRAIN 0x2
#define V4L2_AV1_FILM_GRAIN_FLAG_CHROMA_SCALING_FROM_LUMA 0x4
#define V4L2_AV1_FILM_GRAIN_FLAG_OVERLAP 0x8
#define V4L2_AV1_FILM_GRAIN_FLAG_CLIP_TO_RESTRICTED_RANGE 0x10
#define V4L2_CID_STATELESS_AV1_FILM_GRAIN (V4L2_CID_CODEC_STATELESS_BASE + 505)
/**
* struct v4l2_ctrl_av1_film_grain - AV1 Film Grain parameters.
*
* Film grain parameters as specified by section 6.8.20 of the AV1 Specification.
*
* @flags: see V4L2_AV1_FILM_GRAIN_{}.
* @cr_mult: represents a multiplier for the cr component used in derivation of
* the input index to the cr component scaling function.
* @grain_seed: specifies the starting value for the pseudo-random numbers used
* during film grain synthesis.
* @film_grain_params_ref_idx: indicates which reference frame contains the
* film grain parameters to be used for this frame.
* @num_y_points: specifies the number of points for the piece-wise linear
* scaling function of the luma component.
* @point_y_value: represents the x (luma value) coordinate for the i-th point
* of the piecewise linear scaling function for luma component. The values are
* signaled on the scale of 0..255. In case of 10 bit video, these values
* correspond to luma values divided by 4. In case of 12 bit video, these values
* correspond to luma values divided by 16.
* @point_y_scaling: represents the scaling (output) value for the i-th point
* of the piecewise linear scaling function for luma component.
* @num_cb_points: specifies the number of points for the piece-wise linear
* scaling function of the cb component.
* @point_cb_value: represents the x coordinate for the i-th point of the
* piece-wise linear scaling function for cb component. The values are signaled
* on the scale of 0..255.
* @point_cb_scaling: represents the scaling (output) value for the i-th point
* of the piecewise linear scaling function for cb component.
* @num_cr_points: specifies the number of points for the piece-wise
* linear scaling function of the cr component.
* @point_cr_value: represents the x coordinate for the i-th point of the
* piece-wise linear scaling function for cr component. The values are signaled
* on the scale of 0..255.
* @point_cr_scaling: represents the scaling (output) value for the i-th point
* of the piecewise linear scaling function for cr component.
* @grain_scaling_minus_8: represents the shift – 8 applied to the values of the
* chroma component. The grain_scaling_minus_8 can take values of 0..3 and
* determines the range and quantization step of the standard deviation of film
* grain.
* @ar_coeff_lag: specifies the number of auto-regressive coefficients for luma
* and chroma.
* @ar_coeffs_y_plus_128: specifies auto-regressive coefficients used for the Y
* plane.
* @ar_coeffs_cb_plus_128: specifies auto-regressive coefficients used for the U
* plane.
* @ar_coeffs_cr_plus_128: specifies auto-regressive coefficients used for the V
* plane.
* @ar_coeff_shift_minus_6: specifies the range of the auto-regressive
* coefficients. Values of 0, 1, 2, and 3 correspond to the ranges for
* auto-regressive coefficients of [-2, 2), [-1, 1), [-0.5, 0.5) and [-0.25,
* 0.25) respectively.
* @grain_scale_shift: specifies how much the Gaussian random numbers should be
* scaled down during the grain synthesis process.
* @cb_mult: represents a multiplier for the cb component used in derivation of
* the input index to the cb component scaling function.
* @cb_luma_mult: represents a multiplier for the average luma component used in
* derivation of the input index to the cb component scaling function.
* @cr_luma_mult: represents a multiplier for the average luma component used in
* derivation of the input index to the cr component scaling function.
* @cb_offset: represents an offset used in derivation of the input index to the
* cb component scaling function.
* @cr_offset: represents an offset used in derivation of the input index to the
* cr component scaling function.
* @reserved: padding field. Should be zeroed by applications.
*/
struct v4l2_ctrl_av1_film_grain {
__u8 flags;
__u8 cr_mult;
__u16 grain_seed;
__u8 film_grain_params_ref_idx;
__u8 num_y_points;
__u8 point_y_value[V4L2_AV1_MAX_NUM_Y_POINTS];
__u8 point_y_scaling[V4L2_AV1_MAX_NUM_Y_POINTS];
__u8 num_cb_points;
__u8 point_cb_value[V4L2_AV1_MAX_NUM_CB_POINTS];
__u8 point_cb_scaling[V4L2_AV1_MAX_NUM_CB_POINTS];
__u8 num_cr_points;
__u8 point_cr_value[V4L2_AV1_MAX_NUM_CR_POINTS];
__u8 point_cr_scaling[V4L2_AV1_MAX_NUM_CR_POINTS];
__u8 grain_scaling_minus_8;
__u8 ar_coeff_lag;
__u8 ar_coeffs_y_plus_128[V4L2_AV1_AR_COEFFS_SIZE];
__u8 ar_coeffs_cb_plus_128[V4L2_AV1_AR_COEFFS_SIZE];
__u8 ar_coeffs_cr_plus_128[V4L2_AV1_AR_COEFFS_SIZE];
__u8 ar_coeff_shift_minus_6;
__u8 grain_scale_shift;
__u8 cb_mult;
__u8 cb_luma_mult;
__u8 cr_luma_mult;
__u16 cb_offset;
__u16 cr_offset;
__u8 reserved[4];
};
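/*
 * Editor's sketch (not part of the UAPI): several film grain fields store
 * their value offset by a constant, per the field semantics above: the
 * chroma shift is grain_scaling_minus_8 + 8 and the auto-regressive
 * coefficient shift is ar_coeff_shift_minus_6 + 6. The function name is
 * hypothetical.
 */
static inline void example_av1_film_grain_shifts(const struct v4l2_ctrl_av1_film_grain *fg,
						 unsigned int *grain_scaling,
						 unsigned int *ar_coeff_shift)
{
	*grain_scaling = fg->grain_scaling_minus_8 + 8;
	*ar_coeff_shift = fg->ar_coeff_shift_minus_6 + 6;
}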
/* MPEG-compression definitions kept for backwards compatibility */
#define V4L2_CTRL_CLASS_MPEG V4L2_CTRL_CLASS_CODEC
#define V4L2_CID_MPEG_CLASS V4L2_CID_CODEC_CLASS
#define V4L2_CID_MPEG_BASE V4L2_CID_CODEC_BASE
#define V4L2_CID_MPEG_CX2341X_BASE V4L2_CID_CODEC_CX2341X_BASE
#define V4L2_CID_MPEG_MFC51_BASE V4L2_CID_CODEC_MFC51_BASE
#endif
|
0 | repos/libcamera/include | repos/libcamera/include/linux/drm_fourcc.h | /*
* Copyright 2011 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef DRM_FOURCC_H
#define DRM_FOURCC_H
#include "drm.h"
#if defined(__cplusplus)
extern "C" {
#endif
/**
* DOC: overview
*
* In the DRM subsystem, framebuffer pixel formats are described using the
* fourcc codes defined in `include/uapi/drm/drm_fourcc.h`. In addition to the
* fourcc code, a Format Modifier may optionally be provided, in order to
* further describe the buffer's format - for example tiling or compression.
*
* Format Modifiers
* ----------------
*
* Format modifiers are used in conjunction with a fourcc code, forming a
* unique fourcc:modifier pair. This format:modifier pair must fully define the
* format and data layout of the buffer, and should be the only way to describe
* that particular buffer.
*
* Having multiple fourcc:modifier pairs which describe the same layout should
* be avoided, as such aliases run the risk of different drivers exposing
* different names for the same data format, forcing userspace to understand
* that they are aliases.
*
* Format modifiers may change any property of the buffer, including the number
* of planes and/or the required allocation size. Format modifiers are
* vendor-namespaced, and as such the relationship between a fourcc code and a
* modifier is specific to the modifier being used. For example, some modifiers
* may preserve meaning - such as number of planes - from the fourcc code,
* whereas others may not.
*
* Modifiers must uniquely encode buffer layout. In other words, a buffer must
* match only a single modifier. A modifier must not be a subset of layouts of
* another modifier. For instance, it's incorrect to encode pitch alignment in
* a modifier: a buffer may match a 64-pixel aligned modifier and a 32-pixel
* aligned modifier. That said, modifiers can have implicit minimal
* requirements.
*
* For modifiers where the combination of fourcc code and modifier can alias,
* a canonical pair needs to be defined and used by all drivers. Preferred
* combinations are also encouraged where all combinations might lead to
* confusion and unnecessarily reduced interoperability. An example for the
* latter is AFBC, where the ABGR layouts are preferred over ARGB layouts.
*
* There are two kinds of modifier users:
*
* - Kernel and user-space drivers: for drivers it's important that modifiers
* don't alias, otherwise two drivers might support the same format but use
* different aliases, preventing them from sharing buffers in an efficient
* format.
* - Higher-level programs interfacing with KMS/GBM/EGL/Vulkan/etc: these users
* see modifiers as opaque tokens they can check for equality and intersect.
* These users must not need to reason about the modifier value
* (i.e. they are not expected to extract information out of the modifier).
*
* Vendors should document their modifier usage in as much detail as
* possible, to ensure maximum compatibility across devices, drivers and
* applications.
*
* The authoritative list of format modifier codes is found in
* `include/uapi/drm/drm_fourcc.h`
*
* Open Source User Waiver
* -----------------------
*
* Because this is the authoritative source for pixel formats and modifiers
* referenced by GL, Vulkan extensions and other standards and hence used both
* by open source and closed source driver stacks, the usual requirement for an
* upstream in-kernel or open source userspace user does not apply.
*
* To ensure, as much as feasible, compatibility across stacks and avoid
* confusion with incompatible enumerations, stakeholders for all relevant driver
* stacks should approve additions.
*/
#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
((__u32)(c) << 16) | ((__u32)(d) << 24))
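/*
 * Worked example (editor's note): fourcc_code('X', 'R', '2', '4'), i.e.
 * DRM_FORMAT_XRGB8888 below, packs the ASCII bytes little endian:
 *
 *	'X' = 0x58, 'R' = 0x52, '2' = 0x32, '4' = 0x34
 *	0x58 | (0x52 << 8) | (0x32 << 16) | (0x34 << 24) = 0x34325258
 */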
#define DRM_FORMAT_BIG_ENDIAN (1U<<31) /* format is big endian instead of little endian */
/* Reserve 0 for the invalid format specifier */
#define DRM_FORMAT_INVALID 0
/* color index */
#define DRM_FORMAT_C1 fourcc_code('C', '1', ' ', ' ') /* [7:0] C0:C1:C2:C3:C4:C5:C6:C7 1:1:1:1:1:1:1:1 eight pixels/byte */
#define DRM_FORMAT_C2 fourcc_code('C', '2', ' ', ' ') /* [7:0] C0:C1:C2:C3 2:2:2:2 four pixels/byte */
#define DRM_FORMAT_C4 fourcc_code('C', '4', ' ', ' ') /* [7:0] C0:C1 4:4 two pixels/byte */
#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
/* 1 bpp Darkness (inverse relationship between channel value and brightness) */
#define DRM_FORMAT_D1 fourcc_code('D', '1', ' ', ' ') /* [7:0] D0:D1:D2:D3:D4:D5:D6:D7 1:1:1:1:1:1:1:1 eight pixels/byte */
/* 2 bpp Darkness (inverse relationship between channel value and brightness) */
#define DRM_FORMAT_D2 fourcc_code('D', '2', ' ', ' ') /* [7:0] D0:D1:D2:D3 2:2:2:2 four pixels/byte */
/* 4 bpp Darkness (inverse relationship between channel value and brightness) */
#define DRM_FORMAT_D4 fourcc_code('D', '4', ' ', ' ') /* [7:0] D0:D1 4:4 two pixels/byte */
/* 8 bpp Darkness (inverse relationship between channel value and brightness) */
#define DRM_FORMAT_D8 fourcc_code('D', '8', ' ', ' ') /* [7:0] D */
/* 1 bpp Red (direct relationship between channel value and brightness) */
#define DRM_FORMAT_R1 fourcc_code('R', '1', ' ', ' ') /* [7:0] R0:R1:R2:R3:R4:R5:R6:R7 1:1:1:1:1:1:1:1 eight pixels/byte */
/* 2 bpp Red (direct relationship between channel value and brightness) */
#define DRM_FORMAT_R2 fourcc_code('R', '2', ' ', ' ') /* [7:0] R0:R1:R2:R3 2:2:2:2 four pixels/byte */
/* 4 bpp Red (direct relationship between channel value and brightness) */
#define DRM_FORMAT_R4 fourcc_code('R', '4', ' ', ' ') /* [7:0] R0:R1 4:4 two pixels/byte */
/* 8 bpp Red (direct relationship between channel value and brightness) */
#define DRM_FORMAT_R8 fourcc_code('R', '8', ' ', ' ') /* [7:0] R */
/* 10 bpp Red (direct relationship between channel value and brightness) */
#define DRM_FORMAT_R10 fourcc_code('R', '1', '0', ' ') /* [15:0] x:R 6:10 little endian */
/* 12 bpp Red (direct relationship between channel value and brightness) */
#define DRM_FORMAT_R12 fourcc_code('R', '1', '2', ' ') /* [15:0] x:R 4:12 little endian */
/* 16 bpp Red (direct relationship between channel value and brightness) */
#define DRM_FORMAT_R16 fourcc_code('R', '1', '6', ' ') /* [15:0] R little endian */
/* 16 bpp RG */
#define DRM_FORMAT_RG88 fourcc_code('R', 'G', '8', '8') /* [15:0] R:G 8:8 little endian */
#define DRM_FORMAT_GR88 fourcc_code('G', 'R', '8', '8') /* [15:0] G:R 8:8 little endian */
/* 32 bpp RG */
#define DRM_FORMAT_RG1616 fourcc_code('R', 'G', '3', '2') /* [31:0] R:G 16:16 little endian */
#define DRM_FORMAT_GR1616 fourcc_code('G', 'R', '3', '2') /* [31:0] G:R 16:16 little endian */
/* 8 bpp RGB */
#define DRM_FORMAT_RGB332 fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */
#define DRM_FORMAT_BGR233 fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */
/* 16 bpp RGB */
#define DRM_FORMAT_XRGB4444 fourcc_code('X', 'R', '1', '2') /* [15:0] x:R:G:B 4:4:4:4 little endian */
#define DRM_FORMAT_XBGR4444 fourcc_code('X', 'B', '1', '2') /* [15:0] x:B:G:R 4:4:4:4 little endian */
#define DRM_FORMAT_RGBX4444 fourcc_code('R', 'X', '1', '2') /* [15:0] R:G:B:x 4:4:4:4 little endian */
#define DRM_FORMAT_BGRX4444 fourcc_code('B', 'X', '1', '2') /* [15:0] B:G:R:x 4:4:4:4 little endian */
#define DRM_FORMAT_ARGB4444 fourcc_code('A', 'R', '1', '2') /* [15:0] A:R:G:B 4:4:4:4 little endian */
#define DRM_FORMAT_ABGR4444 fourcc_code('A', 'B', '1', '2') /* [15:0] A:B:G:R 4:4:4:4 little endian */
#define DRM_FORMAT_RGBA4444 fourcc_code('R', 'A', '1', '2') /* [15:0] R:G:B:A 4:4:4:4 little endian */
#define DRM_FORMAT_BGRA4444 fourcc_code('B', 'A', '1', '2') /* [15:0] B:G:R:A 4:4:4:4 little endian */
#define DRM_FORMAT_XRGB1555 fourcc_code('X', 'R', '1', '5') /* [15:0] x:R:G:B 1:5:5:5 little endian */
#define DRM_FORMAT_XBGR1555 fourcc_code('X', 'B', '1', '5') /* [15:0] x:B:G:R 1:5:5:5 little endian */
#define DRM_FORMAT_RGBX5551 fourcc_code('R', 'X', '1', '5') /* [15:0] R:G:B:x 5:5:5:1 little endian */
#define DRM_FORMAT_BGRX5551 fourcc_code('B', 'X', '1', '5') /* [15:0] B:G:R:x 5:5:5:1 little endian */
#define DRM_FORMAT_ARGB1555 fourcc_code('A', 'R', '1', '5') /* [15:0] A:R:G:B 1:5:5:5 little endian */
#define DRM_FORMAT_ABGR1555 fourcc_code('A', 'B', '1', '5') /* [15:0] A:B:G:R 1:5:5:5 little endian */
#define DRM_FORMAT_RGBA5551 fourcc_code('R', 'A', '1', '5') /* [15:0] R:G:B:A 5:5:5:1 little endian */
#define DRM_FORMAT_BGRA5551 fourcc_code('B', 'A', '1', '5') /* [15:0] B:G:R:A 5:5:5:1 little endian */
#define DRM_FORMAT_RGB565 fourcc_code('R', 'G', '1', '6') /* [15:0] R:G:B 5:6:5 little endian */
#define DRM_FORMAT_BGR565 fourcc_code('B', 'G', '1', '6') /* [15:0] B:G:R 5:6:5 little endian */
/* 24 bpp RGB */
#define DRM_FORMAT_RGB888 fourcc_code('R', 'G', '2', '4') /* [23:0] R:G:B little endian */
#define DRM_FORMAT_BGR888 fourcc_code('B', 'G', '2', '4') /* [23:0] B:G:R little endian */
/* 32 bpp RGB */
#define DRM_FORMAT_XRGB8888 fourcc_code('X', 'R', '2', '4') /* [31:0] x:R:G:B 8:8:8:8 little endian */
#define DRM_FORMAT_XBGR8888 fourcc_code('X', 'B', '2', '4') /* [31:0] x:B:G:R 8:8:8:8 little endian */
#define DRM_FORMAT_RGBX8888 fourcc_code('R', 'X', '2', '4') /* [31:0] R:G:B:x 8:8:8:8 little endian */
#define DRM_FORMAT_BGRX8888 fourcc_code('B', 'X', '2', '4') /* [31:0] B:G:R:x 8:8:8:8 little endian */
#define DRM_FORMAT_ARGB8888 fourcc_code('A', 'R', '2', '4') /* [31:0] A:R:G:B 8:8:8:8 little endian */
#define DRM_FORMAT_ABGR8888 fourcc_code('A', 'B', '2', '4') /* [31:0] A:B:G:R 8:8:8:8 little endian */
#define DRM_FORMAT_RGBA8888 fourcc_code('R', 'A', '2', '4') /* [31:0] R:G:B:A 8:8:8:8 little endian */
#define DRM_FORMAT_BGRA8888 fourcc_code('B', 'A', '2', '4') /* [31:0] B:G:R:A 8:8:8:8 little endian */
#define DRM_FORMAT_XRGB2101010 fourcc_code('X', 'R', '3', '0') /* [31:0] x:R:G:B 2:10:10:10 little endian */
#define DRM_FORMAT_XBGR2101010 fourcc_code('X', 'B', '3', '0') /* [31:0] x:B:G:R 2:10:10:10 little endian */
#define DRM_FORMAT_RGBX1010102 fourcc_code('R', 'X', '3', '0') /* [31:0] R:G:B:x 10:10:10:2 little endian */
#define DRM_FORMAT_BGRX1010102 fourcc_code('B', 'X', '3', '0') /* [31:0] B:G:R:x 10:10:10:2 little endian */
#define DRM_FORMAT_ARGB2101010 fourcc_code('A', 'R', '3', '0') /* [31:0] A:R:G:B 2:10:10:10 little endian */
#define DRM_FORMAT_ABGR2101010 fourcc_code('A', 'B', '3', '0') /* [31:0] A:B:G:R 2:10:10:10 little endian */
#define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
#define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
/* 48 bpp RGB */
#define DRM_FORMAT_RGB161616 fourcc_code('R', 'G', '4', '8') /* [47:0] R:G:B 16:16:16 little endian */
#define DRM_FORMAT_BGR161616 fourcc_code('B', 'G', '4', '8') /* [47:0] B:G:R 16:16:16 little endian */
/* 64 bpp RGB */
#define DRM_FORMAT_XRGB16161616 fourcc_code('X', 'R', '4', '8') /* [63:0] x:R:G:B 16:16:16:16 little endian */
#define DRM_FORMAT_XBGR16161616 fourcc_code('X', 'B', '4', '8') /* [63:0] x:B:G:R 16:16:16:16 little endian */
#define DRM_FORMAT_ARGB16161616 fourcc_code('A', 'R', '4', '8') /* [63:0] A:R:G:B 16:16:16:16 little endian */
#define DRM_FORMAT_ABGR16161616 fourcc_code('A', 'B', '4', '8') /* [63:0] A:B:G:R 16:16:16:16 little endian */
/*
* Floating point 64bpp RGB
* IEEE 754-2008 binary16 half-precision float
* [15:0] sign:exponent:mantissa 1:5:10
*/
#define DRM_FORMAT_XRGB16161616F fourcc_code('X', 'R', '4', 'H') /* [63:0] x:R:G:B 16:16:16:16 little endian */
#define DRM_FORMAT_XBGR16161616F fourcc_code('X', 'B', '4', 'H') /* [63:0] x:B:G:R 16:16:16:16 little endian */
#define DRM_FORMAT_ARGB16161616F fourcc_code('A', 'R', '4', 'H') /* [63:0] A:R:G:B 16:16:16:16 little endian */
#define DRM_FORMAT_ABGR16161616F fourcc_code('A', 'B', '4', 'H') /* [63:0] A:B:G:R 16:16:16:16 little endian */
/*
* RGBA format with 10-bit components packed in 64-bit per pixel, with 6 bits
* of unused padding per component:
*/
#define DRM_FORMAT_AXBXGXRX106106106106 fourcc_code('A', 'B', '1', '0') /* [63:0] A:x:B:x:G:x:R:x 10:6:10:6:10:6:10:6 little endian */
/* packed YCbCr */
#define DRM_FORMAT_YUYV fourcc_code('Y', 'U', 'Y', 'V') /* [31:0] Cr0:Y1:Cb0:Y0 8:8:8:8 little endian */
#define DRM_FORMAT_YVYU fourcc_code('Y', 'V', 'Y', 'U') /* [31:0] Cb0:Y1:Cr0:Y0 8:8:8:8 little endian */
#define DRM_FORMAT_UYVY fourcc_code('U', 'Y', 'V', 'Y') /* [31:0] Y1:Cr0:Y0:Cb0 8:8:8:8 little endian */
#define DRM_FORMAT_VYUY fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */
#define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
#define DRM_FORMAT_AVUY8888 fourcc_code('A', 'V', 'U', 'Y') /* [31:0] A:Cr:Cb:Y 8:8:8:8 little endian */
#define DRM_FORMAT_XYUV8888 fourcc_code('X', 'Y', 'U', 'V') /* [31:0] X:Y:Cb:Cr 8:8:8:8 little endian */
#define DRM_FORMAT_XVUY8888 fourcc_code('X', 'V', 'U', 'Y') /* [31:0] X:Cr:Cb:Y 8:8:8:8 little endian */
#define DRM_FORMAT_VUY888 fourcc_code('V', 'U', '2', '4') /* [23:0] Cr:Cb:Y 8:8:8 little endian */
#define DRM_FORMAT_VUY101010 fourcc_code('V', 'U', '3', '0') /* Y followed by U then V, 10:10:10. Non-linear modifier only */
/*
 * Packed Y2xx formats: for each component, the xx valid data bits occupy the
 * MSBs and the 16-xx padding bits occupy the LSBs.
*/
#define DRM_FORMAT_Y210 fourcc_code('Y', '2', '1', '0') /* [63:0] Cr0:0:Y1:0:Cb0:0:Y0:0 10:6:10:6:10:6:10:6 little endian per 2 Y pixels */
#define DRM_FORMAT_Y212 fourcc_code('Y', '2', '1', '2') /* [63:0] Cr0:0:Y1:0:Cb0:0:Y0:0 12:4:12:4:12:4:12:4 little endian per 2 Y pixels */
#define DRM_FORMAT_Y216 fourcc_code('Y', '2', '1', '6') /* [63:0] Cr0:Y1:Cb0:Y0 16:16:16:16 little endian per 2 Y pixels */
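/*
 * Illustrative example (not normative): unpacking the first luma sample from
 * a Y210 pixel pair. Per the layout above, each component occupies the 10
 * MSBs of its little-endian 16-bit container, with padding in the LSBs. The
 * helper name is an assumption for this example only.
 */
static __inline__ __u16 y210_unpack_y0(const __u8 *pair)
{
	/* Y0 lives in the lowest 16-bit container: Y0:0 10:6 little endian */
	__u16 container = (__u16)pair[0] | ((__u16)pair[1] << 8);
	return container >> 6;	/* drop the 6 padding LSBs */
}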
/*
 * Packed Y4xx formats: for each component, the xx valid data bits occupy the
 * MSBs and the 16-xx padding bits occupy the LSBs, except for Y410.
*/
#define DRM_FORMAT_Y410 fourcc_code('Y', '4', '1', '0') /* [31:0] A:Cr:Y:Cb 2:10:10:10 little endian */
#define DRM_FORMAT_Y412 fourcc_code('Y', '4', '1', '2') /* [63:0] A:0:Cr:0:Y:0:Cb:0 12:4:12:4:12:4:12:4 little endian */
#define DRM_FORMAT_Y416 fourcc_code('Y', '4', '1', '6') /* [63:0] A:Cr:Y:Cb 16:16:16:16 little endian */
#define DRM_FORMAT_XVYU2101010 fourcc_code('X', 'V', '3', '0') /* [31:0] X:Cr:Y:Cb 2:10:10:10 little endian */
#define DRM_FORMAT_XVYU12_16161616 fourcc_code('X', 'V', '3', '6') /* [63:0] X:0:Cr:0:Y:0:Cb:0 12:4:12:4:12:4:12:4 little endian */
#define DRM_FORMAT_XVYU16161616 fourcc_code('X', 'V', '4', '8') /* [63:0] X:Cr:Y:Cb 16:16:16:16 little endian */
/*
* packed YCbCr420 2x2 tiled formats
* first 64 bits will contain Y,Cb,Cr components for a 2x2 tile
*/
/* [63:0] A3:A2:Y3:0:Cr0:0:Y2:0:A1:A0:Y1:0:Cb0:0:Y0:0 1:1:8:2:8:2:8:2:1:1:8:2:8:2:8:2 little endian */
#define DRM_FORMAT_Y0L0 fourcc_code('Y', '0', 'L', '0')
/* [63:0] X3:X2:Y3:0:Cr0:0:Y2:0:X1:X0:Y1:0:Cb0:0:Y0:0 1:1:8:2:8:2:8:2:1:1:8:2:8:2:8:2 little endian */
#define DRM_FORMAT_X0L0 fourcc_code('X', '0', 'L', '0')
/* [63:0] A3:A2:Y3:Cr0:Y2:A1:A0:Y1:Cb0:Y0 1:1:10:10:10:1:1:10:10:10 little endian */
#define DRM_FORMAT_Y0L2 fourcc_code('Y', '0', 'L', '2')
/* [63:0] X3:X2:Y3:Cr0:Y2:X1:X0:Y1:Cb0:Y0 1:1:10:10:10:1:1:10:10:10 little endian */
#define DRM_FORMAT_X0L2 fourcc_code('X', '0', 'L', '2')
/*
* 1-plane YUV 4:2:0
* In these formats, the component ordering is specified (Y, followed by U
* then V), but the exact Linear layout is undefined.
* These formats can only be used with a non-Linear modifier.
*/
#define DRM_FORMAT_YUV420_8BIT fourcc_code('Y', 'U', '0', '8')
#define DRM_FORMAT_YUV420_10BIT fourcc_code('Y', 'U', '1', '0')
/*
* 2 plane RGB + A
 * index 0 = RGB plane, same format as the corresponding non-_A8 format
* index 1 = A plane, [7:0] A
*/
#define DRM_FORMAT_XRGB8888_A8 fourcc_code('X', 'R', 'A', '8')
#define DRM_FORMAT_XBGR8888_A8 fourcc_code('X', 'B', 'A', '8')
#define DRM_FORMAT_RGBX8888_A8 fourcc_code('R', 'X', 'A', '8')
#define DRM_FORMAT_BGRX8888_A8 fourcc_code('B', 'X', 'A', '8')
#define DRM_FORMAT_RGB888_A8 fourcc_code('R', '8', 'A', '8')
#define DRM_FORMAT_BGR888_A8 fourcc_code('B', '8', 'A', '8')
#define DRM_FORMAT_RGB565_A8 fourcc_code('R', '5', 'A', '8')
#define DRM_FORMAT_BGR565_A8 fourcc_code('B', '5', 'A', '8')
/*
* 2 plane YCbCr
* index 0 = Y plane, [7:0] Y
* index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
* or
* index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
*/
#define DRM_FORMAT_NV12 fourcc_code('N', 'V', '1', '2') /* 2x2 subsampled Cr:Cb plane */
#define DRM_FORMAT_NV21 fourcc_code('N', 'V', '2', '1') /* 2x2 subsampled Cb:Cr plane */
#define DRM_FORMAT_NV16 fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */
#define DRM_FORMAT_NV61 fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */
#define DRM_FORMAT_NV24 fourcc_code('N', 'V', '2', '4') /* non-subsampled Cr:Cb plane */
#define DRM_FORMAT_NV42 fourcc_code('N', 'V', '4', '2') /* non-subsampled Cb:Cr plane */
/*
* 2 plane YCbCr
* index 0 = Y plane, [39:0] Y3:Y2:Y1:Y0 little endian
* index 1 = Cr:Cb plane, [39:0] Cr1:Cb1:Cr0:Cb0 little endian
*/
#define DRM_FORMAT_NV15 fourcc_code('N', 'V', '1', '5') /* 2x2 subsampled Cr:Cb plane */
#define DRM_FORMAT_NV20 fourcc_code('N', 'V', '2', '0') /* 2x1 subsampled Cr:Cb plane */
#define DRM_FORMAT_NV30 fourcc_code('N', 'V', '3', '0') /* non-subsampled Cr:Cb plane */
/*
* 2 plane YCbCr MSB aligned
* index 0 = Y plane, [15:0] Y:x [10:6] little endian
* index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [10:6:10:6] little endian
*/
#define DRM_FORMAT_P210 fourcc_code('P', '2', '1', '0') /* 2x1 subsampled Cr:Cb plane, 10 bit per channel */
/*
* 2 plane YCbCr MSB aligned
* index 0 = Y plane, [15:0] Y:x [10:6] little endian
* index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [10:6:10:6] little endian
*/
#define DRM_FORMAT_P010 fourcc_code('P', '0', '1', '0') /* 2x2 subsampled Cr:Cb plane 10 bits per channel */
/*
* 2 plane YCbCr MSB aligned
* index 0 = Y plane, [15:0] Y:x [12:4] little endian
* index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [12:4:12:4] little endian
*/
#define DRM_FORMAT_P012 fourcc_code('P', '0', '1', '2') /* 2x2 subsampled Cr:Cb plane 12 bits per channel */
/*
* 2 plane YCbCr MSB aligned
* index 0 = Y plane, [15:0] Y little endian
* index 1 = Cr:Cb plane, [31:0] Cr:Cb [16:16] little endian
*/
#define DRM_FORMAT_P016 fourcc_code('P', '0', '1', '6') /* 2x2 subsampled Cr:Cb plane 16 bits per channel */
/* 2 plane YCbCr420.
 * Three 10-bit components and 2 padding bits packed into 4 bytes.
* index 0 = Y plane, [31:0] x:Y2:Y1:Y0 2:10:10:10 little endian
* index 1 = Cr:Cb plane, [63:0] x:Cr2:Cb2:Cr1:x:Cb1:Cr0:Cb0 [2:10:10:10:2:10:10:10] little endian
*/
#define DRM_FORMAT_P030 fourcc_code('P', '0', '3', '0') /* 2x2 subsampled Cr:Cb plane 10 bits per channel packed */
/* 3 plane non-subsampled (444) YCbCr
* 16 bits per component, but only 10 bits are used and 6 bits are padded
* index 0: Y plane, [15:0] Y:x [10:6] little endian
* index 1: Cb plane, [15:0] Cb:x [10:6] little endian
* index 2: Cr plane, [15:0] Cr:x [10:6] little endian
*/
#define DRM_FORMAT_Q410 fourcc_code('Q', '4', '1', '0')
/* 3 plane non-subsampled (444) YCrCb
* 16 bits per component, but only 10 bits are used and 6 bits are padded
* index 0: Y plane, [15:0] Y:x [10:6] little endian
* index 1: Cr plane, [15:0] Cr:x [10:6] little endian
* index 2: Cb plane, [15:0] Cb:x [10:6] little endian
*/
#define DRM_FORMAT_Q401 fourcc_code('Q', '4', '0', '1')
/*
* 3 plane YCbCr
* index 0: Y plane, [7:0] Y
* index 1: Cb plane, [7:0] Cb
* index 2: Cr plane, [7:0] Cr
* or
* index 1: Cr plane, [7:0] Cr
* index 2: Cb plane, [7:0] Cb
*/
#define DRM_FORMAT_YUV410 fourcc_code('Y', 'U', 'V', '9') /* 4x4 subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU410 fourcc_code('Y', 'V', 'U', '9') /* 4x4 subsampled Cr (1) and Cb (2) planes */
#define DRM_FORMAT_YUV411 fourcc_code('Y', 'U', '1', '1') /* 4x1 subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU411 fourcc_code('Y', 'V', '1', '1') /* 4x1 subsampled Cr (1) and Cb (2) planes */
#define DRM_FORMAT_YUV420 fourcc_code('Y', 'U', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU420 fourcc_code('Y', 'V', '1', '2') /* 2x2 subsampled Cr (1) and Cb (2) planes */
#define DRM_FORMAT_YUV422 fourcc_code('Y', 'U', '1', '6') /* 2x1 subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU422 fourcc_code('Y', 'V', '1', '6') /* 2x1 subsampled Cr (1) and Cb (2) planes */
#define DRM_FORMAT_YUV444 fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU444 fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */
/* Compressed formats */
#define DRM_FORMAT_MJPEG fourcc_code('M', 'J', 'P', 'G') /* Motion-JPEG */
/*
* Bayer formats
*
* Bayer formats contain green, red and blue components, with alternating lines
* of red and green, and blue and green pixels in different orders. For each
* block of 2x2 pixels there is one pixel with a red filter, two with a green
* filter, and one with a blue filter. The filters can be arranged in different
* patterns.
*
* For example, RGGB:
* row0: RGRGRGRG...
* row1: GBGBGBGB...
 * row2: RGRGRGRG...
 * row3: GBGBGBGB...
* ...
*
* Vendors have different methods to pack the sampling formats to increase data
* density. For this reason the fourcc only describes pixel sample size and the
* filter pattern for each block of 2x2 pixels. A modifier is needed to
* describe the memory layout.
*
* In addition to vendor modifiers for memory layout DRM_FORMAT_MOD_LINEAR may
* be used to describe a layout where all samples are placed consecutively in
* memory. If the sample does not fit inside a single byte, the sample storage
* is extended to the minimum number of (little endian) bytes that can hold the
* sample and any unused most-significant bits are defined as padding.
*
* For example, SRGGB10:
* Each 10-bit sample is contained in 2 consecutive little endian bytes, where
* the 6 most-significant bits are unused.
*/
/* 8-bit Bayer formats */
#define DRM_FORMAT_SRGGB8 fourcc_code('R', 'G', 'G', 'B')
#define DRM_FORMAT_SGRBG8 fourcc_code('G', 'R', 'B', 'G')
#define DRM_FORMAT_SGBRG8 fourcc_code('G', 'B', 'R', 'G')
#define DRM_FORMAT_SBGGR8 fourcc_code('B', 'A', '8', '1')
/* 10-bit Bayer formats */
#define DRM_FORMAT_SRGGB10 fourcc_code('R', 'G', '1', '0')
#define DRM_FORMAT_SGRBG10 fourcc_code('B', 'A', '1', '0')
#define DRM_FORMAT_SGBRG10 fourcc_code('G', 'B', '1', '0')
#define DRM_FORMAT_SBGGR10 fourcc_code('B', 'G', '1', '0')
/* 12-bit Bayer formats */
#define DRM_FORMAT_SRGGB12 fourcc_code('R', 'G', '1', '2')
#define DRM_FORMAT_SGRBG12 fourcc_code('B', 'A', '1', '2')
#define DRM_FORMAT_SGBRG12 fourcc_code('G', 'B', '1', '2')
#define DRM_FORMAT_SBGGR12 fourcc_code('B', 'G', '1', '2')
/* 14-bit Bayer formats */
#define DRM_FORMAT_SRGGB14 fourcc_code('R', 'G', '1', '4')
#define DRM_FORMAT_SGRBG14 fourcc_code('B', 'A', '1', '4')
#define DRM_FORMAT_SGBRG14 fourcc_code('G', 'B', '1', '4')
#define DRM_FORMAT_SBGGR14 fourcc_code('B', 'G', '1', '4')
/* 16-bit Bayer formats */
#define DRM_FORMAT_SRGGB16 fourcc_code('R', 'G', 'B', '6')
#define DRM_FORMAT_SGRBG16 fourcc_code('G', 'R', '1', '6')
#define DRM_FORMAT_SGBRG16 fourcc_code('G', 'B', '1', '6')
#define DRM_FORMAT_SBGGR16 fourcc_code('B', 'Y', 'R', '2')
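/*
 * Illustrative example (not normative): reading one 10-bit sample from a
 * DRM_FORMAT_MOD_LINEAR SRGGB10 buffer. As described above, each sample
 * occupies two consecutive little-endian bytes with the 6 most-significant
 * bits unused. The helper name is an assumption for this example only.
 */
static __inline__ __u16 bayer10_linear_sample(const __u8 *buf, unsigned int idx)
{
	const __u8 *p = buf + 2 * idx;	/* two bytes per sample */
	/* low byte first (little endian), mask off the 6 unused MSBs */
	return ((__u16)p[0] | ((__u16)p[1] << 8)) & 0x3ff;
}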
/*
* Format Modifiers:
*
 * Format modifiers typically describe a re-ordering or modification
* of the data in a plane of an FB. This can be used to express tiled/
* swizzled formats, or compression, or a combination of the two.
*
* The upper 8 bits of the format modifier are a vendor-id as assigned
* below. The lower 56 bits are assigned as vendor sees fit.
*/
/* Vendor Ids: */
#define DRM_FORMAT_MOD_VENDOR_NONE 0
#define DRM_FORMAT_MOD_VENDOR_INTEL 0x01
#define DRM_FORMAT_MOD_VENDOR_AMD 0x02
#define DRM_FORMAT_MOD_VENDOR_NVIDIA 0x03
#define DRM_FORMAT_MOD_VENDOR_SAMSUNG 0x04
#define DRM_FORMAT_MOD_VENDOR_QCOM 0x05
#define DRM_FORMAT_MOD_VENDOR_VIVANTE 0x06
#define DRM_FORMAT_MOD_VENDOR_BROADCOM 0x07
#define DRM_FORMAT_MOD_VENDOR_ARM 0x08
#define DRM_FORMAT_MOD_VENDOR_ALLWINNER 0x09
#define DRM_FORMAT_MOD_VENDOR_AMLOGIC 0x0a
#define DRM_FORMAT_MOD_VENDOR_MIPI 0x0b
#define DRM_FORMAT_MOD_VENDOR_RPI 0x0c
/* add more to the end as needed */
#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1)
#define fourcc_mod_get_vendor(modifier) \
(((modifier) >> 56) & 0xff)
#define fourcc_mod_is_vendor(modifier, vendor) \
(fourcc_mod_get_vendor(modifier) == DRM_FORMAT_MOD_VENDOR_## vendor)
#define fourcc_mod_code(vendor, val) \
((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | ((val) & 0x00ffffffffffffffULL))
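/*
 * Illustrative example (not normative): composing a modifier with
 * fourcc_mod_code() and querying it with the helpers above. The function
 * name is an assumption for this example only.
 */
static __inline__ int example_modifier_vendor_check(void)
{
	/* upper 8 bits carry the vendor id, lower 56 bits the vendor value */
	__u64 modifier = fourcc_mod_code(SAMSUNG, 1);

	return fourcc_mod_get_vendor(modifier) == DRM_FORMAT_MOD_VENDOR_SAMSUNG &&
	       fourcc_mod_is_vendor(modifier, SAMSUNG);
}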
/*
* Format Modifier tokens:
*
* When adding a new token please document the layout with a code comment,
* similar to the fourcc codes above. drm_fourcc.h is considered the
* authoritative source for all of these.
*
* Generic modifier names:
*
* DRM_FORMAT_MOD_GENERIC_* definitions are used to provide vendor-neutral names
* for layouts which are common across multiple vendors. To preserve
* compatibility, in cases where a vendor-specific definition already exists and
* a generic name for it is desired, the common name is a purely symbolic alias
* and must use the same numerical value as the original definition.
*
* Note that generic names should only be used for modifiers which describe
* generic layouts (such as pixel re-ordering), which may have
* independently-developed support across multiple vendors.
*
* In future cases where a generic layout is identified before merging with a
* vendor-specific modifier, a new 'GENERIC' vendor or modifier using vendor
* 'NONE' could be considered. This should only be for obvious, exceptional
* cases to avoid polluting the 'GENERIC' namespace with modifiers which only
* apply to a single vendor.
*
* Generic names should not be used for cases where multiple hardware vendors
* have implementations of the same standardised compression scheme (such as
* AFBC). In those cases, all implementations should use the same format
* modifier(s), reflecting the vendor of the standard.
*/
#define DRM_FORMAT_MOD_GENERIC_16_16_TILE DRM_FORMAT_MOD_SAMSUNG_16_16_TILE
/*
* Invalid Modifier
*
* This modifier can be used as a sentinel to terminate the format modifiers
* list, or to initialize a variable with an invalid modifier. It might also be
* used to report an error back to userspace for certain APIs.
*/
#define DRM_FORMAT_MOD_INVALID fourcc_mod_code(NONE, DRM_FORMAT_RESERVED)
/*
* Linear Layout
*
 * Just plain linear layout. Note that this is different from not specifying any
* modifier (e.g. not setting DRM_MODE_FB_MODIFIERS in the DRM_ADDFB2 ioctl),
* which tells the driver to also take driver-internal information into account
* and so might actually result in a tiled framebuffer.
*/
#define DRM_FORMAT_MOD_LINEAR fourcc_mod_code(NONE, 0)
/*
* Deprecated: use DRM_FORMAT_MOD_LINEAR instead
*
* The "none" format modifier doesn't actually mean that the modifier is
* implicit, instead it means that the layout is linear. Whether modifiers are
* used is out-of-band information carried in an API-specific way (e.g. in a
* flag for drm_mode_fb_cmd2).
*/
#define DRM_FORMAT_MOD_NONE 0
/* Intel framebuffer modifiers */
/*
* Intel X-tiling layout
*
 * This is a tiled layout using 4Kb tiles (except on gen2 where the tiles are
 * 2Kb) in row-major layout. Within the tile bytes are laid out row-major, with
 * a platform-dependent stride. On top of that the memory can apply
 * platform-dependent swizzling of some higher address bits into bit6.
 *
 * Note that this layout is only accurate on intel gen 8+ or valleyview chipsets.
 * On earlier platforms the layout is highly platform-specific and not useful for
* cross-driver sharing. It exists since on a given platform it does uniquely
* identify the layout in a simple way for i915-specific userspace, which
* facilitated conversion of userspace to modifiers. Additionally the exact
* format on some really old platforms is not known.
*/
#define I915_FORMAT_MOD_X_TILED fourcc_mod_code(INTEL, 1)
/*
* Intel Y-tiling layout
*
 * This is a tiled layout using 4Kb tiles (except on gen2 where the tiles are
 * 2Kb) in row-major layout. Within the tile bytes are laid out in OWORD (16 bytes)
 * chunks column-major, with a platform-dependent height. On top of that the
 * memory can apply platform-dependent swizzling of some higher address bits
 * into bit6.
 *
 * Note that this layout is only accurate on intel gen 8+ or valleyview chipsets.
 * On earlier platforms the layout is highly platform-specific and not useful for
* cross-driver sharing. It exists since on a given platform it does uniquely
* identify the layout in a simple way for i915-specific userspace, which
* facilitated conversion of userspace to modifiers. Additionally the exact
* format on some really old platforms is not known.
*/
#define I915_FORMAT_MOD_Y_TILED fourcc_mod_code(INTEL, 2)
/*
* Intel Yf-tiling layout
*
* This is a tiled layout using 4Kb tiles in row-major layout.
* Within the tile pixels are laid out in 16 256 byte units / sub-tiles which
* are arranged in four groups (two wide, two high) with column-major layout.
 * Each group therefore consists of four 256 byte units, which are also laid
* out as 2x2 column-major.
* 256 byte units are made out of four 64 byte blocks of pixels, producing
* either a square block or a 2:1 unit.
* 64 byte blocks of pixels contain four pixel rows of 16 bytes, where the width
 * in pixels depends on the pixel depth.
*/
#define I915_FORMAT_MOD_Yf_TILED fourcc_mod_code(INTEL, 3)
/*
* Intel color control surface (CCS) for render compression
*
* The framebuffer format must be one of the 8:8:8:8 RGB formats.
* The main surface will be plane index 0 and must be Y/Yf-tiled,
* the CCS will be plane index 1.
*
* Each CCS tile matches a 1024x512 pixel area of the main surface.
* To match certain aspects of the 3D hardware the CCS is
 * considered to be made up of normal 128Bx32 Y tiles. Thus
* the CCS pitch must be specified in multiples of 128 bytes.
*
* In reality the CCS tile appears to be a 64Bx64 Y tile, composed
* of QWORD (8 bytes) chunks instead of OWORD (16 bytes) chunks.
* But that fact is not relevant unless the memory is accessed
* directly.
*/
#define I915_FORMAT_MOD_Y_TILED_CCS fourcc_mod_code(INTEL, 4)
#define I915_FORMAT_MOD_Yf_TILED_CCS fourcc_mod_code(INTEL, 5)
/*
* Intel color control surfaces (CCS) for Gen-12 render compression.
*
* The main surface is Y-tiled and at plane index 0, the CCS is linear and
* at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in
 * the main surface. In other words, 4 bits in CCS map to a main surface cache
* line pair. The main surface pitch is required to be a multiple of four
* Y-tile widths.
*/
#define I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS fourcc_mod_code(INTEL, 6)
/*
* Intel color control surfaces (CCS) for Gen-12 media compression
*
* The main surface is Y-tiled and at plane index 0, the CCS is linear and
* at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in
 * the main surface. In other words, 4 bits in CCS map to a main surface cache
* line pair. The main surface pitch is required to be a multiple of four
* Y-tile widths. For semi-planar formats like NV12, CCS planes follow the
* Y and UV planes i.e., planes 0 and 1 are used for Y and UV surfaces,
* planes 2 and 3 for the respective CCS.
*/
#define I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS fourcc_mod_code(INTEL, 7)
/*
* Intel Color Control Surface with Clear Color (CCS) for Gen-12 render
* compression.
*
* The main surface is Y-tiled and is at plane index 0 whereas CCS is linear
* and at index 1. The clear color is stored at index 2, and the pitch should
 * be 64-byte aligned. The clear color structure is 256 bits. The first 128 bits
 * represent Raw Clear Color Red, Green, Blue and Alpha, each represented
* by 32 bits. The raw clear color is consumed by the 3d engine and generates
* the converted clear color of size 64 bits. The first 32 bits store the Lower
* Converted Clear Color value and the next 32 bits store the Higher Converted
* Clear Color value when applicable. The Converted Clear Color values are
* consumed by the DE. The last 64 bits are used to store Color Discard Enable
* and Depth Clear Value Valid which are ignored by the DE. A CCS cache line
* corresponds to an area of 4x1 tiles in the main surface. The main surface
* pitch is required to be a multiple of 4 tile widths.
*/
#define I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC fourcc_mod_code(INTEL, 8)
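/*
 * Illustrative example (not normative): one possible C view of the 256-bit
 * clear color block described above. The struct and field names are
 * assumptions for this example; only the bit widths and ordering follow the
 * comment.
 */
struct example_gen12_clear_color {
	__u32 raw_red;			/* bits   0..31:  raw clear color, red */
	__u32 raw_green;		/* bits  32..63:  raw clear color, green */
	__u32 raw_blue;			/* bits  64..95:  raw clear color, blue */
	__u32 raw_alpha;		/* bits  96..127: raw clear color, alpha */
	__u32 converted_lo;		/* bits 128..159: lower converted clear color */
	__u32 converted_hi;		/* bits 160..191: higher converted clear color */
	__u64 discard_and_depth;	/* bits 192..255: ignored by the DE */
};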
/*
* Intel Tile 4 layout
*
* This is a tiled layout using 4KB tiles in a row-major layout. It has the same
* shape as Tile Y at two granularities: 4KB (128B x 32) and 64B (16B x 4). It
* only differs from Tile Y at the 256B granularity in between. At this
* granularity, Tile Y has a shape of 16B x 32 rows, but this tiling has a shape
* of 64B x 8 rows.
*/
#define I915_FORMAT_MOD_4_TILED fourcc_mod_code(INTEL, 9)
/*
* Intel color control surfaces (CCS) for DG2 render compression.
*
* The main surface is Tile 4 and at plane index 0. The CCS data is stored
* outside of the GEM object in a reserved memory area dedicated for the
* storage of the CCS data for all RC/RC_CC/MC compressible GEM objects. The
* main surface pitch is required to be a multiple of four Tile 4 widths.
*/
#define I915_FORMAT_MOD_4_TILED_DG2_RC_CCS fourcc_mod_code(INTEL, 10)
/*
* Intel color control surfaces (CCS) for DG2 media compression.
*
* The main surface is Tile 4 and at plane index 0. For semi-planar formats
* like NV12, the Y and UV planes are Tile 4 and are located at plane indices
* 0 and 1, respectively. The CCS for all planes are stored outside of the
* GEM object in a reserved memory area dedicated for the storage of the
* CCS data for all RC/RC_CC/MC compressible GEM objects. The main surface
* pitch is required to be a multiple of four Tile 4 widths.
*/
#define I915_FORMAT_MOD_4_TILED_DG2_MC_CCS fourcc_mod_code(INTEL, 11)
/*
* Intel Color Control Surface with Clear Color (CCS) for DG2 render compression.
*
* The main surface is Tile 4 and at plane index 0. The CCS data is stored
* outside of the GEM object in a reserved memory area dedicated for the
* storage of the CCS data for all RC/RC_CC/MC compressible GEM objects. The
* main surface pitch is required to be a multiple of four Tile 4 widths. The
* clear color is stored at plane index 1 and the pitch should be 64 bytes
* aligned. The format of the 256 bits of clear color data matches the one used
* for the I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC modifier, see its description
* for details.
*/
#define I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC fourcc_mod_code(INTEL, 12)
/*
* Intel Color Control Surfaces (CCS) for display ver. 14 render compression.
*
* The main surface is tile4 and at plane index 0, the CCS is linear and
* at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in
 * the main surface. In other words, 4 bits in CCS map to a main surface cache
* line pair. The main surface pitch is required to be a multiple of four
* tile4 widths.
*/
#define I915_FORMAT_MOD_4_TILED_MTL_RC_CCS fourcc_mod_code(INTEL, 13)
/*
* Intel Color Control Surfaces (CCS) for display ver. 14 media compression
*
* The main surface is tile4 and at plane index 0, the CCS is linear and
* at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in
 * the main surface. In other words, 4 bits in CCS map to a main surface cache
* line pair. The main surface pitch is required to be a multiple of four
* tile4 widths. For semi-planar formats like NV12, CCS planes follow the
* Y and UV planes i.e., planes 0 and 1 are used for Y and UV surfaces,
* planes 2 and 3 for the respective CCS.
*/
#define I915_FORMAT_MOD_4_TILED_MTL_MC_CCS fourcc_mod_code(INTEL, 14)
/*
* Intel Color Control Surface with Clear Color (CCS) for display ver. 14 render
* compression.
*
* The main surface is tile4 and is at plane index 0 whereas CCS is linear
* and at index 1. The clear color is stored at index 2, and the pitch should
* be ignored. The clear color structure is 256 bits. The first 128 bits
 * represent Raw Clear Color Red, Green, Blue and Alpha, each represented
* by 32 bits. The raw clear color is consumed by the 3d engine and generates
* the converted clear color of size 64 bits. The first 32 bits store the Lower
* Converted Clear Color value and the next 32 bits store the Higher Converted
* Clear Color value when applicable. The Converted Clear Color values are
* consumed by the DE. The last 64 bits are used to store Color Discard Enable
* and Depth Clear Value Valid which are ignored by the DE. A CCS cache line
* corresponds to an area of 4x1 tiles in the main surface. The main surface
* pitch is required to be a multiple of 4 tile widths.
*/
#define I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC fourcc_mod_code(INTEL, 15)
/*
* IPU3 Bayer packing layout
*
* The IPU3 raw Bayer formats use a custom packing layout where there are no
* gaps between each 10-bit sample. It packs 25 pixels into 32 bytes leaving
* the 6 most significant bits in the last byte unused. The format is little
* endian.
*/
#define IPU3_FORMAT_MOD_PACKED fourcc_mod_code(INTEL, 13)
/*
* Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
*
 * Macroblocks are laid in a Z-shape, and the pixel data within each macroblock
 * follows the standard NV12 style.
* As for NV12, an image is the result of two frame buffers: one for Y,
* one for the interleaved Cb/Cr components (1/2 the height of the Y buffer).
* Alignment requirements are (for each buffer):
* - multiple of 128 pixels for the width
* - multiple of 32 pixels for the height
*
* For more information: see https://linuxtv.org/downloads/v4l-dvb-apis/re32.html
*/
#define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1)
/*
* Tiled, 16 (pixels) x 16 (lines) - sized macroblocks
*
* This is a simple tiled layout using tiles of 16x16 pixels in a row-major
* layout. For YCbCr formats Cb/Cr components are taken in such a way that
* they correspond to their 16x16 luma block.
*/
#define DRM_FORMAT_MOD_SAMSUNG_16_16_TILE fourcc_mod_code(SAMSUNG, 2)
/*
* Qualcomm Compressed Format
*
 * Refers to a compressed variant of the base format.
* Implementation may be platform and base-format specific.
*
* Each macrotile consists of m x n (mostly 4 x 4) tiles.
* Pixel data pitch/stride is aligned with macrotile width.
* Pixel data height is aligned with macrotile height.
 * Entire pixel data buffer is aligned with 4k (bytes).
*/
#define DRM_FORMAT_MOD_QCOM_COMPRESSED fourcc_mod_code(QCOM, 1)
/*
* Qualcomm Tiled Format
*
* Similar to DRM_FORMAT_MOD_QCOM_COMPRESSED but not compressed.
* Implementation may be platform and base-format specific.
*
* Each macrotile consists of m x n (mostly 4 x 4) tiles.
* Pixel data pitch/stride is aligned with macrotile width.
* Pixel data height is aligned with macrotile height.
 * Entire pixel data buffer is aligned with 4k (bytes).
*/
#define DRM_FORMAT_MOD_QCOM_TILED3 fourcc_mod_code(QCOM, 3)
/*
* Qualcomm Alternate Tiled Format
*
* Alternate tiled format typically only used within GMEM.
* Implementation may be platform and base-format specific.
*/
#define DRM_FORMAT_MOD_QCOM_TILED2 fourcc_mod_code(QCOM, 2)
/* Vivante framebuffer modifiers */
/*
* Vivante 4x4 tiling layout
*
* This is a simple tiled layout using tiles of 4x4 pixels in a row-major
* layout.
*/
#define DRM_FORMAT_MOD_VIVANTE_TILED fourcc_mod_code(VIVANTE, 1)
/*
* Vivante 64x64 super-tiling layout
*
* This is a tiled layout using 64x64 pixel super-tiles, where each super-tile
* contains 8x4 groups of 2x4 tiles of 4x4 pixels (like above) each, all in row-
* major layout.
*
* For more information: see
* https://github.com/etnaviv/etna_viv/blob/master/doc/hardware.md#texture-tiling
*/
#define DRM_FORMAT_MOD_VIVANTE_SUPER_TILED fourcc_mod_code(VIVANTE, 2)
/*
* Vivante 4x4 tiling layout for dual-pipe
*
* Same as the 4x4 tiling layout, except every second 4x4 pixel tile starts at a
* different base address. Offsets from the base addresses are therefore halved
* compared to the non-split tiled layout.
*/
#define DRM_FORMAT_MOD_VIVANTE_SPLIT_TILED fourcc_mod_code(VIVANTE, 3)
/*
* Vivante 64x64 super-tiling layout for dual-pipe
*
* Same as the 64x64 super-tiling layout, except every second 4x4 pixel tile
* starts at a different base address. Offsets from the base addresses are
* therefore halved compared to the non-split super-tiled layout.
*/
#define DRM_FORMAT_MOD_VIVANTE_SPLIT_SUPER_TILED fourcc_mod_code(VIVANTE, 4)
/*
* Vivante TS (tile-status) buffer modifiers. They can be combined with all of
* the color buffer tiling modifiers defined above. When TS is present it's a
* separate buffer containing the clear/compression status of each tile. The
* modifiers are defined as VIVANTE_MOD_TS_c_s, where c is the color buffer
* tile size in bytes covered by one entry in the status buffer and s is the
* number of status bits per entry.
* We reserve the top 8 bits of the Vivante modifier space for tile status
* clear/compression modifiers, as future cores might add some more TS layout
* variations.
*/
#define VIVANTE_MOD_TS_64_4 (1ULL << 48)
#define VIVANTE_MOD_TS_64_2 (2ULL << 48)
#define VIVANTE_MOD_TS_128_4 (3ULL << 48)
#define VIVANTE_MOD_TS_256_4 (4ULL << 48)
#define VIVANTE_MOD_TS_MASK (0xfULL << 48)
/*
* Vivante compression modifiers. Those depend on a TS modifier being present
* as the TS bits get reinterpreted as compression tags instead of simple
* clear markers when compression is enabled.
*/
#define VIVANTE_MOD_COMP_DEC400 (1ULL << 52)
#define VIVANTE_MOD_COMP_MASK (0xfULL << 52)
/* Masking out the extension bits will yield the base modifier. */
#define VIVANTE_MOD_EXT_MASK (VIVANTE_MOD_TS_MASK | \
VIVANTE_MOD_COMP_MASK)
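/*
 * Illustrative example (not normative): splitting a Vivante modifier into its
 * base layout and its TS/compression extensions using the masks above. The
 * helper names are assumptions for this example only.
 */
static __inline__ __u64 vivante_mod_base(__u64 modifier)
{
	return modifier & ~VIVANTE_MOD_EXT_MASK;
}
static __inline__ int vivante_mod_has_ts(__u64 modifier)
{
	return (modifier & VIVANTE_MOD_TS_MASK) != 0;
}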
/* NVIDIA frame buffer modifiers */
/*
* Tegra Tiled Layout, used by Tegra 2, 3 and 4.
*
* Pixels are arranged in simple tiles of 16 x 16 bytes.
*/
#define DRM_FORMAT_MOD_NVIDIA_TEGRA_TILED fourcc_mod_code(NVIDIA, 1)
/*
* Generalized Block Linear layout, used by desktop GPUs starting with NV50/G80,
* and Tegra GPUs starting with Tegra K1.
*
* Pixels are arranged in Groups of Bytes (GOBs). GOB size and layout varies
* based on the architecture generation. GOBs themselves are then arranged in
* 3D blocks, with the block dimensions (in terms of GOBs) always being a power
 * of two, and hence expressible as their log2 equivalent (e.g., "2" represents
* a block depth or height of "4").
*
* Chapter 20 "Pixel Memory Formats" of the Tegra X1 TRM describes this format
* in full detail.
*
* Macro
* Bits Param Description
* ---- ----- -----------------------------------------------------------------
*
* 3:0 h log2(height) of each block, in GOBs. Placed here for
* compatibility with the existing
* DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK()-based modifiers.
*
* 4:4 - Must be 1, to indicate block-linear layout. Necessary for
* compatibility with the existing
* DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK()-based modifiers.
*
* 8:5 - Reserved (To support 3D-surfaces with variable log2(depth) block
* size). Must be zero.
*
* Note there is no log2(width) parameter. Some portions of the
* hardware support a block width of two gobs, but it is impractical
* to use due to lack of support elsewhere, and has no known
* benefits.
*
* 11:9 - Reserved (To support 2D-array textures with variable array stride
* in blocks, specified via log2(tile width in blocks)). Must be
* zero.
*
* 19:12 k Page Kind. This value directly maps to a field in the page
* tables of all GPUs >= NV50. It affects the exact layout of bits
* in memory and can be derived from the tuple
*
* (format, GPU model, compression type, samples per pixel)
*
* Where compression type is defined below. If GPU model were
* implied by the format modifier, format, or memory buffer, page
* kind would not need to be included in the modifier itself, but
* since the modifier should define the layout of the associated
* memory buffer independent from any device or other context, it
* must be included here.
*
* 21:20 g GOB Height and Page Kind Generation. The height of a GOB changed
* starting with Fermi GPUs. Additionally, the mapping between page
* kind and bit layout has changed at various points.
*
* 0 = Gob Height 8, Fermi - Volta, Tegra K1+ Page Kind mapping
* 1 = Gob Height 4, G80 - GT2XX Page Kind mapping
* 2 = Gob Height 8, Turing+ Page Kind mapping
* 3 = Reserved for future use.
*
* 22:22 s Sector layout. On Tegra GPUs prior to Xavier, there is a further
* bit remapping step that occurs at an even lower level than the
* page kind and block linear swizzles. This causes the layout of
* surfaces mapped in those SOC's GPUs to be incompatible with the
* equivalent mapping on other GPUs in the same system.
*
* 0 = Tegra K1 - Tegra Parker/TX2 Layout.
* 1 = Desktop GPU and Tegra Xavier+ Layout
*
* 25:23 c Lossless Framebuffer Compression type.
*
* 0 = none
* 1 = ROP/3D, layout 1, exact compression format implied by Page
* Kind field
* 2 = ROP/3D, layout 2, exact compression format implied by Page
* Kind field
* 3 = CDE horizontal
* 4 = CDE vertical
* 5 = Reserved for future use
* 6 = Reserved for future use
* 7 = Reserved for future use
*
 * 55:26 - Reserved for future use. Must be zero.
*/
#define DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(c, s, g, k, h) \
fourcc_mod_code(NVIDIA, (0x10 | \
((h) & 0xf) | \
(((k) & 0xff) << 12) | \
(((g) & 0x3) << 20) | \
(((s) & 0x1) << 22) | \
(((c) & 0x7) << 23)))
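/*
 * Illustrative example (not normative): composing a block linear modifier
 * from the fields documented above, and reading the log2(block height) back
 * out. The chosen field values are made up for the example: no compression,
 * desktop sector layout, Fermi - Volta page kind mapping, the generic kind
 * 0xfe, and blocks 2^4 = 16 GOBs tall.
 */
static __inline__ __u64 example_nvidia_block_linear(void)
{
	return DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 4);
}
static __inline__ unsigned int nvidia_block_log2_height(__u64 modifier)
{
	return (unsigned int)(modifier & 0xf);	/* bits 3:0, per the table */
}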
/* To grandfather in prior block linear format modifiers to the above layout,
* the page kind "0", which corresponds to "pitch/linear" and hence is unusable
* with block-linear layouts, is remapped within drivers to the value 0xfe,
* which corresponds to the "generic" kind used for simple single-sample
* uncompressed color formats on Fermi - Volta GPUs.
*/
static __inline__ __u64
drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
{
if (!(modifier & 0x10) || (modifier & (0xff << 12)))
return modifier;
else
return modifier | (0xfe << 12);
}
/*
* 16Bx2 Block Linear layout, used by Tegra K1 and later
*
* Pixels are arranged in 64x8 Groups Of Bytes (GOBs). GOBs are then stacked
* vertically by a power of 2 (1 to 32 GOBs) to form a block.
*
 * Within a GOB, data is ordered as 16B x 2-line sectors laid out in a Z-shape.
*
* Parameter 'v' is the log2 encoding of the number of GOBs stacked vertically.
* Valid values are:
*
* 0 == ONE_GOB
* 1 == TWO_GOBS
* 2 == FOUR_GOBS
* 3 == EIGHT_GOBS
* 4 == SIXTEEN_GOBS
* 5 == THIRTYTWO_GOBS
*
* Chapter 20 "Pixel Memory Formats" of the Tegra X1 TRM describes this format
* in full detail.
*/
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(v) \
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 0, 0, 0, (v))
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_ONE_GOB \
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0)
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_TWO_GOB \
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1)
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_FOUR_GOB \
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2)
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_EIGHT_GOB \
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3)
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_SIXTEEN_GOB \
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4)
#define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_THIRTYTWO_GOB \
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5)
/*
* Some Broadcom modifiers take parameters, for example the number of
 * vertical lines in the image. Reserve the lower 8 bits for the modifier
 * type, and the next 48 bits for parameters. Top 8 bits are the
* vendor code.
*/
#define __fourcc_mod_broadcom_param_shift 8
#define __fourcc_mod_broadcom_param_bits 48
#define fourcc_mod_broadcom_code(val, params) \
fourcc_mod_code(BROADCOM, ((((__u64)params) << __fourcc_mod_broadcom_param_shift) | val))
#define fourcc_mod_broadcom_param(m) \
((int)(((m) >> __fourcc_mod_broadcom_param_shift) & \
((1ULL << __fourcc_mod_broadcom_param_bits) - 1)))
#define fourcc_mod_broadcom_mod(m) \
((m) & ~(((1ULL << __fourcc_mod_broadcom_param_bits) - 1) << \
__fourcc_mod_broadcom_param_shift))
/*
* Broadcom VC4 "T" format
*
* This is the primary layout that the V3D GPU can texture from (it
* can't do linear). The T format has:
*
* - 64b utiles of pixels in a raster-order grid according to cpp. It's 4x4
* pixels at 32 bit depth.
*
* - 1k subtiles made of a 4x4 raster-order grid of 64b utiles (so usually
* 16x16 pixels).
*
* - 4k tiles made of a 2x2 grid of 1k subtiles (so usually 32x32 pixels). On
* even 4k tile rows, they're arranged as (BL, TL, TR, BR), and on odd rows
* they're (TR, BR, BL, TL), where bottom left is start of memory.
*
* - an image made of 4k tiles in rows either left-to-right (even rows of 4k
* tiles) or right-to-left (odd rows of 4k tiles).
*/
#define DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED fourcc_mod_code(BROADCOM, 1)
/*
* Broadcom SAND format
*
* This is the native format that the H.264 codec block uses. For VC4
* HVS, it is only valid for H.264 (NV12/21) and RGBA modes.
*
* The image can be considered to be split into columns, and the
* columns are placed consecutively into memory. The width of those
* columns can be either 32, 64, 128, or 256 pixels, but in practice
* only 128 pixel columns are used.
*
* The pitch between the start of each column is set to optimally
* switch between SDRAM banks. This is passed as the number of lines
* of column width in the modifier (we can't use the stride value due
 * to various core checks that look at it, so you should set the
* stride to width*cpp).
*
* Note that the column height for this format modifier is the same
* for all of the planes, assuming that each column contains both Y
* and UV. Some SAND-using hardware stores UV in a separate tiled
* image from Y to reduce the column height, which is not supported
* with these modifiers.
*
* The DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT modifier is also
* supported for DRM_FORMAT_P030 where the columns remain as 128 bytes
* wide, but as this is a 10 bpp format that translates to 96 pixels.
*/
#define DRM_FORMAT_MOD_BROADCOM_SAND32_COL_HEIGHT(v) \
fourcc_mod_broadcom_code(2, v)
#define DRM_FORMAT_MOD_BROADCOM_SAND64_COL_HEIGHT(v) \
fourcc_mod_broadcom_code(3, v)
#define DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(v) \
fourcc_mod_broadcom_code(4, v)
#define DRM_FORMAT_MOD_BROADCOM_SAND256_COL_HEIGHT(v) \
fourcc_mod_broadcom_code(5, v)
#define DRM_FORMAT_MOD_BROADCOM_SAND32 \
DRM_FORMAT_MOD_BROADCOM_SAND32_COL_HEIGHT(0)
#define DRM_FORMAT_MOD_BROADCOM_SAND64 \
DRM_FORMAT_MOD_BROADCOM_SAND64_COL_HEIGHT(0)
#define DRM_FORMAT_MOD_BROADCOM_SAND128 \
DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(0)
#define DRM_FORMAT_MOD_BROADCOM_SAND256 \
DRM_FORMAT_MOD_BROADCOM_SAND256_COL_HEIGHT(0)
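/*
 * Illustrative example (not normative): a SAND128 modifier with an explicit
 * column height, round-tripped through the helpers above. The column height
 * of 96 lines is only an example value.
 */
static __inline__ int example_sand128_roundtrip(void)
{
	__u64 modifier = DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(96);

	return fourcc_mod_broadcom_param(modifier) == 96 &&
	       fourcc_mod_broadcom_mod(modifier) == DRM_FORMAT_MOD_BROADCOM_SAND128;
}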
/* Broadcom UIF format
*
* This is the common format for the current Broadcom multimedia
* blocks, including V3D 3.x and newer, newer video codecs, and
* displays.
*
* The image consists of utiles (64b blocks), UIF blocks (2x2 utiles),
* and macroblocks (4x4 UIF blocks). Those 4x4 UIF block groups are
* stored in columns, with padding between the columns to ensure that
* moving from one column to the next doesn't hit the same SDRAM page
* bank.
*
* To calculate the padding, it is assumed that each hardware block
* and the software driving it knows the platform's SDRAM page size,
* number of banks, and XOR address, and that it's identical between
* all blocks using the format. This tiling modifier will use XOR as
* necessary to reduce the padding. If a hardware block can't do XOR,
* the assumption is that a no-XOR tiling modifier will be created.
*/
#define DRM_FORMAT_MOD_BROADCOM_UIF fourcc_mod_code(BROADCOM, 6)
/*
* Arm Framebuffer Compression (AFBC) modifiers
*
* AFBC is a proprietary lossless image compression protocol and format.
* It provides fine-grained random access and minimizes the amount of data
* transferred between IP blocks.
*
* AFBC has several features which may be supported and/or used, which are
* represented using bits in the modifier. Not all combinations are valid,
* and different devices or use-cases may support different combinations.
*
* Further information on the use of AFBC modifiers can be found in
* Documentation/gpu/afbc.rst
*/
/*
* The top 4 bits (out of the 56 bits allotted for specifying vendor specific
* modifiers) denote the category for modifiers. Currently we have three
 * categories of modifiers i.e. AFBC, MISC and AFRC. We can have a maximum of
* sixteen different categories.
*/
#define DRM_FORMAT_MOD_ARM_CODE(__type, __val) \
fourcc_mod_code(ARM, ((__u64)(__type) << 52) | ((__val) & 0x000fffffffffffffULL))
#define DRM_FORMAT_MOD_ARM_TYPE_AFBC 0x00
#define DRM_FORMAT_MOD_ARM_TYPE_MISC 0x01
#define DRM_FORMAT_MOD_ARM_AFBC(__afbc_mode) \
DRM_FORMAT_MOD_ARM_CODE(DRM_FORMAT_MOD_ARM_TYPE_AFBC, __afbc_mode)
/*
* AFBC superblock size
*
* Indicates the superblock size(s) used for the AFBC buffer. The buffer
* size (in pixels) must be aligned to a multiple of the superblock size.
 * The four least significant bits (LSBs) are reserved for the block size.
*
* Where one superblock size is specified, it applies to all planes of the
* buffer (e.g. 16x16, 32x8). When multiple superblock sizes are specified,
* the first applies to the Luma plane and the second applies to the Chroma
 * plane(s), e.g. 32x8_64x4 means 32x8 Luma with 64x4 Chroma.
* Multiple superblock sizes are only valid for multi-plane YCbCr formats.
*/
#define AFBC_FORMAT_MOD_BLOCK_SIZE_MASK 0xf
#define AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 (1ULL)
#define AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 (2ULL)
#define AFBC_FORMAT_MOD_BLOCK_SIZE_64x4 (3ULL)
#define AFBC_FORMAT_MOD_BLOCK_SIZE_32x8_64x4 (4ULL)
/*
* AFBC lossless colorspace transform
*
* Indicates that the buffer makes use of the AFBC lossless colorspace
* transform.
*/
#define AFBC_FORMAT_MOD_YTR (1ULL << 4)
/*
* AFBC block-split
*
* Indicates that the payload of each superblock is split. The second
* half of the payload is positioned at a predefined offset from the start
* of the superblock payload.
*/
#define AFBC_FORMAT_MOD_SPLIT (1ULL << 5)
/*
* AFBC sparse layout
*
* This flag indicates that the payload of each superblock must be stored at a
* predefined position relative to the other superblocks in the same AFBC
* buffer. This order is the same order used by the header buffer. In this mode
* each superblock is given the same amount of space as an uncompressed
* superblock of the particular format would require, rounding up to the next
* multiple of 128 bytes in size.
*/
#define AFBC_FORMAT_MOD_SPARSE (1ULL << 6)
/*
* AFBC copy-block restrict
*
* Buffers with this flag must obey the copy-block restriction. The restriction
* is such that there are no copy-blocks referring across the border of 8x8
* blocks. For the subsampled data the 8x8 limitation is also subsampled.
*/
#define AFBC_FORMAT_MOD_CBR (1ULL << 7)
/*
* AFBC tiled layout
*
* The tiled layout groups superblocks in 8x8 or 4x4 tiles, where all
* superblocks inside a tile are stored together in memory. 8x8 tiles are used
* for pixel formats up to and including 32 bpp while 4x4 tiles are used for
* larger bpp formats. The order between the tiles is scan line.
* When the tiled layout is used, the buffer size (in pixels) must be aligned
* to the tile size.
*/
#define AFBC_FORMAT_MOD_TILED (1ULL << 8)
/*
* AFBC solid color blocks
*
* Indicates that the buffer makes use of solid-color blocks, whereby bandwidth
* can be reduced if a whole superblock is a single color.
*/
#define AFBC_FORMAT_MOD_SC (1ULL << 9)
/*
* AFBC double-buffer
*
* Indicates that the buffer is allocated in a layout safe for front-buffer
* rendering.
*/
#define AFBC_FORMAT_MOD_DB (1ULL << 10)
/*
* AFBC buffer content hints
*
* Indicates that the buffer includes per-superblock content hints.
*/
#define AFBC_FORMAT_MOD_BCH (1ULL << 11)
/* AFBC uncompressed storage mode
*
* Indicates that the buffer is using AFBC uncompressed storage mode.
* In this mode all superblock payloads in the buffer use the uncompressed
* storage mode, which is usually only used for data which cannot be compressed.
* The buffer layout is the same as for AFBC buffers without USM set, this only
* affects the storage mode of the individual superblocks. Note that even a
* buffer without USM set may use uncompressed storage mode for some or all
* superblocks, USM just guarantees it for all.
*/
#define AFBC_FORMAT_MOD_USM (1ULL << 12)
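/*
 * Illustrative example (not normative): an AFBC modifier built from the
 * feature bits above, combining 16x16 superblocks with the lossless
 * colorspace transform and the sparse layout. Whether a given device
 * supports this combination is not implied here.
 */
#define EXAMPLE_AFBC_16X16_YTR_SPARSE \
	DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 | \
				AFBC_FORMAT_MOD_YTR | \
				AFBC_FORMAT_MOD_SPARSE)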
/*
* Arm Fixed-Rate Compression (AFRC) modifiers
*
* AFRC is a proprietary fixed rate image compression protocol and format,
* designed to provide guaranteed bandwidth and memory footprint
* reductions in graphics and media use-cases.
*
* AFRC buffers consist of one or more planes, with the same components
* and meaning as an uncompressed buffer using the same pixel format.
*
* Within each plane, the pixel/luma/chroma values are grouped into
* "coding unit" blocks which are individually compressed to a
* fixed size (in bytes). All coding units within a given plane of a buffer
* store the same number of values, and have the same compressed size.
*
* The coding unit size is configurable, allowing different rates of compression.
*
* The start of each AFRC buffer plane must be aligned to an alignment granule which
* depends on the coding unit size.
*
* Coding Unit Size Plane Alignment
* ---------------- ---------------
* 16 bytes 1024 bytes
* 24 bytes 512 bytes
* 32 bytes 2048 bytes
*
* Coding units are grouped into paging tiles. AFRC buffer dimensions must be aligned
* to a multiple of the paging tile dimensions.
* The dimensions of each paging tile depend on whether the buffer is optimised for
* scanline (SCAN layout) or rotated (ROT layout) access.
*
* Layout Paging Tile Width Paging Tile Height
* ------ ----------------- ------------------
* SCAN 16 coding units 4 coding units
* ROT 8 coding units 8 coding units
*
* The dimensions of each coding unit depend on the number of components
* in the compressed plane and whether the buffer is optimised for
* scanline (SCAN layout) or rotated (ROT layout) access.
*
* Number of Components in Plane Layout Coding Unit Width Coding Unit Height
* ----------------------------- --------- ----------------- ------------------
* 1 SCAN 16 samples 4 samples
* Example: 16x4 luma samples in a 'Y' plane
* 16x4 chroma 'V' values, in the 'V' plane of a fully-planar YUV buffer
* ----------------------------- --------- ----------------- ------------------
* 1 ROT 8 samples 8 samples
* Example: 8x8 luma samples in a 'Y' plane
* 8x8 chroma 'V' values, in the 'V' plane of a fully-planar YUV buffer
* ----------------------------- --------- ----------------- ------------------
* 2 DONT CARE 8 samples 4 samples
* Example: 8x4 chroma pairs in the 'UV' plane of a semi-planar YUV buffer
* ----------------------------- --------- ----------------- ------------------
* 3 DONT CARE 4 samples 4 samples
* Example: 4x4 pixels in an RGB buffer without alpha
* ----------------------------- --------- ----------------- ------------------
* 4 DONT CARE 4 samples 4 samples
* Example: 4x4 pixels in an RGB buffer with alpha
*/
#define DRM_FORMAT_MOD_ARM_TYPE_AFRC 0x02
#define DRM_FORMAT_MOD_ARM_AFRC(__afrc_mode) \
DRM_FORMAT_MOD_ARM_CODE(DRM_FORMAT_MOD_ARM_TYPE_AFRC, __afrc_mode)
/*
* AFRC coding unit size modifier.
*
* Indicates the number of bytes used to store each compressed coding unit for
* one or more planes in an AFRC encoded buffer. The coding unit size for chrominance
* is the same for both Cb and Cr, which may be stored in separate planes.
*
* AFRC_FORMAT_MOD_CU_SIZE_P0 indicates the number of bytes used to store
* each compressed coding unit in the first plane of the buffer. For RGBA buffers
* this is the only plane, while for semi-planar and fully-planar YUV buffers,
* this corresponds to the luma plane.
*
* AFRC_FORMAT_MOD_CU_SIZE_P12 indicates the number of bytes used to store
* each compressed coding unit in the second and third planes in the buffer.
* For semi-planar and fully-planar YUV buffers, this corresponds to the chroma plane(s).
*
* For single-plane buffers, AFRC_FORMAT_MOD_CU_SIZE_P0 must be specified
* and AFRC_FORMAT_MOD_CU_SIZE_P12 must be zero.
* For semi-planar and fully-planar buffers, both AFRC_FORMAT_MOD_CU_SIZE_P0 and
* AFRC_FORMAT_MOD_CU_SIZE_P12 must be specified.
*/
#define AFRC_FORMAT_MOD_CU_SIZE_MASK 0xf
#define AFRC_FORMAT_MOD_CU_SIZE_16 (1ULL)
#define AFRC_FORMAT_MOD_CU_SIZE_24 (2ULL)
#define AFRC_FORMAT_MOD_CU_SIZE_32 (3ULL)
#define AFRC_FORMAT_MOD_CU_SIZE_P0(__afrc_cu_size) (__afrc_cu_size)
#define AFRC_FORMAT_MOD_CU_SIZE_P12(__afrc_cu_size) ((__afrc_cu_size) << 4)
/*
* AFRC scanline memory layout.
*
 * Indicates whether the buffer uses the scanline-optimised layout for an AFRC
 * encoded buffer; otherwise it uses the rotation-optimised layout.
* The memory layout is the same for all planes.
*/
#define AFRC_FORMAT_MOD_LAYOUT_SCAN (1ULL << 8)
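/*
 * Illustrative example (not normative): an AFRC modifier for a semi-planar
 * YUV buffer with 16-byte coding units on both the luma and chroma planes
 * and the scanline-optimised layout. Purely an example combination.
 */
#define EXAMPLE_AFRC_SCAN_CU16 \
	DRM_FORMAT_MOD_ARM_AFRC(AFRC_FORMAT_MOD_CU_SIZE_P0(AFRC_FORMAT_MOD_CU_SIZE_16) | \
				AFRC_FORMAT_MOD_CU_SIZE_P12(AFRC_FORMAT_MOD_CU_SIZE_16) | \
				AFRC_FORMAT_MOD_LAYOUT_SCAN)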
/*
* Arm 16x16 Block U-Interleaved modifier
*
* This is used by Arm Mali Utgard and Midgard GPUs. It divides the image
* into 16x16 pixel blocks. Blocks are stored linearly in order, but pixels
* in the block are reordered.
*/
#define DRM_FORMAT_MOD_ARM_16X16_BLOCK_U_INTERLEAVED \
DRM_FORMAT_MOD_ARM_CODE(DRM_FORMAT_MOD_ARM_TYPE_MISC, 1ULL)
/*
* Allwinner tiled modifier
*
* This tiling mode is implemented by the VPU found on all Allwinner platforms,
* codenamed sunxi. It is associated with a YUV format that uses either 2 or 3
* planes.
*
 * With this tiling, the luminance samples are arranged in tiles representing
 * 32x32 pixels and the chrominance samples in tiles representing 32x64 pixels.
 * The pixel order in each tile is linear, and the tiles themselves are also
 * arranged linearly, both in row-major order.
*/
#define DRM_FORMAT_MOD_ALLWINNER_TILED fourcc_mod_code(ALLWINNER, 1)
/*
* Amlogic Video Framebuffer Compression modifiers
*
* Amlogic uses a proprietary lossless image compression protocol and format
* for their hardware video codec accelerators, either video decoders or
* video input encoders.
*
* It considerably reduces memory bandwidth while writing and reading
* frames in memory.
*
 * The underlying storage is considered to be 3 components, 8-bit or 10-bit
 * per component YCbCr 420, single plane:
* - DRM_FORMAT_YUV420_8BIT
* - DRM_FORMAT_YUV420_10BIT
*
 * The first 8 bits of the mode define the layout, and the following 8 bits
 * define the options changing the layout.
*
* Not all combinations are valid, and different SoCs may support different
* combinations of layout and options.
*/
#define __fourcc_mod_amlogic_layout_mask 0xff
#define __fourcc_mod_amlogic_options_shift 8
#define __fourcc_mod_amlogic_options_mask 0xff
#define DRM_FORMAT_MOD_AMLOGIC_FBC(__layout, __options) \
fourcc_mod_code(AMLOGIC, \
((__layout) & __fourcc_mod_amlogic_layout_mask) | \
(((__options) & __fourcc_mod_amlogic_options_mask) \
<< __fourcc_mod_amlogic_options_shift))
/* Amlogic FBC Layouts */
/*
* Amlogic FBC Basic Layout
*
* The basic layout is composed of:
 * - body content organized in 64x32 superblocks with 4096 bytes per
 *   superblock in default mode.
 * - a header block of 32 bytes per 128x64 pixels
*
 * This layout is transferable between Amlogic SoCs supporting this modifier.
*/
#define AMLOGIC_FBC_LAYOUT_BASIC (1ULL)
/*
* Amlogic FBC Scatter Memory layout
*
* Indicates the header contains IOMMU references to the compressed
* frames content to optimize memory access and layout.
*
* In this mode, only the header memory address is needed, thus the
* content memory organization is tied to the current producer
 * execution and cannot be saved/dumped nor transferred between
* Amlogic SoCs supporting this modifier.
*
* Due to the nature of the layout, these buffers are not expected to
* be accessible by the user-space clients, but only accessible by the
* hardware producers and consumers.
*
* The user-space clients should expect a failure while trying to mmap
* the DMA-BUF handle returned by the producer.
*/
#define AMLOGIC_FBC_LAYOUT_SCATTER (2ULL)
/* Amlogic FBC Layout Options Bit Mask */
/*
* Amlogic FBC Memory Saving mode
*
 * Indicates the storage is packed when the pixel size is a multiple of word
 * boundaries, i.e. 8-bit samples should be stored in this mode to save
 * allocation memory.
*
* This mode reduces body layout to 3072 bytes per 64x32 superblock with
* the basic layout and 3200 bytes per 64x32 superblock combined with
* the scatter layout.
*/
#define AMLOGIC_FBC_OPTION_MEM_SAVING (1ULL << 0)
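/*
 * Illustrative example (not normative): the basic layout above combined with
 * the memory saving option. Which combinations a given SoC accepts is
 * platform specific, as noted above.
 */
#define EXAMPLE_AMLOGIC_FBC_BASIC_MEM_SAVING \
	DRM_FORMAT_MOD_AMLOGIC_FBC(AMLOGIC_FBC_LAYOUT_BASIC, \
				   AMLOGIC_FBC_OPTION_MEM_SAVING)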
/*
* AMD modifiers
*
* Memory layout:
*
* without DCC:
* - main surface
*
* with DCC & without DCC_RETILE:
* - main surface in plane 0
* - DCC surface in plane 1 (RB-aligned, pipe-aligned if DCC_PIPE_ALIGN is set)
*
* with DCC & DCC_RETILE:
* - main surface in plane 0
* - displayable DCC surface in plane 1 (not RB-aligned & not pipe-aligned)
* - pipe-aligned DCC surface in plane 2 (RB-aligned & pipe-aligned)
*
* For multi-plane formats the above surfaces get merged into one plane for
* each format plane, based on the required alignment only.
*
* Bits Parameter Notes
* ----- ------------------------ ---------------------------------------------
*
* 7:0 TILE_VERSION Values are AMD_FMT_MOD_TILE_VER_*
* 12:8 TILE Values are AMD_FMT_MOD_TILE_<version>_*
* 13 DCC
* 14 DCC_RETILE
* 15 DCC_PIPE_ALIGN
* 16 DCC_INDEPENDENT_64B
* 17 DCC_INDEPENDENT_128B
* 19:18 DCC_MAX_COMPRESSED_BLOCK Values are AMD_FMT_MOD_DCC_BLOCK_*
* 20 DCC_CONSTANT_ENCODE
* 23:21 PIPE_XOR_BITS Only for some chips
* 26:24 BANK_XOR_BITS Only for some chips
* 29:27 PACKERS Only for some chips
* 32:30 RB Only for some chips
* 35:33 PIPE Only for some chips
* 55:36 - Reserved for future use, must be zero
*/
#define AMD_FMT_MOD fourcc_mod_code(AMD, 0)
#define IS_AMD_FMT_MOD(val) (((val) >> 56) == DRM_FORMAT_MOD_VENDOR_AMD)
/* Reserve 0 for GFX8 and older */
#define AMD_FMT_MOD_TILE_VER_GFX9 1
#define AMD_FMT_MOD_TILE_VER_GFX10 2
#define AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS 3
#define AMD_FMT_MOD_TILE_VER_GFX11 4
/*
* 64K_S is the same for GFX9/GFX10/GFX10_RBPLUS and hence has GFX9 as canonical
* version.
*/
#define AMD_FMT_MOD_TILE_GFX9_64K_S 9
/*
* 64K_D for non-32 bpp is the same for GFX9/GFX10/GFX10_RBPLUS and hence has
* GFX9 as canonical version.
*/
#define AMD_FMT_MOD_TILE_GFX9_64K_D 10
#define AMD_FMT_MOD_TILE_GFX9_64K_S_X 25
#define AMD_FMT_MOD_TILE_GFX9_64K_D_X 26
#define AMD_FMT_MOD_TILE_GFX9_64K_R_X 27
#define AMD_FMT_MOD_TILE_GFX11_256K_R_X 31
#define AMD_FMT_MOD_DCC_BLOCK_64B 0
#define AMD_FMT_MOD_DCC_BLOCK_128B 1
#define AMD_FMT_MOD_DCC_BLOCK_256B 2
#define AMD_FMT_MOD_TILE_VERSION_SHIFT 0
#define AMD_FMT_MOD_TILE_VERSION_MASK 0xFF
#define AMD_FMT_MOD_TILE_SHIFT 8
#define AMD_FMT_MOD_TILE_MASK 0x1F
/* Whether DCC compression is enabled. */
#define AMD_FMT_MOD_DCC_SHIFT 13
#define AMD_FMT_MOD_DCC_MASK 0x1
/*
 * Whether to include two DCC surfaces: one which is RB- and pipe-aligned, and
 * one which is not aligned.
*/
#define AMD_FMT_MOD_DCC_RETILE_SHIFT 14
#define AMD_FMT_MOD_DCC_RETILE_MASK 0x1
/* Only set if DCC_RETILE = false */
#define AMD_FMT_MOD_DCC_PIPE_ALIGN_SHIFT 15
#define AMD_FMT_MOD_DCC_PIPE_ALIGN_MASK 0x1
#define AMD_FMT_MOD_DCC_INDEPENDENT_64B_SHIFT 16
#define AMD_FMT_MOD_DCC_INDEPENDENT_64B_MASK 0x1
#define AMD_FMT_MOD_DCC_INDEPENDENT_128B_SHIFT 17
#define AMD_FMT_MOD_DCC_INDEPENDENT_128B_MASK 0x1
#define AMD_FMT_MOD_DCC_MAX_COMPRESSED_BLOCK_SHIFT 18
#define AMD_FMT_MOD_DCC_MAX_COMPRESSED_BLOCK_MASK 0x3
/*
* DCC supports embedding some clear colors directly in the DCC surface.
* However, on older GPUs the rendering HW ignores the embedded clear color
* and prefers the driver provided color. This necessitates doing a fastclear
* eliminate operation before a process transfers control.
*
* If this bit is set that means the fastclear eliminate is not needed for these
* embeddable colors.
*/
#define AMD_FMT_MOD_DCC_CONSTANT_ENCODE_SHIFT 20
#define AMD_FMT_MOD_DCC_CONSTANT_ENCODE_MASK 0x1
/*
 * The fields below account for per-GPU differences. They are only relevant
 * for GFX9 and later, and only if the tile field is *_X/_T.
*
* PIPE_XOR_BITS = always needed
* BANK_XOR_BITS = only for TILE_VER_GFX9
* PACKERS = only for TILE_VER_GFX10_RBPLUS
* RB = only for TILE_VER_GFX9 & DCC
* PIPE = only for TILE_VER_GFX9 & DCC & (DCC_RETILE | DCC_PIPE_ALIGN)
*/
#define AMD_FMT_MOD_PIPE_XOR_BITS_SHIFT 21
#define AMD_FMT_MOD_PIPE_XOR_BITS_MASK 0x7
#define AMD_FMT_MOD_BANK_XOR_BITS_SHIFT 24
#define AMD_FMT_MOD_BANK_XOR_BITS_MASK 0x7
#define AMD_FMT_MOD_PACKERS_SHIFT 27
#define AMD_FMT_MOD_PACKERS_MASK 0x7
#define AMD_FMT_MOD_RB_SHIFT 30
#define AMD_FMT_MOD_RB_MASK 0x7
#define AMD_FMT_MOD_PIPE_SHIFT 33
#define AMD_FMT_MOD_PIPE_MASK 0x7
#define AMD_FMT_MOD_SET(field, value) \
((__u64)(value) << AMD_FMT_MOD_##field##_SHIFT)
#define AMD_FMT_MOD_GET(field, value) \
(((value) >> AMD_FMT_MOD_##field##_SHIFT) & AMD_FMT_MOD_##field##_MASK)
#define AMD_FMT_MOD_CLEAR(field) \
(~((__u64)AMD_FMT_MOD_##field##_MASK << AMD_FMT_MOD_##field##_SHIFT))
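/*
 * Usage sketch (illustrative, not from the original header): composing and
 * decomposing an AMD modifier with the helpers above. The field values are
 * hypothetical; real values depend on the GPU configuration reported by the
 * kernel driver.
 *
 *   __u64 modifier = AMD_FMT_MOD |
 *                    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
 *                    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
 *                    AMD_FMT_MOD_SET(DCC, 1) |
 *                    AMD_FMT_MOD_SET(PIPE_XOR_BITS, 2);
 *
 *   if (IS_AMD_FMT_MOD(modifier))
 *           tile = AMD_FMT_MOD_GET(TILE, modifier);
 */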
/* Mobile Industry Processor Interface (MIPI) modifiers */
/*
* MIPI CSI-2 packing layout
*
 * The CSI-2 RAW formats (for example Bayer) use a different packing layout
 * depending on the sample size.
 *
 * - 10 bits per sample
 *   Every four consecutive samples are packed into five bytes. Each of the
 *   first four bytes contains the eight high-order bits of one sample, and
 *   the fifth byte contains the two least-significant bits of each sample,
 *   in the same order.
 *
 * - 12 bits per sample
 *   Every two consecutive samples are packed into three bytes. Each of the
 *   first two bytes contains the eight high-order bits of one sample, and
 *   the third byte contains the four least-significant bits of each sample,
 *   in the same order.
 *
 * - 14 bits per sample
 *   Every four consecutive samples are packed into seven bytes. Each of the
 *   first four bytes contains the eight high-order bits of one sample, and
 *   the three following bytes contain the six least-significant bits of each
 *   sample, in the same order.
*/
#define MIPI_FORMAT_MOD_CSI2_PACKED fourcc_mod_code(MIPI, 1)
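/*
 * Unpacking sketch (illustrative, not part of the ABI): recovering four
 * 10-bit samples from one five-byte group of the 10-bit layout described
 * above. csi2_unpack10() is a hypothetical helper name.
 *
 *   static inline void csi2_unpack10(const __u8 b[5], __u16 s[4])
 *   {
 *           for (int i = 0; i < 4; i++)
 *                   s[i] = (__u16)((b[i] << 2) | ((b[4] >> (2 * i)) & 0x3));
 *   }
 */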
#define PISP_FORMAT_MOD_COMPRESS_MODE1 fourcc_mod_code(RPI, 1)
#define PISP_FORMAT_MOD_COMPRESS_MODE2 fourcc_mod_code(RPI, 2)
#if defined(__cplusplus)
}
#endif
#endif /* DRM_FOURCC_H */
|
0 | repos/libcamera/include | repos/libcamera/include/linux/v4l2-subdev.h | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* V4L2 subdev userspace API
*
* Copyright (C) 2010 Nokia Corporation
*
* Contacts: Laurent Pinchart <[email protected]>
* Sakari Ailus <[email protected]>
*/
#ifndef __LINUX_V4L2_SUBDEV_H
#define __LINUX_V4L2_SUBDEV_H
#include <linux/const.h>
#include <linux/ioctl.h>
#include <linux/types.h>
#include <linux/v4l2-common.h>
#include <linux/v4l2-mediabus.h>
/**
* enum v4l2_subdev_format_whence - Media bus format type
* @V4L2_SUBDEV_FORMAT_TRY: try format, for negotiation only
* @V4L2_SUBDEV_FORMAT_ACTIVE: active format, applied to the device
*/
enum v4l2_subdev_format_whence {
V4L2_SUBDEV_FORMAT_TRY = 0,
V4L2_SUBDEV_FORMAT_ACTIVE = 1,
};
/**
* struct v4l2_subdev_format - Pad-level media bus format
* @which: format type (from enum v4l2_subdev_format_whence)
* @pad: pad number, as reported by the media API
* @format: media bus format (format code and frame size)
* @stream: stream number, defined in subdev routing
* @reserved: drivers and applications must zero this array
*/
struct v4l2_subdev_format {
__u32 which;
__u32 pad;
struct v4l2_mbus_framefmt format;
__u32 stream;
__u32 reserved[7];
};
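/*
 * Usage sketch (illustrative, not from the original header): negotiating the
 * active format on pad 0 with VIDIOC_SUBDEV_S_FMT (defined below). The width,
 * height and media bus code are hypothetical; the driver may adjust them to
 * the closest supported configuration.
 *
 *   struct v4l2_subdev_format fmt = {
 *           .which = V4L2_SUBDEV_FORMAT_ACTIVE,
 *           .pad = 0,
 *   };
 *   fmt.format.width = 1920;
 *   fmt.format.height = 1080;
 *   fmt.format.code = MEDIA_BUS_FMT_SBGGR10_1X10;
 *   ioctl(subdev_fd, VIDIOC_SUBDEV_S_FMT, &fmt);
 */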
/**
* struct v4l2_subdev_crop - Pad-level crop settings
* @which: format type (from enum v4l2_subdev_format_whence)
* @pad: pad number, as reported by the media API
* @rect: pad crop rectangle boundaries
* @stream: stream number, defined in subdev routing
* @reserved: drivers and applications must zero this array
*
* The subdev crop API is an obsolete interface and may be removed in the
* future. It is superseded by the selection API. No new extensions to this
* structure will be accepted.
*/
struct v4l2_subdev_crop {
__u32 which;
__u32 pad;
struct v4l2_rect rect;
__u32 stream;
__u32 reserved[7];
};
#define V4L2_SUBDEV_MBUS_CODE_CSC_COLORSPACE 0x00000001
#define V4L2_SUBDEV_MBUS_CODE_CSC_XFER_FUNC 0x00000002
#define V4L2_SUBDEV_MBUS_CODE_CSC_YCBCR_ENC 0x00000004
#define V4L2_SUBDEV_MBUS_CODE_CSC_HSV_ENC V4L2_SUBDEV_MBUS_CODE_CSC_YCBCR_ENC
#define V4L2_SUBDEV_MBUS_CODE_CSC_QUANTIZATION 0x00000008
/**
* struct v4l2_subdev_mbus_code_enum - Media bus format enumeration
* @pad: pad number, as reported by the media API
* @index: format index during enumeration
* @code: format code (MEDIA_BUS_FMT_ definitions)
* @which: format type (from enum v4l2_subdev_format_whence)
* @flags: flags set by the driver, (V4L2_SUBDEV_MBUS_CODE_*)
* @stream: stream number, defined in subdev routing
* @reserved: drivers and applications must zero this array
*/
struct v4l2_subdev_mbus_code_enum {
__u32 pad;
__u32 index;
__u32 code;
__u32 which;
__u32 flags;
__u32 stream;
__u32 reserved[6];
};
/**
* struct v4l2_subdev_frame_size_enum - Media bus format enumeration
* @index: format index during enumeration
* @pad: pad number, as reported by the media API
* @code: format code (MEDIA_BUS_FMT_ definitions)
* @min_width: minimum frame width, in pixels
* @max_width: maximum frame width, in pixels
* @min_height: minimum frame height, in pixels
* @max_height: maximum frame height, in pixels
* @which: format type (from enum v4l2_subdev_format_whence)
* @stream: stream number, defined in subdev routing
* @reserved: drivers and applications must zero this array
*/
struct v4l2_subdev_frame_size_enum {
__u32 index;
__u32 pad;
__u32 code;
__u32 min_width;
__u32 max_width;
__u32 min_height;
__u32 max_height;
__u32 which;
__u32 stream;
__u32 reserved[7];
};
/**
* struct v4l2_subdev_frame_interval - Pad-level frame rate
* @pad: pad number, as reported by the media API
* @interval: frame interval in seconds
* @stream: stream number, defined in subdev routing
* @which: interval type (from enum v4l2_subdev_format_whence)
* @reserved: drivers and applications must zero this array
*/
struct v4l2_subdev_frame_interval {
__u32 pad;
struct v4l2_fract interval;
__u32 stream;
__u32 which;
__u32 reserved[7];
};
/**
* struct v4l2_subdev_frame_interval_enum - Frame interval enumeration
* @pad: pad number, as reported by the media API
* @index: frame interval index during enumeration
* @code: format code (MEDIA_BUS_FMT_ definitions)
* @width: frame width in pixels
* @height: frame height in pixels
* @interval: frame interval in seconds
* @which: interval type (from enum v4l2_subdev_format_whence)
* @stream: stream number, defined in subdev routing
* @reserved: drivers and applications must zero this array
*/
struct v4l2_subdev_frame_interval_enum {
__u32 index;
__u32 pad;
__u32 code;
__u32 width;
__u32 height;
struct v4l2_fract interval;
__u32 which;
__u32 stream;
__u32 reserved[7];
};
/**
* struct v4l2_subdev_selection - selection info
*
* @which: either V4L2_SUBDEV_FORMAT_ACTIVE or V4L2_SUBDEV_FORMAT_TRY
* @pad: pad number, as reported by the media API
* @target: Selection target, used to choose one of possible rectangles,
* defined in v4l2-common.h; V4L2_SEL_TGT_* .
* @flags: constraint flags, defined in v4l2-common.h; V4L2_SEL_FLAG_*.
* @r: coordinates of the selection window
* @stream: stream number, defined in subdev routing
* @reserved: for future use, set to zero for now
*
* Hardware may use multiple helper windows to process a video stream.
 * The structure is used to exchange these selection areas between
 * an application and a driver.
*/
struct v4l2_subdev_selection {
__u32 which;
__u32 pad;
__u32 target;
__u32 flags;
struct v4l2_rect r;
__u32 stream;
__u32 reserved[7];
};
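/*
 * Usage sketch (illustrative, not from the original header): reading the
 * active crop rectangle on pad 0 with VIDIOC_SUBDEV_G_SELECTION (defined
 * below). V4L2_SEL_TGT_CROP comes from v4l2-common.h.
 *
 *   struct v4l2_subdev_selection sel = {
 *           .which = V4L2_SUBDEV_FORMAT_ACTIVE,
 *           .pad = 0,
 *           .target = V4L2_SEL_TGT_CROP,
 *   };
 *   ioctl(subdev_fd, VIDIOC_SUBDEV_G_SELECTION, &sel); // fills sel.r
 */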
/**
* struct v4l2_subdev_capability - subdev capabilities
* @version: the driver versioning number
* @capabilities: the subdev capabilities, see V4L2_SUBDEV_CAP_*
* @reserved: for future use, set to zero for now
*/
struct v4l2_subdev_capability {
__u32 version;
__u32 capabilities;
__u32 reserved[14];
};
/* The v4l2 sub-device video device node is registered in read-only mode. */
#define V4L2_SUBDEV_CAP_RO_SUBDEV 0x00000001
/* The v4l2 sub-device supports routing and multiplexed streams. */
#define V4L2_SUBDEV_CAP_STREAMS 0x00000002
/*
* Is the route active? An active route will start when streaming is enabled
* on a video node.
*/
#define V4L2_SUBDEV_ROUTE_FL_ACTIVE (1U << 0)
/**
* struct v4l2_subdev_route - A route inside a subdev
*
* @sink_pad: the sink pad index
* @sink_stream: the sink stream identifier
* @source_pad: the source pad index
* @source_stream: the source stream identifier
* @flags: route flags V4L2_SUBDEV_ROUTE_FL_*
* @reserved: drivers and applications must zero this array
*/
struct v4l2_subdev_route {
__u32 sink_pad;
__u32 sink_stream;
__u32 source_pad;
__u32 source_stream;
__u32 flags;
__u32 reserved[5];
};
/**
* struct v4l2_subdev_routing - Subdev routing information
*
* @which: configuration type (from enum v4l2_subdev_format_whence)
* @len_routes: the length of the routes array, in routes; set by the user, not
* modified by the kernel
* @routes: pointer to the routes array
* @num_routes: the total number of routes, possibly more than fits in the
* routes array
* @reserved: drivers and applications must zero this array
*/
struct v4l2_subdev_routing {
__u32 which;
__u32 len_routes;
__u64 routes;
__u32 num_routes;
__u32 reserved[11];
};
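/*
 * Usage sketch (illustrative, not from the original header): activating a
 * single route from sink pad 0 to source pad 1 with VIDIOC_SUBDEV_S_ROUTING
 * (defined below). The pad numbers are hypothetical.
 *
 *   struct v4l2_subdev_route route = {
 *           .sink_pad = 0,
 *           .source_pad = 1,
 *           .flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE,
 *   };
 *   struct v4l2_subdev_routing routing = {
 *           .which = V4L2_SUBDEV_FORMAT_ACTIVE,
 *           .len_routes = 1,
 *           .routes = (__u64)(uintptr_t)&route,
 *           .num_routes = 1,
 *   };
 *   ioctl(subdev_fd, VIDIOC_SUBDEV_S_ROUTING, &routing);
 */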
/*
* The client is aware of streams. Setting this flag enables the use of 'stream'
* fields (referring to the stream number) with various ioctls. If this is not
* set (which is the default), the 'stream' fields will be forced to 0 by the
* kernel.
*/
#define V4L2_SUBDEV_CLIENT_CAP_STREAMS (1ULL << 0)
/*
 * The client is aware of the 'which' field in struct
 * v4l2_subdev_frame_interval. If this is not set (which is the default), the
 * 'which' field is forced to V4L2_SUBDEV_FORMAT_ACTIVE by the kernel.
*/
#define V4L2_SUBDEV_CLIENT_CAP_INTERVAL_USES_WHICH (1ULL << 1)
/**
* struct v4l2_subdev_client_capability - Capabilities of the client accessing
* the subdev
*
* @capabilities: A bitmask of V4L2_SUBDEV_CLIENT_CAP_* flags.
*/
struct v4l2_subdev_client_capability {
__u64 capabilities;
};
/* Backwards compatibility define --- to be removed */
#define v4l2_subdev_edid v4l2_edid
#define VIDIOC_SUBDEV_QUERYCAP _IOR('V', 0, struct v4l2_subdev_capability)
#define VIDIOC_SUBDEV_G_FMT _IOWR('V', 4, struct v4l2_subdev_format)
#define VIDIOC_SUBDEV_S_FMT _IOWR('V', 5, struct v4l2_subdev_format)
#define VIDIOC_SUBDEV_G_FRAME_INTERVAL _IOWR('V', 21, struct v4l2_subdev_frame_interval)
#define VIDIOC_SUBDEV_S_FRAME_INTERVAL _IOWR('V', 22, struct v4l2_subdev_frame_interval)
#define VIDIOC_SUBDEV_ENUM_MBUS_CODE _IOWR('V', 2, struct v4l2_subdev_mbus_code_enum)
#define VIDIOC_SUBDEV_ENUM_FRAME_SIZE _IOWR('V', 74, struct v4l2_subdev_frame_size_enum)
#define VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL _IOWR('V', 75, struct v4l2_subdev_frame_interval_enum)
#define VIDIOC_SUBDEV_G_CROP _IOWR('V', 59, struct v4l2_subdev_crop)
#define VIDIOC_SUBDEV_S_CROP _IOWR('V', 60, struct v4l2_subdev_crop)
#define VIDIOC_SUBDEV_G_SELECTION _IOWR('V', 61, struct v4l2_subdev_selection)
#define VIDIOC_SUBDEV_S_SELECTION _IOWR('V', 62, struct v4l2_subdev_selection)
#define VIDIOC_SUBDEV_G_ROUTING _IOWR('V', 38, struct v4l2_subdev_routing)
#define VIDIOC_SUBDEV_S_ROUTING _IOWR('V', 39, struct v4l2_subdev_routing)
#define VIDIOC_SUBDEV_G_CLIENT_CAP _IOR('V', 101, struct v4l2_subdev_client_capability)
#define VIDIOC_SUBDEV_S_CLIENT_CAP _IOWR('V', 102, struct v4l2_subdev_client_capability)
/* The following ioctls are identical to the ioctls in videodev2.h */
#define VIDIOC_SUBDEV_G_STD _IOR('V', 23, v4l2_std_id)
#define VIDIOC_SUBDEV_S_STD _IOW('V', 24, v4l2_std_id)
#define VIDIOC_SUBDEV_ENUMSTD _IOWR('V', 25, struct v4l2_standard)
#define VIDIOC_SUBDEV_G_EDID _IOWR('V', 40, struct v4l2_edid)
#define VIDIOC_SUBDEV_S_EDID _IOWR('V', 41, struct v4l2_edid)
#define VIDIOC_SUBDEV_QUERYSTD _IOR('V', 63, v4l2_std_id)
#define VIDIOC_SUBDEV_S_DV_TIMINGS _IOWR('V', 87, struct v4l2_dv_timings)
#define VIDIOC_SUBDEV_G_DV_TIMINGS _IOWR('V', 88, struct v4l2_dv_timings)
#define VIDIOC_SUBDEV_ENUM_DV_TIMINGS _IOWR('V', 98, struct v4l2_enum_dv_timings)
#define VIDIOC_SUBDEV_QUERY_DV_TIMINGS _IOR('V', 99, struct v4l2_dv_timings)
#define VIDIOC_SUBDEV_DV_TIMINGS_CAP _IOWR('V', 100, struct v4l2_dv_timings_cap)
#endif
|
0 | repos/libcamera/include | repos/libcamera/include/linux/v4l2-mediabus.h | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Media Bus API header
*
* Copyright (C) 2009, Guennadi Liakhovetski <[email protected]>
*/
#ifndef __LINUX_V4L2_MEDIABUS_H
#define __LINUX_V4L2_MEDIABUS_H
#include <linux/media-bus-format.h>
#include <linux/types.h>
#include <linux/videodev2.h>
#define V4L2_MBUS_FRAMEFMT_SET_CSC 0x0001
/**
* struct v4l2_mbus_framefmt - frame format on the media bus
* @width: image width
* @height: image height
* @code: data format code (from enum v4l2_mbus_pixelcode)
* @field: used interlacing type (from enum v4l2_field), zero for metadata
* mbus codes
* @colorspace: colorspace of the data (from enum v4l2_colorspace), zero on
* metadata mbus codes
* @ycbcr_enc: YCbCr encoding of the data (from enum v4l2_ycbcr_encoding), zero
* for metadata mbus codes
* @hsv_enc: HSV encoding of the data (from enum v4l2_hsv_encoding), zero for
* metadata mbus codes
* @quantization: quantization of the data (from enum v4l2_quantization), zero
* for metadata mbus codes
* @xfer_func: transfer function of the data (from enum v4l2_xfer_func), zero
* for metadata mbus codes
* @flags: flags (V4L2_MBUS_FRAMEFMT_*)
* @reserved: reserved bytes that can be later used
*/
struct v4l2_mbus_framefmt {
__u32 width;
__u32 height;
__u32 code;
__u32 field;
__u32 colorspace;
union {
/* enum v4l2_ycbcr_encoding */
__u16 ycbcr_enc;
/* enum v4l2_hsv_encoding */
__u16 hsv_enc;
};
__u16 quantization;
__u16 xfer_func;
__u16 flags;
__u16 reserved[10];
};
/*
* enum v4l2_mbus_pixelcode and its definitions are now deprecated, and
* MEDIA_BUS_FMT_ definitions (defined in media-bus-format.h) should be
* used instead.
*
* New defines should only be added to media-bus-format.h. The
* v4l2_mbus_pixelcode enum is frozen.
*/
#define V4L2_MBUS_FROM_MEDIA_BUS_FMT(name) \
V4L2_MBUS_FMT_ ## name = MEDIA_BUS_FMT_ ## name
enum v4l2_mbus_pixelcode {
V4L2_MBUS_FROM_MEDIA_BUS_FMT(FIXED),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB444_2X8_PADHI_BE),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB444_2X8_PADHI_LE),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB555_2X8_PADHI_BE),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB555_2X8_PADHI_LE),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(BGR565_2X8_BE),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(BGR565_2X8_LE),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB565_2X8_BE),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB565_2X8_LE),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB666_1X18),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB888_1X24),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB888_2X12_BE),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(RGB888_2X12_LE),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(ARGB8888_1X32),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(Y8_1X8),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(UV8_1X8),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY8_1_5X8),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY8_1_5X8),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV8_1_5X8),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU8_1_5X8),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY8_2X8),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY8_2X8),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV8_2X8),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU8_2X8),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(Y10_1X10),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY10_2X10),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY10_2X10),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV10_2X10),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU10_2X10),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(Y12_1X12),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY8_1X16),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY8_1X16),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV8_1X16),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU8_1X16),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(YDYUYDYV8_1X16),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY10_1X20),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY10_1X20),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV10_1X20),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU10_1X20),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUV10_1X30),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(AYUV8_1X32),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY12_2X12),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY12_2X12),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV12_2X12),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU12_2X12),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(UYVY12_1X24),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(VYUY12_1X24),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(YUYV12_1X24),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(YVYU12_1X24),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR8_1X8),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGBRG8_1X8),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGRBG8_1X8),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(SRGGB8_1X8),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_ALAW8_1X8),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGBRG10_ALAW8_1X8),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGRBG10_ALAW8_1X8),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(SRGGB10_ALAW8_1X8),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_DPCM8_1X8),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGBRG10_DPCM8_1X8),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGRBG10_DPCM8_1X8),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(SRGGB10_DPCM8_1X8),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_2X8_PADHI_BE),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_2X8_PADHI_LE),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_2X8_PADLO_BE),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_2X8_PADLO_LE),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR10_1X10),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGBRG10_1X10),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGRBG10_1X10),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(SRGGB10_1X10),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(SBGGR12_1X12),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGBRG12_1X12),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(SGRBG12_1X12),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(SRGGB12_1X12),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(JPEG_1X8),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(S5C_UYVY_JPEG_1X8),
V4L2_MBUS_FROM_MEDIA_BUS_FMT(AHSV8888_1X32),
};
#endif
|
0 | repos/libcamera/include | repos/libcamera/include/linux/dma-heap.h | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* DMABUF Heaps Userspace API
*
* Copyright (C) 2011 Google, Inc.
* Copyright (C) 2019 Linaro Ltd.
*/
#ifndef _LINUX_DMABUF_POOL_H
#define _LINUX_DMABUF_POOL_H
#include <linux/ioctl.h>
#include <linux/types.h>
/**
* DOC: DMABUF Heaps Userspace API
*/
/* Valid FD_FLAGS are O_CLOEXEC, O_RDONLY, O_WRONLY, O_RDWR */
#define DMA_HEAP_VALID_FD_FLAGS (O_CLOEXEC | O_ACCMODE)
/* Currently no heap flags */
#define DMA_HEAP_VALID_HEAP_FLAGS (0)
/**
* struct dma_heap_allocation_data - metadata passed from userspace for
* allocations
* @len: size of the allocation
* @fd: will be populated with a fd which provides the
* handle to the allocated dma-buf
* @fd_flags: file descriptor flags used when allocating
* @heap_flags: flags passed to heap
*
* Provided by userspace as an argument to the ioctl
*/
struct dma_heap_allocation_data {
__u64 len;
__u32 fd;
__u32 fd_flags;
__u64 heap_flags;
};
#define DMA_HEAP_IOC_MAGIC 'H'
/**
* DOC: DMA_HEAP_IOCTL_ALLOC - allocate memory from pool
*
* Takes a dma_heap_allocation_data struct and returns it with the fd field
* populated with the dmabuf handle of the allocation.
*/
#define DMA_HEAP_IOCTL_ALLOC _IOWR(DMA_HEAP_IOC_MAGIC, 0x0,\
struct dma_heap_allocation_data)
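/*
 * Usage sketch (illustrative, not from the original header): allocating a
 * one-page dma-buf, assuming a "/dev/dma_heap/system" heap exists on the
 * target system.
 *
 *   struct dma_heap_allocation_data data = {
 *           .len = 4096,
 *           .fd_flags = O_RDWR | O_CLOEXEC,
 *   };
 *   int heap_fd = open("/dev/dma_heap/system", O_RDONLY | O_CLOEXEC);
 *   if (heap_fd >= 0 && ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data) == 0) {
 *           // data.fd now holds the dma-buf file descriptor
 *   }
 */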
#endif /* _LINUX_DMABUF_POOL_H */
|
0 | repos/libcamera/include/android/system/core/include | repos/libcamera/include/android/system/core/include/system/camera.h | /* SPDX-License-Identifier: Apache-2.0 */
/*
* Copyright (C) 2011 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef SYSTEM_CORE_INCLUDE_ANDROID_CAMERA_H
#define SYSTEM_CORE_INCLUDE_ANDROID_CAMERA_H
#include <stdint.h>
#include <sys/cdefs.h>
#include <sys/types.h>
#include <cutils/native_handle.h>
#include <hardware/hardware.h>
#include <hardware/gralloc.h>
__BEGIN_DECLS
/**
* A set of bit masks for specifying how the received preview frames are
* handled before the previewCallback() call.
*
* The least significant 3 bits of an "int" value are used for this purpose:
*
* ..... 0 0 0
* ^ ^ ^
* | | |---------> determine whether the callback is enabled or not
* | |-----------> determine whether the callback is one-shot or not
* |-------------> determine whether the frame is copied out or not
*
 * WARNING: When a frame is sent directly without copying, it is the frame
 * receiver's responsibility to make sure that the frame data won't get
 * corrupted by subsequent preview frames filled by the camera. This flag is
 * recommended only when copying out data carries a significant performance
 * cost and the handling/processing of the received frame data is always
 * faster than the preview frame rate, so that data corruption won't occur.
*
* For instance,
* 1. 0x00 disables the callback. In this case, copy out and one shot bits
* are ignored.
 * 2. 0x01 enables a callback without copying out the received frames. A
 *    typical use case is the Camcorder application, which avoids making
 *    costly frame copies.
 * 3. 0x05 enables a callback with frames copied out repeatedly. A typical
 *    use case is the Camera application.
 * 4. 0x07 enables a callback with a frame copied out only once. A typical
 *    use case is the Barcode scanner application.
*/
enum {
CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK = 0x01,
CAMERA_FRAME_CALLBACK_FLAG_ONE_SHOT_MASK = 0x02,
CAMERA_FRAME_CALLBACK_FLAG_COPY_OUT_MASK = 0x04,
/** Typical use cases */
CAMERA_FRAME_CALLBACK_FLAG_NOOP = 0x00,
CAMERA_FRAME_CALLBACK_FLAG_CAMCORDER = 0x01,
CAMERA_FRAME_CALLBACK_FLAG_CAMERA = 0x05,
CAMERA_FRAME_CALLBACK_FLAG_BARCODE_SCANNER = 0x07
};
/** msgType in notifyCallback and dataCallback functions */
enum {
CAMERA_MSG_ERROR = 0x0001, // notifyCallback
CAMERA_MSG_SHUTTER = 0x0002, // notifyCallback
CAMERA_MSG_FOCUS = 0x0004, // notifyCallback
CAMERA_MSG_ZOOM = 0x0008, // notifyCallback
CAMERA_MSG_PREVIEW_FRAME = 0x0010, // dataCallback
CAMERA_MSG_VIDEO_FRAME = 0x0020, // data_timestamp_callback
CAMERA_MSG_POSTVIEW_FRAME = 0x0040, // dataCallback
CAMERA_MSG_RAW_IMAGE = 0x0080, // dataCallback
CAMERA_MSG_COMPRESSED_IMAGE = 0x0100, // dataCallback
CAMERA_MSG_RAW_IMAGE_NOTIFY = 0x0200, // dataCallback
// Preview frame metadata. This can be combined with
// CAMERA_MSG_PREVIEW_FRAME in dataCallback. For example, the apps can
// request FRAME and METADATA. Or the apps can request only FRAME or only
// METADATA.
CAMERA_MSG_PREVIEW_METADATA = 0x0400, // dataCallback
// Notify on autofocus start and stop. This is useful in continuous
// autofocus - FOCUS_MODE_CONTINUOUS_VIDEO and FOCUS_MODE_CONTINUOUS_PICTURE.
CAMERA_MSG_FOCUS_MOVE = 0x0800, // notifyCallback
CAMERA_MSG_ALL_MSGS = 0xFFFF
};
/** cmdType in sendCommand functions */
enum {
CAMERA_CMD_START_SMOOTH_ZOOM = 1,
CAMERA_CMD_STOP_SMOOTH_ZOOM = 2,
/**
* Set the clockwise rotation of preview display (setPreviewDisplay) in
* degrees. This affects the preview frames and the picture displayed after
* snapshot. This method is useful for portrait mode applications. Note
* that preview display of front-facing cameras is flipped horizontally
* before the rotation, that is, the image is reflected along the central
* vertical axis of the camera sensor. So the users can see themselves as
* looking into a mirror.
*
* This does not affect the order of byte array of
* CAMERA_MSG_PREVIEW_FRAME, CAMERA_MSG_VIDEO_FRAME,
* CAMERA_MSG_POSTVIEW_FRAME, CAMERA_MSG_RAW_IMAGE, or
* CAMERA_MSG_COMPRESSED_IMAGE. This is allowed to be set during preview
* since API level 14.
*/
CAMERA_CMD_SET_DISPLAY_ORIENTATION = 3,
/**
* cmdType to disable/enable shutter sound. In sendCommand passing arg1 =
* 0 will disable, while passing arg1 = 1 will enable the shutter sound.
*/
CAMERA_CMD_ENABLE_SHUTTER_SOUND = 4,
/* cmdType to play recording sound */
CAMERA_CMD_PLAY_RECORDING_SOUND = 5,
/**
* Start the face detection. This should be called after preview is started.
* The camera will notify the listener of CAMERA_MSG_FACE and the detected
* faces in the preview frame. The detected faces may be the same as the
* previous ones. Apps should call CAMERA_CMD_STOP_FACE_DETECTION to stop
* the face detection. This method is supported if CameraParameters
* KEY_MAX_NUM_HW_DETECTED_FACES or KEY_MAX_NUM_SW_DETECTED_FACES is
* bigger than 0. Hardware and software face detection should not be running
* at the same time. If the face detection has started, apps should not send
* this again.
*
* In hardware face detection mode, CameraParameters KEY_WHITE_BALANCE,
* KEY_FOCUS_AREAS and KEY_METERING_AREAS have no effect.
*
* arg1 is the face detection type. It can be CAMERA_FACE_DETECTION_HW or
* CAMERA_FACE_DETECTION_SW. If the type of face detection requested is not
* supported, the HAL must return BAD_VALUE.
*/
CAMERA_CMD_START_FACE_DETECTION = 6,
/**
* Stop the face detection.
*/
CAMERA_CMD_STOP_FACE_DETECTION = 7,
/**
* Enable/disable focus move callback (CAMERA_MSG_FOCUS_MOVE). Passing
* arg1 = 0 will disable, while passing arg1 = 1 will enable the callback.
*/
CAMERA_CMD_ENABLE_FOCUS_MOVE_MSG = 8,
/**
* Ping camera service to see if camera hardware is released.
*
* When any camera method returns error, the client can use ping command
* to see if the camera has been taken away by other clients. If the result
* is NO_ERROR, it means the camera hardware is not released. If the result
* is not NO_ERROR, the camera has been released and the existing client
* can silently finish itself or show a dialog.
*/
CAMERA_CMD_PING = 9,
/**
* Configure the number of video buffers used for recording. The intended
* video buffer count for recording is passed as arg1, which must be
* greater than 0. This command must be sent before recording is started.
* This command returns INVALID_OPERATION error if it is sent after video
* recording is started, or the command is not supported at all. This
* command also returns a BAD_VALUE error if the intended video buffer
* count is non-positive or too big to be realized.
*/
CAMERA_CMD_SET_VIDEO_BUFFER_COUNT = 10,
/**
* Configure an explicit format to use for video recording metadata mode.
* This can be used to switch the format from the
* default IMPLEMENTATION_DEFINED gralloc format to some other
* device-supported format, and the default dataspace from the BT_709 color
* space to some other device-supported dataspace. arg1 is the HAL pixel
* format, and arg2 is the HAL dataSpace. This command returns
* INVALID_OPERATION error if it is sent after video recording is started,
* or the command is not supported at all.
*
* If the gralloc format is set to a format other than
* IMPLEMENTATION_DEFINED, then HALv3 devices will use gralloc usage flags
* of SW_READ_OFTEN.
*/
CAMERA_CMD_SET_VIDEO_FORMAT = 11
};
/** camera fatal errors */
enum {
CAMERA_ERROR_UNKNOWN = 1,
/**
* Camera was released because another client has connected to the camera.
* The original client should call Camera::disconnect immediately after
* getting this notification. Otherwise, the camera will be released by
* camera service in a short time. The client should not call any method
* (except disconnect and sending CAMERA_CMD_PING) after getting this.
*/
CAMERA_ERROR_RELEASED = 2,
/**
* Camera was released because device policy change or the client application
* is going to background. The client should call Camera::disconnect
* immediately after getting this notification. Otherwise, the camera will be
* released by camera service in a short time. The client should not call any
* method (except disconnect and sending CAMERA_CMD_PING) after getting this.
*/
CAMERA_ERROR_DISABLED = 3,
CAMERA_ERROR_SERVER_DIED = 100
};
enum {
/** The facing of the camera is opposite to that of the screen. */
CAMERA_FACING_BACK = 0,
/** The facing of the camera is the same as that of the screen. */
CAMERA_FACING_FRONT = 1,
/**
* The facing of the camera is not fixed relative to the screen.
* The cameras with this facing are external cameras, e.g. USB cameras.
*/
CAMERA_FACING_EXTERNAL = 2
};
enum {
/** Hardware face detection. It does not use much CPU. */
CAMERA_FACE_DETECTION_HW = 0,
/**
* Software face detection. It uses some CPU. Applications must use
* Camera.setPreviewTexture for preview in this mode.
*/
CAMERA_FACE_DETECTION_SW = 1
};
/**
* The information of a face from camera face detection.
*/
typedef struct camera_face {
/**
* Bounds of the face [left, top, right, bottom]. (-1000, -1000) represents
* the top-left of the camera field of view, and (1000, 1000) represents the
* bottom-right of the field of view. The width and height cannot be 0 or
* negative. This is supported by both hardware and software face detection.
*
* The direction is relative to the sensor orientation, that is, what the
* sensor sees. The direction is not affected by the rotation or mirroring
* of CAMERA_CMD_SET_DISPLAY_ORIENTATION.
*/
int32_t rect[4];
/**
* The confidence level of the face. The range is 1 to 100. 100 is the
* highest confidence. This is supported by both hardware and software
* face detection.
*/
int32_t score;
/**
 * A unique id per face while the face is visible to the tracker. If
* the face leaves the field-of-view and comes back, it will get a new
* id. If the value is 0, id is not supported.
*/
int32_t id;
/**
* The coordinates of the center of the left eye. The range is -1000 to
* 1000. -2000, -2000 if this is not supported.
*/
int32_t left_eye[2];
/**
* The coordinates of the center of the right eye. The range is -1000 to
* 1000. -2000, -2000 if this is not supported.
*/
int32_t right_eye[2];
/**
* The coordinates of the center of the mouth. The range is -1000 to 1000.
* -2000, -2000 if this is not supported.
*/
int32_t mouth[2];
} camera_face_t;
/**
* The metadata of the frame data.
*/
typedef struct camera_frame_metadata {
/**
* The number of detected faces in the frame.
*/
int32_t number_of_faces;
/**
* An array of the detected faces. The length is number_of_faces.
*/
camera_face_t *faces;
} camera_frame_metadata_t;
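/*
 * Iteration sketch (illustrative, not from the original header): walking the
 * faces delivered with CAMERA_MSG_PREVIEW_METADATA. handle_face() is a
 * hypothetical consumer.
 *
 *   void process_faces(const camera_frame_metadata_t *m)
 *   {
 *       for (int32_t i = 0; i < m->number_of_faces; i++) {
 *           const camera_face_t *f = &m->faces[i];
 *           // f->rect[] is [left, top, right, bottom] in the
 *           // -1000..1000 field-of-view coordinate system
 *           handle_face(f->rect, f->score);
 *       }
 *   }
 */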
__END_DECLS
#endif /* SYSTEM_CORE_INCLUDE_ANDROID_CAMERA_H */
|
0 | repos/libcamera/include/android/system/core/include | repos/libcamera/include/android/system/core/include/system/graphics-sw.h | /* SPDX-License-Identifier: Apache-2.0 */
#ifndef SYSTEM_CORE_GRAPHICS_SW_H_
#define SYSTEM_CORE_GRAPHICS_SW_H_
/* Software formats not in the HAL definitions. */
typedef enum {
HAL_PIXEL_FORMAT_YCBCR_422_888 = 39, // 0x27
HAL_PIXEL_FORMAT_YCBCR_444_888 = 40, // 0x28
HAL_PIXEL_FORMAT_FLEX_RGB_888 = 41, // 0x29
HAL_PIXEL_FORMAT_FLEX_RGBA_8888 = 42, // 0x2A
} android_pixel_format_sw_t;
/* for compatibility */
#define HAL_PIXEL_FORMAT_YCbCr_422_888 HAL_PIXEL_FORMAT_YCBCR_422_888
#define HAL_PIXEL_FORMAT_YCbCr_444_888 HAL_PIXEL_FORMAT_YCBCR_444_888
#endif // SYSTEM_CORE_GRAPHICS_SW_H_
|
0 | repos/libcamera/include/android/system/core/include | repos/libcamera/include/android/system/core/include/system/graphics-base.h | /* SPDX-License-Identifier: Apache-2.0 */
#ifndef SYSTEM_CORE_GRAPHICS_BASE_H_
#define SYSTEM_CORE_GRAPHICS_BASE_H_
#include "graphics-base-v1.0.h"
#include "graphics-base-v1.1.h"
#endif // SYSTEM_CORE_GRAPHICS_BASE_H_
|
0 | repos/libcamera/include/android/system/core/include | repos/libcamera/include/android/system/core/include/system/graphics-base-v1.0.h | /* SPDX-License-Identifier: Apache-2.0 */
// This file is autogenerated by hidl-gen. Do not edit manually.
// Source: [email protected]
// Location: hardware/interfaces/graphics/common/1.0/
#ifndef HIDL_GENERATED_ANDROID_HARDWARE_GRAPHICS_COMMON_V1_0_EXPORTED_CONSTANTS_H_
#define HIDL_GENERATED_ANDROID_HARDWARE_GRAPHICS_COMMON_V1_0_EXPORTED_CONSTANTS_H_
#ifdef __cplusplus
extern "C" {
#endif
typedef enum {
HAL_PIXEL_FORMAT_RGBA_8888 = 1,
HAL_PIXEL_FORMAT_RGBX_8888 = 2,
HAL_PIXEL_FORMAT_RGB_888 = 3,
HAL_PIXEL_FORMAT_RGB_565 = 4,
HAL_PIXEL_FORMAT_BGRA_8888 = 5,
HAL_PIXEL_FORMAT_YCBCR_422_SP = 16,
HAL_PIXEL_FORMAT_YCRCB_420_SP = 17,
HAL_PIXEL_FORMAT_YCBCR_422_I = 20,
HAL_PIXEL_FORMAT_RGBA_FP16 = 22,
HAL_PIXEL_FORMAT_RAW16 = 32,
HAL_PIXEL_FORMAT_BLOB = 33,
HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED = 34,
HAL_PIXEL_FORMAT_YCBCR_420_888 = 35,
HAL_PIXEL_FORMAT_RAW_OPAQUE = 36,
HAL_PIXEL_FORMAT_RAW10 = 37,
HAL_PIXEL_FORMAT_RAW12 = 38,
HAL_PIXEL_FORMAT_RGBA_1010102 = 43,
HAL_PIXEL_FORMAT_Y8 = 538982489,
HAL_PIXEL_FORMAT_Y16 = 540422489,
HAL_PIXEL_FORMAT_YV12 = 842094169,
} android_pixel_format_t;
typedef enum {
HAL_TRANSFORM_FLIP_H = 1, // (1 << 0)
HAL_TRANSFORM_FLIP_V = 2, // (1 << 1)
HAL_TRANSFORM_ROT_90 = 4, // (1 << 2)
HAL_TRANSFORM_ROT_180 = 3, // (FLIP_H | FLIP_V)
HAL_TRANSFORM_ROT_270 = 7, // ((FLIP_H | FLIP_V) | ROT_90)
} android_transform_t;
typedef enum {
HAL_DATASPACE_UNKNOWN = 0,
HAL_DATASPACE_ARBITRARY = 1,
HAL_DATASPACE_STANDARD_SHIFT = 16,
HAL_DATASPACE_STANDARD_MASK = 4128768, // (63 << STANDARD_SHIFT)
HAL_DATASPACE_STANDARD_UNSPECIFIED = 0, // (0 << STANDARD_SHIFT)
HAL_DATASPACE_STANDARD_BT709 = 65536, // (1 << STANDARD_SHIFT)
HAL_DATASPACE_STANDARD_BT601_625 = 131072, // (2 << STANDARD_SHIFT)
HAL_DATASPACE_STANDARD_BT601_625_UNADJUSTED = 196608, // (3 << STANDARD_SHIFT)
HAL_DATASPACE_STANDARD_BT601_525 = 262144, // (4 << STANDARD_SHIFT)
HAL_DATASPACE_STANDARD_BT601_525_UNADJUSTED = 327680, // (5 << STANDARD_SHIFT)
HAL_DATASPACE_STANDARD_BT2020 = 393216, // (6 << STANDARD_SHIFT)
HAL_DATASPACE_STANDARD_BT2020_CONSTANT_LUMINANCE = 458752, // (7 << STANDARD_SHIFT)
HAL_DATASPACE_STANDARD_BT470M = 524288, // (8 << STANDARD_SHIFT)
HAL_DATASPACE_STANDARD_FILM = 589824, // (9 << STANDARD_SHIFT)
HAL_DATASPACE_STANDARD_DCI_P3 = 655360, // (10 << STANDARD_SHIFT)
HAL_DATASPACE_STANDARD_ADOBE_RGB = 720896, // (11 << STANDARD_SHIFT)
HAL_DATASPACE_TRANSFER_SHIFT = 22,
HAL_DATASPACE_TRANSFER_MASK = 130023424, // (31 << TRANSFER_SHIFT)
HAL_DATASPACE_TRANSFER_UNSPECIFIED = 0, // (0 << TRANSFER_SHIFT)
HAL_DATASPACE_TRANSFER_LINEAR = 4194304, // (1 << TRANSFER_SHIFT)
HAL_DATASPACE_TRANSFER_SRGB = 8388608, // (2 << TRANSFER_SHIFT)
HAL_DATASPACE_TRANSFER_SMPTE_170M = 12582912, // (3 << TRANSFER_SHIFT)
HAL_DATASPACE_TRANSFER_GAMMA2_2 = 16777216, // (4 << TRANSFER_SHIFT)
HAL_DATASPACE_TRANSFER_GAMMA2_6 = 20971520, // (5 << TRANSFER_SHIFT)
HAL_DATASPACE_TRANSFER_GAMMA2_8 = 25165824, // (6 << TRANSFER_SHIFT)
HAL_DATASPACE_TRANSFER_ST2084 = 29360128, // (7 << TRANSFER_SHIFT)
HAL_DATASPACE_TRANSFER_HLG = 33554432, // (8 << TRANSFER_SHIFT)
HAL_DATASPACE_RANGE_SHIFT = 27,
HAL_DATASPACE_RANGE_MASK = 939524096, // (7 << RANGE_SHIFT)
HAL_DATASPACE_RANGE_UNSPECIFIED = 0, // (0 << RANGE_SHIFT)
HAL_DATASPACE_RANGE_FULL = 134217728, // (1 << RANGE_SHIFT)
HAL_DATASPACE_RANGE_LIMITED = 268435456, // (2 << RANGE_SHIFT)
HAL_DATASPACE_RANGE_EXTENDED = 402653184, // (3 << RANGE_SHIFT)
HAL_DATASPACE_SRGB_LINEAR = 512,
HAL_DATASPACE_V0_SRGB_LINEAR = 138477568, // ((STANDARD_BT709 | TRANSFER_LINEAR) | RANGE_FULL)
HAL_DATASPACE_V0_SCRGB_LINEAR =
406913024, // ((STANDARD_BT709 | TRANSFER_LINEAR) | RANGE_EXTENDED)
HAL_DATASPACE_SRGB = 513,
HAL_DATASPACE_V0_SRGB = 142671872, // ((STANDARD_BT709 | TRANSFER_SRGB) | RANGE_FULL)
HAL_DATASPACE_V0_SCRGB = 411107328, // ((STANDARD_BT709 | TRANSFER_SRGB) | RANGE_EXTENDED)
HAL_DATASPACE_JFIF = 257,
HAL_DATASPACE_V0_JFIF = 146931712, // ((STANDARD_BT601_625 | TRANSFER_SMPTE_170M) | RANGE_FULL)
HAL_DATASPACE_BT601_625 = 258,
HAL_DATASPACE_V0_BT601_625 =
281149440, // ((STANDARD_BT601_625 | TRANSFER_SMPTE_170M) | RANGE_LIMITED)
HAL_DATASPACE_BT601_525 = 259,
HAL_DATASPACE_V0_BT601_525 =
281280512, // ((STANDARD_BT601_525 | TRANSFER_SMPTE_170M) | RANGE_LIMITED)
HAL_DATASPACE_BT709 = 260,
HAL_DATASPACE_V0_BT709 = 281083904, // ((STANDARD_BT709 | TRANSFER_SMPTE_170M) | RANGE_LIMITED)
HAL_DATASPACE_DCI_P3_LINEAR = 139067392, // ((STANDARD_DCI_P3 | TRANSFER_LINEAR) | RANGE_FULL)
HAL_DATASPACE_DCI_P3 = 155844608, // ((STANDARD_DCI_P3 | TRANSFER_GAMMA2_6) | RANGE_FULL)
HAL_DATASPACE_DISPLAY_P3_LINEAR =
139067392, // ((STANDARD_DCI_P3 | TRANSFER_LINEAR) | RANGE_FULL)
HAL_DATASPACE_DISPLAY_P3 = 143261696, // ((STANDARD_DCI_P3 | TRANSFER_SRGB) | RANGE_FULL)
HAL_DATASPACE_ADOBE_RGB = 151715840, // ((STANDARD_ADOBE_RGB | TRANSFER_GAMMA2_2) | RANGE_FULL)
HAL_DATASPACE_BT2020_LINEAR = 138805248, // ((STANDARD_BT2020 | TRANSFER_LINEAR) | RANGE_FULL)
HAL_DATASPACE_BT2020 = 147193856, // ((STANDARD_BT2020 | TRANSFER_SMPTE_170M) | RANGE_FULL)
HAL_DATASPACE_BT2020_PQ = 163971072, // ((STANDARD_BT2020 | TRANSFER_ST2084) | RANGE_FULL)
HAL_DATASPACE_DEPTH = 4096,
HAL_DATASPACE_SENSOR = 4097,
} android_dataspace_t;
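/*
 * Decomposition sketch (illustrative, not from the original header): the
 * combined dataspace values above split into standard, transfer and range
 * fields with the corresponding masks.
 *
 *   uint32_t ds = HAL_DATASPACE_V0_BT709;
 *   uint32_t standard = ds & HAL_DATASPACE_STANDARD_MASK; // STANDARD_BT709
 *   uint32_t transfer = ds & HAL_DATASPACE_TRANSFER_MASK; // TRANSFER_SMPTE_170M
 *   uint32_t range    = ds & HAL_DATASPACE_RANGE_MASK;    // RANGE_LIMITED
 */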
typedef enum {
HAL_COLOR_MODE_NATIVE = 0,
HAL_COLOR_MODE_STANDARD_BT601_625 = 1,
HAL_COLOR_MODE_STANDARD_BT601_625_UNADJUSTED = 2,
HAL_COLOR_MODE_STANDARD_BT601_525 = 3,
HAL_COLOR_MODE_STANDARD_BT601_525_UNADJUSTED = 4,
HAL_COLOR_MODE_STANDARD_BT709 = 5,
HAL_COLOR_MODE_DCI_P3 = 6,
HAL_COLOR_MODE_SRGB = 7,
HAL_COLOR_MODE_ADOBE_RGB = 8,
HAL_COLOR_MODE_DISPLAY_P3 = 9,
} android_color_mode_t;
typedef enum {
HAL_COLOR_TRANSFORM_IDENTITY = 0,
HAL_COLOR_TRANSFORM_ARBITRARY_MATRIX = 1,
HAL_COLOR_TRANSFORM_VALUE_INVERSE = 2,
HAL_COLOR_TRANSFORM_GRAYSCALE = 3,
HAL_COLOR_TRANSFORM_CORRECT_PROTANOPIA = 4,
HAL_COLOR_TRANSFORM_CORRECT_DEUTERANOPIA = 5,
HAL_COLOR_TRANSFORM_CORRECT_TRITANOPIA = 6,
} android_color_transform_t;
typedef enum {
HAL_HDR_DOLBY_VISION = 1,
HAL_HDR_HDR10 = 2,
HAL_HDR_HLG = 3,
} android_hdr_t;
#ifdef __cplusplus
}
#endif
#endif // HIDL_GENERATED_ANDROID_HARDWARE_GRAPHICS_COMMON_V1_0_EXPORTED_CONSTANTS_H_
|
0 | repos/libcamera/include/android/system/core/include | repos/libcamera/include/android/system/core/include/system/graphics-base-v1.1.h | /* SPDX-License-Identifier: Apache-2.0 */
// This file is autogenerated by hidl-gen. Do not edit manually.
// Source: [email protected]
// Location: hardware/interfaces/graphics/common/1.1/
#ifndef HIDL_GENERATED_ANDROID_HARDWARE_GRAPHICS_COMMON_V1_1_EXPORTED_CONSTANTS_H_
#define HIDL_GENERATED_ANDROID_HARDWARE_GRAPHICS_COMMON_V1_1_EXPORTED_CONSTANTS_H_
#ifdef __cplusplus
extern "C" {
#endif
typedef enum {
HAL_PIXEL_FORMAT_DEPTH_16 = 48,
HAL_PIXEL_FORMAT_DEPTH_24 = 49,
HAL_PIXEL_FORMAT_DEPTH_24_STENCIL_8 = 50,
HAL_PIXEL_FORMAT_DEPTH_32F = 51,
HAL_PIXEL_FORMAT_DEPTH_32F_STENCIL_8 = 52,
HAL_PIXEL_FORMAT_STENCIL_8 = 53,
HAL_PIXEL_FORMAT_YCBCR_P010 = 54,
} android_pixel_format_v1_1_t;
typedef enum {
HAL_DATASPACE_BT2020_ITU =
281411584, // ((STANDARD_BT2020 | TRANSFER_SMPTE_170M) | RANGE_LIMITED)
HAL_DATASPACE_BT2020_ITU_PQ =
298188800, // ((STANDARD_BT2020 | TRANSFER_ST2084) | RANGE_LIMITED)
HAL_DATASPACE_BT2020_ITU_HLG = 302383104, // ((STANDARD_BT2020 | TRANSFER_HLG) | RANGE_LIMITED)
HAL_DATASPACE_BT2020_HLG = 168165376, // ((STANDARD_BT2020 | TRANSFER_HLG) | RANGE_FULL)
} android_dataspace_v1_1_t;
typedef enum {
HAL_COLOR_MODE_BT2020 = 10,
HAL_COLOR_MODE_BT2100_PQ = 11,
HAL_COLOR_MODE_BT2100_HLG = 12,
} android_color_mode_v1_1_t;
typedef enum {
HAL_RENDER_INTENT_COLORIMETRIC = 0,
HAL_RENDER_INTENT_ENHANCE = 1,
HAL_RENDER_INTENT_TONE_MAP_COLORIMETRIC = 2,
HAL_RENDER_INTENT_TONE_MAP_ENHANCE = 3,
} android_render_intent_v1_1_t;
#ifdef __cplusplus
}
#endif
#endif // HIDL_GENERATED_ANDROID_HARDWARE_GRAPHICS_COMMON_V1_1_EXPORTED_CONSTANTS_H_
|
0 | repos/libcamera/include/android/system/core/include | repos/libcamera/include/android/system/core/include/system/graphics.h | /* SPDX-License-Identifier: Apache-2.0 */
/*
* Copyright (C) 2011 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef SYSTEM_CORE_INCLUDE_ANDROID_GRAPHICS_H
#define SYSTEM_CORE_INCLUDE_ANDROID_GRAPHICS_H
#include <stddef.h>
#include <stdint.h>
/*
* Some of the enums are now defined in HIDL in hardware/interfaces and are
* generated.
*/
#include "graphics-base.h"
#include "graphics-sw.h"
#ifdef __cplusplus
extern "C" {
#endif
/* for compatibility */
#define HAL_PIXEL_FORMAT_YCbCr_420_888 HAL_PIXEL_FORMAT_YCBCR_420_888
#define HAL_PIXEL_FORMAT_YCbCr_422_SP HAL_PIXEL_FORMAT_YCBCR_422_SP
#define HAL_PIXEL_FORMAT_YCrCb_420_SP HAL_PIXEL_FORMAT_YCRCB_420_SP
#define HAL_PIXEL_FORMAT_YCbCr_422_I HAL_PIXEL_FORMAT_YCBCR_422_I
typedef android_pixel_format_t android_pixel_format;
typedef android_transform_t android_transform;
typedef android_dataspace_t android_dataspace;
typedef android_color_mode_t android_color_mode;
typedef android_color_transform_t android_color_transform;
typedef android_hdr_t android_hdr;
/*
* If the HAL needs to create service threads to handle graphics related
* tasks, these threads need to run at HAL_PRIORITY_URGENT_DISPLAY priority
* if they can block the main rendering thread in any way.
*
* the priority of the current thread can be set with:
*
* #include <sys/resource.h>
* setpriority(PRIO_PROCESS, 0, HAL_PRIORITY_URGENT_DISPLAY);
*
*/
#define HAL_PRIORITY_URGENT_DISPLAY (-8)
/*
* Structure for describing YCbCr formats for consumption by applications.
* This is used with HAL_PIXEL_FORMAT_YCbCr_*_888.
*
* Buffer chroma subsampling is defined in the format.
* e.g. HAL_PIXEL_FORMAT_YCbCr_420_888 has subsampling 4:2:0.
*
 * Buffers must have an 8-bit depth.
*
* y, cb, and cr point to the first byte of their respective planes.
*
* Stride describes the distance in bytes from the first value of one row of
* the image to the first value of the next row. It includes the width of the
* image plus padding.
* ystride is the stride of the luma plane.
* cstride is the stride of the chroma planes.
*
* chroma_step is the distance in bytes from one chroma pixel value to the
* next. This is 2 bytes for semiplanar (because chroma values are interleaved
* and each chroma value is one byte) and 1 for planar.
*/
struct android_ycbcr {
void *y;
void *cb;
void *cr;
size_t ystride;
size_t cstride;
size_t chroma_step;
/** reserved for future use, set to 0 by gralloc's (*lock_ycbcr)() */
uint32_t reserved[8];
};
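/*
 * Addressing sketch (illustrative, not from the original header): sampling
 * the pixel at (x, y) from a HAL_PIXEL_FORMAT_YCbCr_420_888 buffer described
 * by the structure above. ycbcr_luma() and ycbcr_cb() are hypothetical
 * helper names.
 *
 *   static inline uint8_t ycbcr_luma(const struct android_ycbcr *p,
 *                                    size_t x, size_t y)
 *   {
 *       return ((const uint8_t *)p->y)[y * p->ystride + x];
 *   }
 *
 *   static inline uint8_t ycbcr_cb(const struct android_ycbcr *p,
 *                                  size_t x, size_t y)
 *   {
 *       // 4:2:0 chroma is subsampled by two in both directions
 *       return ((const uint8_t *)p->cb)[(y / 2) * p->cstride +
 *                                       (x / 2) * p->chroma_step];
 *   }
 */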
/*
* Structures for describing flexible YUVA/RGBA formats for consumption by
* applications. Such flexible formats contain a plane for each component (e.g.
* red, green, blue), where each plane is laid out in a grid-like pattern
* occupying unique byte addresses and with consistent byte offsets between
* neighboring pixels.
*
* The android_flex_layout structure is used with any pixel format that can be
* represented by it, such as:
* - HAL_PIXEL_FORMAT_YCbCr_*_888
* - HAL_PIXEL_FORMAT_FLEX_RGB*_888
* - HAL_PIXEL_FORMAT_RGB[AX]_888[8],BGRA_8888,RGB_888
* - HAL_PIXEL_FORMAT_YV12,Y8,Y16,YCbCr_422_SP/I,YCrCb_420_SP
* - even implementation defined formats that can be represented by
* the structures
*
* Vertical increment (aka. row increment or stride) describes the distance in
* bytes from the first pixel of one row to the first pixel of the next row
* (below) for the component plane. This can be negative.
*
* Horizontal increment (aka. column or pixel increment) describes the distance
* in bytes from one pixel to the next pixel (to the right) on the same row for
* the component plane. This can be negative.
*
* Each plane can be subsampled either vertically or horizontally by
* a power-of-two factor.
*
* The bit-depth of each component can be arbitrary, as long as the pixels are
* laid out on whole bytes, in native byte-order, using the most significant
* bits of each unit.
*/
typedef enum android_flex_component {
/* luma */
FLEX_COMPONENT_Y = 1 << 0,
/* chroma blue */
FLEX_COMPONENT_Cb = 1 << 1,
/* chroma red */
FLEX_COMPONENT_Cr = 1 << 2,
/* red */
FLEX_COMPONENT_R = 1 << 10,
/* green */
FLEX_COMPONENT_G = 1 << 11,
/* blue */
FLEX_COMPONENT_B = 1 << 12,
/* alpha */
FLEX_COMPONENT_A = 1 << 30,
} android_flex_component_t;
typedef struct android_flex_plane {
/* pointer to the first byte of the top-left pixel of the plane. */
uint8_t *top_left;
android_flex_component_t component;
/* bits allocated for the component in each pixel. Must be a positive
multiple of 8. */
int32_t bits_per_component;
/* number of the most significant bits used in the format for this
component. Must be between 1 and bits_per_component, inclusive. */
int32_t bits_used;
/* horizontal increment */
int32_t h_increment;
/* vertical increment */
int32_t v_increment;
/* horizontal subsampling. Must be a positive power of 2. */
int32_t h_subsampling;
/* vertical subsampling. Must be a positive power of 2. */
int32_t v_subsampling;
} android_flex_plane_t;
typedef enum android_flex_format {
/* not a flexible format */
FLEX_FORMAT_INVALID = 0x0,
FLEX_FORMAT_Y = FLEX_COMPONENT_Y,
FLEX_FORMAT_YCbCr = FLEX_COMPONENT_Y | FLEX_COMPONENT_Cb | FLEX_COMPONENT_Cr,
FLEX_FORMAT_YCbCrA = FLEX_FORMAT_YCbCr | FLEX_COMPONENT_A,
FLEX_FORMAT_RGB = FLEX_COMPONENT_R | FLEX_COMPONENT_G | FLEX_COMPONENT_B,
FLEX_FORMAT_RGBA = FLEX_FORMAT_RGB | FLEX_COMPONENT_A,
} android_flex_format_t;
typedef struct android_flex_layout {
/* the kind of flexible format */
android_flex_format_t format;
/* number of planes; 0 for FLEX_FORMAT_INVALID */
uint32_t num_planes;
/* a plane for each component; ordered in increasing component value order.
E.g. FLEX_FORMAT_RGBA maps 0 -> R, 1 -> G, etc.
Can be NULL for FLEX_FORMAT_INVALID */
android_flex_plane_t *planes;
} android_flex_layout_t;
/**
* Structure used to define depth point clouds for format HAL_PIXEL_FORMAT_BLOB
* with dataSpace value of HAL_DATASPACE_DEPTH.
* When locking a native buffer of the above format and dataSpace value,
* the vaddr pointer can be cast to this structure.
*
* A variable-length list of (x,y,z, confidence) 3D points, as floats. (x, y,
* z) represents a measured point's position, with the coordinate system defined
* by the data source. Confidence represents the estimated likelihood that this
* measurement is correct. It is between 0.f and 1.f, inclusive, with 1.f ==
* 100% confidence.
*
* num_points is the number of points in the list
*
* xyz_points is the flexible array of floating-point values.
* It contains (num_points) * 4 floats.
*
* For example:
* android_depth_points d = get_depth_buffer();
* struct {
* float x; float y; float z; float confidence;
* } firstPoint, lastPoint;
*
* firstPoint.x = d.xyzc_points[0];
* firstPoint.y = d.xyzc_points[1];
* firstPoint.z = d.xyzc_points[2];
* firstPoint.confidence = d.xyzc_points[3];
* lastPoint.x = d.xyzc_points[(d.num_points - 1) * 4 + 0];
* lastPoint.y = d.xyzc_points[(d.num_points - 1) * 4 + 1];
* lastPoint.z = d.xyzc_points[(d.num_points - 1) * 4 + 2];
* lastPoint.confidence = d.xyzc_points[(d.num_points - 1) * 4 + 3];
*/
struct android_depth_points {
uint32_t num_points;
/** reserved for future use, set to 0 by gralloc's (*lock)() */
uint32_t reserved[8];
#if defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc99-extensions"
#endif
float xyzc_points[];
#if defined(__clang__)
#pragma clang diagnostic pop
#endif
};
/**
* These structures are used to define the reference display's
* capabilities for HDR content. Display engine can use this
* to better tone map content to user's display.
* Color is defined in CIE XYZ coordinates
*/
struct android_xy_color {
float x;
float y;
};
struct android_smpte2086_metadata {
struct android_xy_color displayPrimaryRed;
struct android_xy_color displayPrimaryGreen;
struct android_xy_color displayPrimaryBlue;
struct android_xy_color whitePoint;
float maxLuminance;
float minLuminance;
};
struct android_cta861_3_metadata {
float maxContentLightLevel;
float maxFrameAverageLightLevel;
};
#ifdef __cplusplus
}
#endif
#endif /* SYSTEM_CORE_INCLUDE_ANDROID_GRAPHICS_H */
|
0 | repos/libcamera/include/android/system/core/include | repos/libcamera/include/android/system/core/include/cutils/compiler.h | /* SPDX-License-Identifier: Apache-2.0 */
/*
* Copyright (C) 2009 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_CUTILS_COMPILER_H
#define ANDROID_CUTILS_COMPILER_H
/*
* helps the compiler's optimizer predicting branches
*/
#ifdef __cplusplus
# define CC_LIKELY( exp ) (__builtin_expect( !!(exp), true ))
# define CC_UNLIKELY( exp ) (__builtin_expect( !!(exp), false ))
#else
# define CC_LIKELY( exp ) (__builtin_expect( !!(exp), 1 ))
# define CC_UNLIKELY( exp ) (__builtin_expect( !!(exp), 0 ))
#endif
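/*
 * Usage sketch (illustrative, not from the original header): annotating a
 * rarely-taken error path so the compiler lays out the hot path first.
 *
 *   if (CC_UNLIKELY(buf == NULL))
 *       return -EINVAL;
 */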
/**
* exports marked symbols
*
* if used on a C++ class declaration, this macro must be inserted
* after the "class" keyword. For instance:
*
* template <typename TYPE>
* class ANDROID_API Singleton { }
*/
#define ANDROID_API __attribute__((visibility("default")))
#endif // ANDROID_CUTILS_COMPILER_H
|
0 | repos/libcamera/include/android/system/core/include | repos/libcamera/include/android/system/core/include/cutils/native_handle.h | /* SPDX-License-Identifier: Apache-2.0 */
/*
* Copyright (C) 2009 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef NATIVE_HANDLE_H_
#define NATIVE_HANDLE_H_
#include <stdalign.h>
#ifdef __cplusplus
extern "C" {
#endif
#define NATIVE_HANDLE_MAX_FDS 1024
#define NATIVE_HANDLE_MAX_INTS 1024
/* Declare a char array for use with native_handle_init */
#define NATIVE_HANDLE_DECLARE_STORAGE(name, maxFds, maxInts) \
alignas(native_handle_t) char (name)[ \
sizeof(native_handle_t) + sizeof(int) * ((maxFds) + (maxInts))]
typedef struct native_handle
{
int version; /* sizeof(native_handle_t) */
int numFds; /* number of file-descriptors at &data[0] */
int numInts; /* number of ints at &data[numFds] */
#if defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wzero-length-array"
#endif
int data[0]; /* numFds + numInts ints */
#if defined(__clang__)
#pragma clang diagnostic pop
#endif
} native_handle_t;
typedef const native_handle_t* buffer_handle_t;
/*
* native_handle_close
*
* closes the file descriptors contained in this native_handle_t
*
* return 0 on success, or a negative error code on failure
*
*/
int native_handle_close(const native_handle_t* h);
/*
* native_handle_init
*
* Initializes a native_handle_t from storage. storage must be declared with
* NATIVE_HANDLE_DECLARE_STORAGE. numFds and numInts must not respectively
* exceed maxFds and maxInts used to declare the storage.
*/
native_handle_t* native_handle_init(char* storage, int numFds, int numInts);
/*
* native_handle_create
*
* creates a native_handle_t and initializes it. must be destroyed with
* native_handle_delete().
*
*/
native_handle_t* native_handle_create(int numFds, int numInts);
/*
* native_handle_clone
*
* creates a native_handle_t and initializes it from another native_handle_t.
* Must be destroyed with native_handle_delete().
*
*/
native_handle_t* native_handle_clone(const native_handle_t* handle);
/*
* native_handle_delete
*
* frees a native_handle_t allocated with native_handle_create().
 * This ONLY frees the memory allocated for the native_handle_t; it doesn't
 * close the file descriptors, which can be done with native_handle_close().
*
* return 0 on success, or a negative error code on failure
*
*/
int native_handle_delete(native_handle_t* h);
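/*
 * Lifecycle sketch (illustrative, not from the original header): a handle
 * carrying one file descriptor and two ints. some_fd, width and height are
 * hypothetical values.
 *
 *   native_handle_t *h = native_handle_create(1, 2);
 *   if (h) {
 *       h->data[0] = some_fd;    // the single file descriptor
 *       h->data[1] = width;      // first int
 *       h->data[2] = height;     // second int
 *       // ... share or use the handle ...
 *       native_handle_close(h);  // closes some_fd
 *       native_handle_delete(h); // frees the handle memory
 *   }
 */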
#ifdef __cplusplus
}
#endif
#endif /* NATIVE_HANDLE_H_ */
|
0 | repos/libcamera/include/android/system/core/include | repos/libcamera/include/android/system/core/include/android/log.h | /* SPDX-License-Identifier: Apache-2.0 */
/*
* Copyright (C) 2009 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _ANDROID_LOG_H
#define _ANDROID_LOG_H
/******************************************************************
*
* IMPORTANT NOTICE:
*
* This file is part of Android's set of stable system headers
* exposed by the Android NDK (Native Development Kit) since
* platform release 1.5
*
* Third-party source AND binary code relies on the definitions
* here to be FROZEN ON ALL UPCOMING PLATFORM RELEASES.
*
* - DO NOT MODIFY ENUMS (EXCEPT IF YOU ADD NEW 32-BIT VALUES)
* - DO NOT MODIFY CONSTANTS OR FUNCTIONAL MACROS
* - DO NOT CHANGE THE SIGNATURE OF FUNCTIONS IN ANY WAY
* - DO NOT CHANGE THE LAYOUT OR SIZE OF STRUCTURES
*/
/*
* Support routines to send messages to the Android in-kernel log buffer,
* which can later be accessed through the 'logcat' utility.
*
* Each log message must have
* - a priority
* - a log tag
* - some text
*
* The tag normally corresponds to the component that emits the log message,
* and should be reasonably small.
*
* Log message text may be truncated to less than an implementation-specific
* limit (e.g. 1023 characters max).
*
* Note that a newline character ("\n") will be appended automatically to your
* log message, if not already there. It is not possible to send several messages
* and have them appear on a single line in logcat.
*
* PLEASE USE LOGS WITH MODERATION:
*
 * - Sending log messages eats CPU and slows down your application and the
 *   system.
*
 * - The circular log buffer is pretty small (<64KB); sending many messages
 *   might push out other important log messages from the rest of the system.
*
* - In release builds, only send log messages to account for exceptional
* conditions.
*
* NOTE: These functions MUST be implemented by /system/lib/liblog.so
*/
#include <stdarg.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Android log priority values, in ascending priority order.
*/
typedef enum android_LogPriority {
ANDROID_LOG_UNKNOWN = 0,
ANDROID_LOG_DEFAULT, /* only for SetMinPriority() */
ANDROID_LOG_VERBOSE,
ANDROID_LOG_DEBUG,
ANDROID_LOG_INFO,
ANDROID_LOG_WARN,
ANDROID_LOG_ERROR,
ANDROID_LOG_FATAL,
ANDROID_LOG_SILENT, /* only for SetMinPriority(); must be last */
} android_LogPriority;
/*
* Send a simple string to the log.
*/
int __android_log_write(int prio, const char *tag, const char *text);
/*
* Send a formatted string to the log, used like printf(fmt,...)
*/
int __android_log_print(int prio, const char *tag, const char *fmt, ...)
#if defined(__GNUC__)
#ifdef __USE_MINGW_ANSI_STDIO
#if __USE_MINGW_ANSI_STDIO
__attribute__ ((format(gnu_printf, 3, 4)))
#else
__attribute__ ((format(printf, 3, 4)))
#endif
#else
__attribute__ ((format(printf, 3, 4)))
#endif
#endif
;
/*
* A variant of __android_log_print() that takes a va_list to list
* additional parameters.
*/
int __android_log_vprint(int prio, const char *tag,
const char *fmt, va_list ap);
/*
* Log an assertion failure and abort the process to have a chance
* to inspect it if a debugger is attached. This uses the FATAL priority.
*/
void __android_log_assert(const char *cond, const char *tag,
const char *fmt, ...)
#if defined(__GNUC__)
__attribute__ ((noreturn))
#ifdef __USE_MINGW_ANSI_STDIO
#if __USE_MINGW_ANSI_STDIO
__attribute__ ((format(gnu_printf, 3, 4)))
#else
__attribute__ ((format(printf, 3, 4)))
#endif
#else
__attribute__ ((format(printf, 3, 4)))
#endif
#endif
;
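/*
 * Illustrative usage sketch (not part of this header): writing plain and
 * formatted messages at explicit priorities. The tag "MyApp" is a
 * hypothetical placeholder.
 */
static inline void example_log_usage(int value)
{
    __android_log_write(ANDROID_LOG_INFO, "MyApp", "starting up");
    __android_log_print(ANDROID_LOG_DEBUG, "MyApp", "value=%d", value);
}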
#ifdef __cplusplus
}
#endif
#endif /* _ANDROID_LOG_H */
|
0 | repos/libcamera/include/android/hardware/libhardware/include | repos/libcamera/include/android/hardware/libhardware/include/hardware/hardware.h | /* SPDX-License-Identifier: Apache-2.0 */
/*
* Copyright (C) 2008 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_INCLUDE_HARDWARE_HARDWARE_H
#define ANDROID_INCLUDE_HARDWARE_HARDWARE_H
#include <stdint.h>
#include <sys/cdefs.h>
#include <cutils/native_handle.h>
#include <system/graphics.h>
__BEGIN_DECLS
/*
* Value for the hw_module_t.tag field
*/
#define MAKE_TAG_CONSTANT(A,B,C,D) (((A) << 24) | ((B) << 16) | ((C) << 8) | (D))
#define HARDWARE_MODULE_TAG MAKE_TAG_CONSTANT('H', 'W', 'M', 'T')
#define HARDWARE_DEVICE_TAG MAKE_TAG_CONSTANT('H', 'W', 'D', 'T')
#define HARDWARE_MAKE_API_VERSION(maj,min) \
((((maj) & 0xff) << 8) | ((min) & 0xff))
#define HARDWARE_MAKE_API_VERSION_2(maj,min,hdr) \
((((maj) & 0xff) << 24) | (((min) & 0xff) << 16) | ((hdr) & 0xffff))
#define HARDWARE_API_VERSION_2_MAJ_MIN_MASK 0xffff0000
#define HARDWARE_API_VERSION_2_HEADER_MASK 0x0000ffff
/*
* The current HAL API version.
*
* All module implementations must set the hw_module_t.hal_api_version field
* to this value when declaring the module with HAL_MODULE_INFO_SYM.
*
 * Note that previous implementations have always set this field to 0.
 * Therefore, the libhardware HAL API will always consider versions 0.0 and
 * 1.0 to be 100% binary compatible.
*
*/
#define HARDWARE_HAL_API_VERSION HARDWARE_MAKE_API_VERSION(1, 0)
/*
* Helper macros for module implementors.
*
* The derived modules should provide convenience macros for supported
* versions so that implementations can explicitly specify module/device
* versions at definition time.
*
* Use this macro to set the hw_module_t.module_api_version field.
*/
#define HARDWARE_MODULE_API_VERSION(maj,min) HARDWARE_MAKE_API_VERSION(maj,min)
#define HARDWARE_MODULE_API_VERSION_2(maj,min,hdr) HARDWARE_MAKE_API_VERSION_2(maj,min,hdr)
/*
* Use this macro to set the hw_device_t.version field
*/
#define HARDWARE_DEVICE_API_VERSION(maj,min) HARDWARE_MAKE_API_VERSION(maj,min)
#define HARDWARE_DEVICE_API_VERSION_2(maj,min,hdr) HARDWARE_MAKE_API_VERSION_2(maj,min,hdr)
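/*
 * Worked example (illustrative, not part of the original header): the macros
 * above pack the major version into the high byte and the minor version into
 * the low byte, so version 1.0 is 0x0100 and version 2.3 is 0x0203. The
 * typedef below is a compile-time check of that packing.
 */
typedef char hardware_version_packing_example[
    (HARDWARE_MAKE_API_VERSION(1, 0) == 0x0100 &&
     HARDWARE_MAKE_API_VERSION(2, 3) == 0x0203) ? 1 : -1];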
struct hw_module_t;
struct hw_module_methods_t;
struct hw_device_t;
/**
* Every hardware module must have a data structure named HAL_MODULE_INFO_SYM
* and the fields of this data structure must begin with hw_module_t
* followed by module specific information.
*/
typedef struct hw_module_t {
/** tag must be initialized to HARDWARE_MODULE_TAG */
uint32_t tag;
/**
* The API version of the implemented module. The module owner is
* responsible for updating the version when a module interface has
* changed.
*
* The derived modules such as gralloc and audio own and manage this field.
* The module user must interpret the version field to decide whether or
* not to inter-operate with the supplied module implementation.
* For example, SurfaceFlinger is responsible for making sure that
* it knows how to manage different versions of the gralloc-module API,
* and AudioFlinger must know how to do the same for audio-module API.
*
* The module API version should include a major and a minor component.
* For example, version 1.0 could be represented as 0x0100. This format
* implies that versions 0x0100-0x01ff are all API-compatible.
*
* In the future, libhardware will expose a hw_get_module_version()
* (or equivalent) function that will take minimum/maximum supported
* versions as arguments and would be able to reject modules with
* versions outside of the supplied range.
*/
uint16_t module_api_version;
#define version_major module_api_version
/**
* version_major/version_minor defines are supplied here for temporary
* source code compatibility. They will be removed in the next version.
* ALL clients must convert to the new version format.
*/
/**
* The API version of the HAL module interface. This is meant to
* version the hw_module_t, hw_module_methods_t, and hw_device_t
* structures and definitions.
*
* The HAL interface owns this field. Module users/implementations
* must NOT rely on this value for version information.
*
* Presently, 0 is the only valid value.
*/
uint16_t hal_api_version;
#define version_minor hal_api_version
/** Identifier of module */
const char *id;
/** Name of this module */
const char *name;
/** Author/owner/implementor of the module */
const char *author;
/** Modules methods */
struct hw_module_methods_t* methods;
/** module's dso */
void* dso;
#ifdef __LP64__
uint64_t reserved[32-7];
#else
/** padding to 128 bytes, reserved for future use */
uint32_t reserved[32-7];
#endif
} hw_module_t;
typedef struct hw_module_methods_t {
/** Open a specific device */
int (*open)(const struct hw_module_t* module, const char* id,
struct hw_device_t** device);
} hw_module_methods_t;
/**
* Every device data structure must begin with hw_device_t
* followed by module specific public methods and attributes.
*/
typedef struct hw_device_t {
/** tag must be initialized to HARDWARE_DEVICE_TAG */
uint32_t tag;
/**
* Version of the module-specific device API. This value is used by
* the derived-module user to manage different device implementations.
*
* The module user is responsible for checking the module_api_version
* and device version fields to ensure that the user is capable of
* communicating with the specific module implementation.
*
* One module can support multiple devices with different versions. This
* can be useful when a device interface changes in an incompatible way
* but it is still necessary to support older implementations at the same
* time. One such example is the Camera 2.0 API.
*
* This field is interpreted by the module user and is ignored by the
* HAL interface itself.
*/
uint32_t version;
/** reference to the module this device belongs to */
struct hw_module_t* module;
/** padding reserved for future use */
#ifdef __LP64__
uint64_t reserved[12];
#else
uint32_t reserved[12];
#endif
/** Close this device */
int (*close)(struct hw_device_t* device);
} hw_device_t;
#ifdef __cplusplus
#define TO_HW_DEVICE_T_OPEN(x) reinterpret_cast<struct hw_device_t**>(x)
#else
#define TO_HW_DEVICE_T_OPEN(x) (struct hw_device_t**)(x)
#endif
/**
* Name of the hal_module_info
*/
#define HAL_MODULE_INFO_SYM HMI
/**
* Name of the hal_module_info as a string
*/
#define HAL_MODULE_INFO_SYM_AS_STR "HMI"
/**
* Get the module info associated with a module by id.
*
* @return: 0 == success, <0 == error and *module == NULL
*/
int hw_get_module(const char *id, const struct hw_module_t **module);
/**
* Get the module info associated with a module instance by class 'class_id'
* and instance 'inst'.
*
 * Some module types necessitate multiple instances. For example, audio
 * supports multiple concurrent interfaces and thus 'audio' is the module class
* and 'primary' or 'a2dp' are module interfaces. This implies that the files
* providing these modules would be named audio.primary.<variant>.so and
* audio.a2dp.<variant>.so
*
* @return: 0 == success, <0 == error and *module == NULL
*/
int hw_get_module_by_class(const char *class_id, const char *inst,
const struct hw_module_t **module);
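/*
 * Illustrative sketch (not part of the original header): looking up a module
 * by id and opening a device through its methods table. The module id
 * "example" and the device id "dev0" are hypothetical placeholders.
 */
static inline int example_open_device(struct hw_device_t** device)
{
    const struct hw_module_t* module = NULL;
    int err = hw_get_module("example", &module);
    if (err != 0)
        return err;  /* on error, *module is NULL */
    return module->methods->open(module, "dev0", device);
}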
__END_DECLS
#endif /* ANDROID_INCLUDE_HARDWARE_HARDWARE_H */
|
0 | repos/libcamera/include/android/hardware/libhardware/include | repos/libcamera/include/android/hardware/libhardware/include/hardware/fb.h | /* SPDX-License-Identifier: Apache-2.0 */
/*
* Copyright (C) 2008 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_FB_INTERFACE_H
#define ANDROID_FB_INTERFACE_H
#include <stdint.h>
#include <sys/cdefs.h>
#include <sys/types.h>
#include <cutils/native_handle.h>
#include <hardware/hardware.h>
__BEGIN_DECLS
#define GRALLOC_HARDWARE_FB0 "fb0"
/*****************************************************************************/
/*****************************************************************************/
typedef struct framebuffer_device_t {
/**
* Common methods of the framebuffer device. This *must* be the first member of
* framebuffer_device_t as users of this structure will cast a hw_device_t to
* framebuffer_device_t pointer in contexts where it's known the hw_device_t references a
* framebuffer_device_t.
*/
struct hw_device_t common;
/* flags describing some attributes of the framebuffer */
const uint32_t flags;
/* dimensions of the framebuffer in pixels */
const uint32_t width;
const uint32_t height;
    /* framebuffer stride in pixels */
const int stride;
/* framebuffer pixel format */
const int format;
    /* resolution of the framebuffer's display panel in pixels per inch */
const float xdpi;
const float ydpi;
/* framebuffer's display panel refresh rate in frames per second */
const float fps;
/* min swap interval supported by this framebuffer */
const int minSwapInterval;
/* max swap interval supported by this framebuffer */
const int maxSwapInterval;
    /* Number of framebuffers supported */
const int numFramebuffers;
int reserved[7];
/*
     * requests a specific swap-interval (same definition as EGL)
*
* Returns 0 on success or -errno on error.
*/
int (*setSwapInterval)(struct framebuffer_device_t* window,
int interval);
/*
* This hook is OPTIONAL.
*
     * It is non-NULL if the framebuffer driver supports "update-on-demand"
* and the given rectangle is the area of the screen that gets
* updated during (*post)().
*
* This is useful on devices that are able to DMA only a portion of
* the screen to the display panel, upon demand -- as opposed to
* constantly refreshing the panel 60 times per second, for instance.
*
* Only the area defined by this rectangle is guaranteed to be valid, that
* is, the driver is not allowed to post anything outside of this
* rectangle.
*
     * The rectangle is evaluated during (*post)() and specifies which area
     * of the buffer passed to (*post)() shall be posted.
*
* return -EINVAL if width or height <=0, or if left or top < 0
*/
int (*setUpdateRect)(struct framebuffer_device_t* window,
int left, int top, int width, int height);
/*
* Post <buffer> to the display (display it on the screen)
* The buffer must have been allocated with the
* GRALLOC_USAGE_HW_FB usage flag.
* buffer must be the same width and height as the display and must NOT
* be locked.
*
* The buffer is shown during the next VSYNC.
*
* If the same buffer is posted again (possibly after some other buffer),
     * post() will block until the first post is completed.
*
* Internally, post() is expected to lock the buffer so that a
* subsequent call to gralloc_module_t::(*lock)() with USAGE_RENDER or
* USAGE_*_WRITE will block until it is safe; that is typically once this
* buffer is shown and another buffer has been posted.
*
* Returns 0 on success or -errno on error.
*/
int (*post)(struct framebuffer_device_t* dev, buffer_handle_t buffer);
/*
* The (*compositionComplete)() method must be called after the
* compositor has finished issuing GL commands for client buffers.
*/
int (*compositionComplete)(struct framebuffer_device_t* dev);
/*
* This hook is OPTIONAL.
*
     * If non-NULL it will be called by SurfaceFlinger on dumpsys.
*/
void (*dump)(struct framebuffer_device_t* dev, char *buff, int buff_len);
/*
* (*enableScreen)() is used to either blank (enable=0) or
* unblank (enable=1) the screen this framebuffer is attached to.
*
* Returns 0 on success or -errno on error.
*/
int (*enableScreen)(struct framebuffer_device_t* dev, int enable);
void* reserved_proc[6];
} framebuffer_device_t;
/** convenience API for opening and closing a supported device */
static inline int framebuffer_open(const struct hw_module_t* module,
struct framebuffer_device_t** device) {
return module->methods->open(module,
GRALLOC_HARDWARE_FB0, TO_HW_DEVICE_T_OPEN(device));
}
static inline int framebuffer_close(struct framebuffer_device_t* device) {
return device->common.close(&device->common);
}
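/*
 * Illustrative sketch (not part of the original header): obtaining the
 * gralloc module and opening the framebuffer device with the helper above.
 * The module id "gralloc" corresponds to GRALLOC_HARDWARE_MODULE_ID from
 * hardware/gralloc.h.
 */
static inline int example_framebuffer_setup(struct framebuffer_device_t** fbDev)
{
    const struct hw_module_t* module = NULL;
    int err = hw_get_module("gralloc", &module);
    if (err != 0)
        return err;
    return framebuffer_open(module, fbDev);
}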
__END_DECLS
#endif // ANDROID_FB_INTERFACE_H
|
0 | repos/libcamera/include/android/hardware/libhardware/include | repos/libcamera/include/android/hardware/libhardware/include/hardware/gralloc.h | /* SPDX-License-Identifier: Apache-2.0 */
/*
* Copyright (C) 2008 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_GRALLOC_INTERFACE_H
#define ANDROID_GRALLOC_INTERFACE_H
#include <hardware/hardware.h>
#include <system/graphics.h>
#include <stdint.h>
#include <sys/cdefs.h>
#include <sys/types.h>
#include <cutils/native_handle.h>
#include <hardware/fb.h>
#include <hardware/hardware.h>
__BEGIN_DECLS
/**
* Module versioning information for the Gralloc hardware module, based on
* gralloc_module_t.common.module_api_version.
*
* Version History:
*
* GRALLOC_MODULE_API_VERSION_0_1:
* Initial Gralloc hardware module API.
*
* GRALLOC_MODULE_API_VERSION_0_2:
* Add support for flexible YCbCr format with (*lock_ycbcr)() method.
*
* GRALLOC_MODULE_API_VERSION_0_3:
* Add support for fence passing to/from lock/unlock.
*/
#define GRALLOC_MODULE_API_VERSION_0_1 HARDWARE_MODULE_API_VERSION(0, 1)
#define GRALLOC_MODULE_API_VERSION_0_2 HARDWARE_MODULE_API_VERSION(0, 2)
#define GRALLOC_MODULE_API_VERSION_0_3 HARDWARE_MODULE_API_VERSION(0, 3)
#define GRALLOC_DEVICE_API_VERSION_0_1 HARDWARE_DEVICE_API_VERSION(0, 1)
/**
* The id of this module
*/
#define GRALLOC_HARDWARE_MODULE_ID "gralloc"
/**
* Name of the graphics device to open
*/
#define GRALLOC_HARDWARE_GPU0 "gpu0"
enum {
/* buffer is never read in software */
GRALLOC_USAGE_SW_READ_NEVER = 0x00000000U,
/* buffer is rarely read in software */
GRALLOC_USAGE_SW_READ_RARELY = 0x00000002U,
/* buffer is often read in software */
GRALLOC_USAGE_SW_READ_OFTEN = 0x00000003U,
/* mask for the software read values */
GRALLOC_USAGE_SW_READ_MASK = 0x0000000FU,
/* buffer is never written in software */
GRALLOC_USAGE_SW_WRITE_NEVER = 0x00000000U,
/* buffer is rarely written in software */
GRALLOC_USAGE_SW_WRITE_RARELY = 0x00000020U,
/* buffer is often written in software */
GRALLOC_USAGE_SW_WRITE_OFTEN = 0x00000030U,
/* mask for the software write values */
GRALLOC_USAGE_SW_WRITE_MASK = 0x000000F0U,
/* buffer will be used as an OpenGL ES texture */
GRALLOC_USAGE_HW_TEXTURE = 0x00000100U,
/* buffer will be used as an OpenGL ES render target */
GRALLOC_USAGE_HW_RENDER = 0x00000200U,
/* buffer will be used by the 2D hardware blitter */
GRALLOC_USAGE_HW_2D = 0x00000400U,
/* buffer will be used by the HWComposer HAL module */
GRALLOC_USAGE_HW_COMPOSER = 0x00000800U,
/* buffer will be used with the framebuffer device */
GRALLOC_USAGE_HW_FB = 0x00001000U,
/* buffer should be displayed full-screen on an external display when
* possible */
GRALLOC_USAGE_EXTERNAL_DISP = 0x00002000U,
/* Must have a hardware-protected path to external display sink for
     * this buffer. If a hardware-protected path is not available, then
     * either exclude just this buffer from the composition sent to the
     * external sink (preferred), or (less desirable) do not route the
     * entire composition to the external sink. */
GRALLOC_USAGE_PROTECTED = 0x00004000U,
/* buffer may be used as a cursor */
GRALLOC_USAGE_CURSOR = 0x00008000U,
/* buffer will be used with the HW video encoder */
GRALLOC_USAGE_HW_VIDEO_ENCODER = 0x00010000U,
/* buffer will be written by the HW camera pipeline */
GRALLOC_USAGE_HW_CAMERA_WRITE = 0x00020000U,
/* buffer will be read by the HW camera pipeline */
GRALLOC_USAGE_HW_CAMERA_READ = 0x00040000U,
/* buffer will be used as part of zero-shutter-lag queue */
GRALLOC_USAGE_HW_CAMERA_ZSL = 0x00060000U,
/* mask for the camera access values */
GRALLOC_USAGE_HW_CAMERA_MASK = 0x00060000U,
/* mask for the software usage bit-mask */
GRALLOC_USAGE_HW_MASK = 0x00071F00U,
/* buffer will be used as a RenderScript Allocation */
GRALLOC_USAGE_RENDERSCRIPT = 0x00100000U,
/* Set by the consumer to indicate to the producer that they may attach a
* buffer that they did not detach from the BufferQueue. Will be filtered
* out by GRALLOC_USAGE_ALLOC_MASK, so gralloc modules will not need to
* handle this flag. */
GRALLOC_USAGE_FOREIGN_BUFFERS = 0x00200000U,
/* Mask of all flags which could be passed to a gralloc module for buffer
* allocation. Any flags not in this mask do not need to be handled by
* gralloc modules. */
GRALLOC_USAGE_ALLOC_MASK = ~(GRALLOC_USAGE_FOREIGN_BUFFERS),
/* implementation-specific private usage flags */
GRALLOC_USAGE_PRIVATE_0 = 0x10000000U,
GRALLOC_USAGE_PRIVATE_1 = 0x20000000U,
GRALLOC_USAGE_PRIVATE_2 = 0x40000000U,
GRALLOC_USAGE_PRIVATE_3 = 0x80000000U,
GRALLOC_USAGE_PRIVATE_MASK = 0xF0000000U,
};
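/*
 * Illustrative sketch (not part of the original header): a plausible usage
 * combination for a camera preview buffer that is sampled by the GPU and
 * occasionally inspected by the CPU. The combination is an assumption, not
 * a requirement of the API.
 */
static const int example_camera_preview_usage =
    GRALLOC_USAGE_HW_CAMERA_WRITE |
    GRALLOC_USAGE_HW_TEXTURE |
    GRALLOC_USAGE_SW_READ_RARELY;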
/*****************************************************************************/
/**
* Every hardware module must have a data structure named HAL_MODULE_INFO_SYM
* and the fields of this data structure must begin with hw_module_t
* followed by module specific information.
*/
typedef struct gralloc_module_t {
struct hw_module_t common;
/*
* (*registerBuffer)() must be called before a buffer_handle_t that has not
* been created with (*alloc_device_t::alloc)() can be used.
*
* This is intended to be used with buffer_handle_t's that have been
* received in this process through IPC.
*
* This function checks that the handle is indeed a valid one and prepares
* it for use with (*lock)() and (*unlock)().
*
* It is not necessary to call (*registerBuffer)() on a handle created
* with (*alloc_device_t::alloc)().
*
* returns an error if this buffer_handle_t is not valid.
*/
int (*registerBuffer)(struct gralloc_module_t const* module,
buffer_handle_t handle);
/*
* (*unregisterBuffer)() is called once this handle is no longer needed in
* this process. After this call, it is an error to call (*lock)(),
* (*unlock)(), or (*registerBuffer)().
*
* This function doesn't close or free the handle itself; this is done
* by other means, usually through libcutils's native_handle_close() and
* native_handle_free().
*
* It is an error to call (*unregisterBuffer)() on a buffer that wasn't
* explicitly registered first.
*/
int (*unregisterBuffer)(struct gralloc_module_t const* module,
buffer_handle_t handle);
/*
* The (*lock)() method is called before a buffer is accessed for the
* specified usage. This call may block, for instance if the h/w needs
* to finish rendering or if CPU caches need to be synchronized.
*
* The caller promises to modify only pixels in the area specified
* by (l,t,w,h).
*
* The content of the buffer outside of the specified area is NOT modified
* by this call.
*
* If usage specifies GRALLOC_USAGE_SW_*, vaddr is filled with the address
* of the buffer in virtual memory.
*
* Note calling (*lock)() on HAL_PIXEL_FORMAT_YCbCr_*_888 buffers will fail
* and return -EINVAL. These buffers must be locked with (*lock_ycbcr)()
* instead.
*
* THREADING CONSIDERATIONS:
*
     * It is legal for several different threads to lock a buffer for
     * read access; none of the threads are blocked.
*
* However, locking a buffer simultaneously for write or read/write is
* undefined, but:
* - shall not result in termination of the process
* - shall not block the caller
     * It is acceptable to return an error or to leave the buffer's content
     * in an indeterminate state.
*
* If the buffer was created with a usage mask incompatible with the
* requested usage flags here, -EINVAL is returned.
*
*/
int (*lock)(struct gralloc_module_t const* module,
buffer_handle_t handle, int usage,
int l, int t, int w, int h,
void** vaddr);
/*
* The (*unlock)() method must be called after all changes to the buffer
* are completed.
*/
int (*unlock)(struct gralloc_module_t const* module,
buffer_handle_t handle);
/* reserved for future use */
int (*perform)(struct gralloc_module_t const* module,
int operation, ... );
/*
* The (*lock_ycbcr)() method is like the (*lock)() method, with the
* difference that it fills a struct ycbcr with a description of the buffer
* layout, and zeroes out the reserved fields.
*
* If the buffer format is not compatible with a flexible YUV format (e.g.
* the buffer layout cannot be represented with the ycbcr struct), it
* will return -EINVAL.
*
* This method must work on buffers with HAL_PIXEL_FORMAT_YCbCr_*_888
* if supported by the device, as well as with any other format that is
* requested by the multimedia codecs when they are configured with a
* flexible-YUV-compatible color-format with android native buffers.
*
* Note that this method may also be called on buffers of other formats,
* including non-YUV formats.
*
* Added in GRALLOC_MODULE_API_VERSION_0_2.
*/
int (*lock_ycbcr)(struct gralloc_module_t const* module,
buffer_handle_t handle, int usage,
int l, int t, int w, int h,
struct android_ycbcr *ycbcr);
/*
* The (*lockAsync)() method is like the (*lock)() method except
* that the buffer's sync fence object is passed into the lock
* call instead of requiring the caller to wait for completion.
*
* The gralloc implementation takes ownership of the fenceFd and
* is responsible for closing it when no longer needed.
*
* Added in GRALLOC_MODULE_API_VERSION_0_3.
*/
int (*lockAsync)(struct gralloc_module_t const* module,
buffer_handle_t handle, int usage,
int l, int t, int w, int h,
void** vaddr, int fenceFd);
/*
* The (*unlockAsync)() method is like the (*unlock)() method
* except that a buffer sync fence object is returned from the
* lock call, representing the completion of any pending work
* performed by the gralloc implementation.
*
* The caller takes ownership of the fenceFd and is responsible
* for closing it when no longer needed.
*
* Added in GRALLOC_MODULE_API_VERSION_0_3.
*/
int (*unlockAsync)(struct gralloc_module_t const* module,
buffer_handle_t handle, int* fenceFd);
/*
* The (*lockAsync_ycbcr)() method is like the (*lock_ycbcr)()
* method except that the buffer's sync fence object is passed
* into the lock call instead of requiring the caller to wait for
* completion.
*
* The gralloc implementation takes ownership of the fenceFd and
* is responsible for closing it when no longer needed.
*
* Added in GRALLOC_MODULE_API_VERSION_0_3.
*/
int (*lockAsync_ycbcr)(struct gralloc_module_t const* module,
buffer_handle_t handle, int usage,
int l, int t, int w, int h,
struct android_ycbcr *ycbcr, int fenceFd);
/* reserved for future use */
void* reserved_proc[3];
} gralloc_module_t;
/*****************************************************************************/
/**
* Every device data structure must begin with hw_device_t
* followed by module specific public methods and attributes.
*/
typedef struct alloc_device_t {
struct hw_device_t common;
/*
* (*alloc)() Allocates a buffer in graphic memory with the requested
* parameters and returns a buffer_handle_t and the stride in pixels to
* allow the implementation to satisfy hardware constraints on the width
     * of a pixmap (e.g. it may have to be a multiple of 8 pixels).
* The CALLER TAKES OWNERSHIP of the buffer_handle_t.
*
* If format is HAL_PIXEL_FORMAT_YCbCr_420_888, the returned stride must be
* 0, since the actual strides are available from the android_ycbcr
* structure.
*
* Returns 0 on success or -errno on error.
*/
int (*alloc)(struct alloc_device_t* dev,
int w, int h, int format, int usage,
buffer_handle_t* handle, int* stride);
/*
* (*free)() Frees a previously allocated buffer.
* Behavior is undefined if the buffer is still mapped in any process,
* but shall not result in termination of the program or security breaches
* (allowing a process to get access to another process' buffers).
* THIS FUNCTION TAKES OWNERSHIP of the buffer_handle_t which becomes
* invalid after the call.
*
* Returns 0 on success or -errno on error.
*/
int (*free)(struct alloc_device_t* dev,
buffer_handle_t handle);
/* This hook is OPTIONAL.
*
     * If non-NULL it will be called by SurfaceFlinger on dumpsys.
*/
void (*dump)(struct alloc_device_t *dev, char *buff, int buff_len);
void* reserved_proc[7];
} alloc_device_t;
/** convenience API for opening and closing a supported device */
static inline int gralloc_open(const struct hw_module_t* module,
struct alloc_device_t** device) {
return module->methods->open(module,
GRALLOC_HARDWARE_GPU0, TO_HW_DEVICE_T_OPEN(device));
}
static inline int gralloc_close(struct alloc_device_t* device) {
return device->common.close(&device->common);
}
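/*
 * Illustrative sketch (not part of the original header): allocate a buffer,
 * lock it for software writes, then unlock and free it. The 640x480 size is
 * a placeholder; format value 1 is HAL_PIXEL_FORMAT_RGBA_8888 from
 * system/graphics.h.
 */
static inline int example_gralloc_roundtrip(const struct gralloc_module_t* module,
                                            struct alloc_device_t* dev)
{
    buffer_handle_t handle;
    int stride = 0;
    void* vaddr = NULL;
    int err = dev->alloc(dev, 640, 480, 1 /* RGBA_8888 */,
                         GRALLOC_USAGE_SW_WRITE_OFTEN, &handle, &stride);
    if (err != 0)
        return err;
    err = module->lock(module, handle, GRALLOC_USAGE_SW_WRITE_OFTEN,
                       0, 0, 640, 480, &vaddr);
    if (err == 0) {
        /* ... write pixels through vaddr, honoring the returned stride ... */
        module->unlock(module, handle);
    }
    dev->free(dev, handle);
    return err;
}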
/**
* map_usage_to_memtrack should be called after allocating a gralloc buffer.
*
 * @param usage - the usage flags passed when the alloc function is called.
*
* This function maps the gralloc usage flags to appropriate memtrack bucket.
* GrallocHAL implementers and users should make an additional ION_IOCTL_TAG
* call using the memtrack tag returned by this function. This will help the
 * in-kernel memtrack to categorize the memory allocated by different processes
* according to their usage.
*
*/
static inline const char* map_usage_to_memtrack(uint32_t usage) {
usage &= GRALLOC_USAGE_ALLOC_MASK;
if ((usage & GRALLOC_USAGE_HW_CAMERA_WRITE) != 0) {
return "camera";
} else if ((usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) != 0 ||
(usage & GRALLOC_USAGE_EXTERNAL_DISP) != 0) {
return "video";
} else if ((usage & GRALLOC_USAGE_HW_RENDER) != 0 ||
(usage & GRALLOC_USAGE_HW_TEXTURE) != 0) {
return "gl";
} else if ((usage & GRALLOC_USAGE_HW_CAMERA_READ) != 0) {
return "camera";
} else if ((usage & GRALLOC_USAGE_SW_READ_MASK) != 0 ||
(usage & GRALLOC_USAGE_SW_WRITE_MASK) != 0) {
return "cpu";
}
return "graphics";
}
__END_DECLS
#endif // ANDROID_GRALLOC_INTERFACE_H
|
0 | repos/libcamera/include/android/hardware/libhardware/include | repos/libcamera/include/android/hardware/libhardware/include/hardware/camera_common.h | /* SPDX-License-Identifier: Apache-2.0 */
/*
* Copyright (C) 2012 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// FIXME: add well-defined names for cameras
#ifndef ANDROID_INCLUDE_CAMERA_COMMON_H
#define ANDROID_INCLUDE_CAMERA_COMMON_H
#include <stdint.h>
#include <stdbool.h>
#include <sys/cdefs.h>
#include <sys/types.h>
#include <cutils/native_handle.h>
#include <system/camera.h>
#include <system/camera_vendor_tags.h>
#include <hardware/hardware.h>
#include <hardware/gralloc.h>
__BEGIN_DECLS
/**
* The id of this module
*/
#define CAMERA_HARDWARE_MODULE_ID "camera"
/**
* Module versioning information for the Camera hardware module, based on
* camera_module_t.common.module_api_version. The two most significant hex
* digits represent the major version, and the two least significant represent
* the minor version.
*
*******************************************************************************
* Versions: 0.X - 1.X [CAMERA_MODULE_API_VERSION_1_0]
*
* Camera modules that report these version numbers implement the initial
* camera module HAL interface. All camera devices openable through this
* module support only version 1 of the camera device HAL. The device_version
* and static_camera_characteristics fields of camera_info are not valid. Only
* the android.hardware.Camera API can be supported by this module and its
* devices.
*
*******************************************************************************
* Version: 2.0 [CAMERA_MODULE_API_VERSION_2_0]
*
* Camera modules that report this version number implement the second version
* of the camera module HAL interface. Camera devices openable through this
* module may support either version 1.0 or version 2.0 of the camera device
* HAL interface. The device_version field of camera_info is always valid; the
* static_camera_characteristics field of camera_info is valid if the
* device_version field is 2.0 or higher.
*
*******************************************************************************
* Version: 2.1 [CAMERA_MODULE_API_VERSION_2_1]
*
* This camera module version adds support for asynchronous callbacks to the
* framework from the camera HAL module, which is used to notify the framework
* about changes to the camera module state. Modules that provide a valid
* set_callbacks() method must report at least this version number.
*
*******************************************************************************
* Version: 2.2 [CAMERA_MODULE_API_VERSION_2_2]
*
* This camera module version adds vendor tag support from the module, and
* deprecates the old vendor_tag_query_ops that were previously only
* accessible with a device open.
*
*******************************************************************************
* Version: 2.3 [CAMERA_MODULE_API_VERSION_2_3]
*
* This camera module version adds open legacy camera HAL device support.
* Framework can use it to open the camera device as lower device HAL version
* HAL device if the same device can support multiple device API versions.
* The standard hardware module open call (common.methods->open) continues
* to open the camera device with the latest supported version, which is
* also the version listed in camera_info_t.device_version.
*
*******************************************************************************
* Version: 2.4 [CAMERA_MODULE_API_VERSION_2_4]
*
* This camera module version adds below API changes:
*
* 1. Torch mode support. The framework can use it to turn on torch mode for
* any camera device that has a flash unit, without opening a camera device. The
* camera device has a higher priority accessing the flash unit than the camera
* module; opening a camera device will turn off the torch if it had been enabled
* through the module interface. When there are any resource conflicts, such as
* open() is called to open a camera device, the camera HAL module must notify the
* framework through the torch mode status callback that the torch mode has been
* turned off.
*
* 2. External camera (e.g. USB hot-plug camera) support. The API updates specify that
* the camera static info is only available when camera is connected and ready to
 * use for external hot-plug cameras. Calls to get static info are invalid
 * when the camera status is not CAMERA_DEVICE_STATUS_PRESENT. The framework
 * will only count on device status change callbacks to manage the available external
* camera list.
*
* 3. Camera arbitration hints. This module version adds support for explicitly
* indicating the number of camera devices that can be simultaneously opened and used.
* To specify valid combinations of devices, the resource_cost and conflicting_devices
* fields should always be set in the camera_info structure returned by the
* get_camera_info call.
*
* 4. Module initialization method. This will be called by the camera service
* right after the HAL module is loaded, to allow for one-time initialization
* of the HAL. It is called before any other module methods are invoked.
*/
/**
* Predefined macros for currently-defined version numbers
*/
/**
* All module versions <= HARDWARE_MODULE_API_VERSION(1, 0xFF) must be treated
* as CAMERA_MODULE_API_VERSION_1_0
*/
#define CAMERA_MODULE_API_VERSION_1_0 HARDWARE_MODULE_API_VERSION(1, 0)
#define CAMERA_MODULE_API_VERSION_2_0 HARDWARE_MODULE_API_VERSION(2, 0)
#define CAMERA_MODULE_API_VERSION_2_1 HARDWARE_MODULE_API_VERSION(2, 1)
#define CAMERA_MODULE_API_VERSION_2_2 HARDWARE_MODULE_API_VERSION(2, 2)
#define CAMERA_MODULE_API_VERSION_2_3 HARDWARE_MODULE_API_VERSION(2, 3)
#define CAMERA_MODULE_API_VERSION_2_4 HARDWARE_MODULE_API_VERSION(2, 4)
#define CAMERA_MODULE_API_VERSION_CURRENT CAMERA_MODULE_API_VERSION_2_4
/**
* All device versions <= HARDWARE_DEVICE_API_VERSION(1, 0xFF) must be treated
* as CAMERA_DEVICE_API_VERSION_1_0
*/
#define CAMERA_DEVICE_API_VERSION_1_0 HARDWARE_DEVICE_API_VERSION(1, 0) // DEPRECATED
#define CAMERA_DEVICE_API_VERSION_2_0 HARDWARE_DEVICE_API_VERSION(2, 0) // NO LONGER SUPPORTED
#define CAMERA_DEVICE_API_VERSION_2_1 HARDWARE_DEVICE_API_VERSION(2, 1) // NO LONGER SUPPORTED
#define CAMERA_DEVICE_API_VERSION_3_0 HARDWARE_DEVICE_API_VERSION(3, 0) // NO LONGER SUPPORTED
#define CAMERA_DEVICE_API_VERSION_3_1 HARDWARE_DEVICE_API_VERSION(3, 1) // NO LONGER SUPPORTED
#define CAMERA_DEVICE_API_VERSION_3_2 HARDWARE_DEVICE_API_VERSION(3, 2)
#define CAMERA_DEVICE_API_VERSION_3_3 HARDWARE_DEVICE_API_VERSION(3, 3)
#define CAMERA_DEVICE_API_VERSION_3_4 HARDWARE_DEVICE_API_VERSION(3, 4)
#define CAMERA_DEVICE_API_VERSION_3_5 HARDWARE_DEVICE_API_VERSION(3, 5)
// Device version 3.5 is current; older HAL camera device versions are not
// recommended for new devices.
#define CAMERA_DEVICE_API_VERSION_CURRENT CAMERA_DEVICE_API_VERSION_3_5
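/*
 * Illustrative sketch (not part of the original header): extracting the
 * major and minor components from a packed module version, per the encoding
 * described above (the two most significant hex digits are the major
 * version).
 */
static inline int example_module_version_major(uint16_t module_api_version)
{
    return (module_api_version >> 8) & 0xff;
}
static inline int example_module_version_minor(uint16_t module_api_version)
{
    return module_api_version & 0xff;
}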
/**
* Defined in /system/media/camera/include/system/camera_metadata.h
*/
typedef struct camera_metadata camera_metadata_t;
typedef struct camera_info {
/**
* The direction that the camera faces to. See system/core/include/system/camera.h
* for camera facing definitions.
*
* Version information (based on camera_module_t.common.module_api_version):
*
* CAMERA_MODULE_API_VERSION_2_3 or lower:
*
* It should be CAMERA_FACING_BACK or CAMERA_FACING_FRONT.
*
* CAMERA_MODULE_API_VERSION_2_4 or higher:
*
* It should be CAMERA_FACING_BACK, CAMERA_FACING_FRONT or
* CAMERA_FACING_EXTERNAL.
*/
int facing;
/**
* The orientation of the camera image. The value is the angle that the
* camera image needs to be rotated clockwise so it shows correctly on the
* display in its natural orientation. It should be 0, 90, 180, or 270.
*
* For example, suppose a device has a naturally tall screen. The
* back-facing camera sensor is mounted in landscape. You are looking at the
* screen. If the top side of the camera sensor is aligned with the right
* edge of the screen in natural orientation, the value should be 90. If the
* top side of a front-facing camera sensor is aligned with the right of the
* screen, the value should be 270.
*
* Version information (based on camera_module_t.common.module_api_version):
*
* CAMERA_MODULE_API_VERSION_2_3 or lower:
*
* Valid in all camera_module versions.
*
* CAMERA_MODULE_API_VERSION_2_4 or higher:
*
* Valid if camera facing is CAMERA_FACING_BACK or CAMERA_FACING_FRONT,
* not valid if camera facing is CAMERA_FACING_EXTERNAL.
*/
int orientation;
/**
* The value of camera_device_t.common.version.
*
* Version information (based on camera_module_t.common.module_api_version):
*
* CAMERA_MODULE_API_VERSION_1_0:
*
* Not valid. Can be assumed to be CAMERA_DEVICE_API_VERSION_1_0. Do
* not read this field.
*
* CAMERA_MODULE_API_VERSION_2_0 or higher:
*
* Always valid
*
*/
uint32_t device_version;
/**
* The camera's fixed characteristics, which include all static camera metadata
* specified in system/media/camera/docs/docs.html. This should be a sorted metadata
* buffer, and may not be modified or freed by the caller. The pointer should remain
* valid for the lifetime of the camera module, and values in it may not
* change after it is returned by get_camera_info().
*
* Version information (based on camera_module_t.common.module_api_version):
*
* CAMERA_MODULE_API_VERSION_1_0:
*
* Not valid. Extra characteristics are not available. Do not read this
* field.
*
* CAMERA_MODULE_API_VERSION_2_0 or higher:
*
* Valid if device_version >= CAMERA_DEVICE_API_VERSION_2_0. Do not read
* otherwise.
*
*/
const camera_metadata_t *static_camera_characteristics;
/**
* The total resource "cost" of using this camera, represented as an integer
* value in the range [0, 100] where 100 represents total usage of the shared
* resource that is the limiting bottleneck of the camera subsystem. This may
* be a very rough estimate, and is used as a hint to the camera service to
* determine when to disallow multiple applications from simultaneously
* opening different cameras advertised by the camera service.
*
* The camera service must be able to simultaneously open and use any
* combination of camera devices exposed by the HAL where the sum of
* the resource costs of these cameras is <= 100. For determining cost,
* each camera device must be assumed to be configured and operating at
* the maximally resource-consuming framerate and stream size settings
* available in the configuration settings exposed for that device through
* the camera metadata.
*
* The camera service may still attempt to simultaneously open combinations
* of camera devices with a total resource cost > 100. This may succeed or
* fail. If this succeeds, combinations of configurations that are not
* supported due to resource constraints from having multiple open devices
* should fail during the configure calls. If the total resource cost is
* <= 100, open and configure should never fail for any stream configuration
* settings or other device capabilities that would normally succeed for a
* device when it is the only open camera device.
*
* This field will be used to determine whether background applications are
* allowed to use this camera device while other applications are using other
* camera devices. Note: multiple applications will never be allowed by the
* camera service to simultaneously open the same camera device.
*
* Example use cases:
*
* Ex. 1: Camera Device 0 = Back Camera
* Camera Device 1 = Front Camera
* - Using both camera devices causes a large framerate slowdown due to
* limited ISP bandwidth.
*
* Configuration:
*
* Camera Device 0 - resource_cost = 51
* conflicting_devices = null
* Camera Device 1 - resource_cost = 51
* conflicting_devices = null
*
* Result:
*
* Since the sum of the resource costs is > 100, if a higher-priority
* application has either device open, no lower-priority applications will be
* allowed by the camera service to open either device. If a lower-priority
     * application is using a device that a higher-priority application
     * subsequently attempts to open, the lower-priority application will be
     * forced to disconnect the device.
*
* If the highest-priority application chooses, it may still attempt to open
* both devices (since these devices are not listed as conflicting in the
* conflicting_devices fields), but usage of these devices may fail in the
* open or configure calls.
*
* Ex. 2: Camera Device 0 = Left Back Camera
* Camera Device 1 = Right Back Camera
* Camera Device 2 = Combined stereo camera using both right and left
* back camera sensors used by devices 0, and 1
* Camera Device 3 = Front Camera
     * - Due to hardware constraints, up to two cameras may be open at once. The
* combined stereo camera may never be used at the same time as either of the
* two back camera devices (device 0, 1), and typically requires too much
* bandwidth to use at the same time as the front camera (device 3).
*
* Configuration:
*
* Camera Device 0 - resource_cost = 50
* conflicting_devices = { 2 }
* Camera Device 1 - resource_cost = 50
* conflicting_devices = { 2 }
* Camera Device 2 - resource_cost = 100
* conflicting_devices = { 0, 1 }
* Camera Device 3 - resource_cost = 50
* conflicting_devices = null
*
* Result:
*
* Based on the conflicting_devices fields, the camera service guarantees that
* the following sets of open devices will never be allowed: { 1, 2 }, { 0, 2 }.
*
* Based on the resource_cost fields, if a high-priority foreground application
* is using camera device 0, a background application would be allowed to open
* camera device 1 or 3 (but would be forced to disconnect it again if the
* foreground application opened another device).
*
* The highest priority application may still attempt to simultaneously open
* devices 0, 2, and 3, but the HAL may fail in open or configure calls for
* this combination.
*
* Ex. 3: Camera Device 0 = Back Camera
* Camera Device 1 = Front Camera
* Camera Device 2 = Low-power Front Camera that uses the same
* sensor as device 1, but only exposes image stream
* resolutions that can be used in low-power mode
     * - Using both front cameras (device 1, 2) at the same time is impossible due
     * to a shared physical sensor. Using the back and "high-power" front camera
* (device 1) may be impossible for some stream configurations due to hardware
* limitations, but the "low-power" front camera option may always be used as
* it has special dedicated hardware.
*
* Configuration:
*
* Camera Device 0 - resource_cost = 100
* conflicting_devices = null
* Camera Device 1 - resource_cost = 100
* conflicting_devices = { 2 }
* Camera Device 2 - resource_cost = 0
* conflicting_devices = { 1 }
* Result:
*
* Based on the conflicting_devices fields, the camera service guarantees that
* the following sets of open devices will never be allowed: { 1, 2 }.
*
* Based on the resource_cost fields, only the highest priority application
* may attempt to open both device 0 and 1 at the same time. If a higher-priority
* application is not using device 1 or 2, a low-priority background application
* may open device 2 (but will be forced to disconnect it if a higher-priority
* application subsequently opens device 1 or 2).
*
* Version information (based on camera_module_t.common.module_api_version):
*
* CAMERA_MODULE_API_VERSION_2_3 or lower:
*
* Not valid. Can be assumed to be 100. Do not read this field.
*
* CAMERA_MODULE_API_VERSION_2_4 or higher:
*
* Always valid.
*/
int resource_cost;
/**
* An array of camera device IDs represented as NULL-terminated strings
* indicating other devices that cannot be simultaneously opened while this
* camera device is in use.
*
* This field is intended to be used to indicate that this camera device
* is a composite of several other camera devices, or otherwise has
* hardware dependencies that prohibit simultaneous usage. If there are no
* dependencies, a NULL may be returned in this field to indicate this.
*
* The camera service will never simultaneously open any of the devices
* in this list while this camera device is open.
*
* The strings pointed to in this field will not be cleaned up by the camera
     * service, and must remain valid while this device is plugged in.
*
* Version information (based on camera_module_t.common.module_api_version):
*
* CAMERA_MODULE_API_VERSION_2_3 or lower:
*
* Not valid. Can be assumed to be NULL. Do not read this field.
*
* CAMERA_MODULE_API_VERSION_2_4 or higher:
*
* Always valid.
*/
char** conflicting_devices;
/**
* The length of the array given in the conflicting_devices field.
*
* Version information (based on camera_module_t.common.module_api_version):
*
* CAMERA_MODULE_API_VERSION_2_3 or lower:
*
* Not valid. Can be assumed to be 0. Do not read this field.
*
* CAMERA_MODULE_API_VERSION_2_4 or higher:
*
* Always valid.
*/
size_t conflicting_devices_length;
} camera_info_t;
/**
* camera_device_status_t:
*
* The current status of the camera device, as provided by the HAL through the
* camera_module_callbacks.camera_device_status_change() call.
*
* At module load time, the framework will assume all camera devices are in the
* CAMERA_DEVICE_STATUS_PRESENT state. The HAL should invoke
* camera_module_callbacks::camera_device_status_change to inform the framework
* of any initially NOT_PRESENT devices.
*
* Allowed transitions:
* PRESENT -> NOT_PRESENT
* NOT_PRESENT -> ENUMERATING
* NOT_PRESENT -> PRESENT
* ENUMERATING -> PRESENT
* ENUMERATING -> NOT_PRESENT
*/
typedef enum camera_device_status {
/**
* The camera device is not currently connected, and opening it will return
* failure.
*
* Version information (based on camera_module_t.common.module_api_version):
*
* CAMERA_MODULE_API_VERSION_2_3 or lower:
*
* Calls to get_camera_info must still succeed, and provide the same information
* it would if the camera were connected.
*
* CAMERA_MODULE_API_VERSION_2_4:
*
     * The camera device at this status must return -EINVAL for the get_camera_info call,
* as the device is not connected.
*/
CAMERA_DEVICE_STATUS_NOT_PRESENT = 0,
/**
* The camera device is connected, and opening it will succeed.
*
* CAMERA_MODULE_API_VERSION_2_3 or lower:
*
* The information returned by get_camera_info cannot change due to this status
* change. By default, the framework will assume all devices are in this state.
*
* CAMERA_MODULE_API_VERSION_2_4:
*
* The information returned by get_camera_info will become valid after a device's
* status changes to this. By default, the framework will assume all devices are in
* this state.
*/
CAMERA_DEVICE_STATUS_PRESENT = 1,
/**
* The camera device is connected, but it is undergoing an enumeration and
* so opening the device will return -EBUSY.
*
* CAMERA_MODULE_API_VERSION_2_3 or lower:
*
* Calls to get_camera_info must still succeed, as if the camera was in the
* PRESENT status.
*
* CAMERA_MODULE_API_VERSION_2_4:
*
     * The camera device at this status must return -EINVAL for the get_camera_info call,
* as the device is not ready.
*/
CAMERA_DEVICE_STATUS_ENUMERATING = 2,
} camera_device_status_t;
/**
* torch_mode_status_t:
*
* The current status of the torch mode, as provided by the HAL through the
* camera_module_callbacks.torch_mode_status_change() call.
*
* The torch mode status of a camera device is applicable only when the camera
* device is present. The framework will not call set_torch_mode() to turn on
* torch mode of a camera device if the camera device is not present. At module
* load time, the framework will assume torch modes are in the
* TORCH_MODE_STATUS_AVAILABLE_OFF state if the camera device is present and
* android.flash.info.available is reported as true via get_camera_info() call.
*
* The behaviors of the camera HAL module that the framework expects in the
* following situations when a camera device's status changes:
* 1. A previously-disconnected camera device becomes connected.
* After camera_module_callbacks::camera_device_status_change() is invoked
* to inform the framework that the camera device is present, the framework
* will assume the camera device's torch mode is in
* TORCH_MODE_STATUS_AVAILABLE_OFF state. The camera HAL module does not need
* to invoke camera_module_callbacks::torch_mode_status_change() unless the
 * flash unit is unavailable for use by set_torch_mode().
*
* 2. A previously-connected camera becomes disconnected.
* After camera_module_callbacks::camera_device_status_change() is invoked
* to inform the framework that the camera device is not present, the
* framework will not call set_torch_mode() for the disconnected camera
* device until its flash unit becomes available again. The camera HAL
* module does not need to invoke
* camera_module_callbacks::torch_mode_status_change() separately to inform
* that the flash unit has become unavailable.
*
* 3. open() is called to open a camera device.
* The camera HAL module must invoke
* camera_module_callbacks::torch_mode_status_change() for all flash units
* that have entered TORCH_MODE_STATUS_NOT_AVAILABLE state and can not be
* turned on by calling set_torch_mode() anymore due to this open() call.
* open() must not trigger TORCH_MODE_STATUS_AVAILABLE_OFF before
* TORCH_MODE_STATUS_NOT_AVAILABLE for all flash units that have become
* unavailable.
*
* 4. close() is called to close a camera device.
* The camera HAL module must invoke
* camera_module_callbacks::torch_mode_status_change() for all flash units
* that have entered TORCH_MODE_STATUS_AVAILABLE_OFF state and can be turned
* on by calling set_torch_mode() again because of enough resources freed
* up by this close() call.
*
* Note that the framework calling set_torch_mode() successfully must trigger
* TORCH_MODE_STATUS_AVAILABLE_OFF or TORCH_MODE_STATUS_AVAILABLE_ON callback
* for the given camera device. Additionally it must trigger
* TORCH_MODE_STATUS_AVAILABLE_OFF callbacks for other previously-on torch
* modes if HAL cannot keep multiple torch modes on simultaneously.
*/
typedef enum torch_mode_status {
/**
* The flash unit is no longer available and the torch mode can not be
* turned on by calling set_torch_mode(). If the torch mode is on, it
* will be turned off by HAL before HAL calls torch_mode_status_change().
*/
TORCH_MODE_STATUS_NOT_AVAILABLE = 0,
/**
* A torch mode has become off and available to be turned on via
* set_torch_mode(). This may happen in the following
* cases:
* 1. After the resources to turn on the torch mode have become available.
* 2. After set_torch_mode() is called to turn off the torch mode.
* 3. After the framework turned on the torch mode of some other camera
* device and HAL had to turn off the torch modes of any camera devices
* that were previously on.
*/
TORCH_MODE_STATUS_AVAILABLE_OFF = 1,
/**
* A torch mode has become on and available to be turned off via
* set_torch_mode(). This can happen only after set_torch_mode() is called
* to turn on the torch mode.
*/
TORCH_MODE_STATUS_AVAILABLE_ON = 2,
} torch_mode_status_t;
/**
* Callback functions for the camera HAL module to use to inform the framework
* of changes to the camera subsystem.
*
* Version information (based on camera_module_t.common.module_api_version):
*
* Each callback is called only by HAL modules implementing the indicated
* version or higher of the HAL module API interface.
*
* CAMERA_MODULE_API_VERSION_2_1:
* camera_device_status_change()
*
* CAMERA_MODULE_API_VERSION_2_4:
* torch_mode_status_change()
*/
typedef struct camera_module_callbacks {
/**
* camera_device_status_change:
*
* Callback to the framework to indicate that the state of a specific camera
* device has changed. At module load time, the framework will assume all
* camera devices are in the CAMERA_DEVICE_STATUS_PRESENT state. The HAL
* must call this method to inform the framework of any initially
* NOT_PRESENT devices.
*
* This callback is added for CAMERA_MODULE_API_VERSION_2_1.
*
* camera_module_callbacks: The instance of camera_module_callbacks_t passed
* to the module with set_callbacks.
*
* camera_id: The ID of the camera device that has a new status.
*
* new_status: The new status code, one of the camera_device_status_t enums,
* or a platform-specific status.
*
*/
void (*camera_device_status_change)(const struct camera_module_callbacks*,
int camera_id,
int new_status);
/**
* torch_mode_status_change:
*
* Callback to the framework to indicate that the state of the torch mode
* of the flash unit associated with a specific camera device has changed.
* At module load time, the framework will assume the torch modes are in
* the TORCH_MODE_STATUS_AVAILABLE_OFF state if android.flash.info.available
* is reported as true via get_camera_info() call.
*
* This callback is added for CAMERA_MODULE_API_VERSION_2_4.
*
* camera_module_callbacks: The instance of camera_module_callbacks_t
* passed to the module with set_callbacks.
*
* camera_id: The ID of camera device whose flash unit has a new torch mode
* status.
*
* new_status: The new status code, one of the torch_mode_status_t enums.
*/
void (*torch_mode_status_change)(const struct camera_module_callbacks*,
const char* camera_id,
int new_status);
} camera_module_callbacks_t;
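/*
 * Illustrative sketch (not part of the original header): a minimal
 * framework-side implementation of the callbacks, of the kind that could be
 * passed to camera_module_t::set_callbacks(). The function and variable
 * names are hypothetical.
 */
static void example_device_status_change(const struct camera_module_callbacks* callbacks,
                                         int camera_id, int new_status)
{
    (void)callbacks;
    (void)camera_id;
    (void)new_status;  /* e.g. update an availability table here */
}
static void example_torch_status_change(const struct camera_module_callbacks* callbacks,
                                        const char* camera_id, int new_status)
{
    (void)callbacks;
    (void)camera_id;
    (void)new_status;  /* e.g. update a torch-state table here */
}
static const camera_module_callbacks_t example_callbacks = {
    example_device_status_change,  /* camera_device_status_change */
    example_torch_status_change,   /* torch_mode_status_change */
};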
typedef struct camera_module {
/**
* Common methods of the camera module. This *must* be the first member of
* camera_module as users of this structure will cast a hw_module_t to
* camera_module pointer in contexts where it's known the hw_module_t
* references a camera_module.
*
* The return values for common.methods->open for camera_module are:
*
* 0: On a successful open of the camera device.
*
* -ENODEV: The camera device cannot be opened due to an internal
* error.
*
* -EINVAL: The input arguments are invalid, i.e. the id is invalid,
* and/or the module is invalid.
*
* -EBUSY: The camera device was already opened for this camera id
* (by using this method or open_legacy),
* regardless of the device HAL version it was opened as.
*
* -EUSERS: The maximal number of camera devices that can be
* opened concurrently were opened already, either by
* this method or the open_legacy method.
*
* All other return values from common.methods->open will be treated as
* -ENODEV.
*/
hw_module_t common;
/**
* get_number_of_cameras:
*
* Returns the number of camera devices accessible through the camera
* module. The camera devices are numbered 0 through N-1, where N is the
* value returned by this call. The name of the camera device for open() is
* simply the number converted to a string. That is, "0" for camera ID 0,
* "1" for camera ID 1.
*
* Version information (based on camera_module_t.common.module_api_version):
*
* CAMERA_MODULE_API_VERSION_2_3 or lower:
*
* The value here must be static, and cannot change after the first call
* to this method.
*
* CAMERA_MODULE_API_VERSION_2_4 or higher:
*
* The value here must be static, and must count only built-in cameras,
* which have CAMERA_FACING_BACK or CAMERA_FACING_FRONT camera facing values
* (camera_info.facing). The HAL must not include the external cameras
* (camera_info.facing == CAMERA_FACING_EXTERNAL) into the return value
     * of this call. The framework will use the camera_device_status_change
     * callback to manage the number of external cameras.
*/
int (*get_number_of_cameras)(void);
/**
* get_camera_info:
*
* Return the static camera information for a given camera device. This
* information may not change for a camera device.
*
* Return values:
*
* 0: On a successful operation
*
* -ENODEV: The information cannot be provided due to an internal
* error.
*
* -EINVAL: The input arguments are invalid, i.e. the id is invalid,
* and/or the module is invalid.
*
* Version information (based on camera_module_t.common.module_api_version):
*
* CAMERA_MODULE_API_VERSION_2_4 or higher:
*
* When a camera is disconnected, its camera id becomes invalid. Calling this
* method with this invalid camera id will return -EINVAL and NULL camera
* static metadata (camera_info.static_camera_characteristics).
*/
int (*get_camera_info)(int camera_id, struct camera_info *info);
/**
* set_callbacks:
*
* Provide callback function pointers to the HAL module to inform framework
* of asynchronous camera module events. The framework will call this
* function once after initial camera HAL module load, after the
* get_number_of_cameras() method is called for the first time, and before
* any other calls to the module.
*
* Version information (based on camera_module_t.common.module_api_version):
*
* CAMERA_MODULE_API_VERSION_1_0, CAMERA_MODULE_API_VERSION_2_0:
*
* Not provided by HAL module. Framework may not call this function.
*
* CAMERA_MODULE_API_VERSION_2_1:
*
* Valid to be called by the framework.
*
* Return values:
*
* 0: On a successful operation
*
* -ENODEV: The operation cannot be completed due to an internal
* error.
*
* -EINVAL: The input arguments are invalid, i.e. the callbacks are
* null
*/
int (*set_callbacks)(const camera_module_callbacks_t *callbacks);
/**
* get_vendor_tag_ops:
*
* Get methods to query for vendor extension metadata tag information. The
* HAL should fill in all the vendor tag operation methods, or leave ops
* unchanged if no vendor tags are defined.
*
* The vendor_tag_ops structure used here is defined in:
* system/media/camera/include/system/vendor_tags.h
*
* Version information (based on camera_module_t.common.module_api_version):
*
* CAMERA_MODULE_API_VERSION_1_x/2_0/2_1:
* Not provided by HAL module. Framework may not call this function.
*
* CAMERA_MODULE_API_VERSION_2_2:
* Valid to be called by the framework.
*/
void (*get_vendor_tag_ops)(vendor_tag_ops_t* ops);
/**
* open_legacy:
*
* Open a specific legacy camera HAL device if multiple device HAL API
* versions are supported by this camera HAL module. For example, if the
* camera module supports both CAMERA_DEVICE_API_VERSION_1_0 and
* CAMERA_DEVICE_API_VERSION_3_2 device API for the same camera id,
* framework can call this function to open the camera device as
* CAMERA_DEVICE_API_VERSION_1_0 device.
*
* This is an optional method. A Camera HAL module does not need to support
* more than one device HAL version per device, and such modules may return
* -ENOSYS for all calls to this method. For all older HAL device API
* versions that are not supported, it may return -EOPNOTSUPP. In these
* cases, the framework will use the normal open() method
* (common.methods->open) instead.
*
* Version information (based on camera_module_t.common.module_api_version):
*
* CAMERA_MODULE_API_VERSION_1_x/2_0/2_1/2_2:
* Not provided by HAL module. Framework will not call this function.
*
* CAMERA_MODULE_API_VERSION_2_3:
* Valid to be called by the framework.
*
* Return values:
*
* 0: On a successful open of the camera device.
*
* -ENOSYS: This method is not supported.
*
* -EOPNOTSUPP: The requested HAL version is not supported by this method.
*
* -EINVAL: The input arguments are invalid, i.e. the id is invalid,
* and/or the module is invalid.
*
* -EBUSY: The camera device was already opened for this camera id
* (by using this method or common.methods->open method),
* regardless of the device HAL version it was opened as.
*
* -EUSERS: The maximum number of camera devices that can be
* opened concurrently has already been reached, either by
* this method or the common.methods->open method.
*/
int (*open_legacy)(const struct hw_module_t* module, const char* id,
uint32_t halVersion, struct hw_device_t** device);
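/*
* Illustrative sketch only: attempting a legacy open and falling back to the
* normal open() path. The module pointer and camera id are hypothetical.
*
*     struct hw_device_t *device = NULL;
*     int err = module->open_legacy(&module->common, "0",
*                                   CAMERA_DEVICE_API_VERSION_1_0, &device);
*     if (err == -ENOSYS || err == -EOPNOTSUPP) {
*         // Legacy open is unsupported here; use the normal open() path.
*         err = module->common.methods->open(&module->common, "0", &device);
*     }
*/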
/**
* set_torch_mode:
*
* Turn on or off the torch mode of the flash unit associated with a given
* camera ID. If the operation is successful, HAL must notify the framework
* torch state by invoking
* camera_module_callbacks.torch_mode_status_change() with the new state.
*
* The camera device has a higher priority accessing the flash unit. When
* there are any resource conflicts, such as open() is called to open a
* camera device, HAL module must notify the framework through
* camera_module_callbacks.torch_mode_status_change() that the
* torch mode has been turned off and the torch mode state has become
* TORCH_MODE_STATUS_NOT_AVAILABLE. When resources to turn on torch mode
* become available again, HAL module must notify the framework through
* camera_module_callbacks.torch_mode_status_change() that the torch mode
* state has become TORCH_MODE_STATUS_AVAILABLE_OFF for set_torch_mode() to
* be called.
*
* When the framework calls set_torch_mode() to turn on the torch mode of a
* flash unit, if HAL cannot keep multiple torch modes on simultaneously,
* HAL should turn off the torch mode that was turned on by
* a previous set_torch_mode() call and notify the framework that the torch
* mode state of that flash unit has become TORCH_MODE_STATUS_AVAILABLE_OFF.
*
* Version information (based on camera_module_t.common.module_api_version):
*
* CAMERA_MODULE_API_VERSION_1_x/2_0/2_1/2_2/2_3:
* Not provided by HAL module. Framework will not call this function.
*
* CAMERA_MODULE_API_VERSION_2_4:
* Valid to be called by the framework.
*
* Return values:
*
* 0: On a successful operation.
*
* -ENOSYS: The camera device does not support this operation. It is
* returned if and only if android.flash.info.available is
* false.
*
* -EBUSY: The camera device is already in use.
*
* -EUSERS: The resources needed to turn on the torch mode are not
* available, typically because other camera devices are
* holding the resources, making use of the flash unit
* impossible.
*
* -EINVAL: camera_id is invalid.
*
*/
int (*set_torch_mode)(const char* camera_id, bool enabled);
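/*
* Illustrative sketch only: turning on the torch for camera "0" and handling
* the documented error codes; the module pointer is hypothetical. The actual
* torch state arrives asynchronously via torch_mode_status_change().
*
*     int err = module->set_torch_mode("0", true);
*     if (err == -ENOSYS) {
*         // No flash unit: android.flash.info.available is false.
*     } else if (err == -EBUSY) {
*         // An opened camera device currently owns the flash unit.
*     }
*/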
/**
* init:
*
* This method is called by the camera service before any other methods
* are invoked, right after the camera HAL library has been successfully
* loaded. It may be left as NULL by the HAL module, if no initialization
* is needed.
*
* It can be used by HAL implementations to perform initialization and
* other one-time operations.
*
* Version information (based on camera_module_t.common.module_api_version):
*
* CAMERA_MODULE_API_VERSION_1_x/2_0/2_1/2_2/2_3:
* Not provided by HAL module. Framework will not call this function.
*
* CAMERA_MODULE_API_VERSION_2_4:
* If not NULL, will always be called by the framework once after the HAL
* module is loaded, before any other HAL module method is called.
*
* Return values:
*
* 0: On a successful operation.
*
* -ENODEV: Initialization cannot be completed due to an internal
* error. The HAL must be assumed to be in a nonfunctional
* state.
*
*/
int (*init)();
/* reserved for future use */
void* reserved[5];
} camera_module_t;
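/*
* Illustrative sketch only: a typical framework-side startup sequence built
* from the methods above. hw_get_module() is declared in hardware/hardware.h;
* framework_callbacks (from the callbacks sketch above) and the error
* handling are hypothetical.
*
*     const struct hw_module_t *hmi = NULL;
*     if (hw_get_module(CAMERA_HARDWARE_MODULE_ID, &hmi) != 0)
*         return;
*     const camera_module_t *module = (const camera_module_t *) hmi;
*
*     if (module->init != NULL && module->init() != 0)
*         return;  // -ENODEV: the HAL is in a nonfunctional state.
*
*     int count = module->get_number_of_cameras();
*     module->set_callbacks(&framework_callbacks);
*
*     for (int id = 0; id < count; id++) {
*         struct camera_info info;
*         if (module->get_camera_info(id, &info) == 0) {
*             // info.facing and info.device_version select the device
*             // handler version.
*         }
*     }
*
*     struct hw_device_t *device = NULL;
*     module->common.methods->open(&module->common, "0", &device);
*/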
__END_DECLS
#endif /* ANDROID_INCLUDE_CAMERA_COMMON_H */
|
0 | repos/libcamera/include/android/hardware/libhardware/include | repos/libcamera/include/android/hardware/libhardware/include/hardware/camera3.h | /* SPDX-License-Identifier: Apache-2.0 */
/*
* Copyright (C) 2013-2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_INCLUDE_CAMERA3_H
#define ANDROID_INCLUDE_CAMERA3_H
#include <system/camera_metadata.h>
#include "camera_common.h"
/**
* Camera device HAL 3.5 [ CAMERA_DEVICE_API_VERSION_3_5 ]
*
* This is the current recommended version of the camera device HAL.
*
* Supports the android.hardware.Camera API, and as of v3.2, the
* android.hardware.camera2 API as LIMITED or above hardware level.
*
* Camera devices that support this version of the HAL must return
* CAMERA_DEVICE_API_VERSION_3_5 in camera_device_t.common.version and in
* camera_info_t.device_version (from camera_module_t.get_camera_info).
*
* CAMERA_DEVICE_API_VERSION_3_3 and above:
* Camera modules that may contain version 3.3 or above devices must
* implement at least version 2.2 of the camera module interface (as defined
* by camera_module_t.common.module_api_version).
*
* CAMERA_DEVICE_API_VERSION_3_2:
* Camera modules that may contain version 3.2 devices must implement at
* least version 2.2 of the camera module interface (as defined by
* camera_module_t.common.module_api_version).
*
* <= CAMERA_DEVICE_API_VERSION_3_1:
* Camera modules that may contain version 3.1 (or 3.0) devices must
* implement at least version 2.0 of the camera module interface
* (as defined by camera_module_t.common.module_api_version).
*
* See camera_common.h for more versioning details.
*
* Documentation index:
* S1. Version history
* S2. Startup and operation sequencing
* S3. Operational modes
* S4. 3A modes and state machines
* S5. Cropping
* S6. Error management
* S7. Key Performance Indicator (KPI) glossary
* S8. Sample Use Cases
* S9. Notes on Controls and Metadata
* S10. Reprocessing flow and controls
*/
/**
* S1. Version history:
*
* 1.0: Initial Android camera HAL (Android 4.0) [camera.h]:
*
* - Converted from C++ CameraHardwareInterface abstraction layer.
*
* - Supports android.hardware.Camera API.
*
* 2.0: Initial release of expanded-capability HAL (Android 4.2) [camera2.h]:
*
* - Sufficient for implementing existing android.hardware.Camera API.
*
* - Allows for ZSL queue in camera service layer
*
* - Not tested for any new features such as manual capture control, Bayer RAW
* capture, reprocessing of RAW data.
*
* 3.0: First revision of expanded-capability HAL:
*
* - Major version change since the ABI is completely different. No change to
* the required hardware capabilities or operational model from 2.0.
*
* - Reworked input request and stream queue interfaces: Framework calls into
* HAL with next request and stream buffers already dequeued. Sync framework
* support is included, necessary for efficient implementations.
*
* - Moved triggers into requests, most notifications into results.
*
* - Consolidated all callbacks to the framework into one structure, and all
* setup methods into a single initialize() call.
*
* - Made stream configuration into a single call to simplify stream
* management. Bidirectional streams replace STREAM_FROM_STREAM construct.
*
* - Limited mode semantics for older/limited hardware devices.
*
* 3.1: Minor revision of expanded-capability HAL:
*
* - configure_streams passes consumer usage flags to the HAL.
*
* - flush call to drop all in-flight requests/buffers as fast as possible.
*
* 3.2: Minor revision of expanded-capability HAL:
*
* - Deprecates get_metadata_vendor_tag_ops. Please use get_vendor_tag_ops
* in camera_common.h instead.
*
* - register_stream_buffers deprecated. All gralloc buffers provided
* by framework to HAL in process_capture_request may be new at any time.
*
* - add partial result support. process_capture_result may be called
* multiple times with a subset of the available result before the full
* result is available.
*
* - add manual template to camera3_request_template. The applications may
* use this template to control the capture settings directly.
*
* - Rework the bidirectional and input stream specifications.
*
* - change the input buffer return path. The buffer is returned in
* process_capture_result instead of process_capture_request.
*
* 3.3: Minor revision of expanded-capability HAL:
*
* - OPAQUE and YUV reprocessing API updates.
*
* - Basic support for depth output buffers.
*
* - Addition of data_space field to camera3_stream_t.
*
* - Addition of rotation field to camera3_stream_t.
*
* - Addition of camera3 stream configuration operation mode to camera3_stream_configuration_t
*
* 3.4: Minor additions to supported metadata and changes to data_space support
*
* - Add ANDROID_SENSOR_OPAQUE_RAW_SIZE static metadata as mandatory if
* RAW_OPAQUE format is supported.
*
* - Add ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST_RANGE static metadata as
* mandatory if any RAW format is supported
*
* - Switch camera3_stream_t data_space field to a more flexible definition,
* using the version 0 definition of dataspace encoding.
*
* - General metadata additions which are available to use for HALv3.2 or
* newer:
* - ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_3
* - ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST
* - ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST_RANGE
* - ANDROID_SENSOR_DYNAMIC_BLACK_LEVEL
* - ANDROID_SENSOR_DYNAMIC_WHITE_LEVEL
* - ANDROID_SENSOR_OPAQUE_RAW_SIZE
* - ANDROID_SENSOR_OPTICAL_BLACK_REGIONS
*
* 3.5: Minor revisions to support session parameters and logical multi camera:
*
* - Add ANDROID_REQUEST_AVAILABLE_SESSION_KEYS static metadata, which is
* optional for implementations that want to support session parameters. If support is
* needed, then the HAL should populate the list with all available capture request keys
* that can cause severe processing delays when modified by the client. Typical examples
* include parameters that require time-consuming HW re-configuration or internal camera
* pipeline update.
*
* - Add a session parameter field to camera3_stream_configuration which can be populated
* by clients with initial values for the keys found in ANDROID_REQUEST_AVAILABLE_SESSION_KEYS.
*
* - Metadata additions for logical multi camera capability:
* - ANDROID_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA
* - ANDROID_LOGICAL_MULTI_CAMERA_PHYSICAL_IDS
* - ANDROID_LOGICAL_MULTI_CAMERA_SYNC_TYPE
*
* - Add physical camera id field in camera3_stream, so that for a logical
* multi camera, the application has the option to specify which physical camera
* a particular stream is configured on.
*
* - Add physical camera id and settings field in camera3_capture_request, so that
* for a logical multi camera, the application has the option to specify individual
* settings for a particular physical device.
*
*/
/**
* S2. Startup and general expected operation sequence:
*
* 1. Framework calls camera_module_t->common.open(), which returns a
* hardware_device_t structure.
*
* 2. Framework inspects the hardware_device_t->version field, and instantiates
* the appropriate handler for that version of the camera hardware device. In
* case the version is CAMERA_DEVICE_API_VERSION_3_0, the device is cast to
* a camera3_device_t.
*
* 3. Framework calls camera3_device_t->ops->initialize() with the framework
* callback function pointers. This will only be called this one time after
* open(), before any other functions in the ops structure are called.
*
* 4. The framework calls camera3_device_t->ops->configure_streams() with a list
* of input/output streams to the HAL device.
*
* 5. <= CAMERA_DEVICE_API_VERSION_3_1:
*
* The framework allocates gralloc buffers and calls
* camera3_device_t->ops->register_stream_buffers() for at least one of the
* output streams listed in configure_streams. The same stream is registered
* only once.
*
* >= CAMERA_DEVICE_API_VERSION_3_2:
*
* camera3_device_t->ops->register_stream_buffers() is not called and must
* be NULL.
*
* 6. The framework requests default settings for some number of use cases with
* calls to camera3_device_t->ops->construct_default_request_settings(). This
* may occur any time after step 3.
*
* 7. The framework constructs and sends the first capture request to the HAL,
* with settings based on one of the sets of default settings, and with at
* least one output stream, which has been registered earlier by the
* framework. This is sent to the HAL with
* camera3_device_t->ops->process_capture_request(). The HAL must block the
* return of this call until it is ready for the next request to be sent.
*
* >= CAMERA_DEVICE_API_VERSION_3_2:
*
* The buffer_handle_t provided in the camera3_stream_buffer_t array
* in the camera3_capture_request_t may be new and never-before-seen
* by the HAL on any given new request.
*
* 8. The framework continues to submit requests, and call
* construct_default_request_settings to get default settings buffers for
* other use cases.
*
* <= CAMERA_DEVICE_API_VERSION_3_1:
*
* The framework may call register_stream_buffers() at this time for
* not-yet-registered streams.
*
* 9. When the capture of a request begins (sensor starts exposing for the
* capture) or processing a reprocess request begins, the HAL
* calls camera3_callback_ops_t->notify() with the SHUTTER event, including
* the frame number and the timestamp for start of exposure. For a reprocess
* request, the timestamp must be the start of exposure of the input image
* which can be looked up with android.sensor.timestamp from
* camera3_capture_request_t.settings when process_capture_request() is
* called.
*
* <= CAMERA_DEVICE_API_VERSION_3_1:
*
* This notify call must be made before the first call to
* process_capture_result() for that frame number.
*
* >= CAMERA_DEVICE_API_VERSION_3_2:
*
* The camera3_callback_ops_t->notify() call with the SHUTTER event should
* be made as early as possible since the framework will be unable to
* deliver gralloc buffers to the application layer (for that frame) until
* it has a valid timestamp for the start of exposure (or the input image's
* start of exposure for a reprocess request).
*
* Both partial metadata results and the gralloc buffers may be sent to the
* framework at any time before or after the SHUTTER event.
*
* 10. After some pipeline delay, the HAL begins to return completed captures to
* the framework with camera3_callback_ops_t->process_capture_result(). These
* are returned in the same order as the requests were submitted. Multiple
* requests can be in flight at once, depending on the pipeline depth of the
* camera HAL device.
*
* >= CAMERA_DEVICE_API_VERSION_3_2:
*
* Once a buffer is returned by process_capture_result as part of the
* camera3_stream_buffer_t array, and the fence specified by release_fence
* has been signaled (this is a no-op for -1 fences), the ownership of that
* buffer is considered to be transferred back to the framework. After that,
* the HAL must no longer retain that particular buffer, and the
* framework may clean up the memory for it immediately.
*
* process_capture_result may be called multiple times for a single frame,
* each time with a new disjoint piece of metadata and/or set of gralloc
* buffers. The framework will accumulate these partial metadata results
* into one result.
*
* In particular, it is legal for a process_capture_result to be called
* simultaneously for both a frame N and a frame N+1 as long as the
* above rule holds for gralloc buffers (both input and output).
*
* 11. After some time, the framework may stop submitting new requests, wait for
* the existing captures to complete (all buffers filled, all results
* returned), and then call configure_streams() again. This resets the camera
* hardware and pipeline for a new set of input/output streams. Some streams
* may be reused from the previous configuration; if these streams' buffers
* had already been registered with the HAL, they will not be registered
* again. The framework then continues from step 7, if at least one
* registered output stream remains (otherwise, step 5 is required first).
*
* 12. Alternatively, the framework may call camera3_device_t->common->close()
* to end the camera session. This may be called at any time when no other
* calls from the framework are active, although the call may block until all
* in-flight captures have completed (all results returned, all buffers
* filled). After the close call returns, no more calls to the
* camera3_callback_ops_t functions are allowed from the HAL. Once the
* close() call is underway, the framework may not call any other HAL device
* functions.
*
* 13. In case of an error or other asynchronous event, the HAL must call
* camera3_callback_ops_t->notify() with the appropriate error/event
* message. After returning from a fatal device-wide error notification, the
* HAL should act as if close() had been called on it. However, the HAL must
* either cancel or complete all outstanding captures before calling
* notify(), so that once notify() is called with a fatal error, the
* framework will not receive further callbacks from the device. Methods
* besides close() should return -ENODEV or NULL after the notify() method
* returns from a fatal error message.
*/
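/*
* Illustrative sketch only, condensing steps 3-10 above for a
* CAMERA_DEVICE_API_VERSION_3_2 or newer device. The device pointer, callback
* instance, stream size, and gralloc buffer are hypothetical.
*
*     camera3_device_t *dev = (camera3_device_t *) device;  // from open()
*     dev->ops->initialize(dev, &callback_ops);              // step 3
*
*     camera3_stream_t preview = {
*         .stream_type = CAMERA3_STREAM_OUTPUT,
*         .width = 1920, .height = 1080,
*         .format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
*     };
*     camera3_stream_t *streams[] = { &preview };
*     camera3_stream_configuration_t config = {
*         .num_streams = 1,
*         .streams = streams,
*     };
*     dev->ops->configure_streams(dev, &config);             // step 4
*
*     const camera_metadata_t *settings =
*         dev->ops->construct_default_request_settings(
*             dev, CAMERA3_TEMPLATE_PREVIEW);                // step 6
*
*     camera3_stream_buffer_t out = {
*         .stream = &preview,
*         .buffer = &gralloc_buffer,   // dequeued by the framework
*         .acquire_fence = -1, .release_fence = -1,
*     };
*     camera3_capture_request_t request = {
*         .frame_number = 0,
*         .settings = settings,
*         .num_output_buffers = 1,
*         .output_buffers = &out,
*     };
*     dev->ops->process_capture_request(dev, &request);      // step 7
*     // Results then arrive asynchronously through notify() and
*     // process_capture_result() (steps 9-10).
*/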
/**
* S3. Operational modes:
*
* The camera 3 HAL device can implement one of two possible operational modes;
* limited and full. Full support is expected from new higher-end
* devices. Limited mode has hardware requirements roughly in line with those
* for a camera HAL device v1 implementation, and is expected from older or
* inexpensive devices. Full is a strict superset of limited, and they share the
* same essential operational flow, as documented above.
*
* The HAL must indicate its level of support with the
* android.info.supportedHardwareLevel static metadata entry, with 0 indicating
* limited mode, and 1 indicating full mode support.
*
* Roughly speaking, limited-mode devices do not allow for application control
* of capture settings (3A control only), high-rate capture of high-resolution
* images, raw sensor readout, or support for YUV output streams above maximum
* recording resolution (JPEG only for large images).
*
* ** Details of limited mode behavior:
*
* - Limited-mode devices do not need to implement accurate synchronization
* between capture request settings and the actual image data
* captured. Instead, changes to settings may take effect some time in the
* future, and possibly not for the same output frame for each settings
* entry. Rapid changes in settings may result in some settings never being
* used for a capture. However, captures that include high-resolution output
* buffers ( > 1080p ) have to use the settings as specified (but see below
* for processing rate).
*
* - Limited-mode devices do not need to support most of the
* settings/result/static info metadata. Specifically, only the following settings
* are expected to be consumed or produced by a limited-mode HAL device:
*
* android.control.aeAntibandingMode (controls and dynamic)
* android.control.aeExposureCompensation (controls and dynamic)
* android.control.aeLock (controls and dynamic)
* android.control.aeMode (controls and dynamic)
* android.control.aeRegions (controls and dynamic)
* android.control.aeTargetFpsRange (controls and dynamic)
* android.control.aePrecaptureTrigger (controls and dynamic)
* android.control.afMode (controls and dynamic)
* android.control.afRegions (controls and dynamic)
* android.control.awbLock (controls and dynamic)
* android.control.awbMode (controls and dynamic)
* android.control.awbRegions (controls and dynamic)
* android.control.captureIntent (controls and dynamic)
* android.control.effectMode (controls and dynamic)
* android.control.mode (controls and dynamic)
* android.control.sceneMode (controls and dynamic)
* android.control.videoStabilizationMode (controls and dynamic)
* android.control.aeAvailableAntibandingModes (static)
* android.control.aeAvailableModes (static)
* android.control.aeAvailableTargetFpsRanges (static)
* android.control.aeCompensationRange (static)
* android.control.aeCompensationStep (static)
* android.control.afAvailableModes (static)
* android.control.availableEffects (static)
* android.control.availableSceneModes (static)
* android.control.availableVideoStabilizationModes (static)
* android.control.awbAvailableModes (static)
* android.control.maxRegions (static)
* android.control.sceneModeOverrides (static)
* android.control.aeState (dynamic)
* android.control.afState (dynamic)
* android.control.awbState (dynamic)
*
* android.flash.mode (controls and dynamic)
* android.flash.info.available (static)
*
* android.info.supportedHardwareLevel (static)
*
* android.jpeg.gpsCoordinates (controls and dynamic)
* android.jpeg.gpsProcessingMethod (controls and dynamic)
* android.jpeg.gpsTimestamp (controls and dynamic)
* android.jpeg.orientation (controls and dynamic)
* android.jpeg.quality (controls and dynamic)
* android.jpeg.thumbnailQuality (controls and dynamic)
* android.jpeg.thumbnailSize (controls and dynamic)
* android.jpeg.availableThumbnailSizes (static)
* android.jpeg.maxSize (static)
*
* android.lens.info.minimumFocusDistance (static)
*
* android.request.id (controls and dynamic)
*
* android.scaler.cropRegion (controls and dynamic)
* android.scaler.availableStreamConfigurations (static)
* android.scaler.availableMinFrameDurations (static)
* android.scaler.availableStallDurations (static)
* android.scaler.availableMaxDigitalZoom (static)
* android.scaler.maxDigitalZoom (static)
* android.scaler.croppingType (static)
*
* android.sensor.orientation (static)
* android.sensor.timestamp (dynamic)
*
* android.statistics.faceDetectMode (controls and dynamic)
* android.statistics.info.availableFaceDetectModes (static)
* android.statistics.faceIds (dynamic)
* android.statistics.faceLandmarks (dynamic)
* android.statistics.faceRectangles (dynamic)
* android.statistics.faceScores (dynamic)
*
* android.sync.frameNumber (dynamic)
* android.sync.maxLatency (static)
*
* - Captures in limited mode that include high-resolution (> 1080p) output
* buffers may block in process_capture_request() until all the output buffers
* have been filled. A full-mode HAL device must process sequences of
* high-resolution requests at the rate indicated in the static metadata for
* that pixel format. The HAL must still call process_capture_result() to
* provide the output; the framework must simply be prepared for
* process_capture_request() to block until after process_capture_result() for
* that request completes for high-resolution captures for limited-mode
* devices.
*
* - Full-mode devices must additionally support the following capabilities:
* - 30fps at maximum resolution is preferred, more than 20fps is required.
* - Per frame control (android.sync.maxLatency == PER_FRAME_CONTROL).
* - Sensor manual control metadata. See MANUAL_SENSOR defined in
* android.request.availableCapabilities.
* - Post-processing manual control metadata. See MANUAL_POST_PROCESSING defined
* in android.request.availableCapabilities.
*
*/
/**
* S4. 3A modes and state machines:
*
* While the actual 3A algorithms are up to the HAL implementation, a high-level
* state machine description is defined by the HAL interface, to allow the HAL
* device and the framework to communicate about the current state of 3A, and to
* trigger 3A events.
*
* When the device is opened, all the individual 3A states must be
* STATE_INACTIVE. Stream configuration does not reset 3A. For example, locked
* focus must be maintained across the configure_streams() call.
*
* Triggering a 3A action involves simply setting the relevant trigger entry in
* the settings for the next request to indicate start of trigger. For example,
* the trigger for starting an autofocus scan is setting the entry
* ANDROID_CONTROL_AF_TRIGGER to ANDROID_CONTROL_AF_TRIGGER_START for one
* request, and cancelling an autofocus scan is triggered by setting
* ANDROID_CONTROL_AF_TRIGGER to ANDROID_CONTROL_AF_TRIGGER_CANCEL. Otherwise,
* the entry will not exist, or be set to ANDROID_CONTROL_AF_TRIGGER_IDLE. Each
* request with a trigger entry set to a non-IDLE value will be treated as an
* independent triggering event.
*
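* As an illustrative sketch only (assuming the camera_metadata C API from
* system/media/camera/include/system/camera_metadata.h), the framework might
* add a one-shot AF trigger to the next request's settings as follows; the
* request_settings buffer is hypothetical:
*
*     uint8_t trigger = ANDROID_CONTROL_AF_TRIGGER_START;
*     add_camera_metadata_entry(request_settings,
*                               ANDROID_CONTROL_AF_TRIGGER, &trigger, 1);
*     // Later requests go back to ANDROID_CONTROL_AF_TRIGGER_IDLE, or omit
*     // the entry entirely.
*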
* At the top level, 3A is controlled by the ANDROID_CONTROL_MODE setting, which
* selects between no 3A (ANDROID_CONTROL_MODE_OFF), normal AUTO mode
* (ANDROID_CONTROL_MODE_AUTO), and using the scene mode setting
* (ANDROID_CONTROL_USE_SCENE_MODE).
*
* - In OFF mode, each of the individual AE/AF/AWB modes are effectively OFF,
* and none of the capture controls may be overridden by the 3A routines.
*
* - In AUTO mode, Auto-focus, auto-exposure, and auto-whitebalance all run
* their own independent algorithms, and have their own mode, state, and
* trigger metadata entries, as listed in the next section.
*
* - In USE_SCENE_MODE, the value of the ANDROID_CONTROL_SCENE_MODE entry must
* be used to determine the behavior of 3A routines. In SCENE_MODEs other than
* FACE_PRIORITY, the HAL must override the values of
* ANDROID_CONTROL_AE/AWB/AF_MODE to be the mode it prefers for the selected
* SCENE_MODE. For example, the HAL may prefer SCENE_MODE_NIGHT to use
* CONTINUOUS_FOCUS AF mode. Any user selection of AE/AWB/AF_MODE must be
* ignored while these scene modes are active.
*
* - For SCENE_MODE_FACE_PRIORITY, the AE/AWB/AF_MODE controls work as in
* ANDROID_CONTROL_MODE_AUTO, but the 3A routines must bias toward metering
* and focusing on any detected faces in the scene.
*
* S4.1. Auto-focus settings and result entries:
*
* Main metadata entries:
*
* ANDROID_CONTROL_AF_MODE: Control for selecting the current autofocus
* mode. Set by the framework in the request settings.
*
* AF_MODE_OFF: AF is disabled; the framework/app directly controls lens
* position.
*
* AF_MODE_AUTO: Single-sweep autofocus. No lens movement unless AF is
* triggered.
*
* AF_MODE_MACRO: Single-sweep up-close autofocus. No lens movement unless
* AF is triggered.
*
* AF_MODE_CONTINUOUS_VIDEO: Smooth continuous focusing, for recording
* video. Triggering immediately locks focus in current
* position. Canceling resumes continuous focusing.
*
* AF_MODE_CONTINUOUS_PICTURE: Fast continuous focusing, for
* zero-shutter-lag still capture. Triggering locks focus once currently
* active sweep concludes. Canceling resumes continuous focusing.
*
* AF_MODE_EDOF: Advanced extended depth of field focusing. There is no
* autofocus scan, so triggering one or canceling one has no effect.
* Images are focused automatically by the HAL.
*
* ANDROID_CONTROL_AF_STATE: Dynamic metadata describing the current AF
* algorithm state, reported by the HAL in the result metadata.
*
* AF_STATE_INACTIVE: No focusing has been done, or algorithm was
* reset. Lens is not moving. Always the state for MODE_OFF or MODE_EDOF.
* When the device is opened, it must start in this state.
*
* AF_STATE_PASSIVE_SCAN: A continuous focus algorithm is currently scanning
* for good focus. The lens is moving.
*
* AF_STATE_PASSIVE_FOCUSED: A continuous focus algorithm believes it is
* well focused. The lens is not moving. The HAL may spontaneously leave
* this state.
*
* AF_STATE_PASSIVE_UNFOCUSED: A continuous focus algorithm believes it is
* not well focused. The lens is not moving. The HAL may spontaneously
* leave this state.
*
* AF_STATE_ACTIVE_SCAN: A scan triggered by the user is underway.
*
* AF_STATE_FOCUSED_LOCKED: The AF algorithm believes it is focused. The
* lens is not moving.
*
* AF_STATE_NOT_FOCUSED_LOCKED: The AF algorithm has been unable to
* focus. The lens is not moving.
*
* ANDROID_CONTROL_AF_TRIGGER: Control for starting an autofocus scan, the
* meaning of which is mode- and state- dependent. Set by the framework in
* the request settings.
*
* AF_TRIGGER_IDLE: No current trigger.
*
* AF_TRIGGER_START: Trigger start of AF scan. Effect is mode and state
* dependent.
*
* AF_TRIGGER_CANCEL: Cancel current AF scan if any, and reset algorithm to
* default.
*
* Additional metadata entries:
*
* ANDROID_CONTROL_AF_REGIONS: Control for selecting the regions of the FOV
* that should be used to determine good focus. This applies to all AF
* modes that scan for focus. Set by the framework in the request
* settings.
*
* S4.2. Auto-exposure settings and result entries:
*
* Main metadata entries:
*
* ANDROID_CONTROL_AE_MODE: Control for selecting the current auto-exposure
* mode. Set by the framework in the request settings.
*
* AE_MODE_OFF: Autoexposure is disabled; the user controls exposure, gain,
* frame duration, and flash.
*
* AE_MODE_ON: Standard autoexposure, with flash control disabled. User may
* set flash to fire or to torch mode.
*
* AE_MODE_ON_AUTO_FLASH: Standard autoexposure, with flash on at HAL's
* discretion for precapture and still capture. User control of flash
* disabled.
*
* AE_MODE_ON_ALWAYS_FLASH: Standard autoexposure, with flash always fired
* for capture, and at HAL's discretion for precapture. User control of
* flash disabled.
*
* AE_MODE_ON_AUTO_FLASH_REDEYE: Standard autoexposure, with flash on at
* HAL's discretion for precapture and still capture. Use a flash burst
* at end of precapture sequence to reduce redeye in the final
* picture. User control of flash disabled.
*
* ANDROID_CONTROL_AE_STATE: Dynamic metadata describing the current AE
* algorithm state, reported by the HAL in the result metadata.
*
* AE_STATE_INACTIVE: Initial AE state after mode switch. When the device is
* opened, it must start in this state.
*
* AE_STATE_SEARCHING: AE is not converged to a good value, and is adjusting
* exposure parameters.
*
* AE_STATE_CONVERGED: AE has found good exposure values for the current
* scene, and the exposure parameters are not changing. HAL may
* spontaneously leave this state to search for better solution.
*
* AE_STATE_LOCKED: AE has been locked with the AE_LOCK control. Exposure
* values are not changing.
*
* AE_STATE_FLASH_REQUIRED: The HAL has converged exposure, but believes
* flash is required for a sufficiently bright picture. Used for
* determining if a zero-shutter-lag frame can be used.
*
* AE_STATE_PRECAPTURE: The HAL is in the middle of a precapture
* sequence. Depending on AE mode, this mode may involve firing the
* flash for metering, or a burst of flash pulses for redeye reduction.
*
* ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER: Control for starting a metering
* sequence before capturing a high-quality image. Set by the framework in
* the request settings.
*
* PRECAPTURE_TRIGGER_IDLE: No current trigger.
*
* PRECAPTURE_TRIGGER_START: Start a precapture sequence. The HAL should
* use the subsequent requests to measure good exposure/white balance
* for an upcoming high-resolution capture.
*
* Additional metadata entries:
*
* ANDROID_CONTROL_AE_LOCK: Control for locking AE controls to their current
* values
*
* ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION: Control for adjusting AE
* algorithm target brightness point.
*
* ANDROID_CONTROL_AE_TARGET_FPS_RANGE: Control for selecting the target frame
* rate range for the AE algorithm. The AE routine cannot change the frame
* rate to be outside these bounds.
*
* ANDROID_CONTROL_AE_REGIONS: Control for selecting the regions of the FOV
* that should be used to determine good exposure levels. This applies to
* all AE modes besides OFF.
*
* S4.3. Auto-whitebalance settings and result entries:
*
* Main metadata entries:
*
* ANDROID_CONTROL_AWB_MODE: Control for selecting the current white-balance
* mode.
*
* AWB_MODE_OFF: Auto-whitebalance is disabled. User controls color matrix.
*
* AWB_MODE_AUTO: Automatic white balance is enabled; 3A controls color
* transform, possibly using more complex transforms than a simple
* matrix.
*
* AWB_MODE_INCANDESCENT: Fixed white balance settings good for indoor
* incandescent (tungsten) lighting, roughly 2700K.
*
* AWB_MODE_FLUORESCENT: Fixed white balance settings good for fluorescent
* lighting, roughly 5000K.
*
* AWB_MODE_WARM_FLUORESCENT: Fixed white balance settings good for
* fluorescent lighting, roughly 3000K.
*
* AWB_MODE_DAYLIGHT: Fixed white balance settings good for daylight,
* roughly 5500K.
*
* AWB_MODE_CLOUDY_DAYLIGHT: Fixed white balance settings good for clouded
* daylight, roughly 6500K.
*
* AWB_MODE_TWILIGHT: Fixed white balance settings good for
* near-sunset/sunrise, roughly 15000K.
*
* AWB_MODE_SHADE: Fixed white balance settings good for areas indirectly
* lit by the sun, roughly 7500K.
*
* ANDROID_CONTROL_AWB_STATE: Dynamic metadata describing the current AWB
* algorithm state, reported by the HAL in the result metadata.
*
* AWB_STATE_INACTIVE: Initial AWB state after mode switch. When the device
* is opened, it must start in this state.
*
* AWB_STATE_SEARCHING: AWB is not converged to a good value, and is
* changing color adjustment parameters.
*
* AWB_STATE_CONVERGED: AWB has found good color adjustment values for the
* current scene, and the parameters are not changing. HAL may
* spontaneously leave this state to search for better solution.
*
* AWB_STATE_LOCKED: AWB has been locked with the AWB_LOCK control. Color
* adjustment values are not changing.
*
* Additional metadata entries:
*
* ANDROID_CONTROL_AWB_LOCK: Control for locking AWB color adjustments to
* their current values.
*
* ANDROID_CONTROL_AWB_REGIONS: Control for selecting the regions of the FOV
* that should be used to determine good color balance. This applies only
* to auto-WB mode.
*
* S4.4. General state machine transition notes
*
* Switching between AF, AE, or AWB modes always resets the algorithm's state
* to INACTIVE. Similarly, switching between CONTROL_MODE or
* CONTROL_SCENE_MODE if CONTROL_MODE == USE_SCENE_MODE resets all the
* algorithm states to INACTIVE.
*
* The tables below are per-mode.
*
* S4.5. AF state machines
*
* when enabling AF or changing AF mode
*| state              | trans. cause  | new state          | notes            |
*+--------------------+---------------+--------------------+------------------+
*| Any                | AF mode change| INACTIVE           |                  |
*+--------------------+---------------+--------------------+------------------+
*
* mode = AF_MODE_OFF or AF_MODE_EDOF
*| state              | trans. cause  | new state          | notes            |
*+--------------------+---------------+--------------------+------------------+
*| INACTIVE           |               | INACTIVE           | Never changes    |
*+--------------------+---------------+--------------------+------------------+
*
* mode = AF_MODE_AUTO or AF_MODE_MACRO
*| state              | trans. cause  | new state          | notes            |
*+--------------------+---------------+--------------------+------------------+
*| INACTIVE           | AF_TRIGGER    | ACTIVE_SCAN        | Start AF sweep   |
*|                    |               |                    | Lens now moving  |
*+--------------------+---------------+--------------------+------------------+
*| ACTIVE_SCAN        | AF sweep done | FOCUSED_LOCKED     | If AF successful |
*|                    |               |                    | Lens now locked  |
*+--------------------+---------------+--------------------+------------------+
*| ACTIVE_SCAN        | AF sweep done | NOT_FOCUSED_LOCKED | If AF failed     |
*|                    |               |                    | Lens now locked  |
*+--------------------+---------------+--------------------+------------------+
*| ACTIVE_SCAN        | AF_CANCEL     | INACTIVE           | Cancel/reset AF  |
*|                    |               |                    | Lens now locked  |
*+--------------------+---------------+--------------------+------------------+
*| FOCUSED_LOCKED     | AF_CANCEL     | INACTIVE           | Cancel/reset AF  |
*+--------------------+---------------+--------------------+------------------+
*| FOCUSED_LOCKED     | AF_TRIGGER    | ACTIVE_SCAN        | Start new sweep  |
*|                    |               |                    | Lens now moving  |
*+--------------------+---------------+--------------------+------------------+
*| NOT_FOCUSED_LOCKED | AF_CANCEL     | INACTIVE           | Cancel/reset AF  |
*+--------------------+---------------+--------------------+------------------+
*| NOT_FOCUSED_LOCKED | AF_TRIGGER    | ACTIVE_SCAN        | Start new sweep  |
*|                    |               |                    | Lens now moving  |
*+--------------------+---------------+--------------------+------------------+
*| All states         | mode change   | INACTIVE           |                  |
*+--------------------+---------------+--------------------+------------------+
*
* mode = AF_MODE_CONTINUOUS_VIDEO
*| state              | trans. cause  | new state          | notes            |
*+--------------------+---------------+--------------------+------------------+
*| INACTIVE           | HAL initiates | PASSIVE_SCAN       | Start AF scan    |
*|                    | new scan      |                    | Lens now moving  |
*+--------------------+---------------+--------------------+------------------+
*| INACTIVE           | AF_TRIGGER    | NOT_FOCUSED_LOCKED | AF state query   |
*|                    |               |                    | Lens now locked  |
*+--------------------+---------------+--------------------+------------------+
*| PASSIVE_SCAN       | HAL completes | PASSIVE_FOCUSED    | End AF scan      |
*|                    | current scan  |                    | Lens now locked  |
*+--------------------+---------------+--------------------+------------------+
*| PASSIVE_SCAN       | HAL fails     | PASSIVE_UNFOCUSED  | End AF scan      |
*|                    | current scan  |                    | Lens now locked  |
*+--------------------+---------------+--------------------+------------------+
*| PASSIVE_SCAN       | AF_TRIGGER    | FOCUSED_LOCKED     | Immediate trans. |
*|                    |               |                    | if focus is good |
*|                    |               |                    | Lens now locked  |
*+--------------------+---------------+--------------------+------------------+
*| PASSIVE_SCAN       | AF_TRIGGER    | NOT_FOCUSED_LOCKED | Immediate trans. |
*|                    |               |                    | if focus is bad  |
*|                    |               |                    | Lens now locked  |
*+--------------------+---------------+--------------------+------------------+
*| PASSIVE_SCAN       | AF_CANCEL     | INACTIVE           | Reset lens       |
*|                    |               |                    | position         |
*|                    |               |                    | Lens now locked  |
*+--------------------+---------------+--------------------+------------------+
*| PASSIVE_FOCUSED    | HAL initiates | PASSIVE_SCAN       | Start AF scan    |
*|                    | new scan      |                    | Lens now moving  |
*+--------------------+---------------+--------------------+------------------+
*| PASSIVE_UNFOCUSED  | HAL initiates | PASSIVE_SCAN       | Start AF scan    |
*|                    | new scan      |                    | Lens now moving  |
*+--------------------+---------------+--------------------+------------------+
*| PASSIVE_FOCUSED    | AF_TRIGGER    | FOCUSED_LOCKED     | Immediate trans. |
*|                    |               |                    | Lens now locked  |
*+--------------------+---------------+--------------------+------------------+
*| PASSIVE_UNFOCUSED  | AF_TRIGGER    | NOT_FOCUSED_LOCKED | Immediate trans. |
*|                    |               |                    | Lens now locked  |
*+--------------------+---------------+--------------------+------------------+
*| FOCUSED_LOCKED     | AF_TRIGGER    | FOCUSED_LOCKED     | No effect        |
*+--------------------+---------------+--------------------+------------------+
*| FOCUSED_LOCKED     | AF_CANCEL     | INACTIVE           | Restart AF scan  |
*+--------------------+---------------+--------------------+------------------+
*| NOT_FOCUSED_LOCKED | AF_TRIGGER    | NOT_FOCUSED_LOCKED | No effect        |
*+--------------------+---------------+--------------------+------------------+
*| NOT_FOCUSED_LOCKED | AF_CANCEL     | INACTIVE           | Restart AF scan  |
*+--------------------+---------------+--------------------+------------------+
*
* mode = AF_MODE_CONTINUOUS_PICTURE
*| state              | trans. cause  | new state          | notes            |
*+--------------------+---------------+--------------------+------------------+
*| INACTIVE           | HAL initiates | PASSIVE_SCAN       | Start AF scan    |
*|                    | new scan      |                    | Lens now moving  |
*+--------------------+---------------+--------------------+------------------+
*| INACTIVE           | AF_TRIGGER    | NOT_FOCUSED_LOCKED | AF state query   |
*|                    |               |                    | Lens now locked  |
*+--------------------+---------------+--------------------+------------------+
*| PASSIVE_SCAN       | HAL completes | PASSIVE_FOCUSED    | End AF scan      |
*|                    | current scan  |                    | Lens now locked  |
*+--------------------+---------------+--------------------+------------------+
*| PASSIVE_SCAN       | HAL fails     | PASSIVE_UNFOCUSED  | End AF scan      |
*|                    | current scan  |                    | Lens now locked  |
*+--------------------+---------------+--------------------+------------------+
*| PASSIVE_SCAN       | AF_TRIGGER    | FOCUSED_LOCKED     | Eventual trans.  |
*|                    |               |                    | once focus good  |
*|                    |               |                    | Lens now locked  |
*+--------------------+---------------+--------------------+------------------+
*| PASSIVE_SCAN       | AF_TRIGGER    | NOT_FOCUSED_LOCKED | Eventual trans.  |
*|                    |               |                    | if cannot focus  |
*|                    |               |                    | Lens now locked  |
*+--------------------+---------------+--------------------+------------------+
*| PASSIVE_SCAN       | AF_CANCEL     | INACTIVE           | Reset lens       |
*|                    |               |                    | position         |
*|                    |               |                    | Lens now locked  |
*+--------------------+---------------+--------------------+------------------+
*| PASSIVE_FOCUSED    | HAL initiates | PASSIVE_SCAN       | Start AF scan    |
*|                    | new scan      |                    | Lens now moving  |
*+--------------------+---------------+--------------------+------------------+
*| PASSIVE_UNFOCUSED  | HAL initiates | PASSIVE_SCAN       | Start AF scan    |
*|                    | new scan      |                    | Lens now moving  |
*+--------------------+---------------+--------------------+------------------+
*| PASSIVE_FOCUSED    | AF_TRIGGER    | FOCUSED_LOCKED     | Immediate trans. |
*|                    |               |                    | Lens now locked  |
*+--------------------+---------------+--------------------+------------------+
*| PASSIVE_UNFOCUSED  | AF_TRIGGER    | NOT_FOCUSED_LOCKED | Immediate trans. |
*|                    |               |                    | Lens now locked  |
*+--------------------+---------------+--------------------+------------------+
*| FOCUSED_LOCKED     | AF_TRIGGER    | FOCUSED_LOCKED     | No effect        |
*+--------------------+---------------+--------------------+------------------+
*| FOCUSED_LOCKED     | AF_CANCEL     | INACTIVE           | Restart AF scan  |
*+--------------------+---------------+--------------------+------------------+
*| NOT_FOCUSED_LOCKED | AF_TRIGGER    | NOT_FOCUSED_LOCKED | No effect        |
*+--------------------+---------------+--------------------+------------------+
*| NOT_FOCUSED_LOCKED | AF_CANCEL     | INACTIVE           | Restart AF scan  |
*+--------------------+---------------+--------------------+------------------+
*
* S4.6. AE and AWB state machines
*
* The AE and AWB state machines are mostly identical. AE has additional
* FLASH_REQUIRED and PRECAPTURE states, so rows below that refer to those two
* states should be ignored for the AWB state machine.
*
* when enabling AE/AWB or changing AE/AWB mode
*| state              | trans. cause  | new state          | notes            |
*+--------------------+---------------+--------------------+------------------+
*| Any                | mode change   | INACTIVE           |                  |
*+--------------------+---------------+--------------------+------------------+
*
* mode = AE_MODE_OFF / AWB mode not AUTO
*| state              | trans. cause  | new state          | notes            |
*+--------------------+---------------+--------------------+------------------+
*| INACTIVE           |               | INACTIVE           | AE/AWB disabled  |
*+--------------------+---------------+--------------------+------------------+
*
* mode = AE_MODE_ON_* / AWB_MODE_AUTO
*| state              | trans. cause  | new state          | notes            |
*+--------------------+---------------+--------------------+------------------+
*| INACTIVE           | HAL initiates | SEARCHING          |                  |
*|                    | AE/AWB scan   |                    |                  |
*+--------------------+---------------+--------------------+------------------+
*| INACTIVE           | AE/AWB_LOCK   | LOCKED             | values locked    |
*|                    | on            |                    |                  |
*+--------------------+---------------+--------------------+------------------+
*| SEARCHING          | HAL finishes  | CONVERGED          | good values, not |
*|                    | AE/AWB scan   |                    | changing         |
*+--------------------+---------------+--------------------+------------------+
*| SEARCHING          | HAL finishes  | FLASH_REQUIRED     | converged but too|
*|                    | AE scan       |                    | dark w/o flash   |
*+--------------------+---------------+--------------------+------------------+
*| SEARCHING          | AE/AWB_LOCK   | LOCKED             | values locked    |
*|                    | on            |                    |                  |
*+--------------------+---------------+--------------------+------------------+
*| CONVERGED          | HAL initiates | SEARCHING          | values locked    |
*|                    | AE/AWB scan   |                    |                  |
*+--------------------+---------------+--------------------+------------------+
*| CONVERGED          | AE/AWB_LOCK   | LOCKED             | values locked    |
*|                    | on            |                    |                  |
*+--------------------+---------------+--------------------+------------------+
*| FLASH_REQUIRED     | HAL initiates | SEARCHING          | values locked    |
*|                    | AE/AWB scan   |                    |                  |
*+--------------------+---------------+--------------------+------------------+
*| FLASH_REQUIRED     | AE/AWB_LOCK   | LOCKED             | values locked    |
*|                    | on            |                    |                  |
*+--------------------+---------------+--------------------+------------------+
*| LOCKED             | AE/AWB_LOCK   | SEARCHING          | values not good  |
*|                    | off           |                    | after unlock     |
*+--------------------+---------------+--------------------+------------------+
*| LOCKED             | AE/AWB_LOCK   | CONVERGED          | values good      |
*|                    | off           |                    | after unlock     |
*+--------------------+---------------+--------------------+------------------+
*| LOCKED             | AE_LOCK       | FLASH_REQUIRED     | exposure good,   |
*|                    | off           |                    | but too dark     |
*+--------------------+---------------+--------------------+------------------+
*| All AE states      | PRECAPTURE_   | PRECAPTURE         | Start precapture |
*|                    | START         |                    | sequence         |
*+--------------------+---------------+--------------------+------------------+
*| PRECAPTURE         | Sequence done.| CONVERGED          | Ready for high-  |
*|                    | AE_LOCK off   |                    | quality capture  |
*+--------------------+---------------+--------------------+------------------+
*| PRECAPTURE         | Sequence done.| LOCKED             | Ready for high-  |
*|                    | AE_LOCK on    |                    | quality capture  |
*+--------------------+---------------+--------------------+------------------+
*
*/
/**
* S5. Cropping:
*
* Cropping of the full pixel array (for digital zoom and other use cases where
* a smaller FOV is desirable) is communicated through the
* ANDROID_SCALER_CROP_REGION setting. This is a per-request setting, and can
* change on a per-request basis, which is critical for implementing smooth
* digital zoom.
*
* The region is defined as a rectangle (x, y, width, height), with (x, y)
* describing the top-left corner of the rectangle. The rectangle is defined on
* the coordinate system of the sensor active pixel array, with (0,0) being the
* top-left pixel of the active pixel array. Therefore, the width and height
* cannot be larger than the dimensions reported in the
* ANDROID_SENSOR_ACTIVE_PIXEL_ARRAY static info field. The minimum allowed
* width and height are reported by the HAL through the
* ANDROID_SCALER_MAX_DIGITAL_ZOOM static info field, which describes the
* maximum supported zoom factor. Therefore, the minimum crop region width and
* height are:
*
* {width, height} =
* { floor(ANDROID_SENSOR_ACTIVE_PIXEL_ARRAY[0] /
* ANDROID_SCALER_MAX_DIGITAL_ZOOM),
* floor(ANDROID_SENSOR_ACTIVE_PIXEL_ARRAY[1] /
* ANDROID_SCALER_MAX_DIGITAL_ZOOM) }
*
* If the crop region needs to fulfill specific requirements (for example, it
* needs to start on even coordinates, and its width/height needs to be even),
* the HAL must do the necessary rounding and write out the final crop region
* used in the output result metadata. Similarly, if the HAL implements video
* stabilization, it must adjust the result crop region to describe the region
* actually included in the output after video stabilization is applied. In
* general, a camera-using application must be able to determine the field of
* view it is receiving based on the crop region, the dimensions of the image
* sensor, and the lens focal length.
*
* It is assumed that the cropping is applied after the conversion from raw to
* other color spaces. Raw streams (RAW16 and RAW_OPAQUE) don't have this conversion stage,
* and are not croppable. Therefore, the crop region must be ignored by the HAL
* for raw streams.
*
* Since the crop region applies to all non-raw streams, which may have different aspect
* ratios than the crop region, the exact sensor region used for each stream may
* be smaller than the crop region. Specifically, each stream should maintain
* square pixels and its aspect ratio by minimally further cropping the defined
* crop region. If the stream's aspect ratio is wider than the crop region, the
* stream should be further cropped vertically, and if the stream's aspect ratio
* is narrower than the crop region, the stream should be further cropped
* horizontally.
*
* In all cases, the stream crop must be centered within the full crop region,
* and each stream is only either cropped horizontally or vertically relative
* to the full crop region, never both.
*
* For example, if two streams are defined, a 640x480 stream (4:3 aspect), and a
* 1280x720 stream (16:9 aspect), below demonstrates the expected output regions
* for each stream for a few sample crop regions, on a hypothetical 3 MP (2000 x
* 1500 pixel array) sensor.
*
* Crop region: (500, 375, 1000, 750) (4:3 aspect ratio)
*
* 640x480 stream crop: (500, 375, 1000, 750) (equal to crop region)
* 1280x720 stream crop: (500, 469, 1000, 562) (marked with =)
*
* 0                   1000               2000
* +---------+---------+---------+----------+
* | Active pixel array                     |
* |                                        |
* |                                        |
* +         +-------------------+          + 375
* |         |                   |          |
* |         O===================O          |
* |         I 1280x720 stream   I          |
* +         I                   I          + 750
* |         I                   I          |
* |         O===================O          |
* |         |                   |          |
* +         +-------------------+          + 1125
* |          Crop region, 640x480 stream   |
* |                                        |
* |                                        |
* +---------+---------+---------+----------+ 1500
*
* Crop region: (500, 375, 1333, 750) (16:9 aspect ratio)
*
* 640x480 stream crop: (666, 375, 1000, 750) (marked with =)
* 1280x720 stream crop: (500, 375, 1333, 750) (equal to crop region)
*
* 0                   1000               2000
* +---------+---------+---------+----------+
* | Active pixel array                     |
* |                                        |
* |                                        |
* +         +---O==================O---+   + 375
* |         |   I 640x480 stream   I   |   |
* |         |   I                  I   |   |
* |         |   I                  I   |   |
* +         |   I                  I   |   + 750
* |         |   I                  I   |   |
* |         |   I                  I   |   |
* |         |   I                  I   |   |
* +         +---O==================O---+   + 1125
* |          Crop region, 1280x720 stream  |
* |                                        |
* |                                        |
* +---------+---------+---------+----------+ 1500
*
* Crop region: (500, 375, 750, 750) (1:1 aspect ratio)
*
* 640x480 stream crop: (500, 469, 750, 562) (marked with =)
* 1280x720 stream crop: (500, 543, 750, 414) (marked with #)
*
* 0                   1000               2000
* +---------+---------+---------+----------+
* | Active pixel array                     |
* |                                        |
* |                                        |
* +         +--------------+               + 375
* |         O==============O               |
* |         ################               |
* |         #              #               |
* +         #              #               + 750
* |         #              #               |
* |         ################ 1280x720      |
* |         O==============O 640x480       |
* +         +--------------+               + 1125
* |          Crop region                   |
* |                                        |
* |                                        |
* +---------+---------+---------+----------+ 1500
*
* And a final example, a 1024x1024 square aspect ratio stream instead of the
* 480p stream:
*
* Crop region: (500, 375, 1000, 750) (4:3 aspect ratio)
*
* 1024x1024 stream crop: (625, 375, 750, 750) (marked with #)
* 1280x720 stream crop: (500, 469, 1000, 562) (marked with =)
*
* 0                   1000               2000
* +---------+---------+---------+----------+
* | Active pixel array                     |
* |                                        |
* |            1024x1024 stream            |
* +         +--###############--+          + 375
* |         |  #             #  |          |
* |         O===================O          |
* |         I 1280x720 stream   I          |
* +         I                   I          + 750
* |         I                   I          |
* |         O===================O          |
* |         |  #             #  |          |
* +         +--###############--+          + 1125
* |          Crop region                   |
* |                                        |
* |                                        |
* +---------+---------+---------+----------+ 1500
*
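* As an illustrative sketch only (not part of the HAL API), a helper that
* derives a stream's crop from the full crop region by the centering rule
* above; all names are hypothetical:
*
*     #include <stdint.h>
*
*     static void stream_crop(int cx, int cy, int cw, int ch,  // crop region
*                             int sw, int sh,                  // stream size
*                             int *ox, int *oy, int *ow, int *oh)
*     {
*         if ((int64_t) cw * sh > (int64_t) sw * ch) {
*             // Crop region is wider than the stream: crop horizontally.
*             *ow = ch * sw / sh;
*             *oh = ch;
*         } else {
*             // Crop region is taller than (or equal to) the stream:
*             // crop vertically.
*             *ow = cw;
*             *oh = cw * sh / sw;
*         }
*         *ox = cx + (cw - *ow) / 2;
*         *oy = cy + (ch - *oh) / 2;
*     }
*
* For the first example above, stream_crop(500, 375, 1000, 750, 1280, 720,
* ...) yields (500, 469, 1000, 562), matching the 1280x720 stream crop.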
*/
/**
* S6. Error management:
*
* Camera HAL device ops functions that have a return value will all return
* -ENODEV / NULL in case of a serious error. This means the device cannot
* continue operation, and must be closed by the framework. Once this error is
* returned by some method, or if notify() is called with ERROR_DEVICE, only
* the close() method can be called successfully. All other methods will return
* -ENODEV / NULL.
*
* If a device op is called in the wrong sequence, for example if the framework
* calls configure_streams() before initialize(), the device must
* return -ENOSYS from the call, and do nothing.
*
* Transient errors in image capture must be reported through notify() as follows:
*
* - The failure of an entire capture to occur must be reported by the HAL by
* calling notify() with ERROR_REQUEST. Individual errors for the result
* metadata or the output buffers must not be reported in this case.
*
* - If the metadata for a capture cannot be produced, but some image buffers
* were filled, the HAL must call notify() with ERROR_RESULT.
*
* - If an output image buffer could not be filled, but either the metadata was
* produced or some other buffers were filled, the HAL must call notify() with
* ERROR_BUFFER for each failed buffer.
*
* In each of these transient failure cases, the HAL must still call
* process_capture_result, with valid output and input (if an input buffer was
* submitted) buffer_handle_t. If the result metadata could not be produced, it
* should be NULL. If some buffers could not be filled, they must be returned with
* process_capture_result in the error state, and their release fences must be set
* to the acquire fences passed by the framework, or to -1 if the HAL has already
* waited on them.
*
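* As an illustrative sketch only (using the notify message and buffer status
* definitions from later in this header; frame_number, failed_buffer, and
* callback_ops are hypothetical), a HAL reporting one failed output buffer
* might do:
*
*     camera3_notify_msg_t msg = {
*         .type = CAMERA3_MSG_ERROR,
*         .message.error = {
*             .frame_number = frame_number,
*             .error_stream = failed_buffer.stream,
*             .error_code = CAMERA3_MSG_ERROR_BUFFER,
*         },
*     };
*     callback_ops->notify(callback_ops, &msg);
*
*     failed_buffer.status = CAMERA3_BUFFER_STATUS_ERROR;
*     // Hand the framework's acquire fence back as the release fence
*     // (or -1 if the HAL already waited on it), then return the buffer
*     // via process_capture_result() as usual.
*     failed_buffer.release_fence = failed_buffer.acquire_fence;
*     failed_buffer.acquire_fence = -1;
*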
* Invalid input arguments result in -EINVAL from the appropriate methods. In
* that case, the framework must act as if that call had never been made.
*
*/
/**
* S7. Key Performance Indicator (KPI) glossary:
*
* This includes some critical definitions that are used by KPI metrics.
*
* Pipeline Latency:
* For a given capture request, the duration from the framework calling
* process_capture_request to the HAL sending capture result and all buffers
* back by process_capture_result call. To make the Pipeline Latency measure
* independent of frame rate, it is measured by frame count.
*
* For example, when frame rate is 30 (fps), the frame duration (time interval
* between adjacent frame capture time) is 33 (ms).
* If it takes 5 frames for framework to get the result and buffers back for
* a given request, then the Pipeline Latency is 5 (frames), instead of
* 5 x 33 = 165 (ms).
*
* The Pipeline Latency is determined by android.request.pipelineDepth and
* android.request.pipelineMaxDepth, see their definitions for more details.
*
*/
/**
* S8. Sample Use Cases:
*
* This includes some typical use case examples the camera HAL may support.
*
* S8.1 Zero Shutter Lag (ZSL) with CAMERA3_STREAM_BIDIRECTIONAL stream.
*
* For this use case, the bidirectional stream will be used by the framework as follows:
*
* 1. The framework includes a buffer from this stream as output buffer in a
* request as normal.
*
* 2. Once the HAL device returns a filled output buffer to the framework,
* the framework may do one of two things with the filled buffer:
*
* 2. a. The framework uses the filled data, and returns the now-used buffer
* to the stream queue for reuse. This behavior exactly matches the
* OUTPUT type of stream.
*
* 2. b. The framework wants to reprocess the filled data, and uses the
* buffer as an input buffer for a request. Once the HAL device has
* used the reprocessing buffer, it then returns it to the
* framework. The framework then returns the now-used buffer to the
* stream queue for reuse.
*
* 3. The HAL device will be given the buffer again as an output buffer for
* a request at some future point.
*
 * For the ZSL use case, the pixel format for the bidirectional stream will be
* HAL_PIXEL_FORMAT_RAW_OPAQUE or HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED if it
* is listed in android.scaler.availableInputOutputFormatsMap. When
* HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED is used, the gralloc
* usage flags for the consumer endpoint will be set to GRALLOC_USAGE_HW_CAMERA_ZSL.
 * A stream configuration list that has a BIDIRECTIONAL stream used as input will
 * usually also have a distinct OUTPUT stream to receive the reprocessed data. For
 * example, for the ZSL use case, the stream list might be configured with the following:
*
* - A HAL_PIXEL_FORMAT_RAW_OPAQUE bidirectional stream is used
* as input.
* - And a HAL_PIXEL_FORMAT_BLOB (JPEG) output stream.
*
* S8.2 ZSL (OPAQUE) reprocessing with CAMERA3_STREAM_INPUT stream.
*
* CAMERA_DEVICE_API_VERSION_3_3:
* When OPAQUE_REPROCESSING capability is supported by the camera device, the INPUT stream
* can be used for application/framework implemented use case like Zero Shutter Lag (ZSL).
* This kind of stream will be used by the framework as follows:
*
* 1. Application/framework configures an opaque (RAW or YUV based) format output stream that is
* used to produce the ZSL output buffers. The stream pixel format will be
* HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED.
*
* 2. Application/framework configures an opaque format input stream that is used to
* send the reprocessing ZSL buffers to the HAL. The stream pixel format will
* also be HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED.
*
* 3. Application/framework configures a YUV/JPEG output stream that is used to receive the
* reprocessed data. The stream pixel format will be YCbCr_420/HAL_PIXEL_FORMAT_BLOB.
*
 * 4. Application/framework picks a ZSL buffer from the ZSL output stream when a ZSL
 *    capture is issued by the application, sends the data back as an input buffer in a
 *    reprocessing request, and then sends it to the HAL for reprocessing.
*
 * 5. The HAL sends the output YUV/JPEG result back to the framework.
*
* The HAL can select the actual opaque buffer format and configure the ISP pipeline
* appropriately based on the HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED format and
* the gralloc usage flag GRALLOC_USAGE_HW_CAMERA_ZSL.
 *
 * S8.3 YUV reprocessing with CAMERA3_STREAM_INPUT stream.
*
* When YUV reprocessing is supported by the HAL, the INPUT stream
* can be used for the YUV reprocessing use cases like lucky-shot and image fusion.
* This kind of stream will be used by the framework as follows:
*
 * 1. Application/framework configures a YCbCr_420 format output stream that is
* used to produce the output buffers.
*
 * 2. Application/framework configures a YCbCr_420 format input stream that is used to
* send the reprocessing YUV buffers to the HAL.
*
* 3. Application/framework configures a YUV/JPEG output stream that is used to receive the
* reprocessed data. The stream pixel format will be YCbCr_420/HAL_PIXEL_FORMAT_BLOB.
*
* 4. Application/framework processes the output buffers (could be as simple as picking
* an output buffer directly) from the output stream when a capture is issued, and sends
 *    the data back as an input buffer in a reprocessing request, then sends it to the HAL
* for reprocessing.
*
 * 5. The HAL sends the output YUV/JPEG result back to the framework.
*
*/
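/*
 * As a concrete, hedged sketch of the S8.2 OPAQUE reprocessing configuration
 * above, the framework might hand the HAL a stream list like the following.
 * The resolutions and variable names are illustrative assumptions; only the
 * stream types and formats come from S8.2:
 *
 *   camera3_stream_t zsl_output = {
 *       .stream_type = CAMERA3_STREAM_OUTPUT,
 *       .width = 4032, .height = 3024,
 *       .format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
 *   };
 *   camera3_stream_t zsl_input = {
 *       .stream_type = CAMERA3_STREAM_INPUT,
 *       .width = 4032, .height = 3024,
 *       .format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
 *   };
 *   camera3_stream_t jpeg_output = {
 *       .stream_type = CAMERA3_STREAM_OUTPUT,
 *       .width = 4032, .height = 3024,
 *       .format = HAL_PIXEL_FORMAT_BLOB,
 *   };
 *   camera3_stream_t *streams[] = { &zsl_output, &zsl_input, &jpeg_output };
 *   camera3_stream_configuration_t config = {
 *       .num_streams = 3,
 *       .streams = streams,
 *       .operation_mode = CAMERA3_STREAM_CONFIGURATION_NORMAL_MODE,
 *   };
 *   device->ops->configure_streams(device, &config);
 */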
/**
* S9. Notes on Controls and Metadata
*
* This section contains notes about the interpretation and usage of various metadata tags.
*
* S9.1 HIGH_QUALITY and FAST modes.
*
* Many camera post-processing blocks may be listed as having HIGH_QUALITY,
* FAST, and OFF operating modes. These blocks will typically also have an
* 'available modes' tag representing which of these operating modes are
* available on a given device. The general policy regarding implementing
* these modes is as follows:
*
* 1. Operating mode controls of hardware blocks that cannot be disabled
* must not list OFF in their corresponding 'available modes' tags.
*
* 2. OFF will always be included in their corresponding 'available modes'
* tag if it is possible to disable that hardware block.
*
* 3. FAST must always be included in the 'available modes' tags for all
* post-processing blocks supported on the device. If a post-processing
* block also has a slower and higher quality operating mode that does
* not meet the framerate requirements for FAST mode, HIGH_QUALITY should
* be included in the 'available modes' tag to represent this operating
* mode.
*/
/**
* S10. Reprocessing flow and controls
*
 * This section describes the OPAQUE and YUV reprocessing flows and controls. OPAQUE
 * reprocessing uses an opaque format that is not directly application-visible, so the
 * application can only select some of the output buffers and send them back to the HAL
 * for reprocessing, while YUV reprocessing gives the application the opportunity to
 * process the buffers before reprocessing.
 *
 * S8 gives the stream configurations for the typical reprocessing use cases;
 * this section specifies the buffer flow and controls in more detail.
*
* S10.1 OPAQUE (typically for ZSL use case) reprocessing flow and controls
*
* For OPAQUE reprocessing (e.g. ZSL) use case, after the application creates the specific
* output and input streams, runtime buffer flow and controls are specified as below:
*
 * 1. The application starts output streaming by sending repeating requests for output
 *    opaque buffers and preview. The buffers are held in an application-maintained
 *    circular buffer. The requests are based on the CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG
 *    capture template, which should have all the necessary settings to guarantee that
 *    the output frame rate is not slowed down relative to the sensor output frame rate.
*
 * 2. When a capture is issued, the application selects one output buffer based
 *    on its buffer selection logic, e.g. good AE and AF statistics, etc. The
 *    application then creates a reprocess request based on the capture result
 *    associated with this selected buffer. The selected output buffer is now added
 *    to this reprocess request as an input buffer; the output buffer of this
 *    reprocess request should be either a JPEG output buffer or a YUV output
 *    buffer, or both, depending on the application's choice.
*
 * 3. The application then alters the reprocess settings to get the best image quality.
 *    The HAL must support, and only support, the below controls if it supports the
 *    OPAQUE_REPROCESSING capability:
 *      - android.jpeg.* (if a JPEG buffer is included as one of the outputs)
 *      - android.noiseReduction.mode (change to HIGH_QUALITY if it is supported)
 *      - android.edge.mode (change to HIGH_QUALITY if it is supported)
 *    All other controls must be ignored by the HAL.
 *
 * 4. The HAL processes the input buffer and returns the output buffers in the capture
 *    results as normal.
*
* S10.2 YUV reprocessing flow and controls
*
 * The YUV reprocessing buffer flow is similar to OPAQUE reprocessing, with the following
 * differences:
*
 * 1. The application may want finer-grained control of the intermediate YUV images
 *    (before reprocessing). For example, the application may choose
 *      - android.noiseReduction.mode == MINIMAL
 *    to make sure that no YUV-domain noise reduction has been applied to the output YUV
 *    buffers, then do its own advanced noise reduction on them. For the OPAQUE
 *    reprocessing case, this doesn't matter, as long as the final reprocessed image has
 *    the best quality.
 * 2. The application may modify the YUV output buffer data. For example, for the image
 *    fusion use case, where multiple output images are merged together to improve the
 *    signal-to-noise ratio (SNR), the input buffer may be generated from multiple buffers
 *    by the application. To avoid an excessive amount of noise reduction and an
 *    insufficient amount of edge enhancement being applied to the input buffer, the
 *    application can hint to the HAL how much effective exposure time improvement has
 *    been done by the application; the HAL can then adjust the noise reduction and edge
 *    enhancement parameters to get the best reprocessed image quality. The below tag can
 *    be used for this purpose:
 *      - android.reprocess.effectiveExposureFactor
 *    The value would be the exposure time increase factor applied to the original output
 *    image; for example, if there are N images merged, the exposure time increase factor
 *    would be up to sqrt(N). See this tag's spec for more details.
*
* S10.3 Reprocessing pipeline characteristics
*
 * The reprocessing pipeline has the following characteristics that differ from the
 * normal output pipeline:
*
 * 1. The reprocessing result can be returned ahead of the pending normal output results,
 *    but the FIFO ordering must be maintained for all reprocessing results. For example,
 *    with the below requests (A stands for output requests, B stands for reprocessing
 *    requests) being processed by the HAL:
 *        A1, A2, A3, A4, B1, A5, B2, A6...
 *    the result of B1 can be returned before A1-A4, but the result of B2 must be
 *    returned after B1.
 * 2. Single input rule: For a given reprocessing request, all output buffers must be
 *    produced from the input buffer, rather than from sensor output. For example, if a
 *    reprocess request includes both JPEG and preview buffers, all output buffers must
 *    be produced from the input buffer included in the reprocessing request, rather
 *    than from the sensor. The HAL must not output preview buffers from the sensor
 *    while outputting the JPEG buffer from the input buffer.
 * 3. The input buffer will come from camera output directly (ZSL case) or indirectly
 *    (image fusion case). For the case where the buffer is modified, the size will
 *    remain the same. The HAL can notify CAMERA3_MSG_ERROR_REQUEST if a buffer from an
 *    unknown source is sent.
 * 4. Result as reprocessing request: The HAL can expect that a reprocessing request is a
 *    copy of one of the output results with minor allowed setting changes. The HAL can
 *    notify CAMERA3_MSG_ERROR_REQUEST if a request from an unknown source is issued.
 * 5. Output buffers may not be used as inputs across the configure-stream boundary. This
 *    is because an opaque stream like the ZSL output stream may have a different actual
 *    image size inside the ZSL buffer, to save power and bandwidth for smaller-resolution
 *    JPEG capture. The HAL may notify CAMERA3_MSG_ERROR_REQUEST if this case occurs.
 * 6. Error reporting for reprocess requests during flush should follow the same rules
 *    specified by the flush() method.
*
*/
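/*
 * A hedged sketch of S10.1 steps 2-4: turning a selected ZSL output buffer into
 * a reprocess request, using the request structures defined later in this file.
 * The buffer handles, settings buffer, and frame counter are hypothetical; the
 * settings are assumed to be a clone of the capture result metadata for the
 * selected frame with only the allowed keys (android.jpeg.*,
 * android.noiseReduction.mode, android.edge.mode) altered:
 *
 *   camera3_stream_buffer_t input_buf = {
 *       .stream = &zsl_input,           // see the S8.2 sketch above
 *       .buffer = selected_zsl_handle,  // buffer chosen by the application
 *       .status = CAMERA3_BUFFER_STATUS_OK,
 *       .acquire_fence = -1,
 *       .release_fence = -1,
 *   };
 *   camera3_stream_buffer_t jpeg_buf = {
 *       .stream = &jpeg_output,
 *       .buffer = jpeg_handle,
 *       .status = CAMERA3_BUFFER_STATUS_OK,
 *       .acquire_fence = -1,
 *       .release_fence = -1,
 *   };
 *   camera3_capture_request_t request = {
 *       .frame_number = next_frame_number++,
 *       .settings = reprocess_settings,  // cloned + edited result metadata
 *       .input_buffer = &input_buf,
 *       .num_output_buffers = 1,
 *       .output_buffers = &jpeg_buf,
 *   };
 *   device->ops->process_capture_request(device, &request);
 */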
__BEGIN_DECLS
struct camera3_device;
/**********************************************************************
*
* Camera3 stream and stream buffer definitions.
*
* These structs and enums define the handles and contents of the input and
* output streams connecting the HAL to various framework and application buffer
* consumers. Each stream is backed by a gralloc buffer queue.
*
*/
/**
* camera3_stream_type_t:
*
* The type of the camera stream, which defines whether the camera HAL device is
* the producer or the consumer for that stream, and how the buffers of the
* stream relate to the other streams.
*/
typedef enum camera3_stream_type {
/**
* This stream is an output stream; the camera HAL device will be
* responsible for filling buffers from this stream with newly captured or
* reprocessed image data.
*/
CAMERA3_STREAM_OUTPUT = 0,
/**
* This stream is an input stream; the camera HAL device will be responsible
* for reading buffers from this stream and sending them through the camera
* processing pipeline, as if the buffer was a newly captured image from the
* imager.
*
* The pixel format for input stream can be any format reported by
* android.scaler.availableInputOutputFormatsMap. The pixel format of the
* output stream that is used to produce the reprocessing data may be any
* format reported by android.scaler.availableStreamConfigurations. The
 * supported input/output stream combinations depend on the camera device
* capabilities, see android.scaler.availableInputOutputFormatsMap for
* stream map details.
*
* This kind of stream is generally used to reprocess data into higher
* quality images (that otherwise would cause a frame rate performance
* loss), or to do off-line reprocessing.
*
* CAMERA_DEVICE_API_VERSION_3_3:
* The typical use cases are OPAQUE (typically ZSL) and YUV reprocessing,
* see S8.2, S8.3 and S10 for more details.
*/
CAMERA3_STREAM_INPUT = 1,
/**
* This stream can be used for input and output. Typically, the stream is
* used as an output stream, but occasionally one already-filled buffer may
* be sent back to the HAL device for reprocessing.
*
* This kind of stream is meant generally for Zero Shutter Lag (ZSL)
* features, where copying the captured image from the output buffer to the
* reprocessing input buffer would be expensive. See S8.1 for more details.
*
* Note that the HAL will always be reprocessing data it produced.
*
*/
CAMERA3_STREAM_BIDIRECTIONAL = 2,
/**
* Total number of framework-defined stream types
*/
CAMERA3_NUM_STREAM_TYPES
} camera3_stream_type_t;
/**
* camera3_stream_rotation_t:
*
* The required counterclockwise rotation of camera stream.
*/
typedef enum camera3_stream_rotation {
    /* No rotation */
    CAMERA3_STREAM_ROTATION_0 = 0,
    /* Rotate by 90 degrees counterclockwise */
    CAMERA3_STREAM_ROTATION_90 = 1,
    /* Rotate by 180 degrees counterclockwise */
    CAMERA3_STREAM_ROTATION_180 = 2,
    /* Rotate by 270 degrees counterclockwise */
    CAMERA3_STREAM_ROTATION_270 = 3
} camera3_stream_rotation_t;
/**
* camera3_stream_configuration_mode_t:
*
* This defines the general operation mode for the HAL (for a given stream configuration), where
* modes besides NORMAL have different semantics, and usually limit the generality of the API in
* exchange for higher performance in some particular area.
*/
typedef enum camera3_stream_configuration_mode {
/**
* Normal stream configuration operation mode. This is the default camera operation mode,
* where all semantics of HAL APIs and metadata controls apply.
*/
CAMERA3_STREAM_CONFIGURATION_NORMAL_MODE = 0,
/**
 * Special constrained high speed operation mode for devices that cannot support high
* speed output in NORMAL mode. All streams in this configuration are operating at high speed
* mode and have different characteristics and limitations to achieve high speed output.
* The NORMAL mode can still be used for high speed output if the HAL can support high speed
* output while satisfying all the semantics of HAL APIs and metadata controls. It is
* recommended for the HAL to support high speed output in NORMAL mode (by advertising the high
* speed FPS ranges in android.control.aeAvailableTargetFpsRanges) if possible.
*
 * This mode has the following limitations/requirements:
*
* 1. The HAL must support up to 2 streams with sizes reported by
* android.control.availableHighSpeedVideoConfigurations.
* 2. In this mode, the HAL is expected to output up to 120fps or higher. This mode must
* support the targeted FPS range and size configurations reported by
* android.control.availableHighSpeedVideoConfigurations.
* 3. The HAL must support HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED output stream format.
 *    4. To achieve efficient high speed streaming, the HAL may have to aggregate
 *       multiple frames together and send them to the camera device for processing,
 *       where the request controls are the same for all the frames in this batch
 *       (batch mode). The HAL must support the max batch size requirements defined
 *       by android.control.availableHighSpeedVideoConfigurations.
* 5. In this mode, the HAL must override aeMode, awbMode, and afMode to ON, ON, and
* CONTINUOUS_VIDEO, respectively. All post-processing block mode controls must be
* overridden to be FAST. Therefore, no manual control of capture and post-processing
* parameters is possible. All other controls operate the same as when
* android.control.mode == AUTO. This means that all other android.control.* fields
* must continue to work, such as
*
* android.control.aeTargetFpsRange
* android.control.aeExposureCompensation
* android.control.aeLock
* android.control.awbLock
* android.control.effectMode
* android.control.aeRegions
* android.control.afRegions
* android.control.awbRegions
* android.control.afTrigger
* android.control.aePrecaptureTrigger
*
* Outside of android.control.*, the following controls must work:
*
* android.flash.mode (TORCH mode only, automatic flash for still capture will not work
* since aeMode is ON)
* android.lens.opticalStabilizationMode (if it is supported)
* android.scaler.cropRegion
* android.statistics.faceDetectMode (if it is supported)
* 6. To reduce the amount of data passed across process boundaries at
* high frame rate, within one batch, camera framework only propagates
* the last shutter notify and the last capture results (including partial
* results and final result) to the app. The shutter notifies and capture
* results for the other requests in the batch are derived by
* the camera framework. As a result, the HAL can return empty metadata
* except for the last result in the batch.
*
* For more details about high speed stream requirements, see
* android.control.availableHighSpeedVideoConfigurations and CONSTRAINED_HIGH_SPEED_VIDEO
* capability defined in android.request.availableCapabilities.
*
* This mode only needs to be supported by HALs that include CONSTRAINED_HIGH_SPEED_VIDEO in
* the android.request.availableCapabilities static metadata.
*/
CAMERA3_STREAM_CONFIGURATION_CONSTRAINED_HIGH_SPEED_MODE = 1,
/**
* First value for vendor-defined stream configuration modes.
*/
CAMERA3_VENDOR_STREAM_CONFIGURATION_MODE_START = 0x8000
} camera3_stream_configuration_mode_t;
/**
* camera3_stream_t:
*
* A handle to a single camera input or output stream. A stream is defined by
* the framework by its buffer resolution and format, and additionally by the
* HAL with the gralloc usage flags and the maximum in-flight buffer count.
*
* The stream structures are owned by the framework, but pointers to a
* camera3_stream passed into the HAL by configure_streams() are valid until the
* end of the first subsequent configure_streams() call that _does not_ include
* that camera3_stream as an argument, or until the end of the close() call.
*
* All camera3_stream framework-controlled members are immutable once the
* camera3_stream is passed into configure_streams(). The HAL may only change
* the HAL-controlled parameters during a configure_streams() call, except for
* the contents of the private pointer.
*
* If a configure_streams() call returns a non-fatal error, all active streams
* remain valid as if configure_streams() had not been called.
*
* The endpoint of the stream is not visible to the camera HAL device.
 * In DEVICE_API_VERSION_3_1, this was changed to share consumer usage flags
 * on streams where the camera is a producer (OUTPUT and BIDIRECTIONAL stream
 * types); see the usage field below.
*/
typedef struct camera3_stream {
/*****
* Set by framework before configure_streams()
*/
/**
* The type of the stream, one of the camera3_stream_type_t values.
*/
int stream_type;
/**
* The width in pixels of the buffers in this stream
*/
uint32_t width;
/**
* The height in pixels of the buffers in this stream
*/
uint32_t height;
/**
* The pixel format for the buffers in this stream. Format is a value from
* the HAL_PIXEL_FORMAT_* list in system/core/include/system/graphics.h, or
* from device-specific headers.
*
* If HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED is used, then the platform
* gralloc module will select a format based on the usage flags provided by
* the camera device and the other endpoint of the stream.
*
* <= CAMERA_DEVICE_API_VERSION_3_1:
*
* The camera HAL device must inspect the buffers handed to it in the
* subsequent register_stream_buffers() call to obtain the
* implementation-specific format details, if necessary.
*
* >= CAMERA_DEVICE_API_VERSION_3_2:
*
* register_stream_buffers() won't be called by the framework, so the HAL
* should configure the ISP and sensor pipeline based purely on the sizes,
* usage flags, and formats for the configured streams.
*/
int format;
/*****
* Set by HAL during configure_streams().
*/
/**
* The gralloc usage flags for this stream, as needed by the HAL. The usage
* flags are defined in gralloc.h (GRALLOC_USAGE_*), or in device-specific
* headers.
*
* For output streams, these are the HAL's producer usage flags. For input
* streams, these are the HAL's consumer usage flags. The usage flags from
* the producer and the consumer will be combined together and then passed
* to the platform gralloc HAL module for allocating the gralloc buffers for
* each stream.
*
* Version information:
*
* == CAMERA_DEVICE_API_VERSION_3_0:
*
* No initial value guaranteed when passed via configure_streams().
* HAL may not use this field as input, and must write over this field
* with its usage flags.
*
* >= CAMERA_DEVICE_API_VERSION_3_1:
*
* For stream_type OUTPUT and BIDIRECTIONAL, when passed via
* configure_streams(), the initial value of this is the consumer's
* usage flags. The HAL may use these consumer flags to decide stream
* configuration.
* For stream_type INPUT, when passed via configure_streams(), the initial
* value of this is 0.
* For all streams passed via configure_streams(), the HAL must write
* over this field with its usage flags.
*
* From Android O, the usage flag for an output stream may be bitwise
* combination of usage flags for multiple consumers, for the purpose of
* sharing one camera stream between those consumers. The HAL must fail
* configure_streams call with -EINVAL if the combined flags cannot be
 * supported due to incompatible buffer format, dataSpace, or other hardware
* limitations.
*/
uint32_t usage;
/**
* The maximum number of buffers the HAL device may need to have dequeued at
* the same time. The HAL device may not have more buffers in-flight from
* this stream than this value.
*/
uint32_t max_buffers;
/**
* A handle to HAL-private information for the stream. Will not be inspected
* by the framework code.
*/
void *priv;
/**
* A field that describes the contents of the buffer. The format and buffer
* dimensions define the memory layout and structure of the stream buffers,
* while dataSpace defines the meaning of the data within the buffer.
*
* For most formats, dataSpace defines the color space of the image data.
* In addition, for some formats, dataSpace indicates whether image- or
* depth-based data is requested. See system/core/include/system/graphics.h
* for details of formats and valid dataSpace values for each format.
*
* Version information:
*
* < CAMERA_DEVICE_API_VERSION_3_3:
*
* Not defined and should not be accessed. dataSpace should be assumed to
* be HAL_DATASPACE_UNKNOWN, and the appropriate color space, etc, should
* be determined from the usage flags and the format.
*
* = CAMERA_DEVICE_API_VERSION_3_3:
*
* Always set by the camera service. HAL must use this dataSpace to
* configure the stream to the correct colorspace, or to select between
* color and depth outputs if supported. The dataspace values are the
* legacy definitions in graphics.h
*
* >= CAMERA_DEVICE_API_VERSION_3_4:
*
* Always set by the camera service. HAL must use this dataSpace to
* configure the stream to the correct colorspace, or to select between
* color and depth outputs if supported. The dataspace values are set
* using the V0 dataspace definitions in graphics.h
*/
android_dataspace_t data_space;
/**
* The required output rotation of the stream, one of
 * the camera3_stream_rotation_t values. This must be inspected by the HAL along
 * with the stream width and height. For example, if the rotation is 90 degrees
 * and the stream width and height are 720 and 1280 respectively, the camera service
 * will supply buffers of size 720x1280, and the HAL should capture a 1280x720 image
 * and rotate the image by 90 degrees counterclockwise. The rotation field is a
 * no-op when the stream type is input. The camera HAL must ignore the rotation
 * field for an input stream.
*
* <= CAMERA_DEVICE_API_VERSION_3_2:
*
* Not defined and must not be accessed. HAL must not apply any rotation
* on output images.
*
* >= CAMERA_DEVICE_API_VERSION_3_3:
*
 * Always set by camera service. HAL must inspect this field during stream
 * configuration and return -EINVAL if HAL cannot perform such rotation.
 * HAL must always support CAMERA3_STREAM_ROTATION_0, so a
 * configure_streams() call must not fail for unsupported rotation if the
 * rotation field of all streams is CAMERA3_STREAM_ROTATION_0.
*
*/
int rotation;
/**
* The physical camera id this stream belongs to.
*
 * <= CAMERA_DEVICE_API_VERSION_3_4:
*
* Not defined and must not be accessed.
*
 * >= CAMERA_DEVICE_API_VERSION_3_5:
*
* Always set by camera service. If the camera device is not a logical
* multi camera, or if the camera is a logical multi camera but the stream
* is not a physical output stream, this field will point to a 0-length
* string.
*
* A logical multi camera is a camera device backed by multiple physical
* cameras that are also exposed to the application. And for a logical
* multi camera, a physical output stream is an output stream specifically
* requested on an underlying physical camera.
*
* For an input stream, this field is guaranteed to be a 0-length string.
*/
const char* physical_camera_id;
/**
* This should be one of the camera3_stream_rotation_t values except for
* CAMERA3_STREAM_ROTATION_180.
 * When set to CAMERA3_STREAM_ROTATION_90 or CAMERA3_STREAM_ROTATION_270, the HAL would
 * crop, rotate the frame by the specified degrees clockwise, and scale it up to the
 * original size.
 * In Chrome OS, it's possible to have a portrait activity run on a landscape screen
 * with a landscape-mounted camera. The activity would show a stretched or rotated
 * preview because it does not expect to receive landscape preview frames. To solve this
 * problem, we ask the HAL to crop, rotate, and scale the frames, and we modify
 * CameraCharacteristics.SENSOR_ORIENTATION accordingly to imitate a portrait camera.
 * Setting it to CAMERA3_STREAM_ROTATION_0 means no crop-rotate-scale would be performed.
 * |cros_rotate_scale_degrees| in all camera3_stream_t of a configure_streams() call must
 * be identical. The HAL should return -EINVAL if the degrees are not the same for all
 * streams.
*/
int crop_rotate_scale_degrees;
/* reserved for future use */
void *reserved[5];
} camera3_stream_t;
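/*
 * A hedged sketch of how a HAL implementation might fill in the HAL-controlled
 * fields of camera3_stream_t during configure_streams(), assuming
 * CAMERA_DEVICE_API_VERSION_3_3 or later (for the rotation field). The usage
 * flags, buffer budget, and rotation support are example assumptions, not
 * requirements of this API:
 *
 *   int example_configure_streams(const struct camera3_device *dev,
 *                                 camera3_stream_configuration_t *stream_list) {
 *       (void) dev;
 *       for (uint32_t i = 0; i < stream_list->num_streams; i++) {
 *           camera3_stream_t *s = stream_list->streams[i];
 *           // Reject rotations this (hypothetical) pipeline cannot perform;
 *           // ROTATION_0 must always be accepted.
 *           if (s->stream_type != CAMERA3_STREAM_INPUT &&
 *               s->rotation != CAMERA3_STREAM_ROTATION_0)
 *               return -EINVAL;
 *           if (s->stream_type == CAMERA3_STREAM_INPUT) {
 *               s->usage |= GRALLOC_USAGE_SW_READ_OFTEN;   // HAL consumes
 *           } else {
 *               s->usage |= GRALLOC_USAGE_HW_CAMERA_WRITE; // HAL produces
 *           }
 *           s->max_buffers = 4;  // example in-flight buffer budget
 *       }
 *       return 0;
 *   }
 */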
/**
* camera3_stream_configuration_t:
*
* A structure of stream definitions, used by configure_streams(). This
* structure defines all the output streams and the reprocessing input
* stream for the current camera use case.
*/
typedef struct camera3_stream_configuration {
/**
* The total number of streams requested by the framework. This includes
* both input and output streams. The number of streams will be at least 1,
* and there will be at least one output-capable stream.
*/
uint32_t num_streams;
/**
* An array of camera stream pointers, defining the input/output
* configuration for the camera HAL device.
*
* At most one input-capable stream may be defined (INPUT or BIDIRECTIONAL)
* in a single configuration.
*
* At least one output-capable stream must be defined (OUTPUT or
* BIDIRECTIONAL).
*/
camera3_stream_t **streams;
/**
* >= CAMERA_DEVICE_API_VERSION_3_3:
*
 * The operation mode of streams in this configuration, one of the values
 * defined in camera3_stream_configuration_mode_t. The HAL can use this
* mode as an indicator to set the stream property (e.g.,
* camera3_stream->max_buffers) appropriately. For example, if the
* configuration is
* CAMERA3_STREAM_CONFIGURATION_CONSTRAINED_HIGH_SPEED_MODE, the HAL may
* want to set aside more buffers for batch mode operation (see
* android.control.availableHighSpeedVideoConfigurations for batch mode
* definition).
*
*/
uint32_t operation_mode;
/**
* >= CAMERA_DEVICE_API_VERSION_3_5:
*
* The session metadata buffer contains the initial values of
* ANDROID_REQUEST_AVAILABLE_SESSION_KEYS. This field is optional
* and camera clients can choose to ignore it, in which case it will
 * be set to NULL. If parameters are present, then the HAL should examine
* the parameter values and configure its internal camera pipeline
* accordingly.
*/
const camera_metadata_t *session_parameters;
} camera3_stream_configuration_t;
/**
* camera3_buffer_status_t:
*
* The current status of a single stream buffer.
*/
typedef enum camera3_buffer_status {
/**
* The buffer is in a normal state, and can be used after waiting on its
* sync fence.
*/
CAMERA3_BUFFER_STATUS_OK = 0,
/**
* The buffer does not contain valid data, and the data in it should not be
* used. The sync fence must still be waited on before reusing the buffer.
*/
CAMERA3_BUFFER_STATUS_ERROR = 1
} camera3_buffer_status_t;
/**
* camera3_stream_buffer_t:
*
* A single buffer from a camera3 stream. It includes a handle to its parent
 * stream, the handle to the gralloc buffer itself, and sync fences.
*
* The buffer does not specify whether it is to be used for input or output;
* that is determined by its parent stream type and how the buffer is passed to
* the HAL device.
*/
typedef struct camera3_stream_buffer {
/**
* The handle of the stream this buffer is associated with
*/
camera3_stream_t *stream;
/**
* The native handle to the buffer
*/
buffer_handle_t *buffer;
/**
* Current state of the buffer, one of the camera3_buffer_status_t
* values. The framework will not pass buffers to the HAL that are in an
* error state. In case a buffer could not be filled by the HAL, it must
* have its status set to CAMERA3_BUFFER_STATUS_ERROR when returned to the
* framework with process_capture_result().
*/
int status;
/**
* The acquire sync fence for this buffer. The HAL must wait on this fence
* fd before attempting to read from or write to this buffer.
*
 * The framework may set this to -1 to indicate that no waiting is necessary
* for this buffer.
*
* When the HAL returns an output buffer to the framework with
* process_capture_result(), the acquire_fence must be set to -1. If the HAL
* never waits on the acquire_fence due to an error in filling a buffer,
* when calling process_capture_result() the HAL must set the release_fence
* of the buffer to be the acquire_fence passed to it by the framework. This
* will allow the framework to wait on the fence before reusing the buffer.
*
* For input buffers, the HAL must not change the acquire_fence field during
* the process_capture_request() call.
*
* >= CAMERA_DEVICE_API_VERSION_3_2:
*
* When the HAL returns an input buffer to the framework with
* process_capture_result(), the acquire_fence must be set to -1. If the HAL
* never waits on input buffer acquire fence due to an error, the sync
* fences should be handled similarly to the way they are handled for output
* buffers.
*/
int acquire_fence;
/**
* The release sync fence for this buffer. The HAL must set this fence when
* returning buffers to the framework, or write -1 to indicate that no
* waiting is required for this buffer.
*
* For the output buffers, the fences must be set in the output_buffers
* array passed to process_capture_result().
*
* <= CAMERA_DEVICE_API_VERSION_3_1:
*
* For the input buffer, the release fence must be set by the
* process_capture_request() call.
*
* >= CAMERA_DEVICE_API_VERSION_3_2:
*
* For the input buffer, the fences must be set in the input_buffer
* passed to process_capture_result().
*
* After signaling the release_fence for this buffer, the HAL
* should not make any further attempts to access this buffer as the
* ownership has been fully transferred back to the framework.
*
* If a fence of -1 was specified then the ownership of this buffer
* is transferred back immediately upon the call of process_capture_result.
*/
int release_fence;
} camera3_stream_buffer_t;
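/*
 * A hedged sketch of the fence protocol above for one output buffer. The
 * sync_wait() call is from Android's libsync and is an assumption about the
 * HAL's environment; write_pixels() is a hypothetical ISP output hook:
 *
 *   int fill_output_buffer(camera3_stream_buffer_t *buf) {
 *       if (buf->acquire_fence != -1) {
 *           if (sync_wait(buf->acquire_fence, 3000) < 0) {  // 3s timeout
 *               // Could not wait: pass the acquire fence back as the release
 *               // fence so the framework can wait before reusing the buffer.
 *               buf->status = CAMERA3_BUFFER_STATUS_ERROR;
 *               buf->release_fence = buf->acquire_fence;
 *               buf->acquire_fence = -1;
 *               return -1;
 *           }
 *           close(buf->acquire_fence);
 *       }
 *       buf->acquire_fence = -1;       // must be -1 when returned
 *       write_pixels(buf->buffer);
 *       buf->status = CAMERA3_BUFFER_STATUS_OK;
 *       buf->release_fence = -1;       // already filled, no fence needed
 *       return 0;
 *   }
 */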
/**
* camera3_stream_buffer_set_t:
*
* The complete set of gralloc buffers for a stream. This structure is given to
* register_stream_buffers() to allow the camera HAL device to register/map/etc
* newly allocated stream buffers.
*
* >= CAMERA_DEVICE_API_VERSION_3_2:
*
* Deprecated (and not used). In particular,
* register_stream_buffers is also deprecated and will never be invoked.
*
*/
typedef struct camera3_stream_buffer_set {
/**
* The stream handle for the stream these buffers belong to
*/
camera3_stream_t *stream;
/**
* The number of buffers in this stream. It is guaranteed to be at least
* stream->max_buffers.
*/
uint32_t num_buffers;
/**
* The array of gralloc buffer handles for this stream. If the stream format
* is set to HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, the camera HAL device
* should inspect the passed-in buffers to determine any platform-private
* pixel format information.
*/
buffer_handle_t **buffers;
} camera3_stream_buffer_set_t;
/**
* camera3_jpeg_blob:
*
* Transport header for compressed JPEG buffers in output streams.
*
* To capture JPEG images, a stream is created using the pixel format
* HAL_PIXEL_FORMAT_BLOB. The buffer size for the stream is calculated by the
* framework, based on the static metadata field android.jpeg.maxSize. Since
* compressed JPEG images are of variable size, the HAL needs to include the
* final size of the compressed image using this structure inside the output
* stream buffer. The JPEG blob ID field must be set to CAMERA3_JPEG_BLOB_ID.
*
 * The transport header should be at the end of the JPEG output stream buffer. That
 * means the jpeg_blob_id must start at byte[buffer_size -
 * sizeof(camera3_jpeg_blob)], where buffer_size is the size of the gralloc buffer.
 * Any HAL using this transport header must account for it in android.jpeg.maxSize.
 * The JPEG data itself starts at the beginning of the buffer and should be
 * jpeg_size bytes long.
*/
typedef struct camera3_jpeg_blob {
uint16_t jpeg_blob_id;
uint32_t jpeg_size;
} camera3_jpeg_blob_t;
enum {
CAMERA3_JPEG_BLOB_ID = 0x00FF
};
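/*
 * A hedged sketch of writing the transport header described above, after the
 * HAL has compressed jpeg_size bytes into the start of the gralloc buffer.
 * The mapped pointer is an assumption of this example (obtained via the
 * gralloc mapping APIs); assumes <string.h>:
 *
 *   void write_jpeg_trailer(uint8_t *mapped_buf, size_t buffer_size,
 *                           uint32_t jpeg_size) {
 *       camera3_jpeg_blob_t blob;
 *       blob.jpeg_blob_id = CAMERA3_JPEG_BLOB_ID;
 *       blob.jpeg_size = jpeg_size;
 *       // The trailer occupies the last sizeof(camera3_jpeg_blob_t) bytes.
 *       memcpy(mapped_buf + buffer_size - sizeof(camera3_jpeg_blob_t),
 *              &blob, sizeof(blob));
 *   }
 */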
/**********************************************************************
*
* Message definitions for the HAL notify() callback.
*
* These definitions are used for the HAL notify callback, to signal
* asynchronous events from the HAL device to the Android framework.
*
*/
/**
* camera3_msg_type:
*
* Indicates the type of message sent, which specifies which member of the
* message union is valid.
*
*/
typedef enum camera3_msg_type {
/**
* An error has occurred. camera3_notify_msg.message.error contains the
* error information.
*/
CAMERA3_MSG_ERROR = 1,
/**
 * The exposure of a given request, or the processing of a reprocess request,
 * has begun. camera3_notify_msg.message.shutter contains the information for
 * the capture.
*/
CAMERA3_MSG_SHUTTER = 2,
/**
* Number of framework message types
*/
CAMERA3_NUM_MESSAGES
} camera3_msg_type_t;
/**
 * Defined error codes for CAMERA3_MSG_ERROR
*/
typedef enum camera3_error_msg_code {
/**
 * A serious failure occurred. No further frames or buffer streams will
* be produced by the device. Device should be treated as closed. The
* client must reopen the device to use it again. The frame_number field
* is unused.
*/
CAMERA3_MSG_ERROR_DEVICE = 1,
/**
* An error has occurred in processing a request. No output (metadata or
* buffers) will be produced for this request. The frame_number field
* specifies which request has been dropped. Subsequent requests are
* unaffected, and the device remains operational.
*/
CAMERA3_MSG_ERROR_REQUEST = 2,
/**
* An error has occurred in producing an output result metadata buffer
* for a request, but output stream buffers for it will still be
* available. Subsequent requests are unaffected, and the device remains
* operational. The frame_number field specifies the request for which
* result metadata won't be available.
*/
CAMERA3_MSG_ERROR_RESULT = 3,
/**
* An error has occurred in placing an output buffer into a stream for a
* request. The frame metadata and other buffers may still be
* available. Subsequent requests are unaffected, and the device remains
* operational. The frame_number field specifies the request for which the
* buffer was dropped, and error_stream contains a pointer to the stream
* that dropped the frame.
*/
CAMERA3_MSG_ERROR_BUFFER = 4,
/**
* Number of error types
*/
CAMERA3_MSG_NUM_ERRORS
} camera3_error_msg_code_t;
/**
* camera3_error_msg_t:
*
* Message contents for CAMERA3_MSG_ERROR
*/
typedef struct camera3_error_msg {
/**
* Frame number of the request the error applies to. 0 if the frame number
* isn't applicable to the error.
*/
uint32_t frame_number;
/**
* Pointer to the stream that had a failure. NULL if the stream isn't
* applicable to the error.
*/
camera3_stream_t *error_stream;
/**
 * The code for this error; one of the camera3_error_msg_code_t enum values.
*/
int error_code;
} camera3_error_msg_t;
/**
* camera3_shutter_msg_t:
*
* Message contents for CAMERA3_MSG_SHUTTER
*/
typedef struct camera3_shutter_msg {
/**
* Frame number of the request that has begun exposure or reprocessing.
*/
uint32_t frame_number;
/**
* Timestamp for the start of capture. For a reprocess request, this must
* be input image's start of capture. This must match the capture result
* metadata's sensor exposure start timestamp.
*/
uint64_t timestamp;
} camera3_shutter_msg_t;
/**
* camera3_notify_msg_t:
*
* The message structure sent to camera3_callback_ops_t.notify()
*/
typedef struct camera3_notify_msg {
/**
* The message type. One of camera3_notify_msg_type, or a private extension.
*/
int type;
union {
/**
* Error message contents. Valid if type is CAMERA3_MSG_ERROR
*/
camera3_error_msg_t error;
/**
* Shutter message contents. Valid if type is CAMERA3_MSG_SHUTTER
*/
camera3_shutter_msg_t shutter;
/**
* Generic message contents. Used to ensure a minimum size for custom
* message types.
*/
uint8_t generic[32];
} message;
} camera3_notify_msg_t;
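/*
 * A hedged sketch of sending the SHUTTER notification defined above at start
 * of exposure; the timestamp source and helper name are example assumptions.
 * Assumes <string.h>:
 *
 *   void notify_shutter(const camera3_callback_ops_t *ops,
 *                       uint32_t frame_number,
 *                       uint64_t start_of_exposure_ns) {
 *       camera3_notify_msg_t msg;
 *       memset(&msg, 0, sizeof(msg));
 *       msg.type = CAMERA3_MSG_SHUTTER;
 *       msg.message.shutter.frame_number = frame_number;
 *       msg.message.shutter.timestamp = start_of_exposure_ns;
 *       ops->notify(ops, &msg);
 *   }
 */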
/**********************************************************************
*
* Capture request/result definitions for the HAL process_capture_request()
* method, and the process_capture_result() callback.
*
*/
/**
* camera3_request_template_t:
*
* Available template types for
* camera3_device_ops.construct_default_request_settings()
*/
typedef enum camera3_request_template {
/**
* Standard camera preview operation with 3A on auto.
*/
CAMERA3_TEMPLATE_PREVIEW = 1,
/**
* Standard camera high-quality still capture with 3A and flash on auto.
*/
CAMERA3_TEMPLATE_STILL_CAPTURE = 2,
/**
* Standard video recording plus preview with 3A on auto, torch off.
*/
CAMERA3_TEMPLATE_VIDEO_RECORD = 3,
/**
 * High-quality still capture while recording video. The application will
 * include preview, video record, and full-resolution YUV or JPEG streams in
 * the request. Must not cause stuttering on the video stream. 3A on auto.
*/
CAMERA3_TEMPLATE_VIDEO_SNAPSHOT = 4,
/**
* Zero-shutter-lag mode. Application will request preview and
* full-resolution data for each frame, and reprocess it to JPEG when a
 * still image is requested by the user. Settings should provide highest-quality
* full-resolution images without compromising preview frame rate. 3A on
* auto.
*/
CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG = 5,
/**
* A basic template for direct application control of capture
* parameters. All automatic control is disabled (auto-exposure, auto-white
* balance, auto-focus), and post-processing parameters are set to preview
* quality. The manual capture parameters (exposure, sensitivity, etc.)
* are set to reasonable defaults, but should be overridden by the
* application depending on the intended use case.
*/
CAMERA3_TEMPLATE_MANUAL = 6,
/* Total number of templates */
CAMERA3_TEMPLATE_COUNT,
/**
* First value for vendor-defined request templates
*/
CAMERA3_VENDOR_TEMPLATE_START = 0x40000000
} camera3_request_template_t;
/**
* camera3_capture_request_t:
*
* A single request for image capture/buffer reprocessing, sent to the Camera
* HAL device by the framework in process_capture_request().
*
* The request contains the settings to be used for this capture, and the set of
* output buffers to write the resulting image data in. It may optionally
* contain an input buffer, in which case the request is for reprocessing that
* input buffer instead of capturing a new image with the camera sensor. The
* capture is identified by the frame_number.
*
* In response, the camera HAL device must send a camera3_capture_result
* structure asynchronously to the framework, using the process_capture_result()
* callback.
*/
typedef struct camera3_capture_request {
/**
* The frame number is an incrementing integer set by the framework to
* uniquely identify this capture. It needs to be returned in the result
* call, and is also used to identify the request in asynchronous
* notifications sent to camera3_callback_ops_t.notify().
*/
uint32_t frame_number;
/**
* The settings buffer contains the capture and processing parameters for
* the request. As a special case, a NULL settings buffer indicates that the
* settings are identical to the most-recently submitted capture request. A
* NULL buffer cannot be used as the first submitted request after a
* configure_streams() call.
*/
const camera_metadata_t *settings;
/**
* The input stream buffer to use for this request, if any.
*
* If input_buffer is NULL, then the request is for a new capture from the
* imager. If input_buffer is valid, the request is for reprocessing the
* image contained in input_buffer.
*
* In the latter case, the HAL must set the release_fence of the
* input_buffer to a valid sync fence, or to -1 if the HAL does not support
* sync, before process_capture_request() returns.
*
* The HAL is required to wait on the acquire sync fence of the input buffer
* before accessing it.
*
* <= CAMERA_DEVICE_API_VERSION_3_1:
*
* Any input buffer included here will have been registered with the HAL
* through register_stream_buffers() before its inclusion in a request.
*
* >= CAMERA_DEVICE_API_VERSION_3_2:
*
* The buffers will not have been pre-registered with the HAL.
* Subsequent requests may reuse buffers, or provide entirely new buffers.
*/
camera3_stream_buffer_t *input_buffer;
/**
* The number of output buffers for this capture request. Must be at least
* 1.
*/
uint32_t num_output_buffers;
/**
* An array of num_output_buffers stream buffers, to be filled with image
* data from this capture/reprocess. The HAL must wait on the acquire fences
* of each stream buffer before writing to them.
*
* The HAL takes ownership of the actual buffer_handle_t entries in
* output_buffers; the framework does not access them until they are
* returned in a camera3_capture_result_t.
*
* <= CAMERA_DEVICE_API_VERSION_3_1:
*
* All the buffers included here will have been registered with the HAL
* through register_stream_buffers() before their inclusion in a request.
*
* >= CAMERA_DEVICE_API_VERSION_3_2:
*
* Any or all of the buffers included here may be brand new in this
 * request (never before seen by the HAL).
*/
const camera3_stream_buffer_t *output_buffers;
/**
 * <= CAMERA_DEVICE_API_VERSION_3_4:
*
* Not defined and must not be accessed.
*
* >= CAMERA_DEVICE_API_VERSION_3_5:
* The number of physical camera settings to be applied. If 'num_physcam_settings'
 * equals 0 or a physical device is not included, then the HAL must decide the
* specific physical device settings based on the default 'settings'.
*/
uint32_t num_physcam_settings;
/**
 * <= CAMERA_DEVICE_API_VERSION_3_4:
*
* Not defined and must not be accessed.
*
* >= CAMERA_DEVICE_API_VERSION_3_5:
* The physical camera ids. The array will contain 'num_physcam_settings'
* camera id strings for all physical devices that have specific settings.
* In case some id is invalid, the process capture request must fail and return
* -EINVAL.
*/
const char **physcam_id;
/**
 * <= CAMERA_DEVICE_API_VERSION_3_4:
*
* Not defined and must not be accessed.
*
* >= CAMERA_DEVICE_API_VERSION_3_5:
* The capture settings for the physical cameras. The array will contain
 * 'num_physcam_settings' settings for individual physical devices. In
* case the settings at some particular index are empty, the process capture
* request must fail and return -EINVAL.
*/
const camera_metadata_t **physcam_settings;
} camera3_capture_request_t;
/**
* camera3_capture_result_t:
*
* The result of a single capture/reprocess by the camera HAL device. This is
* sent to the framework asynchronously with process_capture_result(), in
* response to a single capture request sent to the HAL with
* process_capture_request(). Multiple process_capture_result() calls may be
* performed by the HAL for each request.
*
* Each call, all with the same frame
* number, may contain some subset of the output buffers, and/or the result
* metadata. The metadata may only be provided once for a given frame number;
* all other calls must set the result metadata to NULL.
*
* The result structure contains the output metadata from this capture, and the
* set of output buffers that have been/will be filled for this capture. Each
* output buffer may come with a release sync fence that the framework will wait
* on before reading, in case the buffer has not yet been filled by the HAL.
*
* >= CAMERA_DEVICE_API_VERSION_3_2:
*
* The metadata may be provided multiple times for a single frame number. The
* framework will accumulate together the final result set by combining each
* partial result together into the total result set.
*
* If an input buffer is given in a request, the HAL must return it in one of
* the process_capture_result calls, and the call may be to just return the input
* buffer, without metadata and output buffers; the sync fences must be handled
* the same way they are done for output buffers.
*
*
* Performance considerations:
*
 * Applications will also receive these partial results immediately, so sending
 * partial results is a highly recommended performance optimization: results that
 * are known early in the pipeline can be delivered without waiting out the total
 * pipeline latency.
 *
 * A typical use case might be calculating the AF state halfway through the
 * pipeline; by sending the state back to the framework immediately, we get a
 * 50% improvement in the perceived responsiveness of auto-focus.
*
*/
typedef struct camera3_capture_result {
/**
* The frame number is an incrementing integer set by the framework in the
* submitted request to uniquely identify this capture. It is also used to
* identify the request in asynchronous notifications sent to
* camera3_callback_ops_t.notify().
*/
uint32_t frame_number;
/**
* The result metadata for this capture. This contains information about the
* final capture parameters, the state of the capture and post-processing
* hardware, the state of the 3A algorithms, if enabled, and the output of
* any enabled statistics units.
*
* Only one call to process_capture_result() with a given frame_number may
* include the result metadata. All other calls for the same frame_number
* must set this to NULL.
*
* If there was an error producing the result metadata, result must be an
* empty metadata buffer, and notify() must be called with ERROR_RESULT.
*
* >= CAMERA_DEVICE_API_VERSION_3_2:
*
* Multiple calls to process_capture_result() with a given frame_number
* may include the result metadata.
*
* Partial metadata submitted should not include any metadata key returned
* in a previous partial result for a given frame. Each new partial result
* for that frame must also set a distinct partial_result value.
*
* If notify has been called with ERROR_RESULT, all further partial
* results for that frame are ignored by the framework.
*/
const camera_metadata_t *result;
/**
* The number of output buffers returned in this result structure. Must be
* less than or equal to the matching capture request's count. If this is
* less than the buffer count in the capture request, at least one more call
* to process_capture_result with the same frame_number must be made, to
* return the remaining output buffers to the framework. This may only be
* zero if the structure includes valid result metadata or an input buffer
* is returned in this result.
*/
uint32_t num_output_buffers;
/**
* The handles for the output stream buffers for this capture. They may not
* yet be filled at the time the HAL calls process_capture_result(); the
* framework will wait on the release sync fences provided by the HAL before
* reading the buffers.
*
* The HAL must set the stream buffer's release sync fence to a valid sync
* fd, or to -1 if the buffer has already been filled.
*
* If the HAL encounters an error while processing the buffer, and the
* buffer is not filled, the buffer's status field must be set to
* CAMERA3_BUFFER_STATUS_ERROR. If the HAL did not wait on the acquire fence
* before encountering the error, the acquire fence should be copied into
* the release fence, to allow the framework to wait on the fence before
* reusing the buffer.
*
* The acquire fence must be set to -1 for all output buffers. If
* num_output_buffers is zero, this may be NULL. In that case, at least one
* more process_capture_result call must be made by the HAL to provide the
* output buffers.
*
* When process_capture_result is called with a new buffer for a frame,
* all previous frames' buffers for that corresponding stream must have been
* already delivered (the fences need not have yet been signaled).
*
* >= CAMERA_DEVICE_API_VERSION_3_2:
*
 * Gralloc buffers for a frame may be sent to the framework before the
 * corresponding SHUTTER notify() call.
*
* Performance considerations:
*
* Buffers delivered to the framework will not be dispatched to the
* application layer until a start of exposure timestamp has been received
* via a SHUTTER notify() call. It is highly recommended to
* dispatch that call as early as possible.
*/
const camera3_stream_buffer_t *output_buffers;
/**
* >= CAMERA_DEVICE_API_VERSION_3_2:
*
* The handle for the input stream buffer for this capture. It may not
* yet be consumed at the time the HAL calls process_capture_result(); the
* framework will wait on the release sync fences provided by the HAL before
* reusing the buffer.
*
* The HAL should handle the sync fences the same way they are done for
* output_buffers.
*
* Only one input buffer is allowed to be sent per request. Similarly to
* output buffers, the ordering of returned input buffers must be
* maintained by the HAL.
*
* Performance considerations:
*
* The input buffer should be returned as early as possible. If the HAL
* supports sync fences, it can call process_capture_result to hand it back
* with sync fences being set appropriately. If the sync fences are not
* supported, the buffer can only be returned when it is consumed, which
 * may take a long time; the HAL may choose to copy this input buffer to make
* the buffer return sooner.
*/
const camera3_stream_buffer_t *input_buffer;
/**
* >= CAMERA_DEVICE_API_VERSION_3_2:
*
* In order to take advantage of partial results, the HAL must set the
* static metadata android.request.partialResultCount to the number of
* partial results it will send for each frame.
*
* Each new capture result with a partial result must set
* this field (partial_result) to a distinct inclusive value between
* 1 and android.request.partialResultCount.
*
* HALs not wishing to take advantage of this feature must not
* set an android.request.partialResultCount or partial_result to a value
* other than 1.
*
* This value must be set to 0 when a capture result contains buffers only
* and no metadata.
*/
uint32_t partial_result;
/**
* >= CAMERA_DEVICE_API_VERSION_3_5:
*
 * Specifies the number of physical camera metadata buffers this capture
 * result contains. It must be equal to the number of physical cameras being
 * requested from.
*
* If the current camera device is not a logical multi-camera, or the
* corresponding capture_request doesn't request on any physical camera,
* this field must be 0.
*/
uint32_t num_physcam_metadata;
/**
* >= CAMERA_DEVICE_API_VERSION_3_5:
*
* An array of strings containing the physical camera ids for the returned
* physical camera metadata. The length of the array is
* num_physcam_metadata.
*/
const char **physcam_ids;
/**
* >= CAMERA_DEVICE_API_VERSION_3_5:
*
* The array of physical camera metadata for the physical cameras being
* requested upon. This array should have a 1-to-1 mapping with the
* physcam_ids. The length of the array is num_physcam_metadata.
*/
const camera_metadata_t **physcam_metadata;
} camera3_capture_result_t;
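/*
 * A hedged sketch of returning one capture in two process_capture_result()
 * calls (>= CAMERA_DEVICE_API_VERSION_3_2): an early metadata-only partial
 * result carrying the 3A state, followed by a final call with the remaining
 * metadata and the output buffers. The metadata buffers and buffer array are
 * assumed to be built elsewhere; android.request.partialResultCount == 2 is
 * an example assumption:
 *
 *   camera3_capture_result_t early = {
 *       .frame_number = frame_number,
 *       .result = early_3a_metadata,   // e.g. AF/AE state keys only
 *       .num_output_buffers = 0,
 *       .output_buffers = NULL,
 *       .partial_result = 1,
 *   };
 *   ops->process_capture_result(ops, &early);
 *
 *   camera3_capture_result_t final_result = {
 *       .frame_number = frame_number,
 *       .result = final_metadata,      // keys not sent in the early result
 *       .num_output_buffers = num_bufs,
 *       .output_buffers = out_bufs,    // release fences set appropriately
 *       .partial_result = 2,           // == android.request.partialResultCount
 *   };
 *   ops->process_capture_result(ops, &final_result);
 */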
/**********************************************************************
*
* Callback methods for the HAL to call into the framework.
*
 * These methods are used to return metadata and image buffers for completed
 * or failed captures, and to notify the framework of asynchronous events such
* as errors.
*
* The framework will not call back into the HAL from within these callbacks,
* and these calls will not block for extended periods.
*
*/
typedef struct camera3_callback_ops {
/**
* process_capture_result:
*
* Send results from a completed capture to the framework.
* process_capture_result() may be invoked multiple times by the HAL in
* response to a single capture request. This allows, for example, the
* metadata and low-resolution buffers to be returned in one call, and
* post-processed JPEG buffers in a later call, once it is available. Each
* call must include the frame number of the request it is returning
* metadata or buffers for.
*
* A component (buffer or metadata) of the complete result may only be
* included in one process_capture_result call. A buffer for each stream,
* and the result metadata, must be returned by the HAL for each request in
* one of the process_capture_result calls, even in case of errors producing
* some of the output. A call to process_capture_result() with neither
 * output buffers nor result metadata is not allowed.
*
* The order of returning metadata and buffers for a single result does not
* matter, but buffers for a given stream must be returned in FIFO order. So
* the buffer for request 5 for stream A must always be returned before the
* buffer for request 6 for stream A. This also applies to the result
* metadata; the metadata for request 5 must be returned before the metadata
* for request 6.
*
* However, different streams are independent of each other, so it is
* acceptable and expected that the buffer for request 5 for stream A may be
* returned after the buffer for request 6 for stream B is. And it is
* acceptable that the result metadata for request 6 for stream B is
* returned before the buffer for request 5 for stream A is.
*
 * The HAL retains ownership of the result structure, which only needs to be
* valid to access during this call. The framework will copy whatever it
* needs before this call returns.
*
* The output buffers do not need to be filled yet; the framework will wait
* on the stream buffer release sync fence before reading the buffer
* data. Therefore, this method should be called by the HAL as soon as
 * possible, even if some or all of the output buffers are still being
 * filled. The HAL must include valid release sync fences into each
* output_buffers stream buffer entry, or -1 if that stream buffer is
* already filled.
*
* If the result buffer cannot be constructed for a request, the HAL should
* return an empty metadata buffer, but still provide the output buffers and
* their sync fences. In addition, notify() must be called with an
* ERROR_RESULT message.
*
* If an output buffer cannot be filled, its status field must be set to
 * STATUS_ERROR. In addition, notify() must be called with an ERROR_BUFFER
* message.
*
* If the entire capture has failed, then this method still needs to be
* called to return the output buffers to the framework. All the buffer
* statuses should be STATUS_ERROR, and the result metadata should be an
 * empty buffer. In addition, notify() must be called with an ERROR_REQUEST
* message. In this case, individual ERROR_RESULT/ERROR_BUFFER messages
* should not be sent.
*
* Performance requirements:
*
 * This is a non-blocking call. The framework will return from this call in 5ms.
*
* The pipeline latency (see S7 for definition) should be less than or equal to
* 4 frame intervals, and must be less than or equal to 8 frame intervals.
*
*/
void (*process_capture_result)(const struct camera3_callback_ops *,
const camera3_capture_result_t *result);
/**
* notify:
*
 * Asynchronous notification callback from the HAL, fired for various
 * reasons. Only for information independent of frame capture, or that
 * requires specific timing. The ownership of the message structure remains
* with the HAL, and the msg only needs to be valid for the duration of this
* call.
*
* Multiple threads may call notify() simultaneously.
*
* <= CAMERA_DEVICE_API_VERSION_3_1:
*
* The notification for the start of exposure for a given request must be
* sent by the HAL before the first call to process_capture_result() for
* that request is made.
*
* >= CAMERA_DEVICE_API_VERSION_3_2:
*
* Buffers delivered to the framework will not be dispatched to the
* application layer until a start of exposure timestamp (or input image's
* start of exposure timestamp for a reprocess request) has been received
* via a SHUTTER notify() call. It is highly recommended to dispatch this
* call as early as possible.
*
* ------------------------------------------------------------------------
* Performance requirements:
*
 * This is a non-blocking call. The framework will return from this call in 5ms.
*/
void (*notify)(const struct camera3_callback_ops *,
const camera3_notify_msg_t *msg);
} camera3_callback_ops_t;
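/*
 * Illustrative sketch (not normative, not part of the HAL API): the order in
 * which a HAL might invoke these callbacks to report one completed capture on
 * a >= CAMERA_DEVICE_API_VERSION_3_2 device. The hal_complete_capture()
 * wrapper and its parameters are hypothetical; the callback signatures and
 * message fields used are the ones defined in this header.
 *
 *    static void hal_complete_capture(const camera3_callback_ops_t *cb,
 *                                     uint32_t frame_number,
 *                                     uint64_t start_of_exposure_ns,
 *                                     const camera_metadata_t *metadata,
 *                                     const camera3_stream_buffer_t *buffers,
 *                                     uint32_t num_buffers)
 *    {
 *        // The SHUTTER notification must be sent before any buffers or
 *        // metadata for this frame are dispatched to the application.
 *        camera3_notify_msg_t msg;
 *        memset(&msg, 0, sizeof(msg));
 *        msg.type = CAMERA3_MSG_SHUTTER;
 *        msg.message.shutter.frame_number = frame_number;
 *        msg.message.shutter.timestamp = start_of_exposure_ns;
 *        cb->notify(cb, &msg);
 *
 *        // Metadata and buffers may be split across several calls; here they
 *        // are returned together. The framework copies what it needs before
 *        // process_capture_result() returns.
 *        camera3_capture_result_t result;
 *        memset(&result, 0, sizeof(result));
 *        result.frame_number = frame_number;
 *        result.result = metadata;
 *        result.num_output_buffers = num_buffers;
 *        result.output_buffers = buffers;
 *        result.partial_result = 1;   // single, complete metadata packet
 *        cb->process_capture_result(cb, &result);
 *    }
 */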
/**********************************************************************
*
* Camera device operations
*
*/
typedef struct camera3_device_ops {
/**
* initialize:
*
* One-time initialization to pass framework callback function pointers to
* the HAL. Will be called once after a successful open() call, before any
* other functions are called on the camera3_device_ops structure.
*
* Performance requirements:
*
* This should be a non-blocking call. The HAL should return from this call
* in 5ms, and must return from this call in 10ms.
*
* Return values:
*
* 0: On successful initialization
*
* -ENODEV: If initialization fails. Only close() can be called successfully
* by the framework after this.
*/
int (*initialize)(const struct camera3_device *,
const camera3_callback_ops_t *callback_ops);
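    /*
     * Illustrative sketch (not normative): the framework-side call order
     * around initialize(). The fwk_process_capture_result/fwk_notify handler
     * names and the already-opened `dev` pointer are hypothetical.
     *
     *    static const camera3_callback_ops_t callbacks = {
     *        .process_capture_result = fwk_process_capture_result,
     *        .notify                 = fwk_notify,
     *    };
     *
     *    camera3_device_t *dev = ...;   // from a successful open() call
     *    if (dev->ops->initialize(dev, &callbacks) != 0) {
     *        // -ENODEV: only close() may be called from here on.
     *        dev->common.close(&dev->common);
     *    }
     */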
/**********************************************************************
* Stream management
*/
/**
* configure_streams:
*
* CAMERA_DEVICE_API_VERSION_3_0 only:
*
* Reset the HAL camera device processing pipeline and set up new input and
* output streams. This call replaces any existing stream configuration with
* the streams defined in the stream_list. This method will be called at
* least once after initialize() before a request is submitted with
* process_capture_request().
*
* The stream_list must contain at least one output-capable stream, and may
* not contain more than one input-capable stream.
*
* The stream_list may contain streams that are also in the currently-active
 * set of streams (from the previous call to configure_streams()). These
* streams will already have valid values for usage, max_buffers, and the
* private pointer.
*
* If such a stream has already had its buffers registered,
* register_stream_buffers() will not be called again for the stream, and
* buffers from the stream can be immediately included in input requests.
*
* If the HAL needs to change the stream configuration for an existing
* stream due to the new configuration, it may rewrite the values of usage
* and/or max_buffers during the configure call.
*
* The framework will detect such a change, and will then reallocate the
* stream buffers, and call register_stream_buffers() again before using
* buffers from that stream in a request.
*
* If a currently-active stream is not included in stream_list, the HAL may
* safely remove any references to that stream. It will not be reused in a
 * later configure_streams() call by the framework, and all the gralloc buffers for
* it will be freed after the configure_streams() call returns.
*
* The stream_list structure is owned by the framework, and may not be
* accessed once this call completes. The address of an individual
* camera3_stream_t structure will remain valid for access by the HAL until
 * the end of the first configure_streams() call which no longer includes
* that camera3_stream_t in the stream_list argument. The HAL may not change
* values in the stream structure outside of the private pointer, except for
* the usage and max_buffers members during the configure_streams() call
* itself.
*
 * If the stream is new, the usage, max_buffers, and private pointer fields
* of the stream structure will all be set to 0. The HAL device must set
* these fields before the configure_streams() call returns. These fields
* are then used by the framework and the platform gralloc module to
* allocate the gralloc buffers for each stream.
*
* Before such a new stream can have its buffers included in a capture
* request, the framework will call register_stream_buffers() with that
* stream. However, the framework is not required to register buffers for
* _all_ streams before submitting a request. This allows for quick startup
* of (for example) a preview stream, with allocation for other streams
* happening later or concurrently.
*
* ------------------------------------------------------------------------
* CAMERA_DEVICE_API_VERSION_3_1 only:
*
* Reset the HAL camera device processing pipeline and set up new input and
* output streams. This call replaces any existing stream configuration with
* the streams defined in the stream_list. This method will be called at
* least once after initialize() before a request is submitted with
* process_capture_request().
*
* The stream_list must contain at least one output-capable stream, and may
* not contain more than one input-capable stream.
*
* The stream_list may contain streams that are also in the currently-active
 * set of streams (from the previous call to configure_streams()). These
* streams will already have valid values for usage, max_buffers, and the
* private pointer.
*
* If such a stream has already had its buffers registered,
* register_stream_buffers() will not be called again for the stream, and
* buffers from the stream can be immediately included in input requests.
*
* If the HAL needs to change the stream configuration for an existing
* stream due to the new configuration, it may rewrite the values of usage
* and/or max_buffers during the configure call.
*
* The framework will detect such a change, and will then reallocate the
* stream buffers, and call register_stream_buffers() again before using
* buffers from that stream in a request.
*
* If a currently-active stream is not included in stream_list, the HAL may
* safely remove any references to that stream. It will not be reused in a
 * later configure_streams() call by the framework, and all the gralloc buffers for
* it will be freed after the configure_streams() call returns.
*
* The stream_list structure is owned by the framework, and may not be
* accessed once this call completes. The address of an individual
* camera3_stream_t structure will remain valid for access by the HAL until
 * the end of the first configure_streams() call which no longer includes
* that camera3_stream_t in the stream_list argument. The HAL may not change
* values in the stream structure outside of the private pointer, except for
* the usage and max_buffers members during the configure_streams() call
* itself.
*
 * If the stream is new, the max_buffers and private pointer fields of the
 * stream structure will both be set to 0. The usage will be set to the
* consumer usage flags. The HAL device must set these fields before the
* configure_streams() call returns. These fields are then used by the
* framework and the platform gralloc module to allocate the gralloc
* buffers for each stream.
*
* Before such a new stream can have its buffers included in a capture
* request, the framework will call register_stream_buffers() with that
* stream. However, the framework is not required to register buffers for
* _all_ streams before submitting a request. This allows for quick startup
* of (for example) a preview stream, with allocation for other streams
* happening later or concurrently.
*
* ------------------------------------------------------------------------
* >= CAMERA_DEVICE_API_VERSION_3_2:
*
* Reset the HAL camera device processing pipeline and set up new input and
* output streams. This call replaces any existing stream configuration with
* the streams defined in the stream_list. This method will be called at
* least once after initialize() before a request is submitted with
* process_capture_request().
*
* The stream_list must contain at least one output-capable stream, and may
* not contain more than one input-capable stream.
*
* The stream_list may contain streams that are also in the currently-active
 * set of streams (from the previous call to configure_streams()). These
* streams will already have valid values for usage, max_buffers, and the
* private pointer.
*
* If the HAL needs to change the stream configuration for an existing
* stream due to the new configuration, it may rewrite the values of usage
* and/or max_buffers during the configure call.
*
* The framework will detect such a change, and may then reallocate the
* stream buffers before using buffers from that stream in a request.
*
* If a currently-active stream is not included in stream_list, the HAL may
* safely remove any references to that stream. It will not be reused in a
 * later configure_streams() call by the framework, and all the gralloc buffers for
* it will be freed after the configure_streams() call returns.
*
* The stream_list structure is owned by the framework, and may not be
* accessed once this call completes. The address of an individual
* camera3_stream_t structure will remain valid for access by the HAL until
 * the end of the first configure_streams() call which no longer includes
* that camera3_stream_t in the stream_list argument. The HAL may not change
* values in the stream structure outside of the private pointer, except for
* the usage and max_buffers members during the configure_streams() call
* itself.
*
 * If the stream is new, the max_buffers and private pointer fields of the
 * stream structure will both be set to 0. The usage will be set to the
* consumer usage flags. The HAL device must set these fields before the
* configure_streams() call returns. These fields are then used by the
* framework and the platform gralloc module to allocate the gralloc
* buffers for each stream.
*
* Newly allocated buffers may be included in a capture request at any time
* by the framework. Once a gralloc buffer is returned to the framework
* with process_capture_result (and its respective release_fence has been
* signaled) the framework may free or reuse it at any time.
*
* ------------------------------------------------------------------------
*
* Preconditions:
*
* The framework will only call this method when no captures are being
* processed. That is, all results have been returned to the framework, and
* all in-flight input and output buffers have been returned and their
* release sync fences have been signaled by the HAL. The framework will not
* submit new requests for capture while the configure_streams() call is
* underway.
*
* Postconditions:
*
 * The HAL device must configure itself to provide the maximum possible output
* frame rate given the sizes and formats of the output streams, as
* documented in the camera device's static metadata.
*
* Performance requirements:
*
* This call is expected to be heavyweight and possibly take several hundred
* milliseconds to complete, since it may require resetting and
* reconfiguring the image sensor and the camera processing pipeline.
* Nevertheless, the HAL device should attempt to minimize the
* reconfiguration delay to minimize the user-visible pauses during
* application operational mode changes (such as switching from still
* capture to video recording).
*
* The HAL should return from this call in 500ms, and must return from this
* call in 1000ms.
*
* Return values:
*
* 0: On successful stream configuration
*
* -EINVAL: If the requested stream configuration is invalid. Some examples
* of invalid stream configurations include:
*
* - Including more than 1 input-capable stream (INPUT or
* BIDIRECTIONAL)
*
* - Not including any output-capable streams (OUTPUT or
* BIDIRECTIONAL)
*
* - Including streams with unsupported formats, or an unsupported
* size for that format.
*
* - Including too many output streams of a certain format.
*
* - Unsupported rotation configuration (only applies to
* devices with version >= CAMERA_DEVICE_API_VERSION_3_3)
*
* - Stream sizes/formats don't satisfy the
* camera3_stream_configuration_t->operation_mode requirements for non-NORMAL mode,
* or the requested operation_mode is not supported by the HAL.
* (only applies to devices with version >= CAMERA_DEVICE_API_VERSION_3_3)
*
* Note that the framework submitting an invalid stream
* configuration is not normal operation, since stream
* configurations are checked before configure. An invalid
* configuration means that a bug exists in the framework code, or
* there is a mismatch between the HAL's static metadata and the
* requirements on streams.
*
* -ENODEV: If there has been a fatal error and the device is no longer
* operational. Only close() can be called successfully by the
* framework after this error is returned.
*/
int (*configure_streams)(const struct camera3_device *,
camera3_stream_configuration_t *stream_list);
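    /*
     * Illustrative sketch (not normative): the minimal per-stream work a
     * >= CAMERA_DEVICE_API_VERSION_3_2 HAL might do inside
     * configure_streams(). MAX_INFLIGHT_BUFFERS and
     * HAL_CAMERA_WRITE_USAGE_FLAGS are placeholder names; a real HAL derives
     * both from its pipeline depth and gralloc configuration.
     *
     *    static int hal_configure_streams(const struct camera3_device *dev,
     *                                     camera3_stream_configuration_t *list)
     *    {
     *        if (list == NULL || list->num_streams == 0)
     *            return -EINVAL;
     *        for (uint32_t i = 0; i < list->num_streams; i++) {
     *            camera3_stream_t *s = list->streams[i];
     *            // Reject unsupported formats/sizes with -EINVAL here.
     *            if (s->stream_type == CAMERA3_STREAM_OUTPUT)
     *                s->usage |= HAL_CAMERA_WRITE_USAGE_FLAGS;
     *            s->max_buffers = MAX_INFLIGHT_BUFFERS;
     *        }
     *        return 0;
     *    }
     */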
/**
* register_stream_buffers:
*
* >= CAMERA_DEVICE_API_VERSION_3_2:
*
* DEPRECATED. This will not be called and must be set to NULL.
*
* <= CAMERA_DEVICE_API_VERSION_3_1:
*
* Register buffers for a given stream with the HAL device. This method is
* called by the framework after a new stream is defined by
* configure_streams, and before buffers from that stream are included in a
* capture request. If the same stream is listed in a subsequent
* configure_streams() call, register_stream_buffers will _not_ be called
* again for that stream.
*
* The framework does not need to register buffers for all configured
* streams before it submits the first capture request. This allows quick
* startup for preview (or similar use cases) while other streams are still
* being allocated.
*
* This method is intended to allow the HAL device to map or otherwise
* prepare the buffers for later use. The buffers passed in will already be
* locked for use. At the end of the call, all the buffers must be ready to
* be returned to the stream. The buffer_set argument is only valid for the
* duration of this call.
*
* If the stream format was set to HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
* the camera HAL should inspect the passed-in buffers here to determine any
* platform-private pixel format information.
*
* Performance requirements:
*
* This should be a non-blocking call. The HAL should return from this call
* in 1ms, and must return from this call in 5ms.
*
* Return values:
*
* 0: On successful registration of the new stream buffers
*
* -EINVAL: If the stream_buffer_set does not refer to a valid active
* stream, or if the buffers array is invalid.
*
* -ENOMEM: If there was a failure in registering the buffers. The framework
* must consider all the stream buffers to be unregistered, and can
* try to register again later.
*
* -ENODEV: If there is a fatal error, and the device is no longer
* operational. Only close() can be called successfully by the
* framework after this error is returned.
*/
int (*register_stream_buffers)(const struct camera3_device *,
const camera3_stream_buffer_set_t *buffer_set);
/**********************************************************************
* Request creation and submission
*/
/**
* construct_default_request_settings:
*
* Create capture settings for standard camera use cases.
*
* The device must return a settings buffer that is configured to meet the
* requested use case, which must be one of the CAMERA3_TEMPLATE_*
* enums. All request control fields must be included.
*
* The HAL retains ownership of this structure, but the pointer to the
* structure must be valid until the device is closed. The framework and the
* HAL may not modify the buffer once it is returned by this call. The same
* buffer may be returned for subsequent calls for the same template, or for
* other templates.
*
* Performance requirements:
*
* This should be a non-blocking call. The HAL should return from this call
* in 1ms, and must return from this call in 5ms.
*
* Return values:
*
* Valid metadata: On successful creation of a default settings
* buffer.
*
* NULL: In case of a fatal error. After this is returned, only
* the close() method can be called successfully by the
* framework.
*/
const camera_metadata_t* (*construct_default_request_settings)(
const struct camera3_device *,
int type);
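    /*
     * Illustrative sketch (not normative): fetching a template. The
     * CAMERA3_TEMPLATE_* values are defined earlier in this header; the
     * returned buffer remains owned by the HAL and must not be modified or
     * freed by the caller.
     *
     *    const camera_metadata_t *preview_template =
     *        dev->ops->construct_default_request_settings(
     *            dev, CAMERA3_TEMPLATE_PREVIEW);
     *    if (preview_template == NULL) {
     *        // Fatal error: only close() may be called from here on.
     *    }
     */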
/**
* process_capture_request:
*
* Send a new capture request to the HAL. The HAL should not return from
* this call until it is ready to accept the next request to process. Only
* one call to process_capture_request() will be made at a time by the
* framework, and the calls will all be from the same thread. The next call
* to process_capture_request() will be made as soon as a new request and
* its associated buffers are available. In a normal preview scenario, this
* means the function will be called again by the framework almost
* instantly.
*
* The actual request processing is asynchronous, with the results of
* capture being returned by the HAL through the process_capture_result()
* call. This call requires the result metadata to be available, but output
* buffers may simply provide sync fences to wait on. Multiple requests are
* expected to be in flight at once, to maintain full output frame rate.
*
* The framework retains ownership of the request structure. It is only
* guaranteed to be valid during this call. The HAL device must make copies
* of the information it needs to retain for the capture processing. The HAL
* is responsible for waiting on and closing the buffers' fences and
* returning the buffer handles to the framework.
*
* The HAL must write the file descriptor for the input buffer's release
* sync fence into input_buffer->release_fence, if input_buffer is not
* NULL. If the HAL returns -1 for the input buffer release sync fence, the
* framework is free to immediately reuse the input buffer. Otherwise, the
* framework will wait on the sync fence before refilling and reusing the
* input buffer.
*
* >= CAMERA_DEVICE_API_VERSION_3_2:
*
* The input/output buffers provided by the framework in each request
 * may be brand new (never before seen by the HAL).
*
* ------------------------------------------------------------------------
* Performance considerations:
*
* Handling a new buffer should be extremely lightweight and there should be
* no frame rate degradation or frame jitter introduced.
*
* This call must return fast enough to ensure that the requested frame
* rate can be sustained, especially for streaming cases (post-processing
 * quality settings set to FAST). The HAL should return from this call in 1
* frame interval, and must return from this call in 4 frame intervals.
*
* Return values:
*
* 0: On a successful start to processing the capture request
*
* -EINVAL: If the input is malformed (the settings are NULL when not
* allowed, invalid physical camera settings,
* there are 0 output buffers, etc) and capture processing
* cannot start. Failures during request processing should be
* handled by calling camera3_callback_ops_t.notify(). In case of
* this error, the framework will retain responsibility for the
* stream buffers' fences and the buffer handles; the HAL should
* not close the fences or return these buffers with
* process_capture_result.
*
* -ENODEV: If the camera device has encountered a serious error. After this
* error is returned, only the close() method can be successfully
* called by the framework.
*
*/
int (*process_capture_request)(const struct camera3_device *,
camera3_capture_request_t *request);
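    /*
     * Illustrative sketch (not normative): a minimal framework-side request
     * with one output buffer and no reprocess input. preview_stream,
     * gralloc_handle, next_frame_number, and preview_template are
     * placeholders for framework-owned state.
     *
     *    camera3_stream_buffer_t out;
     *    memset(&out, 0, sizeof(out));
     *    out.stream = preview_stream;       // from the configured stream set
     *    out.buffer = &gralloc_handle;      // placeholder buffer handle
     *    out.acquire_fence = -1;            // buffer ready for immediate use
     *    out.release_fence = -1;
     *
     *    camera3_capture_request_t req;
     *    memset(&req, 0, sizeof(req));
     *    req.frame_number = next_frame_number++;
     *    req.settings = preview_template;   // from construct_default_request_settings()
     *    req.input_buffer = NULL;
     *    req.num_output_buffers = 1;
     *    req.output_buffers = &out;
     *
     *    int res = dev->ops->process_capture_request(dev, &req);
     */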
/**********************************************************************
* Miscellaneous methods
*/
/**
* get_metadata_vendor_tag_ops:
*
* Get methods to query for vendor extension metadata tag information. The
* HAL should fill in all the vendor tag operation methods, or leave ops
* unchanged if no vendor tags are defined.
*
* The definition of vendor_tag_query_ops_t can be found in
* system/media/camera/include/system/camera_metadata.h.
*
* >= CAMERA_DEVICE_API_VERSION_3_2:
* DEPRECATED. This function has been deprecated and should be set to
* NULL by the HAL. Please implement get_vendor_tag_ops in camera_common.h
* instead.
*/
void (*get_metadata_vendor_tag_ops)(const struct camera3_device*,
vendor_tag_query_ops_t* ops);
/**
* dump:
*
* Print out debugging state for the camera device. This will be called by
* the framework when the camera service is asked for a debug dump, which
* happens when using the dumpsys tool, or when capturing a bugreport.
*
* The passed-in file descriptor can be used to write debugging text using
* dprintf() or write(). The text should be in ASCII encoding only.
*
* Performance requirements:
*
* This must be a non-blocking call. The HAL should return from this call
 * in 1ms, and must return from this call in 10ms. This call must avoid
* deadlocks, as it may be called at any point during camera operation.
* Any synchronization primitives used (such as mutex locks or semaphores)
* should be acquired with a timeout.
*/
void (*dump)(const struct camera3_device *, int fd);
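    /*
     * Illustrative sketch (not normative): a dump() implementation that
     * follows the deadlock-avoidance guidance by acquiring its state lock
     * with a timeout. The hal_state_* helpers are hypothetical.
     *
     *    static void hal_dump(const struct camera3_device *dev, int fd)
     *    {
     *        dprintf(fd, "camera3 HAL state:\n");
     *        if (hal_state_trylock_timeout_ms(5) == 0) {
     *            dprintf(fd, "  in-flight requests: %u\n", hal_inflight_count());
     *            hal_state_unlock();
     *        } else {
     *            dprintf(fd, "  state lock busy; skipping detailed dump\n");
     *        }
     *    }
     */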
/**
* flush:
*
* Flush all currently in-process captures and all buffers in the pipeline
* on the given device. The framework will use this to dump all state as
* quickly as possible in order to prepare for a configure_streams() call.
*
* No buffers are required to be successfully returned, so every buffer
* held at the time of flush() (whether successfully filled or not) may be
* returned with CAMERA3_BUFFER_STATUS_ERROR. Note the HAL is still allowed
* to return valid (CAMERA3_BUFFER_STATUS_OK) buffers during this call,
* provided they are successfully filled.
*
* All requests currently in the HAL are expected to be returned as soon as
* possible. Not-in-process requests should return errors immediately. Any
* interruptible hardware blocks should be stopped, and any uninterruptible
* blocks should be waited on.
*
* flush() may be called concurrently to process_capture_request(), with the expectation that
* process_capture_request will return quickly and the request submitted in that
* process_capture_request call is treated like all other in-flight requests. Due to
* concurrency issues, it is possible that from the HAL's point of view, a
* process_capture_request() call may be started after flush has been invoked but has not
* returned yet. If such a call happens before flush() returns, the HAL should treat the new
* capture request like other in-flight pending requests (see #4 below).
*
 * More specifically, the HAL must follow the requirements below for various cases:
*
 * 1. For captures that are too late for the HAL to cancel/stop and that will be
 *    completed normally by the HAL: the HAL can send shutter/notify and
 *    process_capture_result with buffers as normal.
*
* 2. For pending requests that have not done any processing, the HAL must call notify
* CAMERA3_MSG_ERROR_REQUEST, and return all the output buffers with
* process_capture_result in the error state (CAMERA3_BUFFER_STATUS_ERROR).
* The HAL must not place the release fence into an error state, instead,
* the release fences must be set to the acquire fences passed by the framework,
* or -1 if they have been waited on by the HAL already. This is also the path
* to follow for any captures for which the HAL already called notify() with
* CAMERA3_MSG_SHUTTER but won't be producing any metadata/valid buffers for.
* After CAMERA3_MSG_ERROR_REQUEST, for a given frame, only process_capture_results with
 *      buffers in CAMERA3_BUFFER_STATUS_ERROR are allowed. No further notify() calls or
 *      process_capture_result calls with non-null metadata are allowed.
*
 * 3. For partially completed pending requests that will not have all the output
 *    buffers or may be missing some metadata, the HAL should follow the rules below:
*
* 3.1. Call notify with CAMERA3_MSG_ERROR_RESULT if some of the expected result
* metadata (i.e. one or more partial metadata) won't be available for the capture.
*
* 3.2. Call notify with CAMERA3_MSG_ERROR_BUFFER for every buffer that won't
* be produced for the capture.
*
* 3.3 Call notify with CAMERA3_MSG_SHUTTER with the capture timestamp before
* any buffers/metadata are returned with process_capture_result.
*
* 3.4 For captures that will produce some results, the HAL must not call
* CAMERA3_MSG_ERROR_REQUEST, since that indicates complete failure.
*
* 3.5. Valid buffers/metadata should be passed to the framework as normal.
*
* 3.6. Failed buffers should be returned to the framework as described for case 2.
* But failed buffers do not have to follow the strict ordering valid buffers do,
* and may be out-of-order with respect to valid buffers. For example, if buffers
* A, B, C, D, E are sent, D and E are failed, then A, E, B, D, C is an acceptable
* return order.
*
 * 3.7. For fully-missing metadata, calling CAMERA3_MSG_ERROR_RESULT is sufficient; there
 *      is no need to call process_capture_result with NULL metadata or equivalent.
*
* 4. If a flush() is invoked while a process_capture_request() invocation is active, that
* process call should return as soon as possible. In addition, if a process_capture_request()
* call is made after flush() has been invoked but before flush() has returned, the
* capture request provided by the late process_capture_request call should be treated like
* a pending request in case #2 above.
*
* flush() should only return when there are no more outstanding buffers or
* requests left in the HAL. The framework may call configure_streams (as
* the HAL state is now quiesced) or may issue new requests.
*
* Note that it's sufficient to only support fully-succeeded and fully-failed result cases.
 * However, it is highly desirable to support the partial failure cases as well, as doing
 * so can improve the overall performance of the flush call.
*
* Performance requirements:
*
* The HAL should return from this call in 100ms, and must return from this
 * call in 1000ms. This call must also not block for longer than the pipeline
 * latency (see S7 for the definition).
*
* Version information:
*
 * Only available if the device version is >= CAMERA_DEVICE_API_VERSION_3_1.
*
* Return values:
*
* 0: On a successful flush of the camera HAL.
*
* -EINVAL: If the input is malformed (the device is not valid).
*
* -ENODEV: If the camera device has encountered a serious error. After this
* error is returned, only the close() method can be successfully
* called by the framework.
*/
int (*flush)(const struct camera3_device *);
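    /*
     * Illustrative sketch (not normative): failing one untouched pending
     * request during flush(), per case 2 above. The `pend` bookkeeping
     * record and `cb` callback pointer are hypothetical HAL-internal state.
     *
     *    camera3_notify_msg_t msg;
     *    memset(&msg, 0, sizeof(msg));
     *    msg.type = CAMERA3_MSG_ERROR;
     *    msg.message.error.frame_number = pend->frame_number;
     *    msg.message.error.error_stream = NULL;
     *    msg.message.error.error_code = CAMERA3_MSG_ERROR_REQUEST;
     *    cb->notify(cb, &msg);
     *
     *    // Return every buffer in the error state; release fences take the
     *    // acquire fences passed by the framework (or -1 if already waited on).
     *    for (uint32_t i = 0; i < pend->num_output_buffers; i++) {
     *        pend->output_buffers[i].status = CAMERA3_BUFFER_STATUS_ERROR;
     *        pend->output_buffers[i].release_fence =
     *            pend->output_buffers[i].acquire_fence;
     *        pend->output_buffers[i].acquire_fence = -1;
     *    }
     *
     *    camera3_capture_result_t result;
     *    memset(&result, 0, sizeof(result));
     *    result.frame_number = pend->frame_number;
     *    result.result = NULL;              // no metadata for a failed request
     *    result.num_output_buffers = pend->num_output_buffers;
     *    result.output_buffers = pend->output_buffers;
     *    cb->process_capture_result(cb, &result);
     */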
/* reserved for future use */
void *reserved[8];
} camera3_device_ops_t;
/**********************************************************************
*
* Camera device definition
*
*/
typedef struct camera3_device {
/**
* common.version must equal CAMERA_DEVICE_API_VERSION_3_0 to identify this
* device as implementing version 3.0 of the camera device HAL.
*
* Performance requirements:
*
* Camera open (common.module->common.methods->open) should return in 200ms, and must return
* in 500ms.
* Camera close (common.close) should return in 200ms, and must return in 500ms.
*
*/
hw_device_t common;
camera3_device_ops_t *ops;
void *priv;
} camera3_device_t;
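/*
 * Illustrative sketch (not normative): opening and closing a camera3 device
 * through the HAL module interface, assuming a camera_module_t pointer
 * `module` obtained elsewhere and camera id "0".
 *
 *    hw_device_t *hw_dev = NULL;
 *    if (module->common.methods->open(&module->common, "0", &hw_dev) == 0) {
 *        camera3_device_t *dev = (camera3_device_t *)hw_dev;
 *        // initialize(), configure_streams(), capture requests ...
 *        dev->common.close(&dev->common);
 *    }
 */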
__END_DECLS
#endif /* #ifdef ANDROID_INCLUDE_CAMERA3_H */
|
0 | repos/libcamera/include/android | repos/libcamera/include/android/metadata/camera_metadata_hidden.h | /* SPDX-License-Identifier: Apache-2.0 */
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef SYSTEM_MEDIA_PRIVATE_INCLUDE_CAMERA_METADATA_HIDDEN_H
#define SYSTEM_MEDIA_PRIVATE_INCLUDE_CAMERA_METADATA_HIDDEN_H
#include <system/camera_vendor_tags.h>
/**
 * Error codes returned by vendor tag ops operations. These are intended
* to be used by all framework code that uses the return values from the
* vendor operations object.
*/
#define VENDOR_SECTION_NAME_ERR NULL
#define VENDOR_TAG_NAME_ERR NULL
#define VENDOR_TAG_COUNT_ERR (-1)
#define VENDOR_TAG_TYPE_ERR (-1)
#ifdef __cplusplus
extern "C" {
#endif
/** **These are private functions for use only by the camera framework.** **/
/**
* Set the global vendor tag operations object used to define vendor tag
* structure when parsing camera metadata with functions defined in
* system/media/camera/include/camera_metadata.h.
*/
ANDROID_API
int set_camera_metadata_vendor_ops(const vendor_tag_ops_t *query_ops);
/**
* Set the global vendor tag cache operations object used to define vendor tag
* structure when parsing camera metadata with functions defined in
* system/media/camera/include/camera_metadata.h.
*/
ANDROID_API
int set_camera_metadata_vendor_cache_ops(
const struct vendor_tag_cache_ops *query_cache_ops);
/**
* Set the vendor id for a particular metadata buffer.
*/
ANDROID_API
void set_camera_metadata_vendor_id(camera_metadata_t *meta,
metadata_vendor_id_t id);
/**
* Retrieve the vendor id for a particular metadata buffer.
*/
ANDROID_API
metadata_vendor_id_t get_camera_metadata_vendor_id(
const camera_metadata_t *meta);
/**
* Retrieve the type of a tag. Returns -1 if no such tag is defined.
*/
ANDROID_API
int get_local_camera_metadata_tag_type_vendor_id(uint32_t tag,
metadata_vendor_id_t id);
/**
* Retrieve the name of a tag. Returns NULL if no such tag is defined.
*/
ANDROID_API
const char *get_local_camera_metadata_tag_name_vendor_id(uint32_t tag,
metadata_vendor_id_t id);
/**
* Retrieve the name of a tag section. Returns NULL if no such tag is defined.
*/
ANDROID_API
const char *get_local_camera_metadata_section_name_vendor_id(uint32_t tag,
metadata_vendor_id_t id);
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* SYSTEM_MEDIA_PRIVATE_INCLUDE_CAMERA_METADATA_HIDDEN_H */
|
0 | repos/libcamera/include/android/metadata | repos/libcamera/include/android/metadata/system/camera_vendor_tags.h | /* SPDX-License-Identifier: Apache-2.0 */
/*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef SYSTEM_MEDIA_INCLUDE_ANDROID_CAMERA_VENDOR_TAGS_H
#define SYSTEM_MEDIA_INCLUDE_ANDROID_CAMERA_VENDOR_TAGS_H
#ifdef __cplusplus
extern "C" {
#endif
#define CAMERA_METADATA_VENDOR_TAG_BOUNDARY 0x80000000u
#define CAMERA_METADATA_INVALID_VENDOR_ID UINT64_MAX
typedef uint64_t metadata_vendor_id_t;
/**
* Vendor tags:
*
* This structure contains basic functions for enumerating an immutable set of
* vendor-defined camera metadata tags, and querying static information about
* their structure/type. The intended use of this information is to validate
* the structure of metadata returned by the camera HAL, and to allow vendor-
* defined metadata tags to be visible in application facing camera API.
*/
typedef struct vendor_tag_ops vendor_tag_ops_t;
struct vendor_tag_ops {
/**
* Get the number of vendor tags supported on this platform. Used to
* calculate the size of buffer needed for holding the array of all tags
* returned by get_all_tags(). This must return -1 on error.
*/
int (*get_tag_count)(const vendor_tag_ops_t *v);
/**
* Fill an array with all of the supported vendor tags on this platform.
* get_tag_count() must return the number of tags supported, and
* tag_array will be allocated with enough space to hold the number of tags
* returned by get_tag_count().
*/
void (*get_all_tags)(const vendor_tag_ops_t *v, uint32_t *tag_array);
/**
* Get the vendor section name for a vendor-specified entry tag. This will
* only be called for vendor-defined tags.
*
* The naming convention for the vendor-specific section names should
* follow a style similar to the Java package style. For example,
* CameraZoom Inc. must prefix their sections with "com.camerazoom."
* This must return NULL if the tag is outside the bounds of
* vendor-defined sections.
*
* There may be different vendor-defined tag sections, for example the
* phone maker, the chipset maker, and the camera module maker may each
* have their own "com.vendor."-prefixed section.
*
* The memory pointed to by the return value must remain valid for the
* lifetime of the module, and is owned by the module.
*/
const char *(*get_section_name)(const vendor_tag_ops_t *v, uint32_t tag);
/**
* Get the tag name for a vendor-specified entry tag. This is only called
* for vendor-defined tags, and must return NULL if it is not a
* vendor-defined tag.
*
* The memory pointed to by the return value must remain valid for the
* lifetime of the module, and is owned by the module.
*/
const char *(*get_tag_name)(const vendor_tag_ops_t *v, uint32_t tag);
/**
* Get tag type for a vendor-specified entry tag. The type returned must be
* a valid type defined in camera_metadata.h. This method is only called
* for tags >= CAMERA_METADATA_VENDOR_TAG_BOUNDARY, and must return
* -1 if the tag is outside the bounds of the vendor-defined sections.
*/
int (*get_tag_type)(const vendor_tag_ops_t *v, uint32_t tag);
/* Reserved for future use. These must be initialized to NULL. */
void* reserved[8];
};
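/*
 * Illustrative sketch (not normative): a minimal vendor_tag_ops backed by a
 * single static tag. The "com.acme.demo" section, the tag value, and the use
 * of TYPE_INT32 (from camera_metadata.h) are assumptions made for the example.
 *
 *    #define ACME_TAG_MODE (CAMERA_METADATA_VENDOR_TAG_BOUNDARY + 0)
 *
 *    static int acme_tag_count(const vendor_tag_ops_t *v) { return 1; }
 *    static void acme_all_tags(const vendor_tag_ops_t *v, uint32_t *tags)
 *    { tags[0] = ACME_TAG_MODE; }
 *    static const char *acme_section(const vendor_tag_ops_t *v, uint32_t tag)
 *    { return tag == ACME_TAG_MODE ? "com.acme.demo" : NULL; }
 *    static const char *acme_name(const vendor_tag_ops_t *v, uint32_t tag)
 *    { return tag == ACME_TAG_MODE ? "mode" : NULL; }
 *    static int acme_type(const vendor_tag_ops_t *v, uint32_t tag)
 *    { return tag == ACME_TAG_MODE ? TYPE_INT32 : -1; }
 *
 *    static const vendor_tag_ops_t acme_ops = {
 *        .get_tag_count    = acme_tag_count,
 *        .get_all_tags     = acme_all_tags,
 *        .get_section_name = acme_section,
 *        .get_tag_name     = acme_name,
 *        .get_tag_type     = acme_type,
 *    };   // reserved[] stays NULL via designated initialization
 */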
struct vendor_tag_cache_ops {
/**
* Get the number of vendor tags supported on this platform. Used to
* calculate the size of buffer needed for holding the array of all tags
* returned by get_all_tags(). This must return -1 on error.
*/
int (*get_tag_count)(metadata_vendor_id_t id);
/**
* Fill an array with all of the supported vendor tags on this platform.
* get_tag_count() must return the number of tags supported, and
* tag_array will be allocated with enough space to hold the number of tags
* returned by get_tag_count().
*/
void (*get_all_tags)(uint32_t *tag_array, metadata_vendor_id_t id);
/**
* Get the vendor section name for a vendor-specified entry tag. This will
* only be called for vendor-defined tags.
*
* The naming convention for the vendor-specific section names should
* follow a style similar to the Java package style. For example,
* CameraZoom Inc. must prefix their sections with "com.camerazoom."
* This must return NULL if the tag is outside the bounds of
* vendor-defined sections.
*
* There may be different vendor-defined tag sections, for example the
* phone maker, the chipset maker, and the camera module maker may each
* have their own "com.vendor."-prefixed section.
*
* The memory pointed to by the return value must remain valid for the
* lifetime of the module, and is owned by the module.
*/
const char *(*get_section_name)(uint32_t tag, metadata_vendor_id_t id);
/**
* Get the tag name for a vendor-specified entry tag. This is only called
* for vendor-defined tags, and must return NULL if it is not a
* vendor-defined tag.
*
* The memory pointed to by the return value must remain valid for the
* lifetime of the module, and is owned by the module.
*/
const char *(*get_tag_name)(uint32_t tag, metadata_vendor_id_t id);
/**
* Get tag type for a vendor-specified entry tag. The type returned must be
* a valid type defined in camera_metadata.h. This method is only called
* for tags >= CAMERA_METADATA_VENDOR_TAG_BOUNDARY, and must return
* -1 if the tag is outside the bounds of the vendor-defined sections.
*/
int (*get_tag_type)(uint32_t tag, metadata_vendor_id_t id);
/* Reserved for future use. These must be initialized to NULL. */
void* reserved[8];
};
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* SYSTEM_MEDIA_INCLUDE_ANDROID_CAMERA_VENDOR_TAGS_H */
|
0 | repos/libcamera/include/android/metadata | repos/libcamera/include/android/metadata/system/camera_metadata.h | /* SPDX-License-Identifier: Apache-2.0 */
/*
* Copyright (C) 2012 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef SYSTEM_MEDIA_INCLUDE_ANDROID_CAMERA_METADATA_H
#define SYSTEM_MEDIA_INCLUDE_ANDROID_CAMERA_METADATA_H
#include <string.h>
#include <stdint.h>
#include <cutils/compiler.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* Tag hierarchy and enum definitions for camera_metadata_entry
* =============================================================================
*/
/**
* Main enum definitions are in a separate file to make it easy to
* maintain
*/
#include "camera_metadata_tags.h"
/**
* Enum range for each top-level category
*/
ANDROID_API
extern unsigned int camera_metadata_section_bounds[ANDROID_SECTION_COUNT][2];
ANDROID_API
extern const char *camera_metadata_section_names[ANDROID_SECTION_COUNT];
/**
* Type definitions for camera_metadata_entry
* =============================================================================
*/
enum {
// Unsigned 8-bit integer (uint8_t)
TYPE_BYTE = 0,
// Signed 32-bit integer (int32_t)
TYPE_INT32 = 1,
// 32-bit float (float)
TYPE_FLOAT = 2,
// Signed 64-bit integer (int64_t)
TYPE_INT64 = 3,
// 64-bit float (double)
TYPE_DOUBLE = 4,
// A 64-bit fraction (camera_metadata_rational_t)
TYPE_RATIONAL = 5,
// Number of type fields
NUM_TYPES
};
typedef struct camera_metadata_rational {
int32_t numerator;
int32_t denominator;
} camera_metadata_rational_t;
/**
* A reference to a metadata entry in a buffer.
*
* The data union pointers point to the real data in the buffer, and can be
* modified in-place if the count does not need to change. The count is the
* number of entries in data of the entry's type, not a count of bytes.
*/
typedef struct camera_metadata_entry {
size_t index;
uint32_t tag;
uint8_t type;
size_t count;
union {
uint8_t *u8;
int32_t *i32;
float *f;
int64_t *i64;
double *d;
camera_metadata_rational_t *r;
} data;
} camera_metadata_entry_t;
/**
* A read-only reference to a metadata entry in a buffer. Identical to
* camera_metadata_entry in layout
*/
typedef struct camera_metadata_ro_entry {
size_t index;
uint32_t tag;
uint8_t type;
size_t count;
union {
const uint8_t *u8;
const int32_t *i32;
const float *f;
const int64_t *i64;
const double *d;
const camera_metadata_rational_t *r;
} data;
} camera_metadata_ro_entry_t;
/**
* Size in bytes of each entry type
*/
ANDROID_API
extern const size_t camera_metadata_type_size[NUM_TYPES];
/**
* Human-readable name of each entry type
*/
ANDROID_API
extern const char* camera_metadata_type_names[NUM_TYPES];
/**
* Main definitions for the metadata entry and array structures
* =============================================================================
*/
/**
* A packet of metadata. This is a list of metadata entries, each of which has
* an integer tag to identify its meaning, 'type' and 'count' field, and the
* data, which contains a 'count' number of entries of type 'type'. The packet
* has a fixed capacity for entries and for extra data. A new entry uses up one
* entry slot, and possibly some amount of data capacity; the function
* calculate_camera_metadata_entry_data_size() provides the amount of data
* capacity that would be used up by an entry.
*
* Entries are not sorted by default, and are not forced to be unique - multiple
* entries with the same tag are allowed. The packet will not dynamically resize
* when full.
*
* The packet is contiguous in memory, with size in bytes given by
* get_camera_metadata_size(). Therefore, it can be copied safely with memcpy()
* to a buffer of sufficient size. The copy_camera_metadata() function is
* intended for eliminating unused capacity in the destination packet.
*/
struct camera_metadata;
typedef struct camera_metadata camera_metadata_t;
/**
* Functions for manipulating camera metadata
* =============================================================================
*
* NOTE: Unless otherwise specified, functions that return type "int"
* return 0 on success, and non-0 value on error.
*/
/**
* Allocate a new camera_metadata structure, with some initial space for entries
* and extra data. The entry_capacity is measured in entry counts, and
* data_capacity in bytes. The resulting structure is all contiguous in memory,
* and can be freed with free_camera_metadata().
*/
ANDROID_API
camera_metadata_t *allocate_camera_metadata(size_t entry_capacity,
size_t data_capacity);
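/*
 * Illustrative sketch (not normative): allocate a packet, add one entry, and
 * free it. ANDROID_SENSOR_SENSITIVITY is an int32 tag from
 * camera_metadata_tags.h; the capacities here are arbitrary.
 *
 *    camera_metadata_t *m = allocate_camera_metadata(16, 256);
 *    if (m != NULL) {
 *        int32_t sensitivity = 100;
 *        if (add_camera_metadata_entry(m, ANDROID_SENSOR_SENSITIVITY,
 *                                      &sensitivity, 1) != 0) {
 *            // out of entry/data capacity, or unknown tag
 *        }
 *        free_camera_metadata(m);
 *    }
 */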
/**
* Get the required alignment of a packet of camera metadata, which is the
* maximal alignment of the embedded camera_metadata, camera_metadata_buffer_entry,
* and camera_metadata_data.
*/
ANDROID_API
size_t get_camera_metadata_alignment();
/**
* Allocate a new camera_metadata structure of size src_size. Copy the data,
* ignoring alignment, and then attempt validation. If validation
* fails, free the memory and return NULL. Otherwise return the pointer.
*
* The resulting pointer can be freed with free_camera_metadata().
*/
ANDROID_API
camera_metadata_t *allocate_copy_camera_metadata_checked(
const camera_metadata_t *src,
size_t src_size);
/**
* Place a camera metadata structure into an existing buffer. Returns NULL if
* the buffer is too small for the requested number of reserved entries and
* bytes of data. The entry_capacity is measured in entry counts, and
* data_capacity in bytes. If the buffer is larger than the required space,
* unused space will be left at the end. If successful, returns a pointer to the
* metadata header placed at the start of the buffer. It is the caller's
* responsibility to free the original buffer; do not call
* free_camera_metadata() with the returned pointer.
*/
ANDROID_API
camera_metadata_t *place_camera_metadata(void *dst, size_t dst_size,
size_t entry_capacity,
size_t data_capacity);
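/*
 * Illustrative sketch (not normative): placing a packet inside a
 * caller-owned buffer. The buffer itself, not the returned pointer, is what
 * must eventually be released.
 *
 *    size_t size = calculate_camera_metadata_size(8, 128);
 *    void *buf = malloc(size);   // malloc alignment is sufficient
 *    camera_metadata_t *m = place_camera_metadata(buf, size, 8, 128);
 *    if (m == NULL) {
 *        // buffer too small for the requested capacities
 *    }
 *    // ... use m ...
 *    free(buf);                  // not free_camera_metadata(m)
 */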
/**
* Free a camera_metadata structure. Should only be used with structures
* allocated with allocate_camera_metadata().
*/
ANDROID_API
void free_camera_metadata(camera_metadata_t *metadata);
/**
* Calculate the buffer size needed for a metadata structure of entry_count
* metadata entries, needing a total of data_count bytes of extra data storage.
*/
ANDROID_API
size_t calculate_camera_metadata_size(size_t entry_count,
size_t data_count);
/**
* Get current size of entire metadata structure in bytes, including reserved
* but unused space.
*/
ANDROID_API
size_t get_camera_metadata_size(const camera_metadata_t *metadata);
/**
* Get size of entire metadata buffer in bytes, not including reserved but
* unused space. This is the amount of space needed by copy_camera_metadata for
* its dst buffer.
*/
ANDROID_API
size_t get_camera_metadata_compact_size(const camera_metadata_t *metadata);
/**
* Get the current number of entries in the metadata packet.
*
* metadata packet must be valid, which can be checked before the call with
* validate_camera_metadata_structure().
*/
ANDROID_API
size_t get_camera_metadata_entry_count(const camera_metadata_t *metadata);
/**
* Get the maximum number of entries that could fit in the metadata packet.
*/
ANDROID_API
size_t get_camera_metadata_entry_capacity(const camera_metadata_t *metadata);
/**
* Get the current count of bytes used for value storage in the metadata packet.
*/
ANDROID_API
size_t get_camera_metadata_data_count(const camera_metadata_t *metadata);
/**
* Get the maximum count of bytes that could be used for value storage in the
* metadata packet.
*/
ANDROID_API
size_t get_camera_metadata_data_capacity(const camera_metadata_t *metadata);
/**
* Copy a metadata structure to a memory buffer, compacting it along the
* way. That is, in the copied structure, entry_count == entry_capacity, and
* data_count == data_capacity.
*
* If dst_size > get_camera_metadata_compact_size(), the unused bytes are at the
* end of the buffer. If dst_size < get_camera_metadata_compact_size(), returns
* NULL. Otherwise returns a pointer to the metadata structure header placed at
* the start of dst.
*
* Since the buffer was not allocated by allocate_camera_metadata, the caller is
* responsible for freeing the underlying buffer when needed; do not call
* free_camera_metadata.
*/
ANDROID_API
camera_metadata_t *copy_camera_metadata(void *dst, size_t dst_size,
const camera_metadata_t *src);
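/*
 * Illustrative sketch (not normative): compacting a packet into a
 * tightly-sized caller buffer, e.g. before sending it across a process
 * boundary.
 *
 *    size_t compact = get_camera_metadata_compact_size(src);
 *    void *buf = malloc(compact);
 *    camera_metadata_t *dst = copy_camera_metadata(buf, compact, src);
 *    // dst is NULL only if buf was too small; free(buf) when done.
 */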
// Non-zero return values for validate_camera_metadata_structure
enum {
CAMERA_METADATA_VALIDATION_ERROR = 1,
CAMERA_METADATA_VALIDATION_SHIFTED = 2,
};
/**
 * Validate that a metadata buffer is structurally sane. That is, its internal
* state is such that we won't get buffer overflows or run into other
* 'impossible' issues when calling the other API functions.
*
* This is useful in particular after copying the binary metadata blob
* from an untrusted source, since passing this check means the data is at least
* consistent.
*
* The expected_size argument is optional.
*
* Returns 0: on success
* CAMERA_METADATA_VALIDATION_ERROR: on error
* CAMERA_METADATA_VALIDATION_SHIFTED: when the data is not properly aligned, but can be
* used as input of clone_camera_metadata and the returned metadata will be valid.
*
*/
ANDROID_API
int validate_camera_metadata_structure(const camera_metadata_t *metadata,
const size_t *expected_size);
/**
* Append camera metadata in src to an existing metadata structure in dst. This
* does not resize the destination structure, so if it is too small, a non-zero
* value is returned. On success, 0 is returned. Appending onto a sorted
* structure results in a non-sorted combined structure.
*/
ANDROID_API
int append_camera_metadata(camera_metadata_t *dst, const camera_metadata_t *src);
/**
* Clone an existing metadata buffer, compacting along the way. This is
* equivalent to allocating a new buffer of the minimum needed size, then
* appending the buffer to be cloned into the new buffer. The resulting buffer
* can be freed with free_camera_metadata(). Returns NULL if cloning failed.
*/
ANDROID_API
camera_metadata_t *clone_camera_metadata(const camera_metadata_t *src);
/**
* Calculate the number of bytes of extra data a given metadata entry will take
* up. That is, if entry of 'type' with a payload of 'data_count' values is
* added, how much will the value returned by get_camera_metadata_data_count()
* be increased? This value may be zero, if no extra data storage is needed.
*/
ANDROID_API
size_t calculate_camera_metadata_entry_data_size(uint8_t type,
size_t data_count);
/**
* Add a metadata entry to a metadata structure. Returns 0 if the addition
* succeeded. Returns a non-zero value if there is insufficient reserved space
* left to add the entry, or if the tag is unknown. data_count is the number of
* entries in the data array of the tag's type, not a count of
 * bytes. Vendor-defined tags cannot be added using this method, unless
* set_vendor_tag_query_ops() has been called first. Entries are always added to
* the end of the structure (highest index), so after addition, a
* previously-sorted array will be marked as unsorted.
*
* Returns 0 on success. A non-0 value is returned on error.
*/
ANDROID_API
int add_camera_metadata_entry(camera_metadata_t *dst,
uint32_t tag,
const void *data,
size_t data_count);
/**
* Sort the metadata buffer for fast searching. If already marked as sorted,
* does nothing. Adding or appending entries to the buffer will place the buffer
* back into an unsorted state.
*
* Returns 0 on success. A non-0 value is returned on error.
*/
ANDROID_API
int sort_camera_metadata(camera_metadata_t *dst);
/**
* Get metadata entry at position index in the metadata buffer.
* Index must be less than entry count, which is returned by
* get_camera_metadata_entry_count().
*
* src and index are inputs; the passed-in entry is updated with the details of
* the entry. The data pointer points to the real data in the buffer, and can be
* updated as long as the data count does not change.
*
* Returns 0 on success. A non-0 value is returned on error.
*/
ANDROID_API
int get_camera_metadata_entry(camera_metadata_t *src,
size_t index,
camera_metadata_entry_t *entry);
/**
* Get metadata entry at position index, but disallow editing the data.
*/
ANDROID_API
int get_camera_metadata_ro_entry(const camera_metadata_t *src,
size_t index,
camera_metadata_ro_entry_t *entry);
/**
* Find an entry with given tag value. If not found, returns -ENOENT. Otherwise,
* returns entry contents like get_camera_metadata_entry.
*
* If multiple entries with the same tag exist, does not have any guarantees on
* which is returned. To speed up searching for tags, sort the metadata
* structure first by calling sort_camera_metadata().
*/
ANDROID_API
int find_camera_metadata_entry(camera_metadata_t *src,
uint32_t tag,
camera_metadata_entry_t *entry);
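/*
 * Illustrative sketch (not normative): the sort-then-find pattern, and
 * reading a value through the typed data union. ANDROID_SENSOR_SENSITIVITY
 * is an int32 tag from camera_metadata_tags.h; `m` is a valid packet.
 *
 *    camera_metadata_entry_t e;
 *    sort_camera_metadata(m);   // speeds up repeated lookups
 *    if (find_camera_metadata_entry(m, ANDROID_SENSOR_SENSITIVITY, &e) == 0 &&
 *        e.type == TYPE_INT32 && e.count >= 1) {
 *        int32_t sensitivity = e.data.i32[0];
 *        (void)sensitivity;
 *    }
 */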
/**
* Find an entry with given tag value, but disallow editing the data
*/
ANDROID_API
int find_camera_metadata_ro_entry(const camera_metadata_t *src,
uint32_t tag,
camera_metadata_ro_entry_t *entry);
/**
* Delete an entry at given index. This is an expensive operation, since it
* requires repacking entries and possibly entry data. This also invalidates any
* existing camera_metadata_entry.data pointers to this buffer. Sorting is
* maintained.
*/
ANDROID_API
int delete_camera_metadata_entry(camera_metadata_t *dst,
size_t index);
/**
* Updates a metadata entry with new data. If the data size is changing, may
* need to adjust the data array, making this an O(N) operation. If the data
* size is the same or still fits in the entry space, this is O(1). Maintains
* sorting, but invalidates camera_metadata_entry instances that point to the
* updated entry. If a non-NULL value is passed in to entry, the entry structure
* is updated to match the new buffer state. Returns a non-zero value if there
* is no room for the new data in the buffer.
*/
ANDROID_API
int update_camera_metadata_entry(camera_metadata_t *dst,
size_t index,
const void *data,
size_t data_count,
camera_metadata_entry_t *updated_entry);
/**
* Retrieve human-readable name of section the tag is in. Returns NULL if
* no such tag is defined. Returns NULL for tags in the vendor section, unless
* set_vendor_tag_query_ops() has been used.
*/
ANDROID_API
const char *get_camera_metadata_section_name(uint32_t tag);
/**
* Retrieve human-readable name of tag (not including section). Returns NULL if
* no such tag is defined. Returns NULL for tags in the vendor section, unless
* set_vendor_tag_query_ops() has been used.
*/
ANDROID_API
const char *get_camera_metadata_tag_name(uint32_t tag);
/**
* Retrieve the type of a tag. Returns -1 if no such tag is defined. Returns -1
* for tags in the vendor section, unless set_vendor_tag_query_ops() has been
* used.
*/
ANDROID_API
int get_camera_metadata_tag_type(uint32_t tag);
/**
* Retrieve human-readable name of section the tag is in. Returns NULL if
* no such tag is defined.
*/
ANDROID_API
const char *get_local_camera_metadata_section_name(uint32_t tag,
const camera_metadata_t *meta);
/**
* Retrieve human-readable name of tag (not including section). Returns NULL if
* no such tag is defined.
*/
ANDROID_API
const char *get_local_camera_metadata_tag_name(uint32_t tag,
const camera_metadata_t *meta);
/**
* Retrieve the type of a tag. Returns -1 if no such tag is defined.
*/
ANDROID_API
int get_local_camera_metadata_tag_type(uint32_t tag,
const camera_metadata_t *meta);
/**
* Set up vendor-specific tag query methods. These are needed to properly add
* entries with vendor-specified tags and to use the
* get_camera_metadata_section_name, _tag_name, and _tag_type methods with
* vendor tags. Returns 0 on success.
*
* **DEPRECATED** - Please use vendor_tag_ops defined in camera_vendor_tags.h
* instead.
*/
typedef struct vendor_tag_query_ops vendor_tag_query_ops_t;
struct vendor_tag_query_ops {
/**
* Get vendor section name for a vendor-specified entry tag. Only called for
* tags >= 0x80000000. The section name must start with the name of the
 * vendor in the Java package style. For example, CameraZoom Inc. must prefix
* their sections with "com.camerazoom." Must return NULL if the tag is
* outside the bounds of vendor-defined sections.
*/
const char *(*get_camera_vendor_section_name)(
const vendor_tag_query_ops_t *v,
uint32_t tag);
/**
* Get tag name for a vendor-specified entry tag. Only called for tags >=
* 0x80000000. Must return NULL if the tag is outside the bounds of
* vendor-defined sections.
*/
const char *(*get_camera_vendor_tag_name)(
const vendor_tag_query_ops_t *v,
uint32_t tag);
/**
* Get tag type for a vendor-specified entry tag. Only called for tags >=
* 0x80000000. Must return -1 if the tag is outside the bounds of
* vendor-defined sections.
*/
int (*get_camera_vendor_tag_type)(
const vendor_tag_query_ops_t *v,
uint32_t tag);
/**
* Get the number of vendor tags supported on this platform. Used to
* calculate the size of buffer needed for holding the array of all tags
* returned by get_camera_vendor_tags().
*/
int (*get_camera_vendor_tag_count)(
const vendor_tag_query_ops_t *v);
/**
* Fill an array with all the supported vendor tags on this platform.
* get_camera_vendor_tag_count() returns the number of tags supported, and
* tag_array should be allocated with enough space to hold all of the tags.
*/
void (*get_camera_vendor_tags)(
const vendor_tag_query_ops_t *v,
uint32_t *tag_array);
};
/**
* **DEPRECATED** - This should only be used by the camera framework. Camera
* metadata will transition to using vendor_tag_ops defined in
* camera_vendor_tags.h instead.
*/
ANDROID_API
int set_camera_metadata_vendor_tag_ops(const vendor_tag_query_ops_t *query_ops);
/**
* Print fields in the metadata to the log.
* verbosity = 0: Only tag entry information
* verbosity = 1: Tag entry information plus at most 16 data values
* verbosity = 2: All information
*/
ANDROID_API
void dump_camera_metadata(const camera_metadata_t *metadata,
int fd,
int verbosity);
/**
* Print fields in the metadata to the log; adds indentation parameter, which
* specifies the number of spaces to insert before each line of the dump
*/
ANDROID_API
void dump_indented_camera_metadata(const camera_metadata_t *metadata,
int fd,
int verbosity,
int indentation);
/**
* Prints the specified tag value as a string. Only works for enum tags.
* Returns 0 on success, -1 on failure.
*/
ANDROID_API
int camera_metadata_enum_snprint(uint32_t tag,
uint32_t value,
char *dst,
size_t size);
#ifdef __cplusplus
}
#endif
#endif
|
0 | repos/libcamera/include/android/metadata | repos/libcamera/include/android/metadata/system/camera_metadata_tags.h | /* SPDX-License-Identifier: Apache-2.0 */
/*
* Copyright (C) 2012 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* !! Do not include this file directly !!
*
* Include camera_metadata.h instead.
*/
/**
* ! Do not edit this file directly !
*
* Generated automatically from camera_metadata_tags.mako
*/
/** TODO: Nearly every enum in this file needs a description */
/**
* Top level hierarchy definitions for camera metadata. *_INFO sections are for
 * the static metadata that can be retrieved without opening the camera device.
* New sections must be added right before ANDROID_SECTION_COUNT to maintain
* existing enumerations.
*/
typedef enum camera_metadata_section {
ANDROID_COLOR_CORRECTION,
ANDROID_CONTROL,
ANDROID_DEMOSAIC,
ANDROID_EDGE,
ANDROID_FLASH,
ANDROID_FLASH_INFO,
ANDROID_HOT_PIXEL,
ANDROID_JPEG,
ANDROID_LENS,
ANDROID_LENS_INFO,
ANDROID_NOISE_REDUCTION,
ANDROID_QUIRKS,
ANDROID_REQUEST,
ANDROID_SCALER,
ANDROID_SENSOR,
ANDROID_SENSOR_INFO,
ANDROID_SHADING,
ANDROID_STATISTICS,
ANDROID_STATISTICS_INFO,
ANDROID_TONEMAP,
ANDROID_LED,
ANDROID_INFO,
ANDROID_BLACK_LEVEL,
ANDROID_SYNC,
ANDROID_REPROCESS,
ANDROID_DEPTH,
ANDROID_LOGICAL_MULTI_CAMERA,
ANDROID_DISTORTION_CORRECTION,
ANDROID_SECTION_COUNT,
VENDOR_SECTION = 0x8000
} camera_metadata_section_t;
/**
* Hierarchy positions in enum space. All vendor extension tags must be
* defined with tag >= VENDOR_SECTION_START
*/
typedef enum camera_metadata_section_start {
ANDROID_COLOR_CORRECTION_START = ANDROID_COLOR_CORRECTION << 16,
ANDROID_CONTROL_START = ANDROID_CONTROL << 16,
ANDROID_DEMOSAIC_START = ANDROID_DEMOSAIC << 16,
ANDROID_EDGE_START = ANDROID_EDGE << 16,
ANDROID_FLASH_START = ANDROID_FLASH << 16,
ANDROID_FLASH_INFO_START = ANDROID_FLASH_INFO << 16,
ANDROID_HOT_PIXEL_START = ANDROID_HOT_PIXEL << 16,
ANDROID_JPEG_START = ANDROID_JPEG << 16,
ANDROID_LENS_START = ANDROID_LENS << 16,
ANDROID_LENS_INFO_START = ANDROID_LENS_INFO << 16,
ANDROID_NOISE_REDUCTION_START = ANDROID_NOISE_REDUCTION << 16,
ANDROID_QUIRKS_START = ANDROID_QUIRKS << 16,
ANDROID_REQUEST_START = ANDROID_REQUEST << 16,
ANDROID_SCALER_START = ANDROID_SCALER << 16,
ANDROID_SENSOR_START = ANDROID_SENSOR << 16,
ANDROID_SENSOR_INFO_START = ANDROID_SENSOR_INFO << 16,
ANDROID_SHADING_START = ANDROID_SHADING << 16,
ANDROID_STATISTICS_START = ANDROID_STATISTICS << 16,
ANDROID_STATISTICS_INFO_START = ANDROID_STATISTICS_INFO << 16,
ANDROID_TONEMAP_START = ANDROID_TONEMAP << 16,
ANDROID_LED_START = ANDROID_LED << 16,
ANDROID_INFO_START = ANDROID_INFO << 16,
ANDROID_BLACK_LEVEL_START = ANDROID_BLACK_LEVEL << 16,
ANDROID_SYNC_START = ANDROID_SYNC << 16,
ANDROID_REPROCESS_START = ANDROID_REPROCESS << 16,
ANDROID_DEPTH_START = ANDROID_DEPTH << 16,
ANDROID_LOGICAL_MULTI_CAMERA_START
= ANDROID_LOGICAL_MULTI_CAMERA
<< 16,
ANDROID_DISTORTION_CORRECTION_START
= ANDROID_DISTORTION_CORRECTION
<< 16,
VENDOR_SECTION_START = VENDOR_SECTION << 16
} camera_metadata_section_start_t;
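/*
 * Illustrative note (not part of the generated header): since each
 * section's tags start at (section << 16), the section and per-section
 * index of a non-vendor tag can be recovered with bit operations:
 *
 *   uint32_t tag = ANDROID_CONTROL_AE_MODE;
 *   camera_metadata_section_t section =
 *           (camera_metadata_section_t)(tag >> 16);  // ANDROID_CONTROL
 *   uint32_t index = tag & 0xffff;                   // offset in section
 */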
/**
* Main enum for defining camera metadata tags. New entries must always go
* before the section _END tag to preserve existing enumeration values. In
 * addition, the name and type of the tag need to be added to
* system/media/camera/src/camera_metadata_tag_info.c
*/
typedef enum camera_metadata_tag {
ANDROID_COLOR_CORRECTION_MODE = // enum | public | HIDL v3.2
ANDROID_COLOR_CORRECTION_START,
ANDROID_COLOR_CORRECTION_TRANSFORM, // rational[] | public | HIDL v3.2
ANDROID_COLOR_CORRECTION_GAINS, // float[] | public | HIDL v3.2
ANDROID_COLOR_CORRECTION_ABERRATION_MODE, // enum | public | HIDL v3.2
ANDROID_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES,
// byte[] | public | HIDL v3.2
ANDROID_COLOR_CORRECTION_END,
ANDROID_CONTROL_AE_ANTIBANDING_MODE = // enum | public | HIDL v3.2
ANDROID_CONTROL_START,
ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION, // int32 | public | HIDL v3.2
ANDROID_CONTROL_AE_LOCK, // enum | public | HIDL v3.2
ANDROID_CONTROL_AE_MODE, // enum | public | HIDL v3.2
ANDROID_CONTROL_AE_REGIONS, // int32[] | public | HIDL v3.2
ANDROID_CONTROL_AE_TARGET_FPS_RANGE, // int32[] | public | HIDL v3.2
ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER, // enum | public | HIDL v3.2
ANDROID_CONTROL_AF_MODE, // enum | public | HIDL v3.2
ANDROID_CONTROL_AF_REGIONS, // int32[] | public | HIDL v3.2
ANDROID_CONTROL_AF_TRIGGER, // enum | public | HIDL v3.2
ANDROID_CONTROL_AWB_LOCK, // enum | public | HIDL v3.2
ANDROID_CONTROL_AWB_MODE, // enum | public | HIDL v3.2
ANDROID_CONTROL_AWB_REGIONS, // int32[] | public | HIDL v3.2
ANDROID_CONTROL_CAPTURE_INTENT, // enum | public | HIDL v3.2
ANDROID_CONTROL_EFFECT_MODE, // enum | public | HIDL v3.2
ANDROID_CONTROL_MODE, // enum | public | HIDL v3.2
ANDROID_CONTROL_SCENE_MODE, // enum | public | HIDL v3.2
ANDROID_CONTROL_VIDEO_STABILIZATION_MODE, // enum | public | HIDL v3.2
ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES, // byte[] | public | HIDL v3.2
ANDROID_CONTROL_AE_AVAILABLE_MODES, // byte[] | public | HIDL v3.2
ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES, // int32[] | public | HIDL v3.2
ANDROID_CONTROL_AE_COMPENSATION_RANGE, // int32[] | public | HIDL v3.2
ANDROID_CONTROL_AE_COMPENSATION_STEP, // rational | public | HIDL v3.2
ANDROID_CONTROL_AF_AVAILABLE_MODES, // byte[] | public | HIDL v3.2
ANDROID_CONTROL_AVAILABLE_EFFECTS, // byte[] | public | HIDL v3.2
ANDROID_CONTROL_AVAILABLE_SCENE_MODES, // byte[] | public | HIDL v3.2
ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES,
// byte[] | public | HIDL v3.2
ANDROID_CONTROL_AWB_AVAILABLE_MODES, // byte[] | public | HIDL v3.2
ANDROID_CONTROL_MAX_REGIONS, // int32[] | ndk_public | HIDL v3.2
ANDROID_CONTROL_SCENE_MODE_OVERRIDES, // byte[] | system | HIDL v3.2
ANDROID_CONTROL_AE_PRECAPTURE_ID, // int32 | system | HIDL v3.2
ANDROID_CONTROL_AE_STATE, // enum | public | HIDL v3.2
ANDROID_CONTROL_AF_STATE, // enum | public | HIDL v3.2
ANDROID_CONTROL_AF_TRIGGER_ID, // int32 | system | HIDL v3.2
ANDROID_CONTROL_AWB_STATE, // enum | public | HIDL v3.2
ANDROID_CONTROL_AVAILABLE_HIGH_SPEED_VIDEO_CONFIGURATIONS,
// int32[] | hidden | HIDL v3.2
ANDROID_CONTROL_AE_LOCK_AVAILABLE, // enum | public | HIDL v3.2
ANDROID_CONTROL_AWB_LOCK_AVAILABLE, // enum | public | HIDL v3.2
ANDROID_CONTROL_AVAILABLE_MODES, // byte[] | public | HIDL v3.2
ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST_RANGE, // int32[] | public | HIDL v3.2
ANDROID_CONTROL_POST_RAW_SENSITIVITY_BOOST, // int32 | public | HIDL v3.2
ANDROID_CONTROL_ENABLE_ZSL, // enum | public | HIDL v3.2
ANDROID_CONTROL_AF_SCENE_CHANGE, // enum | public | HIDL v3.3
ANDROID_CONTROL_END,
ANDROID_DEMOSAIC_MODE = // enum | system | HIDL v3.2
ANDROID_DEMOSAIC_START,
ANDROID_DEMOSAIC_END,
ANDROID_EDGE_MODE = // enum | public | HIDL v3.2
ANDROID_EDGE_START,
ANDROID_EDGE_STRENGTH, // byte | system | HIDL v3.2
ANDROID_EDGE_AVAILABLE_EDGE_MODES, // byte[] | public | HIDL v3.2
ANDROID_EDGE_END,
ANDROID_FLASH_FIRING_POWER = // byte | system | HIDL v3.2
ANDROID_FLASH_START,
ANDROID_FLASH_FIRING_TIME, // int64 | system | HIDL v3.2
ANDROID_FLASH_MODE, // enum | public | HIDL v3.2
ANDROID_FLASH_COLOR_TEMPERATURE, // byte | system | HIDL v3.2
ANDROID_FLASH_MAX_ENERGY, // byte | system | HIDL v3.2
ANDROID_FLASH_STATE, // enum | public | HIDL v3.2
ANDROID_FLASH_END,
ANDROID_FLASH_INFO_AVAILABLE = // enum | public | HIDL v3.2
ANDROID_FLASH_INFO_START,
ANDROID_FLASH_INFO_CHARGE_DURATION, // int64 | system | HIDL v3.2
ANDROID_FLASH_INFO_END,
ANDROID_HOT_PIXEL_MODE = // enum | public | HIDL v3.2
ANDROID_HOT_PIXEL_START,
ANDROID_HOT_PIXEL_AVAILABLE_HOT_PIXEL_MODES, // byte[] | public | HIDL v3.2
ANDROID_HOT_PIXEL_END,
ANDROID_JPEG_GPS_COORDINATES = // double[] | ndk_public | HIDL v3.2
ANDROID_JPEG_START,
ANDROID_JPEG_GPS_PROCESSING_METHOD, // byte | ndk_public | HIDL v3.2
ANDROID_JPEG_GPS_TIMESTAMP, // int64 | ndk_public | HIDL v3.2
ANDROID_JPEG_ORIENTATION, // int32 | public | HIDL v3.2
ANDROID_JPEG_QUALITY, // byte | public | HIDL v3.2
ANDROID_JPEG_THUMBNAIL_QUALITY, // byte | public | HIDL v3.2
ANDROID_JPEG_THUMBNAIL_SIZE, // int32[] | public | HIDL v3.2
ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES, // int32[] | public | HIDL v3.2
ANDROID_JPEG_MAX_SIZE, // int32 | system | HIDL v3.2
ANDROID_JPEG_SIZE, // int32 | system | HIDL v3.2
ANDROID_JPEG_END,
ANDROID_LENS_APERTURE = // float | public | HIDL v3.2
ANDROID_LENS_START,
ANDROID_LENS_FILTER_DENSITY, // float | public | HIDL v3.2
ANDROID_LENS_FOCAL_LENGTH, // float | public | HIDL v3.2
ANDROID_LENS_FOCUS_DISTANCE, // float | public | HIDL v3.2
ANDROID_LENS_OPTICAL_STABILIZATION_MODE, // enum | public | HIDL v3.2
ANDROID_LENS_FACING, // enum | public | HIDL v3.2
ANDROID_LENS_POSE_ROTATION, // float[] | public | HIDL v3.2
ANDROID_LENS_POSE_TRANSLATION, // float[] | public | HIDL v3.2
ANDROID_LENS_FOCUS_RANGE, // float[] | public | HIDL v3.2
ANDROID_LENS_STATE, // enum | public | HIDL v3.2
ANDROID_LENS_INTRINSIC_CALIBRATION, // float[] | public | HIDL v3.2
ANDROID_LENS_RADIAL_DISTORTION, // float[] | public | HIDL v3.2
ANDROID_LENS_POSE_REFERENCE, // enum | public | HIDL v3.3
ANDROID_LENS_DISTORTION, // float[] | public | HIDL v3.3
ANDROID_LENS_END,
ANDROID_LENS_INFO_AVAILABLE_APERTURES = // float[] | public | HIDL v3.2
ANDROID_LENS_INFO_START,
ANDROID_LENS_INFO_AVAILABLE_FILTER_DENSITIES, // float[] | public | HIDL v3.2
ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS, // float[] | public | HIDL v3.2
ANDROID_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION,// byte[] | public | HIDL v3.2
ANDROID_LENS_INFO_HYPERFOCAL_DISTANCE, // float | public | HIDL v3.2
ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE, // float | public | HIDL v3.2
ANDROID_LENS_INFO_SHADING_MAP_SIZE, // int32[] | ndk_public | HIDL v3.2
ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION, // enum | public | HIDL v3.2
ANDROID_LENS_INFO_END,
ANDROID_NOISE_REDUCTION_MODE = // enum | public | HIDL v3.2
ANDROID_NOISE_REDUCTION_START,
ANDROID_NOISE_REDUCTION_STRENGTH, // byte | system | HIDL v3.2
ANDROID_NOISE_REDUCTION_AVAILABLE_NOISE_REDUCTION_MODES,
// byte[] | public | HIDL v3.2
ANDROID_NOISE_REDUCTION_END,
ANDROID_QUIRKS_METERING_CROP_REGION = // byte | system | HIDL v3.2
ANDROID_QUIRKS_START,
ANDROID_QUIRKS_TRIGGER_AF_WITH_AUTO, // byte | system | HIDL v3.2
ANDROID_QUIRKS_USE_ZSL_FORMAT, // byte | system | HIDL v3.2
ANDROID_QUIRKS_USE_PARTIAL_RESULT, // byte | hidden | HIDL v3.2
ANDROID_QUIRKS_PARTIAL_RESULT, // enum | hidden | HIDL v3.2
ANDROID_QUIRKS_END,
ANDROID_REQUEST_FRAME_COUNT = // int32 | hidden | HIDL v3.2
ANDROID_REQUEST_START,
ANDROID_REQUEST_ID, // int32 | hidden | HIDL v3.2
ANDROID_REQUEST_INPUT_STREAMS, // int32[] | system | HIDL v3.2
ANDROID_REQUEST_METADATA_MODE, // enum | system | HIDL v3.2
ANDROID_REQUEST_OUTPUT_STREAMS, // int32[] | system | HIDL v3.2
ANDROID_REQUEST_TYPE, // enum | system | HIDL v3.2
ANDROID_REQUEST_MAX_NUM_OUTPUT_STREAMS, // int32[] | ndk_public | HIDL v3.2
ANDROID_REQUEST_MAX_NUM_REPROCESS_STREAMS, // int32[] | system | HIDL v3.2
ANDROID_REQUEST_MAX_NUM_INPUT_STREAMS, // int32 | java_public | HIDL v3.2
ANDROID_REQUEST_PIPELINE_DEPTH, // byte | public | HIDL v3.2
ANDROID_REQUEST_PIPELINE_MAX_DEPTH, // byte | public | HIDL v3.2
ANDROID_REQUEST_PARTIAL_RESULT_COUNT, // int32 | public | HIDL v3.2
ANDROID_REQUEST_AVAILABLE_CAPABILITIES, // enum[] | public | HIDL v3.2
ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS, // int32[] | ndk_public | HIDL v3.2
ANDROID_REQUEST_AVAILABLE_RESULT_KEYS, // int32[] | ndk_public | HIDL v3.2
ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS, // int32[] | ndk_public | HIDL v3.2
ANDROID_REQUEST_AVAILABLE_SESSION_KEYS, // int32[] | ndk_public | HIDL v3.3
ANDROID_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS,
// int32[] | hidden | HIDL v3.3
ANDROID_REQUEST_END,
ANDROID_SCALER_CROP_REGION = // int32[] | public | HIDL v3.2
ANDROID_SCALER_START,
ANDROID_SCALER_AVAILABLE_FORMATS, // enum[] | hidden | HIDL v3.2
ANDROID_SCALER_AVAILABLE_JPEG_MIN_DURATIONS, // int64[] | hidden | HIDL v3.2
ANDROID_SCALER_AVAILABLE_JPEG_SIZES, // int32[] | hidden | HIDL v3.2
ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM, // float | public | HIDL v3.2
ANDROID_SCALER_AVAILABLE_PROCESSED_MIN_DURATIONS, // int64[] | hidden | HIDL v3.2
ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES, // int32[] | hidden | HIDL v3.2
ANDROID_SCALER_AVAILABLE_RAW_MIN_DURATIONS, // int64[] | system | HIDL v3.2
ANDROID_SCALER_AVAILABLE_RAW_SIZES, // int32[] | system | HIDL v3.2
ANDROID_SCALER_AVAILABLE_INPUT_OUTPUT_FORMATS_MAP,// int32 | hidden | HIDL v3.2
ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS, // enum[] | ndk_public | HIDL v3.2
ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS, // int64[] | ndk_public | HIDL v3.2
ANDROID_SCALER_AVAILABLE_STALL_DURATIONS, // int64[] | ndk_public | HIDL v3.2
ANDROID_SCALER_CROPPING_TYPE, // enum | public | HIDL v3.2
ANDROID_SCALER_END,
ANDROID_SENSOR_EXPOSURE_TIME = // int64 | public | HIDL v3.2
ANDROID_SENSOR_START,
ANDROID_SENSOR_FRAME_DURATION, // int64 | public | HIDL v3.2
ANDROID_SENSOR_SENSITIVITY, // int32 | public | HIDL v3.2
ANDROID_SENSOR_REFERENCE_ILLUMINANT1, // enum | public | HIDL v3.2
ANDROID_SENSOR_REFERENCE_ILLUMINANT2, // byte | public | HIDL v3.2
ANDROID_SENSOR_CALIBRATION_TRANSFORM1, // rational[] | public | HIDL v3.2
ANDROID_SENSOR_CALIBRATION_TRANSFORM2, // rational[] | public | HIDL v3.2
ANDROID_SENSOR_COLOR_TRANSFORM1, // rational[] | public | HIDL v3.2
ANDROID_SENSOR_COLOR_TRANSFORM2, // rational[] | public | HIDL v3.2
ANDROID_SENSOR_FORWARD_MATRIX1, // rational[] | public | HIDL v3.2
ANDROID_SENSOR_FORWARD_MATRIX2, // rational[] | public | HIDL v3.2
ANDROID_SENSOR_BASE_GAIN_FACTOR, // rational | system | HIDL v3.2
ANDROID_SENSOR_BLACK_LEVEL_PATTERN, // int32[] | public | HIDL v3.2
ANDROID_SENSOR_MAX_ANALOG_SENSITIVITY, // int32 | public | HIDL v3.2
ANDROID_SENSOR_ORIENTATION, // int32 | public | HIDL v3.2
ANDROID_SENSOR_PROFILE_HUE_SAT_MAP_DIMENSIONS, // int32[] | system | HIDL v3.2
ANDROID_SENSOR_TIMESTAMP, // int64 | public | HIDL v3.2
ANDROID_SENSOR_TEMPERATURE, // float | system | HIDL v3.2
ANDROID_SENSOR_NEUTRAL_COLOR_POINT, // rational[] | public | HIDL v3.2
ANDROID_SENSOR_NOISE_PROFILE, // double[] | public | HIDL v3.2
ANDROID_SENSOR_PROFILE_HUE_SAT_MAP, // float[] | system | HIDL v3.2
ANDROID_SENSOR_PROFILE_TONE_CURVE, // float[] | system | HIDL v3.2
ANDROID_SENSOR_GREEN_SPLIT, // float | public | HIDL v3.2
ANDROID_SENSOR_TEST_PATTERN_DATA, // int32[] | public | HIDL v3.2
ANDROID_SENSOR_TEST_PATTERN_MODE, // enum | public | HIDL v3.2
ANDROID_SENSOR_AVAILABLE_TEST_PATTERN_MODES, // int32[] | public | HIDL v3.2
ANDROID_SENSOR_ROLLING_SHUTTER_SKEW, // int64 | public | HIDL v3.2
ANDROID_SENSOR_OPTICAL_BLACK_REGIONS, // int32[] | public | HIDL v3.2
ANDROID_SENSOR_DYNAMIC_BLACK_LEVEL, // float[] | public | HIDL v3.2
ANDROID_SENSOR_DYNAMIC_WHITE_LEVEL, // int32 | public | HIDL v3.2
ANDROID_SENSOR_OPAQUE_RAW_SIZE, // int32[] | system | HIDL v3.2
ANDROID_SENSOR_END,
ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE = // int32[] | public | HIDL v3.2
ANDROID_SENSOR_INFO_START,
ANDROID_SENSOR_INFO_SENSITIVITY_RANGE, // int32[] | public | HIDL v3.2
ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT, // enum | public | HIDL v3.2
ANDROID_SENSOR_INFO_EXPOSURE_TIME_RANGE, // int64[] | public | HIDL v3.2
ANDROID_SENSOR_INFO_MAX_FRAME_DURATION, // int64 | public | HIDL v3.2
ANDROID_SENSOR_INFO_PHYSICAL_SIZE, // float[] | public | HIDL v3.2
ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE, // int32[] | public | HIDL v3.2
ANDROID_SENSOR_INFO_WHITE_LEVEL, // int32 | public | HIDL v3.2
ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE, // enum | public | HIDL v3.2
ANDROID_SENSOR_INFO_LENS_SHADING_APPLIED, // enum | public | HIDL v3.2
ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE,
// int32[] | public | HIDL v3.2
ANDROID_SENSOR_INFO_END,
ANDROID_SHADING_MODE = // enum | public | HIDL v3.2
ANDROID_SHADING_START,
ANDROID_SHADING_STRENGTH, // byte | system | HIDL v3.2
ANDROID_SHADING_AVAILABLE_MODES, // byte[] | public | HIDL v3.2
ANDROID_SHADING_END,
ANDROID_STATISTICS_FACE_DETECT_MODE = // enum | public | HIDL v3.2
ANDROID_STATISTICS_START,
ANDROID_STATISTICS_HISTOGRAM_MODE, // enum | system | HIDL v3.2
ANDROID_STATISTICS_SHARPNESS_MAP_MODE, // enum | system | HIDL v3.2
ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE, // enum | public | HIDL v3.2
ANDROID_STATISTICS_FACE_IDS, // int32[] | ndk_public | HIDL v3.2
ANDROID_STATISTICS_FACE_LANDMARKS, // int32[] | ndk_public | HIDL v3.2
ANDROID_STATISTICS_FACE_RECTANGLES, // int32[] | ndk_public | HIDL v3.2
ANDROID_STATISTICS_FACE_SCORES, // byte[] | ndk_public | HIDL v3.2
ANDROID_STATISTICS_HISTOGRAM, // int32[] | system | HIDL v3.2
ANDROID_STATISTICS_SHARPNESS_MAP, // int32[] | system | HIDL v3.2
ANDROID_STATISTICS_LENS_SHADING_CORRECTION_MAP, // byte | java_public | HIDL v3.2
ANDROID_STATISTICS_LENS_SHADING_MAP, // float[] | ndk_public | HIDL v3.2
ANDROID_STATISTICS_PREDICTED_COLOR_GAINS, // float[] | hidden | HIDL v3.2
ANDROID_STATISTICS_PREDICTED_COLOR_TRANSFORM, // rational[] | hidden | HIDL v3.2
ANDROID_STATISTICS_SCENE_FLICKER, // enum | public | HIDL v3.2
ANDROID_STATISTICS_HOT_PIXEL_MAP, // int32[] | public | HIDL v3.2
ANDROID_STATISTICS_LENS_SHADING_MAP_MODE, // enum | public | HIDL v3.2
ANDROID_STATISTICS_OIS_DATA_MODE, // enum | public | HIDL v3.3
ANDROID_STATISTICS_OIS_TIMESTAMPS, // int64[] | ndk_public | HIDL v3.3
ANDROID_STATISTICS_OIS_X_SHIFTS, // float[] | ndk_public | HIDL v3.3
ANDROID_STATISTICS_OIS_Y_SHIFTS, // float[] | ndk_public | HIDL v3.3
ANDROID_STATISTICS_END,
ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES =
// byte[] | public | HIDL v3.2
ANDROID_STATISTICS_INFO_START,
ANDROID_STATISTICS_INFO_HISTOGRAM_BUCKET_COUNT, // int32 | system | HIDL v3.2
ANDROID_STATISTICS_INFO_MAX_FACE_COUNT, // int32 | public | HIDL v3.2
ANDROID_STATISTICS_INFO_MAX_HISTOGRAM_COUNT, // int32 | system | HIDL v3.2
ANDROID_STATISTICS_INFO_MAX_SHARPNESS_MAP_VALUE, // int32 | system | HIDL v3.2
ANDROID_STATISTICS_INFO_SHARPNESS_MAP_SIZE, // int32[] | system | HIDL v3.2
ANDROID_STATISTICS_INFO_AVAILABLE_HOT_PIXEL_MAP_MODES,
// byte[] | public | HIDL v3.2
ANDROID_STATISTICS_INFO_AVAILABLE_LENS_SHADING_MAP_MODES,
// byte[] | public | HIDL v3.2
ANDROID_STATISTICS_INFO_AVAILABLE_OIS_DATA_MODES, // byte[] | public | HIDL v3.3
ANDROID_STATISTICS_INFO_END,
ANDROID_TONEMAP_CURVE_BLUE = // float[] | ndk_public | HIDL v3.2
ANDROID_TONEMAP_START,
ANDROID_TONEMAP_CURVE_GREEN, // float[] | ndk_public | HIDL v3.2
ANDROID_TONEMAP_CURVE_RED, // float[] | ndk_public | HIDL v3.2
ANDROID_TONEMAP_MODE, // enum | public | HIDL v3.2
ANDROID_TONEMAP_MAX_CURVE_POINTS, // int32 | public | HIDL v3.2
ANDROID_TONEMAP_AVAILABLE_TONE_MAP_MODES, // byte[] | public | HIDL v3.2
ANDROID_TONEMAP_GAMMA, // float | public | HIDL v3.2
ANDROID_TONEMAP_PRESET_CURVE, // enum | public | HIDL v3.2
ANDROID_TONEMAP_END,
ANDROID_LED_TRANSMIT = // enum | hidden | HIDL v3.2
ANDROID_LED_START,
ANDROID_LED_AVAILABLE_LEDS, // enum[] | hidden | HIDL v3.2
ANDROID_LED_END,
ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL = // enum | public | HIDL v3.2
ANDROID_INFO_START,
ANDROID_INFO_VERSION, // byte | public | HIDL v3.3
ANDROID_INFO_END,
ANDROID_BLACK_LEVEL_LOCK = // enum | public | HIDL v3.2
ANDROID_BLACK_LEVEL_START,
ANDROID_BLACK_LEVEL_END,
ANDROID_SYNC_FRAME_NUMBER = // enum | ndk_public | HIDL v3.2
ANDROID_SYNC_START,
ANDROID_SYNC_MAX_LATENCY, // enum | public | HIDL v3.2
ANDROID_SYNC_END,
ANDROID_REPROCESS_EFFECTIVE_EXPOSURE_FACTOR = // float | java_public | HIDL v3.2
ANDROID_REPROCESS_START,
ANDROID_REPROCESS_MAX_CAPTURE_STALL, // int32 | java_public | HIDL v3.2
ANDROID_REPROCESS_END,
ANDROID_DEPTH_MAX_DEPTH_SAMPLES = // int32 | system | HIDL v3.2
ANDROID_DEPTH_START,
ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS,
// enum[] | ndk_public | HIDL v3.2
ANDROID_DEPTH_AVAILABLE_DEPTH_MIN_FRAME_DURATIONS,// int64[] | ndk_public | HIDL v3.2
ANDROID_DEPTH_AVAILABLE_DEPTH_STALL_DURATIONS, // int64[] | ndk_public | HIDL v3.2
ANDROID_DEPTH_DEPTH_IS_EXCLUSIVE, // enum | public | HIDL v3.2
ANDROID_DEPTH_END,
ANDROID_LOGICAL_MULTI_CAMERA_PHYSICAL_IDS = // byte[] | hidden | HIDL v3.3
ANDROID_LOGICAL_MULTI_CAMERA_START,
ANDROID_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE, // enum | public | HIDL v3.3
ANDROID_LOGICAL_MULTI_CAMERA_END,
ANDROID_DISTORTION_CORRECTION_MODE = // enum | public | HIDL v3.3
ANDROID_DISTORTION_CORRECTION_START,
ANDROID_DISTORTION_CORRECTION_AVAILABLE_MODES, // byte[] | public | HIDL v3.3
ANDROID_DISTORTION_CORRECTION_END,
} camera_metadata_tag_t;
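/*
 * Illustrative note (not part of the generated header): the _START/_END
 * markers bracket each section, so all tags of a section can be iterated:
 *
 *   for (uint32_t tag = ANDROID_CONTROL_START;
 *        tag < ANDROID_CONTROL_END; tag++) {
 *       // process each control tag
 *   }
 */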
/**
* Enumeration definitions for the various entries that need them
*/
// ANDROID_COLOR_CORRECTION_MODE
typedef enum camera_metadata_enum_android_color_correction_mode {
ANDROID_COLOR_CORRECTION_MODE_TRANSFORM_MATRIX , // HIDL v3.2
ANDROID_COLOR_CORRECTION_MODE_FAST , // HIDL v3.2
ANDROID_COLOR_CORRECTION_MODE_HIGH_QUALITY , // HIDL v3.2
} camera_metadata_enum_android_color_correction_mode_t;
// ANDROID_COLOR_CORRECTION_ABERRATION_MODE
typedef enum camera_metadata_enum_android_color_correction_aberration_mode {
ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF , // HIDL v3.2
ANDROID_COLOR_CORRECTION_ABERRATION_MODE_FAST , // HIDL v3.2
ANDROID_COLOR_CORRECTION_ABERRATION_MODE_HIGH_QUALITY , // HIDL v3.2
} camera_metadata_enum_android_color_correction_aberration_mode_t;
// ANDROID_CONTROL_AE_ANTIBANDING_MODE
typedef enum camera_metadata_enum_android_control_ae_antibanding_mode {
ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF , // HIDL v3.2
ANDROID_CONTROL_AE_ANTIBANDING_MODE_50HZ , // HIDL v3.2
ANDROID_CONTROL_AE_ANTIBANDING_MODE_60HZ , // HIDL v3.2
ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO , // HIDL v3.2
} camera_metadata_enum_android_control_ae_antibanding_mode_t;
// ANDROID_CONTROL_AE_LOCK
typedef enum camera_metadata_enum_android_control_ae_lock {
ANDROID_CONTROL_AE_LOCK_OFF , // HIDL v3.2
ANDROID_CONTROL_AE_LOCK_ON , // HIDL v3.2
} camera_metadata_enum_android_control_ae_lock_t;
// ANDROID_CONTROL_AE_MODE
typedef enum camera_metadata_enum_android_control_ae_mode {
ANDROID_CONTROL_AE_MODE_OFF , // HIDL v3.2
ANDROID_CONTROL_AE_MODE_ON , // HIDL v3.2
ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH , // HIDL v3.2
ANDROID_CONTROL_AE_MODE_ON_ALWAYS_FLASH , // HIDL v3.2
ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE , // HIDL v3.2
ANDROID_CONTROL_AE_MODE_ON_EXTERNAL_FLASH , // HIDL v3.3
} camera_metadata_enum_android_control_ae_mode_t;
// ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER
typedef enum camera_metadata_enum_android_control_ae_precapture_trigger {
ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_IDLE , // HIDL v3.2
ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_START , // HIDL v3.2
ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_CANCEL , // HIDL v3.2
} camera_metadata_enum_android_control_ae_precapture_trigger_t;
// ANDROID_CONTROL_AF_MODE
typedef enum camera_metadata_enum_android_control_af_mode {
ANDROID_CONTROL_AF_MODE_OFF , // HIDL v3.2
ANDROID_CONTROL_AF_MODE_AUTO , // HIDL v3.2
ANDROID_CONTROL_AF_MODE_MACRO , // HIDL v3.2
ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO , // HIDL v3.2
ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE , // HIDL v3.2
ANDROID_CONTROL_AF_MODE_EDOF , // HIDL v3.2
} camera_metadata_enum_android_control_af_mode_t;
// ANDROID_CONTROL_AF_TRIGGER
typedef enum camera_metadata_enum_android_control_af_trigger {
ANDROID_CONTROL_AF_TRIGGER_IDLE , // HIDL v3.2
ANDROID_CONTROL_AF_TRIGGER_START , // HIDL v3.2
ANDROID_CONTROL_AF_TRIGGER_CANCEL , // HIDL v3.2
} camera_metadata_enum_android_control_af_trigger_t;
// ANDROID_CONTROL_AWB_LOCK
typedef enum camera_metadata_enum_android_control_awb_lock {
ANDROID_CONTROL_AWB_LOCK_OFF , // HIDL v3.2
ANDROID_CONTROL_AWB_LOCK_ON , // HIDL v3.2
} camera_metadata_enum_android_control_awb_lock_t;
// ANDROID_CONTROL_AWB_MODE
typedef enum camera_metadata_enum_android_control_awb_mode {
ANDROID_CONTROL_AWB_MODE_OFF , // HIDL v3.2
ANDROID_CONTROL_AWB_MODE_AUTO , // HIDL v3.2
ANDROID_CONTROL_AWB_MODE_INCANDESCENT , // HIDL v3.2
ANDROID_CONTROL_AWB_MODE_FLUORESCENT , // HIDL v3.2
ANDROID_CONTROL_AWB_MODE_WARM_FLUORESCENT , // HIDL v3.2
ANDROID_CONTROL_AWB_MODE_DAYLIGHT , // HIDL v3.2
ANDROID_CONTROL_AWB_MODE_CLOUDY_DAYLIGHT , // HIDL v3.2
ANDROID_CONTROL_AWB_MODE_TWILIGHT , // HIDL v3.2
ANDROID_CONTROL_AWB_MODE_SHADE , // HIDL v3.2
} camera_metadata_enum_android_control_awb_mode_t;
// ANDROID_CONTROL_CAPTURE_INTENT
typedef enum camera_metadata_enum_android_control_capture_intent {
ANDROID_CONTROL_CAPTURE_INTENT_CUSTOM , // HIDL v3.2
ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW , // HIDL v3.2
ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE , // HIDL v3.2
ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_RECORD , // HIDL v3.2
ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_SNAPSHOT , // HIDL v3.2
ANDROID_CONTROL_CAPTURE_INTENT_ZERO_SHUTTER_LAG , // HIDL v3.2
ANDROID_CONTROL_CAPTURE_INTENT_MANUAL , // HIDL v3.2
ANDROID_CONTROL_CAPTURE_INTENT_MOTION_TRACKING , // HIDL v3.3
} camera_metadata_enum_android_control_capture_intent_t;
// ANDROID_CONTROL_EFFECT_MODE
typedef enum camera_metadata_enum_android_control_effect_mode {
ANDROID_CONTROL_EFFECT_MODE_OFF , // HIDL v3.2
ANDROID_CONTROL_EFFECT_MODE_MONO , // HIDL v3.2
ANDROID_CONTROL_EFFECT_MODE_NEGATIVE , // HIDL v3.2
ANDROID_CONTROL_EFFECT_MODE_SOLARIZE , // HIDL v3.2
ANDROID_CONTROL_EFFECT_MODE_SEPIA , // HIDL v3.2
ANDROID_CONTROL_EFFECT_MODE_POSTERIZE , // HIDL v3.2
ANDROID_CONTROL_EFFECT_MODE_WHITEBOARD , // HIDL v3.2
ANDROID_CONTROL_EFFECT_MODE_BLACKBOARD , // HIDL v3.2
ANDROID_CONTROL_EFFECT_MODE_AQUA , // HIDL v3.2
} camera_metadata_enum_android_control_effect_mode_t;
// ANDROID_CONTROL_MODE
typedef enum camera_metadata_enum_android_control_mode {
ANDROID_CONTROL_MODE_OFF , // HIDL v3.2
ANDROID_CONTROL_MODE_AUTO , // HIDL v3.2
ANDROID_CONTROL_MODE_USE_SCENE_MODE , // HIDL v3.2
ANDROID_CONTROL_MODE_OFF_KEEP_STATE , // HIDL v3.2
} camera_metadata_enum_android_control_mode_t;
// ANDROID_CONTROL_SCENE_MODE
typedef enum camera_metadata_enum_android_control_scene_mode {
ANDROID_CONTROL_SCENE_MODE_DISABLED = 0, // HIDL v3.2
ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY , // HIDL v3.2
ANDROID_CONTROL_SCENE_MODE_ACTION , // HIDL v3.2
ANDROID_CONTROL_SCENE_MODE_PORTRAIT , // HIDL v3.2
ANDROID_CONTROL_SCENE_MODE_LANDSCAPE , // HIDL v3.2
ANDROID_CONTROL_SCENE_MODE_NIGHT , // HIDL v3.2
ANDROID_CONTROL_SCENE_MODE_NIGHT_PORTRAIT , // HIDL v3.2
ANDROID_CONTROL_SCENE_MODE_THEATRE , // HIDL v3.2
ANDROID_CONTROL_SCENE_MODE_BEACH , // HIDL v3.2
ANDROID_CONTROL_SCENE_MODE_SNOW , // HIDL v3.2
ANDROID_CONTROL_SCENE_MODE_SUNSET , // HIDL v3.2
ANDROID_CONTROL_SCENE_MODE_STEADYPHOTO , // HIDL v3.2
ANDROID_CONTROL_SCENE_MODE_FIREWORKS , // HIDL v3.2
ANDROID_CONTROL_SCENE_MODE_SPORTS , // HIDL v3.2
ANDROID_CONTROL_SCENE_MODE_PARTY , // HIDL v3.2
ANDROID_CONTROL_SCENE_MODE_CANDLELIGHT , // HIDL v3.2
ANDROID_CONTROL_SCENE_MODE_BARCODE , // HIDL v3.2
ANDROID_CONTROL_SCENE_MODE_HIGH_SPEED_VIDEO , // HIDL v3.2
ANDROID_CONTROL_SCENE_MODE_HDR , // HIDL v3.2
ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY_LOW_LIGHT , // HIDL v3.2
ANDROID_CONTROL_SCENE_MODE_DEVICE_CUSTOM_START = 100, // HIDL v3.2
ANDROID_CONTROL_SCENE_MODE_DEVICE_CUSTOM_END = 127, // HIDL v3.2
} camera_metadata_enum_android_control_scene_mode_t;
// ANDROID_CONTROL_VIDEO_STABILIZATION_MODE
typedef enum camera_metadata_enum_android_control_video_stabilization_mode {
ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF , // HIDL v3.2
ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_ON , // HIDL v3.2
} camera_metadata_enum_android_control_video_stabilization_mode_t;
// ANDROID_CONTROL_AE_STATE
typedef enum camera_metadata_enum_android_control_ae_state {
ANDROID_CONTROL_AE_STATE_INACTIVE , // HIDL v3.2
ANDROID_CONTROL_AE_STATE_SEARCHING , // HIDL v3.2
ANDROID_CONTROL_AE_STATE_CONVERGED , // HIDL v3.2
ANDROID_CONTROL_AE_STATE_LOCKED , // HIDL v3.2
ANDROID_CONTROL_AE_STATE_FLASH_REQUIRED , // HIDL v3.2
ANDROID_CONTROL_AE_STATE_PRECAPTURE , // HIDL v3.2
} camera_metadata_enum_android_control_ae_state_t;
// ANDROID_CONTROL_AF_STATE
typedef enum camera_metadata_enum_android_control_af_state {
ANDROID_CONTROL_AF_STATE_INACTIVE , // HIDL v3.2
ANDROID_CONTROL_AF_STATE_PASSIVE_SCAN , // HIDL v3.2
ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED , // HIDL v3.2
ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN , // HIDL v3.2
ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED , // HIDL v3.2
ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED , // HIDL v3.2
ANDROID_CONTROL_AF_STATE_PASSIVE_UNFOCUSED , // HIDL v3.2
} camera_metadata_enum_android_control_af_state_t;
// ANDROID_CONTROL_AWB_STATE
typedef enum camera_metadata_enum_android_control_awb_state {
ANDROID_CONTROL_AWB_STATE_INACTIVE , // HIDL v3.2
ANDROID_CONTROL_AWB_STATE_SEARCHING , // HIDL v3.2
ANDROID_CONTROL_AWB_STATE_CONVERGED , // HIDL v3.2
ANDROID_CONTROL_AWB_STATE_LOCKED , // HIDL v3.2
} camera_metadata_enum_android_control_awb_state_t;
// ANDROID_CONTROL_AE_LOCK_AVAILABLE
typedef enum camera_metadata_enum_android_control_ae_lock_available {
ANDROID_CONTROL_AE_LOCK_AVAILABLE_FALSE , // HIDL v3.2
ANDROID_CONTROL_AE_LOCK_AVAILABLE_TRUE , // HIDL v3.2
} camera_metadata_enum_android_control_ae_lock_available_t;
// ANDROID_CONTROL_AWB_LOCK_AVAILABLE
typedef enum camera_metadata_enum_android_control_awb_lock_available {
ANDROID_CONTROL_AWB_LOCK_AVAILABLE_FALSE , // HIDL v3.2
ANDROID_CONTROL_AWB_LOCK_AVAILABLE_TRUE , // HIDL v3.2
} camera_metadata_enum_android_control_awb_lock_available_t;
// ANDROID_CONTROL_ENABLE_ZSL
typedef enum camera_metadata_enum_android_control_enable_zsl {
ANDROID_CONTROL_ENABLE_ZSL_FALSE , // HIDL v3.2
ANDROID_CONTROL_ENABLE_ZSL_TRUE , // HIDL v3.2
} camera_metadata_enum_android_control_enable_zsl_t;
// ANDROID_CONTROL_AF_SCENE_CHANGE
typedef enum camera_metadata_enum_android_control_af_scene_change {
ANDROID_CONTROL_AF_SCENE_CHANGE_NOT_DETECTED , // HIDL v3.3
ANDROID_CONTROL_AF_SCENE_CHANGE_DETECTED , // HIDL v3.3
} camera_metadata_enum_android_control_af_scene_change_t;
// ANDROID_DEMOSAIC_MODE
typedef enum camera_metadata_enum_android_demosaic_mode {
ANDROID_DEMOSAIC_MODE_FAST , // HIDL v3.2
ANDROID_DEMOSAIC_MODE_HIGH_QUALITY , // HIDL v3.2
} camera_metadata_enum_android_demosaic_mode_t;
// ANDROID_EDGE_MODE
typedef enum camera_metadata_enum_android_edge_mode {
ANDROID_EDGE_MODE_OFF , // HIDL v3.2
ANDROID_EDGE_MODE_FAST , // HIDL v3.2
ANDROID_EDGE_MODE_HIGH_QUALITY , // HIDL v3.2
ANDROID_EDGE_MODE_ZERO_SHUTTER_LAG , // HIDL v3.2
} camera_metadata_enum_android_edge_mode_t;
// ANDROID_FLASH_MODE
typedef enum camera_metadata_enum_android_flash_mode {
ANDROID_FLASH_MODE_OFF , // HIDL v3.2
ANDROID_FLASH_MODE_SINGLE , // HIDL v3.2
ANDROID_FLASH_MODE_TORCH , // HIDL v3.2
} camera_metadata_enum_android_flash_mode_t;
// ANDROID_FLASH_STATE
typedef enum camera_metadata_enum_android_flash_state {
ANDROID_FLASH_STATE_UNAVAILABLE , // HIDL v3.2
ANDROID_FLASH_STATE_CHARGING , // HIDL v3.2
ANDROID_FLASH_STATE_READY , // HIDL v3.2
ANDROID_FLASH_STATE_FIRED , // HIDL v3.2
ANDROID_FLASH_STATE_PARTIAL , // HIDL v3.2
} camera_metadata_enum_android_flash_state_t;
// ANDROID_FLASH_INFO_AVAILABLE
typedef enum camera_metadata_enum_android_flash_info_available {
ANDROID_FLASH_INFO_AVAILABLE_FALSE , // HIDL v3.2
ANDROID_FLASH_INFO_AVAILABLE_TRUE , // HIDL v3.2
} camera_metadata_enum_android_flash_info_available_t;
// ANDROID_HOT_PIXEL_MODE
typedef enum camera_metadata_enum_android_hot_pixel_mode {
ANDROID_HOT_PIXEL_MODE_OFF , // HIDL v3.2
ANDROID_HOT_PIXEL_MODE_FAST , // HIDL v3.2
ANDROID_HOT_PIXEL_MODE_HIGH_QUALITY , // HIDL v3.2
} camera_metadata_enum_android_hot_pixel_mode_t;
// ANDROID_LENS_OPTICAL_STABILIZATION_MODE
typedef enum camera_metadata_enum_android_lens_optical_stabilization_mode {
ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF , // HIDL v3.2
ANDROID_LENS_OPTICAL_STABILIZATION_MODE_ON , // HIDL v3.2
} camera_metadata_enum_android_lens_optical_stabilization_mode_t;
// ANDROID_LENS_FACING
typedef enum camera_metadata_enum_android_lens_facing {
ANDROID_LENS_FACING_FRONT , // HIDL v3.2
ANDROID_LENS_FACING_BACK , // HIDL v3.2
ANDROID_LENS_FACING_EXTERNAL , // HIDL v3.2
} camera_metadata_enum_android_lens_facing_t;
// ANDROID_LENS_STATE
typedef enum camera_metadata_enum_android_lens_state {
ANDROID_LENS_STATE_STATIONARY , // HIDL v3.2
ANDROID_LENS_STATE_MOVING , // HIDL v3.2
} camera_metadata_enum_android_lens_state_t;
// ANDROID_LENS_POSE_REFERENCE
typedef enum camera_metadata_enum_android_lens_pose_reference {
ANDROID_LENS_POSE_REFERENCE_PRIMARY_CAMERA , // HIDL v3.3
ANDROID_LENS_POSE_REFERENCE_GYROSCOPE , // HIDL v3.3
} camera_metadata_enum_android_lens_pose_reference_t;
// ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION
typedef enum camera_metadata_enum_android_lens_info_focus_distance_calibration {
ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION_UNCALIBRATED , // HIDL v3.2
ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION_APPROXIMATE , // HIDL v3.2
ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION_CALIBRATED , // HIDL v3.2
} camera_metadata_enum_android_lens_info_focus_distance_calibration_t;
// ANDROID_NOISE_REDUCTION_MODE
typedef enum camera_metadata_enum_android_noise_reduction_mode {
ANDROID_NOISE_REDUCTION_MODE_OFF , // HIDL v3.2
ANDROID_NOISE_REDUCTION_MODE_FAST , // HIDL v3.2
ANDROID_NOISE_REDUCTION_MODE_HIGH_QUALITY , // HIDL v3.2
ANDROID_NOISE_REDUCTION_MODE_MINIMAL , // HIDL v3.2
ANDROID_NOISE_REDUCTION_MODE_ZERO_SHUTTER_LAG , // HIDL v3.2
} camera_metadata_enum_android_noise_reduction_mode_t;
// ANDROID_QUIRKS_PARTIAL_RESULT
typedef enum camera_metadata_enum_android_quirks_partial_result {
ANDROID_QUIRKS_PARTIAL_RESULT_FINAL , // HIDL v3.2
ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL , // HIDL v3.2
} camera_metadata_enum_android_quirks_partial_result_t;
// ANDROID_REQUEST_METADATA_MODE
typedef enum camera_metadata_enum_android_request_metadata_mode {
ANDROID_REQUEST_METADATA_MODE_NONE , // HIDL v3.2
ANDROID_REQUEST_METADATA_MODE_FULL , // HIDL v3.2
} camera_metadata_enum_android_request_metadata_mode_t;
// ANDROID_REQUEST_TYPE
typedef enum camera_metadata_enum_android_request_type {
ANDROID_REQUEST_TYPE_CAPTURE , // HIDL v3.2
ANDROID_REQUEST_TYPE_REPROCESS , // HIDL v3.2
} camera_metadata_enum_android_request_type_t;
// ANDROID_REQUEST_AVAILABLE_CAPABILITIES
typedef enum camera_metadata_enum_android_request_available_capabilities {
ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BACKWARD_COMPATIBLE , // HIDL v3.2
ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR , // HIDL v3.2
ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_POST_PROCESSING , // HIDL v3.2
ANDROID_REQUEST_AVAILABLE_CAPABILITIES_RAW , // HIDL v3.2
ANDROID_REQUEST_AVAILABLE_CAPABILITIES_PRIVATE_REPROCESSING , // HIDL v3.2
ANDROID_REQUEST_AVAILABLE_CAPABILITIES_READ_SENSOR_SETTINGS , // HIDL v3.2
ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BURST_CAPTURE , // HIDL v3.2
ANDROID_REQUEST_AVAILABLE_CAPABILITIES_YUV_REPROCESSING , // HIDL v3.2
ANDROID_REQUEST_AVAILABLE_CAPABILITIES_DEPTH_OUTPUT , // HIDL v3.2
ANDROID_REQUEST_AVAILABLE_CAPABILITIES_CONSTRAINED_HIGH_SPEED_VIDEO
, // HIDL v3.2
ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MOTION_TRACKING , // HIDL v3.3
ANDROID_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA , // HIDL v3.3
ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MONOCHROME , // HIDL v3.3
} camera_metadata_enum_android_request_available_capabilities_t;
// ANDROID_SCALER_AVAILABLE_FORMATS
typedef enum camera_metadata_enum_android_scaler_available_formats {
ANDROID_SCALER_AVAILABLE_FORMATS_RAW16 = 0x20, // HIDL v3.2
ANDROID_SCALER_AVAILABLE_FORMATS_RAW_OPAQUE = 0x24, // HIDL v3.2
ANDROID_SCALER_AVAILABLE_FORMATS_YV12 = 0x32315659, // HIDL v3.2
ANDROID_SCALER_AVAILABLE_FORMATS_YCrCb_420_SP = 0x11, // HIDL v3.2
ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED = 0x22, // HIDL v3.2
ANDROID_SCALER_AVAILABLE_FORMATS_YCbCr_420_888 = 0x23, // HIDL v3.2
ANDROID_SCALER_AVAILABLE_FORMATS_BLOB = 0x21, // HIDL v3.2
} camera_metadata_enum_android_scaler_available_formats_t;
// ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS
typedef enum camera_metadata_enum_android_scaler_available_stream_configurations {
ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT , // HIDL v3.2
ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_INPUT , // HIDL v3.2
} camera_metadata_enum_android_scaler_available_stream_configurations_t;
// ANDROID_SCALER_CROPPING_TYPE
typedef enum camera_metadata_enum_android_scaler_cropping_type {
ANDROID_SCALER_CROPPING_TYPE_CENTER_ONLY , // HIDL v3.2
ANDROID_SCALER_CROPPING_TYPE_FREEFORM , // HIDL v3.2
} camera_metadata_enum_android_scaler_cropping_type_t;
// ANDROID_SENSOR_REFERENCE_ILLUMINANT1
typedef enum camera_metadata_enum_android_sensor_reference_illuminant1 {
ANDROID_SENSOR_REFERENCE_ILLUMINANT1_DAYLIGHT = 1, // HIDL v3.2
ANDROID_SENSOR_REFERENCE_ILLUMINANT1_FLUORESCENT = 2, // HIDL v3.2
ANDROID_SENSOR_REFERENCE_ILLUMINANT1_TUNGSTEN = 3, // HIDL v3.2
ANDROID_SENSOR_REFERENCE_ILLUMINANT1_FLASH = 4, // HIDL v3.2
ANDROID_SENSOR_REFERENCE_ILLUMINANT1_FINE_WEATHER = 9, // HIDL v3.2
ANDROID_SENSOR_REFERENCE_ILLUMINANT1_CLOUDY_WEATHER = 10, // HIDL v3.2
ANDROID_SENSOR_REFERENCE_ILLUMINANT1_SHADE = 11, // HIDL v3.2
ANDROID_SENSOR_REFERENCE_ILLUMINANT1_DAYLIGHT_FLUORESCENT = 12, // HIDL v3.2
ANDROID_SENSOR_REFERENCE_ILLUMINANT1_DAY_WHITE_FLUORESCENT = 13, // HIDL v3.2
ANDROID_SENSOR_REFERENCE_ILLUMINANT1_COOL_WHITE_FLUORESCENT = 14, // HIDL v3.2
ANDROID_SENSOR_REFERENCE_ILLUMINANT1_WHITE_FLUORESCENT = 15, // HIDL v3.2
ANDROID_SENSOR_REFERENCE_ILLUMINANT1_STANDARD_A = 17, // HIDL v3.2
ANDROID_SENSOR_REFERENCE_ILLUMINANT1_STANDARD_B = 18, // HIDL v3.2
ANDROID_SENSOR_REFERENCE_ILLUMINANT1_STANDARD_C = 19, // HIDL v3.2
ANDROID_SENSOR_REFERENCE_ILLUMINANT1_D55 = 20, // HIDL v3.2
ANDROID_SENSOR_REFERENCE_ILLUMINANT1_D65 = 21, // HIDL v3.2
ANDROID_SENSOR_REFERENCE_ILLUMINANT1_D75 = 22, // HIDL v3.2
ANDROID_SENSOR_REFERENCE_ILLUMINANT1_D50 = 23, // HIDL v3.2
ANDROID_SENSOR_REFERENCE_ILLUMINANT1_ISO_STUDIO_TUNGSTEN = 24, // HIDL v3.2
} camera_metadata_enum_android_sensor_reference_illuminant1_t;
// ANDROID_SENSOR_TEST_PATTERN_MODE
typedef enum camera_metadata_enum_android_sensor_test_pattern_mode {
ANDROID_SENSOR_TEST_PATTERN_MODE_OFF , // HIDL v3.2
ANDROID_SENSOR_TEST_PATTERN_MODE_SOLID_COLOR , // HIDL v3.2
ANDROID_SENSOR_TEST_PATTERN_MODE_COLOR_BARS , // HIDL v3.2
ANDROID_SENSOR_TEST_PATTERN_MODE_COLOR_BARS_FADE_TO_GRAY , // HIDL v3.2
ANDROID_SENSOR_TEST_PATTERN_MODE_PN9 , // HIDL v3.2
ANDROID_SENSOR_TEST_PATTERN_MODE_CUSTOM1 = 256, // HIDL v3.2
} camera_metadata_enum_android_sensor_test_pattern_mode_t;
// ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT
typedef enum camera_metadata_enum_android_sensor_info_color_filter_arrangement {
ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_RGGB , // HIDL v3.2
ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_GRBG , // HIDL v3.2
ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_GBRG , // HIDL v3.2
ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_BGGR , // HIDL v3.2
ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_RGB , // HIDL v3.2
} camera_metadata_enum_android_sensor_info_color_filter_arrangement_t;
// ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE
typedef enum camera_metadata_enum_android_sensor_info_timestamp_source {
ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE_UNKNOWN , // HIDL v3.2
ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE_REALTIME , // HIDL v3.2
} camera_metadata_enum_android_sensor_info_timestamp_source_t;
// ANDROID_SENSOR_INFO_LENS_SHADING_APPLIED
typedef enum camera_metadata_enum_android_sensor_info_lens_shading_applied {
ANDROID_SENSOR_INFO_LENS_SHADING_APPLIED_FALSE , // HIDL v3.2
ANDROID_SENSOR_INFO_LENS_SHADING_APPLIED_TRUE , // HIDL v3.2
} camera_metadata_enum_android_sensor_info_lens_shading_applied_t;
// ANDROID_SHADING_MODE
typedef enum camera_metadata_enum_android_shading_mode {
ANDROID_SHADING_MODE_OFF , // HIDL v3.2
ANDROID_SHADING_MODE_FAST , // HIDL v3.2
ANDROID_SHADING_MODE_HIGH_QUALITY , // HIDL v3.2
} camera_metadata_enum_android_shading_mode_t;
// ANDROID_STATISTICS_FACE_DETECT_MODE
typedef enum camera_metadata_enum_android_statistics_face_detect_mode {
ANDROID_STATISTICS_FACE_DETECT_MODE_OFF , // HIDL v3.2
ANDROID_STATISTICS_FACE_DETECT_MODE_SIMPLE , // HIDL v3.2
ANDROID_STATISTICS_FACE_DETECT_MODE_FULL , // HIDL v3.2
} camera_metadata_enum_android_statistics_face_detect_mode_t;
// ANDROID_STATISTICS_HISTOGRAM_MODE
typedef enum camera_metadata_enum_android_statistics_histogram_mode {
ANDROID_STATISTICS_HISTOGRAM_MODE_OFF , // HIDL v3.2
ANDROID_STATISTICS_HISTOGRAM_MODE_ON , // HIDL v3.2
} camera_metadata_enum_android_statistics_histogram_mode_t;
// ANDROID_STATISTICS_SHARPNESS_MAP_MODE
typedef enum camera_metadata_enum_android_statistics_sharpness_map_mode {
ANDROID_STATISTICS_SHARPNESS_MAP_MODE_OFF , // HIDL v3.2
ANDROID_STATISTICS_SHARPNESS_MAP_MODE_ON , // HIDL v3.2
} camera_metadata_enum_android_statistics_sharpness_map_mode_t;
// ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE
typedef enum camera_metadata_enum_android_statistics_hot_pixel_map_mode {
ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE_OFF , // HIDL v3.2
ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE_ON , // HIDL v3.2
} camera_metadata_enum_android_statistics_hot_pixel_map_mode_t;
// ANDROID_STATISTICS_SCENE_FLICKER
typedef enum camera_metadata_enum_android_statistics_scene_flicker {
ANDROID_STATISTICS_SCENE_FLICKER_NONE , // HIDL v3.2
ANDROID_STATISTICS_SCENE_FLICKER_50HZ , // HIDL v3.2
ANDROID_STATISTICS_SCENE_FLICKER_60HZ , // HIDL v3.2
} camera_metadata_enum_android_statistics_scene_flicker_t;
// ANDROID_STATISTICS_LENS_SHADING_MAP_MODE
typedef enum camera_metadata_enum_android_statistics_lens_shading_map_mode {
ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF , // HIDL v3.2
ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_ON , // HIDL v3.2
} camera_metadata_enum_android_statistics_lens_shading_map_mode_t;
// ANDROID_STATISTICS_OIS_DATA_MODE
typedef enum camera_metadata_enum_android_statistics_ois_data_mode {
ANDROID_STATISTICS_OIS_DATA_MODE_OFF , // HIDL v3.3
ANDROID_STATISTICS_OIS_DATA_MODE_ON , // HIDL v3.3
} camera_metadata_enum_android_statistics_ois_data_mode_t;
// ANDROID_TONEMAP_MODE
typedef enum camera_metadata_enum_android_tonemap_mode {
ANDROID_TONEMAP_MODE_CONTRAST_CURVE , // HIDL v3.2
ANDROID_TONEMAP_MODE_FAST , // HIDL v3.2
ANDROID_TONEMAP_MODE_HIGH_QUALITY , // HIDL v3.2
ANDROID_TONEMAP_MODE_GAMMA_VALUE , // HIDL v3.2
ANDROID_TONEMAP_MODE_PRESET_CURVE , // HIDL v3.2
} camera_metadata_enum_android_tonemap_mode_t;
// ANDROID_TONEMAP_PRESET_CURVE
typedef enum camera_metadata_enum_android_tonemap_preset_curve {
ANDROID_TONEMAP_PRESET_CURVE_SRGB , // HIDL v3.2
ANDROID_TONEMAP_PRESET_CURVE_REC709 , // HIDL v3.2
} camera_metadata_enum_android_tonemap_preset_curve_t;
// ANDROID_LED_TRANSMIT
typedef enum camera_metadata_enum_android_led_transmit {
ANDROID_LED_TRANSMIT_OFF , // HIDL v3.2
ANDROID_LED_TRANSMIT_ON , // HIDL v3.2
} camera_metadata_enum_android_led_transmit_t;
// ANDROID_LED_AVAILABLE_LEDS
typedef enum camera_metadata_enum_android_led_available_leds {
ANDROID_LED_AVAILABLE_LEDS_TRANSMIT , // HIDL v3.2
} camera_metadata_enum_android_led_available_leds_t;
// ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL
typedef enum camera_metadata_enum_android_info_supported_hardware_level {
ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED , // HIDL v3.2
ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_FULL , // HIDL v3.2
ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LEGACY , // HIDL v3.2
ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_3 , // HIDL v3.2
ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_EXTERNAL , // HIDL v3.3
} camera_metadata_enum_android_info_supported_hardware_level_t;
// ANDROID_BLACK_LEVEL_LOCK
typedef enum camera_metadata_enum_android_black_level_lock {
ANDROID_BLACK_LEVEL_LOCK_OFF , // HIDL v3.2
ANDROID_BLACK_LEVEL_LOCK_ON , // HIDL v3.2
} camera_metadata_enum_android_black_level_lock_t;
// ANDROID_SYNC_FRAME_NUMBER
typedef enum camera_metadata_enum_android_sync_frame_number {
ANDROID_SYNC_FRAME_NUMBER_CONVERGING = -1, // HIDL v3.2
ANDROID_SYNC_FRAME_NUMBER_UNKNOWN = -2, // HIDL v3.2
} camera_metadata_enum_android_sync_frame_number_t;
// ANDROID_SYNC_MAX_LATENCY
typedef enum camera_metadata_enum_android_sync_max_latency {
ANDROID_SYNC_MAX_LATENCY_PER_FRAME_CONTROL = 0, // HIDL v3.2
ANDROID_SYNC_MAX_LATENCY_UNKNOWN = -1, // HIDL v3.2
} camera_metadata_enum_android_sync_max_latency_t;
// ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS
typedef enum camera_metadata_enum_android_depth_available_depth_stream_configurations {
ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS_OUTPUT , // HIDL v3.2
ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS_INPUT , // HIDL v3.2
} camera_metadata_enum_android_depth_available_depth_stream_configurations_t;
// ANDROID_DEPTH_DEPTH_IS_EXCLUSIVE
typedef enum camera_metadata_enum_android_depth_depth_is_exclusive {
ANDROID_DEPTH_DEPTH_IS_EXCLUSIVE_FALSE , // HIDL v3.2
ANDROID_DEPTH_DEPTH_IS_EXCLUSIVE_TRUE , // HIDL v3.2
} camera_metadata_enum_android_depth_depth_is_exclusive_t;
// ANDROID_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE
typedef enum camera_metadata_enum_android_logical_multi_camera_sensor_sync_type {
ANDROID_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE_APPROXIMATE , // HIDL v3.3
ANDROID_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE_CALIBRATED , // HIDL v3.3
} camera_metadata_enum_android_logical_multi_camera_sensor_sync_type_t;
// ANDROID_DISTORTION_CORRECTION_MODE
typedef enum camera_metadata_enum_android_distortion_correction_mode {
ANDROID_DISTORTION_CORRECTION_MODE_OFF , // HIDL v3.3
ANDROID_DISTORTION_CORRECTION_MODE_FAST , // HIDL v3.3
ANDROID_DISTORTION_CORRECTION_MODE_HIGH_QUALITY , // HIDL v3.3
} camera_metadata_enum_android_distortion_correction_mode_t;
|
0 | repos/libcamera/include | repos/libcamera/include/libcamera/framebuffer_allocator.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* FrameBuffer allocator
*/
#pragma once
#include <map>
#include <memory>
#include <vector>
#include <libcamera/base/class.h>
namespace libcamera {
class Camera;
class FrameBuffer;
class Stream;
class FrameBufferAllocator
{
public:
FrameBufferAllocator(std::shared_ptr<Camera> camera);
~FrameBufferAllocator();
int allocate(Stream *stream);
int free(Stream *stream);
bool allocated() const { return !buffers_.empty(); }
const std::vector<std::unique_ptr<FrameBuffer>> &buffers(Stream *stream) const;
private:
LIBCAMERA_DISABLE_COPY(FrameBufferAllocator)
std::shared_ptr<Camera> camera_;
std::map<Stream *, std::vector<std::unique_ptr<FrameBuffer>>> buffers_;
};
} /* namespace libcamera */
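/*
 * Usage sketch (not part of the original header; the camera and stream are
 * assumed to be configured elsewhere, and a negative return from
 * allocate() is taken to indicate an error):
 *
 *   FrameBufferAllocator allocator(camera);
 *   if (allocator.allocate(stream) < 0)
 *       return;
 *   const auto &buffers = allocator.buffers(stream);
 *   std::cout << buffers.size() << " buffers allocated" << std::endl;
 *   allocator.free(stream);
 */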
|
0 | repos/libcamera/include | repos/libcamera/include/libcamera/camera.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2018, Google Inc.
*
* Camera object interface
*/
#pragma once
#include <initializer_list>
#include <memory>
#include <optional>
#include <set>
#include <stdint.h>
#include <string>
#include <libcamera/base/class.h>
#include <libcamera/base/flags.h>
#include <libcamera/base/object.h>
#include <libcamera/base/signal.h>
#include <libcamera/controls.h>
#include <libcamera/geometry.h>
#include <libcamera/orientation.h>
#include <libcamera/request.h>
#include <libcamera/stream.h>
namespace libcamera {
class FrameBuffer;
class FrameBufferAllocator;
class PipelineHandler;
class Request;
class SensorConfiguration
{
public:
unsigned int bitDepth = 0;
Rectangle analogCrop;
struct {
unsigned int binX = 1;
unsigned int binY = 1;
} binning;
struct {
unsigned int xOddInc = 1;
unsigned int xEvenInc = 1;
unsigned int yOddInc = 1;
unsigned int yEvenInc = 1;
} skipping;
Size outputSize;
bool isValid() const;
};
class CameraConfiguration
{
public:
enum Status {
Valid,
Adjusted,
Invalid,
};
using iterator = std::vector<StreamConfiguration>::iterator;
using const_iterator = std::vector<StreamConfiguration>::const_iterator;
virtual ~CameraConfiguration();
void addConfiguration(const StreamConfiguration &cfg);
virtual Status validate() = 0;
StreamConfiguration &at(unsigned int index);
const StreamConfiguration &at(unsigned int index) const;
StreamConfiguration &operator[](unsigned int index)
{
return at(index);
}
const StreamConfiguration &operator[](unsigned int index) const
{
return at(index);
}
iterator begin();
const_iterator begin() const;
iterator end();
const_iterator end() const;
bool empty() const;
std::size_t size() const;
std::optional<SensorConfiguration> sensorConfig;
Orientation orientation;
protected:
CameraConfiguration();
enum class ColorSpaceFlag {
None,
StreamsShareColorSpace,
};
using ColorSpaceFlags = Flags<ColorSpaceFlag>;
Status validateColorSpaces(ColorSpaceFlags flags = ColorSpaceFlag::None);
std::vector<StreamConfiguration> config_;
};
class Camera final : public Object, public std::enable_shared_from_this<Camera>,
public Extensible
{
LIBCAMERA_DECLARE_PRIVATE()
public:
static std::shared_ptr<Camera> create(std::unique_ptr<Private> d,
const std::string &id,
const std::set<Stream *> &streams);
const std::string &id() const;
Signal<Request *, FrameBuffer *> bufferCompleted;
Signal<Request *> requestCompleted;
Signal<> disconnected;
int acquire();
int release();
const ControlInfoMap &controls() const;
const ControlList &properties() const;
const std::set<Stream *> &streams() const;
std::unique_ptr<CameraConfiguration>
generateConfiguration(Span<const StreamRole> roles = {});
std::unique_ptr<CameraConfiguration>
generateConfiguration(std::initializer_list<StreamRole> roles)
{
return generateConfiguration(Span(roles.begin(), roles.end()));
}
int configure(CameraConfiguration *config);
std::unique_ptr<Request> createRequest(uint64_t cookie = 0);
int queueRequest(Request *request);
int start(const ControlList *controls = nullptr);
int stop();
private:
LIBCAMERA_DISABLE_COPY(Camera)
Camera(std::unique_ptr<Private> d, const std::string &id,
const std::set<Stream *> &streams);
~Camera();
friend class PipelineHandler;
void disconnect();
void requestComplete(Request *request);
friend class FrameBufferAllocator;
int exportFrameBuffers(Stream *stream,
std::vector<std::unique_ptr<FrameBuffer>> *buffers);
};
} /* namespace libcamera */
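/*
 * Usage sketch (not part of the original header): the typical capture
 * sequence through the interface above. Error handling is elided, and the
 * shared camera pointer and StreamRole value are assumed to come from the
 * rest of the library.
 *
 *   camera->acquire();
 *   std::unique_ptr<CameraConfiguration> config =
 *           camera->generateConfiguration({ StreamRole::Viewfinder });
 *   config->validate();
 *   camera->configure(config.get());
 *   std::unique_ptr<Request> request = camera->createRequest();
 *   camera->start();
 *   camera->queueRequest(request.get());
 *   // ... wait for requestCompleted signals ...
 *   camera->stop();
 *   camera->release();
 */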
|
0 | repos/libcamera/include | repos/libcamera/include/libcamera/version.h.in | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* Library version information
*
* This file is auto-generated. Do not edit.
*/
#pragma once
#define LIBCAMERA_VERSION_MAJOR @LIBCAMERA_VERSION_MAJOR@
#define LIBCAMERA_VERSION_MINOR @LIBCAMERA_VERSION_MINOR@
#define LIBCAMERA_VERSION_PATCH @LIBCAMERA_VERSION_PATCH@
|
0 | repos/libcamera/include | repos/libcamera/include/libcamera/color_space.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2021, Raspberry Pi Ltd
*
* color space definitions
*/
#pragma once
#include <optional>
#include <string>
namespace libcamera {
class PixelFormat;
class ColorSpace
{
public:
enum class Primaries {
Raw,
Smpte170m,
Rec709,
Rec2020,
};
enum class TransferFunction {
Linear,
Srgb,
Rec709,
};
enum class YcbcrEncoding {
None,
Rec601,
Rec709,
Rec2020,
};
enum class Range {
Full,
Limited,
};
constexpr ColorSpace(Primaries p, TransferFunction t, YcbcrEncoding e, Range r)
: primaries(p), transferFunction(t), ycbcrEncoding(e), range(r)
{
}
static const ColorSpace Raw;
static const ColorSpace Srgb;
static const ColorSpace Sycc;
static const ColorSpace Smpte170m;
static const ColorSpace Rec709;
static const ColorSpace Rec2020;
Primaries primaries;
TransferFunction transferFunction;
YcbcrEncoding ycbcrEncoding;
Range range;
std::string toString() const;
static std::string toString(const std::optional<ColorSpace> &colorSpace);
static std::optional<ColorSpace> fromString(const std::string &str);
bool adjust(PixelFormat format);
};
bool operator==(const ColorSpace &lhs, const ColorSpace &rhs);
static inline bool operator!=(const ColorSpace &lhs, const ColorSpace &rhs)
{
return !(lhs == rhs);
}
} /* namespace libcamera */
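/*
 * Usage sketch (not part of the original header): constructing a color
 * space and round-tripping it through the string helpers.
 *
 *   ColorSpace cs(ColorSpace::Primaries::Rec709,
 *                 ColorSpace::TransferFunction::Rec709,
 *                 ColorSpace::YcbcrEncoding::Rec709,
 *                 ColorSpace::Range::Limited);
 *   std::string s = cs.toString();
 *   std::optional<ColorSpace> parsed = ColorSpace::fromString(s);
 *   bool roundTrips = parsed && *parsed == cs;  // expected to hold
 */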
|
0 | repos/libcamera/include | repos/libcamera/include/libcamera/framebuffer.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* Frame buffer handling
*/
#pragma once
#include <assert.h>
#include <limits>
#include <memory>
#include <stdint.h>
#include <vector>
#include <libcamera/base/class.h>
#include <libcamera/base/shared_fd.h>
#include <libcamera/base/span.h>
namespace libcamera {
class Fence;
class Request;
struct FrameMetadata {
enum Status {
FrameSuccess,
FrameError,
FrameCancelled,
};
struct Plane {
unsigned int bytesused;
};
Status status;
unsigned int sequence;
uint64_t timestamp;
Span<Plane> planes() { return planes_; }
Span<const Plane> planes() const { return planes_; }
private:
friend class FrameBuffer;
std::vector<Plane> planes_;
};
class FrameBuffer : public Extensible
{
LIBCAMERA_DECLARE_PRIVATE()
public:
struct Plane {
static constexpr unsigned int kInvalidOffset = std::numeric_limits<unsigned int>::max();
SharedFD fd;
unsigned int offset = kInvalidOffset;
unsigned int length;
};
FrameBuffer(const std::vector<Plane> &planes, unsigned int cookie = 0);
FrameBuffer(std::unique_ptr<Private> d);
virtual ~FrameBuffer() {}
const std::vector<Plane> &planes() const;
Request *request() const;
const FrameMetadata &metadata() const;
uint64_t cookie() const;
void setCookie(uint64_t cookie);
std::unique_ptr<Fence> releaseFence();
private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(FrameBuffer)
};
} /* namespace libcamera */
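/*
 * Usage sketch (not part of the original header): building a single-plane
 * FrameBuffer from a dmabuf file descriptor. The SharedFD is assumed to
 * wrap a valid fd, and `lengthInBytes` is the plane size.
 *
 *   FrameBuffer::Plane plane;
 *   plane.fd = SharedFD(std::move(fd));
 *   plane.offset = 0;
 *   plane.length = lengthInBytes;
 *   FrameBuffer buffer({ plane });
 *   buffer.setCookie(42);  // opaque application data
 */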
|
0 | repos/libcamera/include | repos/libcamera/include/libcamera/controls.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* Control handling
*/
#pragma once
#include <assert.h>
#include <optional>
#include <set>
#include <stdint.h>
#include <string>
#include <unordered_map>
#include <vector>
#include <libcamera/base/class.h>
#include <libcamera/base/span.h>
#include <libcamera/geometry.h>
namespace libcamera {
class ControlValidator;
enum ControlType {
ControlTypeNone,
ControlTypeBool,
ControlTypeByte,
ControlTypeInteger32,
ControlTypeInteger64,
ControlTypeFloat,
ControlTypeString,
ControlTypeRectangle,
ControlTypeSize,
};
namespace details {
template<typename T>
struct control_type {
};
template<>
struct control_type<void> {
static constexpr ControlType value = ControlTypeNone;
};
template<>
struct control_type<bool> {
static constexpr ControlType value = ControlTypeBool;
};
template<>
struct control_type<uint8_t> {
static constexpr ControlType value = ControlTypeByte;
};
template<>
struct control_type<int32_t> {
static constexpr ControlType value = ControlTypeInteger32;
};
template<>
struct control_type<int64_t> {
static constexpr ControlType value = ControlTypeInteger64;
};
template<>
struct control_type<float> {
static constexpr ControlType value = ControlTypeFloat;
};
template<>
struct control_type<std::string> {
static constexpr ControlType value = ControlTypeString;
};
template<>
struct control_type<Rectangle> {
static constexpr ControlType value = ControlTypeRectangle;
};
template<>
struct control_type<Size> {
static constexpr ControlType value = ControlTypeSize;
};
template<typename T, std::size_t N>
struct control_type<Span<T, N>> : public control_type<std::remove_cv_t<T>> {
};
} /* namespace details */
class ControlValue
{
public:
ControlValue();
#ifndef __DOXYGEN__
template<typename T, std::enable_if_t<!details::is_span<T>::value &&
details::control_type<T>::value &&
!std::is_same<std::string, std::remove_cv_t<T>>::value,
std::nullptr_t> = nullptr>
ControlValue(const T &value)
: type_(ControlTypeNone), numElements_(0)
{
set(details::control_type<std::remove_cv_t<T>>::value, false,
&value, 1, sizeof(T));
}
template<typename T, std::enable_if_t<details::is_span<T>::value ||
std::is_same<std::string, std::remove_cv_t<T>>::value,
std::nullptr_t> = nullptr>
#else
template<typename T>
#endif
ControlValue(const T &value)
: type_(ControlTypeNone), numElements_(0)
{
set(details::control_type<std::remove_cv_t<T>>::value, true,
value.data(), value.size(), sizeof(typename T::value_type));
}
~ControlValue();
ControlValue(const ControlValue &other);
ControlValue &operator=(const ControlValue &other);
ControlType type() const { return type_; }
bool isNone() const { return type_ == ControlTypeNone; }
bool isArray() const { return isArray_; }
std::size_t numElements() const { return numElements_; }
Span<const uint8_t> data() const;
Span<uint8_t> data();
std::string toString() const;
bool operator==(const ControlValue &other) const;
bool operator!=(const ControlValue &other) const
{
return !(*this == other);
}
#ifndef __DOXYGEN__
template<typename T, std::enable_if_t<!details::is_span<T>::value &&
!std::is_same<std::string, std::remove_cv_t<T>>::value,
std::nullptr_t> = nullptr>
T get() const
{
assert(type_ == details::control_type<std::remove_cv_t<T>>::value);
assert(!isArray_);
return *reinterpret_cast<const T *>(data().data());
}
template<typename T, std::enable_if_t<details::is_span<T>::value ||
std::is_same<std::string, std::remove_cv_t<T>>::value,
std::nullptr_t> = nullptr>
#else
template<typename T>
#endif
T get() const
{
assert(type_ == details::control_type<std::remove_cv_t<T>>::value);
assert(isArray_);
using V = typename T::value_type;
const V *value = reinterpret_cast<const V *>(data().data());
return T{ value, numElements_ };
}
#ifndef __DOXYGEN__
template<typename T, std::enable_if_t<!details::is_span<T>::value &&
!std::is_same<std::string, std::remove_cv_t<T>>::value,
std::nullptr_t> = nullptr>
void set(const T &value)
{
set(details::control_type<std::remove_cv_t<T>>::value, false,
reinterpret_cast<const void *>(&value), 1, sizeof(T));
}
template<typename T, std::enable_if_t<details::is_span<T>::value ||
std::is_same<std::string, std::remove_cv_t<T>>::value,
std::nullptr_t> = nullptr>
#else
template<typename T>
#endif
void set(const T &value)
{
set(details::control_type<std::remove_cv_t<T>>::value, true,
value.data(), value.size(), sizeof(typename T::value_type));
}
void reserve(ControlType type, bool isArray = false,
std::size_t numElements = 1);
private:
ControlType type_ : 8;
bool isArray_;
std::size_t numElements_ : 32;
union {
uint64_t value_;
void *storage_;
};
void release();
void set(ControlType type, bool isArray, const void *data,
std::size_t numElements, std::size_t elementSize);
};
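/*
 * Usage sketch (not part of the original header): ControlValue stores both
 * scalars and arrays, distinguished by isArray().
 *
 *   ControlValue scalar(int32_t(42));
 *   int32_t v = scalar.get<int32_t>();  // 42
 *
 *   std::vector<float> gains{ 1.0f, 2.0f };
 *   ControlValue array(Span<const float>(gains));
 *   Span<const float> out = array.get<Span<const float>>();
 */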
class ControlId
{
public:
ControlId(unsigned int id, const std::string &name, ControlType type)
: id_(id), name_(name), type_(type)
{
}
unsigned int id() const { return id_; }
const std::string &name() const { return name_; }
ControlType type() const { return type_; }
private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(ControlId)
unsigned int id_;
std::string name_;
ControlType type_;
};
static inline bool operator==(unsigned int lhs, const ControlId &rhs)
{
return lhs == rhs.id();
}
static inline bool operator!=(unsigned int lhs, const ControlId &rhs)
{
return !(lhs == rhs);
}
static inline bool operator==(const ControlId &lhs, unsigned int rhs)
{
return lhs.id() == rhs;
}
static inline bool operator!=(const ControlId &lhs, unsigned int rhs)
{
return !(lhs == rhs);
}
template<typename T>
class Control : public ControlId
{
public:
using type = T;
Control(unsigned int id, const char *name)
: ControlId(id, name, details::control_type<std::remove_cv_t<T>>::value)
{
}
private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(Control)
};
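/*
 * Illustrative usage sketch (editor's addition): Control<T> ties a numeric
 * ID and a name to a C++ type, so ControlList lookups stay type-safe. The
 * ID and name below are hypothetical; real definitions live in the
 * generated control_ids.h:
 *
 *   const Control<float> Brightness(1, "Brightness");
 */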
class ControlInfo
{
public:
explicit ControlInfo(const ControlValue &min = {},
const ControlValue &max = {},
const ControlValue &def = {});
explicit ControlInfo(Span<const ControlValue> values,
const ControlValue &def = {});
explicit ControlInfo(std::set<bool> values, bool def);
explicit ControlInfo(bool value);
const ControlValue &min() const { return min_; }
const ControlValue &max() const { return max_; }
const ControlValue &def() const { return def_; }
const std::vector<ControlValue> &values() const { return values_; }
std::string toString() const;
bool operator==(const ControlInfo &other) const
{
return min_ == other.min_ && max_ == other.max_;
}
bool operator!=(const ControlInfo &other) const
{
return !(*this == other);
}
private:
ControlValue min_;
ControlValue max_;
ControlValue def_;
std::vector<ControlValue> values_;
};
using ControlIdMap = std::unordered_map<unsigned int, const ControlId *>;
class ControlInfoMap : private std::unordered_map<const ControlId *, ControlInfo>
{
public:
using Map = std::unordered_map<const ControlId *, ControlInfo>;
ControlInfoMap() = default;
ControlInfoMap(const ControlInfoMap &other) = default;
ControlInfoMap(std::initializer_list<Map::value_type> init,
const ControlIdMap &idmap);
ControlInfoMap(Map &&info, const ControlIdMap &idmap);
ControlInfoMap &operator=(const ControlInfoMap &other) = default;
using Map::key_type;
using Map::mapped_type;
using Map::value_type;
using Map::size_type;
using Map::iterator;
using Map::const_iterator;
using Map::begin;
using Map::cbegin;
using Map::end;
using Map::cend;
using Map::at;
using Map::empty;
using Map::size;
using Map::count;
using Map::find;
mapped_type &at(unsigned int key);
const mapped_type &at(unsigned int key) const;
size_type count(unsigned int key) const;
iterator find(unsigned int key);
const_iterator find(unsigned int key) const;
const ControlIdMap &idmap() const { return *idmap_; }
private:
bool validate();
const ControlIdMap *idmap_ = nullptr;
};
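/*
 * Illustrative usage sketch (editor's addition): a ControlInfoMap can be
 * indexed by ControlId pointer or by numeric ID, assuming the camera
 * exposes the control:
 *
 *   const ControlInfoMap &infos = camera->controls();
 *   const ControlInfo &info = infos.at(controls::Brightness.id());
 *   ControlValue min = info.min();
 */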
class ControlList
{
private:
using ControlListMap = std::unordered_map<unsigned int, ControlValue>;
public:
enum class MergePolicy {
KeepExisting = 0,
OverwriteExisting,
};
ControlList();
ControlList(const ControlIdMap &idmap, const ControlValidator *validator = nullptr);
ControlList(const ControlInfoMap &infoMap, const ControlValidator *validator = nullptr);
using iterator = ControlListMap::iterator;
using const_iterator = ControlListMap::const_iterator;
iterator begin() { return controls_.begin(); }
iterator end() { return controls_.end(); }
const_iterator begin() const { return controls_.begin(); }
const_iterator end() const { return controls_.end(); }
bool empty() const { return controls_.empty(); }
std::size_t size() const { return controls_.size(); }
void clear() { controls_.clear(); }
void merge(const ControlList &source, MergePolicy policy = MergePolicy::KeepExisting);
bool contains(unsigned int id) const;
template<typename T>
std::optional<T> get(const Control<T> &ctrl) const
{
const auto entry = controls_.find(ctrl.id());
if (entry == controls_.end())
return std::nullopt;
const ControlValue &val = entry->second;
return val.get<T>();
}
template<typename T, typename V>
void set(const Control<T> &ctrl, const V &value)
{
ControlValue *val = find(ctrl.id());
if (!val)
return;
val->set<T>(value);
}
template<typename T, typename V, size_t Size>
void set(const Control<Span<T, Size>> &ctrl, const std::initializer_list<V> &value)
{
ControlValue *val = find(ctrl.id());
if (!val)
return;
val->set(Span<const typename std::remove_cv_t<V>, Size>{ value.begin(), value.size() });
}
const ControlValue &get(unsigned int id) const;
void set(unsigned int id, const ControlValue &value);
const ControlInfoMap *infoMap() const { return infoMap_; }
const ControlIdMap *idMap() const { return idmap_; }
private:
const ControlValue *find(unsigned int id) const;
ControlValue *find(unsigned int id);
const ControlValidator *validator_;
const ControlIdMap *idmap_;
const ControlInfoMap *infoMap_;
ControlListMap controls_;
};
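/*
 * Illustrative usage sketch (editor's addition), assuming the generated
 * controls::Brightness control and its ControlIdMap:
 *
 *   ControlList list(controls::controls);
 *   list.set(controls::Brightness, 0.5f);
 *   std::optional<float> value = list.get(controls::Brightness);
 *   if (value)
 *           applyBrightness(*value);   (applyBrightness is hypothetical)
 */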
} /* namespace libcamera */
|
0 | repos/libcamera/include | repos/libcamera/include/libcamera/fence.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2021, Google Inc.
*
* Synchronization fence
*/
#pragma once
#include <utility>
#include <libcamera/base/class.h>
#include <libcamera/base/unique_fd.h>
namespace libcamera {
class Fence
{
public:
Fence(UniqueFD fd);
bool isValid() const { return fd_.isValid(); }
const UniqueFD &fd() const { return fd_; }
UniqueFD release() { return std::move(fd_); }
private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(Fence)
UniqueFD fd_;
};
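/*
 * Illustrative usage sketch (editor's addition): a Fence takes ownership
 * of a synchronization file descriptor through UniqueFD; the fd source
 * and waitOn() helper below are hypothetical:
 *
 *   UniqueFD fd(acquireSyncFd());
 *   Fence fence(std::move(fd));
 *   if (fence.isValid())
 *           waitOn(fence.fd().get());
 */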
} /* namespace libcamera */
|
0 | repos/libcamera/include | repos/libcamera/include/libcamera/formats.h.in | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2020, Google Inc.
*
* Formats
*
* This file is auto-generated. Do not edit.
*/
#pragma once
#include <stdint.h>
#include <libcamera/pixel_format.h>
namespace libcamera {
namespace formats {
namespace {
constexpr uint32_t __fourcc(char a, char b, char c, char d)
{
return (static_cast<uint32_t>(a) << 0) |
(static_cast<uint32_t>(b) << 8) |
(static_cast<uint32_t>(c) << 16) |
(static_cast<uint32_t>(d) << 24);
}
constexpr uint64_t __mod(unsigned int vendor, unsigned int mod)
{
return (static_cast<uint64_t>(vendor) << 56) |
(static_cast<uint64_t>(mod) << 0);
}
} /* namespace */
${formats}
} /* namespace formats */
} /* namespace libcamera */
|
0 | repos/libcamera/include | repos/libcamera/include/libcamera/control_ids.h.in | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* Control ID list
*
* This file is auto-generated. Do not edit.
*/
#pragma once
#include <array>
#include <map>
#include <stdint.h>
#include <string>
#include <libcamera/controls.h>
namespace libcamera {
namespace controls {
enum {
${ids}
};
${controls}
extern const ControlIdMap controls;
${vendor_controls}
} /* namespace controls */
} /* namespace libcamera */
|
0 | repos/libcamera/include | repos/libcamera/include/libcamera/stream.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* Video stream for a Camera
*/
#pragma once
#include <map>
#include <memory>
#include <optional>
#include <ostream>
#include <string>
#include <vector>
#include <libcamera/color_space.h>
#include <libcamera/framebuffer.h>
#include <libcamera/geometry.h>
#include <libcamera/pixel_format.h>
namespace libcamera {
class Camera;
class Stream;
class StreamFormats
{
public:
StreamFormats();
StreamFormats(const std::map<PixelFormat, std::vector<SizeRange>> &formats);
std::vector<PixelFormat> pixelformats() const;
std::vector<Size> sizes(const PixelFormat &pixelformat) const;
SizeRange range(const PixelFormat &pixelformat) const;
private:
std::map<PixelFormat, std::vector<SizeRange>> formats_;
};
struct StreamConfiguration {
StreamConfiguration();
StreamConfiguration(const StreamFormats &formats);
PixelFormat pixelFormat;
Size size;
unsigned int stride;
unsigned int frameSize;
unsigned int bufferCount;
std::optional<ColorSpace> colorSpace;
Stream *stream() const { return stream_; }
void setStream(Stream *stream) { stream_ = stream; }
const StreamFormats &formats() const { return formats_; }
std::string toString() const;
private:
Stream *stream_;
StreamFormats formats_;
};
enum class StreamRole {
Raw,
StillCapture,
VideoRecording,
Viewfinder,
};
std::ostream &operator<<(std::ostream &out, StreamRole role);
class Stream
{
public:
Stream();
const StreamConfiguration &configuration() const { return configuration_; }
protected:
friend class Camera;
StreamConfiguration configuration_;
};
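/*
 * Illustrative usage sketch (editor's addition): stream configurations are
 * normally generated by the Camera, adjusted, validated and applied:
 *
 *   std::unique_ptr<CameraConfiguration> config =
 *           camera->generateConfiguration({ StreamRole::Viewfinder });
 *   StreamConfiguration &cfg = config->at(0);
 *   cfg.size = { 1280, 720 };
 *   config->validate();
 *   camera->configure(config.get());
 */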
} /* namespace libcamera */
|
0 | repos/libcamera/include | repos/libcamera/include/libcamera/logging.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* Logging infrastructure
*/
#pragma once
#include <ostream>
namespace libcamera {
enum LoggingTarget {
LoggingTargetNone,
LoggingTargetSyslog,
LoggingTargetFile,
LoggingTargetStream,
};
int logSetFile(const char *path, bool color = false);
int logSetStream(std::ostream *stream, bool color = false);
int logSetTarget(LoggingTarget target);
void logSetLevel(const char *category, const char *level);
} /* namespace libcamera */
|
0 | repos/libcamera/include | repos/libcamera/include/libcamera/transform.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2020, Raspberry Pi Ltd
*
* 2D plane transforms
*/
#pragma once
#include <string>
namespace libcamera {
enum class Orientation;
enum class Transform : int {
Identity = 0,
Rot0 = Identity,
HFlip = 1,
VFlip = 2,
HVFlip = HFlip | VFlip,
Rot180 = HVFlip,
Transpose = 4,
Rot270 = HFlip | Transpose,
Rot90 = VFlip | Transpose,
Rot180Transpose = HFlip | VFlip | Transpose
};
constexpr Transform operator&(Transform t0, Transform t1)
{
return static_cast<Transform>(static_cast<int>(t0) & static_cast<int>(t1));
}
constexpr Transform operator|(Transform t0, Transform t1)
{
return static_cast<Transform>(static_cast<int>(t0) | static_cast<int>(t1));
}
constexpr Transform operator^(Transform t0, Transform t1)
{
return static_cast<Transform>(static_cast<int>(t0) ^ static_cast<int>(t1));
}
constexpr Transform &operator&=(Transform &t0, Transform t1)
{
return t0 = t0 & t1;
}
constexpr Transform &operator|=(Transform &t0, Transform t1)
{
return t0 = t0 | t1;
}
constexpr Transform &operator^=(Transform &t0, Transform t1)
{
return t0 = t0 ^ t1;
}
Transform operator*(Transform t0, Transform t1);
Transform operator-(Transform t);
constexpr bool operator!(Transform t)
{
return t == Transform::Identity;
}
constexpr Transform operator~(Transform t)
{
return static_cast<Transform>(~static_cast<int>(t) & 7);
}
Transform transformFromRotation(int angle, bool *success = nullptr);
Transform operator/(const Orientation &o1, const Orientation &o2);
Orientation operator*(const Orientation &o, const Transform &t);
const char *transformToString(Transform t);
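/*
 * Illustrative usage sketch (editor's addition): transforms form a group
 * under composition; operator- gives the inverse and operator! tests for
 * identity:
 *
 *   Transform t = transformFromRotation(90);       (Transform::Rot90)
 *   Transform back = -t;                           (undoes t)
 *   bool isIdentity = !(t * back);                 (true)
 */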
} /* namespace libcamera */
|
0 | repos/libcamera/include | repos/libcamera/include/libcamera/request.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* Capture request handling
*/
#pragma once
#include <map>
#include <memory>
#include <ostream>
#include <stdint.h>
#include <string>
#include <unordered_set>
#include <libcamera/base/class.h>
#include <libcamera/base/signal.h>
#include <libcamera/controls.h>
#include <libcamera/fence.h>
namespace libcamera {
class Camera;
class CameraControlValidator;
class FrameBuffer;
class Stream;
class Request : public Extensible
{
LIBCAMERA_DECLARE_PRIVATE()
public:
enum Status {
RequestPending,
RequestComplete,
RequestCancelled,
};
enum ReuseFlag {
Default = 0,
ReuseBuffers = (1 << 0),
};
using BufferMap = std::map<const Stream *, FrameBuffer *>;
Request(Camera *camera, uint64_t cookie = 0);
~Request();
void reuse(ReuseFlag flags = Default);
ControlList &controls() { return *controls_; }
ControlList &metadata() { return *metadata_; }
const BufferMap &buffers() const { return bufferMap_; }
int addBuffer(const Stream *stream, FrameBuffer *buffer,
std::unique_ptr<Fence> fence = nullptr);
FrameBuffer *findBuffer(const Stream *stream) const;
uint32_t sequence() const;
uint64_t cookie() const { return cookie_; }
Status status() const { return status_; }
bool hasPendingBuffers() const;
std::string toString() const;
private:
LIBCAMERA_DISABLE_COPY(Request)
ControlList *controls_;
ControlList *metadata_;
BufferMap bufferMap_;
const uint64_t cookie_;
Status status_;
};
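/*
 * Illustrative usage sketch (editor's addition): a typical request
 * lifecycle, assuming a configured camera, a stream and an allocated
 * buffer:
 *
 *   std::unique_ptr<Request> request = camera->createRequest();
 *   request->addBuffer(stream, buffer);
 *   request->controls().set(controls::Brightness, 0.5f);
 *   camera->queueRequest(request.get());
 *   (after requestCompleted fires, inspect metadata() and reuse)
 *   request->reuse(Request::ReuseBuffers);
 */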
std::ostream &operator<<(std::ostream &out, const Request &r);
} /* namespace libcamera */
|
0 | repos/libcamera/include | repos/libcamera/include/libcamera/property_ids.h.in | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* Property ID list
*
* This file is auto-generated. Do not edit.
*/
#pragma once
#include <map>
#include <stdint.h>
#include <string>
#include <libcamera/controls.h>
namespace libcamera {
namespace properties {
enum {
${ids}
};
${controls}
extern const ControlIdMap properties;
${vendor_controls}
} /* namespace properties */
} /* namespace libcamera */
|
0 | repos/libcamera/include | repos/libcamera/include/libcamera/orientation.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2023, Ideas On Board Oy
*
* Image orientation
*/
#pragma once
#include <ostream>
namespace libcamera {
enum class Orientation {
/* EXIF tag 274 starts from '1' */
Rotate0 = 1,
Rotate0Mirror,
Rotate180,
Rotate180Mirror,
Rotate90Mirror,
Rotate270,
Rotate270Mirror,
Rotate90,
};
Orientation orientationFromRotation(int angle, bool *success = nullptr);
std::ostream &operator<<(std::ostream &out, const Orientation &orientation);
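/*
 * Illustrative usage sketch (editor's addition): mapping a rotation angle
 * to the EXIF-style Orientation:
 *
 *   bool ok;
 *   Orientation o = orientationFromRotation(180, &ok);
 *   (ok == true, o == Orientation::Rotate180)
 */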
} /* namespace libcamera */
|
0 | repos/libcamera/include | repos/libcamera/include/libcamera/geometry.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* Geometry-related classes
*/
#pragma once
#include <algorithm>
#include <ostream>
#include <string>
#include <libcamera/base/compiler.h>
namespace libcamera {
class Rectangle;
class Point
{
public:
constexpr Point()
: x(0), y(0)
{
}
constexpr Point(int xpos, int ypos)
: x(xpos), y(ypos)
{
}
int x;
int y;
const std::string toString() const;
constexpr Point operator-() const
{
return { -x, -y };
}
};
bool operator==(const Point &lhs, const Point &rhs);
static inline bool operator!=(const Point &lhs, const Point &rhs)
{
return !(lhs == rhs);
}
std::ostream &operator<<(std::ostream &out, const Point &p);
class Size
{
public:
constexpr Size()
: Size(0, 0)
{
}
constexpr Size(unsigned int w, unsigned int h)
: width(w), height(h)
{
}
unsigned int width;
unsigned int height;
bool isNull() const { return !width && !height; }
const std::string toString() const;
Size &alignDownTo(unsigned int hAlignment, unsigned int vAlignment)
{
width = width / hAlignment * hAlignment;
height = height / vAlignment * vAlignment;
return *this;
}
Size &alignUpTo(unsigned int hAlignment, unsigned int vAlignment)
{
width = (width + hAlignment - 1) / hAlignment * hAlignment;
height = (height + vAlignment - 1) / vAlignment * vAlignment;
return *this;
}
Size &boundTo(const Size &bound)
{
width = std::min(width, bound.width);
height = std::min(height, bound.height);
return *this;
}
Size &expandTo(const Size &expand)
{
width = std::max(width, expand.width);
height = std::max(height, expand.height);
return *this;
}
Size &growBy(const Size &margins)
{
width += margins.width;
height += margins.height;
return *this;
}
Size &shrinkBy(const Size &margins)
{
width = width > margins.width ? width - margins.width : 0;
height = height > margins.height ? height - margins.height : 0;
return *this;
}
__nodiscard constexpr Size alignedDownTo(unsigned int hAlignment,
unsigned int vAlignment) const
{
return {
width / hAlignment * hAlignment,
height / vAlignment * vAlignment
};
}
__nodiscard constexpr Size alignedUpTo(unsigned int hAlignment,
unsigned int vAlignment) const
{
return {
(width + hAlignment - 1) / hAlignment * hAlignment,
(height + vAlignment - 1) / vAlignment * vAlignment
};
}
__nodiscard constexpr Size boundedTo(const Size &bound) const
{
return {
std::min(width, bound.width),
std::min(height, bound.height)
};
}
__nodiscard constexpr Size expandedTo(const Size &expand) const
{
return {
std::max(width, expand.width),
std::max(height, expand.height)
};
}
__nodiscard constexpr Size grownBy(const Size &margins) const
{
return {
width + margins.width,
height + margins.height
};
}
__nodiscard constexpr Size shrunkBy(const Size &margins) const
{
return {
width > margins.width ? width - margins.width : 0,
height > margins.height ? height - margins.height : 0
};
}
__nodiscard Size boundedToAspectRatio(const Size &ratio) const;
__nodiscard Size expandedToAspectRatio(const Size &ratio) const;
__nodiscard Rectangle centeredTo(const Point ¢er) const;
Size operator*(float factor) const;
Size operator/(float factor) const;
Size &operator*=(float factor);
Size &operator/=(float factor);
};
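/*
 * Illustrative usage sketch (editor's addition): the aligned/bounded
 * helpers are constexpr and return a new Size, leaving the original
 * untouched:
 *
 *   constexpr Size raw(1921, 1081);
 *   constexpr Size aligned = raw.alignedDownTo(16, 2);    (1920x1080)
 *   constexpr Size capped = aligned.boundedTo({ 1280, 720 });
 */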
bool operator==(const Size &lhs, const Size &rhs);
bool operator<(const Size &lhs, const Size &rhs);
static inline bool operator!=(const Size &lhs, const Size &rhs)
{
return !(lhs == rhs);
}
static inline bool operator<=(const Size &lhs, const Size &rhs)
{
return lhs < rhs || lhs == rhs;
}
static inline bool operator>(const Size &lhs, const Size &rhs)
{
return !(lhs <= rhs);
}
static inline bool operator>=(const Size &lhs, const Size &rhs)
{
return !(lhs < rhs);
}
std::ostream &operator<<(std::ostream &out, const Size &s);
class SizeRange
{
public:
SizeRange()
: hStep(0), vStep(0)
{
}
SizeRange(const Size &size)
: min(size), max(size), hStep(1), vStep(1)
{
}
SizeRange(const Size &minSize, const Size &maxSize)
: min(minSize), max(maxSize), hStep(1), vStep(1)
{
}
SizeRange(const Size &minSize, const Size &maxSize,
unsigned int hstep, unsigned int vstep)
: min(minSize), max(maxSize), hStep(hstep), vStep(vstep)
{
}
bool contains(const Size &size) const;
std::string toString() const;
Size min;
Size max;
unsigned int hStep;
unsigned int vStep;
};
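/*
 * Illustrative usage sketch (editor's addition): a SizeRange describes the
 * sizes a format supports, optionally constrained to step increments:
 *
 *   SizeRange range({ 320, 240 }, { 1920, 1080 }, 16, 2);
 *   bool ok = range.contains({ 1280, 720 });    (true: within bounds and
 *                                                on the 16x2 grid)
 */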
bool operator==(const SizeRange &lhs, const SizeRange &rhs);
static inline bool operator!=(const SizeRange &lhs, const SizeRange &rhs)
{
return !(lhs == rhs);
}
std::ostream &operator<<(std::ostream &out, const SizeRange &sr);
class Rectangle
{
public:
constexpr Rectangle()
: Rectangle(0, 0, 0, 0)
{
}
constexpr Rectangle(int xpos, int ypos, const Size &size)
: x(xpos), y(ypos), width(size.width), height(size.height)
{
}
constexpr Rectangle(int xpos, int ypos, unsigned int w, unsigned int h)
: x(xpos), y(ypos), width(w), height(h)
{
}
constexpr explicit Rectangle(const Size &size)
: x(0), y(0), width(size.width), height(size.height)
{
}
int x;
int y;
unsigned int width;
unsigned int height;
bool isNull() const { return !width && !height; }
const std::string toString() const;
Point center() const;
Size size() const
{
return { width, height };
}
Point topLeft() const
{
return { x, y };
}
Rectangle &scaleBy(const Size &numerator, const Size &denominator);
Rectangle &translateBy(const Point &point);
__nodiscard Rectangle boundedTo(const Rectangle &bound) const;
__nodiscard Rectangle enclosedIn(const Rectangle &boundary) const;
__nodiscard Rectangle scaledBy(const Size &numerator,
const Size &denominator) const;
__nodiscard Rectangle translatedBy(const Point &point) const;
};
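/*
 * Illustrative usage sketch (editor's addition): scaling a crop rectangle
 * between coordinate systems and clamping it to a boundary:
 *
 *   Rectangle crop(100, 100, 640, 480);
 *   Rectangle scaled = crop.scaledBy({ 1920, 1080 }, { 640, 480 });
 *   Rectangle clamped = scaled.enclosedIn(Rectangle({ 1920, 1080 }));
 */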
bool operator==(const Rectangle &lhs, const Rectangle &rhs);
static inline bool operator!=(const Rectangle &lhs, const Rectangle &rhs)
{
return !(lhs == rhs);
}
std::ostream &operator<<(std::ostream &out, const Rectangle &r);
} /* namespace libcamera */
|
0 | repos/libcamera/include | repos/libcamera/include/libcamera/camera_manager.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2018, Google Inc.
*
* Camera management
*/
#pragma once
#include <memory>
#include <string>
#include <sys/types.h>
#include <vector>
#include <libcamera/base/class.h>
#include <libcamera/base/object.h>
#include <libcamera/base/signal.h>
namespace libcamera {
class Camera;
class CameraManager : public Object, public Extensible
{
LIBCAMERA_DECLARE_PRIVATE()
public:
CameraManager();
~CameraManager();
int start();
void stop();
std::vector<std::shared_ptr<Camera>> cameras() const;
std::shared_ptr<Camera> get(const std::string &id);
static const std::string &version() { return version_; }
Signal<std::shared_ptr<Camera>> cameraAdded;
Signal<std::shared_ptr<Camera>> cameraRemoved;
private:
LIBCAMERA_DISABLE_COPY(CameraManager)
static const std::string version_;
static CameraManager *self_;
};
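/*
 * Illustrative usage sketch (editor's addition): enumerating the cameras
 * known to the manager:
 *
 *   CameraManager manager;
 *   manager.start();
 *   for (const std::shared_ptr<Camera> &camera : manager.cameras())
 *           std::cout << camera->id() << std::endl;
 *   manager.stop();
 */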
} /* namespace libcamera */
|
0 | repos/libcamera/include | repos/libcamera/include/libcamera/pixel_format.h | /* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2019, Google Inc.
*
* libcamera Pixel Format
*/
#pragma once
#include <ostream>
#include <set>
#include <stdint.h>
#include <string>
namespace libcamera {
class PixelFormat
{
public:
constexpr PixelFormat()
: fourcc_(0), modifier_(0)
{
}
explicit constexpr PixelFormat(uint32_t fourcc, uint64_t modifier = 0)
: fourcc_(fourcc), modifier_(modifier)
{
}
bool operator==(const PixelFormat &other) const;
bool operator!=(const PixelFormat &other) const { return !(*this == other); }
bool operator<(const PixelFormat &other) const;
constexpr bool isValid() const { return fourcc_ != 0; }
constexpr operator uint32_t() const { return fourcc_; }
constexpr uint32_t fourcc() const { return fourcc_; }
constexpr uint64_t modifier() const { return modifier_; }
std::string toString() const;
static PixelFormat fromString(const std::string &name);
private:
uint32_t fourcc_;
uint64_t modifier_;
};
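/*
 * Illustrative usage sketch (editor's addition): a PixelFormat pairs a
 * FourCC with an optional modifier; named constants normally come from
 * the generated formats.h:
 *
 *   PixelFormat fmt = PixelFormat::fromString("YUYV");
 *   uint32_t fourcc = fmt.isValid() ? fmt.fourcc() : 0;
 */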
std::ostream &operator<<(std::ostream &out, const PixelFormat &f);
} /* namespace libcamera */
|